query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list, 30 items) | negative_scores (list, 30 items) | document_score (string, 4-10 chars) | document_rank (2 distinct values) |
---|---|---|---|---|---|---|
Returns details about the ship's hull. Resists are integers from 0 to 100. | def hull(self):
capacity = self._getAttribute(Attribute.hullCapacity)
em = self._getAttribute(Attribute.hullEM)
explosive = self._getAttribute(Attribute.hullExplosive)
kinetic = self._getAttribute(Attribute.hullKinetic)
thermal = self._getAttribute(Attribute.hullThermal)
em = 1.0 - em
explosive = 1.0 - explosive
kinetic = 1.0 - kinetic
thermal = 1.0 - thermal
return {
"capacity": capacity,
"resists": {
"em": em,
"explosive": explosive,
"kinetic": kinetic,
"thermal": thermal
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hull_points(self, show_progress):\n if self.points and not self.hull_points:\n self.graham_scan(show_progress)\n print(\"Input: {} points\").format(len(self.points))\n print(\"Convex hull: {} points\").format(len(self.hull_points))\n return self.hull_points",
"def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)",
"def _create_grid(self):\n\n # Check if hull dimensions are sensible for deck-dimensions (rows & lanes)\n grid = np.zeros((self.rows, self.lanes), dtype=np.int)\n if self.rows > self.hull_catheti_length and self.lanes >= self.hull_catheti_length * 2:\n for i in range(self.hull_catheti_length):\n t = (self.hull_catheti_length - i)\n grid[i] += np.hstack([-np.ones(t, dtype=np.int), np.zeros(self.lanes - t, dtype=np.int)])\n grid[i] += np.hstack([np.zeros(self.lanes - t, dtype=np.int), -np.ones(t, dtype=np.int)])\n else:\n logging.getLogger(__name__).error(\"Ship hull does not match grid dimensions -> return without hull\")\n return grid",
"def get_hulls():\n hulls = {}\n hull_attributes = Hull.get_hull_attributes()\n all_parts = part.Part.get_parts()\n for hull_name in hull_attributes.keys():\n default_parts = []\n for part_name in hull_attributes[hull_name]['loadout']:\n default_parts.append(all_parts[part_name])\n new_hull = Hull(hull_name,\n hull_attributes[hull_name]['nmax'],\n hull_attributes[hull_name]['nslots'],\n hull_attributes[hull_name]['bonus_power'],\n hull_attributes[hull_name]['bonus_initiative'],\n hull_attributes[hull_name]['needs_drive'],\n hull_attributes[hull_name]['is_mobile'],\n default_parts)\n hulls[hull_name] = new_hull\n return hulls",
"def get_hull_x(self):\n return [p.x for p in self._hull_points]",
"def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d",
"def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)",
"def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))",
"def extract_hull_from_shapefile ( logger, shape_file ) :\n try :\n logger.info ( \"Extract hull from shapefile \" + str(shape_file) ) \n fIn = ogr.Open ( str(shape_file) )\n layer = fIn.GetLayer(0)\n feature = layer.GetNextFeature() \n geom = feature.GetGeometryRef()\n hull_wkt = str(geom.ExportToWkt())\n return hull_wkt\n except Exception, err:\n logger.critical(\"Extract hull from shapefile failed: ERROR: %s\\n\" % str(err))\n raise",
"def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull",
"def get_hull_attributes():\n hulls = {}\n hull_table = db_parser.get_table_as_dict('hull')\n hull_loadouts = Hull.get_hull_loadouts()\n for row in hull_table:\n # Make a new nested dictionary indexed by this hull's name\n hull_name = row['hull_name']\n hulls[hull_name] = {}\n for key in row.keys():\n if key == 'hull_name':\n pass\n else:\n hulls[hull_name][key] = row[key]\n # Now add this hull's loadout to its dictionary\n hulls[hull_name]['loadout'] = hull_loadouts[hull_name]\n return hulls",
"def convex_hull(L):\r\n CH=list()\r\n if L != []:\r\n P = list(L)\r\n # find the starting point of the algorithm and add it to the convex hull:\r\n ind0 = find_start(P)\r\n CH.append(P.pop(ind0))\r\n # find the next point and add it to the convex hull list CH:\r\n if P != []:\r\n ind1 = next_in_hull(CH[0], np.array([1,0]), P)\r\n CH.append(P.pop(ind1))\r\n # use the hyperplane criterion as function side_points to complete CH:\r\n while P != []:\r\n p = CH[-2]\r\n q = CH[-1]\r\n v = q - p \r\n P = side_points(CH[0], CH[-1] - CH[0], P)\r\n ind = next_in_hull(q, v, P)\r\n if P != []:\r\n CH.append(P.pop(ind))\r\n return CH",
"def visualHull(sils, length):\n result = sils.pop(0).cone(length)\n assert result.pnFacesInPoly()\n i = 0\n for s in sils:\n # print(i)\n assert result.pnFacesInPoly()\n result = result.intersection(s.cone(length), True)\n # result.plot()\n i += 1\n return result",
"def convex_hull(l):\n\tpass",
"def getHull(x_data, y_data):\n xhull = []\n yhull = []\n if len(x_data) == 0 or len(y_data) == 0:\n return xhull, yhull\n xhull.append(x_data[0])\n yhull.append(y_data[0])\n\n lasthullindex = 0\n\n points = len(y_data)\n while lasthullindex < points - 1:\n slope = (y_data[lasthullindex + 1] - y_data[lasthullindex]) / (\n x_data[lasthullindex + 1] - x_data[lasthullindex])\n currenthullindex = lasthullindex + 1\n currenthully = y_data[lasthullindex]\n\n for i in range(currenthullindex + 1, points):\n extrapolation = currenthully + slope * (x_data[i] - x_data[lasthullindex])\n if y_data[i] < extrapolation:\n slope = ((y_data[i] - y_data[lasthullindex]) / (x_data[i] - x_data[lasthullindex]))\n currenthullindex = i\n\n # Store the hull points to be used for a spline fit\n xhull.append(x_data[currenthullindex])\n yhull.append(y_data[currenthullindex])\n lasthullindex = currenthullindex\n\n return xhull, yhull",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]",
"def convex_hull(points):\n points = np.array(points)\n hull = ConvexHull(points)\n return points[hull.vertices, :]",
"def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)",
"def find_pore_hulls(self, pores=None):\n if pores is None:\n pores = self.pores('delaunay')\n else:\n pores = self.filter_by_label(pores, labels='delaunay')\n if 'pore.hull_coords' not in self.keys():\n self['pore.hull_coords'] = sp.ndarray((self.Np, ), dtype=object)\n tvals = self['throat.interconnect'].astype(int)\n am = self.create_adjacency_matrix(data=tvals, sprsfmt='lil')\n for p in pores:\n Ps = am.rows[p]\n if sp.size(Ps) > 0:\n self['pore.hull_coords'][p] = self['pore.coords'][Ps]",
"def give_convex_hull(rand_points):\n return ConvexHull(rand_points)",
"def get_ships():\n return {\"data\": db['ships']}",
"def recursive_hull(self, sublist, side):\n m = len(sublist)\n # Base case\n # Order left list counter-clockwise from right most\n if side == \"l\":\n if m == 2:\n return sublist[::-1], 1\n elif m == 3:\n first_slope = self.calc_slope(sublist[0], sublist[1])\n second_slope = self.calc_slope(sublist[0], sublist[2])\n if first_slope > second_slope:\n return sublist[::-1], 2\n else:\n return [sublist[2], sublist[0], sublist[1]], 1\n else: # Order right list clockwise from left most\n if m == 2:\n return sublist, 1\n elif m == 3:\n first_slope = self.calc_slope(sublist[0], sublist[1])\n second_slope = self.calc_slope(sublist[0], sublist[2])\n if first_slope > second_slope:\n return sublist, 2\n else:\n return [sublist[0], sublist[2], sublist[1]], 1\n\n mid = m // 2\n left, right = sublist[:mid], sublist[mid:]\n l_hull, left_most = self.recursive_hull(left, \"l\") \n r_hull, right_most = self.recursive_hull(right, \"r\")\n \n return self.combine_hulls(l_hull, r_hull, side, left_most, right_most)",
"def get_hull_y(self):\n return [p.y for p in self._hull_points]",
"def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)",
"def display_and_label_hulls(self, hulls, src):\n \n labels = []\n\n for hull in hulls:\n\n angle = 0\n MA = 1\n ma = 1\n try:\n _,(MA,ma),angle = cv.fitEllipse(hull)\n except:\n pass\n cosAngle = np.abs(np.cos(angle*np.pi/180))\n\n # Only human-classify hulls if it is reasonably a vertically oriented rectangle\n # This is a hueristic to not have to waste time clasifying hulls clearly not poles\n if (cosAngle < 1.75) and (cosAngle > 0.85) and (MA/ma < 0.28):\n cpy = src.copy()\n hull_img = cv.polylines(cpy, [hull], True, (0,0,255), 3)\n cv.imshow(\"Hull\", hull_img)\n keycode = cv.waitKey(0)\n if keycode == 49:\n labels.append((hull, 0))\n print(\"Not a Pole\")\n elif keycode == 50:\n labels.append((hull, 1))\n print(\"A Pole!\")\n else:\n raise Exception(\"Unexpected Key Pressed\")\n else:\n labels.append((hull, 0))\n cv.destroyAllWindows()\n return labels",
"def _FindHull(s: List[sg.Point2], p: sg.Point2, q: sg.Point2, hull_points: List[sg.Point2]):\n if len(s) == 0:\n return\n seg = sg.Segment2(p, q)\n c = max(s, key=lambda point: sg.squared_distance(seg, point))\n hull_points.insert(hull_points.index(p) + 1, c)\n s.remove(c)\n s1, s2 = split_points_triangle(s, (p, q, c))\n _FindHull(s1, p, c, hull_points)\n _FindHull(s2, c, q, hull_points)",
"def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):\n\texists = os.path.isdir('plots')\n\tif not exists: \n\t\tos.mkdir('plots')\n\n\n\tfor each in points:\n\t\tplt.plot(each[0],each[1],'o-')\n\n\tif hull_points is not None:\n\t\thull_pt_list = []\n\t\tfor each in hull_points:\n\t\t\thull_pt_list.append(list(each))\n\n\t\thull_pt_arr = np.asarray(hull_pt_list)\n\t\t# print(hull_pt_arr)\n\t\tplt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')\n\t\tfirst_coord = hull_pt_arr[0,:].reshape(1,2)\n\t\tlast_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)\n\n\t\tlast_coord_arr = np.append(first_coord, last_coord, axis = 0)\n\t\tplt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')\n\t\tplt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\\n'+'N='+str(size))\n\t\n\tplt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')\n\tplt.show()",
"def eddy_floyd(points, side=\"\", p_min=[], p_max=[], show=True, save=False, detailed=True):\n# :param points: the points from which to find the convex hull\n# :param side: if \"up\", we care about the points above the line (p_min,p_max), else, below\n# :param p_min: the point on the left of the line (min = min abscissa)\n# :param p_max: the point on the right of the line\n# :param show: if True, the progress in constructing the hull will be plotted on each iteration in a window\n# :param save: if True, the progress in constructing the hull will be saved on each iteration in a .png file\n# :param detailed: if True, even non convex explored polygons are plotted\n if p_min==[] or p_max==[]:\n #Find the point the most on the left (p_min) and the most on the right (p_max)\n p_min,p_max=points[0],points[0]\n for p in points:\n if p[0]<p_min[0]: p_min=p\n if p[0]>p_max[0]: p_max=p\n\n #Divide the points in 2 subproblems (E2=above line, E1=below line)\n #Remark: p_min and p_max are neither in E2 nore in E1 \n E1,E2=[],[]\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: E1+=[p]\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=\"up\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_max]+to_be_returned_2+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_1=eddy_floyd(E1,side=\"down\",p_min=p_min,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_min]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n return [p_max]+to_be_returned_2+[p_min]+to_be_returned_1\n\n \"\"\"End algorithm ?\"\"\"\n #Find if points remain outside the line (either above if up or below if done)\n end=True\n i=0\n while end and i<len(points):\n p=points[i]\n if side==\"up\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>0: end=False \n if side==\"down\" and (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<0: end=False \n i+=1\n\n \"\"\"Intermidiate case, look for the furthest point and divide the pb in 2 pbs\"\"\"\n if not end:\n p_extr,dist=p_min,0\n E1,E2=[],[]\n if side==\"up\":\n #Find the furthest point from the line (above)\n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])>dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems\n for p in points:\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])>0: E2+=[p]\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])>0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n 
scatter_plot(points, [[p_max]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_max]+to_be_returned+[p_min]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n\n if side==\"down\":\n #Find the furthest point from the line (below) \n for p in points:\n if (p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])<dist:\n p_extr,dist=p,(p[1]-p_min[1])*(p_max[0]-p_min[0])-(p_max[1]-p_min[1])*(p[0]-p_min[0])\n \n #Divide the points which are still outside of the 2 lines in 2 subproblems \n for p in points:\n if (p[1]-p_min[1])*(p_extr[0]-p_min[0])-(p_extr[1]-p_min[1])*(p[0]-p_min[0])<0: E2+=[p]\n if (p[1]-p_extr[1])*(p_max[0]-p_extr[0])-(p_max[1]-p_extr[1])*(p[0]-p_extr[0])<0: E1+=[p]\n\n #Go to next step and plot results, the element to return is first divided in 2 parts to plot them seperately\n to_be_returned_2=eddy_floyd(E2,side=side,p_min=p_min,p_max=p_extr,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_2)>0:\n scatter_plot(points, [[p_min]+to_be_returned_2+[p_extr]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned_1=eddy_floyd(E1,side=side,p_min=p_extr,p_max=p_max,show=show,save=save,detailed=detailed)\n if (show or save) and len(to_be_returned_1)>0:\n scatter_plot(points, [[p_extr]+to_be_returned_1+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n to_be_returned=to_be_returned_2+[p_extr]+to_be_returned_1\n if (show or save) and len(to_be_returned)>2:\n scatter_plot(points, [[p_min]+to_be_returned+[p_max]], title=\"eddy-floyd search\", show=show, save=save)\n print\n return to_be_returned \n \n \"\"\"End case\"\"\"\n if end:\n return []\n\n \"\"\"None of these cases\"\"\"\n print(\"ERREUR\")\n return []",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]",
"def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull"
]
| [
"0.6672058",
"0.6176164",
"0.59618896",
"0.58829105",
"0.58734185",
"0.56309587",
"0.5607765",
"0.5572041",
"0.55508894",
"0.55086654",
"0.55001277",
"0.54714054",
"0.54605114",
"0.54518104",
"0.54211336",
"0.5401091",
"0.53931373",
"0.53588456",
"0.5355742",
"0.53469115",
"0.53310204",
"0.52889675",
"0.52738893",
"0.52222836",
"0.5207644",
"0.51994187",
"0.5177103",
"0.51736206",
"0.5142748",
"0.5135309"
]
| 0.7092801 | 0 |
Returns the agility of the ship. | def agility(self):
return self._getAttribute(Attribute.agility) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_agility(self):\n return self.__agility",
"def getAgility(self):\n return self.ag",
"def ship_status(self):\n try:\n if self.warp_drive.is_at_warp:\n return STATUS_AT_WARP\n except AttributeError:\n pass\n if self.hull < self.ship_class.max_hull * -0.5:\n return STATUS_OBLITERATED\n if self.hull <= 0:\n return STATUS_HULK\n try: \n if self.life_support.is_derlict:\n return STATUS_DERLICT\n except AttributeError:\n pass\n try:\n if self.cloak.cloak_is_turned_on:\n return STATUS_CLOAKED if self.cloak.cloak_status == CloakStatus.ACTIVE else STATUS_CLOAK_COMPRIMISED\n except AttributeError:\n pass\n return STATUS_ACTIVE",
"def __call__(self, state: Grid2D.State):\n if self.problem.goals:\n pos = state.agent_position\n return max(\n min([abs(pos[0] - g[0]) for g in self.problem.goals]),\n min([abs(pos[1] - g[1]) for g in self.problem.goals]),\n )\n return INFINITY",
"def ship_collecting_halite_coefficient(ship, gmap):\n ship_cargo_free = constants.MAX_HALITE - ship.halite_amount\n cell = gmap[ship.position].halite_amount or 10 ** -10\n return max((cell - ship_cargo_free) / cell, .1)",
"def get_strength(self):\n return 10 - self.get_agility()",
"def is_ship_alive(ship):\n\n # If and when flag systems become advanced enough **FUN** things can\n # be applied to make this check more hilarious.\n return ship.attributes.hull > 0 # though it can't be < 0",
"def getShip(self):\r\n return self._ship",
"def getShip(self):\n \"return self._ship\"\n if self._ship == None:\n return True\n return False",
"def query(self):\n aggro = -1\n # i hear accessing is thread safe.. but im not sure\n self._lock.acquire()\n try:\n aggro = self._aggro\n finally:\n self._lock.release()\n if aggroMgr.AGGRO_MIN_VALUE <= aggro and aggro < aggroMgr.NEUTRAL:\n return 0\n elif aggroMgr.NEUTRAL <= aggro and aggro < aggroMgr.UPSET:\n return 1\n elif aggroMgr.UPSET <= aggro and aggro < aggroMgr.IRRITATED:\n return 2\n elif aggroMgr.IRRITATED <= aggro and aggro < aggroMgr.FURIOUS:\n return 3\n elif aggroMgr.FURIOUS <= aggro and aggro < aggroMgr.ENRAGED:\n return 4\n elif aggroMgr.ENRAGED <= aggro:\n return 5",
"def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1",
"def check_fleet(self):\n if len(self.ships) > 0:\n response = False\n for ship in self.ships:\n if ship.afloat == True:\n response = True\n return response",
"def __call__(self, state: Grid2D.State):\n if state.agent_position in self.problem.goals:\n return 0\n return 1",
"def check_enemy_fleet(self):\n if len(self.enemyShips) > 0:\n response = False\n for ship in self.enemyShips:\n if ship.afloat == True:\n response = True\n return response",
"def heuristic(self, state: ODState) -> int:\n h = 0\n if self.assigned_goals is None:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, agent.color)\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, state.agents[j].color)\n else:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, self.assigned_goals[agent.id])\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, self.assigned_goals[state.agents[j].id])\n return h",
"def score(self):\n xg, yg = self.goal\n xe, ye = self.empty_node()\n score = len(self.history) + 4*(xg + yg)\n if xg == 1:\n score -= 3\n if ye > 1:\n score += ye - 1\n dx = abs(xe - xg + 1)\n if xg and dx:\n score += dx\n return score",
"def ship_rate(self):\n\t\treturn self.industry * (self.manufacturing.level + 5) / 24.0",
"def canItakeEnemyShip(self, enemyShip):\n if self.assaultStrength/enemyShip.getPersonStrength() > 1.5:\n return 1\n return 0",
"def h_score(self):\n if self.estimated_moves_to_goal is None:\n self.estimated_moves_to_goal = \\\n max(nx.single_source_shortest_path_length(self.graph, self.head_node).items(), key=lambda x: x[1])[1]\n return self.estimated_moves_to_goal",
"def bridge_score(bridge):\n return (bridge_strength(bridge), len(bridge))",
"def move_ok(game, ship):\n cell_halite = game.game_map[ship.position].halite_amount\n\n if ship.is_full:\n return True\n\n # generally ignore low value cells. Note Mining_threshold may be dynamic\n if cell_halite < Mining_threshold:\n return True\n\n dropoffs = get_dropoff_positions(game)\n fuel_status = ship.halite_amount / SHIP_MAX_HALITE\n\n # the amount of halite we'll get if we refuel/mine\n # if ship in a dropoff/shipyard, set fuel to max to the ship departs\n refuel_amount = constants.MAX_HALITE if ship.position in dropoffs else cell_halite * SHIP_MINING_EFFICIENCY\n\n net_mine = (cell_halite * SHIP_MINING_EFFICIENCY) + (cell_halite * SHIP_MINING_EFFICIENCY) * -SHIP_FUEL_COST\n net_move = cell_halite * -SHIP_FUEL_COST + game.get_mining_rate(MINING_RATE_LOOKBACK) * SHIP_MINING_EFFICIENCY\n\n #logging.debug(\"fuel_status: {}\".format(fuel_status))\n #logging.debug(\"refuel_amount: {}\".format(refuel_amount))\n #logging.debug(\"net_mine: {}, net_move: {}\".format(net_mine, net_move))\n\n if ship.status == \"transiting\":\n #if refuel_amount > net_mining_yield and fuel_status < SHIP_REFUEL_THRESHOLD:\n # return True\n pass\n elif ship.status == \"exploring\":\n #if cell_halite < Mining_threshold:\n # return True\n pass\n elif ship.status == \"returning\":\n if net_move > net_mine or fuel_status > SHIP_REFUEL_THRESHOLD:\n return True\n else:\n raise RuntimeError(\"Unknown ship status: {}\".format(ship.status))\n\n return False",
"def get_active_ships_count(self):\n active_ship_count = 0\n for row_index in range(self.rows):\n for column_index in range(self.columns):\n cell = self.grid[row_index][column_index]\n if cell.has_active_ship():\n active_ship_count += 1\n\n return active_ship_count",
"def subAgility(self):\n\t\tself.agility -= 1\n\t\tif self.agility < -10:\n\t\t\tself.agility = -10",
"def heuristic(self):\n game_score = (self.get_game_score(), 0.85)\n road_score = (self.get_longest_road_score(), 0.05)\n steps_score = (self.get_steps_available_score(), 0.05)\n reachable_nodes_score = (self.get_reachable_nodes_score(), 0.05)\n heuristics = [game_score, road_score, steps_score, reachable_nodes_score]\n result = 0\n for score, weight in heuristics:\n result += score * weight\n if DEBUG_PRINT:\n print(f\"Heuristic value for location {self.loc} is {result}\")\n print(f\"\\treachable score: {reachable_nodes_score[0] * reachable_nodes_score[1]}\")\n print(f\"\\tsteps score: {steps_score[0] * steps_score[1]}\")\n print(f\"\\tlongest road score: {road_score[0] * road_score[1]}\")\n print(f\"\\tgame score: {game_score[0] * game_score[1]}\")\n return result",
"def __call__(self, state: Grid2D.State):\n if self.problem.goals:\n pos = state.agent_position\n return min([manhattan_distance_2d(pos, g) for g in self.problem.goals])\n return INFINITY",
"def isGoal( self ):\n if self.numPlayer1 == 0:\n return 1\n if self.numPlayer2 == 0:\n return -1\n return 0\n # d_n1 = 0 # count number(-1)\n # d1 = 0 # count number(1))\n # for row in range( self.n ):\n # for col in range( self.n ):\n # if (self.board[row][col] == -1):\n # d_n1 += 1\n # if (self.board[row][col] == 1):\n # d1 += 1\n # if d_n1 > 0 and d1 > 0:\n # return 0 # //not goal state\n # if d_n1:\n # return -1\n # return 1",
"def __ge__(G, H):\n if isinstance(H, Surreal):\n return G._n >= H.n\n if isinstance(H, numbers.Number):\n return G._n >= H\n if isinstance(H, Game):\n return super().__ge__(H)\n\n return NotImplemented",
"def ility(self) -> str:\n return self.system_quality_attribute()",
"def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n \"\"\n foodPos = currentGameState.getFood().asList() \n foodDist = [] \n ghostStates = currentGameState.getGhostStates() \n capPos = currentGameState.getCapsules() \n currentPos = list(currentGameState.getPacmanPosition()) \n \n for food in foodPos:\n food2pacmanDist = manhattanDistance(food, currentPos)\n foodDist.append(-1*food2pacmanDist)\n \n if not foodDist:\n foodDist.append(0)\n\n return max(foodDist) + currentGameState.getScore()",
"def get_ship_x(self):\n return self.x"
]
| [
"0.71741396",
"0.6533135",
"0.62021345",
"0.58686894",
"0.5845468",
"0.5738765",
"0.57350934",
"0.57290393",
"0.5692616",
"0.56269544",
"0.5558341",
"0.5537012",
"0.5521611",
"0.5497823",
"0.5494352",
"0.5493872",
"0.5486315",
"0.5460528",
"0.5449845",
"0.5426114",
"0.541868",
"0.539396",
"0.5392203",
"0.53881824",
"0.5338038",
"0.53274137",
"0.5326822",
"0.532619",
"0.53157216",
"0.5303565"
]
| 0.72832453 | 0 |
Returns the signature radius of the ship. | def signatureRadius(self):
return self._getAttribute(Attribute.signatureRadius) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_radius(self):\n return self.radius",
"def get_radius(self):\n return self.radius",
"def get_radius(self):\n return self.__radius",
"def get_radius(self):\r\n return self._handler.get_radius()",
"def get_radius(self):\n return self.r",
"def get_radius(self):\n return self.R",
"def getRadius(self):\n return self.radius",
"def getRadius(self):\n return self.__radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self):\n return self._radius",
"def radius(self) -> Union[int, float]:\n return self.proto.radius",
"def radius(self) -> float:\n return math.hypot(self.x, self.y)",
"def radius(self) -> float:\n return self._radius",
"def get_radius(self):\r\n return 1",
"def radius(self) -> int:\n pass",
"def get_radius(self):",
"def radius(self):\n return sqrt(self.radius_square())",
"def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. * ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius",
"def radius(self) -> float:\n return get_radius_from_element(self.element)",
"def radius(self):\n if self._radius is None:\n self._radius = self.stem / 2\n if self._radius * 2 > self.stem:\n raise Exception('Invalid radius. Maximum radius = 2 * stem.')\n return self._radius",
"def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])",
"def diameter(self):\n return self.radius * 2",
"def diameter(self):\n return self.radius * 2",
"def getS(self):\n\t\tsValue = math.sqrt((math.pow(self.x,2)) + (math.pow(self.y,2)))/self.radius\n\t\treturn sValue",
"def diameter(self):\n return 2 * self.radius",
"def get_receptive_field_radius(self):\n raise NotImplementedError()",
"def get_radius(size):\n return (size * 10) - 5"
]
| [
"0.6727663",
"0.6727663",
"0.6722686",
"0.6706938",
"0.6699158",
"0.6697993",
"0.65364933",
"0.65341675",
"0.6533542",
"0.6533542",
"0.6533542",
"0.6533542",
"0.6533542",
"0.6469756",
"0.6409528",
"0.64033717",
"0.63899565",
"0.63726634",
"0.627117",
"0.6267666",
"0.62168795",
"0.60957515",
"0.6064251",
"0.6014325",
"0.6011059",
"0.6011059",
"0.5985965",
"0.59594786",
"0.59558827",
"0.5876103"
]
| 0.8012885 | 0 |
Returns the warp speed of the ship in AU/s. | def warpSpeed(self):
multiplier = self._getAttribute(Attribute.warpSpeedMultiplier)
return multiplier * self.baseWarpSpeed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wind_speed(self):\n return self.flow_field.wind_speed",
"def speed(self):\n return sqrt(self.velocity_x ** 2 + self.velocity_y ** 2)",
"def get_speed(self):\n return self._speed",
"def speed(self) -> float:\n return self._speed",
"def speed(self) -> float:\n return self._speed",
"def speed(self) -> float:\n return linalg.norm(self.velocity)",
"def get_speed(self):\n return self.get_par(\"slew_speed\")",
"def __get_speed(self):\n if self.speed_method == 'average_gap':\n total_gap = 0\n for i in range(1, len(self.__spike_buffer)):\n total_gap += self.__spike_buffer[i] - self.__spike_buffer[i-1]\n\n average_gap = total_gap / len(self.__spike_buffer)\n\n\n if self.__spike_buffer[-1] > timeit.default_timer() - self.cooldown:\n speed = self.tick_length/average_gap\n else:\n speed = 0.00\n\n return speed",
"def speed(self) -> int:\n return self._speed",
"def speed(self) -> int:\n return self._speed",
"def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed",
"def speed(self):\n return self._speed.value",
"def native_wind_gust_speed(self) -> float | None:\n return self._wind_gust_speed",
"def wind_speed(self):\r\n return self._yesterdays_weather.get_average_wind_speed()",
"def get_windtspeed(self):\n return self.read_register(4111, 0, 3)",
"def native_wind_speed(self) -> float:\r\n return self._first_timeserie[\"data\"][\"instant\"][\"details\"][\"wind_speed\"]",
"def speed(self) -> str:\n return self._current_speed",
"def speed(self):\n return self._turtle.speed()",
"def speed(self):\n return self._turtle.speed()",
"def native_wind_speed(self) -> float | None:\n return self._wind_speed",
"def movement_speed(self) -> Union[int, float]:\n return self.type_data.proto.movement_speed",
"def movement_speed(self) -> Union[int, float]:\n return self.type_data.proto.movement_speed",
"def speed(self):\n return self._getAttribute(Attribute.maxVelocity)",
"def speed(self):\n return 1 # speed system not implemented yet",
"def speed(self) -> int:",
"def speed(self) -> int:",
"def GetSpeed(self):\n pass",
"def speed(self) -> str:\n return self._attributes.get(\"current_speed\")",
"def get_speed(self):\n return self.send(self.cmd.GET_ROTATION_ACT)",
"def wind_speed(self):\n names = ['anc_mean_wind_speed']\n return self.sensor.get_with_fallback('wind_speed', names)"
]
| [
"0.6979192",
"0.6968374",
"0.69073606",
"0.68811303",
"0.68811303",
"0.684289",
"0.6759257",
"0.66982025",
"0.6667661",
"0.6667661",
"0.66550624",
"0.66299623",
"0.66035396",
"0.65631795",
"0.6509234",
"0.64987284",
"0.64891005",
"0.64751405",
"0.64751405",
"0.64723843",
"0.64122784",
"0.64122784",
"0.63910484",
"0.6338115",
"0.63325834",
"0.63325834",
"0.6307804",
"0.62866884",
"0.62822616",
"0.62631315"
]
| 0.82695305 | 0 |
Here we drop the first column, which is useless and rename the columns with uppercase. | def cleaning_data():
data.drop(["Unnamed: 0"], axis = 1, inplace = True)
data.columns = map(str.upper, data.columns)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uppercase_all_column_names(df:DataFrame)->DataFrame:\n for col in df.columns:\n df = df.withColumnRenamed(col, col.upper())\n return df",
"def _clean_up_table_column_names(loop_dict):\n \n # Make the column names all lowercase\n # and remove any underscores from the beginning\n for key in loop_dict.keys():\n rename_dict = { x:re.sub(r\"\"\"^_\"\"\", '', x.lower()) for x in loop_dict[key].columns }\n loop_dict[key].rename(columns=rename_dict, inplace=True)\n \n return loop_dict",
"def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)",
"def cleanup_column_names(df, rename_dict={}, do_inplace=True):\n if not rename_dict:\n return df.rename(columns={col: col.lower().replace(' ', '_')\n for col in df.columns.values.tolist()},\n inplace=do_inplace)\n else:\n return df.rename(columns=rename_dict, inplace=do_inplace)",
"def drop_columns(self, col):\n try:\n self.cleaned_data.drop(col, axis=1, inplace=True)\n except Exception as e:\n raise e",
"def clear_columns(prefixlist,datas,style=0, inplace=False):\n func = {0: str.lower,\n 1: str.upper,\n 2: str.capitalize}\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n# ccc=[c.lower() for c in ccc]\n ccc=[func[style](c) for c in ccc]\n\n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas_renamed=datas.rename(columns=d,inplace=inplace)\n new_datas=datas if inplace else datas_renamed\n\n u, i = np.unique(new_datas.columns, return_index=True)\n y=u[np.argsort(i)]\n\n r=[new_datas.columns.tolist().index(rr)for rr in y]\n\n return new_datas.iloc[:, r]",
"def __clean_repeated_columns(self, df, column_type):\n for column in df.columns:\n if column_type in column.lower():\n # Fill main column with data from \"prefix + _\" type column names.\n df[column_type[:-1]].fillna(df[column], inplace=True)\n # Drop the \"prefix + _\" type column names.\n df.drop(column, axis=1, inplace=True)",
"def trim_long_colnames(cat):\n import re\n long_short_pairs = [\n ('GeneralShapeletPsf', 'GSPsf'),\n ('DoubleShapelet', 'DS'),\n ('noSecondDerivative', 'NoSecDer')]\n for long, short in long_short_pairs:\n long_re = re.compile(long)\n for col_name in cat.colnames:\n if long_re.search(col_name):\n new_col_name = long_re.sub(short, col_name)\n cat.rename_column(col_name, new_col_name)",
"def rename_cyano_columns(df): \n cols = list(df.columns)\n for i, col in enumerate(df.columns):\n if col.lower().find(\"pro\") != -1 and col.lower().find(\"abun\") != -1: # prochlorococcus abundance\n cols[i] = PROC\n elif col.lower().find(\"syn\") != -1 and col.lower().find(\"abun\") != -1: # synechococcus abundance\n cols[i] = SYNC\n elif col.lower().find(\"pico\") != -1 and col.lower().find(\"abun\") != -1: # picoeukaryote abundance\n cols[i] = PICO\n df.columns = cols \n return df.columns",
"def _remap_column_names(self, frame):\n\n frame[TransactionColumns.BANK.name] = self.INSTITUTION\n frame[TransactionColumns.ACCOUNT.name] = self.account\n frame.rename(columns=self._FIELD_2_TRANSACTION, inplace=True)\n frame[TransactionColumns.CHECK_NO.name] = None\n return frame",
"def trim_column_names(self, table: Table):\n self._requires_table(table)\n table.columns = [\n column.strip() if isinstance(column, str) else column\n for column in table.columns\n ]",
"def tidy_cols(my_csv):\n return [re.sub(\" \", \"_\", col.lower()) for col in my_csv.columns]",
"def _drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )",
"def reorder_columns(df,first_cols=['']):\n\n last_cols = [col for col in df.columns if col not in first_cols]\n df = df[first_cols+last_cols]\n return(df)",
"def standardize_columns(df):\n rename_pairs = [(from_col, to_col) for (from_col, to_col) in RENAME\n if from_col in df.columns]\n return df.rename(columns=dict(rename_pairs))",
"def remove_spaces_from_columns_names(file_path):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n path_obj = Path(file_path)\n df = get_df_from_data_file(file_path)\n df.columns = df.columns.str.strip()\n delete_data_file(file_path)\n if path_obj.suffix == \".xlsx\":\n df.to_excel(path_obj.as_posix(), index=False)\n elif path_obj.suffix == \".csv\":\n df.to_csv(path_obj.as_posix(), index=False, sep=',')\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())",
"def clear_columns(prefixlist,datas):\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n ccc=[c.lower() for c in ccc]\n \n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas.rename(columns=d,inplace=True)\n\n u, i = np.unique(datas.columns, return_index=True)\n y=u[np.argsort(i)] \n \n r=[datas.columns.tolist().index(rr)for rr in y]\n\n return datas.iloc[:, r]",
"def strip_static_cols(df):\n for col in df.columns:\n if len((df[col]).unique()) == 1:\n df.drop(columns=[col], inplace=True)\n return df",
"def lowercase_columns(df):\n cols = list(df.columns)\n lower_cols = [col.lower() for col in cols]\n df.columns = lower_cols\n return df",
"def lowercase_all_column_names(df:DataFrame)->DataFrame:\n for col in df.columns:\n df = df.withColumnRenamed(col, col.lower())\n return df",
"def rename_columns(self, col):\n try:\n self.cleaned_data.columns = col\n except Exception as e:\n raise e",
"def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name",
"def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)",
"def to_upper(df: DataFrame) -> DataFrame:\r\n return df.apply(lambda x: x.str.upper() if x.dtype == 'object' else x)",
"def smooth_columns(input_frame):\n column_labels = list(input_frame.columns)\n input_frame.columns = [c.lower().replace('_','') for c in column_labels]\n return input_frame",
"def _postprocess_name_columns(\n table: pyarrow.Table, has_header: bool, settings: Settings\n) -> Tuple[pyarrow.Table, List[I18nMessage]]:\n if has_header and table.num_rows > 0:\n names, warnings = gen_unique_clean_colnames_and_warn(\n list((c[0].as_py() if c[0].is_valid else \"\") for c in table.columns),\n settings=settings,\n )\n\n # Remove header (zero-copy: builds new pa.Table with same backing data)\n table = table.slice(1)\n else:\n names = [f\"Column {i + 1}\" for i in range(len(table.columns))]\n warnings = []\n\n return (\n pyarrow.table(dict(zip(names, table.columns))),\n warnings,\n )",
"def test_multicolumn_factorize_columns_empty_suffix():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"], suffix=\"\")\n assert \"a_enc\" not in df.columns\n assert \"c_enc\" not in df.columns\n assert 3 == len(df.columns)",
"def parse_column_names(df):\n cols = set(df.columns.tolist())\n if \"StreamID\" in cols:\n df.rename(columns={\"StreamID\": \"stream_id\"}, inplace=True)\n if \"TimesViewed\" in cols:\n df.rename(columns={\"TimesViewed\": \"times_viewed\"}, inplace=True)\n if \"total_price\" in cols:\n df.rename(columns={\"total_price\": \"price\"}, inplace=True)\n\n return df",
"def _manage_cols(df, drop_list=[], name_dict={}):\n\n for colname in drop_list:\n if colname not in df:\n raise ValueError(f\"Can't drop column '{colname}' - '{colname}' does not exist in dataframe\")\n for colname in list(name_dict.keys()):\n if colname not in df:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' does not exist in dataframe\")\n if colname in drop_list:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' in drop_list\")\n\n column_names = np.setdiff1d(list(df.columns), list(name_dict.keys()))\n lower_columns = [name.lower().replace(' ','').replace('_','') for name in column_names]\n for i in range(len(column_names)):\n name_dict[column_names[i]] = lower_columns[i]\n \n df = df.drop(drop_list, axis=1)\n df = df.rename(columns=name_dict)\n \n return df",
"def __clean_column_names(self, columns):\r\n cols = []\r\n for column in columns:\r\n cols.append(column.replace('\"', ''))\r\n return cols"
]
| [
"0.670421",
"0.6379539",
"0.6352936",
"0.6272925",
"0.62530506",
"0.62204903",
"0.6190862",
"0.6184449",
"0.61701965",
"0.6132059",
"0.6099708",
"0.60867906",
"0.6080659",
"0.60554224",
"0.6015632",
"0.599467",
"0.59028536",
"0.58984935",
"0.5895571",
"0.5886753",
"0.5850004",
"0.58481586",
"0.58473885",
"0.5818931",
"0.579977",
"0.5791403",
"0.5789574",
"0.5765661",
"0.57439595",
"0.5737166"
]
| 0.75469196 | 0 |
In this function, we call the API of NBA Stats thanks to the inspecting console to update our original dataset from Kaggle. The cool point here is that we are gonna update this dataset with info from the current season. Although is not ended yet, it's fine in order to enrich our data. | def calling_api():
url_bio = "https://stats.nba.com/stats/leaguedashplayerbiostats?College=&Conference=&Country=&DateFrom=&DateTo=&Division=&DraftPick=&DraftYear=&GameScope=&GameSegment=&Height=&LastNGames=0&LeagueID=00&Location=&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&Season=2020-21&SeasonSegment=&SeasonType=Regular+Season&ShotClockRange=&StarterBench=&TeamID=0&VsConference=&VsDivision=&Weight="
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "es-ES,es;q=0.9",
"Origin": "https://www.nba.com",
"Referer": "https://www.nba.com/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-site",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
"x-nba-stats-origin": "stats",
"x-nba-stats-token": "true"
}
response_bio = requests.get(url_bio, headers=headers).json()
return response_bio
def updating_api(response_bio):
"""
We just set the columns and the rows from our call and do some modifications in the resulting columns of our DataFrame.
"""
frame_bio = pd.DataFrame(response_bio['resultSets'][0]['rowSet'])
frame_bio.columns = response_bio['resultSets'][0]['headers']
frame_bio.drop(["PLAYER_ID", "TEAM_ID", "PLAYER_HEIGHT_INCHES"], axis=1, inplace=True)
frame_bio["SEASON"] = "2020-21"
return frame_bio | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_player_team_data(self, start_date, end_date = None, \n get_player_data_ind = True, get_team_data_ind = True, \n pre_player_data_dir = None, pre_team_data_dir = None):\n #Converts start and end date from string to datetime\n start_date = datetime.strptime(start_date, '%Y-%m-%d').date()\n \n if end_date:\n end_date = datetime.strptime(end_date, '%Y-%m-%d').date()\n else: \n end_date = start_date\n \n if pre_player_data_dir:\n try: \n #Reads in the existing player dataset to append the scraped data to \n exist_player_data = pd.read_csv(pre_player_data_dir)\n except:\n raise Exception('Cannot read in existing player dataset please ensure the directory is correct')\n \n if pre_team_data_dir:\n try: \n #Reads in the existing player dataset to append the scraped data to \n exist_team_data = pd.read_csv(pre_team_data_dir)\n except:\n raise Exception('Cannot read in existing team dataset please ensure the directory is correct')\n \n delta = end_date - start_date \n #Appends list of date between start and end date to strings\n date_list = []\n for i in range(delta.days + 1):\n day = start_date + timedelta(days=i)\n date_list.append(str(day))\n \n for date in date_list:\n \n print(f'Now scraping data from NBA games on {date}')\n home_team_list = get_list_of_hometeams(self.driver, date)\n\n if len(home_team_list) > 0:\n\n counter = 1 \n\n for home_team in home_team_list:\n \n if counter == 1: \n if get_player_data_ind: \n player_df_full = get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver)\n if get_team_data_ind:\n team_df_full = get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver)\n else:\n if get_player_data_ind: \n player_df_full = player_df_full.append(get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver), ignore_index=True)\n if get_team_data_ind:\n team_df_full = team_df_full.append(get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver), ignore_index=True)\n counter+=1\n \n if pre_player_data_dir:\n exist_player_data = exist_player_data.append(player_df_full)\n exist_player_data.to_csv(pre_player_data_dir, index = False)\n print(f'Updated player dataset will be overwritten in {pre_player_data_dir}')\n \n if pre_team_data_dir:\n exist_team_data = exist_team_data.append(team_df_full)\n exist_team_data.to_csv(pre_team_data_dir, index = False)\n print(f'Updated team dataset will be overwritten in {pre_team_data_dir}')\n \n if pre_player_data_dir and pre_team_data_dir:\n return exist_player_data, exist_team_data\n elif pre_player_data_dir:\n return exist_player_data\n elif pre_team_data_dir:\n return exist_team_data\n elif get_player_data_ind and get_team_data_ind:\n return player_df_full, team_df_full \n elif get_player_data_ind:\n return player_df_full\n elif get_team_data_ind:\n return team_df_full",
"def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df",
"def getSeasonStats(self):\n df_season_agg = self.toSeasonAggFormat()\n\n # Calculate Possessions for each game\n df_season_agg['possessions'] = 0.5 * (df_season_agg['FGA'] + 0.475 * df_season_agg['FTA'] - df_season_agg['OR'] + df_season_agg['TO']) \\\n + 0.5 * (df_season_agg['OppFGA'] + 0.475 * df_season_agg['OppFTA'] - df_season_agg['OppOR'] + df_season_agg['OppTO'])\n\n # Aggregate to Season Summary Level\n season_stats = df_season_agg.groupby(['TeamID', 'Season']).sum()\n\n season_stats = season_stats.rename(columns={'Win':'wins'})\n\n # Season Advanced Stats\n season_stats['o_eff'] = season_stats['Score'] / season_stats['possessions'] * 100\n season_stats['d_eff'] = season_stats['OppScore'] / season_stats['possessions'] * 100\n season_stats['net_eff'] = season_stats['o_eff'] - season_stats['d_eff']\n\n season_stats.drop('DayNum', axis=1, inplace=True)\n season_stats.drop('OppTeamID', axis=1, inplace=True)\n season_stats.drop('rand', axis=1, inplace=True)\n\n return season_stats",
"def getPlayerBaseStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashplayerstats?College=&'\\\r\n 'Conference=&Country=&DateFrom=&DateTo=&Division=&'\\\r\n 'DraftPick=&DraftYear=&GameScope=&GameSegment=&Height=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&Month=0&'\\\r\n 'OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season='+ season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision=&Weight='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n baseStat_df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n return baseStat_df",
"def replace_season(data,season):\n if not SeasonCharts.matchseason(season):\n raise SeasonCharts.SeasonError\n ## Check data format\n if test_rawdata(data):\n for cat,shows in data.items():\n for show in shows: show['season'] = season\n else:\n for show in data: show['season'] = season",
"def test_get_seasonal_statistics___season_to_date(self):\n msg = \"Response status is not 200\"\n response = self.api.get_seasonal_statistics___season_to_date(self.season, self.nhl_season, self.team_id)\n self.assertEqual(response.status_code, 200, msg)",
"def get_seasons_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #make API to get season information, gets back list of season information\n seasons_results = guidebox_season_info(guidebox_id)\n\n for season in seasons_results:\n date = season[\"first_airdate\"]\n year = str(date)[0:4]\n season[\"first_airdate\"] = year\n\n return jsonify(seasons_results)",
"def make_stats_df(self):\n columns = ['DATE', 'TEAM', 'teamId', 'R', 'HR', 'RBI', 'SBN', 'OBP', \n 'K', 'QS', 'SV', 'ERA', 'WHIP', 'MOVES', 'CHANGE']\n trimmed_table = self.parse_soup(self.stats)\n self.df_stats = pd.DataFrame(trimmed_table, columns=columns) \n # load season standings csv from file\n try: # if it already exists\n df = pd.read_csv('2016_stats.csv', index_col=0)\n except OSError:\n df = pd.DataFrame(columns=columns) # if it doesn't already exist\n df = df.append(self.df_stats)\n df.to_csv('2016_stats.csv')",
"def dataLoader(stationDict, startDate, endDate):\n\n # Generate a URL\n url = ('https://waterservices.usgs.gov/nwis/dv/?format=json' +\n # Specify the sites to download\n '&sites=' + stationDict['DatasetExternalID'] +\n # Specify the start date\n '&startDT=' + datetime.strftime( startDate, '%Y-%m-%d' ) +\n #Specify the end data\n '&endDT=' + datetime.strftime( endDate, '%Y-%m-%d' ) +\n # Specify that we want streamflow\n '¶meterCd=00060' +\n # Specify that we want daily means\n '&statCd=00003' +\n # Allow all sites\n '&siteStatus=all' )\n \n # Get the data\n response = requests.get(url)\n\n # Check the status code\n if response.status_code != 200:\n return \n else:\n response = response.json()\n \n # Create a dataframe from the data\n df = pd.DataFrame(response['value']['timeSeries'][0]['values'][0]['value'])\n\n # Set the index to the dateTime index\n df.set_index(pd.DatetimeIndex(pd.to_datetime(df['dateTime'])), inplace = True)\n del df['dateTime'] # Delete the redundant column\n\n # Replace missing data with NaN's\n df['value'].replace(to_replace = '-999999', value = np.nan, inplace = True)\n\n # Convert to numeric\n df['value'] = pd.to_numeric(df['value'])\n \n # Remove any duplicate data in the dataset\n df = df[~df.index.duplicated(keep='last')] # Remove duplicates from the dataset\n df = df[~df.index.isnull()]\n\n # Rename the columns\n df.columns = ['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag', 'USGS | ' + stationDict['DatasetExternalID'] + ' | Streamflow | CFS']\n del df['USGS | ' + stationDict['DatasetExternalID'] + ' | Flag']\n\n # Return the data frame\n return df",
"def update_database(self):\r\n \r\n self.initgta()\r\n \r\n credentials = self.get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\r\n 'version=v4')\r\n service = discovery.build('sheets', 'v4', http=http,\r\n discoveryServiceUrl=discoveryUrl)\r\n\r\n spreadsheetId = '1Avxh9i3ObSn7rf8iA75JBwdmdWRis7FS8WezsO9E6sE'\r\n rangeName = 'Statistiken (Details) #2017!A1:M'\r\n result = service.spreadsheets().values().get(\r\n spreadsheetId=spreadsheetId, range=rangeName, valueRenderOption='FORMULA').execute()\r\n values = result.get('values', [])\r\n \r\n rangeName = 'Statistiken (Details) #2018!A1:M'\r\n result = service.spreadsheets().values().get(\r\n spreadsheetId=spreadsheetId, range=rangeName, valueRenderOption='FORMULA').execute()\r\n values += result.get('values', [])\r\n \r\n players = [None] * 7\r\n points = [None] * 7\r\n game = ''\r\n playDate = 0\r\n raceid = None\r\n playlistid = None\r\n \r\n if not values:\r\n print('No data found.')\r\n else:\r\n for row in values:\r\n #print('%s' % row)\r\n \r\n if(len(row) >= 9 and row[8] != None):\r\n # new playlist\r\n self.insertPlaylist(row[8], row[9])\r\n \r\n if len(row) >= 8 and row[2] != None and row[2] != \"\":\r\n # new race\r\n if raceid == None:\r\n isCanceled = False\r\n if row[1] == None or row[1] == \"\":\r\n isCanceled = True\r\n playlistid = int(self.getCurrentPlaylistId())\r\n racenumber = self.getNextRaceNumber(playlistid)\r\n self.insertRace(playlistid, racenumber, isCanceled)\r\n raceid = self.getCurrentRaceId()\r\n rank = 0\r\n \r\n # new raced\r\n rank = row[1]\r\n player = row[2].lower()\r\n vehicle = row[3]\r\n racetime = row[4]\r\n bestlap = row[5]\r\n money = row[6] \r\n \r\n self.checkPlayer(player)\r\n self.addVehicle(vehicle)\r\n self.insertRaced(raceid, rank, bestlap, racetime, vehicle, player, money, \"\")\r\n rank += 1\r\n else:\r\n playlistid = None\r\n raceid = None\r\n \r\n # 2019 sheet has a different structure\r\n #rangeName = 'Statistiken (Details) #2019!A2:N'\r\n #rangeName2020 = 'Statistiken (Details) #2020!A2:N'\r\n rangeNames = ['Statistiken (Details) #2019!A2:N', 'Statistiken (Details) #2020!A2:N']\r\n result2019 = service.spreadsheets().values().batchGet(\r\n spreadsheetId=spreadsheetId, ranges=rangeNames, valueRenderOption='FORMULA').execute()\r\n rangesV2 = result2019.get('valueRanges', [])\r\n print('{0} ranges retrieved.'.format(len(rangesV2)))\r\n\r\n\r\n for range in rangesV2:\r\n if not range:\r\n print('No data found.')\r\n else:\r\n values = range.get('values', [])\r\n print('{0} values in range retrieved.'.format(len(values)))\r\n for row in values:\r\n #print('Row: %s' % row)\r\n \r\n if(len(row) >= 13 and row[12] != None):\r\n # new playlist\r\n print('%s' % row)\r\n if(len(row) < 14):\r\n print(\"missing date for playlist \" + str(row[12]))\r\n playlistdate = ''\r\n else:\r\n playlistdate = str(row[13])\r\n #print('new v2 playlist ' + str(row[12]) + ' - ' + playlistdate)\r\n self.insertPlaylist(row[12], playlistdate)\r\n\r\n if len(row) >= 12 and row[8] != None:\r\n # new race\r\n #print(\"new race\")\r\n vehicle = row[11]\r\n isCanceled = False\r\n if row[1] == None or row[1] == \"\":\r\n isCanceled = True\r\n playlistid = int(self.getCurrentPlaylistId())\r\n racenumber = self.getNextRaceNumber(playlistid)\r\n self.insertRaceWithMetadata(playlistid, racenumber, isCanceled, row[8], row[9], row[10], row[11])\r\n raceid = self.getCurrentRaceId()\r\n rank = 0\r\n rankmod = 0\r\n\r\n if len(row) >= 7 and 
row[1] != None and row[2] != None and row[2] != \"\":\r\n # new raced\r\n #print(\"new raced\")\r\n rank = row[1]\r\n player = row[2].lower()\r\n wrongvehicle = row[3]\r\n if wrongvehicle == \"x\":\r\n rankmod += 1\r\n rank = 0\r\n racetime = row[4]\r\n bestlap = row[5]\r\n money = row[6]\r\n \r\n self.checkPlayer(player)\r\n self.addVehicle(vehicle)\r\n self.insertRaced(raceid, rank - rankmod, bestlap, racetime, vehicle, player, money, wrongvehicle)\r\n rank += 1\r\n\r\n # close\r\n self.conn.commit()\r\n self.end()",
"def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()",
"def updating_api(response_bio):\n\n frame_bio = pd.DataFrame(response_bio['resultSets'][0]['rowSet'])\n frame_bio.columns = response_bio['resultSets'][0]['headers']\n frame_bio.drop([\"PLAYER_ID\", \"TEAM_ID\", \"PLAYER_HEIGHT_INCHES\"], axis=1, inplace=True)\n frame_bio[\"SEASON\"] = \"2020-21\"\n\n return frame_bio",
"def season_game_logs(team, year):\n\n # Check year value\n if year > 2019 or year < 1950:\n raise ValueError('Year Value Incorrect')\n\n # Rename teams that moved\n team = scrape_utils.rename_team(team, year)\n\n # Get HTML content\n url = 'http://www.basketball-reference.com/teams/%s/%s/gamelog' % (team, year)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n season_stats = soup.find(id='tgl_basic')\n games = season_stats.find('tbody')\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # To find opponent statistics\n opponent = re.compile('^opp_.*$')\n\n # Loop through every game in a team's season\n for game in games.find_all('tr', {'class': None}):\n\n curr_team = {'team': team}\n opp_team = {}\n\n # Loop through each stat\n for stat in game.find_all('td'):\n\n stat_name = stat['data-stat']\n\n # These are opponent stats\n if re.match(opponent, stat_name):\n opp_team[stat_name[4:]] = scrape_utils.stat_parse(stat_name, stat.string)\n else:\n curr_team[stat_name] = scrape_utils.stat_parse(stat_name, stat.string)\n\n # Remove unnecessary information\n del curr_team['game_season']\n del curr_team['x']\n\n # Rename relocated teams\n curr_team['team'] = scrape_utils.rename_team(team)\n opp_team['team'] = scrape_utils.rename_team(opp_team.pop('id'))\n\n # Use the same ID as basketball reference\n result = {'date': datetime.strptime(curr_team.pop('date_game'), \"%Y-%m-%d\"),\n 'season': year,\n 'result': scrape_utils.determine_home_win(curr_team['game_location'], curr_team.pop('game_result')),\n '_id': game.find('a')['href'][-17:-5]}\n\n # Place the teams in the correct spot depending on who is the home team\n if curr_team.pop('game_location') == 0:\n result['home'] = curr_team\n result['away'] = opp_team\n else:\n result['home'] = opp_team\n result['away'] = curr_team\n\n # Insert into database\n m.insert('game_log', result)",
"def update_player_stats(season_start_year, csv_file_name = None,\n single_date = None):\n \n if single_date == None:\n single_date = date.today().strftime(\"%m/%d/%Y\")\n \n season_year_full = convert_season_start_to_season_years(\n starting_year = season_start_year\n )\n \n updates_df = playergamelogs.PlayerGameLogs(\n season_nullable = season_year_full,\n date_from_nullable = single_date,\n date_to_nullable = single_date\n ).player_game_logs.get_data_frame()\n \n # Keep the relevant columns\n updates_df = updates_df[[\n \"SEASON_YEAR\", \"PLAYER_ID\", \"PLAYER_NAME\", \"TEAM_NAME\",\n \"GAME_ID\", \"GAME_DATE\", \"MATCHUP\", \"WL\", \"MIN\",\n \"FGM\", \"FGA\", \"FTM\", \"FTA\", \"FG3M\", \"PTS\", \"REB\",\n \"AST\", \"STL\", \"BLK\", \"TOV\"]]\n \n # Save the data frame to a csv if a file name exists\n if csv_file_name != None:\n # Save to current directory\n csv_path = Path(\"./data/\" + csv_file_name + \".csv\")\n updates_df.to_csv(path_or_buf = csv_path,\n index = False,\n na_rep = 'NULL')\n \n return updates_df",
"def dataframe():\n\t#allows function to access station, gmt, and miss_station functions\n global stations\n\tglobal gmt\n\tglobal miss_station\n\t\n\t#read predictor file\n\tcontrol = cfg.read_yaml('../registry/graphs.yaml')\n\tpred_ctrl = cfg.read_yaml(cfg.get_config_path(control.pred_file))\n\tpredd_ctrl = cfg.read_yaml(cfg.get_config_path(control.predd_file))\n\n\t#get file paths and update database\n\tpredictor_file_path = control.predictor_file_path\n\tpredictand_file_path = control.predictand_file_path\n\tpred_file_id = update(predictor_file_path)\n\tpredd_file_id = update(predictand_file_path)\n\t\n\t#store lead time and date range\n\tlead_time = control.lead_time\n\tdate_range = control.date_range\n\n\t#get info for fetch many dates\n\tstart,end,stride = read_pred.parse_range(date_range)\n\tfcst_ref_time = control.date_range[0].split('-')[0][-2:]\n\t\n\t#initialize list of predictors\n\tpred_list = pred_ctrl.predictors\n\tpredictor = []\n\n\t#loops through predictors to build camps data objects\n\tfor entry_dict in pred_list:\n\t\t#formats metadata\n\t\tpred = create.preprocess_entries(entry_dict, fcst_ref_time)\n\t\t\n\t\t#adds info to metadata that's not currently being stored\n\t\tpred.search_metadata['reserved2'] = lead_time*3600\n pred.search_metadata['file_id'] = pred_file_id\n\t\tpred.search_metadata['reserved1'] = 'vector'\n\n\t\t#build camps data objects for each day\n\t\tvariable = fetch_many_dates(predictor_file_path,start,end,stride,pred.search_metadata)\n\t\t\n\t\t#appends all data to single camps object\n\t\tif variable[0] is not None:\n\t\t\tvar = variable[0]\n\t\t\tarrs = []\n\t\t\tfor i in range(len(variable)):\n\t\t\t\tarrs.append(variable[i].data)\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictor.append(var)\n\n\t#initializes list of predictands\n\tpredd_list = predd_ctrl.predictands\n predictand = []\n\t\n\t#loops through predictands to build camps data objects\n for entry_dict in predd_list:\n\t\t#formats metadata\n \tvertical_coordinate = entry_dict.pop('Vertical_Coordinate')\n\t\tentry_dict['file_id'] = predd_file_id\n\n\t\t#build camps objects for each day\n variable = fetch_many_dates(predictand_file_path,start, end, stride, entry_dict)\n\n\t\t#append all data to single camps object\n var = variable[0]\n arrs = []\n for i in range(len(variable)):\n arrs.append(variable[i].data)\n try:\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictand.append(var)\n\t\texcept:\n\t\t\tprint(\"Can't read \" + variable.name)\n\n\t#getting predictor station and time data\n\tpredr = Dataset(predictor_file_path[0])\n\tpredr_stat = predr.variables['station'][:]\n\tif lead_time == 3:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant'][:]\n\telif lead_time == 6:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant1'][:]\n\telif lead_time == 12:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant2'][:]\n\tpredr.close()\n\n\t#reformatting predictor station and time data\n\tpredr_stations = stations(predr_stat)\n\tpredr_gmt = gmt(predr_time)\n\t\n\t#getting predictand station and time data\n\tpredd = Dataset(predictand_file_path[0])\n\tpredd_stat = predd.variables['station'][:]\n\tpredd_time = predd.variables['OM__resultTime'][:]\n\tpredd.close()\n\t\n\t#reformatting predictand station and time data\n\tpredd_stations = stations(predd_stat)\n\tpredd_gmt = gmt(predd_time)\n\n\t#choosing predictand observations that line up with predictor time\n\thour = (predictor[0].metadata['FcstTime_hour']/3600) + lead_time\n\tdays = len(predd_gmt)/24\n\tpredd_hours = 
[0]*days\n k=0\n for i in range(len(predd_gmt)):\n if i%24 == hour:\n\t\t\tpredd_hours[k]=predd_gmt[i]\n\t\t\tk+=1\n\t\n\t#catches when GFS data doesn't cover the last day of the month\n\tif len(predr_gmt) < len(predd_hours):\n\t\tpredd_hours = predd_hours[:-1]\t\n\t\n\t#find missing stations\n\tmiss_stations = miss_station(predr_stations,predd_stations)\n\tstations = predd_stations\n\t\n\t#station and time array\n\tinfo = [['',''] for k in range(len(predr_gmt)*len(stations))]\n\tfor i in range(len(predr_gmt)):\n\t\tfor j in range(len(stations)):\n\t\t\tk = i*len(stations)+j\n\t\t\tinfo[k][0]=predr_gmt[i]\n\t\t\tinfo[k][1]=stations[j]\n\n\t#create column names\n\tnames = ['']*(len(predictor)+len(predictand)+2)\n\tnames[0]='Time'\n\tnames[1]='Station'\n\n\t#creating array\n\tarr = np.zeros((len(stations)*len(predr_gmt),len(predictor)+len(predictand)))\n\t\n\t#adding predictor data\n\tfor i in range(len(predictor)):\n\t\t#remove lead time and forecast reference time from variable name\n\t\t#and add variable name to column list of final dataframe\n\t\tif lead_time == 12:\n\t\t\tnames[i+2]='GFS_'+predictor[i].get_variable_name()[:-11]\n\t\telse:\n\t\t\t names[i+2]='GFS_'+predictor[i].get_variable_name()[:-10]\n\n\t\t#create pandas dataframe of data and sort alphabetically by station name\n\t\tpredictor[i].data = np.squeeze(predictor[i].data,axis=2)\n\t\tpredictor[i].data = pd.DataFrame(predictor[i].data,columns=predr_stations,index=predr_gmt)\n\t\tpredictor[i].data = predictor[i].data.reindex(sorted(predictor[i].data.columns),axis=1)\n\t\t\n\t\t#remove stations with no predictand data\n\t\tk=0\n\t\ta=miss_stations[:]\n\t\tfor j in predictor[i].data.columns:\n\t\t\tif not a:\n\t\t\t\tbreak\n\t\t\tif j==a[k]:\n\t\t\t\tpredictor[i].data=predictor[i].data.drop(j,axis=1)\n\t\t\t\tdel a[k]\n\t\t\n\t\t#add data to final dataframe\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tarr[k][i] = predictor[i].data.iloc[b][c]\n\n\t#add predictand data\n\tfor i in range(len(predictand)):\n\t\t#removing extra underscore, adding variable name to column names\n\t\tnames[len(predictor)+2+i]='METAR_'+predictand[i].get_variable_name()[:-1]\n\t\n\t\t#resize array and create pandas dataframe\n\t\tpredictand[i].data = np.squeeze(predictand[i].data,axis=2)\n\t\tpredictand[i].data = pd.DataFrame(predictand[i].data,columns=predd_stations,index=predd_hours)\n\t\tpredictand[i].data = predictand[i].data.reindex(sorted(predictand[i].data.columns),axis=1)\n\t\t\n\t\t#remove extra days of predictand data\n\t\tpredictand[i].data = predictand[i].data.iloc[0:len(predr_time),:]\n\t\t\t\n\t\t#add predictand data to array\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tval = predictand[i].data.iloc[b][c]\n\t\t\t\t\n\t\t\t\t#catch metar fill data\n\t\t\t\tif val == 9999: \n\t\t\t\t\tval = np.nan\n\t\t\t\tarr[k][len(predictor)+i]=val\n\t\n\t#add station and time data to array and save as csv\n\tdata = np.concatenate([info,arr],axis = 1)\n\tto_save = pd.DataFrame(data,columns=names)\n\tto_save.to_csv(str(start)+'_'+str(end)+'_'+str(lead_time)+'hrs.csv')",
"def getseason(data):\n ## Season key is the most reliable\n season = data.get(\"season\")\n if season:\n ## Season key is an integer formatted \"YYS\" and is 2000-based (i.e.- 171 == 2017-Winter)\n season = str(season)\n year = int(f\"20{season[:2]}\")\n ## Anichart Season key is 1-indexed\n season = int(season[2]) - 1\n ## This should normally pass; if it consistently does not, we'll have to investigate why\n try: return SeasonCharts.buildseason(season,year)\n ## If something goes wrong, we'll try another method\n except: print(f\"Failed to parse season: {data['season']}\")\n ## Next, we'll iterate over rankings to try to determine the season/year\n ## There are multiple types of rankings based on season, year, and both combined,\n ## so we'll piece it together based on whatever we come across first\n season,year = None,None\n for ranking in data.get(\"rankings\",list()):\n ## Quicker exit (without just making this loop its own function)\n if season and year: continue\n ## We'll ignore stuff we've already gotten and assume that nothing in\n ## rankings contradicts eachother\n if not season:\n ## Defaults to None one way or another if it's not supplied\n season = ranking.get(\"season\")\n if not year: year = ranking.get(\"year\")\n ## Check if we made it\n if season and year:\n ## As above, this should always work out-of-the-box\n try: return SeasonCharts.buildseason(season,year)\n except: print(season,year)\n ## Welp, we're stumped...\n return None",
"def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()",
"def update(self):\n try:\n response = requests.get(\n self.API_URL, headers=self.API_HEADERS, timeout=15)\n except requests.exceptions.RequestException:\n self._logger.exception(\"While fetching data from server\")\n return\n\n if response.status_code != 200:\n self._logger.error(\"API call returned with status %s\",\n response.status_code)\n return\n\n content_type = response.headers.get('Content-Type', 'whatever')\n if content_type != 'text/csv':\n self._logger.error(\"Expected text/csv but got %s\", content_type)\n return\n\n response.encoding = 'UTF8'\n content = response.text\n data = (line for line in content.split('\\n'))\n reader = csv.DictReader(data, delimiter=';', quotechar='\"')\n for row in reader:\n if row.get(\"Station\", None) == self._station_id:\n self.data = {\n self.API_FIELDS.get(k)[0]:\n self.API_FIELDS.get(k)[1](v.replace(',', '.'))\n for k, v in row.items()\n if v and k in self.API_FIELDS\n }\n break",
"def team_season_stats(team):\n\n # Get HTML Content\n url = 'http://www.basketball-reference.com/teams/%s/stats_per_game_totals.html' % team\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Team's yearly stats are displayed in a table\n season_stats = soup.find(id='stats').find('tbody')\n\n # Iterate through each year\n for year in season_stats.find_all('tr', {'class': None}):\n\n season_year = year.find('th').text[0:4]\n season_year = int(season_year) + 1\n season = {'year': season_year}\n\n # Loop through each stat\n for stat in year.find_all('td'):\n season[stat['data-stat']] = stat.string\n\n # Rename relocated teams\n season['team_id'] = scrape_utils.rename_team(season['team_id'])\n season['_id'] = season['team_id'] + '_' + str(season_year)\n\n # Remove unwanted stats\n to_remove = ['rank_team', 'foo', 'g', 'mp_per_g']\n for k in to_remove:\n season.pop(k, None)\n\n # Add to MongoDB\n m.insert('team_season', season)",
"def get_data():\n wga_df = pd.read_csv(os.path.join(MODEL_DIR, 'wga/sculptures/wga_sculpture_periods.csv'), index_col=0)\n wikiart_df = pd.read_csv(os.path.join(MODEL_DIR, 'wikiart/sculptures/wikiart_sculpture_periods.csv'), index_col=0)\n nga_df = pd.read_csv(os.path.join(MODEL_DIR, 'nga/sculptures/nga_sculpture_periods.csv'), index_col=0)\n\n ######## Fix name for WGA and WikiaRt ###########\n wga_df['Author'] = wga_df.apply(lambda x: fix_name_wga(x['Author']), axis=1)\n wikiart_df['Author'] = wikiart_df.apply(lambda x: fix_name_wiki(x['Author']), axis=1)\n nga_df['Author'] = nga_df.apply(lambda x: fix_name_nga(x['Author']), axis=1)\n\n df = pd.concat([wga_df, wikiart_df, nga_df], ignore_index=True, sort=True)\n\n df['Author_Fixed'] = df.apply(lambda x: fix_text(x['Author']), axis=1)\n df['title_fixed'] = df.apply(lambda x: fix_text(x['title']), axis=1)\n\n periods = [\"BAROQUE\", \"EARLY RENAISSANCE\", \"MEDIEVAL\", \"NEOCLASSICISM\", \"HIGH RENAISSANCE\", \"MINIMALISM\", \"REALISM\",\n \"IMPRESSIONISM\", \"ROCOCO\", \"SURREALISM\", \"MANNERISM\", \"ROMANTICISM\",\n ]\n df['Period'] = df.apply(lambda row: row['Period'].upper(), axis=1)\n\n # Get Desired Periods\n df['Period'] = df.apply(lambda x: \"SURREALISM\" if \"SURREALISM\" in x['Period'] else x['Period'], axis=1)\n df = df[(df['Period'].isin(periods))]\n df = df.sort_values(['Author_Fixed', 'title_fixed'])\n\n #print(\"Combined Drop Rows:\", df.shape[0] - df.drop_duplicates(subset=['Author_Fixed', 'title_fixed']).shape[0])\n\n df = df.drop_duplicates(subset=['Author_Fixed', 'title_fixed'], keep='last')\n\n # Drop Duplicate Sculptures\n df = df[~df['file'].isin(DUP_SCULPTURES)].reset_index(drop=True)\n\n #print(df['Period'].value_counts())\n\n return df",
"def add_dreamteam_count(df, raw_seasons):\n df17 = df.loc[df['season'] == 17].copy()\n df18 = df.loc[df['season'] == 18].copy()\n df19 = df.loc[df['season'] == 19].copy()\n df20 = df.loc[df['season'] == 20].copy()\n df21 = df.loc[df['season'] == 21].copy()\n\n dreamteam_count_yearly_average = []\n\n for _, row in df21.iterrows():\n dreamteam_count_yearly_average.append(create_dreamteam_count_yearly(row['name'], raw_seasons))\n df21['dreamteam_yearly_average'] = dreamteam_count_yearly_average\n\n complete_data = pd.concat([df17, df18, df19, df20, df21])\n\n return complete_data",
"def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data",
"def get_fresh_data(get_team):\n\n \"\"\" Create the URL for today \"\"\"\n now = datetime.datetime.now()\n url = \"http://gd2.mlb.com/gdcross/components/game/mlb/year_\" + '{}'.format(now.year) + \"/month_\" + '{:02d}'.format(now.month) + \"/day_\" + '{:02d}'.format(now.day) + \"/miniscoreboard.json\"\n\n \"\"\" Grab the first response and write it to a file, we'll update it once the game starts \"\"\"\n data_write_file = status_dir + '{}'.format(now.year) + '{:02d}'.format(now.month) + '{:02d}'.format(now.day) + \".json\"\n\n \"\"\" Get the json data if the file doesn't exist, or if it's over three minutes old \"\"\"\n if not os.path.isfile(data_write_file) or time.time() - os.path.getmtime(data_write_file) > 60:\n response = urllib.urlopen(url)\n full_data = json.loads(response.read())\n with open(data_write_file, 'w') as outfile:\n json.dump(full_data, outfile, sort_keys=True, indent=2, ensure_ascii=False)\n\n \"\"\" Use the data from the status file \"\"\"\n with open(data_write_file, 'r') as json_data:\n full_data = json.load(json_data)\n\n \"\"\" This will return false if there is no game today, else will return json data for just our team \"\"\"\n my_game = False\n for game in full_data['data']['games']['game']:\n if get_team in game['home_file_code'] or get_team in game['away_file_code']:\n my_game = game\n \n return my_game",
"def getPlayerAdvStat(self, stat, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_call = stat.lower()\r\n stat_dict = {'touch':'Possessions', 'possession':'Possessions',\r\n 'speed':'SpeedDistance', 'distance':'SpeedDistance'}\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashptstats?College=&'\\\r\n 'Conference=&Country=&DateFrom=&DateTo=&Division=&'\\\r\n 'DraftPick=&DraftYear=&GameScope=&Height=&LastNGames=0&'\\\r\n 'LeagueID=00&Location=&Month=0&OpponentTeamID=0&Outcome=&'\\\r\n 'PORound=0&PerMode=PerGame&PlayerExperience=&PlayerOr'\\\r\n 'Team=Player&PlayerPosition=&PtMeasureType=' + \\\r\n stat_dict[stat_call] + '&Season=' + season + \\\r\n '&SeasonSegment=&SeasonType=Regular+Season&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision=&Weight='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n advStat_df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n return advStat_df",
"def Get_Player_Historic_Data(data_path, player_history_path): \n players = os.listdir(player_history_path) # Lists All The Player Folders in the Dir\n players_data = pd.read_csv(data_path + 'players_raw.csv')\n for ind in pbar(players_data.index): # ind in [0:693:1]\n # Get the Seasonal History\n player_path = players_data['first_name'][ind] + '_' + players_data['second_name'][ind] + '_' + str(players_data['id'][ind]) # Create player_history_path\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n # print(json.keys())\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n os.makedirs(player_history_path + player_path, exist_ok = True) # Create a new path for the player \n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his syeasonal history\n else: # However, if the player is within the existing directory\n if not os.path.isfile(player_history_path + player_path + \"/history.csv\"): # And a history file does not exist\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his seasonal history\n # Get the Gameweek History\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID \n history_df_gw = pd.DataFrame(json['history']) # Extract Gameweek History\n if not history_df_gw.empty: # If history returned\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n os.makedirs(player_history_path + player_path, exist_ok = True) # Create the directory, exit\n history_df_gw.to_csv(player_history_path + player_path + '/gw.csv', encoding='utf-8', index = False) # Write the CSV",
"def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df",
"def update(self):\n if not self.should_update():\n return\n try:\n two_hour_forecast = \"https://www.nea.gov.sg/api/WeatherForecast/forecast24hrnowcast2hrs/\" + \\\n str(int(time.time()))\n two_hour_result = requests.get(two_hour_forecast, timeout=10).json()\n if (two_hour_result is not None and two_hour_result[\"Channel2HrForecast\"] is not None and\n two_hour_result[\"Channel2HrForecast\"][\"Item\"] is not None and\n two_hour_result[\"Channel2HrForecast\"][\"Item\"][\"WeatherForecast\"] is not None and\n two_hour_result[\"Channel2HrForecast\"][\"Item\"][\"WeatherForecast\"][\"Area\"] is not None):\n self._data = two_hour_result[\"Channel2HrForecast\"][\"Item\"][\"WeatherForecast\"][\"Area\"]\n\n if (two_hour_result is not None and two_hour_result[\"Channel24HrForecast\"] is not None and\n two_hour_result[\"Channel24HrForecast\"][\"Main\"] is not None):\n self._today_data = two_hour_result[\"Channel24HrForecast\"][\"Main\"]\n\n four_day_forecast = \"https://www.nea.gov.sg/api/Weather4DayOutlook/GetData/\" + \\\n str(int(time.time()))\n self._forecast_data = requests.get(four_day_forecast, timeout=10).json()\n\n self.last_updated = dt_util.utcnow()\n return\n\n except ValueError as err:\n _LOGGER.error(\"Check NEA %s\", err.args)\n self._data = None\n self._forecast_data = None\n self._today_data = None\n raise",
"def __init__(self, api_key, season, week):\n\n self._ak = api_key\n self._base_url = 'https://api.sportsdata.io/v3/nfl/'\n self.season = season\n self.week = week\n self._player_dict = filter_players(load_players_file(), position='QB')",
"def makeLegacyData():\n\twith open('C:/Users/NeilS/Desktop/FantasyBoyzUSA/info/newLegacy.json','r') as file:\n\t\tlegacy = json.load(file)\n\n\tregWinsBig = []\n\tleaderBig = []\n\tbigGamesBig = []\n\tregPointsBig = []\n\tpostWinsBig = []\n\tchampsBig = []\n\tstandingBig = []\n\tteamList = []\n\tfor team in legacy: teamList.append(team)\n\tyear = 0 # corresponds to 2013 season\n\twhile(year<8): # 7 seasons plus total\n\t\tregWinsSmall = []\n\t\tleaderSmall = []\n\t\tbigGamesSmall = []\n\t\tregPointsSmall = []\n\t\tpostWinsSmall = []\n\t\tchampsSmall = []\n\t\tstandingSmall = []\n\t\tfor team in legacy:\n\t\t\tregWinsSmall.append(legacy[team]['RegSesWins'][year])\n\t\t\tleaderSmall.append(legacy[team]['GamesLeader'][year])\n\t\t\tbigGamesSmall.append(legacy[team]['BigGames'][year])\n\t\t\tregPointsSmall.append(legacy[team]['RegSesPts'][year])\n\t\t\tpostWinsSmall.append(legacy[team]['PlayoffWins'][year])\n\t\t\tchampsSmall.append(legacy[team]['Championship'][year])\n\t\t\tstandingSmall.append(legacy[team]['FinalStanding'][year])\n\t\tregWinsBig.append(regWinsSmall)\n\t\tleaderBig.append(leaderSmall)\n\t\tbigGamesBig.append(bigGamesSmall)\n\t\tregPointsBig.append(regPointsSmall)\n\t\tpostWinsBig.append(postWinsSmall)\n\t\tchampsBig.append(champsSmall)\n\t\tstandingBig.append(standingSmall)\n\t\tyear += 1\n\n\tfor i in range(len(regWinsBig)): # turn all values into floats\n\t\tfor j in range(len(regWinsBig[0])):\n\t\t\tif regWinsBig[i][j]=='-': regWinsBig[i][j] = 0\n\t\t\tif leaderBig[i][j]=='-': leaderBig[i][j] = 0\n\t\t\tif bigGamesBig[i][j]=='-': bigGamesBig[i][j] = 0\n\t\t\tif regPointsBig[i][j]=='-': regPointsBig[i][j] = 0\n\t\t\tif postWinsBig[i][j]=='-': postWinsBig[i][j] = 0\n\t\t\tif champsBig[i][j]=='-': champsBig[i][j] = 0\n\t\t\tif standingBig[i][j]=='-': standingBig[i][j] = 0\n\t\t\tregWinsBig[i][j] = float(regWinsBig[i][j])\n\t\t\tleaderBig[i][j] = float(leaderBig[i][j])\n\t\t\tbigGamesBig[i][j] = float(bigGamesBig[i][j])\n\t\t\tregPointsBig[i][j] = float(regPointsBig[i][j])\n\t\t\tpostWinsBig[i][j] = float(postWinsBig[i][j])\n\t\t\tchampsBig[i][j] = float(champsBig[i][j])\n\t\t\tstandingBig[i][j] = float(standingBig[i][j])\n\tlegacyStats = [teamList, regWinsBig, leaderBig, bigGamesBig, regPointsBig, postWinsBig, champsBig, standingBig]\n\treturn legacyStats",
"def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = 
game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) + player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str"
]
| [
"0.5904324",
"0.5902132",
"0.57382816",
"0.57373065",
"0.5718162",
"0.56430864",
"0.5641176",
"0.56379896",
"0.55986893",
"0.55755377",
"0.5567273",
"0.55645984",
"0.55612344",
"0.55196846",
"0.5490538",
"0.5481568",
"0.5469396",
"0.54392934",
"0.54362196",
"0.54210824",
"0.54117787",
"0.539662",
"0.53870296",
"0.5386166",
"0.53788066",
"0.53252864",
"0.5322462",
"0.5318668",
"0.531488",
"0.5294352"
]
| 0.70550686 | 0 |
A room with an unknown room version should not break sync (and should be excluded). | def test_unknown_room_version(self) -> None:
inviter = self.register_user("creator", "pass", admin=True)
inviter_tok = self.login("@creator:test", "pass")
user = self.register_user("user", "pass")
tok = self.login("user", "pass")
# Do an initial sync on a different device.
requester = create_requester(user)
initial_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester, sync_config=generate_sync_config(user, device_id="dev")
)
)
# Create a room as the user.
joined_room = self.helper.create_room_as(user, tok=tok)
# Invite the user to the room as someone else.
invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)
self.helper.invite(invite_room, targ=user, tok=inviter_tok)
knock_room = self.helper.create_room_as(
inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok
)
self.helper.send_state(
knock_room,
EventTypes.JoinRules,
{"join_rule": JoinRules.KNOCK},
tok=inviter_tok,
)
channel = self.make_request(
"POST",
"/_matrix/client/r0/knock/%s" % (knock_room,),
b"{}",
tok,
)
self.assertEqual(200, channel.code, channel.result)
# The rooms should appear in the sync response.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester, sync_config=generate_sync_config(user)
)
)
self.assertIn(joined_room, [r.room_id for r in result.joined])
self.assertIn(invite_room, [r.room_id for r in result.invited])
self.assertIn(knock_room, [r.room_id for r in result.knocked])
        # Test an incremental sync (by providing a since_token).
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
since_token=initial_result.next_batch,
)
)
self.assertIn(joined_room, [r.room_id for r in result.joined])
self.assertIn(invite_room, [r.room_id for r in result.invited])
self.assertIn(knock_room, [r.room_id for r in result.knocked])
# Poke the database and update the room version to an unknown one.
for room_id in (joined_room, invite_room, knock_room):
self.get_success(
self.hs.get_datastores().main.db_pool.simple_update(
"rooms",
keyvalues={"room_id": room_id},
updatevalues={"room_version": "unknown-room-version"},
desc="updated-room-version",
)
)
# Blow away caches (supported room versions can only change due to a restart).
self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
self.store.get_rooms_for_user.invalidate_all()
self.store._get_event_cache.clear()
self.store._event_ref.clear()
# The rooms should be excluded from the sync response.
# Get a new request key.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester, sync_config=generate_sync_config(user)
)
)
self.assertNotIn(joined_room, [r.room_id for r in result.joined])
self.assertNotIn(invite_room, [r.room_id for r in result.invited])
self.assertNotIn(knock_room, [r.room_id for r in result.knocked])
# The rooms should also not be in an incremental sync.
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
since_token=initial_result.next_batch,
)
)
self.assertNotIn(joined_room, [r.room_id for r in result.joined])
self.assertNotIn(invite_room, [r.room_id for r in result.invited])
self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def room_of(self, guest_name):\n pass",
"def get_room_or_error(room_id) -> PublicChatRoom:\n try:\n return PublicChatRoom.objects.get(pk=room_id)\n except PublicChatRoom.DoesNotExist:\n raise ClientError(\"ROOM_INVALID\", \"Room not created\")",
"def test_new_room(new_room):\n try:\n uuid.UUID(str(new_room.id), version=4)\n except ValueError:\n raise ValueError('new_room id is not valid uuid4')\n assert new_room.name == 'TestRoom'\n assert new_room.type == 'public'\n assert new_room.status == 'playing'\n assert new_room.global_score == 340\n assert json.loads(new_room.field) == []\n assert new_room.number_of_moves == 0",
"def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def groom_model(model):\n model.commits, model.ticket = 0, None\n return model",
"def test_no_unlisted(self):\n Version.objects.get(pk=self.version_1_2_2).update(\n channel=amo.RELEASE_CHANNEL_UNLISTED)\n self.addon.reload()\n assert self.addon.status == amo.STATUS_PUBLIC\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def room(self, room):\n if self.local_vars_configuration.client_side_validation and room is None: # noqa: E501\n raise ValueError(\"Invalid value for `room`, must not be `None`\") # noqa: E501\n\n self._room = room",
"def r_is_ok(self, router):\r\n for e in self.exclude:\r\n if e == router.version:\r\n return False\r\n return True",
"def remove_room_from_current(self, login, room):\n pass",
"def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"",
"def test_platform_does_not_exist(self):\n version = Version.objects.get(pk=115509)\n for file in version.files.all():\n file.platform = amo.PLATFORM_LINUX.id\n file.save()\n\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def test_min_client(self):\n for version in Version.objects.filter(pk__gte=self.version_1_2_0):\n appversion = version.apps.all()[0]\n appversion.min = AppVersion.objects.get(pk=325) # 3.7a5\n appversion.save()\n\n version, file = self.get('', '3070000005000', # 3.7a5pre\n self.app, self.platform)\n assert version == self.version_1_1_3",
"def update_room_type(self):\n\t\t# otherwise gamelog room will just stay fixed at value obtained during construction\n\t\tself.current_room = self.grid.current_room_type",
"def add_vertex(self, room):\r\n if room['room_id'] not in self.rooms:\r\n self.rooms[room['room_id']] = room\r\n # self.rooms[room['room_id']]['exits'] = {\r\n # d: '?' for d in room['exits']}\r",
"def is_valid_version(self):\n pass",
"def get_random_room(rooms) -> str:\n new_id = random.randbytes(12).hex().upper()\n while new_id in rooms:\n new_id = random.randbytes(12).hex().upper()\n return new_id",
"def test_get_rooms_model(self):\r\n self.assertIsInstance(self.db.get_rooms_model(\"SC2011\"), QtSql.QSqlQueryModel)",
"def __str__(self):\n return self.room.name",
"def test_delete_from_non_existing_room(self):\n CommonTestCases.admin_token_assert_in(\n self,\n delete_assigned_resource_from_non_existing_room,\n \"Room does not exist\"\n )",
"def test_not_public(self):\n self.change_status(self.version_1_2_2, amo.STATUS_NULL)\n self.addon.update(status=amo.STATUS_NULL)\n version, file = self.get('1.2.1', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1",
"def test_PUT_room(self):\n\t\t# 1)\n\t\tself.POST_room()\n\t\t# 2)\n\t\tNEW_ROOM_DATA = {'count': '3', 'name': 'NEW-ROOM-NAME'}\n\t\trv = self.PUT_data('/api/room/' + self.room_id, NEW_ROOM_DATA)\n\t\t# 3)\n\t\tdata = self.GET_data('/api/room/' + self.room_id)\n\t\tself.assertDataMatch(TEST_ROOM_DATA, data, ['type'])\n\t\t# 4)\n\t\tself.assertDataMatch(NEW_ROOM_DATA, data, NEW_ROOM_DATA.keys())\n\t\tself.validate_last_modified(data)",
"def room(self) -> Room:\n return self.__room",
"def generate_room_id():\r\n id_length = 6\r\n while True:\r\n id_tmp = ''.join(random.SystemRandom().choice(\r\n string.ascii_uppercase) for _ in range(id_length))\r\n conflict = id_tmp in rooms\r\n if not conflict:\r\n return id_tmp",
"def test_release_update_available_NO(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(None, next)",
"def room_finder(k):\n# current = room_map['blank']\n try:\n current = room_map[k]\n current.vis = {}\n vis = visable(current)\n current.vis = vis\n# print('found room')\n except:\n error()\n print('error in room_finder')\n \n return current",
"async def test_release_bad_version(doof, repo_info, event_loop, command):\n command_words = command.split() + ['a.b.c']\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=command_words,\n loop=event_loop,\n )\n assert doof.said(\n 'having trouble figuring out what that means',\n )",
"def room(roomid):\n if db.checkCache(roomid):\n data = db.showCache(roomid)\n get_events(data[\"info\"],None,None)\n resp = jsonify(data)\n resp.status_code = 200\n else:\n try:\n r = requests.get(FenixSpacesAPI_URL + \"/\" + str(roomid))\n data = r.json()\n\n if(data['type'] != 'ROOM'):\n resp = jsonify(\"Not Found\")\n resp.status_code = 404\n\n else:\n data = format_room(data)\n db.add(roomid, data)\n get_events(data[\"info\"],None,None)\n resp = jsonify(data)\n resp.status_code = 200\n\n except Exception as e:\n print(e)\n resp = jsonify(\"Unsuccess\")\n resp.status_code = 400\n\n return resp",
"def test__get_component_version_empty(self):\n self._ucr({'repository/online/component/a/version': ''})\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): '',\n })\n ver = U.UCS_Version((MAJOR, MINOR, 0)) # comonent.erratalevel!\n comp_ver = self.u._get_component_versions('a', start=ver, end=ver)\n self.assertEqual(set((ver,)), comp_ver)",
"def test_undefined_semver(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = None\n\n self.assertEqual(v1.build, expected)",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))"
]
| [
"0.5479755",
"0.5352999",
"0.53118664",
"0.5281718",
"0.51888007",
"0.5170421",
"0.5131571",
"0.5111089",
"0.5103384",
"0.5095766",
"0.50885797",
"0.50845325",
"0.5068195",
"0.49836266",
"0.49664915",
"0.495951",
"0.4904172",
"0.4896691",
"0.48871648",
"0.48798552",
"0.48651993",
"0.4852167",
"0.48512483",
"0.48502526",
"0.48447588",
"0.48441747",
"0.48374146",
"0.48159048",
"0.48091614",
"0.4795726"
]
| 0.6844382 | 0 |
Rooms shouldn't appear under "joined" if a join loses a race to a ban. | def test_ban_wins_race_with_join(self) -> None:
# A local user Alice creates a room.
owner = self.register_user("alice", "password")
owner_tok = self.login(owner, "password")
room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok)
# Do a sync as Alice to get the latest event in the room.
alice_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(owner), generate_sync_config(owner)
)
)
self.assertEqual(len(alice_sync_result.joined), 1)
self.assertEqual(alice_sync_result.joined[0].room_id, room_id)
last_room_creation_event_id = (
alice_sync_result.joined[0].timeline.events[-1].event_id
)
# Eve, a ne'er-do-well, registers.
eve = self.register_user("eve", "password")
eve_token = self.login(eve, "password")
# Alice preemptively bans Eve.
self.helper.ban(room_id, owner, eve, tok=owner_tok)
# Eve syncs.
eve_requester = create_requester(eve)
eve_sync_config = generate_sync_config(eve)
eve_sync_after_ban: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)
)
# Sanity check this sync result. We shouldn't be joined to the room.
self.assertEqual(eve_sync_after_ban.joined, [])
# Eve tries to join the room. We monkey patch the internal logic which selects
# the prev_events used when creating the join event, such that the ban does not
# precede the join.
mocked_get_prev_events = patch.object(
self.hs.get_datastores().main,
"get_prev_events_for_room",
new_callable=AsyncMock,
return_value=[last_room_creation_event_id],
)
with mocked_get_prev_events:
self.helper.join(room_id, eve, tok=eve_token)
# Eve makes a second, incremental sync.
eve_incremental_sync_after_join: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
eve_requester,
eve_sync_config,
since_token=eve_sync_after_ban.next_batch,
)
)
# Eve should not see herself as joined to the room.
self.assertEqual(eve_incremental_sync_after_join.joined, [])
        # If we did a third initial sync, we should _still_ see Eve is not joined to the room.
eve_initial_sync_after_join: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
eve_requester,
eve_sync_config,
since_token=None,
)
)
self.assertEqual(eve_initial_sync_after_join.joined, []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_rooms(self, exclude=[]):\n stmt = Session.query(Lesson.room, Lesson.day, Lesson.order,\n Lesson.schedule_id)\n stmt = stmt.group_by(Lesson.room, Lesson.order, Lesson.day, Lesson.schedule_id)\n stmt = stmt.having(func.count(Lesson.room)>1)\n stmt = stmt.filter(not_(Lesson.room.in_(exclude)))\n stmt = stmt.subquery()\n q = Session.query(Lesson).join((stmt, and_(\n Lesson.room == stmt.c.room,\n Lesson.day == stmt.c.day,\n Lesson.order == stmt.c.order,\n Lesson.schedule_id == stmt.c.schedule_id)))\n q = q.order_by(Lesson.day, Lesson.order, Lesson.room)\n\n conflicts = q.all()\n if len(conflicts) == 0:\n return []\n rooms = [[conflicts.pop(0), conflicts.pop(0)]]\n for c in conflicts:\n prev = rooms[-1][-1]\n if c.room == prev.room and c.day == prev.day and c.order == \\\n prev.order and c.schedule_id == prev.schedule_id:\n rooms[-1].append(c)\n else:\n rooms.append([c])\n return rooms",
"def test_private_rooms_do_not_have_profiles_collected(self) -> None:\n room_id = self.helper.create_room_as(\n self.alice, is_public=False, tok=self.alice_tok\n )\n self.get_success(\n event_injection.inject_member_event(\n self.hs,\n room_id,\n \"@bruce:remote\",\n \"join\",\n \"@bruce:remote\",\n extra_content={\n \"displayname\": \"super-duper bruce\",\n \"avatar_url\": \"mxc://remote/456\",\n },\n )\n )\n # Sending this event makes the streams move forward after the injection...\n self.helper.send(room_id, \"Test\", tok=self.alice_tok)\n self.pump(0.1)\n\n profiles = self.get_success(\n self.user_dir_helper.get_profiles_in_user_directory()\n )\n self.assertNotIn(\"@bruce:remote\", profiles)",
"def test_joining_private_room_with_excluded_user(self) -> None:\n # Setup a support and two normal users.\n alice = self.register_user(\"alice\", \"pass\")\n alice_token = self.login(alice, \"pass\")\n bob = self.register_user(\"bob\", \"pass\")\n bob_token = self.login(bob, \"pass\")\n support = \"@support1:test\"\n self.get_success(\n self.store.register_user(\n user_id=support, password_hash=None, user_type=UserTypes.SUPPORT\n )\n )\n\n # Alice makes a room. Inject the support user into the room.\n room = self.helper.create_room_as(alice, is_public=False, tok=alice_token)\n self.get_success(inject_member_event(self.hs, room, support, \"join\"))\n # Check the DB state. The support user should not be in the directory.\n users, in_public, in_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {alice, bob})\n self.assertEqual(in_public, set())\n self.assertEqual(in_private, set())\n\n # Then invite Bob, who accepts.\n self.helper.invite(room, alice, bob, tok=alice_token)\n self.helper.join(room, bob, tok=bob_token)\n\n # Check the DB state. The support user should not be in the directory.\n users, in_public, in_private = self.get_success(\n self.user_dir_helper.get_tables()\n )\n self.assertEqual(users, {alice, bob})\n self.assertEqual(in_public, set())\n self.assertEqual(in_private, {(alice, bob, room), (bob, alice, room)})",
"def test_join_after_leave(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.join(r1, u2, tok=u2token)\n self.helper.leave(r1, u2, tok=u2token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"left_members\"] - r1stats_ante[\"left_members\"], -1\n )",
"def test_banned(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n\n self.helper.join(r1, u2, tok=u2token)\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.change_membership(r1, u1, u2, \"ban\", tok=u1token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 0,\n )\n self.assertEqual(\n r1stats_post[\"banned_members\"] - r1stats_ante[\"banned_members\"], +1\n )\n self.assertEqual(\n r1stats_post[\"joined_members\"] - r1stats_ante[\"joined_members\"], -1\n )",
"def test_listing_from_wall_when_blocked_some_users(self):",
"async def join(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n return\n player = ctx.message.author.name\n if player.lower() in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}... you're already playing Truth or Dare here!\".format(room))\n else:\n tod_games[room]['participants'][player.lower()] = {'spins': 0}\n await amor_manager.say(\"{} has joined Truth or Dare!\".format(player))",
"def test_missing_room(self, mock_join):\n mock_join.side_effect = KeyError('Unknown room.')\n response = self.fetch('/rooms/1234', method='GET')\n self.assertTrue(mock_join.called)\n self.assertEqual(response.code, 404)",
"def room_of(self, guest_name):\n pass",
"def test_user_not_in_users_table(self) -> None:\n user1 = self.register_user(\"user1\", \"pass\")\n token1 = self.login(user1, \"pass\")\n room = self.helper.create_room_as(user1, is_public=True, tok=token1)\n\n # Inject a join event for a user who doesn't exist\n self.get_success(inject_member_event(self.hs, room, \"@not-a-user:test\", \"join\"))\n\n # Another new user registers and joins the room\n user2 = self.register_user(\"user2\", \"pass\")\n token2 = self.login(user2, \"pass\")\n self.helper.join(room, user2, tok=token2)\n\n # The dodgy event should not have stopped us from processing user2's join.\n in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())\n self.assertEqual(set(in_public), {(user1, room), (user2, room)})",
"async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))",
"def join_rooms(self):\n logging.info(\"Joining MUC rooms\")\n xrooms = self.botconfig.findall('rooms/muc')\n rooms = {}\n for xroom in xrooms:\n rooms[xroom.attrib['room']] = xroom.attrib['nick']\n for room in set(self.rooms.keys()).difference(rooms.keys()):\n logging.info(\"Parting room %s.\" % room)\n self.plugin['xep_0045'].leaveMUC(room, self.rooms[room])\n del self.rooms[room]\n for room in set(rooms.keys()).difference(self.rooms.keys()):\n self.rooms[room] = rooms[room]\n logging.info(\"Joining room %s as %s.\" % (room, rooms[room]))\n self.plugin['xep_0045'].joinMUC(room, rooms[room])",
"async def join(self, ctx):\n if lobby.count(f\"{ctx.author.mention}\") == 0:\n add(lobby, ctx.author.mention)\n await ctx.channel.send(\"You've been added to the queue!\")\n else:\n await ctx.channel.send(\"You're already queued for a match!\")\n await ctx.channel.send(embed=lobby_list())\n if len(lobby) == teamSizeMax:\n if roster:\n await ctx.channel.send(\n \"There is currently a match being picked right now, please try again after picking is finished\")\n else:\n assign_captains()",
"def win(self):\n print \"\\n{0} has escaped the dungeon, as few before have. {0} survived {1} rooms.\\n\".format(self.name, self.roomCt)\n exit()",
"async def on_member_join(self, member):\n sid = member.server.id\n role = await self.get_role(member.server)\n try:\n muterole = self.riceCog2[server.id][\"muterole\"]\n except:\n muterole = default_muterole \n\n if 'poop' in self.riceCog2[sid]:\n if self.riceCog2[sid]['poop'] == True:\n if member.id in self.riceCog[sid]:\n if count >= 1:\n count = self.riceCog[sid][member.id][\"Count\"]\n poops = \"\\U0001f528\" * count\n role_name = \"Warning {}\".format(poops)\n is_there = False\n colour = 0xbc7642\n for role in member.server.roles:\n if role.name == role_name:\n poop_role = role\n is_there = True\n if not is_there:\n server = member.server\n poop_role = await self.bot.create_role(server)\n await self.bot.edit_role(role=poop_role,\n name=role_name,\n server=server)\n try:\n await self.bot.add_roles(member,\n poop_role)\n except discord.errors.Forbidden:\n await self.bot.say(\"No permission to add roles\")\n else:\n pass\n if member.id in self.norole[sid]:\n if self.norole[sid]['role'] == True:\n role = discord.utils.get(member.server.roles, name=\"NoBNL\")\n await self.bot.add_roles(member, role)\n \n if not role or not (sid in self.json and member.id in self.json[sid]):\n return\n\n duration = self.json[sid][member.id]['until'] - time.time()\n if duration > 0:\n role = discord.utils.get(member.server.roles, name=muterole)\n await self.bot.add_roles(member, role)\n\n reason = 'Punishment re-added on rejoin. '\n if self.json[sid][member.id]['reason']:\n reason += self.json[sid][member.id]['reason']\n\n if member.id not in self.handles[sid]:\n self.schedule_unpunish(duration, member, reason)",
"def validate_can_enter(self, user, contest_pool):\n\n # the contest attempting to be joined\n target_skill_level = contest_pool.skill_level\n if target_skill_level.enforced == False:\n return # the skill level of this contest is not enforced -- anyone can join no matter what\n\n # find any enforced skill_levels we have an entry in not matching our target.\n # if any are found, that means we cant join and must raise exception\n entries = Entry.objects.filter(\n user=user,\n contest_pool__draft_group=contest_pool.draft_group,\n contest_pool__skill_level__enforced=True\n ).exclude(contest_pool__skill_level=target_skill_level)\n\n if entries.count() > 0:\n raise self.CanNotEnterSkillLevel()",
"def test_join_not_needed(self):\n client = self.mock_client([])\n coord = self.make_coordinator(client)\n _join = coord._join_and_sync = Mock()\n coord._rejoin_d = defer.Deferred()\n coord.join_and_sync()\n _join.assert_not_called()\n coord._rejoin_d = None\n coord._rejoin_needed = False\n coord.join_and_sync()\n _join.assert_not_called()",
"def handle_join_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling join room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user in _room.room_attrbts['members']:\n msg = f\"Client {user} is already a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].add(user)\n msg = f\"{user} successfully joined membership of room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return",
"def on_leave(data):\n logger.info(f\"Leaving: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n leave_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")",
"def can_exist_outside_of_game(self):\n return True",
"def can_exist_outside_of_game(self):\n return True",
"def exists(self):\n logging.warning(\n \"IRC back-end does not support determining if a room exists. \"\n \"Returning the result of joined instead.\"\n )\n return self.joined",
"def _joined_all(self):\n if not self.channels:\n return False\n for channel in self:\n if not channel.joined:\n return False\n return True",
"def on_join(data):\n logger.info(f\"Joining: {data}\")\n to = data[\"to\"]\n if to in TO_OPTIONS.keys():\n join_room(to)\n logger.info(f\"Rooms: {rooms()}\")\n else:\n logger.warning(f\"{to} not in TO_OPTIONS\")",
"def join_room(self, client, room):\n if room.verify_if_is_invited(client):\n room.add_member(client)\n self.send_message('Te has unido a la sala {}'.format(room.get_name()), client)\n else:\n self.send_message('No estas invitado a la sala.', client)",
"def _check_for_win(self):\n slots_available = any(\n [slot.available for slot in self.board.iter_slots() if not slot.mine]\n )\n if not slots_available:\n self.status = GameStatusEnum.won\n self.end_time = datetime.utcnow()",
"def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return",
"def is_sealed(self):\n return self.walls == Direction.All",
"def joined(message):\n #room = session.get('room')\n room='abc'\n join_room(room)\n #emit('status', {'msg': session.get('name') + ' has entered the room.' + message['msg']}, room=room)\n emit('status', {'msg': 'Yao has entered the room.'}, room=room)\n #emit('status', {'msg': 'Yao has entered the room.'}, room='room1')",
"def check_for_unguarded_rooms(museum):\r\n\tempty_rooms = []\r\n\r\n\tfor row_idx in range(len(museum)):\t\r\n\t\t\r\n\t\tfor item_idx in range(len(museum[row_idx])): #Go back and fix this to be enumerate instead\r\n\t\t\t\r\n\t\t\tif museum[row_idx][item_idx] == \"0\":\r\n\t\t\t\tempty_rooms.append([row_idx, item_idx])\r\n\r\n\t# for row_idx, row_value in enumerate(museum):\r\n\t# \tfor item_idx, item_value in enumerate(row):\r\n\t# \t\tif item_value == \" \":\r\n\t# \t\t\tprint(item)\r\n\t# \t\t\tempty_rooms.append([row_idx, item_idx]) # need index\r\n\r\n\tif not empty_rooms:\r\n\t\tprint(\"true\")\r\n\t\r\n\telse:\r\n\t\tprint(\"false\")\r\n\r\n\t\tfor room in empty_rooms:\r\n\t\t\tprint(str(room[0]) + \" \" + str(room[1]))"
]
| [
"0.60549825",
"0.5959514",
"0.5958933",
"0.59361064",
"0.58495605",
"0.5714841",
"0.5688591",
"0.5605994",
"0.5584611",
"0.55729496",
"0.556611",
"0.553233",
"0.54888505",
"0.5464354",
"0.5461276",
"0.5449192",
"0.54279804",
"0.5424823",
"0.5380059",
"0.5365503",
"0.5365503",
"0.5353837",
"0.53537005",
"0.5338505",
"0.5335356",
"0.5323812",
"0.53236413",
"0.5312703",
"0.53096074",
"0.5309204"
]
| 0.6758452 | 0 |
Generate a sync config (with a unique request key). | def generate_sync_config(
user_id: str, device_id: Optional[str] = "device_id"
) -> SyncConfig:
global _request_key
_request_key += 1
return SyncConfig(
user=UserID.from_string(user_id),
filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION,
is_guest=False,
request_key=("request_key", _request_key),
device_id=device_id,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_generated_config(self, auth_provider: KeyProvider, secret_key):\n\n generated_config = {\n 'jupyterhub': {\n 'proxy': {\n 'https': {\n 'hosts': [self.spec['domain']]\n }\n },\n 'ingress': {\n 'hosts': [self.spec['domain']],\n 'tls': [\n {\n 'secretName': 'https-auto-tls',\n 'hosts': [self.spec['domain']]\n }\n ]\n\n },\n 'singleuser': {\n # If image_repo isn't set, just have an empty image dict\n 'image': {'name': self.cluster.spec['image_repo']} if 'image_repo' in self.cluster.spec else {},\n },\n 'hub': {\n 'config': {},\n 'initContainers': [\n {\n 'name': 'templates-clone',\n 'image': 'alpine/git',\n 'args': [\n 'clone',\n '--',\n 'https://github.com/2i2c-org/pilot-homepage',\n '/srv/repo',\n ],\n 'securityContext': {\n 'runAsUser': 1000,\n 'allowPrivilegeEscalation': False,\n 'readOnlyRootFilesystem': True,\n },\n 'volumeMounts': [\n {\n 'name': 'custom-templates',\n 'mountPath': '/srv/repo'\n }\n ]\n }\n ],\n 'extraContainers': [\n {\n 'name': 'templates-sync',\n 'image': 'alpine/git',\n 'workingDir': '/srv/repo',\n 'command': ['/bin/sh'],\n 'args': [\n '-c',\n dedent(\n f'''\\\n while true; do git fetch origin;\n if [[ $(git ls-remote --heads origin {self.spec[\"name\"]} | wc -c) -ne 0 ]]; then\n git reset --hard origin/{self.spec[\"name\"]};\n else\n git reset --hard origin/master;\n fi\n sleep 5m; done\n '''\n )\n ],\n 'securityContext': {\n 'runAsUser': 1000,\n 'allowPrivilegeEscalation': False,\n 'readOnlyRootFilesystem': True,\n },\n 'volumeMounts': [\n {\n 'name': 'custom-templates',\n 'mountPath': '/srv/repo'\n }\n ]\n }\n ],\n 'extraVolumes': [\n {\n 'name': 'custom-templates',\n 'emptyDir': {}\n }\n ],\n 'extraVolumeMounts':[\n {\n 'mountPath': '/usr/local/share/jupyterhub/custom_templates',\n 'name': 'custom-templates',\n 'subPath': 'templates'\n },\n {\n 'mountPath': '/usr/local/share/jupyterhub/static/extra-assets',\n 'name': 'custom-templates',\n 'subPath': 'extra-assets'\n }\n ]\n }\n },\n }\n #\n # Allow explicilty ignoring auth0 setup\n if self.spec['auth0'].get('enabled', True):\n # Auth0 sends users back to this URL after they authenticate\n callback_url = f\"https://{self.spec['domain']}/hub/oauth_callback\"\n # Users are redirected to this URL after they log out\n logout_url = f\"https://{self.spec['domain']}\"\n client = auth_provider.ensure_client(\n name=self.spec['auth0'].get('application_name', f\"{self.cluster.spec['name']}-{self.spec['name']}\"),\n callback_url=callback_url,\n logout_url=logout_url,\n connection_name=self.spec['auth0']['connection'],\n connection_config=self.spec['auth0'].get(self.spec['auth0']['connection'], {}),\n )\n # FIXME: We're hardcoding Auth0OAuthenticator here\n # We should *not*. We need dictionary merging in code, so\n # these can all exist fine.\n generated_config['jupyterhub']['hub']['config']['Auth0OAuthenticator'] = auth_provider.get_client_creds(client, self.spec['auth0']['connection'])\n\n return self.apply_hub_template_fixes(generated_config, secret_key)",
"def _generate_conn_info_key(type_, version, regions, timee, carrier):\n params = f\"{type_}{version}{regions}{timee}{carrier}\"\n return MD5.new(params.encode() + MAGIC_VALUE).hexdigest()",
"def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }",
"def push_sync_device(\n task,\n dry_run: bool = True,\n generate_only: bool = False,\n job_id: Optional[str] = None,\n scheduled_by: Optional[str] = None,\n confirm_mode_override: Optional[int] = None,\n):\n set_thread_data(job_id)\n logger = get_logger()\n hostname = task.host.name\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one()\n template_vars = populate_device_vars(session, dev)\n platform = dev.platform\n devtype = dev.device_type\n\n local_repo_path = app_settings.TEMPLATES_LOCAL\n\n mapfile = os.path.join(local_repo_path, platform, \"mapping.yml\")\n if not os.path.isfile(mapfile):\n raise RepoStructureException(\"File {} not found in template repo\".format(mapfile))\n with open(mapfile, \"r\") as f:\n mapping = yaml.safe_load(f)\n template = mapping[devtype.name][\"entrypoint\"]\n\n logger.debug(\"Generate config for host: {}\".format(task.host.name))\n r = task.run(\n task=template_file,\n name=\"Generate device config\",\n template=template,\n jinja_env=get_jinja_env(f\"{local_repo_path}/{task.host.platform}\"),\n path=f\"{local_repo_path}/{task.host.platform}\",\n **template_vars,\n )\n\n # TODO: Handle template not found, variables not defined\n # jinja2.exceptions.UndefinedError\n\n task.host[\"config\"] = r.result\n task.host[\"template_vars\"] = template_vars\n\n if generate_only:\n task.host[\"change_score\"] = 0\n else:\n logger.debug(\n \"Synchronize device config for host: {} ({}:{})\".format(task.host.name, task.host.hostname, task.host.port)\n )\n\n if api_settings.COMMIT_CONFIRMED_MODE != 2:\n task.host.open_connection(\"napalm\", configuration=task.nornir.config)\n task_args = {\n \"name\": \"Sync device config\",\n \"replace\": True,\n \"configuration\": task.host[\"config\"],\n \"dry_run\": dry_run,\n \"commit_message\": \"Job id {}\".format(job_id),\n }\n if dry_run:\n task_args[\"task\"] = napalm_configure\n elif api_settings.COMMIT_CONFIRMED_MODE == 0:\n task_args[\"task\"] = napalm_configure\n else:\n task_args[\"task\"] = napalm_configure_confirmed\n task_args[\"job_id\"] = job_id\n task_args[\"confirm_mode_override\"] = confirm_mode_override\n logger.debug(\n \"Commit confirm mode for host {}: {} (dry_run: {})\".format(\n task.host.name, api_settings.COMMIT_CONFIRMED_MODE, dry_run\n )\n )\n task.run(**task_args)\n if api_settings.COMMIT_CONFIRMED_MODE != 2:\n task.host.close_connection(\"napalm\")\n\n if task.results[1].diff:\n config = task.results[1].host[\"config\"]\n diff = task.results[1].diff\n task.host[\"change_score\"] = calculate_score(config, diff)\n else:\n task.host[\"change_score\"] = 0\n if job_id:\n with redis_session() as db:\n db.lpush(\"finished_devices_\" + str(job_id), task.host.name)",
"def make_config(self, cfg_t, cfgname):\n\n if cfg_t == 'pool':\n prop_d = MBRAT_DEF_POOL_D\n prop_d[cfg_t].update( {'name': cfgname,} )\n args = self._mkcfg_args( cfgname, MBRAT_POOLSD, ['data',], prop_d ) \n\n elif cfg_t == 'poolkey':\n targetd = self.get_cfg_parentd(cfg_t)\n prop_d = MBRAT_DEF_POOLKEY_D\n prop_d[cfg_t].update( {'name': cfgname,} )\n args = self._mkcfg_args( cfgname, targetd, [], prop_d )\n\n elif cfg_t == 'profile':\n prop_d = { cfg_t: {'info': \"\", 'name': cfgname,}, }\n args = self._mkcfg_args( cfgname, MBRAT_PROFILESD, \n ['data', 'public',], prop_d )\n\n elif cfg_t == 'privkey':\n targetd = self.get_cfg_parentd(cfg_t)\n prop_d = MBRAT_DEF_PRIVKEY_D\n prop_d[cfg_t].update( {'name': cfgname,} )\n prop_d['pool'].update( {'name': \"{}_pool\".format(cfgname),} )\n args = self._mkcfg_args( cfgname, targetd, ['public',], prop_d )\n\n elif cfg_t == 'pubkey':\n return self._mkcfg_pubkey(cfgname)\n\n # now make the new config dir...\n return self._mkcfg(cfg_t, args)",
"def syncToken(self):\n if self._cachedSyncToken is None:\n pieces = []\n self._syncTokenKeys.sort()\n for key in self._syncTokenKeys:\n value = self.getKeyPath(key)\n if value is None:\n value = \"\"\n pieces.append(key + \":\" + str(value))\n whole = \"|\".join(pieces)\n self._cachedSyncToken = hashlib.md5(whole).hexdigest()\n return self._cachedSyncToken",
"def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSync']:\n return pulumi.get(self, \"config_sync\")",
"def async_request_configuration(hass, config, oauth):\n if len(_CONFIGURING) > 0:\n return\n configurator = hass.components.configurator\n global OAUTH_CLIENT_ID\n OAUTH_CLIENT_ID = oauth.client_id\n\n async def async_configuration_callback(data):\n \"\"\"Handle configuration changes.\"\"\"\n _LOGGER.info('Spotify async_configuration_callback')\n\n def success():\n \"\"\"Signal successful setup.\"\"\"\n req_config = _CONFIGURING.pop(OAUTH_CLIENT_ID)\n configurator.request_done(req_config)\n\n hass.async_add_job(success)\n async_setup_spotify(hass, config, configurator)\n\n _CONFIGURING[OAUTH_CLIENT_ID] = configurator.async_request_config(\n DEFAULT_NAME,\n async_configuration_callback,\n link_name=CONFIGURATOR_LINK_NAME,\n link_url=oauth.get_authorize_url(),\n description=CONFIGURATOR_DESCRIPTION,\n submit_caption=CONFIGURATOR_SUBMIT_CAPTION\n )\n setUrl(oauth.get_authorize_url())",
"def generate_config(context):\n\n resources = []\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n name = properties.get('name', context.env['name'])\n\n resource = {\n 'name': context.env['name'],\n # https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create\n 'type': 'gcp-types/file-v1beta1:projects.locations.instances',\n 'properties': {\n 'parent': 'projects/{}/locations/{}'.format(project_id, properties['location']),\n 'instanceId': name,\n }\n }\n\n optional_props = [\n 'description',\n 'tier',\n 'labels',\n 'fileShares',\n 'networks',\n ]\n\n for prop in optional_props:\n if prop in properties:\n resource['properties'][prop] = properties[prop]\n\n resources.append(resource)\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'name',\n 'value': name\n },\n {\n 'name': 'fileShares',\n 'value': '$(ref.{}.fileShares)'.format(context.env['name'])\n },\n {\n 'name': 'networks',\n 'value': '$(ref.{}.networks)'.format(context.env['name'])\n }\n ]\n }",
"def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % 
organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)",
"def config(self):\n state_file_id = \"{env}-{component}\".format(env=self.environment, component=self.component)\n\n grunt_config_template = \"\"\"lock = {{\nbackend = \"dynamodb\"\nconfig {{\nstate_file_id = \"{state_file_id}\"\naws_region = \"{region}\"\ntable_name = \"terragrunt_locks\"\nmax_lock_retries = 360\n}}\n}}\nremote_state = {{\nbackend = \"s3\"\nconfig {{\nencrypt = \"true\"\nbucket = \"{s3_bucket}\"\nkey = \"{env}/{component}/terraform.tfstate\"\nregion = \"{region}\"\n}}\n}}\"\"\"\n\n with open('.terragrunt', 'w') as f:\n f.write(grunt_config_template.format(\n state_file_id=state_file_id,\n region=self.metadata['REGION'],\n s3_bucket=self.s3_bucket,\n env=self.environment,\n component=self.component\n ))",
"def get_config_template(self) -> cconfig.Config:",
"def get_config_tag(config):\n\n # Configuration attributes that affect representation value\n config_attributes = dict(\n frame_sampling=config.proc.frame_sampling\n )\n\n sha256 = hashlib.sha256()\n sha256.update(json.dumps(config_attributes).encode(\"utf-8\"))\n return sha256.hexdigest()[:40]",
"def build_config():\n if not os.path.exists(config_path):\n # generate key pair\n priv_key, pub_key = crypt.ecdsa_generate()\n if not priv_key or not pub_key:\n log.error(\"Unable to generate public/private keypair....\")\n exit(0)\n else:\n # fill default config with generated keypair\n base_config['key']['pub'] = pub_key\n base_config['key']['priv'] = priv_key\n\n # dump default config\n log.info(\"Dumping initial config to: %s\", config_path)\n with open(config_path, 'w') as fp:\n json.dump(base_config, fp, sort_keys=True, indent=2)\n return True\n else:\n return False",
"def generate_master_key(self):\n return utils.random(secret.SecretBox.KEY_SIZE)",
"def generate_request_id():\n return 'req-%s' % uuid.uuid4()",
"def generate_request_id():\n return 'req-%s' % uuid.uuid4()",
"def _build_config() -> dict:\n d : dict = {}\n d['api'] = {}\n d['interval'] = FoobarExtensionBot.STD_INTERVAL\n d['api']['cmd_id'] = 'dummy'\n d['api']['client_id'] = input('client_id: ')\n d['api']['client_secret'] = input('client_secret: ')\n d['outtext'] = input('output_text: ')\n # build dummy bot to retrieve command info\n try:\n b : FoobarExtensionBot = FoobarExtensionBot(ExtensionConfig(**d))\n except InvalidTokenError:\n print('error: could not retrive access token with your given credentials')\n _exit()\n except NoReplaceTokenFoundError:\n print(f'error: there was no {FoobarExtensionBot.REPLACE_TOKEN} in your given output')\n _exit()\n # get commands and make user select\n cmds : list = b.get_custom_commands()\n cmd_id : int = cmds[_prompt_choice([c.command_name for c in cmds])].id\n # build and return config\n d['api']['cmd_id'] = cmd_id\n return d",
"def req_id_generator() -> str:\n # 8 chars long should be long enough, add the 'Generated' prefix to know not to search for this id in the elb logs\n return f'Generated-{str(uuid.uuid4())[:8]}'",
"def generate_project_key():\n return shortuuid.ShortUUID().random(length=32)",
"def sync_config():\n rsync_project(remote_dir='/apps/sharejs-rethinkdb-example/config/', local_dir='./config/')",
"def util_generate_key(conf_file=None):\n keyname = DebRepo(**config(conf_file=conf_file)).generate_key()\n print(keyname)",
"def _generate_global_config() -> str:\n logger = getLogger(__name__)\n dst = os.path.join(os.path.expanduser(\"~\"),\n \".aiscalator/config/aiscalator.conf\")\n logger.info(\"Generating a new configuration file for aiscalator:\\n\\t%s\",\n dst)\n pattern = [\n \"testUserID\",\n \"generation_date\",\n ]\n replace_value = [\n generate_user_id(),\n '\"' + str(datetime\n .utcnow()\n .replace(tzinfo=timezone(\"UTC\"))) +\n '\" // in UTC timezone',\n ]\n dst_dir = os.path.dirname(dst)\n if dst_dir:\n os.makedirs(dst_dir, exist_ok=True)\n copy_replace(data_file(\"../config/template/aiscalator.conf\"),\n dst, pattern=pattern, replace_value=replace_value)\n open(os.path.join(dst_dir, \"apt_packages.txt\"), 'a').close()\n open(os.path.join(dst_dir, \"requirements.txt\"), 'a').close()\n open(os.path.join(dst_dir, \"lab_extensions.txt\"), 'a').close()\n return dst",
"def gnupg_gen_key_conf(\n pytestconfig: \"_pytest.config.Config\", tmp_path_factory: TempPathFactory\n) -> Generator[Path, None, None]:\n name = \"gnupg-gen-key.conf\"\n yield from get_user_defined_file(pytestconfig, name)\n yield from get_embedded_file(tmp_path_factory, name=name)",
"async def generate_wallet_key(config: Optional[str]) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug(\"generate_wallet_key: >>> config: %r\",\n config)\n\n if not hasattr(generate_wallet_key, \"cb\"):\n logger.debug(\"generate_wallet_key: Creating callback\")\n generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))\n\n c_config = c_char_p(config.encode('utf-8')) if config is not None else None\n\n key = await do_call('indy_generate_wallet_key',\n c_config,\n generate_wallet_key.cb)\n\n res = key.decode()\n\n logger.debug(\"generate_wallet_key: <<< res: %r\", res)\n return res",
"def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? [{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output",
"def prepare_config_request(self, req):\n\t\tself.content_type = 'text/javascript'\n\t\tself.template = 'fckconfig-custom.js.tmpl'",
"def get_sunspec_unique_id(\n config_entry_id: str, key: str, model_id: int, model_index: int\n) -> str:\n return f\"{config_entry_id}_{key}-{model_id}-{model_index}\"",
"def _gen_config():\n cfg = {\"frontends\": {}, \"backends\": {}}\n for machine in Machine.objects(\n monitoring__hasmonitoring=True,\n ):\n frontend, backend = _gen_machine_config(machine)\n cfg[\"frontends\"][machine.id] = frontend\n cfg[\"backends\"][machine.id] = backend\n return cfg",
"def generate_config(context):\n resources = []\n\n # Create an initial 'STARTED' pubsub notification.\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n depends_on=[],\n status_string='STARTED',\n ))\n\n # Required properties.\n billing_account_id = context.properties['billingAccountId']\n parent_organization = context.properties['parentOrganization']\n project_id = context.properties['projectId']\n\n # Optional properties, with defaults.\n high_security_network = context.properties.get('highSecurityNetwork', False)\n private_ip_google_access = context.properties.get('privateIpGoogleAccess', False)\n storage_bucket_lifecycle = context.properties.get('storageBucketLifecycle', 180)\n billing_account_friendly_name = context.properties.get('billingAccountFriendlyName', billing_account_id)\n # Use a project name if given, otherwise it's safe to fallback to use the\n # project ID as the name.\n project_name = context.properties.get('projectName', project_id)\n labels_obj = context.properties.get('labels', {})\n\n # Save this template's version number and all parameters inputs to the project metadata to keep track of what\n # operations were performed on a project.\n labels_obj.update({\n \"firecloud-project-template-version\" : str(FIRECLOUD_PROJECT_TEMPLATE_VERSION_ID)\n })\n\n for k, v in context.properties.items():\n label_k, label_v = satisfy_label_requirements('param--' + str(k), v)\n labels_obj.update({\n label_k: label_v\n })\n\n\n if high_security_network:\n labels_obj.update({\n \"vpc-network-name\" : FIRECLOUD_VPC_NETWORK_NAME,\n \"vpc-subnetwork-name\" : FIRECLOUD_VPC_SUBNETWORK_NAME\n })\n\n if 'parentFolder' in context.properties:\n parent_obj = {\n 'id': context.properties['parentFolder'],\n 'type': 'folder',\n }\n else:\n parent_obj = {\n 'id': context.properties['parentOrganization'],\n 'type': 'organization',\n }\n\n # Create the main project resource.\n resources.append({\n 'type': 'templates/project.py',\n 'name': 'fc-project',\n 'properties': {\n 'activateApis': FIRECLOUD_REQUIRED_APIS,\n 'billingAccountId': billing_account_id,\n 'billingAccountFriendlyName': billing_account_friendly_name,\n 'iamPolicies': create_iam_policies(context),\n 'labels': labels_obj,\n 'name': project_name,\n # The project parent. For FireCloud, this should refer to the\n # firecloud.org (or equivalent) GCP organization ID.\n 'parent': parent_obj,\n 'projectId': project_id,\n # If true, this would remove the default compute egine service\n # account. 
FireCloud doesn't use this SA, but we're leaving this set\n # to False to avoid changing any legacy behavior, at least initially.\n 'removeDefaultSA': False,\n # Removes the default VPC network for projects requiring stringent\n # network security configurations.\n 'removeDefaultVPC': high_security_network,\n 'createUsageExportBucket': False,\n # Always set up the storage logs and cromwell auth buckets for Firecloud\n 'storageLogsBucket': True,\n 'storageBucketLifecycle': storage_bucket_lifecycle,\n 'cromwellAuthBucket': True\n }\n })\n\n if high_security_network:\n resources.extend(create_high_security_network(context))\n resources.extend(create_firewall(context))\n if private_ip_google_access:\n resources.extend(create_private_google_access_dns_zone(context))\n else:\n resources.extend(create_default_network(context))\n\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n # This is somewhat hacky, but we can't simply collect the name of each\n # collected resource since template call nodes aren't \"real\" resources\n # that can be part of a dependsOn stanza. So instead, we collect the\n # names of all resources that are output by the network (which itself\n # depends on the project). It doesn't seem to be possible to concatenate\n # dependsOn arrays within the reference syntax, otherwise we could make\n # this depend explicitly on all resources from the template nodes.\n depends_on='$(ref.fc-network.resourceNames)',\n status_string='COMPLETED'))\n\n return {'resources': resources}"
]
| [
"0.596656",
"0.5661122",
"0.55148077",
"0.5476729",
"0.5386889",
"0.52841794",
"0.52840906",
"0.5246648",
"0.5237421",
"0.52204037",
"0.51961136",
"0.51553464",
"0.5150974",
"0.51448613",
"0.5127649",
"0.5121277",
"0.5121277",
"0.5118716",
"0.5100069",
"0.5089299",
"0.5077849",
"0.5064849",
"0.5041753",
"0.5023571",
"0.5014973",
"0.5009277",
"0.5002329",
"0.4984746",
"0.4984212",
"0.4971303"
]
| 0.7949307 | 0 |
Save the beam parameter type with the specified name, description and units. | def _savebeamparamproptype(self, cursor, bpptname, bpptunit=None, bpptdesc=None):
sql = """
INSERT INTO beam_param_prop_type (
beam_param_prop_type_name,
beam_param_prop_type_desc,
beam_param_prop_type_unit
) VALUES (
'%s', %s, %s
)
"""
if bpptunit is None:
bpptunit = "NULL"
else:
bpptunit = "'%s'" % (bpptunit,)
if bpptdesc is None:
bpptdesc = "NULL"
else:
bpptdesc = "'%s'" % (bpptdesc,)
cursor.execute(sql % (bpptname, bpptdesc, bpptunit))
return cursor.lastrowid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_parms(self, name):\n self._save_parms(name.encode())",
"def save(\n cls,\n save_location: typing.Union[str, BytesIO, Path],\n project_info,\n parameters: dict,\n range_changed=None,\n step_changed=None,\n ):",
"def write_parameter(self, parameter_name: str, parameter_value: Union[str, float, int]):\n self._parameters.append(Parameter(parameter_name, parameter_value))",
"def add_parameter(self,\n name, # The name of the parameter\n scaling=None, # The type of scaling to be used for the parameter\n type=\"int\", # The type of the parameter, such as float\n min=0, # The minimum value of the parameter\n max=100, # The maximum value of the parameter\n significance=1, # The smallest significant step size\n value=None, # The value or value parameters\n distribution=None): # The distribution of the parameter\n config = {\"scaling\" : scaling, \n \"type\": type,\n \"min\": min, \n \"max\": max, \n \"significance\": significance,\n \"value\": value,\n \"distribution\": distribution}\n self.param_names.append(name)\n self.param_settings.append(config)",
"def save_params(model_name: str):\n with open(model_name + '.params', 'w') as f:\n json.dump(pr.__dict__, f)",
"def add_parameter(self, param_type, param_name, this_class_name=''):\n self._set_instance_data('parameters',\n ' '.join([self.add_dependency(param_type, this_class_name),\n param_name]))",
"def __setitem__(self, name: str, value):\n super(Parameter, self).__setitem__(name, value)",
"def setParameter(self, name, value):",
"def on_save_parameters(self):\n obj_points = self.get_object_points()\n cam_pos = self.get_camera_position()\n distortion = self.get_distortion_coeeficients()\n\n d = {\n 'object positions': obj_points,\n 'camera positions': cam_pos,\n 'distortion coefficients': distortion\n }\n\n jsn = json.dumps(d)\n h = hashlib.sha1(jsn.encode('utf-8')).hexdigest()\n fn = f'{h}.json'\n\n with open(fn, 'w') as f:\n f.write(jsn)\n\n self.statusBar().showMessage(f'Parameters have been save to {fn}.')\n self.param_file = fn",
"def put_var_param(self, var_type, num_vars):\n if var_type.upper() not in EX_VAR_TYPES:\n raise ExodusIIWriterError(\n \"var_type {0} not recognized\".format(var_type))\n ierr = exolib.py_expvp(self.exoid, var_type.lower(), num_vars)\n if ierr:\n raise ExodusIIWriterError(\"Error putting var params\")",
"def save(self, filename, ftype='HDF5'):\n from . import Param\n from ...util.misc import param_to_array\n def gather_params(self, plist):\n if isinstance(self,Param):\n plist.append(self)\n plist = []\n self.traverse(gather_params, plist)\n names = self.parameter_names(adjust_for_printing=True)\n if ftype=='HDF5':\n try:\n import h5py\n f = h5py.File(filename,'w')\n for p,n in zip(plist,names):\n n = n.replace('.','_')\n p = param_to_array(p)\n d = f.create_dataset(n,p.shape,dtype=p.dtype)\n d[:] = p\n if hasattr(self, 'param_array'):\n d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)\n d[:] = self.param_array\n f.close()\n except:\n raise 'Fails to write the parameters into a HDF5 file!'",
"def put_parameter(parameter_name, parameter_value, parameter_type, overwrite=False):\n ssm_client = boto3.client('ssm')\n\n try:\n result = ssm_client.put_parameter(\n Name=parameter_name,\n Value=parameter_value,\n Type=parameter_type,\n Overwrite=overwrite\n )\n logging.info(\"%s was added to parameter store\", parameter_name)\n except ClientError as e:\n logging.error(e)\n return None\n return result['Version']",
"def save(self):\n with open(os.path.join(self.save_path, \"experiment.delira.pkl\"),\n \"wb\") as f:\n pickle.dump(self, f)\n\n self.params.save(os.path.join(self.save_path, \"parameters\"))",
"def saveParams(self, trnParams):\n\n SystemIO.save(trnParams.toNpArray(), self.path.model_info_file)",
"def saveParams(self, trnParams):\n\n SystemIO.save(trnParams.toNpArray(), self.path.model_info_file)",
"def save_params(params):\r\n pickle.dump(params, open('params.p', 'wb'))",
"def save_parameters(self):\n self.read_parameters()\n group = NXprocess()\n group['model'] = self.composite_model\n group['data'] = self.data\n for m in self.models:\n group[m['name']] = self.get_model(m['model'])\n parameters = NXparameters(attrs={'model': m['class']})\n for n, p in m['parameters'].items():\n n = n.replace(m['model'].prefix, '')\n parameters[n] = NXfield(p.value, error=p.stderr,\n initial_value=p.init_value,\n min=str(p.min), max=str(p.max),\n vary=p.vary, expr=p.expr)\n group[m['name']].insert(parameters)\n group['title'] = 'Fit Model'\n group['model'] = self.get_model()\n self.write_group(group)",
"def save_params(params):\n with open('params.p', 'wb') as out_file:\n pickle.dump(params, out_file)",
"def save(self):\n # type: () -> None\n setattr(self.fn, self.PARAM_NAME, self)",
"def _put_ssm_param(self, parameter, parameter_name):\n self.ssm_client.put_parameter(\n Name=parameter_name,\n Type=\"String\",\n Value=json.dumps(parameter),\n Overwrite=True,\n Tier=\"Intelligent-Tiering\",\n )",
"def apply(self, name, size, type):\n self.properties['name'] = name\n self.properties['size'] = size\n self.properties['type'] = type",
"def store_type(self, ptype):\n attr = self.node.get_attr(Type)\n attr.store(ptype)",
"def save():\n self.SSM_CLIENT.put_parameter(\n Name=self._state_name,\n Description=self._STATE_DESCRIPTION.format(self._app_type, self.function_name),\n Value=param_value,\n Type='SecureString',\n Overwrite=True\n )",
"def save_parameters(self):\n paramfile = os.path.join(self._datadir, self.id.lower() + '.cfg')\n \n params_var = {}\n params_var['eta'] = self.system_param['eta']\n params_var['cov'] = self.system_param['cov']\n \n with open(paramfile, 'w') as paramjson:\n json.dump(params_var, paramjson)",
"def _write_param(parameters):\n # Load data\n from ._common import options, extra_options\n\n data = deepcopy(options)\n data.update(parameters[\"options\"])\n\n # Table\n if not isinstance(data[\"t_steps\"], (list, tuple, numpy.ndarray)):\n data[\"t_steps\"] = [data[\"t_steps\"]]\n\n # Formats\n fmt = block_to_format[\"PARAM\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n fmt3 = str2format(fmt[3])\n fmt4 = str2format(fmt[4])\n fmt5 = str2format(fmt[5])\n\n # Record 1\n _mop = deepcopy(extra_options)\n _mop.update(parameters[\"extra_options\"])\n mop = [\" \" if _mop[k] is None else str(_mop[k]) for k in sorted(_mop.keys())]\n\n values = [\n data[\"n_iteration\"],\n data[\"verbosity\"],\n data[\"n_cycle\"],\n data[\"n_second\"],\n data[\"n_cycle_print\"],\n \"{}\".format(\"\".join(mop)),\n None,\n data[\"temperature_dependence_gas\"],\n data[\"effective_strength_vapor\"],\n ]\n out = write_record(values, fmt1)\n\n # Record 2\n values = [\n data[\"t_ini\"],\n data[\"t_max\"],\n -((len(data[\"t_steps\"]) - 1) // 8 + 1),\n data[\"t_step_max\"],\n None,\n data[\"gravity\"],\n data[\"t_reduce_factor\"],\n data[\"mesh_scale_factor\"],\n ]\n out += write_record(values, fmt2)\n\n # Record 2.1\n values = [x for x in data[\"t_steps\"]]\n out += write_record(values, fmt3, multi=True)\n\n # Record 3\n values = [\n data[\"eps1\"],\n data[\"eps2\"],\n None,\n data[\"w_upstream\"],\n data[\"w_newton\"],\n data[\"derivative_factor\"],\n ]\n out += write_record(values, fmt4)\n\n # Record 4\n n = min(4, len(parameters[\"default\"][\"initial_condition\"]))\n values = parameters[\"default\"][\"initial_condition\"][:n]\n out += write_record(values, fmt5)\n\n # Record 5 (EOS7R)\n if len(parameters[\"default\"][\"initial_condition\"]) > 4:\n values = parameters[\"default\"][\"initial_condition\"][n:]\n out += write_record(values, fmt5)\n\n return out",
"def set_parameter(self, params, name, val):\n raise NotImplementedError()",
"def save(self, fName, **kwargs):\n f = h5py.File(fName + \".hdf5\", \"w\")\n f.create_dataset(\"psi\", data=self.psi.get().astype(np.complex64))\n f.create_dataset(\"n\", data=self.n.get().astype(np.float32))\n f.create_dataset(\"x_ax\",\n data=self.grid.x_axis_scaled.astype(np.float32))\n f.create_dataset(\"k_ax\",\n data=self.grid.k_axis_scaled.astype(np.float32))\n f.create_dataset(\"Pdt\", data=self.Pdt.get().astype(np.float32))\n if hasattr(self, \"energy\"):\n f.create_dataset(\"energy\", data=self.energy.astype(np.float32))\n if hasattr(self, \"number\"):\n f.create_dataset(\"number\", data=self.number.astype(np.float32))\n if hasattr(self, \"times\"):\n f.create_dataset(\"times\", data=self.times.astype(np.float32))\n if hasattr(self, \"spectrum\"):\n f.create_dataset(\"spectrum\",\n data=self.spectrum.astype(np.complex64))\n f.create_dataset(\"omega_axis\",\n data=self.omega_axis.astype(np.float32))\n\n # paramsToSave = ['R', 'g_C', 'g_R', 'gamma_C', 'gamma_R', 'm', 'charT',\n # 'charL']\n for (param, value) in self.paramContainer.getOutputParams().items():\n f.attrs[param] = value.magnitude\n for (attr, value) in kwargs.items():\n f.attrs[attr] = value\n f.attrs[\"t\"] = self.time\n f.close()",
"def test_parameter(self):\n # Setup test\n infilename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_simple.xml\")\n filename = os.path.join(_TMP_DIR, \"reg_parameter.xml\")\n out_source_name = \"physics_types_parameter\"\n in_source = os.path.join(_SAMPLE_FILES_DIR, out_source_name + '.F90')\n out_source = os.path.join(_TMP_DIR, out_source_name + '.F90')\n in_meta = os.path.join(_SAMPLE_FILES_DIR, out_source_name + '.meta')\n out_meta = os.path.join(_TMP_DIR, out_source_name + '.meta')\n remove_files([out_source, out_meta])\n tree, root = read_xml_file(infilename)\n # Change output filename and add a parameter with an initial value\n for obj in root:\n oname = obj.get('name')\n if (obj.tag == 'file') and (oname == 'physics_types_simple'):\n # Reset the filename\n obj.set('name', out_source_name)\n # Add a new variable with an unknown dimension\n new_var = ET.SubElement(obj, \"variable\")\n new_var.set(\"local_name\", \"pver\")\n new_var.set(\"standard_name\", \"vertical_layer_dimension\")\n new_var.set(\"units\", \"count\")\n new_var.set(\"type\", \"integer\")\n new_var.set(\"allocatable\", \"parameter\")\n dims_elem = ET.SubElement(new_var, \"initial_value\")\n dims_elem.text = '42'\n break\n # End if\n # End for\n tree.write(filename)\n # Run test\n retcode, files = gen_registry(filename, 'eul', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # Check return code\n amsg = \"Test failure: retcode={}\".format(retcode)\n self.assertEqual(retcode, 0, msg=amsg)\n flen = len(files)\n amsg = \"Test failure: Found {} files, expected 1\".format(flen)\n self.assertEqual(flen, 1, msg=amsg)\n # Make sure each output file was created\n self.assertTrue(os.path.exists(out_meta))\n self.assertTrue(os.path.exists(out_source))\n # For each output file, make sure it matches input file\n amsg = \"{} does not match {}\".format(in_meta, out_meta)\n self.assertTrue(filecmp.cmp(in_meta, out_meta, shallow=False), msg=amsg)\n amsg = \"{} does not match {}\".format(in_source, out_source)\n self.assertTrue(filecmp.cmp(in_source, out_source, shallow=False),\n msg=amsg)",
"def putparam(self,parname_,parvalue_):\n if isinstance(parname_,unicode):\n parname_ = parname_.encode(\"utf-8\",errors=\"replace\")\n if isinstance(parvalue_,unicode):\n parvalue_ = parvalue_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putparam(self.__nativep,parname_,parvalue_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def __add_to_nlp(self, param_name, param, duplicate_if_size_is_one, _type=None):\n if isinstance(param, (list, tuple)):\n if len(param) != self.nb_phases:\n raise RuntimeError(\n f\"{param_name} size({len(param)}) does not correspond to the number of phases({self.nb_phases}).\"\n )\n else:\n for i in range(self.nb_phases):\n self.nlp[i][param_name] = param[i]\n elif isinstance(param, OptionList):\n if len(param) == self.nb_phases:\n for i in range(self.nb_phases):\n self.nlp[i][param_name] = param[i]\n else:\n if len(param) == 1 and duplicate_if_size_is_one:\n for i in range(self.nb_phases):\n self.nlp[i][param_name] = param[0]\n else:\n raise RuntimeError(\n f\"{param_name} size({len(param)}) does not correspond \"\n f\"to the number of phases({self.nb_phases}).\"\n )\n else:\n if self.nb_phases == 1:\n self.nlp[0][param_name] = param\n else:\n if duplicate_if_size_is_one:\n for i in range(self.nb_phases):\n self.nlp[i][param_name] = param\n else:\n raise RuntimeError(f\"{param_name} must be a list or tuple when number of phase is not equal to 1\")\n\n if _type is not None:\n for nlp in self.nlp:\n if nlp[param_name] is not None and not isinstance(nlp[param_name], _type):\n raise RuntimeError(f\"Parameter {param_name} must be a {str(_type)}\")"
]
| [
"0.6326744",
"0.5966426",
"0.596266",
"0.5775812",
"0.57679296",
"0.5691061",
"0.5647583",
"0.5620973",
"0.56005216",
"0.5581241",
"0.55669177",
"0.5535288",
"0.5534114",
"0.5506977",
"0.5506977",
"0.5477506",
"0.54759413",
"0.5464111",
"0.54561913",
"0.5452761",
"0.5451538",
"0.5414546",
"0.5397341",
"0.5360923",
"0.5349132",
"0.5309585",
"0.53059775",
"0.5297867",
"0.52906764",
"0.52897036"
]
| 0.6978819 | 0 |
Query the DB for the beam parameter property type with the given name and units. | def retrievebeamparamproptype(self, cursor, bpptname, bpptunit=None):
sql = """
SELECT
beam_param_prop_type_id,
beam_param_prop_type_name,
beam_param_prop_type_desc,
beam_param_prop_type_unit
FROM
beam_param_prop_type
WHERE
"""
sql += "beam_param_prop_type_name = '%s'" % (bpptname,)
if bpptunit is None:
sql += " AND beam_param_prop_type_unit IS NULL"
else:
sql += " AND beam_param_prop_type_unit = '%s'" % (bpptunit,)
cursor.execute(sql)
return cursor.fetchone() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_parameter_type(self, name):\n phil_scope = phil.parse(\n \"\"\"include scope dials.command_line.scale.phil_scope\"\"\",\n process_includes=True,\n )\n obj = phil.find_scope(phil_scope, name)\n if not obj:\n raise ValueError(\n \"\"\"Unable to resolve %s in the phil scope, make sure full phil path\nis provided. For example, physical.decay_correction rather than decay_correction\"\"\"\n % name\n )\n return obj.type.phil_type # a str: \"int\", \"bool\" etc",
"def find_param(self, ptype, name):\n param_to_find = PBRTParam(ptype, name)\n for p in self._data:\n if p == param_to_find:\n return p\n return None",
"def _savebeamparamproptype(self, cursor, bpptname, bpptunit=None, bpptdesc=None):\n sql = \"\"\"\n INSERT INTO beam_param_prop_type (\n beam_param_prop_type_name,\n beam_param_prop_type_desc,\n beam_param_prop_type_unit\n ) VALUES (\n '%s', %s, %s\n )\n \"\"\"\n if bpptunit is None:\n bpptunit = \"NULL\"\n else:\n bpptunit = \"'%s'\" % (bpptunit,)\n\n if bpptdesc is None:\n bpptdesc = \"NULL\"\n else:\n bpptdesc = \"'%s'\" % (bpptdesc,)\n\n cursor.execute(sql % (bpptname, bpptdesc, bpptunit))\n return cursor.lastrowid",
"def getParameter(self, session: Session, name: str) -> Parameter:\n\n try:\n dbParam = self._globalParametersDbHandler.getParameter(\n session, name)\n\n return Parameter.getFromDbDict(dbParam.__dict__)\n except TortugaException:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise",
"def query(self, name, python_type, optional=False, **kwargs):\n return self.simple_param('query', name, python_type, optional=optional,\n **kwargs)",
"def get_parameter_type(self, name):\n raise NotImplementedError()",
"def create_query_param(name: str, type_: Type, default) -> pydantic.fields.ModelField:\n param = inspect.Parameter(\n name=name,\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n default=default,\n annotation=type_,\n )\n field = fastapi.dependencies.utils.get_param_field(\n param=param, param_name=name, default_field_info=fastapi.params.Query\n )\n return field",
"def get_layout_property_type(layout_name, property_name, base_url=DEFAULT_BASE_URL):\n res = commands.cyrest_get('apply/layouts/' + layout_name + '/parameters', base_url=base_url)\n param_types = {param['name']: param['type'] for param in res}\n return param_types[property_name]",
"def get_parameter_unit(self, parameter_name):\n parameter_units = {\n 'tsky': units.Unit(\"Kelvin\"),\n 'kelvin': self.data_unit\n }\n return parameter_units.get(parameter_name)",
"def fetch(self, tp: 'str'):\n\t\tif tp not in Param.PARAM_TYPE:\n\t\t\traise KeyError\n\t\telse:\n\t\t\treturn self.data[tp]",
"def getParameter(self, name):",
"def query(cls, **kwds):\n #NOTE: Only static properties can be indexed by homer, \n # so we don't worry about querying for dynamic properties\n query = \"\"\n started = False\n for name in kwds:\n if not started:\n pattern = \"%s=:%s\" % (name, name)\n query += pattern\n started = True\n else:\n pattern = \" AND %s=:%s\" % (name, name)\n query += pattern\n\n q = 'SELECT * FROM %s WHERE %s' % (cls.kind(), query)\n query = CqlQuery(cls, q, **kwds)\n query.convert = True\n return query",
"def for_property(self, name):\n return self[self.types_map.get(name, 'text')]",
"def __getitem__(self, name: str) -> object:\n return super(Parameter, self).__getitem__(name)",
"def testTypeProperties(self):\n cmisClient = CmisClient(self.url, self.user, self.pwd,\n binding=self.binding,\n **self.ext_args)\n repo = cmisClient.getDefaultRepository()\n docTypeDef = repo.getTypeDefinition('cmis:document')\n assert 'cmis:document' == docTypeDef.getTypeId()\n props = docTypeDef.getProperties().values()\n assert len(props) > 0\n for prop in props:\n if prop.queryable:\n assert prop.queryName\n assert prop.propertyType",
"def getParam(self, params, name):\n return params.get(name)",
"def query_schema(self, name, param):\n\n alias, name, need_list = self.parse_entry(name)\n\n if not name:\n result = self.process_multiple_query(need_list, param)\n else:\n result = self.process_single_query(name, need_list, param)\n return alias, result",
"def getParam(self, name, enum=None):\n return Parameter(self, name, enum)",
"def resolve_type(name):\n types = {\n 'string': StringProperty,\n 'name': NameProperty,\n 'date': DateProperty,\n 'country': CountryProperty,\n 'address': AddressProperty,\n 'phone': PhoneProperty,\n 'email': EmailProperty,\n 'url': URLProperty,\n 'uri': URLProperty,\n 'identifier': IdentiferProperty\n }\n type_ = types.get(name.strip().lower())\n if type_ is None:\n raise TypeError(\"No such type: %s\" % name)\n return type_",
"def type_name(self):\n return \"%s %s\" % (self.param_type, self.name)",
"def query_types(self, types_params):\n username, password, api_key, max_items_to_return = SettingsOps.get_settings()\n types_runnable = TypeRunnable(username, password, api_key, types_params)\n types_runnable.type_object.task_complete.connect(self.on_new_types)\n self.init_progress_bar()\n self.search_thread_pool.start(types_runnable)",
"def test_ParameterVariable_init_basic_type(self):\n\n par = provide_parameter(\"double\", \"test\")\n\n self.assertEqual(par.name, \"test\")\n self.assertEqual(par.type, \"double\")",
"def queryComponent(type=None, filter=None, all=0):",
"def _query_properties(self, props=None, depth=0):\n root = None\n # build the propfind request\n if props is not None and len(props) > 0:\n prop = dav.Prop() + props\n root = dav.Propfind() + prop\n\n return self._query(root, depth)",
"def get_creature_type_properties(self, name):\n return self._get_monster_class(name).PROPERTIES",
"def get_by_name(name):\n\n result = {}\n status = 404\n print id\n # nodes=Property.query.all()\n obj = Property.query.filter_by(name=name).filter(Property.users.contains(current_user)).first()\n if obj:\n result['prop'] = obj\n status = 200\n\n return result, status",
"def types_query(owner_name):\n query = Products.query.with_entities(Products.type_name.label('Type'))\\\n .filter_by(owner_name=owner_name)\\\n .distinct()\n return query",
"def transclusion_param(self, **kw):\n return self._open('param',\n allowed_attrs=['name', 'type', 'value', 'valuetype', ],\n **kw)",
"def test_ParameterVariable_init_basic_type_value(self):\n\n par = provide_parameter(\"double\", \"test\", value=518)\n\n self.assertEqual(par.name, \"test\")\n self.assertEqual(par.type, \"double\")\n self.assertEqual(par.value, 518)",
"def get_units(self,):\n self.UNITS = {'pressure':'Pa',}\n return"
]
| [
"0.55514073",
"0.5370979",
"0.53658986",
"0.5357402",
"0.52250284",
"0.5210971",
"0.51845634",
"0.5178483",
"0.51416516",
"0.51144356",
"0.50588375",
"0.5011084",
"0.49770406",
"0.49436116",
"0.49431488",
"0.49372455",
"0.48987404",
"0.4895201",
"0.4863563",
"0.4856202",
"0.48516756",
"0.4835247",
"0.48172754",
"0.47981817",
"0.4796929",
"0.4794342",
"0.47908664",
"0.47876406",
"0.47698626",
"0.47590616"
]
| 0.6423724 | 0 |
Checks validity of post request | def check_post(*params, conn, event):
# check that connection to db valid
if conn is None:
return (False, connection_error())
# check that post request body is not empty
if event.get('body') is None:
return (False, format_response(404, {"error":"post request is empty"}))
request = json.loads(event['body'])
# check that post request contains all required values
missing_params = check_missing(*params, request=request)
if missing_params is not None:
return (False, format_response(400, {"errors": missing_params}))
return (True, request) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_post_data ( ):\n # check every field is present\n try:\n request.json[ 'source_lang' ]\n request.json[ 'target_lang' ]\n request.json[ 'text' ]\n\n TranslatorApp.verify_rpc_value ( request.json )\n\n except KeyError: # All the values are not present\n # 400 Bad Request\n abort ( 400, \"All mandatory fields are not provided\" )\n except ValueError as err:\n # 422 Unprocessable Entity\n abort ( 422, \"Unprocessable value: {0}\".format ( err.args ) )\n except BadRequest:\n # 400 Bad Request\n abort ( 400, \"Provided values are having malformed syntax\" )",
"def _validate_post(self, value, name, result):\n return result",
"def test_client_can_do_post_request(self):\n response = self.httpbin_4.test_requests_post_method()\n self.assertEqual(response.request.method, 'POST')\n self.assertEqual(response.status_code, 200)",
"def test_two_legged_post(self):\n resp, content = self._two_legged(\"POST\")\n\n self.assertEqual(int(resp['status']), 200)",
"def test_POST(self):\n if not self.url:\n return\n response = self.client.post(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_validate_post(client):\n response = client.post(\n '/user/',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)",
"def test_post(self):\n self.assertEqual(\n status.HTTP_405_METHOD_NOT_ALLOWED, self.response.status_code)",
"def test_post_rejects_missing_host(self) -> None:\n\n response = self.request(\"/\", method=\"POST\")\n self.assertEqual(response.code, 400)",
"def request_is_valid(request):\n return 'method' in request",
"def test_post_invalid(self):\n self.post_data['name'] = ''\n response = self._post()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('object' in response.context)\n self.assertEquals(response.context['object'], self.obj)\n self.assertTrue('form' in response.context)\n self.assertTrue(response.context['form'].is_bound)\n self.assertFalse(response.context['form'].is_valid())\n self.assertEquals(response.context['form'].instance, self.obj)\n self._assert_no_change()",
"def test_post_invalid(self):\n self.post_data['name'] = ''\n response = self._post()\n self.assertEquals(self.model.objects.count(), 0)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('form' in response.context)\n self.assertTrue(response.context['form'].is_bound)\n self.assertFalse(response.context['form'].is_valid())",
"def post(self):\n # by default post is not supported\n return False",
"def test_post_valid_data_question(self):\n\n response = self.post_question(self.valid_question)\n self.assertEqual(response.status_code, 201)",
"def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.validate():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)",
"def is_post(request):\n return request.method == 'POST'",
"def test_post_invalid(self):\n for field in ['language', 'style']:\n response = self.post(\n **{'content': 'foo', field: '123-invalid-abc'})\n self.assertEqual(\n response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_post(self):\n pass",
"def test_post(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_post(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_post_valid(self):\n response = self._post()\n self.assertEquals(self.model.objects.count(), 1)\n obj = self.model.objects.get()\n self.assertEquals(obj.pk, self.obj.pk)\n self.assertRedirectsNoFollow(response, obj.get_absolute_url())\n self.assertEquals(obj.name, self.post_data['name'])\n self.assertEquals(obj.business.pk, self.post_data['business_1'])\n self.assertEquals(obj.point_person.pk, self.post_data['point_person'])\n self.assertEquals(obj.activity_group.pk, self.post_data['activity_group'])\n self.assertEquals(obj.type.pk, self.post_data['type'])\n self.assertEquals(obj.status.pk, self.post_data['status'])\n self.assertEquals(obj.description, self.post_data['description'])",
"def test_post_invalid_data_question(self):\n\n response = self.post_question(self.invalid_question)\n\n self.assertEqual(response.status_code, 400)",
"def post(self, request, *args, **kwargs):\n verify_secure(request)\n return super().post(request, args, kwargs)",
"def post(self, request, *args, **kwargs):\n verify_secure(request)\n return super().post(request, args, kwargs)",
"def test_post(self):\n url, port = self.server.address\n\n #couple of basic POSTs\n #request parameters\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)",
"def test_create_invalid_submission(self):\n with self.client:\n # invalid submission registration\n sub_response = register_illegal_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['errors']!=None)",
"def _validate_http_request(self):\n if self.path != '/':\n print('Invalid request path:', self.path)\n self.send_error(HTTPStatus.NOT_FOUND, 'Request Must Have Path Of /')\n raise ValueError\n\n content_type = self.headers.get('Content-Type', None)\n if content_type != 'application/json':\n print('Invalid request Content-Type:', self.path)\n self.send_error(HTTPStatus.BAD_REQUEST, 'Content-Type Must Be application/json')\n raise ValueError",
"def test_post_valid(self):\n response = self._post()\n self.assertEquals(self.model.objects.count(), 1)\n obj = self.model.objects.get()\n self.assertRedirectsNoFollow(response, obj.get_absolute_url())\n self.assertEquals(obj.name, self.post_data['name'])\n self.assertEquals(obj.business.pk, self.post_data['business_1'])\n self.assertEquals(obj.point_person.pk, self.post_data['point_person'])\n self.assertEquals(obj.activity_group.pk, self.post_data['activity_group'])\n self.assertEquals(obj.type.pk, self.post_data['type'])\n self.assertEquals(obj.status.pk, self.post_data['status'])\n self.assertEquals(obj.description, self.post_data['description'])",
"def test_post(self):\n response = self._post()\n self.assertEquals(response.status_code, 405)",
"def post(self, request):\n pass"
]
| [
"0.68396235",
"0.68267936",
"0.67970574",
"0.67464995",
"0.6743694",
"0.6737659",
"0.67249835",
"0.6723429",
"0.6690368",
"0.6686085",
"0.6655791",
"0.6647895",
"0.6620084",
"0.65968996",
"0.6478333",
"0.643864",
"0.6372505",
"0.6351049",
"0.63355416",
"0.63355416",
"0.6320541",
"0.6320243",
"0.63139886",
"0.63139886",
"0.63134944",
"0.63126755",
"0.6289089",
"0.6286113",
"0.627939",
"0.6265914"
]
| 0.7285991 | 0 |
Sets the data_aggregation_setting of this RawDataSettingsV1. | def data_aggregation_setting(self, data_aggregation_setting):
self._data_aggregation_setting = data_aggregation_setting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aggregate(self, aggregation):\n self._data = self._data.aggregate(**aggregation)",
"def query_aggregation(self, query_aggregation: ConfigNodePropertyBoolean):\n\n self._query_aggregation = query_aggregation",
"def raw_data_setting(self, raw_data_setting):\n\n self._raw_data_setting = raw_data_setting",
"def set_aggregate_data(self, event_name, value, key=None):\n \n raise NotImplementedError()",
"def __init__(self, data_aggregation_setting=None, raw_data_setting=None, units_setting=None, work_hours_setting=None): # noqa: E501 # noqa: E501\n\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting",
"def default_aggregation(self):\n return self._aggregation",
"def aggregation_mode(self, aggregation_mode):\n allowed_values = [\"roundrobin\", \"failover\", \"lacp\", \"fec\"]\n if aggregation_mode is not None and aggregation_mode not in allowed_values:\n raise ValueError(\n \"Invalid value for `aggregation_mode`, must be one of {0}\"\n .format(allowed_values)\n )\n\n self._aggregation_mode = aggregation_mode",
"def add_aggregation_data(self, payload):\n raise NotImplementedError()",
"def set_imagedata(self, imagedata, masking=None):\n self._properties[\"imagedata\"] = imagedata\n if masking is not None:\n self.imagedata[masking] = np.NaN",
"def __push_aggregation_lowest_layer(self, aggregation_object, aggregation_name, table, id_name):\n id = 0\n aggregation_value = 0\n for aggregation in aggregation_object:\n id = aggregation[aggregation_name][0]\n aggregation_value = aggregation[aggregation_name][1]\n self.__postgre_db.update(table, \"aggregation=\" + str(aggregation_value), id_name + \"=\" + str(id))",
"def __init__(__self__, *,\n aggregation_kind: Optional[str] = None):\n if aggregation_kind is not None:\n pulumi.set(__self__, \"aggregation_kind\", aggregation_kind)",
"def setGroupDataStretch(self, groupName, dataStretch):\n if dataStretch is not None:\n if not isinstance(dataStretch, bool):\n return\n self.setGroupSetting(groupName, self._dataStretchToken, dataStretch)",
"def aggregate_rating(self, aggregate_rating: object):\n\n self._aggregate_rating = aggregate_rating",
"def set_column_aggregations(self, column, type, min=False, max=False, count=False, count_distinct=False,\n sum=False,concat=False,stddev=False,avg=False):\n cs = self.get_or_create_column_settings(column)\n cs[\"type\"] = type\n cs[\"min\"] = min\n cs[\"max\"] = max\n cs[\"count\"] = count\n cs[\"countDistinct\"] = count_distinct\n cs[\"sum\"] = sum\n cs[\"concat\"] = concat\n cs[\"stddev\"] = stddev\n return cs",
"def calc_aggregate(self, dataset):\n if not self.needs_aggregate:\n logup('no aggregate calculation needed', level='warning')\n logger.warning(\"no aggregate calculation needed\")\n return # no need to calculate\n if not dataset.is_cached:\n raise HXLException(\"need a cached dataset for calculating an aggregate value\")\n if self.value == 'min':\n self.value = dataset.min(self.pattern)\n self.op = operator.eq\n elif self.value == 'max':\n self.value = dataset.max(self.pattern)\n self.op = operator.eq\n elif self.value == 'not min':\n self.value = dataset.min(self.pattern)\n self.op = operator.ne\n elif self.value == 'not max':\n self.value = dataset.max(self.pattern)\n self.op = operator.ne\n else:\n raise HXLException(\"Unrecognised aggregate: {}\".format(value))\n self.needs_aggregate = False",
"def _setup_aggregation(self, aggregator=None, **kwargs):\n return super(ACLFilterViewMixin, self)._setup_aggregation(\n aggregator=ACLESAggregator, **kwargs)",
"def _set_atomic_aggregate_set(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"atomic-aggregate-set\", rest_name=\"atomic-aggregate-set\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"atomic_aggregate_set must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"atomic-aggregate-set\", rest_name=\"atomic-aggregate-set\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__atomic_aggregate_set = t\n if hasattr(self, '_set'):\n self._set()",
"def aggregation_mode(self):\n return self._aggregation_mode",
"def aggregate_hash(self, aggregate_hash):\n\n self._aggregate_hash = aggregate_hash",
"def aggregation_config(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesS3S3OutputFormatConfigAggregationConfigArgs']]:\n return pulumi.get(self, \"aggregation_config\")",
"def __init__(self, query_aggregation: ConfigNodePropertyBoolean=None): # noqa: E501\n self.openapi_types = {\n 'query_aggregation': ConfigNodePropertyBoolean\n }\n\n self.attribute_map = {\n 'query_aggregation': 'query.aggregation'\n }\n\n self._query_aggregation = query_aggregation",
"def aggregation_config(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesUpsolverS3OutputFormatConfigAggregationConfigArgs']]:\n return pulumi.get(self, \"aggregation_config\")",
"def update_aggregated_data(aggregated_data, datum):\n if 'last_date' not in aggregated_data:\n aggregated_data['last_date'] = datum['date']\n\n if aggregated_data['last_date'] != datum['date']:\n \"\"\"\n We are calculating daily min, max values so only update when hit new date.\n \"\"\"\n\n if aggregated_data['sum'] < aggregated_data['min']:\n aggregated_data['min'] = aggregated_data['sum']\n\n if aggregated_data['sum'] > aggregated_data['max']:\n aggregated_data['max'] = aggregated_data['sum']\n\n aggregated_data['last_date'] = datum['date']\n \n\n sign = 1\n if datum['type'] == 'debit':\n sign = -1\n\n aggregated_data['n'] += 1\n aggregated_data['sum'] += sign * Decimal(datum['amount'])\n\n return aggregated_data",
"def setscaling(self, scaling):\n\n self.__scaling = scaling",
"def aggregation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregation_type\")",
"def aggregation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aggregation_type\")",
"def __push_aggregation(self, table, sub_table, table_id, sub_table_id):\n table_entries = self.__postgre_db.get_data_from_table(table)\n for entry in table_entries:\n aggregation = 0\n entry_id = entry[table_id]\n entries_to_look_up = entry[sub_table_id]\n\n for look_up in entries_to_look_up:\n # calcutate aggregations differently depending on how the table structure is\n if len(entries_to_look_up) > 1:\n stored_value = self.__postgre_db.get(sub_table, sub_table_id + \"=\" + str(look_up), \"aggregation\")\n if stored_value is None:\n stored_value = 0\n aggregation += stored_value\n\n else:\n query = \"SELECT SUM(aggregation) FROM \" + sub_table + \" WHERE \" + sub_table_id + \"=\" + str(look_up)\n aggregation = self.__postgre_db.query(query)[0]['sum']\n if aggregation is None:\n aggregation = 0\n\n self.__postgre_db.update(table, \"aggregation=\" + str(aggregation), table_id + \"=\" + str(entry_id))",
"def set_data_encoding(self, encoding):\n self._data_encoding = encoding",
"def statistic_aggregation(self, resource=None, resource_type=None,\n meter_name=None, period=300, aggregate='mean',\n granularity=300):\n\n pass",
"def finalize_aggregated_data(aggregated_data):\n\n if aggregated_data['sum'] < aggregated_data['min']:\n aggregated_data['min'] = aggregated_data['sum']\n\n if aggregated_data['sum'] > aggregated_data['max']:\n aggregated_data['max'] = aggregated_data['sum']\n\n return aggregated_data"
]
| [
"0.618007",
"0.5699229",
"0.5593449",
"0.5534642",
"0.5492791",
"0.5159963",
"0.51563",
"0.5152062",
"0.51290625",
"0.48896068",
"0.48860914",
"0.4833387",
"0.47638878",
"0.47190312",
"0.47081825",
"0.465748",
"0.4626875",
"0.46223742",
"0.46195033",
"0.4480558",
"0.44752845",
"0.44700497",
"0.44623813",
"0.44345787",
"0.44244406",
"0.44244406",
"0.43849048",
"0.43547243",
"0.43507105",
"0.43375778"
]
| 0.8585378 | 0 |
Sets the raw_data_setting of this RawDataSettingsV1. | def raw_data_setting(self, raw_data_setting):
self._raw_data_setting = raw_data_setting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def raw(self, raw):\n\n self._raw = raw",
"def setOnIRRawDataHandler(self, IRRawDataHandler):\r\n if IRRawDataHandler == None:\r\n self.__IRRawDataDelegate = None\r\n self.__onIRRawDataHandler = None\r\n else:\r\n self.__IRRawDataDelegate = IRRawDataHandler\r\n self.__onIRRawDataHandler = self.__IRRAWDATAHANDLER(self.__nativeIRRawDataEvent)\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_set_OnRawData_Handler(self.handle, self.__onIRRawDataHandler, None)\r\n except RuntimeError:\r\n self.__IRRawDataDelegate = None\r\n self.__onIRRawDataHandler = None\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)",
"def load_data(self, data):\n self._load_raw_data = data",
"def set_data(self, data):\n self.data = data",
"def set_data(self, data):\n\n pass",
"def set_data(self, data):\n self._set_data(data)",
"def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data",
"def data_aggregation_setting(self, data_aggregation_setting):\n\n self._data_aggregation_setting = data_aggregation_setting",
"def set_qr_data(self, qr_data: Sequence[Decoded]):\n self.qr_data = qr_data",
"def setData(self, data):\n self._data = data",
"def set_data(self, data):\n\n self._data = data",
"def set_data(self, data):\n\n self._data = data",
"def set_data(self, data):\n\n self._data = data",
"def AddRawData(self, data):\n self.RawData = data",
"def set_data(self, data):\n\n # Convert voltages to currents and overwrite\n if self.use_unit == 'A':\n data['data'] = self.convert_to_unit(data['data'], self.use_unit)\n\n super(RawDataPlot, self).set_data(data)",
"def setData(self, data):\n self.data = data",
"def setData(self, data):\n self.data = data",
"def SetData(self, data):\r\n\r\n self._data = data",
"def set_data(self, value):\n self._set_data(value)\n self.data_changed = True\n return",
"def setData(self, data):\n return None",
"def set_temp_data(self, data):\n self.data = data",
"def SetValueRaw(self, row, rawcol, value):\n try:\n self.data[row, rawcol] = value\n except IndexError:\n while self.GetNumberRows()-1<=row:\n self.AppendRows()\n self.data[row, rawcol] = value",
"def set_data(self, data):\n self._model.set_data(data)\n self.__refresh()",
"def set_custom_wave(self, wave: np.ndarray, update_config: bool = True) -> None:\n self.wave = wave\n if update_config:\n self.start_wavelength = self.wave[0]\n self.end_wavelength = self.wave[-1]\n self.R_samp = np.nan\n self._custom_wave = True",
"def setDataUnit(self, dataUnit):\n\t\tself.urmaswin.setDataUnit(dataUnit)",
"def set_data(self, data):\n assert data.shape == self.shape, \"data should be an array of the same shape as Mask\"\n assert data.dtype == bool, \"data should be an array of data type bool\"\n\n self.data = data",
"def setData(self,data):\n self.data = struct.pack(\"!Q\",data)",
"def setData(self,data):\n self.data = struct.pack(\"!Q\",data)",
"def setData(self, data):\n self.data = struct.pack(\"!I\",data)",
"def SetSampleParameters(self, data):\n self._SetParameters(data, 'SetSampleParameters')"
]
| [
"0.55611116",
"0.5153382",
"0.5131934",
"0.509038",
"0.508902",
"0.5088541",
"0.5048942",
"0.50320804",
"0.49918416",
"0.4986974",
"0.49691084",
"0.49691084",
"0.49691084",
"0.49582824",
"0.4947286",
"0.49458918",
"0.49458918",
"0.49162272",
"0.49108624",
"0.48483956",
"0.48000088",
"0.47904742",
"0.47874847",
"0.4779106",
"0.4755141",
"0.47526425",
"0.47345704",
"0.47345704",
"0.47283745",
"0.47210255"
]
| 0.8099918 | 0 |
Sets the units_setting of this RawDataSettingsV1. | def units_setting(self, units_setting):
self._units_setting = units_setting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_units(self, units):\n self.units = units",
"def units(self, units):\n\n self._units = units",
"def units(self, units):\n\n self._units = units",
"def units(self, units):\n\n self._units = units",
"def setUnits(self, *args):\n return _libsbml.Parameter_setUnits(self, *args)",
"def setunits(self, *args, **kwargs):\n return _coordsys.coordsys_setunits(self, *args, **kwargs)",
"def setDistanceUnits(self, units: Unit) -> None:\n self.units = ...",
"def set_unit(self,unit):\n self.unit = unit",
"def setVolumeUnits(self, *args):\n return _libsbml.Model_setVolumeUnits(self, *args)",
"def setUnits(self, *args):\n return _libsbml.Species_setUnits(self, *args)",
"def setUnits(self, *args):\n return _libsbml.Compartment_setUnits(self, *args)",
"def get_setting_units(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('units', '')",
"def units(self):\n return self.__class__.get_setting_units(self.key, **self.get_kwargs())",
"def set_wavelength_unit(self, unit):\n try: # get units from the inputs\n self.wavelength_unit = str(self._wavelength.unit)\n except AttributeError:\n self.wavelength_unit = unit",
"def setUnits(self, *args):\n return _libsbml.Rule_setUnits(self, *args)",
"def setDataUnits(self, dataunits):\n\t\tself.dataUnits = dataunits\n\t\t\n\t\tx, y, z = self.dataUnits[0].getDimensions()\n\t\tself.dims = (x, y, z)\n\t\tself.newDimX.SetValue(\"%d\" % x)\n\t\tself.newDimY.SetValue(\"%d\" % y)\n\t\tself.newDimZ.SetValue(\"%d\" % z)\n\t\tself.dimsLbl.SetLabel(self.currDimText % (x, y, z))\n\t\tself.onUpdateDims(None)\n\t\tself.onSetToHalfSize(None)",
"def set_current_units(units=None):\n manager = Manager() \n if units is not None:\n # set units using a supplied dictionary\n for utype in units:\n if utype in manager.allowed_utypes:\n un = units[utype]\n # handle the identity of \"frequency\" and \"energy\"\n if utype==\"frequency\":\n utype=\"energy\"\n un = units[\"frequency\"]\n \n manager.set_current_units(utype,un)\n else:\n raise Exception(\"Unknown units type %s\" % utype)\n\n else:\n # reset units to the default\n for utype in manager.internal_units:\n if utype in manager.allowed_utypes:\n manager.set_current_units(utype,manager.internal_units[utype])\n else:\n raise Exception(\"Unknown units type %s\" % utype)",
"def setDataUnit(self, dataUnit):\n\t\tself.urmaswin.setDataUnit(dataUnit)",
"def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)",
"def setAllAxisUnits(self,units): \n self.__axis_units__ = units",
"def unit(self,unit_str,unit_scale):\n self.units[unit_str] = unit_scale\n return self",
"def unit_array(self, values):\n self._data_array.values = values\n self._units = self._data_array.attrs['units'] = str(values.units)",
"def measurement_unit(self, measurement_unit):\n\n self._measurement_unit = measurement_unit",
"def setUnits(self, *args):\n return _libsbml.ASTNode_setUnits(self, *args)",
"def set_units(\n self,\n amount: _PyomoUnit = units.mol,\n current: _PyomoUnit = units.ampere,\n length: _PyomoUnit = units.meter,\n luminous_intensity: _PyomoUnit = units.candela,\n mass: _PyomoUnit = units.kilogram,\n temperature: _PyomoUnit = units.kelvin,\n time: _PyomoUnit = units.seconds,\n ):\n self._time = time\n self._length = length\n self._mass = mass\n self._amount = amount\n self._temperature = temperature\n self._current = current\n self._luminous_intensity = luminous_intensity\n\n # Check that valid units were assigned\n for q, expected_dim in self._base_quantities.items():\n u = getattr(self, q)\n if not isinstance(u, _PyomoUnit):\n # Check for non-unit inputs from user\n raise PropertyPackageError(\n f\"Unrecognized units of measurement for quantity {q} ({u})\"\n )\n\n # Check for expected dimensionality\n try:\n # Try to convert user-input to SI units of expected dimensions\n units.convert(u, expected_dim)\n except InconsistentUnitsError:\n # An error indicates a mismatch in units or the units registry\n raise PropertyPackageError(\n f\"Invalid units of measurement for quantity {q} ({u}). \"\n \"Please ensure units provided are valid for this quantity and \"\n \"use the Pyomo unit registry.\"\n )",
"def setAxisUnits(self, dim, units): \n try:\n self.__axis_units__[dim] = units\n except IndexError:\n self.__axis_units__.append(units)",
"def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)",
"def reset_units(shared, *args):\n shared.config.remove_section('units')\n shared.config.add_section('units')\n \n return",
"def setTimeUnits(self, *args):\n return _libsbml.Model_setTimeUnits(self, *args)",
"def set_cpu_units(self, nVmCpuUnits):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuUnits', self.handle, nVmCpuUnits)"
]
| [
"0.73392993",
"0.69501215",
"0.69501215",
"0.69501215",
"0.6416668",
"0.64136463",
"0.6368304",
"0.62724364",
"0.62644994",
"0.6144763",
"0.61432904",
"0.6136528",
"0.6065689",
"0.59879696",
"0.5953695",
"0.5952562",
"0.59308696",
"0.583724",
"0.5836631",
"0.5828655",
"0.5815951",
"0.57926494",
"0.5733837",
"0.57091993",
"0.57086766",
"0.5680095",
"0.56697553",
"0.5640823",
"0.5574125",
"0.55734164"
]
| 0.81460136 | 0 |
Sets the work_hours_setting of this RawDataSettingsV1. | def work_hours_setting(self, work_hours_setting):
self._work_hours_setting = work_hours_setting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._work_hours = work_hours",
"def engine_hours(self, engine_hours):\n\n self._engine_hours = engine_hours",
"def _work_hour_value(self):\n if self.month_workdays == 0 or self.workday_hours == 0:\n self.work_hour_value = 0\n else:\n self.work_hour_value = round(self.wage / self.month_workdays / self.workday_hours, 2)",
"def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")",
"def save_hours(validated_data):\n\n for w_h in validated_data['working_hours']:\n new_w_h = WorkingHoursSerializer(data={\n 'courier_id': validated_data['courier_id'],\n 'working_hours': w_h\n })\n if not new_w_h.is_valid():\n raise ValidationError(new_w_h.errors)\n new_w_h.save()",
"def set_hour(self, hour):\n if hour not in range(24):\n raise ValueError(\"Hour value for 24h must be in range [1..23] but is {}\".format(hour))\n\n # In case there was an issue with enabling the 14hour mode, we still want\n # to be able to write the hour correctly\n if self.__get_bit_12_24() == 0:\n # First we separate the tens and the digit\n tens, digit = divmod(int(hour), 10)\n\n # In 24h mode, we add them in a single int\n reg_value = (tens << 4) | digit\n\n else: # 12h mode\n # We get the meridien\n if hour <= 12:\n meridien = 0\n else:\n meridien = 1\n\n # We treat the hour\n if hour == 12:\n tens, digit = divmod(int(12), 10)\n else:\n tens, digit = divmod(int(hour % 12), 10)\n\n # In 24h mode, we add them in a single int\n reg_value = (meridien << 5) | (tens << 4) | digit\n\n # Then we print the value to the register\n self.__write_register(_REGISTER_HOUR, reg_value)",
"def setTime(self, timeObj, day=None):\n\n # override day if it's None\n if not day:\n day = getDayFromNum(timeObj.weekday())\n\n self._fileCache[day][\"time-hr\"] = timeObj.hour\n self._fileCache[day][\"time-min\"] = timeObj.minute\n self._updateConfig()",
"def setHour(self, *args):\n return _libsbml.Date_setHour(self, *args)",
"def validate_working_hours(self, value):\n\n if len(value) == 0:\n raise ValidationError('List should not be empty.')\n return value",
"async def set_homework(\n self, group_id: int, lesson: str, homework: str\n ) -> None:\n pass",
"def setHBin(self, hbin):\n with self.lock:\n self.hbin = hbin",
"def submit_hours(self, report):\n raise NotImplementedError",
"def active_hours(self, active_hours):\n\n self._active_hours = active_hours",
"def set_Hour(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Hour', value)",
"def opening_hours(self, opening_hours):\n if self.local_vars_configuration.client_side_validation and opening_hours is None: # noqa: E501\n raise ValueError(\"Invalid value for `opening_hours`, must not be `None`\") # noqa: E501\n\n self._opening_hours = opening_hours",
"def set_walltime(self, walltime: str) -> None:\n if not self.batch:\n raise SmartSimError(\"Not running as batch, cannot set walltime\")\n\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n self.batch_settings.set_walltime(walltime)",
"def week(self, week):\n\n self._week = week",
"def create(self, validated_data):\n\n w_h = WorkingHours.objects.create(\n courier_id=Courier.objects.get(courier_id=validated_data['courier_id']),\n work_start=validated_data['working_hours'][:5],\n work_end=validated_data['working_hours'][6:]\n )\n return w_h",
"def preprocess_hours_extend_workdays(business):\n\tworkdays = list()\n\tfor (day, hour) in business[HOURS].items():\n\t\tworkdays.append(day)\n\t\tstart_end = hour.split(\"-\")\n\t\tbusiness[WORKDAYS_START(day)] = start_end[0]\n\t\tbusiness[WORDDAYS_END(day)] = start_end[1]\n\n\tbusiness[WORKDAYS] = workdays",
"def setHoursOffset(self, *args):\n return _libsbml.Date_setHoursOffset(self, *args)",
"def __init__(self, data_aggregation_setting=None, raw_data_setting=None, units_setting=None, work_hours_setting=None): # noqa: E501 # noqa: E501\n\n self._data_aggregation_setting = None\n self._raw_data_setting = None\n self._units_setting = None\n self._work_hours_setting = None\n self.discriminator = None\n\n if data_aggregation_setting is not None:\n self.data_aggregation_setting = data_aggregation_setting\n if raw_data_setting is not None:\n self.raw_data_setting = raw_data_setting\n if units_setting is not None:\n self.units_setting = units_setting\n if work_hours_setting is not None:\n self.work_hours_setting = work_hours_setting",
"def hours(self):\n return self.config['hours']",
"def set_dwell_time(self, dwell_time):\n raise NotImplementedError",
"def time_windows(self, time_windows):\n\n self._time_windows = time_windows",
"def interval_hours(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_hours\")",
"def set_wait_time(self, wtime, long_wait=False):\n if not (2.78 <= wtime <= 712):\n raise ValueError(\"The wait time must be between 2.78 ms and 712 ms\")\n\n # long_wait\n self.write_flag_data([long_wait], APDS_9960.CONFIG_1_REG_ADDRESS, 1)\n # wtime\n reg_value = 256 - int(wtime / 2.78)\n self.write_byte_data(reg_value, APDS_9960.WAIT_TIME_REG_ADDRESS)",
"def setwealth(self, w):\n self.wealth = w",
"def set_H0(self):\n self.slot.H0 = self.lf_H0.value()\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def hklin(self, hklin):\n self._hklin = hklin",
"def wall_energy_efficiency(self, wall_energy_efficiency):\n\n self._wall_energy_efficiency = wall_energy_efficiency"
]
| [
"0.7084544",
"0.62833107",
"0.61234754",
"0.5710867",
"0.53572357",
"0.53548336",
"0.5301712",
"0.5130025",
"0.50256395",
"0.49284708",
"0.49263301",
"0.49175677",
"0.4872653",
"0.48705786",
"0.4800906",
"0.47688413",
"0.47624823",
"0.475025",
"0.47354597",
"0.4734547",
"0.4709041",
"0.4696291",
"0.46633378",
"0.46104598",
"0.45806098",
"0.4540178",
"0.44863355",
"0.44685763",
"0.44655252",
"0.44645724"
]
| 0.8516128 | 0 |
computes the closing balance, reports the invalid transactions and displays the closing balance as well as the account status | def balance(self):
#a couple of assumptions not clear in assignment
#1) there is always an invalid transaction
#2) there is only 1 invalid transaction
closeBalance=0
invalidTrans=0
withdrawCount=0
depositCount=0
# print(self.numList)
for i in range(len(self.numList)):
addValue=0
if self.numList[i]<0:
if (-1*self.numList[i])>closeBalance:
invalidTrans=self.numList[i]
else:
addValue=self.numList[i]
withdrawCount+=1
elif self.numList[i]>0:
if i!=0:depositCount+=1
addValue=self.numList[i]
closeBalance+=addValue
# print(i,addValue,closeBalance)
print("Invalid transaction %.2f" %invalidTrans)
print("Closing balance = %.2f" %closeBalance)
print("Number of withdrawals = %d" %withdrawCount)
print("Number of deposits = %d" %depositCount) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def displayBalance(self):\n orders = self.trader.tradeData.get(\n 'openOrders',\n 'Failed to read orderCount')\n# uncomment 3 lines below for orderType debug printing\n## ordertype = type(orders)\n# print'DEBUG: helper.displayBalance orders TYPE is',ordertype\n# print'DEBUG: helper.displayBalance orders:',orders\n if isinstance(orders, int) and orders > 0:\n print\"Open Orders:\", orders\n self.processOrders(printOutput=True)\n self.separator()\n print'Available Balances:'\n funds = self.trader.tradeData['funds']\n for bal in funds.keys():\n if funds[bal] >= 0.01:\n print bal.upper() + ':', funds[bal]\n self.separator()",
"def report_balance(self):\n print(f\"\\nThe current balance in your account is ${self.balance}.\\n\")",
"def balance(self) -> Decimal:\n withdrawals = self.withdrawal_requests.filter(\n status=WithdrawalStatus.open,\n )\n if len(withdrawals) == 0:\n return self.internal_balance\n else:\n withdrawal_total = sum(map(lambda w: w.amount, withdrawals))\n return self.internal_balance - withdrawal_total",
"def balances():\n loop.run_until_complete(app.exchanges.fetch_balances())\n print(app.exchanges.balances_str)",
"def show_balance(self):\n\t\tbalance = 0\n\t\tfor acct in self.wallet:\n\t\t\tutxos = get_unspent(acct[\"address\"], self.testnet)\n\t\t\tbalance += sum(i['value'] for i in utxos)\n\t\treturn f\"{self.name} current balance: {str(balance/100000000.0)} BTC\"",
"async def balance(self, ctx):\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n history = db.query(CompanyHistory).filter(CompanyHistory.company == company.id).order_by(CompanyHistory.date.desc()).limit(2).all()\r\n net_worth = history[0].value\r\n delta = history[0].value - history[1].value if len(history) == 2 else 0\r\n percent = delta * 100 / history[1].value if len(history) == 2 else 0\r\n symbol = '⮝' if delta >= 0 else '⮟'\r\n embed = discord.Embed(title=f'{company.name}', description=f'{symbol}{round(percent, 2)}%', inline=True)\r\n embed.add_field(name='Cash Assets:', value=f'{round(company.balance, 2)} USD')\r\n embed.add_field(name='Net worth:', value=f'{round(net_worth, 2)} USD')\r\n await ctx.send(embed=embed)",
"def checkBalance(self, dt):\n try:\n state, header, data = get_balance(self.usertoken_usr1)\n if state == 200:\n self.balance_int = data['balance']\n self.balance_str = str(self.balance_int)\n self.label_balance.text = \"Current Balance: \" + self.balance_str\n self.label_balance2.text = \"Current Balance: \" + self.balance_str\n self.label_balance3.text = \"Current Balance: \" + self.balance_str\n else:\n print \"Balance check failed\"\n except:\n pass",
"def balanceHistory(self):\n dt_only, tm_only = self.getDatetimeSplit()\n\n balance = self.user[\"Accounts\"][self.account_id][\"Account_Balance\"]\n\n available_for_trading = self.user[\"Accounts\"][self.account_id][\"Available_For_Trading\"]\n\n # GET CURRENT BALANCE\n balance_found = self.balance_history.find_one(\n {\"Date\": dt_only, \"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n profit_loss = 0\n\n closed_positions = self.closed_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n \n for position in closed_positions:\n\n buy_price = position[\"Buy_Price\"]\n\n sell_price = position[\"Sell_Price\"]\n\n qty = position[\"Qty\"]\n\n profit_loss += ((sell_price * qty) - (buy_price * qty))\n\n if not balance_found:\n \n self.balance_history.insert_one({\n \"Trader\": self.user[\"Name\"],\n \"Date\": dt_only,\n \"Asset_Type\": self.asset_type,\n \"Account_ID\": self.account_id,\n \"Balance\": balance,\n \"Available_For_Trading\": available_for_trading,\n \"Profit_Loss\": profit_loss\n })",
"def balance(self) -> float:\n\t\tbalance = 0\n\t\tfor transaction in self.transactions:\n\t\t\tsign = 1 if transaction.receiving_account == self.__number else -1\n\t\t\tbalance += sign*transaction.usd*transaction.completed\n\t\t# The bank has infinite money\n\t\tif self.name == Account.BANK:\n\t\t\tbalance = Decimal('Infinity')\n\t\treturn balance",
"def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result",
"def do_balance(self, args):\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n print(\"Your current balance is $%.2f\" % balance)",
"def do_balance(self,args):\n \"\"\"Can show total, available(available for trading), or reserved(reserved in open orders)\"\"\"\n \"\"\"usage: balance [available/reserved](optional)\"\"\"\n args = stripoffensive(args)\n if 'available' in args:\n btc,usd = available() \n elif 'reserved' in args:\n btc,usd = reserved()\n else:\n btc,usd = bal()\n word = args if args else \"total\"\n print 'Your %s balance is %.8f BTC and $%.2f USD ' % (word,btc,usd)\n if word == \"total\":\n last = D(bitstamp.ticker()['last'])\n print 'Account Value: $%.2f @ Last BTC Price of $%.2f' % (btc*last+usd,last)",
"def checkbalance(self):\n logging.debug('Checked user balance')",
"def report_UI_balance_date(account):\n\t_day = read_day()\n\tprint(\"Soldul contului in ziua %d este: %.2f\" % (_day, \n\treport_balance_date(account, _day)))",
"def balance_report(abroker):\n log.info('*** balances ***\\n')\n s = \"*** balances ***\\n\"\n \"\"\"\n for asset in assets:\n v = abroker.balance_currency(asset)['Total']\n log.info('%s => %f'%(asset,v))\n s += '%s => %f\\n'%(asset,v)\n print (\"send \" + str(s))\n \"\"\"\n\n y = abroker.balance_all()\n for x in y: \n if x['Total'] > 0:\n v = x['Total']\n s += '%s => %f\\n'%(x['Symbol'],v)\n #print (x)\n print (\"send \" + str(s))\n mail.send_simple_message(abroker.mail_api_key, abroker.mail_domain, \"Balance Report\",s)",
"def balance_money_check():\r\n print(balance_money)",
"def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')",
"async def handle_get_trading_balance_response(self, response: RequesterResponse\n ) -> HitbtcTradingCurrencyBalances:",
"def double_declining_balance():\r\n cost = float(input(\"Please Enter The Cost Of Asset: \"))\r\n accdepreciation = float(input(\"Please Enter The Value Of Accumulated Depreciation: \"))\r\n life = float(input(\"Please Enter Estimated Useful Life Of Asset(Years): \"))\r\n rv = float(input(\"Please Enter Estimated Residual Value Of Asset: \"))\r\n n = 0\r\n a = (float(cost)-float(accdepreciation)) * (float(2)/float(life))\r\n bn = float(a)/float(12)\r\n print \">> Your Monthly Depreciation For First Year is\",bn\r\n while(n != (life-1)):\r\n bk = float(cost)\r\n a = ((float(cost)-float(accdepreciation)) * (float(2)/float(life)))\r\n cost -= float(a)\r\n bk -= float(a)\r\n n += 1\r\n vvv = float(bk)-float(rv)\r\n print \">> Your Depreciation For Year No.\",n,\"is\",a\r\n print \">> Your Book Value After\",n,\"Years is\",bk,\"\\n\"\r\n print \">> Your Depreciation For Year No.\",int(life),\"is\",vvv\r\n print \">> Your Book Value After\",int(life),\"Years is\",rv",
"def balance(self):\n balance = self.opening_balance\n for trx in Transaction.query.filter_by(kid_id=self.id):\n balance += trx.amount\n return balance",
"def check_balance():\n print(\"\\n\")\n print(messages.check_balance)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.check_balance(credentials)\n start_again() if result else BankOperationsUi.check_balance()",
"def returnCompleteBalances(self):\n pass",
"def __balance__(self) -> float:\n\n with dataset.connect(database.get_db()) as db:\n # Find last bank transaction.\n statement = statement = f\"\"\"\n SELECT opening_balance, transaction_amount\n FROM bank\n WHERE author_id = {self.user.id}\n ORDER BY id DESC\n LIMIT 1\n \"\"\"\n result = db.query(statement)\n\n for row in result:\n balance = row[\"opening_balance\"] + row[\"transaction_amount\"]\n break\n else:\n # If there was no result for the user, default balance is given.\n balance = 500\n\n return float(balance)",
"def do_withdraw(self, args):\n \n amount = float(input(\"How much? \"))\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n if amount > balance:\n print(\"Insufficient funds! Withdrawl canceled.\")\n print(\"Use the `balance` command to check your account balance\")\n return\n \n balance -= amount\n now = time()\n self.cur.execute(\"INSERT INTO withdrawls VALUES (?,?)\", (now, amount))\n self.cur.execute(\"INSERT INTO balance VALUES (?,?,?)\", (now, 0.0, balance))\n self.db.commit()\n print(\"Withdrawl complete. Your new balance is $%.2f\" % balance)",
"def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")",
"def balance(self):\n return self._rbal - self._lbal",
"def get_balance(self):\n final_amount = 0\n for i in range(len(self.ledger)):\n final_amount += self.ledger[i]['amount']\n return final_amount",
"def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance",
"def test_balance(self):\n\n self.assertEqual(self.cash_report.balance(), 150)",
"def validate_bank(accounts: List[Account], total: int, quiet=False):\n #with transfer_lock:\n # current = sum(a.balance for a in accounts)\n [a.lock.acquire() for a in accounts]\n current = sum(a.balance for a in accounts)\n [a.lock.release() for a in accounts]\n\n if current != total:\n print(\"ERROR: Inconsistent account balance: ${:,} vs ${:,}\".format(\n current, total\n ), flush=True)\n elif not quiet:\n print(\"All good: Consistent account balance: ${:,}\".format(\n total), flush=True)"
]
| [
"0.6902592",
"0.6551523",
"0.6535679",
"0.64133596",
"0.6371079",
"0.6279522",
"0.6243903",
"0.62162966",
"0.6196909",
"0.61803025",
"0.6129244",
"0.60479075",
"0.60446423",
"0.6038427",
"0.60363066",
"0.6032662",
"0.5967132",
"0.5929868",
"0.5929786",
"0.59251976",
"0.59018505",
"0.58724827",
"0.58724743",
"0.5859656",
"0.5854603",
"0.58513695",
"0.584411",
"0.58381784",
"0.5834665",
"0.58312637"
]
| 0.74515235 | 0 |
extract pages and then Request those pages sequentially | def extract(self, response):
# print response.url,"extract response url"
sel = response.selector
pages = []
try:
# print "pages work"
pages = sel.xpath("//div[contains(@class,'fen_ye_nav')]//td/text()").re(u"共([\d]{1,3})页")
# print pages
except Exception, e:
print e,"error pages"
log.msg(e, level=log.ERROR)
log.msg(response.url, level=log.ERROR)
if len(pages) == 0:
self.getUserName(response) #only one page
else:
for page in range(int(pages[0])+1)[1:]: # for test
url = response.url+"_m0_p"+str(page)
yield Request(url, callback=self.getUserName,dont_filter=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def identify_and_parse_page(self, response):\n if self.initial_page_filter(response):\n if self.is_index_page(url=response.url, response=response):\n self.process_index_page(response)\n elif self.is_captcha_page(response.url, response):\n self.process_captcha(response)\n elif self.is_results_page(response.url, response):\n items = self.process_question_answer_page(response)\n if self.duplicate_url:\n yield Request(url=self.duplicate_url, callback=self.identify_and_parse_page)\n self.duplicate_url = None\n for item in items:\n yield item\n else:\n self.classification_file.write(\"other, {}\\n\".format(response.url))\n print('other: {}'.format(response.url))\n else:\n self.classification_file.write(\"other, {}\\n\".format(response.url))\n print('other: {}'.format(response.url))",
"def parsing_all_page(url):\n html_doc = get_html(url)\n# html_doc = get_html_local()\n page_count = get_html_count(html_doc)\n print 'All have find pages %d' % page_count\n\n projects = []\n\n for page in range(1, page_count + 1):\n print 'Parsing %d%%' % (page*100/page_count)\n\n url = BASE_URL + '?page=%d' % page\n projects.extend(process_page(url))\n\n return projects",
"async def scrape_pages(session, pages):\n tasks = [scrape_page(session, url) for url in pages]\n await asyncio.gather(*tasks)",
"def parse_index(self, response):\n items = response.css('.item')\n for item in items:\n href = item.css('.top a::attr(href)').extract_first()\n detail_url = response.urljoin(href)\n logger.info('detail url %s', detail_url)\n yield PyppeteerRequest(detail_url, callback=self.parse_detail, wait_for='.item .name')\n \n # next page\n match = re.search(r'page/(\\d+)', response.url)\n if not match: return\n page = int(match.group(1)) + 1\n next_url = f'{self.base_url}/page/{page}'\n yield PyppeteerRequest(next_url, callback=self.parse_index, wait_for='.item .name')",
"def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1",
"def getAllListPage():\n firstPage = city + '/line1'\n data = urlopen(firstPage).read().decode('gbk')\n urlList = getLineTypeList(data)\n urlList.append(firstPage)\n num = len(urlList)\n i = 0\n p = Pool(processes=4)\n pageData = p.map(readData, urlList)\n# manager = Manager()\n# pageData = manager.list()\n# while i < num:\n# procline = Process(target=readData, args=(urlList[i], pageData,))\n# procline.start()\n# procline.join()\n# i += 1\n return pageData",
"def parallel_get_pages(args):\n n_requests, from_id, step, index_name, es = args\n all_sites_arr = []\n for _ in range(n_requests):\n waiting_response_time = 0\n for i in range(5):\n time.sleep(waiting_response_time)\n\n try:\n res = es.search(\n index=index_name,\n body={\n \"from\": from_id,\n \"query\": {\n \"match_all\": {}\n },\n \"size\": step,\n \"sort\": {\n \"site_id\": \"asc\"\n }\n },\n request_timeout=1000\n )\n print(\"Got %d Hits\" % len(res['hits']['hits']))\n\n for site in res['hits']['hits']:\n all_sites_arr.append({\n \"link\": site[\"_source\"][\"link\"],\n \"hyperlinks\": site[\"_source\"][\"hyperlinks\"]\n })\n\n break\n except TransportError as exc:\n print('index setup error', exc)\n\n waiting_response_time = math.exp(i + 1)\n\n from_id += step\n time.sleep(10)\n\n return all_sites_arr",
"def parse(self, response):\n announcement_urls = response.css('#TD1 > table > tbody > tr > td.tdline2 > a::attr(href)').extract()\n for announcement_url in announcement_urls:\n yield Request(url=parse.urljoin(response.url, announcement_url), callback=self.parse_detail)\n\n # next page\n total_num_text = response.css('#Table1 > tbody > tr > td:nth-child(1)::text').extract()[-1]\n match_re = re.match('.*?共(\\d+)页', total_num_text)\n if not match_re:\n print('extract total page number error, please check the page source.')\n return\n total_num = int(match_re.group(1))\n if self.current_page <= total_num:\n form_request_text = re.match(\".*'(.*)?'\", response.css(\n '#Table1 > tbody > tr > td:nth-child(3) > input.cls-navigate-next::attr(onclick)').extract_first()).group(1)\n next_page_url = form_request_text.split('?')[0]\n form_data = form_request_text.split('?', 1)[1].split('&')\n yield scrapy.FormRequest(\n url=parse.urljoin(response.url, next_page_url),\n formdata={\n 'ISAJAXLOAD': form_data[0].split('=')[1],\n 'displayContentId': form_data[1].split('=')[1],\n 'SHOWTYPE': form_data[2].split('=')[1],\n 'CATALOGTYPE': form_data[3].split('=')[1],\n 'ORIGINAL_CATALOGID': form_data[4].split('=')[1],\n 'HEAD': '本所公告', # todo 第二页返回时发现乱码 经测试该字段是固定的 先这样处理\n 'CATALOGID': form_data[6].split('=')[1],\n 'TYPE': form_data[7].split('=')[1],\n 'COUNT': form_data[8].split('=')[1],\n 'ARTICLESOURCE': form_data[9].split('=')[1],\n 'LANGUAGE': form_data[10].split('=')[1],\n 'REPETITION': form_data[11].split('=')[1],\n 'DATESTYLE': form_data[12].split('=')[1],\n 'DATETYPE': form_data[13].split('=')[1],\n 'SEARCHBOXSHOWSTYLE': form_data[14].split('=')[1],\n 'INHERIT': form_data[15].split('=')[1],\n 'USESEARCHCATALOGID': form_data[16].split('=')[1],\n 'REPORT_ACTION': form_data[17].split('=')[1],\n 'PAGESIZE': form_data[18].split('=')[1],\n 'PAGECOUNT': form_data[19].split('=')[1],\n 'RECORDCOUNT': form_data[20].split('=')[1],\n 'PAGENO': form_data[21].split('=')[1],\n },\n callback=self.parse\n )\n self.current_page += 1",
"def start_requests(self):\n NUM_PAGES = 74\n warnings.warn(\n 'ECACCSpider: Num pages is hard-coded!'\n )\n \n URL_TEMPLATE = \"https://www.phe-culturecollections.org.uk/products/celllines/generalcell/browse.jsp?a2z=All&d-49653-p={}\"\n urls = [\n URL_TEMPLATE.format(i) \n for i in range(1, NUM_PAGES+1)\n ]\n for url in urls:\n yield scrapy.Request(\n url=url,\n callback=self.parse_catalog_page\n )",
"def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requests 301-365)\r\n\r\n grinder.sleep(20)\r\n self.page4() # POST view (requests 401-452)\r",
"def __call__(self):\n self.page1() # GET supercars.do (requests 101-111)\n\n grinder.sleep(2117)\n self.page2() # GET cars.do (requests 201-202)\n\n grinder.sleep(1867)\n self.page3() # GET car.do (request 301)\n\n grinder.sleep(4351)\n self.page4() # GET enquire.do (requests 401-402)\n\n grinder.sleep(16341)\n self.page5() # POST enquire.do (request 501)\n\n grinder.sleep(1309)\n self.page6() # GET supercars.do (request 601)\n\n grinder.sleep(669)\n self.page7() # GET cars.do (requests 701-702)\n\n grinder.sleep(1260)\n self.page8() # GET car.do (request 801)\n\n grinder.sleep(837)\n self.page9() # GET car.do (request 901)\n\n grinder.sleep(1108)\n self.page10() # GET search.do (request 1001)\n\n grinder.sleep(3146)\n self.page11() # POST search.do (requests 1101-1102)\n\n grinder.sleep(2822)\n self.page12() # POST search.do (request 1201)\n\n grinder.sleep(1333)\n self.page13() # GET sell.do (request 1301)\n\n grinder.sleep(17417)\n self.page14() # POST sell.do (request 1401)\n\n grinder.sleep(6680)\n self.page15() # GET insurance.do (request 1501)\n\n grinder.sleep(600)\n self.page16() # GET about.do (requests 1601-1602)\n\n grinder.sleep(584)\n self.page17() # GET supercars.do (request 1701)\n\n grinder.sleep(1049)\n self.page18() # GET cars.do (requests 1801-1802)\n\n grinder.sleep(2901)\n self.page19() # GET car.do (request 1901)\n\n grinder.sleep(1441)\n self.page20() # GET car.do (request 2001)\n\n grinder.sleep(791)\n self.page21() # GET supercars.do (request 2101)\n\n grinder.sleep(1365)\n self.page22() # GET cars.do (request 2201)\n\n grinder.sleep(1067)\n self.page23() # GET supercars.do (request 2301)\n\n grinder.sleep(1284)\n self.page24() # GET cars.do (request 2401)\n\n grinder.sleep(879)\n self.page25() # GET supercars.do (request 2501)\n\n grinder.sleep(1066)\n self.page26() # GET cars.do (request 2601)\n\n grinder.sleep(974)\n self.page27() # GET supercars.do (request 2701)",
"def parse_further_pages(self, response):\n # print(\"Page num: \", response.meta[\"page_number\"])\n page_num = response.meta[\"page_number\"]\n tile_path = \"//div[@class='product-tile']\"\n # gets between 1 and 48 SelectorLists, depending on how many products are on the page.\n product_tiles_from_the_page = response.xpath(tile_path)\n for page in product_tiles_from_the_page:\n self.convert_product_tiles_from_this_page_to_items(page,\n product_category=response.meta[\"category_name\"],\n page_num=page_num)\n\n return None",
"def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse_main)",
"def start_requests(self):\n try:\n # query = \"select website from company where status is null ORDER BY id asc limit 10000\"\n query = \"select website from company2 where web_addr = 'Web Address';\"\n self.cursor.execute(query)\n numrows = self.cursor.rowcount\n for x in xrange(0, numrows):\n row = self.cursor.fetchone()\n detailed_pag_url = str(row[0])\n yield scrapy.Request(url=detailed_pag_url, callback=self.parse)\n except MySQLdb.Error, e:\n print(\"Database connection Error\", e)",
"def _get_pages(self,url,params,section):\n if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results",
"def parse(self, response):\n s = Selector(response)\n\n page_nums = s.xpath('//ul[@class=\"paging-container\"]//a[not(@class=\"current\")]/@data-page').extract()\n\n if page_nums:\n last_page = int(page_nums[-1])\n else:\n last_page = 2\n\n for page in range(1, last_page):\n next_url = change_url_params(page_num=str(page), url=response.url)\n yield scrapy.Request(next_url, callback=self.parse_inner_urls)",
"def _get_allpages(self, url:str, paramsdict:Dict[str,str]):\n r1 = self._get_dict_from_url(url, paramsdict)\n r = [r1]\n #display(r)\n if 'total_pages' in r1:\n # print('more than one page')\n for next_page in range(2, r1['total_pages']+1):\n # print(f\"load page {next_page} \")\n r.append(self._get_dict_from_url(url, {**paramsdict, 'page':next_page}))\n # print(len(r))\n # print([len(rx['results']) for rx in r])\n results = [entry for rx in r for entry in rx['results'] ]\n\n return results",
"def get_pages(url):\n page_list = []\n while True:\n # this is the only place in which we do a request and get the page from the online website case.law\n # result variable is the entire content for that page: a big dictionary of data\n result = requests.get(url).json()\n\n # result is what the previous request returns. It is what accessing the URL will show on the page. This entire result we store in the list below:\n page_list.append(result)\n\n # then we get the URL for the next page that we want to load\n # since result is a dictionary, we get the value for key next which will point us to the next page of results (it it exists)\n url = result['next']\n\n # if there is no URL is means that we have processed all the pages\n if not url:\n break\n # page_list is the list that contains all the pages we will process\n return page_list",
"def start_extracting(self, response):\n input_file = open('input.txt', 'r')\n start_url = []\n\n # Check for command line input\n try:\n start_url = [self.url]\n except AttributeError:\n # If no CL argument, read links from txt file\n start_url = [link for link in input_file]\n\n # Generating request for every url\n for url in start_url:\n yield scrapy.Request(url=url\n , meta={'dont_merge_cookies': False}\n , callback=self.parse\n )",
"def parse(self, response):\n # Parse all article urls and handover it to parse\n post_nodes = response.css(\"#archive .post.floated-thumb .post-thumb a\")\n for post_node in post_nodes:\n image_url = post_node.css(\"img::attr(src)\").extract_first(\"\")\n post_url = post_node.css(\"::attr(href)\").extract_first(\"\")\n yield Request(url=parse.urljoin(response.url, post_url), meta={\"front_image_url\": image_url}, callback=self.parse_detail)\n print(post_url)\n\n # Extract next page's url and handover it to scrapy\n next_url = response.css(\".next.page-numbers::attr(href)\").extract_first(\"\")\n if next_url:\n yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)",
"def after_parse(self, response):\n\n extraction_requests = []\n\n for container in response.xpath('//tr[@align=\"center\"]'):\n detail_url = container.xpath('./td[1]/a/ @href').extract()[0]\n\n l = BusinessLoader(selector=container, response=response)\n l.add_xpath('telephone', './td[1]/span/ text()')\n l.add_xpath('website', './td[2]/a/ @href')\n l.add_xpath('email', \"substring-after(./td[4]/a/ @href,'mailto:')\")\n l.add_xpath('legalName', './td[1]/a/ text()')\n item = l.load_item()\n\n log.msg('business details extracted from index: {0}'.format(item))\n\n extraction_requests.append(Request(url = urljoin(response.url, detail_url), meta={'item':item}, callback=self.extract))\n\n return extraction_requests",
"def parse(self, response):\n content_type = self.get_content_type(response.headers)\n\n sitescan = response.meta.get('sitescan')\n\n if 'text/html' not in self.get_content_type(response.headers):\n\n # For linked content, find the urlscan it linked from\n urlscan = model.URLScan.objects.get(\n site_scan=sitescan,\n page_url_hash=sha256(response.meta['referrer']).hexdigest())\n else:\n # Only create urlscans for text/html\n urlscan, us_created = model.URLScan.objects.get_or_create(\n\n site_scan=sitescan,\n page_url_hash=sha256(response.url).hexdigest(),\n defaults={'page_url': response.url,\n 'timestamp': self.get_now_time()})\n\n # Continue crawling\n # Parse stylesheet links, scripts, and hyperlinks\n hxs = HtmlXPathSelector(response)\n\n # Extract other target links\n try:\n css_links = hxs.select('//link/@href').extract()\n except TypeError:\n css_links = []\n\n try:\n js_links = hxs.select('//script/@src').extract()\n except TypeError:\n js_links = []\n\n try:\n hyperlinks = hxs.select('//a/@href').extract()\n except TypeError:\n hyperlinks = []\n\n # Using a set removes duplicate links.\n all_links = set(hyperlinks + js_links + css_links)\n\n # Examine links, yield requests if they are valid\n for url in all_links:\n\n if not url.startswith('http://'):\n # ensure that links are to real sites\n if url.startswith('javascript:'):\n continue\n else:\n url = urljoin(response.url, url)\n\n ua = response.meta['user_agent']\n\n request = Request(url)\n request.headers.setdefault('User-Agent', ua.ua_string)\n request.meta['referrer'] = response.url\n request.meta['sitescan'] = sitescan\n request.meta['user_agent'] = ua\n request.meta['content_type'] = None\n\n yield request\n\n # The response contains a user agent, we should yield an item\n item = MarkupItem()\n item['content_type'] = self.get_content_type(response.headers)\n item['filename'] = os.path.basename(urlparse(response.url).path)\n item['headers'] = unicode(response.headers)\n item['meta'] = response.meta\n item['raw_content'] = response.body\n item['sitescan'] = sitescan\n item['urlscan'] = urlscan\n item['url'] = response.url\n item['user_agent'] = response.meta.get('user_agent')\n item['redirected_from'] = response.meta.get('redirected_from',\n u'')\n yield item",
"def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)",
"def get_pages(self, url_list):\n page_helper = self.get_page\n pool = ThreadPool(self.max_threads)\n results = pool.map(page_helper, url_list)\n pool.close()\n pool.join()\n return results",
"def parse(self, response):\n\n links = response.xpath('//td/font/a[contains(@href,\"chart\")]/@href').extract()\n for href in links:\n url = response.urljoin(href)\n yield scrapy.Request(url, callback=self.parse_director_page)\n\n pages = response.xpath('//font[@size=4]/b/a/@href').extract()\n next_page = \"\"\n\n for page in pages:\n page = response.urljoin(page)\n if page not in self.page_seen:\n next_page = page\n self.page_seen.add(page)\n break\n else:\n next\n\n if len(next_page) > 0:\n yield scrapy.Request(next_page, callback=self.parse)",
"def parse(self, response):\n\n hxs = HtmlXPathSelector(response)\n\n listlinkExtractor = SgmlLinkExtractor(allow=(r\"/clinics/\\d+/doctors(|\\?page=\\d+)\",), unique=True)\n list_links = listlinkExtractor.extract_links(response)\n for link in list_links:\n yield Request(link.url, callback=self.parse)\n\n\n docdetail_linkExtractor = SgmlLinkExtractor(allow=(r\"/doctor/clinic_web_\\w+$\",), unique=True)\n docdetail_links = docdetail_linkExtractor.extract_links(response)\n for link in docdetail_links:\n yield Request(link.url, callback=self.parse_doctor_detail)",
"def parse(self, response):\n # single page\n product_urls = response.css('.product-title > a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n categorie_urls = response.css('.elementor-button ::attr(href)').getall()\n for categorie_url in categorie_urls:\n yield scrapy.Request(response.urljoin(categorie_url))",
"def fetch_pages(folder=pages_folder):\r\n if not (folder.endswith('/') or folder.endswith('\\\\')):\r\n folder += '/'\r\n _classes = classes\r\n if not _classes:\r\n _classes = extract_classes(getsoup())\r\n for classnum in _classes.keys():\r\n with open(folder + str(classnum) + '.html', 'x') as f:\r\n f.write(getsoup(classnum).prettify())",
"def get_page_requests(self, delete=True):\n logger.info(\"===> getting pages from pages.log\")\n p = []\n try:\n with open(self.pages_log, 'r+') as f:\n lines = f.readlines()\n for l in lines:\n t, path, size, offset = l.strip().split(\" \")\n logger.debug(\"%s %s\", t, path)\n ckpt, page_file = path.split(\"/\")\n p.append((ckpt, page_file, int(size), int(offset)))\n # Remove it before we go on so that page server will start over\n logger.info(\"finished getting pages\")\n if delete:\n os.remove(self.pages_log)\n except IOError as e:\n logger.warn(e)\n logger.warn(\"failed getting pages\")\n logger.debug(\"get_page_requests() -> \"+str(p))\n return p",
"def parse(self, response):\n product_urls = response.css(\n '.product-li .product-image a::attr(href)'\n ).getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n\n\n next_page_number = 2\n if '?' in response.url:\n return\n while next_page_number < 37:\n # import logging\n # logging.log(logging.WARNING, f\"This is a warning {len(product_urls)} : {product_urls[0]}\")\n next_page_url = f'{response.url}?p={next_page_number}'\n yield scrapy.Request(response.urljoin(next_page_url))\n next_page_number += 1"
]
| [
"0.68995947",
"0.67791885",
"0.65988064",
"0.65479517",
"0.6542743",
"0.64892554",
"0.6487471",
"0.64674",
"0.6452624",
"0.64335346",
"0.64018846",
"0.63740575",
"0.63678265",
"0.63652563",
"0.6361725",
"0.6356936",
"0.6353673",
"0.63518137",
"0.63436663",
"0.6335191",
"0.63293827",
"0.6329343",
"0.6317354",
"0.6317168",
"0.6314466",
"0.6256278",
"0.62478346",
"0.62464315",
"0.6242757",
"0.62352383"
]
| 0.74264663 | 0 |
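The negative snippets listed for this record all revolve around the same idea: keep requesting pages until the API or site signals there is nothing left. A minimal sketch of that loop with the requests library follows; the "results" and "next" field names and the endpoint in the usage line are illustrative assumptions, not taken from any snippet above.

import requests

def fetch_all_pages(url, params=None):
    # Collect results from a paginated JSON API by following its 'next' links.
    results = []
    while url:
        resp = requests.get(url, params=params)
        resp.raise_for_status()
        payload = resp.json()
        results.extend(payload.get("results", []))
        url = payload.get("next")  # assumed to be None on the last page
        params = None  # any query params are already baked into the 'next' URL
    return results

# Usage (hypothetical endpoint): fetch_all_pages("https://api.example.com/items", {"page_size": 100})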
Check whether the given input path is in self.app_path. | def check_path(self, path):
if path in self.app_path:
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_study_app_request(context):\n # NOTE: This assumes 'scopes' was overwritten by get_context_data.\n scopes = [x[0] for x in context['scopes']]\n\n try:\n scopes.remove('read')\n scopes.remove('write')\n except ValueError:\n return False\n\n if len(scopes) != 1:\n return False\n\n app_label = re.sub('-', '_', scopes[0])\n app = apps.get_app_config(app_label)\n\n if app and app.verbose_name == context['application'].name:\n return app_label\n\n return False",
"def check_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_path\")",
"def check_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_path\")",
"def bool_apps_filho(self, path_app):\n LIST_APPS_MENUS = menu_apps.MenuApps.GetAppsOnMenu(only_visible=True) \n dirs = next(os.walk(path_app))[1]\n for directory in dirs:\n tree_app = self.get_app_name_on_path(path_app + \"/\"+ directory)\n \n if tree_app in LIST_APPS_MENUS:\n return True\n \n return False",
"def _is_fluxcd_app_compliant(path):\n mandatory_components = (\"base\", constants.APP_ROOT_KUSTOMIZE_FILE)\n check_mandatory = all(comp in os.listdir(path)\n for comp in mandatory_components)\n return check_mandatory",
"def is_app_exists(self, name, app_path) -> bool:\n ah_write = self.get_iis_object()\n section = ah_write.GetAdminSection(\"system.applicationHost/sites\", \"MACHINE/WEBROOT/APPHOST\")\n collection = section.Collection\n\n for i in range(collection.Count):\n site = collection[i]\n prop = site.Properties\n site_name = prop[\"name\"].Value\n if site_name != name:\n continue\n app_collection = site.Collection\n for ii in range(app_collection.Count):\n app_prop = app_collection[ii].Properties\n if remove_starting_backward_slash(app_prop[\"path\"].Value) == remove_starting_backward_slash(app_path):\n # found!\n return True\n\n return False",
"def test_app_exists(self):\n self.assertFalse(current_app is None)",
"def check_for_application_file(self, application):\n # 1. Get the path of the app_config directory\n app_conf_dir = self.sys_conf['configs']['env'][self.env]['app_config_url']\n\n # 2. Get the path for the given application configuration file\n app_conf_dir += '/{file}.yaml'.format(file=application)\n\n # 3. Validate the path exists\n chk = file_check(app_conf_dir)\n\n # 4. Return the result\n return chk",
"def _has_app(self, app):\n # Search the easiest things first and save the full-text search of the\n # HTML for last\n\n for regex in app['url']:\n if regex.search(self.url):\n return True\n\n for name, regex in app['headers'].items():\n if name in self.headers:\n content = self.headers[name]\n if regex.search(content):\n return True\n\n for regex in app['script']:\n for script in self.scripts:\n if regex.search(script):\n return True\n\n for name, regex in app['meta'].items():\n if name in self.meta:\n content = self.meta[name]\n if regex.search(content):\n return True\n\n for regex in app['html']:\n if regex.search(self.html):\n return True",
"def is_app_dir(path):\n try:\n find_app_yamls(path)\n return True\n except ValueError:\n return False",
"def validate_input_file(self):\r\n return os.path.isfile(self.input_file)",
"def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False",
"def ValidatePath(self, root_path: str) -> bool:\n if 'silver' in root_path:\n return True\n\n return False",
"def is_in_path(self):\n exe = self.command.split()[0]\n for try_path in os.environ[\"PATH\"].split(os.pathsep):\n try_path = try_path.strip('\"')\n exe_try = os.path.join(try_path, exe).strip()\n if os.path.isfile(exe_try) and os.access(exe_try, os.X_OK):\n return True\n return False",
"def current_app_should_be(self, app_name):\n locator = lex_locators[\"app_launcher\"][\"current_app\"].format(app_name)\n elem = self.selenium.get_webelement(locator)\n assert app_name == elem.text, \"Expected app to be {} but found {}\".format(\n app_name, elem.text\n )",
"def check_paths(self):\n self.data[\"app_path\"] = list(map(\n self.replace_vars_path, self.data[\"app_path\"]))\n self.data[\"icons_path\"] = list(map(\n self.replace_vars_path, self.data[\"icons_path\"]))\n new_app_path = []\n for app_path in self.data[\"app_path\"]:\n if path.isdir(app_path) or path.isfile(app_path):\n new_app_path.append(app_path)\n self.data[\"app_path\"] = new_app_path\n if not len(self.data[\"app_path\"]) == 0:\n new_icons_path = []\n for icon_path in self.data[\"icons_path\"]:\n if (self.data[\"force_create_folder\"] and\n not path.exists(icon_path)):\n log(\"Creating application folder for {0}\".format(self.data[\"name\"]))\n create_dir(icon_path)\n if path.isdir(icon_path):\n if (\"binary\" in self.data.keys()\n and path.isfile(icon_path + self.data[\"binary\"])):\n new_icons_path.append(icon_path)\n elif \"binary\" not in self.data.keys():\n new_icons_path.append(icon_path)\n self.data[\"icons_path\"] = new_icons_path",
"def _is_request_in_include_path(self, request):\n if self._include_paths:\n for path in self._include_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return True",
"def exit_if_invalid(app):\n\n app = sanitize_app_name(app)\n if not exists(join(APP_ROOT, app)):\n echo(\"Error: app '{}' not found.\".format(app), fg='red')\n exit(1)\n return app",
"def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False",
"def is_valid_path(input_path):\n if not os.path.exists(input_path):\n print('\\'{}\\' is not a valid path.'.format(input_path))\n exit(1)\n return input_path",
"def __check_exist_path(self):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among parameters')\n self.params['path_out'] = update_path(self.params.get('path_out'))\n list_names = [n for n in self.params if any(m in n.lower() for m in ['path', 'dir', 'file'])]\n for n in list_names:\n p = os.path.abspath(os.path.expanduser(self.params[n]))\n if not os.path.exists(p):\n raise FileNotFoundError('given path/file/dir \"%s\" does not exist!' % p)\n self.params[n] = p\n for n in [n for n in self.params if 'exec' in n]:\n # in case you define executable in your home\n if os.path.expanduser(self.params[n]) != self.params[n]:\n self.params[n] = os.path.expanduser(self.params[n])",
"def check_PATH_for_program(f):\n\n path = os.environ[\"PATH\"].split(\":\")\n\n for p in path:\n\n if os.path.isfile(os.path.join(p,f)):\n return True\n\n return False",
"def find_in_app(self, app, path):\n storage = self.storages.get(app)\n if storage:\n # only try to find a file if the source dir actually exists\n if storage.exists(path):\n matched_path = storage.path(path)\n if matched_path:\n return matched_path",
"def check_app(self, package):\n return self.adb.check_app(package)",
"def has_appname(appname):\n return appname in Registry.monomers",
"def exists_in_path(self):\n return os.path.isfile(self.IN_PATH)",
"def is_embedded(request):\n hx_current_url = request.headers.get('HX-Current-URL', None)\n if not hx_current_url:\n return False\n return request.path != urlparse(hx_current_url).path",
"def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")",
"def check_path(self,path):\r\n self.__path=path\r\n list_of_file_names={'Structure.txt','test.csv','train.csv'}\r\n if list_of_file_names <= set(os.listdir(self.__path)):#checking if the path have all the requierd files\r\n if os.path.getsize(os.path.join(self.__path,'Structure.txt')) > 0 and os.path.getsize(os.path.join(self.__path,'test.csv')) >0 and os.path.getsize(os.path.join(self.__path,'train.csv'))>0:#checking if the files are not empty\r\n self.view.Build_Button.config(state='active')\r\n self.view.Bins_Entry.configure(state='normal',text='Enter number of bins',font=(\"Calibri\",12),justify=\"center\",exportselection=0)\r\n #sending to the view the error messages\r\n else:\r\n self.view.file_error_handling(\"\",\"One or more of the files is empty\")\r\n else:\r\n self.view.file_error_handling(\"required files are missing\", \"The directory must have Structure.txt,test.csv and train.csv files\")",
"def GetInputPath(self):\n self.inputDir = raw_input(\"Where should files be read from? This can be a file or a folder of files\\n\\r>>> \")\n if os.path.isabs(self.inputDir):\n if os.path.isdir(self.inputDir):\n self.isFolder = True\n self.inputDirs = os.listdir(self.inputDir)\n elif os.path.isfile(self.inputDir):\n self.isFolder = False\n self.inputDirs = [self.inputDir]\n else:\n print \"That path does not exist. Try again\"\n self.GetInputPath()\n else:\n print \"that was not an excepted path name. Try again.\"\n self.GetInputPath()"
]
| [
"0.70307356",
"0.65499437",
"0.65499437",
"0.65025043",
"0.63630617",
"0.63465846",
"0.63110805",
"0.6154497",
"0.6062121",
"0.604911",
"0.59717196",
"0.5928342",
"0.5911152",
"0.5910593",
"0.5897845",
"0.5871106",
"0.58121234",
"0.57989126",
"0.5795736",
"0.5753665",
"0.57453686",
"0.57214403",
"0.5693549",
"0.5682836",
"0.5680563",
"0.56441873",
"0.56112194",
"0.56065345",
"0.56005156",
"0.55657"
]
| 0.8126205 | 0 |
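The check_path record above is a plain membership test against self.app_path. As a small illustrative variant (not part of the original method), the candidate path can be normalized before the comparison so that trailing slashes or ".." segments do not cause false negatives:

import os

def check_path(app_paths, path):
    # Return True if the normalized path is one of the registered application paths.
    normalized = os.path.normpath(path)
    return normalized in {os.path.normpath(p) for p in app_paths}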
Invoke Hadoop DFS commands. | def do_dfs(self, line):
    # Split the input line into arguments, dropping empty tokens.
    args = list(filter(None, line.strip().split()))
    if not args:
        self.help_dfs()
    else:
        cmds = ["dfs"] + args
        (retcode, stdout) = hadoop_cmd(cmds, MJob.hadoop_home)
        if retcode is False:
            pass  # Popen failed, the hadoop binary could not be launched
        else:
            print(stdout)
            if retcode != 0:
                print("hadoop dfs retcode=%s" % retcode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hadoop(self, command, *args, **kwargs):\n hadoop_cmd = \"-{}\".format(re.sub(\"^-*\", \"\", command))\n return self.exec(\"hadoop fs\", hadoop_cmd, *args, **kwargs)",
"def run_cmd(cmd):\n command = cmd.split(\" \")[0]\n if command == \"ls\":\n r = requests.get(url.format(cmd.split(\" \")[1], \"OPEN\", userName))\n print(r.json())\n elif command == 'put':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Append_to_a_File\n # this part usess system call to contact the remote\n # server first creating the file then append toit\n # Sample use\n # >>> PUT <file-name> <file-path>\n fileName = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'PUT', url.format(\n fileName, 'CREATE', userName)]\n subprocess.call(system_call)\n system_call = ['curl', '-i', '-X', 'POST', url.format(\n fileName, 'APPEND', userName)]\n subprocess.call(system_call)\n system_call = ['curl', '-i', '-X', 'POST', '-T', cmd.slpit(\" \")[2],\n url.format(fileName, 'APPEND', userName)]\n subprocess.call(system_call)\n\n elif command == 'get':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Open_and_Read_a_File\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> GET <file-path>\n fileName = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-L', url.format(\n fileName, 'OPEN', userName)]\n subprocess.call(system_call)\n elif command == 'mkdir':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Make_a_Directory\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> mkdir <folder-Path>\n folderPath = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'PUT', url.format(\n folderPath, 'MKDIRS', userName)]\n subprocess.call(system_call)\n elif command == 'rmdir':\n # https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Delete_a_FileDirectory\n # this part usess system call to contact the remote\n # to read from file\n # Sample use\n # >>> rmdir <file-path>\n folderPath = cmd.split(\" \")[1]\n system_call = ['curl', '-i', '-X', 'DELETE', url.format(\n folderPath, 'DELETE', userName)]\n subprocess.call(system_call)\n else:\n print 'Command is invalid.'",
"def _call(self, cmd, *args, **kwargs):\r\n cmd = ['hadoop', '--config', self._config, 'dfs', cmd] + list(args)\r\n heapsize = str(int(self._heap_limit.as_(Data.MB)))\r\n with environment_as(HADOOP_HEAPSIZE=heapsize):\r\n if kwargs.get('check'):\r\n return self._cmd_class.check_call(cmd)\r\n elif kwargs.get('return_output'):\r\n return self._cmd_class.execute_and_get_output(cmd)\r\n elif kwargs.get('supress_output'):\r\n return self._cmd_class.execute_suppress_stdout(cmd)\r\n else:\r\n return self._cmd_class.execute(cmd)",
"def hdfs(self, *args, **kwargs):\n return self.hadoop(*args, **kwargs)",
"def run_hdfs_command(svc_name, command):\n full_command = 'docker run -e HDFS_SERVICE_NAME={} mesosphere/hdfs-client:2.6.4 /bin/bash -c \"/configure-hdfs.sh && {}\"'.format(svc_name, command)\n\n rc, output = shakedown.run_command_on_master(full_command)\n return rc, output",
"def dfs_ls(self, path):\n out, err = self.execute_command(\"hdfs dfs -ls \" + path)\n if len(err) > 0:\n raise RuntimeError(\n \"unable to execute hdfs dfs -ls \" +\n path +\n \"\\nERR:\\n\" +\n err)\n return ASSHClient.parse_lsout(out, False)",
"def client():\n return hdfs.connect()",
"def get_from_hadoop(from_, to_=None):\n import os\n if to_ is None:\n to_ = \"data/\"+from_.split(\"/\")[-1]\n cmd = \"hadoop fs -get \"+from_+\" \"+to_\n os.system(cmd)",
"def _read_hdfs(self):\n\t\traise NotImplementedError()",
"def run(self, check=True, bind_cores=None):\n # pylint: disable=arguments-differ\n self.log.info('Starting dfuse at %s', self.mount_dir.value)\n\n # A log file must be defined to ensure logs are captured\n if \"D_LOG_FILE\" not in self.env:\n raise CommandFailure(\n \"Dfuse missing environment variables for D_LOG_FILE\")\n\n if 'D_LOG_MASK' not in self.env:\n self.env['D_LOG_MASK'] = 'INFO'\n\n # create dfuse dir if does not exist\n self.create_mount_point()\n\n # run dfuse command\n cmd = self.env.get_export_str()\n if bind_cores:\n cmd += 'taskset -c {} '.format(bind_cores)\n cmd += str(self)\n self.log.info(\"Command is '%s'\", cmd)\n ret_code = pcmd(self.hosts, cmd, timeout=30)\n\n if 0 in ret_code:\n self.running_hosts.add(ret_code[0])\n del ret_code[0]\n\n if ret_code:\n error_hosts = NodeSet(\n \",\".join(\n [str(node_set) for code, node_set in list(ret_code.items())\n if code != 0]))\n raise CommandFailure(\n \"Error starting dfuse on the following hosts: {}\".format(\n error_hosts))\n\n if check:\n # Dfuse will block in the command for the mount to complete, even\n # if run in background mode so it should be possible to start using\n # it immediately after the command returns.\n if not self.check_running(fail_on_error=False):\n self.log.info('Waiting two seconds for dfuse to start')\n time.sleep(2)\n if not self.check_running(fail_on_error=False):\n self.log.info('Waiting five seconds for dfuse to start')\n time.sleep(5)\n self.check_running()",
"def execute_process(uuid, output, hadoop):\n # Ruta del proceso\n backend_path = \"/home/bigdata07/backend\"\n # Path para el proceso de log\n path = \"%s/logs/%s.txt\" % (backend_path, uuid)\n # Comando para crear la carpeta para guardar los resultados del proceso de Hadoop\n backend_output_dir = \"%s/output/%s\" % (backend_path, uuid)\n mkdir_output = \"mkdir -p %s\" % (backend_output_dir)\n # Comando para hacer get de HDFS al home\n get_output = \"hdfs dfs -get %s/* %s/\" % (output, backend_output_dir)\n with open(path, \"w\") as file:\n # Ejecutar Hadoop\n subprocess.run(hadoop.split(\" \"), check=True, stdout=file, stderr=file)\n subprocess.run(mkdir_output.split(\" \"), check=True, stdout=file, stderr=file)\n subprocess.run(get_output.split(\" \"), check=True, stdout=file, stderr=file)\n # Resolve() de una promesa en JS\n return backend_output_dir",
"def run(self):\n self.log.info(\"Start CMD>>> %s\", str(self.runner))\n\n # Temporary display debug mount information\n self.log.info(\"%s\", \"=\" * 80)\n pcmd(self._hosts, \"df -h -t tmpfs\", True, None, None)\n self.log.info(\"%s\", \"=\" * 80)\n\n return self.runner.run()",
"def dfs_mkdir(self, path):\n return self.execute_command(\"hdfs dfs -mkdir \" + path)",
"def df():\n run(\"df -h\")",
"def run_system_analyzer(cluster, scheduler_commands_factory, request, partition=None):\n\n out_dir = request.config.getoption(\"output_dir\")\n local_result_dir = f\"{out_dir}/system_analyzer\"\n compute_node_shared_dir = \"/opt/parallelcluster/shared\"\n head_node_dir = \"/tmp\"\n\n logging.info(\"Creating remote_command_executor and scheduler_commands\")\n remote_command_executor = RemoteCommandExecutor(cluster)\n scheduler_commands = scheduler_commands_factory(remote_command_executor)\n\n logging.info(f\"Retrieve head node system information for test: {request.node.name}\")\n result = remote_command_executor.run_remote_script(SYSTEM_ANALYZER_SCRIPT, args=[head_node_dir], timeout=180)\n logging.debug(f\"result.failed={result.failed}\")\n logging.debug(f\"result.stdout={result.stdout}\")\n logging.info(\n \"Copy results from remote cluster into: \"\n f\"{local_result_dir}/system_information_head_node_{request.node.name}.tar.gz\"\n )\n os.makedirs(f\"{local_result_dir}\", exist_ok=True)\n remote_command_executor.get_remote_files(\n f\"{head_node_dir}/system-information.tar.gz\",\n f\"{local_result_dir}/system_information_head_node_{request.node.name}.tar.gz\",\n preserve_mode=False,\n )\n logging.info(\"Head node system information correctly retrieved.\")\n\n logging.info(f\"Retrieve compute node system information for test: {request.node.name}\")\n result = scheduler_commands.submit_script(\n SYSTEM_ANALYZER_SCRIPT, script_args=[compute_node_shared_dir], partition=partition\n )\n job_id = scheduler_commands.assert_job_submitted(result.stdout)\n scheduler_commands.wait_job_completed(job_id, timeout=180)\n scheduler_commands.assert_job_succeeded(job_id)\n logging.info(\n \"Copy results from remote cluster into: \"\n f\"{local_result_dir}/system_information_compute_node_{request.node.name}.tar.gz\"\n )\n remote_command_executor.get_remote_files(\n f\"{compute_node_shared_dir}/system-information.tar.gz\",\n f\"{local_result_dir}/system_information_compute_node_{request.node.name}.tar.gz\",\n preserve_mode=False,\n )\n logging.info(\"Compute node system information correctly retrieved.\")",
"def handle_dfs_data_dir(func, params, update_cache=True):\n\n # Get the data dirs that Ambari knows about and their last known mount point\n prev_data_dir_to_mount_point = get_data_dir_to_mount_from_file(params)\n\n # Dictionary from data dir to the mount point that will be written to the history file.\n # If a data dir becomes unmounted, we should still keep its original value.\n # If a data dir was previously on / and is now mounted on a drive, we should store that too.\n data_dir_to_mount_point = prev_data_dir_to_mount_point.copy()\n\n # This should typically be False for customers, but True the first time.\n allowed_to_create_any_dir = False\n\n if params.data_dir_mount_file is None:\n allowed_to_create_any_dir = True\n Logger.warning(\"DataNode is allowed to create any data directory since dfs.datanode.data.dir.mount.file property is null.\")\n else:\n if not os.path.exists(params.data_dir_mount_file):\n allowed_to_create_any_dir = True\n Logger.warning(\"DataNode is allowed to create any data directory since dfs.datanode.data.dir.mount.file property has file %s and it does not exist.\" % params.data_dir_mount_file)\n\n valid_data_dirs = [] # data dirs that have been normalized\n error_messages = [] # list of error messages to report at the end\n data_dirs_unmounted = set() # set of data dirs that have become unmounted\n\n for data_dir in params.dfs_data_dir.split(\",\"):\n if data_dir is None or data_dir.strip() == \"\":\n continue\n\n data_dir = data_dir.strip()\n valid_data_dirs.append(data_dir)\n\n if not os.path.isdir(data_dir):\n may_create_this_dir = allowed_to_create_any_dir\n last_mount_point_for_dir = None\n\n # Determine if should be allowed to create the data_dir directory.\n # Either first time, became unmounted, or was just mounted on a drive\n if not may_create_this_dir:\n last_mount_point_for_dir = prev_data_dir_to_mount_point[data_dir] if data_dir in prev_data_dir_to_mount_point else None\n\n if last_mount_point_for_dir is None:\n # Couldn't retrieve any information about where this dir used to be mounted, so allow creating the directory to be safe.\n may_create_this_dir = True\n else:\n curr_mount_point = get_mount_point_for_dir(data_dir)\n\n # This means that create_this_dir will stay false if the directory became unmounted.\n # In other words, allow creating if it was already on /, or it's currently not on /\n if last_mount_point_for_dir == \"/\" or (curr_mount_point is not None and curr_mount_point != \"/\"):\n may_create_this_dir = True\n\n if may_create_this_dir:\n Logger.info(\"Forcefully creating directory: {0}\".format(data_dir))\n\n # Call the function\n func(data_dir, params)\n else:\n # Additional check that wasn't allowed to create this dir and became unmounted.\n if last_mount_point_for_dir is not None:\n data_dirs_unmounted.add(data_dir)\n msg = \"Directory {0} does not exist and became unmounted from {1} .\".format(data_dir, last_mount_point_for_dir)\n error_messages.append(msg)\n pass\n\n # This is set to false during unit tests.\n if update_cache:\n get_and_cache_mount_points(refresh=True)\n\n # Update all data dirs (except the unmounted ones) with their current mount points.\n for data_dir in valid_data_dirs:\n # At this point, the directory may or may not exist\n if os.path.isdir(data_dir) and data_dir not in data_dirs_unmounted:\n curr_mount_point = get_mount_point_for_dir(data_dir)\n data_dir_to_mount_point[data_dir] = curr_mount_point\n\n if error_messages and len(error_messages) > 0:\n header = \" ERROR \".join([\"*****\"] * 6)\n header = 
\"\\n\" + \"\\n\".join([header, ] * 3) + \"\\n\"\n msg = \" \".join(error_messages) + \\\n \" Please remount the data dir(s) and run this command again. To ignore this failure and allow writing to the \" \\\n \"root partition, either update the contents of {0}, or delete that file.\".format(params.data_dir_mount_file)\n Logger.error(header + msg + header)\n\n data_dir_to_mount = DATA_DIR_TO_MOUNT_HEADER\n for kv in data_dir_to_mount_point.iteritems():\n data_dir_to_mount += kv[0] + \",\" + kv[1] + \"\\n\"\n\n return data_dir_to_mount",
"def download_cluster(self, remotepath, localpath, merge=False):\n cget = \"getmerge\" if merge else \"get\"\n if isinstance(remotepath, str):\n filename = os.path.split(localpath)[-1]\n self.execute_command(\n \"hdfs dfs -{2} {0} {1}\".format(remotepath, filename, cget))\n self.download(filename, localpath)\n self.execute_command(\"rm {0}\".format(filename))\n else:\n tod = []\n for afile in remotepath:\n filename = os.path.split(afile)[-1]\n self.execute_command(\n \"hdfs dfs -{2} {0} {1}\".format(afile, filename, cget))\n tod.append(filename)\n self.download(tod, localpath)\n for afile in tod:\n self.execute_command(\"rm {0}\".format(afile))\n\n return remotepath",
"def run(self, check=True, mount_callback=None):\n # pylint: disable=arguments-differ,arguments-renamed\n self.log.info('Starting dfuse at %s on %s', self.mount_dir.value, str(self.hosts))\n\n # A log file must be defined to ensure logs are captured\n if \"D_LOG_FILE\" not in self.env:\n raise CommandFailure(\"Dfuse missing environment variables for D_LOG_FILE\")\n\n if 'D_LOG_MASK' not in self.env:\n self.env['D_LOG_MASK'] = 'INFO'\n\n if 'COVFILE' not in self.env:\n self.env['COVFILE'] = '/tmp/test.cov'\n\n # Determine which fusermount command to use before mounting\n if not self._fusermount_cmd:\n self.log.info('Check which fusermount command to use')\n for fusermount in ('fusermount3', 'fusermount'):\n if run_remote(self.log, self.hosts, f'{fusermount} --version').passed:\n self._fusermount_cmd = fusermount\n break\n if not self._fusermount_cmd:\n raise CommandFailure(f'Failed to get fusermount command on: {self.hosts}')\n\n # mark the instance as needing cleanup before starting setup\n self.__need_cleanup = True\n\n # setup the mount point\n self._setup_mount_point()\n\n # run dfuse command\n result = run_remote(self.log, self.hosts, self.with_exports, timeout=30)\n self._running_hosts.add(result.passed_hosts)\n if mount_callback:\n mount_callback(result)\n elif not result.passed:\n raise CommandFailure(f\"dfuse command failed on hosts {result.failed_hosts}\")\n\n if check:\n # Dfuse will block in the command for the mount to complete, even\n # if run in background mode so it should be possible to start using\n # it immediately after the command returns.\n num_retries = 3\n for retry in range(1, num_retries + 1):\n if not self.check_running(fail_on_error=retry == num_retries):\n self.log.info('Waiting two seconds for dfuse to start')\n time.sleep(2)",
"def getClusterRole(self):\n\tlist = []\n\tcounter = 0\n\tf = self.sendCmd(\"cat /etc/hadoop/conf/hdfs-site.xml\")\n\tlist = f.split(\"\\n\")\n\tfor line in list:\n\t\tcounter = counter + 1\n\t\tif \"dfs.ha.namenodes\" in line:\n\t\t\tlogger.info(\"Setup is HA\")\n\t\t\tlogger.info (\"Finding nameservices id\")\n\t\t\ta = list[counter]\n\t\t\tnameservice1=a.split(\"value>\")[1].split(\"<\")[0].split(\",\")[0].strip()\n\t\t\tnameservice2=a.split(\"value>\")[1].split(\"<\")[0].split(\",\")[1].strip()\n\tnode_ip = self.run_cmd(\"grep \" + nameservice2 + \" /etc/hosts | awk '{print $1}'\")\n print \"===================\"\n\tprint \"node ip is %s\" %node_ip.split(\"\\n\")[0]\n print \"===================\"\n cmd = \"ifconfig | grep \" + node_ip.split(\"\\n\")[0] \n\toutput = self.run_cmd(cmd)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n cmd=\"/usr/bin/hdfs haadmin -getServiceState \" + nameservice2\n\t\trole=self.run_cmd(cmd)\n\t\tif \"active\" in role:\n\t\t\treturn \"master\"\n\t\telif \"standby\" in role:\n\t\t\treturn \"standby\"\n\t\telse:\n\t\t\tlogger.info(\"command was not executed successfully.Please check hadoop\")\n\telse:\n\t\tprint \"in else....\"\n\t\tcmd=\"/usr/bin/hdfs haadmin -getServiceState \" + nameservice1\n\t\trole=self.run_cmd(cmd)\n\t\tif \"active\" in role:\n\t\t\treturn \"master\"\n\t\telif \"standby\" in role:\n\t\t\treturn \"standby\"\n\t\telse:\n\t\t\tlogger.info(\"command was not executed successfully.Please check hadoop\")",
"def cli(ctx):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster subcommand called from cli\")",
"def _do_mount(self, cmd, ensure):\n try:\n self._execute(*cmd, run_as_root=True)\n except exception.ProcessExecutionError as exc:\n if ensure and 'already mounted' in exc.stderr:\n LOG.warn(_LW(\"%s is already mounted\"),\n self.gluster_manager.export)\n else:\n raise exception.GlusterfsException(\n 'Unable to mount Gluster volume'\n )",
"def main():\n\n # this next line of code is pretty tense ... let me explain what\n # it does:\n # subprocess.check_relay([\"df\"]) runs the df command and returns\n # the relay as a string\n # rstrip() trims of the last whitespace character, which is a '\\n'\n # split('\\n') breaks the string at the newline characters ... the\n # result is an array of strings\n # the list comprehension then applies shlex.split() to each string,\n # breaking each into tokens\n # when we're done, we have a two-dimensional array with rows of\n # tokens and we're ready to make objects out of them\n df_array = [shlex.split(x) for x in\n subprocess.check_relay([\"df\"]).rstrip().split('\\n')]\n df_num_lines = df_array[:].__len__()\n\n df_json = {}\n df_json[\"filesystems\"] = []\n for row in range(1, df_num_lines):\n df_json[\"filesystems\"].append(df_to_json(df_array[row]))\n sys.stdout.write(json.dumps(df_json, sort_keys=True))\n sys.stdout.flush()\n return",
"def upload_hdfs(outfile):\n\ttry :\n\t\tdestination_dir = '/team40/' + city_name + '_search_data/'+ time.strftime('%Y-%m-%d_%H-%M',time.localtime()) + outfile\n\t\thdfs = InsecureClient('http://115.146.86.32:50070', user='qilongz')\n\t\thdfs.upload(destination_dir, outfile)\n\texcept Exception as e:\n\t\tlogging.error(str(e))",
"def task_run(taskname,mynodes):\n print \"FULLRUN\"\n task = task_self()\n print \"Booting task: \" , taskname\n \n # first initiate environment to run our python+java\n os.chdir(CASSANDRA_HOME)\n \n #FIXME: set init_environment to actually work\n #task.shell(\"cluster_config/init_environment.sh\",nodes=mynodes)\n cmdenv = \"export PYTHONHOME=/opt/python2.7.2; \\\n export JAVA_HOME=/opt/jdk1.6.0_27; \\\n export PYTHONPATH=/opt/python2.7.2/lib; \\\n export \\\n PATH=/opt/python2.7.2/lib:/opt/python2.7.2/bin:/opt/jdk1.6.0_27/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin;\"\n \n\n \n task.run(cmdenv+taskname,nodes=mynodes)\n print \":\\n\".join([\"%s=%s\" % (i,j) for j,i in task.iter_buffers()])",
"def submit_jobs(args, udf_command):\n hosts = []\n thread_list = []\n server_count_per_machine = 0\n\n # Get the host addresses of the cluster.\n ip_config = args.ip_config\n with open(ip_config) as f:\n for line in f:\n result = line.strip().split()\n if len(result) >= 3:\n ip = result[0]\n host = result[2]\n hosts.append((ip, host))\n else:\n raise RuntimeError(\"Format error of ip_config.\")\n server_count_per_machine = args.num_servers\n assert args.num_parts == len(hosts), \\\n 'The number of graph partitions has to match the number of machines in the cluster.'\n\n tot_num_clients = args.num_trainers * (1 + args.num_samplers) * len(hosts)\n # launch server tasks\n server_cmd = 'DGL_ROLE=server DGL_NUM_SAMPLER=' + str(args.num_samplers)\n server_cmd = server_cmd + ' ' + 'OMP_NUM_THREADS=' + str(args.num_server_threads)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n server_cmd = server_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n server_cmd = server_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n server_cmd = server_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n for i in range(len(hosts)*server_count_per_machine):\n _, pod_name = hosts[int(i / server_count_per_machine)]\n cmd = server_cmd + ' ' + 'DGL_SERVER_ID=' + str(i)\n cmd = cmd + ' ' + udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n # launch client tasks\n client_cmd = 'DGL_DIST_MODE=\"distributed\" DGL_ROLE=client DGL_NUM_SAMPLER=' + str(args.num_samplers)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(tot_num_clients)\n client_cmd = client_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.part_config)\n client_cmd = client_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)\n client_cmd = client_cmd + ' ' + 'DGL_NUM_SERVER=' + str(args.num_servers)\n if os.environ.get('OMP_NUM_THREADS') is not None:\n client_cmd = client_cmd + ' ' + 'OMP_NUM_THREADS=' + os.environ.get('OMP_NUM_THREADS')\n if os.environ.get('PYTHONPATH') is not None:\n client_cmd = client_cmd + ' ' + 'PYTHONPATH=' + os.environ.get('PYTHONPATH')\n\n torch_cmd = '-m torch.distributed.launch'\n torch_cmd = torch_cmd + ' ' + '--nproc_per_node=' + str(args.num_trainers)\n torch_cmd = torch_cmd + ' ' + '--nnodes=' + str(len(hosts))\n torch_cmd = torch_cmd + ' ' + '--node_rank=' + str(0)\n torch_cmd = torch_cmd + ' ' + '--master_addr=' + str(hosts[0][0])\n torch_cmd = torch_cmd + ' ' + '--master_port=' + str(1234)\n for node_id, tu in enumerate(hosts):\n _, pod_name = tu\n new_torch_cmd = torch_cmd.replace('node_rank=0', 'node_rank='+str(node_id))\n if 'python3' in udf_command:\n new_udf_command = udf_command.replace('python3', 'python3 ' + new_torch_cmd)\n elif 'python2' in udf_command:\n new_udf_command = udf_command.replace('python2', 'python2 ' + new_torch_cmd)\n else:\n new_udf_command = udf_command.replace('python', 'python ' + new_torch_cmd)\n cmd = client_cmd + ' ' + new_udf_command\n cmd = 'cd ' + str(args.workspace) + '; ' + cmd\n kubexec_multi(cmd, pod_name, thread_list)\n\n for thread in thread_list:\n thread.join()",
"def cli(host_cluster_db, path, time_restore, target_host):\n rubrik = rbk.connect_rubrik()\n cluster_info = rbk.get_cluster_info(rubrik)\n timezone = cluster_info['timezone']['timezone']\n print(\"Connected to cluster: {}, version: {}, Timezone: {}.\".format(cluster_info['name'], cluster_info['version'], timezone))\n host_cluster_db = host_cluster_db.split(\":\")\n oracle_db_id = rbk.get_oracle_db_id(rubrik, host_cluster_db[1], host_cluster_db[0])\n oracle_db_info = rbk.get_oracle_db_info(rubrik, oracle_db_id)\n # If not target host is provide mount the backup pieces on the source database host\n if not target_host:\n target_host = host_cluster_db[0]\n # If the source database is on a RAC cluster the target must be a RAC cluster otherwise it will be an Oracle Host\n if 'racName' in oracle_db_info.keys():\n if oracle_db_info['racName']:\n host_id = rbk.get_rac_id(rubrik, cluster_info['id'], target_host)\n else:\n host_id = rbk.get_host_id(rubrik, cluster_info['id'], target_host)\n # Use the provided time or if no time has been provided use the teh most recent recovery point\n if time_restore:\n time_ms = rbk.epoch_time(time_restore, timezone)\n print(\"Using {} for mount.\". format(time_restore))\n else:\n print(\"Using most recent recovery point for mount.\")\n oracle_db_info = rbk.get_oracle_db_info(rubrik, oracle_db_id)\n time_ms = rbk.epoch_time(oracle_db_info['latestRecoveryPoint'], timezone)\n print(\"Starting the mount of the requested {} backup pieces on {}.\".format(host_cluster_db[1], target_host))\n live_mount_info = rbk.live_mount(rubrik, oracle_db_id, host_id, time_ms, files_only=True, mount_path=path)\n cluster_timezone = pytz.timezone(timezone)\n utc = pytz.utc\n start_time = utc.localize(datetime.datetime.fromisoformat(live_mount_info['startTime'][:-1])).astimezone(cluster_timezone)\n fmt = '%Y-%m-%d %H:%M:%S %Z'\n print(\"Live mount status: {}, Started at {}.\".format(live_mount_info['status'], start_time.strftime(fmt)))\n return live_mount_info",
"def get_node_cmd(name, tech, sam_files, res_file, out_fpath,\n points=slice(0, 100), points_range=None,\n sites_per_worker=None, max_workers=None,\n logdir='./out/log_gen', output_request=('cf_mean',),\n site_data=None, mem_util_lim=0.4, timeout=1800,\n curtailment=None, gid_map=None, verbose=False):\n\n # mark a cli arg string for main() in this module\n arg_main = '-n {}'.format(SLURM.s(name))\n\n # make a cli arg string for direct() in this module\n arg_direct = ['-t {}'.format(SLURM.s(tech)),\n '-p {}'.format(SLURM.s(points)),\n '-sf {}'.format(SLURM.s(sam_files)),\n '-rf {}'.format(SLURM.s(res_file)),\n '-o {}'.format(SLURM.s(out_fpath)),\n '-spw {}'.format(SLURM.s(sites_per_worker)),\n '-lo {}'.format(SLURM.s(logdir)),\n '-or {}'.format(SLURM.s(output_request)),\n '-mem {}'.format(SLURM.s(mem_util_lim))]\n\n if site_data:\n arg_direct.append('-sd {}'.format(SLURM.s(site_data)))\n\n if curtailment:\n arg_direct.append('-curt {}'.format(SLURM.s(curtailment)))\n\n if gid_map:\n arg_direct.append('-gm {}'.format(SLURM.s(gid_map)))\n\n # make a cli arg string for local() in this module\n arg_loc = ['-mw {}'.format(SLURM.s(max_workers)),\n '-to {}'.format(SLURM.s(timeout)),\n '-pr {}'.format(SLURM.s(points_range))]\n\n if verbose:\n arg_loc.append('-v')\n\n # Python command that will be executed on a node\n # command strings after cli v7.0 use dashes instead of underscores\n cmd = ('python -m reV.generation.cli_gen '\n '{arg_main} direct {arg_direct} local {arg_loc}'\n .format(arg_main=arg_main,\n arg_direct=' '.join(arg_direct),\n arg_loc=' '.join(arg_loc)))\n logger.debug('Creating the following command line call:\\n\\t{}'.format(cmd))\n\n return cmd",
"def hdfs_listdir(self, hdfs_dir, entry_type='all', client=None, **kwargs):\n \n self.logger.debug(\"In method, hdfs_listdir\")\n \n if client is None:\n client = self.Client\n \n directory = hdfs_dir\n entry_type = entry_type.lower()\n self.logger.debug(\"Listing '%s' entry types at HDFS location '%s'\"\n %(entry_type,directory))\n \n for i in range(self.RETRIES):\n try:\n #The lstrip is required for pywebhdfs- Ex: home/data/\n status = client.list_dir(directory.lstrip('/'))\n \n except Exception as e:\n self.logger.debug(\"In method, hdfs_listdir\")\n self.logger.error(\"Received '%s' while listing directory for '%s'\"\n %(e,directory),exc_info=True)\n \n if i+1 != self.RETRIES:\n if isinstance(e, IOError):\n traceback.print_exc(file=sys.stderr)\n self.logger.warning(\"Resubmitting request for directory listing\")\n time.sleep(i*3)\n else:\n raise\n \n elif isinstance(e, IOError):\n traceback.print_exc(file=sys.stderr)\n #ConnectionError, ConnectionRefusedError,...etc. Need to confirm that all\n #connection type errors are an instance of IOError\n raise ResourceError(\"WebHDFS error during list_dir on path '%s'\"%directory) \n else:\n raise\n else:\n self.logger.debug(\"In method, hdfs_listdir\")\n self.logger.debug(\"Successful HDFS directory listing\")\n if entry_type == 'all':\n hdfsDirList = [item['pathSuffix'] for item in status['FileStatuses']['FileStatus']]\n \n elif entry_type == 'file':\n hdfsDirList = [item['pathSuffix'] for item in status['FileStatuses']['FileStatus'] \n if item['type'] == 'FILE']\n \n elif entry_type == 'dir': \n hdfsDirList = [item['pathSuffix'] for item in status['FileStatuses']['FileStatus'] \n if item['type'] == 'DIRECTORY']\n break\n \n return hdfsDirList",
"def executeOnMaster(self, cmd):\n if self._hostnameResolves(self.getManagementEndpoint()):\n ssh = SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n self.getManagementEndpoint(),\n username = self.config.get('ACS', \"username\"),\n port = 2200,\n key_filename = os.path.expanduser(self.config.get('SSH', \"privatekey\")))\n session = ssh.get_transport().open_session()\n self.log.debug(\"Session opened on master.\")\n self.log.debug(\"Executing on master: \" + cmd)\n\n AgentRequestHandler(session)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n stdin.close()\n \n result = \"\"\n for line in stdout.read().splitlines():\n self.log.debug(line.decude(\"utf-8\"))\n result = result + line.decode(\"utf-8\") + \"\\n\"\n for line in stderr.read().splitlines():\n self.log.error(line.decode(\"utf-8\"))\n else:\n self.log.error(\"Endpoint \" + self.getManagementEndpoint() + \" does not exist, cannot SSH into it.\")\n result = \"Exception: No cluster is available at \" + self.getManagementEndpoint()\n ssh.close()\n return result",
"def run_example_cluster_cmd(example_module_name, example_argv):\n run_example_cluster(example_module_name, example_argv)"
]
| [
"0.7834996",
"0.69288254",
"0.68616354",
"0.67395175",
"0.65236706",
"0.62854147",
"0.61580133",
"0.60025924",
"0.5775269",
"0.5685871",
"0.5550844",
"0.5356002",
"0.53527516",
"0.5334417",
"0.5288347",
"0.52522856",
"0.5249708",
"0.52452034",
"0.5217709",
"0.5211675",
"0.51995856",
"0.5196586",
"0.5160361",
"0.5125912",
"0.50846523",
"0.5075187",
"0.4994934",
"0.49651924",
"0.49499175",
"0.49465775"
]
| 0.78665143 | 0 |
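The do_dfs record above shells out through a hadoop_cmd helper that is not shown in this dump. A minimal sketch of what such a helper could look like with subprocess is given below; the HADOOP_HOME layout and the (retcode, stdout) return shape are assumptions chosen to match how do_dfs uses the result.

import os
import subprocess

def hadoop_cmd(args, hadoop_home):
    # Run `hadoop <args...>` and return (returncode, stdout); returncode is False if launch fails.
    hadoop_bin = os.path.join(hadoop_home, "bin", "hadoop")
    try:
        proc = subprocess.run([hadoop_bin] + list(args), capture_output=True, text=True)
    except OSError:
        return (False, "")  # Popen failed, e.g. the hadoop binary was not found
    return (proc.returncode, proc.stdout)

# Usage: retcode, out = hadoop_cmd(["dfs", "-ls", "/"], "/opt/hadoop")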
Used internally by all save queries to save the JSON responses directly to an Elasticsearch node. | def _save_elasticsearch(self, json_response, index, doc_type):
try:
_ = self._ensure_es_index(index)
data = self.elasticsearch.index(index=index,
doc_type=doc_type,
body=json.dumps(json_response))
self.elasticsearch.indices.refresh(index=index)
except TransportError as error_msg:
self.logger.error('%s triggered while trying to index type %s with body: %s',
error_msg.error, doc_type, json.dumps(json_response))
return False
self.logger.debug("Document added to index '%s' with type '%s'. Document: %s which " \
"returned data: %s", index, doc_type, json.dumps(json_response), data)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize(self, value):\n # (Any) -> json\n # this is called when writing to elasticsearch",
"def backup_es(esdoc_class, outfile=None):\n data = esdoc_class._index.get()\n idx_name = list(data)[0]\n data[idx_name][\"docs\"] = list(\n dict(_id=hit.meta.id, **hit.to_dict()) for hit in esdoc_class.search().scan()\n )\n if outfile:\n with open(outfile, \"w\") as out_f:\n json.dump(data, out_f, indent=2, default=json_serial)\n return data",
"def save(self, data, **kwargs):\r\n\r\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\r\n lg.setLevel(self.log_level)\r\n\r\n es = self.es\r\n es_index = get2(kwargs, \"es_index\", self.es_index)\r\n reset_index = kwargs.get(\"reset_index\",self.reset_index)\r\n doc_type = kwargs.get(\"doc_type\", self.doc_type)\r\n op_type = kwargs.get(\"op_type\", self.op_type)\r\n id_value = kwargs.get(\"id\")\r\n id_field = kwargs.get(\"id_field\")\r\n if id_field:\r\n id_value = data.get(id_field)\r\n if op_type == \"index\":\r\n result = es.index(index=es_index,\r\n id=id_value,\r\n doc_type=doc_type,\r\n body=data)\r\n elif op_type == \"create\":\r\n result = es.create(index=es_index,\r\n id=id_value,\r\n doc_type=doc_type,\r\n body=data)\r\n\r\n lg.debug(\"Result = \\n%s\",pp.pformat(result))\r\n return result",
"def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")",
"def save_index(self):\n vsn_objs = [dict(Id = v['id'], Name = v['name']) for v in self.versions]\n self.backend.write_json(dict(\n Versions = vsn_objs,\n Channels = [], # This is unused.\n ApiVersion = 0,\n ), self.index_path())",
"def save(self):\n return api.put([self])",
"def elastic_search_json(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'code': self.code,\n 'description': self.description,\n 'type': self.type,\n 'active': \"true\" if self.active else \"false\",\n }",
"def save_json(node):\n return _api_internal._save_json(node)",
"def es_hit():\n return {\n \"_index\": \"testindex\",\n \"_id\": \"4beb3b3e-a935-442e-a47b-6d386947ea20\",\n \"_version\": 5,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"found\": True,\n \"_source\": {\n \"uuid\": \"4beb3b3e-a935-442e-a47b-6d386947ea20\",\n \"version_id\": 4,\n \"created\": \"2020-09-01T14:26:00+00:00\",\n \"updated\": \"2020-09-02T14:28:21.968149+00:00'\",\n \"id\": \"12345-abcde\",\n \"metadata\": {\n \"title\": \"My record\",\n \"date\": \"2020-09-20\",\n },\n \"pids\": {\n \"oaiid\": {\"value\": \"\", \"provider\": \"local\"},\n },\n },\n }",
"def save_object(self, obj):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % obj[\"objectID\"]).encode('utf8'), safe='')), self.client.timeout, obj)",
"def save_results(self, export_json_path):\n with open(export_json_path, 'w') as f:\n json.dump(self.results, f)",
"def as_json(self):",
"def set_in_index(document, index, type):\n response = None\n\n #Try 3 times to store the document in ES, each time picking a random ES node address in case of failure\n for retries in range(3):\n try:\n log('ES Set Request :: ' + json.dumps(document) + ' : ' + index + ':' + type)\n response = es.index(index=index, doc_type=type, id=document['id'], body=document)\n log(\"ES Set Response :: \" + json.dumps(response))\n except ImproperlyConfigured:\n log(\"ES ImproperlyConfigured!\" + traceback.format_exc())\n continue\n except ElasticsearchException:\n log(\"ES ElasticsearchException!\" + traceback.format_exc())\n continue\n except TransportError:\n log(\"ES TransportError!\" + traceback.format_exc())\n continue\n except NotFoundError:\n log(\"ES NotFoundError!\" + traceback.format_exc())\n continue\n except ConflictError:\n log(\"ES ConflictError!\" + traceback.format_exc())\n continue\n except RequestError:\n log(\"ES RequestError!\" + traceback.format_exc())\n continue\n except SerializationError:\n log(\"ES SerializationError!\" + traceback.format_exc())\n continue\n except ConnectionError:\n log(\"ES ConnectionError!\" + traceback.format_exc())\n continue\n except Exception:\n log(\"ES Exception!\" + traceback.format_exc())\n continue\n finally:\n log(\"Total number of ES write attempts: \" + str(retries + 1))\n #Exit for loop if ES transaction is successful otherwise pick another node and continue retrying\n break\n\n if response is None or response == '':\n return 'false'\n else:\n return 'true'",
"def reset_elasticsearch_endpoint():\n reset_elasticsearch()\n resp = Response(response=json.dumps({\"success\": True}),\n status=200,\n mimetype=\"application/json\")\n return resp",
"def save(self, *args, **kwargs):\n # create the index string\n message = str()\n data = kwargs.get(\"data\")\n ts = kwargs.get(\"ts\")\n # prepare the doc for indexing\n doc = dict()\n doc['_type'] = kwargs.get(\"type\")\n doc['info'] = data\n doc['@timestamp'] = datetime.datetime(*ts[:6])\n try:\n exists = self.es.indices.exists(kwargs.get(\"index\"))\n if not exists:\n map_st, map_msg = self.build_mapping(**kwargs)\n if not map_st:\n return map_st, map_msg\n res = self.es.index(\n index=kwargs.get(\"index\"),\n doc_type=doc['_type'],\n body=doc, # message\n timestamp=datetime.datetime.utcnow(), # set to current time\n consistency='one', # do not wait for quorum / all shards\n replication='async', # async\n ttl=ELASTICSEARCH_TTL) # as defined in settings\n return True, res\n except TransportError, e:\n # fail silently - just log and die ...\n message = 'Error in indexing, host: {}, unable to index'.format(\n ELASTICSEARCH_CONN)\n if ADD_LOG_FAILURES:\n LOGGER.exception(e)\n return False, message",
"def post(self, **kwargs):\n data = request.json\n return save_new_writer(data=data)",
"def test_write(self, fake_Session):\n fake_resp = MagicMock()\n fake_session = MagicMock()\n fake_session.post.return_value = fake_resp\n fake_Session.return_value = fake_session\n es = elasticsearch.ElasticSearch(server='8.8.8.8',\n user='alice',\n password='iLoveDogs',\n doc_type='someLogCategory')\n\n es.write(document='{\"some\":\"JSON\"}')\n\n self.assertTrue(fake_resp.raise_for_status.called)",
"def elasticsearch(self):\n return {\n 'up': ESClientFactory.get().ping(),\n }",
"def write(self):\n self.json_o.write()",
"def publish_impl(self) -> None:\n\n LOGGER.warn('ElasticsearchPublisher is being deprecated in favor of using SearchMetadatatoElasticasearchTask\\\n which publishes ES metadata with mappings compatible with amundsensearch >= 4.0.0')\n\n actions = [json.loads(line) for line in self.file_handler.readlines()]\n # ensure new data exists\n if not actions:\n LOGGER.warning(\"received no data to upload to Elasticsearch!\")\n return\n\n # Convert object to json for elasticsearch bulk upload\n # Bulk load JSON format is defined here:\n # https://www.elastic.co/guide/en/elasticsearch/reference/6.2/docs-bulk.html\n bulk_actions = []\n cnt = 0\n\n # create new index with mapping\n self.elasticsearch_client.indices.create(index=self.elasticsearch_new_index, body=self.elasticsearch_mapping)\n\n for action in actions:\n index_row = dict(index=dict(_index=self.elasticsearch_new_index))\n action['resource_type'] = self.elasticsearch_type\n\n bulk_actions.append(index_row)\n bulk_actions.append(action)\n cnt += 1\n if cnt == self.elasticsearch_batch_size:\n self.elasticsearch_client.bulk(bulk_actions)\n LOGGER.info('Publish %i of records to ES', cnt)\n cnt = 0\n bulk_actions = []\n\n # Do the final bulk actions\n if bulk_actions:\n self.elasticsearch_client.bulk(bulk_actions)\n\n # fetch indices that have {elasticsearch_alias} as alias\n elasticsearch_old_indices = self._fetch_old_index()\n\n # update alias to point to the new index\n actions = [{\"add\": {\"index\": self.elasticsearch_new_index, \"alias\": self.elasticsearch_alias}}]\n\n # delete old indices\n delete_actions = [{\"remove_index\": {\"index\": index}} for index in elasticsearch_old_indices]\n actions.extend(delete_actions)\n\n update_action = {\"actions\": actions}\n\n # perform alias update and index delete in single atomic operation\n self.elasticsearch_client.indices.update_aliases(update_action)",
"def save(self):\n if self.get('_id'):\n return self.connection.update({'_id': self.get('_id')}, {'$set': self._export(without_id=True)})\n else:\n return self.connection.insert(self._export())",
"def save(self):\n d1 = {}\n with open(self.__file_path, mode=\"w\") as f:\n for k, v in self.__objects.items():\n d1[k] = v.to_dict()\n json.dump(d1, f)",
"def save(self, data):\n activities = [json.loads(activity['Json']) for activity in data]\n\n for i in range(len(activities)):\n activities[i]['created_at'] = to_datetime(activities[i]['created_at'])\n\n with Elastic(index='wink', doc_type='activity') as elastic:\n elastic.upload(activities, 'created_at')\n\n Log.info(\"Successfully uploaded wink activity data into elasticsearch.\")",
"def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)",
"def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )",
"def save(self, resq=None):\n map(lambda x: x.save(resq), self.backends)",
"def save_results_internal_json(self, results_internal_dict: Dict):\r\n filename = f\"{self.search_internal_path}/results_internal.json\"\r\n\r\n with open_(filename, \"w+\") as f:\r\n json.dump(results_internal_dict, f, indent=4)",
"def proxy(path):\n try:\n r = None\n url = f\"{ES_URL}/{path}\"\n headers = dict(request.headers)\n headers.pop('Host')\n if request.method == 'POST':\n data = json.dumps(request.get_json())\n\n if settings.NO_AUTH:\n r = post(url=url,\n data=data,\n headers=headers)\n else:\n r = post(url=url,\n auth=HTTPBasicAuth(ES_USER, ES_PASSWORD),\n data=data,\n headers=headers)\n\n elif request.method == 'GET':\n if settings.NO_AUTH:\n r = get(url=url,\n headers=headers)\n else:\n r = get(url=url,\n auth=HTTPBasicAuth(ES_USER, ES_PASSWORD),\n headers=headers)\n\n return Response(\n response=json.dumps(r.json()),\n status=r.status_code,\n mimetype=\"application/json\"\n )\n except Exception as e:\n msg = 'Error while fetching data from elasticsearch'\n logging.error(msg)\n raise",
"def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-alphabay-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"alphabay_listing\",\n body=item\n )",
"def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-dreammarket-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"dreammarket_listing\",\n body=item\n )"
]
| [
"0.6234668",
"0.6036308",
"0.60285056",
"0.57445127",
"0.571755",
"0.56642663",
"0.56466454",
"0.55992895",
"0.55825806",
"0.55509573",
"0.5518996",
"0.55122334",
"0.5505802",
"0.5483249",
"0.5466571",
"0.545259",
"0.5443516",
"0.54149437",
"0.5413945",
"0.53993225",
"0.5398654",
"0.5384743",
"0.53798383",
"0.53683543",
"0.53183424",
"0.5317063",
"0.53001946",
"0.52845865",
"0.52764666",
"0.52741313"
]
| 0.724785 | 0 |
Used internally when writing to elasticsearch to ensure a given index exists. | def _ensure_es_index(self, index):
if not self.elasticsearch.indices.exists(index):
try:
self.elasticsearch.indices.create(index=index)
except TransportError as error_msg:
self.logger.error(str(error_msg.error))
return False
self.logger.info('Created Index: %s', index)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_exists(self, index: str) -> bool:\n\n if self.es.indices.exists(index=index):\n return True\n return False",
"def index_exists(self, index: str) -> bool:\n return self.__client__.indices.exists(index)",
"def index_exists(self, index):\n req = requests.head(\n urljoin(self.base_url, '{0}'.format(index)),\n verify=self.verify_certs)\n return req.status_code == 200",
"def index_exists(index_name):\n return ES.indices.exists(index=index_name)",
"def create_index(self):\n if self.index_exists():\n logger.info('Index {} already exists'.format(self.index_name))\n logger.info('Deleting existing index')\n self.indices_client.delete(index=self.index_name)\n self.create_index_if_not_exist()",
"def test_recreate_index_that_exists(self):\n indices = self.elasticsearch_cls().indices\n indices.exists.return_value = True\n\n index_name = 'abcd'\n self.client._recreate_index(index_name)\n indices.delete.assert_called_once_with(index_name)\n indices.create.assert_called_once_with(index_name)",
"def _checkIndex(self, index):\n # OPT: lets not reuse isKnown, to don't incure 1 more function\n # call\n if not self._items.has_key(index):\n raise KeyError, \\\n \"%s of %s has no key '%s' registered\" \\\n % (self.__class__.__name__,\n self.__owner.__class__.__name__,\n index)",
"def test_index_exists(mock_es_client, expected):\n index_name = 'test'\n\n connection = mock_es_client.return_value\n connection.indices.exists.return_value = expected\n\n assert elasticsearch.index_exists(index_name) == expected\n connection.indices.exists.assert_called_with(index_name)",
"def create_index(self):\n\n indice = client.IndicesClient(self.es)\n\n if not indice.exists(self.es_main_index):\n indice.create(\n index=self.es_main_index\n )\n\n return True",
"def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()",
"def has(self, index):\n raise NotImplementedError()",
"def create_index(self, index_name, body):\n if self.es.indices.exists(index_name):\n print(\"deleting '%s' index...\" % index_name)\n res = self.es.indices.delete(index=index_name)\n print(\" response: '%s'\" % res)\n\n print(\"creating '%s' index...\" % index_name)\n res = self.es.indices.create(index=index_name, body=body)\n print(\" response: '%s'\" % res)",
"def does_exist(self, index):\n if index in self.map:\n return True\n return False",
"def _assert_indices_exist(self, catalog: CatalogName):\n es_client = ESClientFactory.get()\n service = IndexService()\n for index_name in service.index_names(catalog):\n self.assertTrue(es_client.indices.exists(index_name))",
"def exist_idx(index_name):\n query = \"\"\"SELECT EXISTS(SELECT 1 \n FROM pg_indexes\n WHERE indexname = '{0}') \n AS idx_exists\"\"\".format(index_name)\n res = db.engine.execute(query).first()\n return res.idx_exists",
"def wait_for_elasticsearch_index(cluster: str, index: str):\n\n elastic = sreElastic(host=cluster)\n elastic.wait_index_relocation(index=index)",
"def create_index(es_object, index_name):\n created = False\n \"\"\" index settings \"\"\"\n settings = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"mappings\": {\n \"physicians\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"overview\": {\n \"type\": \"text\"\n },\n \"full_name\": {\n \"type\": \"text\"\n },\n \"years_of_practice\": {\n \"type\": \"text\"\n },\n \"language\": {\n \"type\": \"text\"\n },\n \"office_location\": {\n \"type\": \"text\"\n },\n \"hospital_affiliation\": {\n \"type\": \"text\"\n },\n \"specialties\": {\n \"type\": \"text\"\n },\n \"education_and_medical_training\": {\n \"type\": \"text\"\n },\n \"certification_and_licensure\": {\n \"type\": \"text\"\n },\n }\n }\n }\n }\n\n try:\n if not es_object.indices.exists(index_name):\n # Ignore 400 means to ignore \"Index Already Exist\" error.\n es_object.indices.create(index=index_name, ignore=400, body=settings)\n print('Created Index')\n created = True\n except Exception as ex:\n print(str(ex))\n finally:\n return created",
"def has_index(self):\n return self.index is not None",
"def create_index(index_name):\n resp = es.indices.create(index=index_name)\n print(resp)",
"def ensure_index(self, schema, language, mapping, return_index='external'):\n assert schema and language and mapping\n assert len(language) == 2\n assert return_index == 'external' or return_index == 'internal'\n\n external = self.get_external_index_name(schema, language, mapping.name)\n internal = self.get_internal_index_name(\n schema, language, mapping.name, mapping.version)\n\n return_value = return_index == 'external' and external or internal\n\n if internal in self.created_indices:\n return return_value\n\n if self.es_client.indices.exists(index=internal):\n self.created_indices.add(internal)\n return return_value\n\n # create the index\n self.es_client.indices.create(\n index=internal,\n mappings={\n 'properties': mapping.for_language(language)\n },\n settings={\n 'analysis': ANALYSIS_CONFIG,\n 'index': {\n 'number_of_shards': 1,\n 'number_of_replicas': 0,\n 'refresh_interval': '5s'\n }\n }\n )\n\n # point the alias to the new index\n self.es_client.indices.put_alias(name=external, index=internal)\n\n # cache the result\n self.created_indices.add(internal)\n\n return return_value",
"def create(client, name, index):\n r = client.indices.put_alias(index=index, name=name)\n LOG.info(json.dumps(r))\n ok = r.get(\"acknowledged\")\n if not ok:\n sys.exit(UNKNOWN_ERROR)",
"def has_index(self):\n\n if self._check_idx and self._index:\n return self._check_idx",
"def create_index(self, table_name, index, timeout):\n _abstract()",
"def create_index(self, table_name, index, timeout):\n _abstract()",
"def __checkFeatureIndex__(self, index, indexes):\n if index is not False:\n indexes.append(index)",
"async def exists(\n self, *, header: Optional[headers.RequestHeader] = None, key: str = \"\"\n ) -> ExistsResponse:\n\n request = ExistsRequest()\n if header is not None:\n request.header = header\n request.key = key\n\n return await self._unary_unary(\n \"/atomix.indexedmap.IndexedMapService/Exists\", request, ExistsResponse,\n )",
"def has_index(self, index):\n return index in [s[0] for s in self.get_index_list()]",
"def check_index(self, index_filename=None, req_idx=False):\n if index_filename is None:\n possible_index_path = r'./{}.bai'.format(os.path.relpath(self._handle.name))\n if os.path.isfile(possible_index_path):\n self._index_path = possible_index_path\n self._random_access = True\n return True\n else:\n if req_idx:\n raise IOError('htsfile is closed or index could not be opened')\n warnings.warn(\"No supplied index file and '{}' was not found. Random access disabled\".format(possible_index_path), UserWarning)\n self._random_access = False\n return False\n else:\n if os.path.isfile(index_filename):\n self._index_path = index_filename\n self._random_access = True\n return True\n else:\n if req_idx:\n raise IOError('htsfile is closed or index could not be opened')\n warnings.warn(\"Index file '{}' was not found. Random access disabled\".format(index_filename), UserWarning)\n self._random_access = False\n return False",
"def create(\n self,\n index: IO,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:",
"def test_get_document_inexistent(empty_index):\n with pytest.raises(Exception):\n empty_index().get_document(\"123\")"
]
| [
"0.8182904",
"0.76497215",
"0.73477477",
"0.7327194",
"0.7218652",
"0.7159125",
"0.7102047",
"0.69194615",
"0.6916148",
"0.6838917",
"0.6767421",
"0.6728982",
"0.6684531",
"0.6592431",
"0.6525627",
"0.6522849",
"0.6518431",
"0.6451222",
"0.6449229",
"0.6417483",
"0.63714355",
"0.6250634",
"0.62235165",
"0.62235165",
"0.6201448",
"0.61989766",
"0.61979955",
"0.61757",
"0.6063043",
"0.6059401"
]
| 0.8055633 | 1 |
Writes to either the filesystem or elasticsearch depending on the configuration settings. | def _write_to_datastore(self, index, doc_type, document, login, path):
if self.config['Github']['datastore'] == 'filesystem':
filename = self._generate_filename(doc_type, login)
self._save_file(json.dumps(document), path, filename)
elif self.config['Github']['datastore'] == 'elasticsearch':
self._save_elasticsearch(document, index, doc_type)
elif self.config['Github']['datastore'] == 'both':
filename = self._generate_filename(doc_type, login)
self._save_file(json.dumps(document), path, filename)
self._save_elasticsearch(document, index, doc_type)
else:
            error_msg = "Unable to save result data for {}. Check " \
                        "configuration file setting: {}" \
.format(doc_type, self.config['Github']['datastore'])
self.logger.error(error_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _save_elasticsearch(self, json_response, index, doc_type):\n try:\n _ = self._ensure_es_index(index)\n data = self.elasticsearch.index(index=index,\n doc_type=doc_type,\n body=json.dumps(json_response))\n self.elasticsearch.indices.refresh(index=index)\n except TransportError as error_msg:\n self.logger.error('%s triggered while trying to index type %s with body: %s',\n error_msg.error, doc_type, json.dumps(json_response))\n return False\n self.logger.debug(\"Document added to index '%s' with type '%s'. Document: %s which \" \\\n \"returned data: %s\", index, doc_type, json.dumps(json_response), data)\n return True",
"def set_in_index(document, index, type):\n response = None\n\n #Try 3 times to store the document in ES, each time picking a random ES node address in case of failure\n for retries in range(3):\n try:\n log('ES Set Request :: ' + json.dumps(document) + ' : ' + index + ':' + type)\n response = es.index(index=index, doc_type=type, id=document['id'], body=document)\n log(\"ES Set Response :: \" + json.dumps(response))\n except ImproperlyConfigured:\n log(\"ES ImproperlyConfigured!\" + traceback.format_exc())\n continue\n except ElasticsearchException:\n log(\"ES ElasticsearchException!\" + traceback.format_exc())\n continue\n except TransportError:\n log(\"ES TransportError!\" + traceback.format_exc())\n continue\n except NotFoundError:\n log(\"ES NotFoundError!\" + traceback.format_exc())\n continue\n except ConflictError:\n log(\"ES ConflictError!\" + traceback.format_exc())\n continue\n except RequestError:\n log(\"ES RequestError!\" + traceback.format_exc())\n continue\n except SerializationError:\n log(\"ES SerializationError!\" + traceback.format_exc())\n continue\n except ConnectionError:\n log(\"ES ConnectionError!\" + traceback.format_exc())\n continue\n except Exception:\n log(\"ES Exception!\" + traceback.format_exc())\n continue\n finally:\n log(\"Total number of ES write attempts: \" + str(retries + 1))\n #Exit for loop if ES transaction is successful otherwise pick another node and continue retrying\n break\n\n if response is None or response == '':\n return 'false'\n else:\n return 'true'",
"def publish_impl(self) -> None:\n\n LOGGER.warn('ElasticsearchPublisher is being deprecated in favor of using SearchMetadatatoElasticasearchTask\\\n which publishes ES metadata with mappings compatible with amundsensearch >= 4.0.0')\n\n actions = [json.loads(line) for line in self.file_handler.readlines()]\n # ensure new data exists\n if not actions:\n LOGGER.warning(\"received no data to upload to Elasticsearch!\")\n return\n\n # Convert object to json for elasticsearch bulk upload\n # Bulk load JSON format is defined here:\n # https://www.elastic.co/guide/en/elasticsearch/reference/6.2/docs-bulk.html\n bulk_actions = []\n cnt = 0\n\n # create new index with mapping\n self.elasticsearch_client.indices.create(index=self.elasticsearch_new_index, body=self.elasticsearch_mapping)\n\n for action in actions:\n index_row = dict(index=dict(_index=self.elasticsearch_new_index))\n action['resource_type'] = self.elasticsearch_type\n\n bulk_actions.append(index_row)\n bulk_actions.append(action)\n cnt += 1\n if cnt == self.elasticsearch_batch_size:\n self.elasticsearch_client.bulk(bulk_actions)\n LOGGER.info('Publish %i of records to ES', cnt)\n cnt = 0\n bulk_actions = []\n\n # Do the final bulk actions\n if bulk_actions:\n self.elasticsearch_client.bulk(bulk_actions)\n\n # fetch indices that have {elasticsearch_alias} as alias\n elasticsearch_old_indices = self._fetch_old_index()\n\n # update alias to point to the new index\n actions = [{\"add\": {\"index\": self.elasticsearch_new_index, \"alias\": self.elasticsearch_alias}}]\n\n # delete old indices\n delete_actions = [{\"remove_index\": {\"index\": index}} for index in elasticsearch_old_indices]\n actions.extend(delete_actions)\n\n update_action = {\"actions\": actions}\n\n # perform alias update and index delete in single atomic operation\n self.elasticsearch_client.indices.update_aliases(update_action)",
"def test_write(self, fake_Session):\n fake_resp = MagicMock()\n fake_session = MagicMock()\n fake_session.post.return_value = fake_resp\n fake_Session.return_value = fake_session\n es = elasticsearch.ElasticSearch(server='8.8.8.8',\n user='alice',\n password='iLoveDogs',\n doc_type='someLogCategory')\n\n es.write(document='{\"some\":\"JSON\"}')\n\n self.assertTrue(fake_resp.raise_for_status.called)",
"def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )",
"def push_to_es(sc, es_write_conf):\n din_trainready_with_kw_ctr = 'din_testing_ucdocs_09112020_gdin'\n hive_context = HiveContext(sc)\n df = hive_context.sql('select * from {}'.format(din_trainready_with_kw_ctr))\n df = df.withColumn('kw_ctr', create_map([col('keyword'), col('ctr')]))\n uckey_window = Window.partitionBy('uckey')\n df = df.withColumn('_kws_0', collect_list('kw_ctr').over(uckey_window))\n df = df.dropDuplicates(['uckey'])\n df = df.withColumn('kws', udf(lambda x: dict(kv for _map in x for kv in _map.items()), MapType(StringType(), StringType()))('_kws_0'))\n rdd = df.rdd.map(lambda x: format_data(x, 'ucdoc'))\n rdd.saveAsNewAPIHadoopFile(\n path='-',\n outputFormatClass=\"org.elasticsearch.hadoop.mr.EsOutputFormat\",\n keyClass=\"org.apache.hadoop.io.NullWritable\",\n valueClass=\"org.elasticsearch.hadoop.mr.LinkedMapWritable\",\n conf=es_write_conf)",
"def write_to_index(self,write_dict):\n self.__mode = self.WRITE_MODE\n if not self.__storage:\n self.__load_index()\n try:\n for key,value in write_dict.iteritems():\n self.__storage[key]=value\n except Exception,e:\n print e\n self.__storage = None\n return False\n\n self.__close_storage()\n return True",
"def write(cls, products, values, *args):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n rv = super(Product, cls).write(products, values, *args)\n IndexBacklog.create_from_records(products)\n return rv",
"def save(self, *args, **kwargs):\n # create the index string\n message = str()\n data = kwargs.get(\"data\")\n ts = kwargs.get(\"ts\")\n # prepare the doc for indexing\n doc = dict()\n doc['_type'] = kwargs.get(\"type\")\n doc['info'] = data\n doc['@timestamp'] = datetime.datetime(*ts[:6])\n try:\n exists = self.es.indices.exists(kwargs.get(\"index\"))\n if not exists:\n map_st, map_msg = self.build_mapping(**kwargs)\n if not map_st:\n return map_st, map_msg\n res = self.es.index(\n index=kwargs.get(\"index\"),\n doc_type=doc['_type'],\n body=doc, # message\n timestamp=datetime.datetime.utcnow(), # set to current time\n consistency='one', # do not wait for quorum / all shards\n replication='async', # async\n ttl=ELASTICSEARCH_TTL) # as defined in settings\n return True, res\n except TransportError, e:\n # fail silently - just log and die ...\n message = 'Error in indexing, host: {}, unable to index'.format(\n ELASTICSEARCH_CONN)\n if ADD_LOG_FAILURES:\n LOGGER.exception(e)\n return False, message",
"def _handle_write(self):\n pass",
"def write_search_index(self, search_index):\n self.logger.info('writing search index')\n with tempfile.NamedTemporaryFile(mode='w', dir=str(self.output_path),\n encoding='utf-8',\n delete=False) as index:\n try:\n json.dump(search_index, index,\n check_circular=False, separators=(',', ':'))\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o664)\n os.replace(index.name, str(self.output_path / 'packages.json'))",
"def write(cls, templates, values, *args):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n Product = Pool().get('product.product')\n\n rv = super(Template, cls).write(templates, values, *args)\n\n products = []\n for template in templates:\n products.extend([Product(p) for p in template.products])\n IndexBacklog.create_from_records(products)\n return rv",
"def save(self):\n logging.debug(\"environment save entered\")\n filename = \"index.json\"\n content_dict = {}\n for fpname in self.footprints:\n # for now, just using the patteern ${footprint_name}-metadata for the name \n content_dict[fpname] = fpname\n content = json.dumps(content_dict)\n index = cf.store_object(self.container, filename, content) \n return True",
"def __init__(self, file, frame, host, port, esindex, indctrl, bulksize, devmode, filemode, outsubDir):\n self.file = file\n self.frame = frame\n self.host = host\n self.port = port\n self.index = esindex\n self.indctrl = indctrl\n self.bulksize = bulksize\n self.bulknum = 0\n self.devmode = devmode\n self.filemode = filemode\n self.esdocs = list()\n self.outsubDir = outsubDir\n self.numberOfFilesInSubDir = 300\n self.openedFilesInSubDir = 0\n self.currentSubDir = 1\n self.writtenDocuments = 0\n if self.devmode > 0:\n self.doccounter = 0\n if self.filemode:\n self._openFile()\n #self.of = open('output.json', 'w')\n else:\n try:\n h1 = client.HTTPConnection(self.host, self.port)\n h1.connect()\n h1.close()\n self.of = Elasticsearch([{'host': self.host, 'port': self.port}])\n if not self.of.indices.exists(self.index) is True:\n if self.indctrl is not None:\n self.of.indices.create(index=self.index, body=self.loadjson(self.indctrl))\n else:\n self.of.indices.create(index=self.index)\n except Exception as inst:\n exit(\"Error: \" + inst.args[1])",
"def handle(self, **options):\n using = options.get(\"using\")\n if not isinstance(connections[using].get_backend(), SolrSearchBackend):\n raise ImproperlyConfigured(\"'%s' isn't configured as a SolrEngine\" % using)\n\n schema_xml = self.build_template(\n using=using, template_filename=Command.schema_template_loc\n )\n solrcfg_xml = self.build_template(\n using=using, template_filename=Command.solrcfg_template_loc\n )\n\n filename = options.get(\"filename\")\n configure_directory = options.get(\"configure_directory\")\n reload_core = options.get(\"reload_core\")\n\n if filename:\n self.stdout.write(\n \"Trying to write schema file located at {}\".format(filename)\n )\n self.write_file(filename, schema_xml)\n\n if reload_core:\n connections[using].get_backend().reload()\n\n if configure_directory:\n self.stdout.write(\n \"Trying to configure core located at {}\".format(configure_directory)\n )\n\n managed_schema_path = os.path.join(configure_directory, \"managed-schema\")\n\n if os.path.isfile(managed_schema_path):\n try:\n os.rename(managed_schema_path, \"%s.old\" % managed_schema_path)\n except OSError as exc:\n raise CommandError(\n \"Could not rename old managed schema file {}: {}\".format(\n managed_schema_path, exc\n )\n )\n\n schema_xml_path = os.path.join(configure_directory, \"schema.xml\")\n\n try:\n self.write_file(schema_xml_path, schema_xml)\n except EnvironmentError as exc:\n raise CommandError(\n \"Could not configure {}: {}\".format(schema_xml_path, exc)\n )\n\n solrconfig_path = os.path.join(configure_directory, \"solrconfig.xml\")\n\n try:\n self.write_file(solrconfig_path, solrcfg_xml)\n except EnvironmentError as exc:\n raise CommandError(\n \"Could not write {}: {}\".format(solrconfig_path, exc)\n )\n\n if reload_core:\n core = settings.HAYSTACK_CONNECTIONS[using][\"URL\"].rsplit(\"/\", 1)[-1]\n\n if \"ADMIN_URL\" not in settings.HAYSTACK_CONNECTIONS[using]:\n raise ImproperlyConfigured(\n \"'ADMIN_URL' must be specified in the HAYSTACK_CONNECTIONS\"\n \" for the %s backend\" % using\n )\n if \"URL\" not in settings.HAYSTACK_CONNECTIONS[using]:\n raise ImproperlyConfigured(\n \"'URL' must be specified in the HAYSTACK_CONNECTIONS\"\n \" for the %s backend\" % using\n )\n\n try:\n self.stdout.write(\"Trying to reload core named {}\".format(core))\n resp = requests.get(\n settings.HAYSTACK_CONNECTIONS[using][\"ADMIN_URL\"],\n params={\"action\": \"RELOAD\", \"core\": core},\n )\n\n if not resp.ok:\n raise CommandError(\n \"Failed to reload core – Solr error: {}\".format(resp)\n )\n except CommandError:\n raise\n except Exception as exc:\n raise CommandError(\"Failed to reload core {}: {}\".format(core, exc))\n\n if not filename and not configure_directory and not reload_core:\n self.print_stdout(schema_xml)",
"def write(self, object, content_type, to_file):\n return to_file",
"def storage(self, datastore=None):\n # usually check the datastore attribute on request (set on GET/HEAD)\n request = get_current_request()\n if self.read is not None and request and request.datastore == 'elasticsearch':\n return self.read\n\n # check the datastore specified by Connection (not always used)\n if datastore is not None:\n if datastore in self.used_datastores:\n if self.used_datastores[datastore] is None:\n raise HTTPInternalServerError('Forced datastore %s is not'\n ' configured' % datastore)\n return self.used_datastores[datastore]\n else:\n raise HTTPInternalServerError('Invalid forced datastore %s. Must be one of: %s'\n % (datastore, list(self.used_datastores.keys())))\n # return write as a fallback\n return self.write",
"def WriteHeader(self):\n mapping = {\n self._doc_type: {\n u'_timestamp': {\n u'enabled': True,\n u'path': u'datetime',\n u'format': u'date_time_no_millis'},\n }\n }\n # Check if the mappings exist (only create if not there).\n try:\n old_mapping_index = self._elastic_db.get_mapping(self._index_name)\n old_mapping = old_mapping_index.get(self._index_name, {})\n if self._doc_type not in old_mapping:\n self._elastic_db.put_mapping(\n self._index_name, self._doc_type, mapping=mapping)\n except (pyelasticsearch.ElasticHttpNotFoundError,\n pyelasticsearch.exceptions.ElasticHttpError):\n try:\n self._elastic_db.create_index(self._index_name, settings={\n u'mappings': mapping})\n except pyelasticsearch.IndexAlreadyExistsError:\n raise RuntimeError(u'Unable to created the index')\n except requests.exceptions.ConnectionError as exception:\n logging.error(\n u'Unable to proceed, cannot connect to ElasticSearch backend '\n u'with error: {0:s}.\\nPlease verify connection.'.format(exception))\n raise RuntimeError(u'Unable to connect to ElasticSearch backend.')\n\n # pylint: disable=unexpected-keyword-arg\n self._elastic_db.health(wait_for_status=u'yellow')\n\n sys.stdout.write(u'Inserting data')\n sys.stdout.flush()",
"def change_transport(self, doc_dict):\n try:\n if not self.type_exists(doc_dict):\n #if type is never seen, apply mapping for said type\n type_mapping = self.get_mapping_from_type(doc_dict)\n #update metadata\n type_mapping[self.get_type_string(doc_dict)]['_meta'][\n 'created'] = datetime.isoformat(datetime.utcnow())\n mapping_res = self.set_mapping(self.get_type_string(doc_dict), type_mapping)\n if mapping_res.get('ok', False) and mapping_res.get('acknowledged', False):\n #API confirms OK, trust it.\n pillow_logging.info(\n \"Mapping set: [%s] %s\" % (self.get_type_string(doc_dict), mapping_res))\n #manually update in memory dict\n self.seen_types[self.get_type_string(doc_dict)] = {}\n\n if not self.bulk:\n doc_path = self.get_doc_path_typed(doc_dict)\n\n doc_exists = self.doc_exists(doc_dict)\n\n if self.allow_updates:\n can_put = True\n else:\n can_put = not doc_exists\n\n if can_put and not self.bulk:\n res = self.send_robust(doc_path, data=doc_dict, update=doc_exists)\n return res\n except Exception, ex:\n tb = traceback.format_exc()\n pillow_logging.error(\"PillowTop [%(pillow_name)s]: Aliased Elastic Pillow transport change data doc_id: %(doc_id)s to elasticsearch error: %(error)s\\ntraceback: %(tb)s\\n\" %\n {\n \"pillow_name\": self.get_name(),\n \"doc_id\": doc_dict['_id'],\n \"error\": ex,\n \"tb\": tb\n })\n return None",
"def handle_write(self):\n pass",
"def write(self, filename=None, as_type='json'):\n if not filename:\n filename = self.uri\n self.create_output_dir(filename)\n if as_type == 'json':\n with open(filename, 'w') as outfile:\n outfile.write(self.transform_data(outformat=formats.JSON))\n elif as_type == 'shapefile':\n self.data.to_file(filename)\n else:\n raise NotImplementedError('{} not a valid type'.format(as_type))\n return self.uri",
"def build_mapping(self, *args, **kwargs):\n index = kwargs.get(\"index\")\n doc_type = kwargs.get(\"_type\")\n body = kwargs.get(\"mapping\", {})\n if kwargs.get(\"settings\"):\n body.update({'settings': kwargs.get(\"settings\")})\n try:\n self.es.indices.create(index=index, body=body)\n self.es.indices.put_mapping(index=index, body=body,\n doc_type=doc_type)\n return True, \"Index Mapping successful.\"\n except TransportError, e:\n # fail silently - just log and die ...\n message = 'Error in indexing, host: {}, unable to create mapping'.format(\n ELASTICSEARCH_CONN)\n if ADD_LOG_FAILURES:\n LOGGER.exception(e)\n return False, message",
"def write(self):\n self.output_directory.mkdir(parents=True, exist_ok=True)\n parameter_set_files = [pathlib.Path(set_name) for set_name in\n self.parameter_study.coords[_set_coordinate_key].values]\n if self.write_meta and self.provided_output_file_template:\n self._write_meta(parameter_set_files)\n if self.output_file_type == 'h5':\n self._write_dataset()\n elif self.output_file_type == 'yaml':\n self._write_yaml(parameter_set_files)\n else:\n raise ValueError(f\"Unsupported output file type '{self.output_file_type}'\")",
"def write_config(self, config_file):\n \n # write root paths\n \n # write reference data\n \n # write tool paths\n \n pass",
"def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)",
"def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)",
"def write(self, content):\n ...",
"def write_settings(self, output_path):\n j2_env = Environment(loader=FileSystemLoader(DIR_PATH),\n trim_blocks=True)\n\n with open(output_path, 'w') as config_settings:\n try:\n config_settings.write(\n j2_env.get_template(TEMPLATE_PATH).render(\n i18n=self.data.xpath('//Internationalization')[0],\n map_server=self.data.xpath('//MapServer')[0],\n adjacencies=self.data.xpath('//Adjacencies/*'),\n convex=self.data.xpath(\n '//Scoring/ScoreFunctions/ScoreFunction[@id=\"district_convex\"]'\n ),\n mailer=self.data.xpath('//Mailer')[0],\n project=self.data.xpath('//Project')[0],\n google_analytics=self.data.xpath('//GoogleAnalytics'),\n upload=self.data.xpath('//Upload'),\n fix_unassigned=self.data.xpath('//FixUnassigned'),\n max_undos=self.data.xpath('//MaxUndos'),\n leaderboard=self.data.xpath('//Leaderboard'),\n )\n )\n\n return True\n\n except Exception as ex:\n # An error occurred during the processing of the settings file\n logging.warning(traceback.format_exc())\n\n return False",
"def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n eslogger.setLevel(logging.WARN)\n return logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con",
"def write(self, host, index):\n msg = []\n operation = \"WRITE\"\n if not self.create_uid(host, index):\n return False\n url = \"%s%s%s\" % (\"http://\", host, \"/api/put\")\n payload = {\"metric\": METRIC_NAME, \"timestamp\": TIMESTAMP_MILLIS(), \\\n \"value\": METRIC_VAL, \"tags\":{TAGK: \"%s.%d\" % (TAGV, index)}}\n headers = {\"content-type\": \"application/json\"}\n try:\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n if response.status_code == 204:\n LOGGER.debug(\"Value 1 inserted to metric %s\", METRIC_NAME)\n self.process_resp([], operation, \"1\", index)\n return True\n response_dict = json.loads(response.text)\n msg.append(response_dict[\"error\"][\"message\"])\n LOGGER.warning(\"Unable to write 1, error message is %s\", \\\n response_dict[\"error\"][\"message\"])\n self.process_resp(msg, operation, \"0\", index)\n return False\n except requests.exceptions.ConnectionError as ex_message:\n LOGGER.warning(\"Unable to write 1, error message is %s\", str(ex_message))\n self.process_resp([str(ex_message)], operation, \"0\", index)\n return False"
]
| [
"0.5568147",
"0.553713",
"0.53876334",
"0.5316416",
"0.52936685",
"0.5262763",
"0.5184505",
"0.51619303",
"0.51611596",
"0.5145693",
"0.50859374",
"0.508506",
"0.5059363",
"0.50341135",
"0.50315386",
"0.49795464",
"0.4977128",
"0.49740523",
"0.49292436",
"0.49255317",
"0.49227864",
"0.490721",
"0.48854175",
"0.48851585",
"0.4863658",
"0.48418784",
"0.48054388",
"0.48013598",
"0.47917914",
"0.47857392"
]
| 0.6313111 | 0 |
Saves user account information to disk by querying Github GraphQL v4 API. | def save_user(self, user, path=None):
# Check if this user already exists in elasticsearch
index = ''.join(['gh_user-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubUser',
document=user.response,
login=user.login,
path=path)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_accounts(account):\n account.save_account()",
"def save_accounts(account):\n account.save_account()",
"def save_users(user):\n user.save_user()",
"async def github_user_info(self, ctx: commands.Context, username: str) -> None:\n async with ctx.typing():\n user_data = await self.fetch_data(f\"{GITHUB_API_URL}/users/{quote_plus(username)}\")\n\n # User_data will not have a message key if the user exists\n if \"message\" in user_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=f\"The profile for `{username}` was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n org_data = await self.fetch_data(user_data[\"organizations_url\"])\n orgs = [f\"[{org['login']}](https://github.com/{org['login']})\" for org in org_data]\n orgs_to_add = \" | \".join(orgs)\n\n gists = user_data[\"public_gists\"]\n\n # Forming blog link\n if user_data[\"blog\"].startswith(\"http\"): # Blog link is complete\n blog = user_data[\"blog\"]\n elif user_data[\"blog\"]: # Blog exists but the link is not complete\n blog = f\"https://{user_data['blog']}\"\n else:\n blog = \"No website link available\"\n\n embed = discord.Embed(\n title=f\"`{user_data['login']}`'s GitHub profile info\",\n description=f\"```{user_data['bio']}```\\n\" if user_data[\"bio\"] else \"\",\n colour=discord.Colour.blurple(),\n url=user_data[\"html_url\"],\n timestamp=datetime.strptime(user_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n )\n embed.set_thumbnail(url=user_data[\"avatar_url\"])\n embed.set_footer(text=\"Account created at\")\n\n if user_data[\"type\"] == \"User\":\n\n embed.add_field(\n name=\"Followers\",\n value=f\"[{user_data['followers']}]({user_data['html_url']}?tab=followers)\"\n )\n embed.add_field(\n name=\"Following\",\n value=f\"[{user_data['following']}]({user_data['html_url']}?tab=following)\"\n )\n\n embed.add_field(\n name=\"Public repos\",\n value=f\"[{user_data['public_repos']}]({user_data['html_url']}?tab=repositories)\"\n )\n\n if user_data[\"type\"] == \"User\":\n embed.add_field(\n name=\"Gists\",\n value=f\"[{gists}](https://gist.github.com/{quote_plus(username, safe='')})\"\n )\n\n embed.add_field(\n name=f\"Organization{'s' if len(orgs)!=1 else ''}\",\n value=orgs_to_add if orgs else \"No organizations.\"\n )\n embed.add_field(name=\"Website\", value=blog)\n\n await ctx.send(embed=embed)",
"def save_user(self):\n User.user_list.append(self)\n\n # finding a user's credentials",
"def save_credentials(credentials):\n credentials. save_details()",
"def save_user(user):\n User.save_user(user)",
"def save(self):\n # EXERCISE:\n # - save self.access_token, self.user_id, self.save_message to access token file AccessData.ACCESS_TOKEN_FILE\n # @see http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python\n# TODO ==> INSERT CODE HERE <==\n\n logger.debug('saved access token in file %s' % (AccessData.ACCESS_TOKEN_FILE))",
"async def register_user(host, port, entry_field):\n\n async with get_connection(host, port) as connection:\n reader, writer = connection\n nickname = entry_field.get()\n\n await reader.readline()\n\n writer.write('\\n'.encode())\n await writer.drain()\n\n await reader.readline()\n\n formatted_nickname = nickname.replace('\\n', '')\n writer.write(f'{formatted_nickname}\\n'.encode())\n await writer.drain()\n\n data = await reader.readline()\n user_credentials = json.loads(data)\n account_hash = user_credentials.get('account_hash')\n\n writer.close()\n await writer.wait_closed()\n\n async with aiofiles.open('user_token.txt', 'w') as f:\n await f.write(account_hash)",
"def store_credentials(user, access_token, access_secret):\n with open(\"access.json\", \"w\") as f:\n json.dump({user : {\"access_token\": access_token, \n \"access_secret\": access_secret} }, f)",
"def saveUser(self):\n self.user[\"Video\"] = \"\"\n with open(self.user_file, \"w+\") as json_file:\n json.dump(self.user, json_file, indent=4)",
"def save_user(username, data):\n\n hashed_username = base64.b64encode(Cryptography.hash(username).digest()).decode()\n\n file = open(getcwd() + Database.__DB_FILENAME, 'a')\n iv, ciphered_data = Cryptography.cipher(Cryptography.get_passphrase(), data)\n file.write(hashed_username + ':' + ciphered_data.hex() + '.' + iv.hex() + '\\n')\n file.flush()\n file.close()",
"def save_credentials(self):\n Stores.account_store.append(self.register_stores())",
"def SaveData(self):\n \n try:\n with open(self.users_file, 'r+') as outfile:\n json.dump(self.user_db, outfile, indent=4)\n outfile.truncate()\n except:\n messagebox.showerror('Error',\n f'{self.users_file} could not be accessed.' \\\n 'New user information won\\'t be saved')",
"def _save_credentials(self):\n from .. import TOKENS\n credentials = os.path.join(TOKENS, \"drive.json\")\n self._gauth.SaveCredentialsFile(credentials)",
"def save(self):\n with open(DEFAULT_DATA_FILE, \"r\") as f:\n data = json.load(f)\n\n if self.user.is_valid_index():\n data['users'][self.user.data_index]['balance'] = self.user.balance\n if self.user.record:\n for i in self.user.record:\n data['users'][self.user.data_index]['record'].append(i)\n data['isLocked'] = self.locked\n with open(DEFAULT_DATA_FILE, \"w\") as f:\n json.dump(data, f, indent=4)",
"def write_user_data(accounts, budget):\n\n print('Your Accounts\\n------------------------\\n\\n')\n\n for idx, account in enumerate(accounts):\n account_values = {\n 'idx': idx,\n 'name': account['accountName'],\n 'display_name': account['fiLoginDisplayName'],\n 'currentBalance': account['currentBalance']\n }\n print('{idx}: {name} ({display_name}) {currentBalance}'.format_map(account_values))\n\n account_indicies = input('\\n\\nSelect an account by number. Use commas for multiple accounts (e.g. 1, 9): ')\n\n print('\\nYour current expense budget (from Mint): {0}\\n\\n'.format(int(get_expenses(budget))))\n monthly_expenses = input('\\nInput your monthly expenses: ')\n account_indicies = map(lambda x: int(x), account_indicies.split(','))\n monthly_expenses = float(monthly_expenses)\n\n user_data = {\n 'account_ids': [accounts[idx]['accountId'] for idx in account_indicies],\n 'monthly_expenses': monthly_expenses\n }\n\n with open(get_user_data_path(), 'w') as outfile:\n json.dump(user_data, outfile)\n\n return user_data",
"def save_to_db(self):\n # update\n if self.user_db:\n self.db.session.query(UserDB).filter(UserDB.login == self.params['login']).\\\n update({'access_token': self.params['access_token'],\n# 'social_net': self.params['social_net'] or 'social_net',\n 'profile_url': self.params.get('profile_url', None),\n 'fio': self.params.get('name', None),\n 'email': self.params.get('email', None)},\n synchronize_session='fetch')\n self.db.commit()\n log.debug('Updated social user: %s', self.params['login'],)\n # create\n else:\n user = UserDB(self.params['login'],\n self.params['email'],\n fio = self.params['name'],\n avatar = '',\n access_token = self.params['access_token'],\n social_net = self.params['social_net'],\n profile_url = self.params['link']\n )\n\n self.db.create(user)\n log.debug('Social user <%s> created', self.params['login'])\n return {'success': True}",
"def register_new_user():\n\n username = input('Write down your Instagram username:\\n>> ')\n password = input('Write down your instagram password:\\n>> ')\n\n read_write_to_json(json_file, {'username': username, 'password': password}, 'w')",
"def write_user(self, _user):\n try:\n self.conn_cursor.execute(\"INSERT INTO users (id,bank) VALUES (?, ?)\", (_user.id, _user.bank))\n except sqlite3.IntegrityError:\n pass\n self.conn_cursor.execute(\"UPDATE users SET bank=? WHERE id=?\", (_user.bank, _user.id ))",
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def _save_user(self, user):\n self.firebase.patch(f'/{self.USERS_KEY}', {str(user.id): user.username})",
"def register_user():\n username = input(\"What's your name, dear? \")\n try:\n with open(filename, 'w') as file_obj:\n json.dump(username, file_obj)\n print(\"Hello, \" + username + \", we'll make sure to remember you!\")\n except FileNotFoundError:\n print(\"Sorry, we have a temporary issue, we can't register you yet.\")",
"def save_users(users):\n with open(STORAGE_PATH, \"wb\") as fp:\n pickle.dump(users, fp)",
"def save_user_db(file_path: str, user_db: dict) -> None:\n # In case directory is not yet existed.\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n with open(file_path, 'w') as f:\n print(f\"[I/O] Saving user data...{file_path}\")\n json.dump(user_db, f)",
"def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)",
"def save_credentials(credentials):\n Credentials.save_credentials(credentials)",
"def setuser(repo, username, email, password):\n print('Repo: %s' % repo)\n print('Username: %s' % username)\n print('E-Mail: %s' % email)\n print('Password: %s' % ('*' * len(password)))",
"async def github(self, ctx: commands.Context, *, path: str):\n user, _, repo = path.replace(' ', '/', 1).partition('/')\n if repo:\n async with self.bot.session.get(\n f\"https://api.github.com/repos/{user}/{repo}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=data['full_name'],\n description=f\"stars: {data['stargazers_count']} forks: {data['forks_count']}\\n\"\n f\"language: {data['language']} license: {data['license']['name'] if data['license'] else 'no'}\\n\"\n +(f\"homepage: {data['homepage']}\" if data['homepage'] else ''),\n url=data['html_url']\n ).set_author(\n name=data['owner']['login'],\n url=data['owner']['html_url'],\n icon_url=data['owner']['avatar_url']\n ).set_thumbnail(\n url=data['owner']['avatar_url']\n ).add_field(\n name=\"Description\",\n value=data['description']\n )\n await ctx.send(embed=embed)\n else:\n async with self.bot.session.get(\n f\"https://api.github.com/users/{user}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=f\"{data['name']} ({data['login']})\",\n description=f\"repos: {data['public_repos']} gists: {data['public_gists']}\\n\"\n f\"followers: {data['followers']} following: {data['following']}\\n\"\n f\"location: {data['location']}\",\n url=data['html_url']\n ).set_thumbnail(\n url=data['avatar_url']\n ).add_field(\n name=\"Bio\",\n value=data['bio']\n ).add_field(\n name=\"Contact\",\n value=''.join([\n (f\"email: [{data['email']}](mailto:{data['email']})\\n\" if data['email'] else ''),\n (f\"twitter: [{data['twitter_username']}](https://twitter.com/{data['twitter_username']})\\n\" if data['twitter_username'] else ''),\n (f\"company: {data['company']}\\n\" if data['company'] else ''),\n \n ]) or 'no contact avalible'\n ).set_footer(\n text=f\"id: {data['id']}\"\n )\n await ctx.send(embed=embed)",
"def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()"
]
| [
"0.5895206",
"0.5895206",
"0.5842903",
"0.5794815",
"0.5791631",
"0.5766027",
"0.5755653",
"0.5744066",
"0.57106656",
"0.57064176",
"0.5683181",
"0.56528974",
"0.5606123",
"0.55971473",
"0.5578096",
"0.55295384",
"0.5487997",
"0.54804254",
"0.5474375",
"0.5470711",
"0.5453501",
"0.5352945",
"0.53508",
"0.5341563",
"0.53375214",
"0.53348124",
"0.53310305",
"0.5307718",
"0.52877766",
"0.52796006"
]
| 0.61525184 | 0 |
Saves a list of commit comments made by this user. | def save_commit_comments(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':commitComments:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
            commit_comments = user.commitComments(first=100, after=end_cursor)
        else:
            commit_comments = user.commitComments(first=100)
        if not commit_comments: # False when errors occurred (check log file)
return False
while True:
if commit_comments['data']['user']['commitComments']['edges']:
index = ''.join(['gh_commit_comments-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubCommitComments',
document=commit_comments,
login=user.login,
path=path)
has_next_page = commit_comments['data']['user']['commitComments']['pageInfo']['hasNextPage']
end_cursor = commit_comments['data']['user']['commitComments']['pageInfo']['endCursor']
if has_next_page:
                    commit_comments = user.commitComments(first=100, after=end_cursor)
                else:
                    # Cache the end_cursor where we last collected data
                    self.redis.set(''.join(['gh:', user.login, ':commitComments:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)",
"def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['message'] =\"message\"\n data['author'] = \"author\"\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)",
"def commit_data(self, file_list, comment=None):\n if not comment:\n comment = 'Svn2: autocommit'\n\n # First try to update\n if not self.Update():\n self.logger.error(\"Failed to update svn repository, refusing to commit changes\")\n return\n\n #FIXME - look for conflicts?\n\n for fname in file_list:\n stat = self.client.status(fname)\n self.client.add([f.path for f in stat \\\n if f.text_status == pysvn.wc_status_kind.unversioned])\n try:\n self.revision = self.client.checkin([self.datastore], comment,\n recurse=True)\n self.revision = self.client.update(self.datastore, recurse=True)[0]\n self.logger.info(\"Svn2: Commited changes. At %s\" %\n self.revision.number)\n except Exception, err:\n # try to be smart about the error we got back\n details = None\n if \"callback_ssl_server_trust_prompt\" in str(err):\n details = \"SVN server certificate is not trusted\"\n elif \"callback_get_login\" in str(err):\n details = \"SVN credentials not cached\"\n\n if details is None:\n self.logger.error(\"Svn2: Failed to commit changes\",\n exc_info=1)\n else:\n self.logger.error(\"Svn2: Failed to commit changes: %s\" %\n details)",
"def save_comments():\n potential_deal_id = int(request.form.get(\"id\"))\n action = request.form.get(\"action\")\n if action.lower() == \"none\":\n action = None\n comments = request.form.get(\"comments\")\n db_handler = DBHandler()\n db_handler.update_by_id(potential_deal_id, action, comments)\n # return redirect(url_for(\"home\"))\n return jsonify({\"success\": True}), 200",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def save_comment(self):\n self.save()",
"def comments(self, comments):\n\n self.container['comments'] = comments",
"def save_gist_comments(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':gistComments:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n gist_comments = u.gistComments(first=100, after=end_cursor)\n else:\n gist_comments = u.gistComments(first=100)\n if not gist_comments:\n return False\n while True:\n if gist_comments['data']['user']['gistComments']['edges']:\n index = ''.join(['gh_gist_comments-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubGistComments',\n document=gist_comments,\n login=user.login,\n path=path)\n has_next_page = gist_comments['data']['user']['gistComments']['pageInfo']['hasNextPage']\n end_cursor = gist_comments['data']['user']['gistComments']['pageInfo']['endCursor']\n if has_next_page:\n gist_comments = u.gistComments(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':gistComments:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def save_issue_comments(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':issueComments:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n issue_comments = u.issueComments(first=100, after=end_cursor)\n else:\n issue_comments = u.issueComments(first=100)\n\n if not issue_comments:\n return False\n\n while True:\n if issue_comments['data']['user']['issueComments']['edges']:\n index = ''.join(['gh_issue_comments-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubIssueComments',\n document=issue_comments,\n login=user.login,\n path=path)\n has_next_page = issue_comments['data']['user']['issueComments']['pageInfo']['hasNextPage']\n end_cursor = issue_comments['data']['user']['issueComments']['pageInfo']['endCursor']\n if has_next_page:\n issue_comments = u.issueComments(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':issueComments:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def save(self):\n cur = get_cursor()\n args = (self.associated_page, self.associated_name, self.title,\n self.author, self.author_mail, self.comment_body, self.pub_date)\n if self.comment_id is None:\n cur.execute('''insert into comments (associated_page, associated_name,\n title,\n author, author_mail,\n comment_body, pub_date)\n values (?, ?, ?, ?, ?, ?, ?)''', args)\n self.comment_id = cur.lastrowid\n else:\n args += (self.comment_id,)\n cur.execute('''update comments set associated_page=?,\n associated_name=?,\n title=?, author=?,\n author_mail=?, comment_body=?,\n pub_date=? where comment_id = ?''', args)\n cur.close()",
"def commits(self):\r\n url = '{0}/commits'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def committees():\n os_committees = Committee()\n os_committees.query()\n os_committees.parse()\n wiki_functions.write_to_csv_file_for_DataTransfer(os_committees,\n os_committees.table)",
"def save_historical_submission_comments(list_of_dictionary_submissions, file_name):\n all_comments_list = []\n submission_count = 1\n\n for submission_dict in list_of_dictionary_submissions:\n print('saving comments from submission', submission_count, '/', len(list_of_dictionary_submissions))\n submission_count += 1\n submission = (REDDIT.submission(id=submission_dict['id']))\n\n submission.comments.replace_more(limit=None)\n for comment in submission.comments.list():\n temp_dict = {'body': comment.body, 'comment_id': comment, 'author': comment.author,\n 'created_utc': comment.created_utc, 'permalink': comment.permalink,\n 'link_id': comment.link_id, 'score': comment.score}\n all_comments_list.append(temp_dict)\n print('total comments: ', len(all_comments_list))\n\n comments_df = pd.DataFrame(all_comments_list, columns=['body', 'comment_id', 'author', 'created_utc',\n 'permalink', 'link_id', 'score'])\n\n print(comments_df)\n\n print('saving comments to file:', file_name, '...')\n comments_df.to_csv(file_name)\n print('done.')",
"def save_committees(event, committees):\n for committee in committees:\n name = committee.name\n organization = Organization.objects.get(id=committee.id)\n entity_type = \"organization\"\n new_committee = EventParticipant(\n name=name,\n event=event,\n organization=organization,\n entity_type=entity_type\n )\n new_committee.save()",
"def update_comments(self):\n self.nb_comments = self.comments.count()\n self.save()",
"def update_comments(comments, account_name, post_url):\n inc_number = 0\n for index, comment in comments.iterrows():\n # increment + 1\n inc_number = inc_number + 1\n # get preprocessed comment\n comment_spaces, comment_no_stopwords = preprocess_comment(comment['comment'])\n # get sentiment score from comment\n sentiment_score = get_sentiment(comment_no_stopwords)\n # update collection with comments\n collection.update_one(\n {\n 'Codename': account_name,\n 'Posts.URL': post_url\n },\n {\n '$push': {\n 'Posts.$.All Comments': {'comment_id': inc_number,\n 'user': comment['user'],\n 'comment': comment['comment'],\n 'comment_no_stopwords': comment_no_stopwords,\n 'comment_spaces': comment_spaces,\n 'like': comment['like'],\n 'sentiment_score': sentiment_score\n }\n }\n }\n )",
"def all_user_comments(username):\n return commentslist",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def save_changes(self, objs):\n # Save to the database and possibly write tags.\n for ob in objs:\n if ob._dirty:\n self._log.debug('saving changes to {}', ob)\n ob.try_sync(ui.should_write(), ui.should_move())",
"def save_action_comments(n_clicks, derived_viewport_data):\n if n_clicks > 0:\n records = [\n {\n \"PotentialDealID\": record[\"PotentialDealID\"],\n \"Action\": record[\"Action\"],\n \"Comment\": record[\"Comment\"]\n }\n for record in derived_viewport_data\n ]\n DBApi.get_instance().save_actions_comments(records=records)\n return [\"Data saved successfully\", True, 5000]\n return [\"\", False, 5000]",
"def comments(self):\r\n return RepoCommitsComments(self)",
"def commit(self,form_list):\n raise NotImplementedError",
"def save_comments(self, videoId):\n comm_obj = self.get_comment_obj(videoId)# need to get the id \n\n file_exists = os.path.isfile(self.path)\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n if not file_exists:\n writer_top.writerow(['etag'] + ['videoId'] + ['commentId'] + ['text'] + ['author'] + ['like'] + ['time'])\n f.close()\n\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n for i in comm_obj['items']:\n\n result_row = [[i['etag'], i['snippet']['videoId'], i['snippet']['topLevelComment']['id'], i['snippet']['topLevelComment']['snippet']['textDisplay'], i['snippet']['topLevelComment']['snippet']['authorDisplayName'], i['snippet']['topLevelComment']['snippet']['likeCount'], i['snippet']['topLevelComment']['snippet']['publishedAt']]]\n writer_top.writerows(result_row)\n f.close()",
"def save_comments(doc):\n global collection\n if doc.get('ida_comments', ''):\n print('Comments already extracted for document [%s], skipping.' %\n doc['id'])\n else:\n print('Saving comments for document [%s].' % doc['id'])\n asm = open_asm(doc['id'])\n #asm = [to_utf(line) for line in asm]\n comments = filter_comments(asm)\n doc['ida_comments'] = comments\n collection.save(doc)",
"def comments(self):\r\n return RepoCommitsComments(self.parent)",
"def commit(self):\n params = {'commit': 'true'}\n\n return self.client.post(\n self._get_collection_url('update/json'), params=params)"
]
| [
"0.63125753",
"0.6055158",
"0.57577026",
"0.55333656",
"0.5506833",
"0.5483902",
"0.5372578",
"0.5357004",
"0.5353247",
"0.5295684",
"0.529495",
"0.5247602",
"0.5233638",
"0.5223587",
"0.5222193",
"0.5211724",
"0.51945955",
"0.5191858",
"0.51756215",
"0.51756215",
"0.51756215",
"0.51756215",
"0.51461965",
"0.514579",
"0.5125639",
"0.5118519",
"0.51148146",
"0.5104814",
"0.50940084",
"0.5087451"
]
| 0.65404683 | 0 |
Saves a list of gist comments made by this user. | def save_gist_comments(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':gistComments:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
gist_comments = user.gistComments(first=100, after=end_cursor)
else:
gist_comments = user.gistComments(first=100)
if not gist_comments:
return False
while True:
if gist_comments['data']['user']['gistComments']['edges']:
index = ''.join(['gh_gist_comments-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubGistComments',
document=gist_comments,
login=user.login,
path=path)
has_next_page = gist_comments['data']['user']['gistComments']['pageInfo']['hasNextPage']
end_cursor = gist_comments['data']['user']['gistComments']['pageInfo']['endCursor']
if has_next_page:
gist_comments = user.gistComments(first=100, after=end_cursor)
else:
# Cache the end_cursor where we last collected data
self.redis.set(''.join(['gh:', user.login, ':gistComments:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)",
"def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['message'] =\"message\"\n data['author'] = \"author\"\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)",
"def save_comments():\n potential_deal_id = int(request.form.get(\"id\"))\n action = request.form.get(\"action\")\n if action.lower() == \"none\":\n action = None\n comments = request.form.get(\"comments\")\n db_handler = DBHandler()\n db_handler.update_by_id(potential_deal_id, action, comments)\n # return redirect(url_for(\"home\"))\n return jsonify({\"success\": True}), 200",
"def save_comment(self):\n self.save()",
"def comments(self):\r\n return GistComments(self)",
"def save(self):\n cur = get_cursor()\n args = (self.associated_page, self.associated_name, self.title,\n self.author, self.author_mail, self.comment_body, self.pub_date)\n if self.comment_id is None:\n cur.execute('''insert into comments (associated_page, associated_name,\n title,\n author, author_mail,\n comment_body, pub_date)\n values (?, ?, ?, ?, ?, ?, ?)''', args)\n self.comment_id = cur.lastrowid\n else:\n args += (self.comment_id,)\n cur.execute('''update comments set associated_page=?,\n associated_name=?,\n title=?, author=?,\n author_mail=?, comment_body=?,\n pub_date=? where comment_id = ?''', args)\n cur.close()",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def post(self):\n modified_content = self.request.get('comment_edit')\n comment_id = self.request.get('comment_id')\n comment = Comments.get_by_id(int(comment_id))\n user = self.get_active_user()\n\n if user.key().id() == comment.submitter_id:\n comment.content = modified_content\n comment.put()\n self.redirect('/%s' % str(comment.post_id))\n else:\n self.error(403)",
"def save_gist(gist, path=None):\n start_dir = os.getcwd()\n files = gist['files']\n if len(files) == 1:\n print(\"Single file gist saved to current directory\")\n else:\n path = path if path is not None else 'gist-' + gist['id']\n print(\"Multifile gist saved to new directory:\", path)\n # If directory already exists, move to timestamped backups. \n if os.path.isdir(path):\n timestr = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())\n bkp_path = os.path.join(path+'-old', timestr)\n os.renames(path, bkp_path)\n print(\"Old directory renamed: %s -> %s\" % (path, bkp_path))\n os.mkdir(path) \n os.chdir(path)\n \n try:\n for fname,data in files.iteritems():\n if data['truncated']:\n content = requests.get(data['raw_url']).text\n else:\n content = data['content']\n with open(fname, 'w') as fd:\n fd.write(content)\n print(\"Files:\", files.keys())\n finally:\n if os.getcwd() != start_dir:\n os.chdir(start_dir)",
"def comments(self, comments):\n\n self.container['comments'] = comments",
"def save_comments(doc):\n global collection\n if doc.get('ida_comments', ''):\n print('Comments already extracted for document [%s], skipping.' %\n doc['id'])\n else:\n print('Saving comments for document [%s].' % doc['id'])\n asm = open_asm(doc['id'])\n #asm = [to_utf(line) for line in asm]\n comments = filter_comments(asm)\n doc['ida_comments'] = comments\n collection.save(doc)",
"def save_commit_comments(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':commitComments:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n commit_comments = u.commitComments(first=100, after=end_cursor)\n else:\n commit_comments = u.commitComments(first=100)\n if not commit_comments: # False when errors occured (check log file)\n return False\n while True:\n if commit_comments['data']['user']['commitComments']['edges']:\n index = ''.join(['gh_commit_comments-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubCommitComments',\n document=commit_comments,\n login=user.login,\n path=path)\n has_next_page = commit_comments['data']['user']['commitComments']['pageInfo']['hasNextPage']\n end_cursor = commit_comments['data']['user']['commitComments']['pageInfo']['endCursor']\n if has_next_page:\n commit_comments = u.commitComments(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':commitComments:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def write_comment_to_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n attitude = self._interests[random.choice(post.tags)]\n self.update_positive_and_negative_actions(friend, attitude)\n comment = Comment(attitude, self.unique_id)\n post.add_comment(comment)\n friend.update_relation(self, WRITE_COMMENT)\n friend.append_comment(post, comment)\n # self.update_relation(friend, WRITE_COMMENT)\n break",
"def all_user_comments(username):\n return commentslist",
"def save_issue_comments(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':issueComments:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n issue_comments = u.issueComments(first=100, after=end_cursor)\n else:\n issue_comments = u.issueComments(first=100)\n\n if not issue_comments:\n return False\n\n while True:\n if issue_comments['data']['user']['issueComments']['edges']:\n index = ''.join(['gh_issue_comments-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubIssueComments',\n document=issue_comments,\n login=user.login,\n path=path)\n has_next_page = issue_comments['data']['user']['issueComments']['pageInfo']['hasNextPage']\n end_cursor = issue_comments['data']['user']['issueComments']['pageInfo']['endCursor']\n if has_next_page:\n issue_comments = u.issueComments(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':issueComments:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def update_comments_replied_to(self, comment_id):\n self.checked_comments.append(comment_id)\n with open(\"reddit_comments_replied_to.txt\", \"w\") as f:\n for comment in self.checked_comments:\n f.write(comment + \"\\n\")",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def save_historical_submission_comments(list_of_dictionary_submissions, file_name):\n all_comments_list = []\n submission_count = 1\n\n for submission_dict in list_of_dictionary_submissions:\n print('saving comments from submission', submission_count, '/', len(list_of_dictionary_submissions))\n submission_count += 1\n submission = (REDDIT.submission(id=submission_dict['id']))\n\n submission.comments.replace_more(limit=None)\n for comment in submission.comments.list():\n temp_dict = {'body': comment.body, 'comment_id': comment, 'author': comment.author,\n 'created_utc': comment.created_utc, 'permalink': comment.permalink,\n 'link_id': comment.link_id, 'score': comment.score}\n all_comments_list.append(temp_dict)\n print('total comments: ', len(all_comments_list))\n\n comments_df = pd.DataFrame(all_comments_list, columns=['body', 'comment_id', 'author', 'created_utc',\n 'permalink', 'link_id', 'score'])\n\n print(comments_df)\n\n print('saving comments to file:', file_name, '...')\n comments_df.to_csv(file_name)\n print('done.')",
"async def create_gist(self, ctx: commands.Context, *, inp):\n files_and_names = self.files_regex.split(inp)[:-1]\n # Dict comprehension to create the files 'object'\n files = {name:{\"content\": content+\"\\n\"} for name, content in zip(files_and_names[0::2], files_and_names[1::2])}\n\n req = await self.session.post(\"https://api.github.com/gists\", headers={\"Authorization\":f\"Bearer {ctx.user_obj.github_oauth_token}\"}, json={\"files\":files})\n\n res = await req.json()\n # TODO: Make this more verbose to the user and log errors\n await ctx.send(res.get(\"html_url\", \"Something went wrong.\"))",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def write_submission_to_db(cls, file_name, obj_list):\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n query = \"DELETE FROM `submission`;\"\n c.execute(query)\n\n for obj in obj_list:\n params = [obj.send_date, obj.name, obj.grade, obj.github_link, obj.student_id]\n c.execute(\"INSERT INTO submission (send_date, grade, name, github_link, student_id) VALUES (?, ?, ?, ?, ?)\", params)\n conn.commit()\n conn.close()",
"def delete_comments(redditor):\n\n for index, comment in enumerate(redditor.comments.new(limit=None)):\n print(\"Deleting comment {}\".format(index))\n comment.edit(\"-\")\n comment.delete()",
"def create_comment_file():\n club = read_json()\n comment_dict = {}\n\n for club in clubs:\n comment_dict[club.name] = []\n\n with open('club_comments.json', 'w') as outfile:\n json.dump(comment_dict, outfile)",
"def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)",
"def save_comment(newComment):\n conn = pymongo.Connection(\"localhost\",27017)\n db = conn[\"paperDB\"]\n infoDB = db.infoDB\n record = infoDB.find_one()\n\n record['comment'].append(newComment)\n infoDB.save(record)",
"def save_to_users(self):\n Data.add_data(self.user_data())"
]
| [
"0.6658145",
"0.6442756",
"0.5789015",
"0.5736194",
"0.5645761",
"0.55168533",
"0.5502217",
"0.54077274",
"0.5404561",
"0.5373963",
"0.5331853",
"0.52870333",
"0.5219599",
"0.5178907",
"0.5175677",
"0.5168752",
"0.5137833",
"0.5137833",
"0.5137833",
"0.5137833",
"0.51184213",
"0.51084703",
"0.51027834",
"0.5077096",
"0.5064814",
"0.5058255",
"0.5052238",
"0.5011234",
"0.49666098",
"0.4959893"
]
| 0.646073 | 1 |
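Note: the positive documents in these rows all share one collect-paginate-cache shape: read a cached GraphQL end cursor from Redis, fetch pages of 100 edges until hasNextPage is false, write every page to a timestamp-suffixed index, then cache the final cursor for the next run. The sketch below distills that loop into a generic helper; fetch_page, write_page, and the Redis key layout are assumptions modelled on the code above, not the repository's actual implementation.

import redis

def collect_paginated(r: redis.Redis, login: str, kind: str, fetch_page, write_page) -> bool:
    # Resume from the cursor cached on a previous run, if any (assumed key layout).
    cursor_key = f"gh:{login}:{kind}:endCursor"
    cached = r.get(cursor_key)
    if cached:
        # The documents above splice the cursor into the query pre-quoted.
        page = fetch_page(first=100, after='"' + cached.decode("utf-8") + '"')
    else:
        page = fetch_page(first=100)
    if not page:
        return False  # fetch failed; the caller is expected to check the logs
    while True:
        connection = page["data"]["user"][kind]
        if not connection["edges"]:
            break
        write_page(page)
        page_info = connection["pageInfo"]
        if page_info["hasNextPage"]:
            page = fetch_page(first=100, after=page_info["endCursor"])
        else:
            # Remember where collection stopped so the next run picks up here.
            r.set(cursor_key, page_info["endCursor"])
            break
    return True

With a helper like this, a method such as save_gists roughly reduces to supplying functools.partial(user.gists, orderBy='{direction: DESC, field: CREATED_AT}', privacy='ALL') as fetch_page and a _write_to_datastore wrapper as write_page.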
Saves a list of Gists the user has created. | def save_gists(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':gists:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
gists = user.gists(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}',
privacy='ALL')
else:
gists = user.gists(first=100,
orderBy='{direction: DESC, field: CREATED_AT}',
privacy='ALL')
if not gists:
return False
while True:
if gists['data']['user']['gists']['edges']:
index = ''.join(['gh_gists-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubGists',
document=gists,
login=user.login,
path=path)
has_next_page = gists['data']['user']['gists']['pageInfo']['hasNextPage']
end_cursor = gists['data']['user']['gists']['pageInfo']['endCursor']
if has_next_page:
gists = user.gists(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}',
privacy='ALL')
else:
# Cache the end_cursor where we last collected data
self.redis.set(''.join(['gh:', user.login, ':gists:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(self, gist_data):\n\n _data = json.dumps(Utils.merge_objects(self.__defaults, gist_data))\n\n response = requests.post(\n self.BASE_URL + '/gists',\n data = _data,\n headers = self.__headers\n )\n\n if response.status_code == 201:\n return response.json()\n\n raise GistException(Gist.__get_response_error('Gist cannot created', response))",
"def save_user_unions(*args):\n return _ida_hexrays.save_user_unions(*args)",
"def save_users(user):\n user.save_user()",
"def save_users(self):\n\n User.user_list.append(self)",
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def main(user):\n user = user.strip()\n url = \"https://api.github.com/users/{}/gists\".format(user)\n results, req = get_req(url)\n status = req.status_code\n if status == 200:\n gist_db = pickledb.load('gists.db', True)\n user_data = gist_db.get(user)\n #print(\"user_data:\\n{}\\n\".format(user_data))\n #print(results)\n if results:\n if user_data:\n gist_date = user_data.get(\"gist_date\")\n gist_date_formatted = datetime.strptime(str(gist_date), \"%Y-%m-%dT%H:%M:%SZ\")\n new_request_gist_date = datetime.strptime(results[0][\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n diff_time = (new_request_gist_date-gist_date_formatted).total_seconds()\n #print(diff_time)\n if diff_time <= 0:\n print(\"No new published gists\")\n\n elif diff_time > 0:\n print(\"User has published new gist:\\nLatest Gist:\\n{}\".format(results[0][\"html_url\"]))\n data = {}\n data['user'] = user\n data['gist_date'] = results[0][\"created_at\"]\n gist_db.set(str(user), data)\n\n else:\n print(\"New user, saving user's details and gists\")\n data = {}\n data['user'] = user\n data['gist_date'] = results[0][\"created_at\"]\n gist_db.set(str(user), data)\n else:\n print(\"User has no published gists\")\n\n elif status == 404:\n print(\"Provided Github user doesn't exist\")\n\n else:\n print(\"Error:\\n{}\".format(req.text))",
"def insert_students(ids, fname, lname, db_name='./grades.sqlite3'):\n names_tupple = list(zip(ids, fname, lname, [0] * len(ids)))\n with lite.connect(db_name) as con:\n cur = con.cursor()\n cur.executemany('INSERT OR REPLACE INTO STUDENTS \\\n (pipeline_id, first_name, second_name, cheating_ratio)'\n ' VALUES (?, ?, ?, ?)', names_tupple)\n con.commit()",
"async def create_gist(self, ctx: commands.Context, *, inp):\n files_and_names = self.files_regex.split(inp)[:-1]\n # Dict comprehension to create the files 'object'\n files = {name:{\"content\": content+\"\\n\"} for name, content in zip(files_and_names[0::2], files_and_names[1::2])}\n\n req = await self.session.post(\"https://api.github.com/gists\", headers={\"Authorization\":f\"Bearer {ctx.user_obj.github_oauth_token}\"}, json={\"files\":files})\n\n res = await req.json()\n # TODO: Make this more verbose to the user and log errors\n await ctx.send(res.get(\"html_url\", \"Something went wrong.\"))",
"def save_users(self, path):\n os.makedirs(path, exist_ok=True)\n np.save(os.path.join(path, 'ids'), self.user_ids)",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def save_user(self):\n\n User.user_list.append(self)",
"def list(self):\n\n base_url = ''.join((\n self.BASE_URL + '/users/',\n self.__user_data.get('login') + '/gists',\n ))\n\n response = requests.get(base_url, headers=self.__headers)\n\n if response.status_code == 200:\n return response.json()\n\n raise GistException(Gist.__get_response_error('It is not possible to list files', response))",
"def save_many(self, values, expires_in=None):\n raise NotImplementedError()",
"def gists(self):\r\n return gists.Gists(self)",
"def save_gist(gist, path=None):\n start_dir = os.getcwd()\n files = gist['files']\n if len(files) == 1:\n print(\"Single file gist saved to current directory\")\n else:\n path = path if path is not None else 'gist-' + gist['id']\n print(\"Multifile gist saved to new directory:\", path)\n # If directory already exists, move to timestamped backups. \n if os.path.isdir(path):\n timestr = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())\n bkp_path = os.path.join(path+'-old', timestr)\n os.renames(path, bkp_path)\n print(\"Old directory renamed: %s -> %s\" % (path, bkp_path))\n os.mkdir(path) \n os.chdir(path)\n \n try:\n for fname,data in files.iteritems():\n if data['truncated']:\n content = requests.get(data['raw_url']).text\n else:\n content = data['content']\n with open(fname, 'w') as fd:\n fd.write(content)\n print(\"Files:\", files.keys())\n finally:\n if os.getcwd() != start_dir:\n os.chdir(start_dir)",
"def assists(self, assists):\n\n self._assists = assists",
"def gists_for_user(username):\n\n rs = []\n\n def loop(url=None):\n # build the url\n per_page = 2\n page = 1\n gists_url = url or 'https://api.github.com/users/{username}/gists?per_page={per_page}&page={page}'.format(per_page=per_page, page=page, username=username)\n gists_url += \"&client_id=%s&client_secret=%s\" %(\"3cc58ae648e5bfd676cf\", \"f908a9ddcebdbc36d38c1fe902f98cb12d15c44c\")\n\n try:\n response = requests.get(gists_url)\n except requests.exceptions.Timeout as e:\n time.sleep(1)\n response = requests.get(url)\n except requests.exceptions.HTTPError as err:\n response = type('lamdbaobject', (object,), {})()\n response.ok = false\n \n if response.ok:\n # grab the data response\n rs.extend(response.json())\n\n if response.headers.get('Link', None):\n # parse the for the next url\n links = response.headers['Link'].split(',')\n \n # extra the url\n next_url = re.match('<(.*)>; rel=\"next\"', links[0])\n\n # BONUS: What about huge gists?\n if next_url:\n gists_url = next_url.groups()[0]\n loop(gists_url)\n\n else:\n rs.extend(response.json())\n\n else:\n # BONUS: Handle invalid users?\n return response.json()\n\n return rs\n\n return loop()",
"def test_save_multiple_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n self.assertEqual(len(User.user_list), 2)",
"def save_user(self):\n\n User.user_list.append(self)",
"def save(album_id=None, group_id=None, server=None, photos_list=None, hash=None,\\\n latitude=None, longitude=None, caption=None):\n params = {\n 'album_id': album_id,\n 'group_id': group_id,\n 'server': server,\n 'photos_list': photos_list,\n 'hash': hash,\n 'latitude': latitude,\n 'longitude': longitude,\n 'caption': caption\n }\n result = call('photos.save', **params)\n return parse_response(result)",
"def save(self, *args, **kwargs):\n instance = forms.ModelForm.save(self)\n instance.recipient_set.clear()\n for recipient in self.cleaned_data['members']:\n instance.recipient_set.add(recipient)",
"def save_user(self):\n User.user_list.append(self)",
"def save_user(self):\n User.user_list.append(self)",
"def save(self):\n groups = defaultdict(list)\n for group, user, var in self.items:\n if var.get():\n groups[group].append(user)\n\n save_groups(self.filename, groups)",
"def current_user_saved_albums_add(self, albums=None, **kwargs):\n album_list = map(self._get_album_id, albums or [])\n return self._put(API.MY_ALBUMS.value, ids=\",\".join(album_list), **kwargs)",
"def save(self, *args, **kwargs):\n grupo_vendedor = Group.objects.get_or_create(name=\"vendedor\")[0]\n self.user.groups.add(grupo_vendedor)\n return super(Vendedor, self).save(*args, **kwargs)",
"def push_user(cnt, family_list, items, user, user_json, hof, family_id):\n for item_key in items:\n if item_key == 'AADHAR_ID':\n user.aadhaar_id = items[item_key]\n user_json['AADHAR_ID'] = items[item_key]\n elif item_key == 'NAME_ENG':\n user.name_eng = items[item_key]\n user_json['NAME_ENG'] = items[item_key]\n elif item_key == 'NAME_HND':\n user.name_hnd = items[item_key]\n user_json['NAME_HND'] = items[item_key]\n elif item_key == 'M_ID':\n user.m_id = items[item_key]\n user_json['M_ID'] = items[item_key]\n elif item_key == 'GENDER':\n user.gender = items[item_key]\n user_json['GENDER'] = items[item_key]\n elif item_key == 'DOB':\n date_ = preprocess_date(items[item_key])\n user.dob = date_\n user_json['DOB'] = str(date_)\n if hof:\n user.hof = True\n user_json['HOF'] = True\n else:\n user_json['HOF'] = user.hof\n user.family_id = family_id\n user_json['FAMILY_ID'] = family_id\n user.save()\n if hof:\n bi = BhamashahIndex()\n bi.user = user\n bi.bhamashah_id = items['BHAMASHAH_ID']\n bi.save()\n\n loc = LocHof()\n loc.user = user\n loc.pincode = items['PIN_CODE']\n loc.save()\n\n index_creator(loc, 'location', 'location_index')\n index_creator(bi, 'bhamashah', 'bhamashah_index')\n family_list[cnt] = user_json",
"def save_users(users):\n with open(STORAGE_PATH, \"wb\") as fp:\n pickle.dump(users, fp)",
"def save_to_db(self):\n db = DBConnection()\n db.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING row_to_json(lists)\n \"\"\", (self.user_id, self.heading, self.display_order)\n )\n db.con.commit()\n new_list = db.cur.fetchone()[0]\n self.id = new_list[\"id\"]\n db.close()",
"def test_save_users(self):\n\n self.new_users.save_users() # saving the new user\n self.assertEqual(len(User.user_list), 1)"
]
| [
"0.60818684",
"0.57005566",
"0.5474985",
"0.5431386",
"0.53533036",
"0.5352984",
"0.53463316",
"0.5342834",
"0.52751255",
"0.52674055",
"0.5221041",
"0.5169331",
"0.5146974",
"0.50621134",
"0.50617534",
"0.50604665",
"0.5020252",
"0.50186867",
"0.50097346",
"0.50028855",
"0.5001918",
"0.49932462",
"0.49932462",
"0.49770766",
"0.49644697",
"0.49119332",
"0.48912632",
"0.48910522",
"0.4880823",
"0.48736018"
]
| 0.6503054 | 0 |
Saves a list of issue comments made by this user. | def save_issue_comments(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':issueComments:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
issue_comments = user.issueComments(first=100, after=end_cursor)
else:
issue_comments = user.issueComments(first=100)
if not issue_comments:
return False
while True:
if issue_comments['data']['user']['issueComments']['edges']:
index = ''.join(['gh_issue_comments-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubIssueComments',
document=issue_comments,
login=user.login,
path=path)
has_next_page = issue_comments['data']['user']['issueComments']['pageInfo']['hasNextPage']
end_cursor = issue_comments['data']['user']['issueComments']['pageInfo']['endCursor']
if has_next_page:
issue_comments = user.issueComments(first=100, after=end_cursor)
else:
# Cache the end_cursor where we last collected data
self.redis.set(''.join(['gh:', user.login, ':issueComments:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)",
"def save_comments():\n potential_deal_id = int(request.form.get(\"id\"))\n action = request.form.get(\"action\")\n if action.lower() == \"none\":\n action = None\n comments = request.form.get(\"comments\")\n db_handler = DBHandler()\n db_handler.update_by_id(potential_deal_id, action, comments)\n # return redirect(url_for(\"home\"))\n return jsonify({\"success\": True}), 200",
"def save_comment(data):\n data['comment_id'] = len(commentslist) + 1\n data['message'] =\"message\"\n data['author'] = \"author\"\n data['date_created'] = datetime.datetime.now()\n # save to list\n commentslist.append(data)",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def save_issues(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n last_run = self.redis.get('ghc_last_run').decode('utf-8')\n if last_run is None:\n last_run = '2004-01-01' # pull everything\n\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':issues:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n issues = u.issues(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n issues = u.issues(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not issues:\n return False\n\n while True:\n if issues['data']['user']['issues']['edges']:\n index = ''.join(['gh_issues-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubIssues',\n document=issues,\n login=user.login,\n path=path)\n has_next_page = issues['data']['user']['issues']['pageInfo']['hasNextPage']\n end_cursor = issues['data']['user']['issues']['pageInfo']['endCursor']\n if has_next_page:\n issues = u.issues(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}',\n filterBy='{ since: \"'+last_run+'\" }')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':issues:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def list(self, number, user=None, repo=None):\n request = self.make_request('issues.comments.list', user=user,\n repo=repo, number=number)\n return self._get_result(request)",
"def create_comment(self, body):\n return self.client.request(\n \"{}/issues/{}/comments\".format(self.repo.base_path, self.num),\n params={\"body\": body},\n method=\"POST\"\n )",
"def comments(accountable):\n comments = accountable.issue_comments()\n headers = sorted(['author_name', 'body', 'updated'])\n\n if comments:\n rows = [[v for k, v in sorted(c.items()) if k in headers]\n for c in comments]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho('No comments found for {}'.format(\n accountable.issue_key\n ), fg='red')",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def save_comment(self):\n self.save()",
"def update_comments(comments, account_name, post_url):\n inc_number = 0\n for index, comment in comments.iterrows():\n # increment + 1\n inc_number = inc_number + 1\n # get preprocessed comment\n comment_spaces, comment_no_stopwords = preprocess_comment(comment['comment'])\n # get sentiment score from comment\n sentiment_score = get_sentiment(comment_no_stopwords)\n # update collection with comments\n collection.update_one(\n {\n 'Codename': account_name,\n 'Posts.URL': post_url\n },\n {\n '$push': {\n 'Posts.$.All Comments': {'comment_id': inc_number,\n 'user': comment['user'],\n 'comment': comment['comment'],\n 'comment_no_stopwords': comment_no_stopwords,\n 'comment_spaces': comment_spaces,\n 'like': comment['like'],\n 'sentiment_score': sentiment_score\n }\n }\n }\n )",
"def issue_reactions(self):\n if self.issue_nums:\n repo = self.g.get_repo(\"apache/airflow\")\n for num in self.issue_nums:\n try:\n issue = repo.get_issue(num)\n except UnknownObjectException:\n continue\n for reaction in issue.get_reactions():\n self._users.add(reaction.user.login)\n self.num_issue_reactions += 1\n for issue_comment in issue.get_comments():\n self.num_issue_comments += 1\n self._users.add(issue_comment.user.login)\n if issue_comment.body is not None:\n self.len_issue_comments += len(issue_comment.body)",
"def addcomment(accountable, body):\n\n r = accountable.issue_add_comment(body)\n headers = sorted(['author_name', 'body', 'updated'])\n rows = [[v for k, v in sorted(r.items()) if k in headers]]\n rows.insert(0, headers)\n print_table(SingleTable(rows))",
"def add_comment_to_issue(repo, issue_number, body, allow_duplicates):\n found = False\n issue = repo.issue(issue_number)\n\n if not allow_duplicates:\n for comment in issue.iter_comments():\n if comment.body == body:\n found = True\n break\n\n if allow_duplicates or not found:\n success = issue.create_comment(body)\n if success:\n click.echo(\"The comment was successfully posted to the issue.\")\n else:\n click.echo(\"There was a failure commenting on the issue.\")\n raise SystemExit(1)\n else:\n click.echo(\"An identical comment was found, skipping posting comment.\")",
"def __post_ticket_comment(ticket_id, user_id, comments, token):\n\n user = user_profile(user_id)\n display_name = user['user']['profile']['real_name']\n rt_api.ticket_comment(ticket_id, comments + \"\\n\\n- \" + display_name, True, token=token)",
"def delete_all_my_issues(username):\n # global username\n for issue_id in get_issue_list_by_user_id(username):\n requests.request(\"DELETE\", baseUrl+api_url+\"/issue/\"+issue_id, headers=headers, auth=(username, password))\n # print(issue_id)\n logger.debug(\"deleting issue with ID# {}\".format(issue_id))",
"def comments(self, comments):\n\n self.container['comments'] = comments",
"def comments(self):\r\n return IssueComments(self)",
"def comments(self):\r\n return IssueComments(self)",
"def save_commit_comments(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':commitComments:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n commit_comments = u.commitComments(first=100, after=end_cursor)\n else:\n commit_comments = u.commitComments(first=100)\n if not commit_comments: # False when errors occured (check log file)\n return False\n while True:\n if commit_comments['data']['user']['commitComments']['edges']:\n index = ''.join(['gh_commit_comments-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubCommitComments',\n document=commit_comments,\n login=user.login,\n path=path)\n has_next_page = commit_comments['data']['user']['commitComments']['pageInfo']['hasNextPage']\n end_cursor = commit_comments['data']['user']['commitComments']['pageInfo']['endCursor']\n if has_next_page:\n commit_comments = u.commitComments(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':commitComments:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def issues(db):\n db.session.query(Issue).delete()\n\n issues = [\n {\n 'label': 'login',\n 'email': '[email protected]',\n 'question': '42.',\n 'status': 'unread'\n },\n {\n 'label': 'login',\n 'email': '[email protected]',\n 'question': 'Hello.',\n 'status': 'unread'\n }\n ]\n\n for issue in issues:\n db.session.add(Issue(**issue))\n\n db.session.commit()\n\n return db",
"def save_action_comments(n_clicks, derived_viewport_data):\n if n_clicks > 0:\n records = [\n {\n \"PotentialDealID\": record[\"PotentialDealID\"],\n \"Action\": record[\"Action\"],\n \"Comment\": record[\"Comment\"]\n }\n for record in derived_viewport_data\n ]\n DBApi.get_instance().save_actions_comments(records=records)\n return [\"Data saved successfully\", True, 5000]\n return [\"\", False, 5000]",
"def issues_comments_list(self, mar, request):\n issue = self._services.issue.GetIssueByLocalID(\n mar.cnxn, mar.project_id, request.issueId)\n comments = self._services.issue.GetCommentsForIssue(\n mar.cnxn, issue.issue_id)\n comments = [comment for comment in comments if not comment.approval_id]\n visible_comments = []\n for comment in comments[\n request.startIndex:(request.startIndex + request.maxResults)]:\n visible_comments.append(\n api_pb2_v1_helpers.convert_comment(\n issue, comment, mar, self._services, mar.granted_perms))\n\n return api_pb2_v1.IssuesCommentsListResponse(\n kind='monorail#issueCommentList',\n totalResults=len(comments),\n items=visible_comments)",
"def save_historical_submission_comments(list_of_dictionary_submissions, file_name):\n all_comments_list = []\n submission_count = 1\n\n for submission_dict in list_of_dictionary_submissions:\n print('saving comments from submission', submission_count, '/', len(list_of_dictionary_submissions))\n submission_count += 1\n submission = (REDDIT.submission(id=submission_dict['id']))\n\n submission.comments.replace_more(limit=None)\n for comment in submission.comments.list():\n temp_dict = {'body': comment.body, 'comment_id': comment, 'author': comment.author,\n 'created_utc': comment.created_utc, 'permalink': comment.permalink,\n 'link_id': comment.link_id, 'score': comment.score}\n all_comments_list.append(temp_dict)\n print('total comments: ', len(all_comments_list))\n\n comments_df = pd.DataFrame(all_comments_list, columns=['body', 'comment_id', 'author', 'created_utc',\n 'permalink', 'link_id', 'score'])\n\n print(comments_df)\n\n print('saving comments to file:', file_name, '...')\n comments_df.to_csv(file_name)\n print('done.')",
"def update_comments(self):\n self.nb_comments = self.comments.count()\n self.save()",
"def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)",
"def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments",
"def comments(self, comments):\n\n self._comments = comments"
]
| [
"0.5847415",
"0.56875473",
"0.55866",
"0.55214906",
"0.5442948",
"0.53258395",
"0.53073525",
"0.5284097",
"0.5237606",
"0.5235629",
"0.522124",
"0.5177813",
"0.5167031",
"0.51373035",
"0.5129606",
"0.5121784",
"0.51109034",
"0.51038647",
"0.51038647",
"0.5060211",
"0.50482506",
"0.503917",
"0.5001175",
"0.49983707",
"0.49038514",
"0.49020025",
"0.48999017",
"0.48820844",
"0.48820844",
"0.48820844"
]
| 0.6056529 | 0 |
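Note: every page above is handed to self._write_to_datastore(index=..., doc_type=..., document=..., login=..., path=path), with the index name built from a prefix plus self.timestamp (e.g. 'gh_issue_comments-<timestamp>'). That method is not included in the dataset, so the following is only a plausible file-backed stand-in, with every name hypothetical: it appends each page as one JSON line under path, grouped by index and login.

import json
import os

def write_to_datastore(index: str, doc_type: str, document: dict, login: str, path: str) -> None:
    # Hypothetical stand-in for the unshown _write_to_datastore helper.
    os.makedirs(path, exist_ok=True)
    out_file = os.path.join(path, f"{index}-{login}.jsonl")
    record = {"doc_type": doc_type, "login": login, "document": document}
    with open(out_file, "a", encoding="utf-8") as fh:
        fh.write(json.dumps(record) + "\n")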
Saves a list of issues associated with this user. | def save_issues(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
last_run = self.redis.get('ghc_last_run')
if last_run is None:
last_run = '2004-01-01' # pull everything
else:
last_run = last_run.decode('utf-8')
end_cursor = self.redis.get(''.join(['gh:', user.login, ':issues:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
issues = user.issues(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}')
else:
issues = user.issues(first=100,
orderBy='{direction: DESC, field: CREATED_AT}')
if not issues:
return False
while True:
if issues['data']['user']['issues']['edges']:
index = ''.join(['gh_issues-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubIssues',
document=issues,
login=user.login,
path=path)
has_next_page = issues['data']['user']['issues']['pageInfo']['hasNextPage']
end_cursor = issues['data']['user']['issues']['pageInfo']['endCursor']
if has_next_page:
issues = user.issues(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}',
filterBy='{ since: "'+last_run+'" }')
else:
# Cache the end_cursor where we last collected data
self.redis.set(''.join(['gh:', user.login, ':issues:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def issues(db):\n db.session.query(Issue).delete()\n\n issues = [\n {\n 'label': 'login',\n 'email': '[email protected]',\n 'question': '42.',\n 'status': 'unread'\n },\n {\n 'label': 'login',\n 'email': '[email protected]',\n 'question': 'Hello.',\n 'status': 'unread'\n }\n ]\n\n for issue in issues:\n db.session.add(Issue(**issue))\n\n db.session.commit()\n\n return db",
"def issues_insert(self, mar, request):\n if not mar.perms.CanUsePerm(\n permissions.CREATE_ISSUE, mar.auth.effective_ids, mar.project, []):\n raise permissions.PermissionException(\n 'The requester %s is not allowed to create issues for project %s.' %\n (mar.auth.email, mar.project_name))\n\n with work_env.WorkEnv(mar, self._services) as we:\n owner_id = None\n if request.owner and request.owner.name:\n try:\n owner_id = self._services.user.LookupUserID(\n mar.cnxn, request.owner.name)\n except exceptions.NoSuchUserException:\n raise endpoints.BadRequestException(\n 'The specified owner %s does not exist.' % request.owner.name)\n\n cc_ids = []\n request.cc = [cc for cc in request.cc if cc]\n if request.cc:\n cc_ids = list(self._services.user.LookupUserIDs(\n mar.cnxn, [ap.name for ap in request.cc],\n autocreate=True).values())\n comp_ids = api_pb2_v1_helpers.convert_component_ids(\n mar.config, request.components)\n fields_add, _, _, fields_labels, _ = (\n api_pb2_v1_helpers.convert_field_values(\n request.fieldValues, mar, self._services))\n field_helpers.ValidateCustomFields(\n mar, self._services, fields_add, mar.config, mar.errors)\n if mar.errors.AnyErrors():\n raise endpoints.BadRequestException(\n 'Invalid field values: %s' % mar.errors.custom_fields)\n\n logging.info('request.author is %r', request.author)\n reporter_id, timestamp = self.parse_imported_reporter(mar, request)\n new_issue, _ = we.CreateIssue(\n mar.project_id, request.summary, request.status, owner_id,\n cc_ids, request.labels + fields_labels, fields_add,\n comp_ids, request.description,\n blocked_on=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blockedOn, mar, self._services),\n blocking=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blocking, mar, self._services),\n reporter_id=reporter_id, timestamp=timestamp,\n send_email=request.sendEmail)\n we.StarIssue(new_issue, True)\n\n return api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssuesGetInsertResponse, new_issue, mar, self._services)",
"def issues(self):\n if self.pull_request.body is not None:\n regex = r\"(?<=closes: #|elated: #)\\d{5}\"\n issue_strs = re.findall(regex, self.pull_request.body)\n self.issue_nums = [eval(s) for s in issue_strs]",
"def _delete_all_issues(self):\n log.info(\"Github API does not currently support deleting issues. Sorry!\")\n return\n data = self._get(\"/issues\")\n if not data:\n log.info(\"No issues found!\")\n return\n for item in data:\n resp = self._delete(\n self._base + \"/issues/{}\".format(item['number']))\n if resp.status_code == 204:\n log.resp(\"204: Deleted issue \" + item['title'])\n else:\n log.resp(\"{}: {}\".format(resp.status_code, resp.text))",
"def save_issue(self, item):\n logger.debug(\"saving the issues\")\n issue = Issue(id=item['title'])\n issue.title = item['title']\n if 'subtitle' in item:\n if any(word in item['subtitle'] for word in [\"variant\", \"Variant\"]):\n issue.key = ndb.Key(Issue, item['title'] + \" variant\")\n logger.debug(\"found variant, new issue id is \" + item['title'] + \" variant\")\n issue.subtitle = item['subtitle']\n\n if 'series' in item:\n series = Series(id=item['series'].rstrip('1234567890 '), title=item['series'].rstrip('1234567890 '))\n series.put()\n issue.series = series.key\n\n if 'reprint' in item:\n issue.reprint = item['reprint']\n\n if 'url' in item:\n issue.url = item['url']\n else:\n issue.url = \"#\"\n\n if 'summary' in item:\n issue.summary = item['summary']\n\n if 'date' in item:\n issue.date = item['date']\n\n if 'price' in item:\n issue.price = item['price']\n\n if \"placeholder/default/no-photo\" in item['image']:\n issue.image = item['image']\n else:\n issue.image = item['image'].replace('small_image/200x', 'image')\n\n issue.put_async()\n logger.debug(\"issue \" + issue.title + \" saved\")",
"def do_the_issues(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, \"{}_{}_issues.txt\".format(repo_id, user_id))\n issues_initial_url = get_initial_url_issues(user_id, repo_id)\n resp_obj = requests.get(issues_initial_url, headers=headers)\n # prase the initial request. for Issue\n all_issues = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n print(\"the len of resp is {}\".format(len(all_issues)))\n LINK_HEADER = \"Link\"\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n # parse next page (if present)\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n # subsequent page\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n all_issues = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))\n print(\"the issues path is \" + str(path))",
"def save_many(self, reports: List[Report]):\n for report in reports:\n self.save(report)",
"def sync_issue_status_server(erpnext_support_user, erpnext_support_issues, bench_site):\n\tauthenticate_erpnext_support_user(erpnext_support_user)\n\n\tissues = {}\n\terpnext_support_issues = json.loads(erpnext_support_issues)\n\n\tfor erpnext_support_issue in erpnext_support_issues:\n\t\tfilters = {\n\t\t\t'name': erpnext_support_issue.get('associated_issue'),\n\t\t\t'client_issue_id': erpnext_support_issue.get('name'),\n\t\t\t'bench_site': bench_site\n\t\t}\n\n\t\tissue_status = frappe.db.get_value(\"Issue\", filters, \"status\")\n\t\tif issue_status not in ['Open', 'Closed']:\n\t\t\tissue_status = 'Open'\n\n\t\tissues[erpnext_support_issue.get('name')] = [{\"status\": issue_status}]\n\n\tissues = json.dumps(issues)\n\treturn issues",
"async def issues(self, ctx):\n await ctx.message.delete()\n await ctx.send(\"Issue tracker: https://github.com/TheSuperGamer20578/Sudan-bot/issues\")",
"def add_issues(self, mantis_id=None, add_after=False, database=None,\n pause_count=None, pause_seconds=None, report_progress=None,\n verbose=False):\n issues = []\n for inum in self.__project_issue_numbers:\n if mantis_id is not None:\n if not add_after and inum >= mantis_id:\n # we've added all issues before 'mantis_id', exit the loop\n break\n if add_after and inum <= mantis_id:\n # we haven't started adding yet, keep looking\n continue\n\n if inum in self.__mantis2github:\n # issue has been added, continue looking\n continue\n\n if inum not in self.__all_issues:\n if inum not in self.__missing:\n # add to list of missing issues and complain\n self.__missing.append(inum)\n\n if mantis_id is None:\n extra = \"\"\n else:\n extra = \" (before adding #%s)\" % (mantis_id, )\n print(\"ERROR: Cannot add missing issue #%s%s\" %\n (inum, extra), file=sys.stderr)\n\n continue\n\n issues.append(self.__all_issues[inum])\n\n if verbose:\n print(\"\\nOpening %d issues%s\" %\n (len(issues), \"\" if mantis_id is None\n else \" preceeding Mantis #%s\" % (mantis_id, )))\n else:\n # start the Mantis progress on a new line\n print()\n\n # attempt to create all the preceding issues\n for count, issue in enumerate(issues):\n if report_progress is not None:\n report_progress(count, len(issues), \"Mantis\", \"issue\",\n issue.id)\n\n try:\n gh_issue = self.__open_issue(issue, database=database)\n\n if gh_issue is not None:\n if issue.is_closed or (self.__close_resolved and\n issue.is_resolved):\n gh_issue.edit(body=\"No associated GitHub commit\",\n state=\"closed\")\n except KeyboardInterrupt:\n raise\n except:\n print(\"Failed to open & close issue #%s (%d of %d)\" %\n (issue.id, count, len(issues)), file=sys.stderr)\n traceback.print_exc()\n gh_issue = None\n\n # if requested, pause a bit after adding the number of issues\n # specified by 'pause_count'\n if pause_count is not None and pause_seconds is not None and \\\n count > 0 and count % pause_count == 0:\n time.sleep(pause_seconds)",
"def issues(self):\r\n return issues.RepoIssues(self)",
"def issues(self):\r\n return issues.RepoIssues(self)",
"def delete_all_my_issues(username):\n # global username\n for issue_id in get_issue_list_by_user_id(username):\n requests.request(\"DELETE\", baseUrl+api_url+\"/issue/\"+issue_id, headers=headers, auth=(username, password))\n # print(issue_id)\n logger.debug(\"deleting issue with ID# {}\".format(issue_id))",
"def issues(self):\r\n return issues.Issues(self)",
"def issues_list(self, mar, request):\n if request.additionalProject:\n for project_name in request.additionalProject:\n project = self._services.project.GetProjectByName(\n mar.cnxn, project_name)\n if project and not permissions.UserCanViewProject(\n mar.auth.user_pb, mar.auth.effective_ids, project):\n raise permissions.PermissionException(\n 'The user %s has no permission for project %s' %\n (mar.auth.email, project_name))\n url_params = [(name, mar.GetParam(name)) for name in\n framework_helpers.RECOGNIZED_PARAMS]\n # TODO(jrobbins): This should go through work_env.\n pipeline = frontendsearchpipeline.FrontendSearchPipeline(\n mar.cnxn, self._services, mar.auth, [mar.me_user_id], mar.query,\n mar.query_project_names, mar.num, mar.start, url_params, mar.can,\n mar.group_by_spec, mar.sort_spec, mar.warnings, mar.errors,\n mar.use_cached_searches, mar.profiler, display_mode=mar.mode,\n project=mar.project)\n if not mar.errors.AnyErrors():\n pipeline.SearchForIIDs()\n pipeline.MergeAndSortIssues()\n pipeline.Paginate()\n else:\n raise endpoints.BadRequestException(mar.errors.query)\n\n issue_list = [\n api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssueWrapper, r, mar, self._services)\n for r in pipeline.visible_results]\n return api_pb2_v1.IssuesListResponse(\n kind='monorail#issueList',\n totalResults=pipeline.total_count,\n items=issue_list)",
"async def get_issues(self) -> [\"AIOGitHubAPIRepositoryIssue\"]:\n _endpoint = f\"/repos/{self.full_name}/issues\"\n\n response = await self.client.get(endpoint=_endpoint)\n return [AIOGitHubAPIRepositoryIssue(self.client, x) for x in response or []]",
"def save(self):\n\n for i in self.bots:\n try:\n i.save()\n except Exception, ex:\n handle_exception()",
"def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)",
"def issues(self) -> Iterable[Issue]:\n # Make request\n issues = self.shards_xml(\"issues\")[\"issues\"]\n # Return boxed Issues (may crash badly if authentication failed)\n return [Issue.from_xml(node) for node in issues]",
"def get_sprint_board_issues(self) -> list:\n endpoint_path = f'agile/1.0/sprint/{self.sprint}/issue'\n data = {\n 'jql': 'status=\"In Review\"',\n 'fields': ['assignee', 'status', 'summary'],\n }\n response = self._get(endpoint_path, data)\n return self._parser.filter_out_important_data(response)",
"def save(self):\n\n # We need to set updated, even if it's the same as created,\n # so we have a consistent timestamp to sort issues by.\n self.updated = time.time()\n\n if not hasattr(self, 'id'):\n # IDs are generated from the JSON dump of the\n # issue. This includes the UTC-format timestamp, so \n # they can be considered pretty unique.\n self.created = self.updated \n self.id = get_hash(to_json(self.fields))\n # set the paths now that we have an id\n self._set_paths()\n \n # Make the parent directory if it doesn't exist.\n if not os.path.isdir(self.paths['root']):\n os.mkdir(self.paths['root'])\n # Make the comments dir if it doesn't exist.\n if not os.path.isdir(self.paths['comments']):\n os.mkdir(self.paths['comments'])\n # Save it in the db.\n self.tracker.db.insert(self)\n # Save it.\n return self.to_file(self.paths['issue'])",
"def write_submission_to_db(cls, file_name, obj_list):\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n query = \"DELETE FROM `submission`;\"\n c.execute(query)\n\n for obj in obj_list:\n params = [obj.send_date, obj.name, obj.grade, obj.github_link, obj.student_id]\n c.execute(\"INSERT INTO submission (send_date, grade, name, github_link, student_id) VALUES (?, ?, ?, ?, ?)\", params)\n conn.commit()\n conn.close()",
"def update_existing_issue(\n self, token: str, customer_id: str, project_id: str, items: list\n ):\n\n # Type guarding\n assert check_argument_types()\n\n # TODO: Make table name an config env variable\n table_name = f\"Projects-{customer_id}\"\n\n # Query items\n key = {\"projectId\": project_id, \"customerId\": customer_id}\n\n # Check if customer and project exist\n logger.info(f\"Checking if project ID or organization ID exists: {key}\")\n response, http_status_code = self._db.read_single_item(\n table_name, key, \"projectId\"\n )\n\n success, fail = [], []\n for item in items:\n\n scope_id = item[\"scopeId\"]\n issues_id = item[\"issueId\"]\n\n # Query item from DynamoDB\n projection_expression = f\"scopes.{scope_id}.issues.{issues_id}\"\n previous_item, _ = self._db.read_single_item(\n table_name, key, projection_expression\n )\n\n if not previous_item:\n continue\n\n previous_item = previous_item[\"scopes\"][scope_id][\"issues\"][issues_id]\n\n # Define DynamoDB expressions & update issue\n logger.info(f\"Updating issue {issues_id}\")\n item[\"lastUpdate\"] = str(date.today())\n update_expression = \"SET {}\".format(\n \", \".join(\n f\"scopes.{scope_id}.issues.{issues_id}.#{k}=:{k}\"\n for k in item.keys()\n )\n )\n expression_attribute_names = {f\"#{k}\": k for k in item.keys()}\n expression_attribute_values = {f\":{k}\": v for k, v in item.items()}\n response, http_status_code = self._db.update_item(\n table_name,\n key,\n update_expression,\n expression_attribute_names,\n expression_attribute_values,\n )\n\n # Log workflow\n message = generate_differences_message(previous_item, item)\n if message:\n workflow = Workflows.update_workflows(\n token, \"Update\", message, project_id, issues_id\n )\n self._db.create_item(f\"Workflows-{customer_id}\", workflow)\n\n if 200 <= http_status_code < 300:\n logger.info(\n f\"Issue {issues_id}'s details successfully updated, {http_status_code}\"\n )\n success.append(issues_id)\n else:\n logger.error(f\"{response}, {http_status_code}\")\n fail.append(issues_id)\n\n # Determine status codes\n\n # Default vavlue\n http_status_code = 200\n\n if len(success) >= 1 and len(fail) == 0:\n http_status_code = 200\n elif len(success) == 0 and len(fail) >= 1:\n http_status_code = 403\n elif len(success) >= 1 and len(fail) >= 1:\n http_status_code = 405\n else:\n http_status_code = 304\n\n return {\"success\": success, \"fail\": fail}, http_status_code",
"def process(self):\n for user in self.repos:\n for repo in self.repos[user]:\n self.process_issues(user, repo)",
"def save_issue(issue: Issue, user: User) -> None:\n user.settings.last_topic = issue",
"def cleanIssues(issues):\n response = []\n for issue in issues:\n response.append({\"title\": issue[\"title\"], \"body\": issue[\"body\"]})\n return response",
"def save_changes(self, objs):\n # Save to the database and possibly write tags.\n for ob in objs:\n if ob._dirty:\n self._log.debug('saving changes to {}', ob)\n ob.try_sync(ui.should_write(), ui.should_move())",
"def test_issue_list_issues(self):\n pass",
"def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)",
"def delete_existing_issues(\n self, token: str, customer_id: str, project_id: str, issues: list\n ):\n\n # Type guarding\n assert check_argument_types()\n\n # TODO: Make table name an config env variable\n table_name = f\"Projects-{customer_id}\"\n\n # Query items\n key = {\"customerId\": customer_id, \"projectId\": project_id}\n\n # Check if customer and project exist\n logger.info(f\"Checking if project ID or organization ID exists: {key}\")\n self._db.read_single_item(table_name, key, \"projectId\")\n\n # DynamoDB expression & delete\n logger.info(f\"Deleting project issues {issues}\")\n update_expression = \"REMOVE {}\".format(\n \", \".join([f\"issues.{k}\" for k in issues])\n )\n self._db.update_item(\n table_name=table_name,\n key=key,\n update_expression=update_expression,\n return_values=\"UPDATED_NEW\",\n )\n\n # Log workflow\n for issue_id in issues:\n message = f\"deleted issue {issue_id}\"\n workflow = Workflows.update_workflows(\n token, \"Delete\", message, project_id, issue_id\n )\n self._db.create_item(f\"Workflows-{customer_id}\", workflow)\n\n logger.info(\"Project issues deleted successfully\")\n return \"Project issues deleted successfully\", 200"
]
| [
"0.58301854",
"0.54856616",
"0.51859164",
"0.5134195",
"0.509845",
"0.50796527",
"0.5029606",
"0.49997592",
"0.49965605",
"0.49530712",
"0.49469295",
"0.49469295",
"0.49100602",
"0.48562244",
"0.48497698",
"0.4818374",
"0.48037708",
"0.47605178",
"0.47588396",
"0.47507063",
"0.47188488",
"0.4707179",
"0.470524",
"0.46649963",
"0.45915323",
"0.45861143",
"0.4575376",
"0.45719022",
"0.45699936",
"0.45643377"
]
| 0.58010536 | 1 |
Saves a list of organizations the user belongs to. | def save_organizations(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':organizations:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
        organizations = user.organizations(first=100, after=end_cursor)
else:
        organizations = user.organizations(first=100)
if not organizations:
return False
while True:
if organizations['data']['user']['organizations']['edges']:
index = ''.join(['gh_organizations-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubOrganizations',
document=organizations,
login=user.login,
path=path)
has_next_page = organizations['data']['user']['organizations']['pageInfo']['hasNextPage']
end_cursor = organizations['data']['user']['organizations']['pageInfo']['endCursor']
if has_next_page:
                organizations = user.organizations(first=100, after=end_cursor)
else:
# Cache the end_cursor where we last collected data
                self.redis.set(''.join(['gh:', user.login, ':organizations:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_put_list_replace(self):\n for org in Organization.objects.all():\n OrganizationMembership.objects.create(\n user=self.user, organization=org)\n self.user.save()\n self.story.organizations.add(*list(Organization.objects.filter(organizationtranslation__name__in=(\"Urban Land Conservancy\", \"America Scores Denver\"))))\n self.story.save()\n self.assertEqual(self.story.organizations.count(), 2)\n put_data = [organization.organization_id for organization in\n Organization.objects.filter(organizationtranslation__name__in=(\"Mile High Connects\", \"Piton Foundation\"))]\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/%s/organizations/' % (self.story.story_id)\n response = self.api_client.put(uri, format='json', data=put_data)\n self.assertHttpAccepted(response)\n self.story = Story.objects.get(story_id=self.story.story_id)\n self.assertEqual(self.story.organizations.count(), 2)\n ids = [organization.organization_id for organization in self.story.organizations.all()]\n self.assertEqual(ids, put_data)",
"def organizations(self):\n self.elements('organizations')",
"def process_organizations(self, organizations):\n self.process_elements(\n organizations,\n self.organization_table,\n self.extract_organization,\n ['organization_data', 'member', 'organization']\n )",
"def test_put_list_new(self):\n for org in Organization.objects.all():\n OrganizationMembership.objects.create(\n user=self.user, organization=org)\n self.user.save()\n self.story.save()\n self.assertEqual(self.story.organizations.count(), 0)\n put_data = [org.organization_id for org in\n Organization.objects.filter(organizationtranslation__name=\"Piton Foundation\")]\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/%s/organizations/' % (self.story.story_id)\n response = self.api_client.put(uri, format='json', data=put_data)\n self.assertHttpAccepted(response)\n self.story = Story.objects.get(story_id=self.story.story_id)\n self.assertEqual(self.story.organizations.count(), 1)\n ids = [org.organization_id for org in self.story.organizations.all()]\n self.assertEqual(ids, put_data)",
"def sync_org(config, orgs):\n\n logger = logging.getLogger(\"sync-org\")\n\n for org in orgs:\n logger.info(\"Syncing {} organization\".format(org))\n config.get_manager().sync_org(org)",
"def test_putorganizations_item(self):\n pass",
"def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='[email protected]',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)",
"def test_success_with_all_orgs(self):\n auth_client = self.create_auth_client()\n all_orgs = ListOrgSerializer(Org.objects.all(), many=True)\n response = auth_client.get(self.search_org_api)\n self.assertEqual(response.data, all_orgs.data)",
"def save_committees(event, committees):\n for committee in committees:\n name = committee.name\n organization = Organization.objects.get(id=committee.id)\n entity_type = \"organization\"\n new_committee = EventParticipant(\n name=name,\n event=event,\n organization=organization,\n entity_type=entity_type\n )\n new_committee.save()",
"def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))",
"def generate_test_organizations(self):\n def generate_organizations_for_parent(org_names, parent_name=None):\n item_dict = {}\n for name in org_names:\n if parent_name:\n item_dict['{}_{}'.format(name, parent_name)] = {\n 'name': name,\n 'parent': parent_name\n }\n else:\n item_dict['{}'.format(name)] = {\n 'name': name\n }\n return item_dict\n\n self.os_dict = \\\n generate_organizations_for_parent(\n ['o1', 'o1', 'o2', 'o3', 'o4_del', 'o5_del'])\n\n # generate organizations in database\n self.orgs = self.create_orgs_from_data(self.os_dict)\n\n # generate sub organizations\n self.subs_o1_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3_del', 'sub4_del', 'sub5_del'], 'o1')\n\n self.subs_o2_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3', 'sub4_del', 'sub5_del'], 'o2')\n\n self.subs_o3_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3_del'], 'o3')\n\n # generate sub organizations dictionary\n self.subs_dict = {\n **self.subs_o1_dict,\n **self.subs_o2_dict,\n **self.subs_o3_dict,\n }\n\n # update organizations list with sub_organizations in database\n self.orgs.update(\n self.create_orgs_from_data(self.subs_dict, self.orgs))",
"def create_orgs_from_data(self, data, orgs=None):\n item_dict = {}\n for (item_name, data) in data.items():\n if orgs:\n item_dict[item_name] = Organization(\n name=data['name'],\n parent=orgs.get(data['parent']))\n else:\n item_dict[item_name] = Organization(name=data['name'])\n item_dict[item_name].save()\n return item_dict",
"def export_organizations(self):\n print('\\n=== Exporting all organization data...')\n\n for organization in self.client.organizations:\n print('- Exporting organizations:', organization.name)\n\n json = {\n 'id': self.get_id(organization),\n 'href': organization.href,\n 'name': organization.name,\n 'nameKey': organization.name_key,\n 'description': organization.description,\n 'status': organization.status,\n 'createdAt': organization.created_at.isoformat(),\n 'modifiedAt': organization.modified_at.isoformat(),\n 'customData': self.get_custom_data(organization),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n }\n\n default_account_store_mapping = organization.default_account_store_mapping\n default_group_store_mapping = organization.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': organization.default_account_store_mapping.href.split('/')[-1],\n 'href': organization.default_account_store_mapping.href,\n 'type': organization.default_account_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_account_store_mapping.account_store.name,\n 'list_index': organization.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': organization.default_group_store_mapping.href.split('/')[-1],\n 'href': organization.default_group_store_mapping.href,\n 'type': organization.default_group_store_mapping.account_store.__class__.__name__,\n 'name': organization.default_group_store_mapping.account_store.name,\n 'list_index': organization.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in organization.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(organization.tenant)\n self.write('%s/%s/organizations/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')",
"def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})",
"def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")",
"def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")",
"def organizations(self):\r\n return organizations.Organizations(self)",
"def test_organizations_list(self):\n pass",
"def _org_select(organisations):\n\n T = current.T\n\n response = current.response\n settings = current.deployment_settings\n\n options = {row.id: row.name for row in organisations}\n\n formfields = [Field(\"organisation_id\",\n label = T(\"Organization\"),\n requires = IS_IN_SET(options),\n ),\n ]\n\n # Generate labels (and mark required fields in the process)\n labels = s3_mark_required(formfields)[0]\n response.s3.has_required = False\n\n # Form buttons\n SUBMIT = T(\"Continue\")\n buttons = [INPUT(_type = \"submit\",\n _value = SUBMIT,\n ),\n ]\n\n # Construct the form\n response.form_label_separator = \"\"\n form = SQLFORM.factory(table_name = \"organisation\",\n record = None,\n labels = labels,\n separator = \"\",\n showid = False,\n submit_button = SUBMIT,\n formstyle = settings.get_ui_formstyle(),\n buttons = buttons,\n *formfields)\n\n return form",
"def test_add_organization(self):\n pass",
"def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)",
"def test_remove_last_from_organization_forbidden(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_organizations=[org.uid])\n user.put()\n\n self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(user),\n )\n\n # not changed in the db\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)",
"async def add_organization(request: Request, data: dict):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n organizations_obj[data[\"organization\"]] = data[\"organization_data\"]\n await redis.set_key(\"influxdb_organizations\", orjson.dumps(organizations_obj))\n logger.info(\"Organization %s added\", data['organization'])\n return {\"message\": \"Organization {} added\".format(\n data['organization']\n )}",
"def post(self):\n request_user = User.get_by_id(token_auth.current_user())\n if request_user.role != 1:\n return {\n \"Error\": \"Only admin users can create organisations.\",\n \"SubCode\": \"OnlyAdminAccess\",\n }, 403\n\n try:\n organisation_dto = NewOrganisationDTO(request.get_json())\n if request_user.username not in organisation_dto.managers:\n organisation_dto.managers.append(request_user.username)\n organisation_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": str(e), \"SubCode\": \"InvalidData\"}, 400\n\n try:\n org_id = OrganisationService.create_organisation(organisation_dto)\n return {\"organisationId\": org_id}, 201\n except OrganisationServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400",
"def save_user_unions(*args):\n return _ida_hexrays.save_user_unions(*args)",
"def get_organizations_to_delete():\n\n all_organizations = seed.models.Organization.objects.all()\n bad_organizations = [org for org in all_organizations if org.id not in get_core_organizations()]\n return bad_organizations",
"def testCreateOrg(self):\n self.timeline.orgSignup()\n self.data.createProfile()\n self.record.createOrgApp('new_org', self.data.user)\n\n url = '/gci/profile/organization/' + self.gci.key().name()\n create_url = url + '?org_id=new_org'\n response = self.get(create_url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n \n postdata = {\n 'founder': self.data.user, 'home': self.createDocument().key(),\n 'scope': self.gci, 'irc_channel': 'irc://example.com',\n 'pub_mailing_list': 'http://example.com',\n }\n response, properties = self.modelPost(create_url, GCIOrganization, postdata)\n self.assertResponseRedirect(response, url + '/new_org?validated')\n profile = db.get(self.data.profile.key())\n self.assertEqual(1, len(profile.org_admin_for))",
"def ListOrganizations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def updateOrgAdmins(request):\n\n return updateRole('gsoc_org_admin')",
"def test_remove_fellow_from_organization_success(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='Admin', email='[email protected]', user_type='user',\n owned_organizations=['Organization_foo'])\n req = User.create(name='Valid Requestor', email='[email protected]',\n user_type='user',\n owned_organizations=['Organization_foo'])\n user.put()\n req.put()\n\n # Successful removal.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'owned_organizations': []},\n headers=self.login_headers(req),\n )\n self.assertEqual(json.loads(response.body)['owned_organizations'], [])\n\n # Changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(fetched_user.owned_organizations, [])\n self.assertEqual(user.user_type, fetched_user.user_type)"
]
| [
"0.59265953",
"0.58648086",
"0.56789476",
"0.56515485",
"0.54156107",
"0.5396966",
"0.53800625",
"0.53793323",
"0.5349588",
"0.53471106",
"0.5318676",
"0.5316024",
"0.5313007",
"0.5301353",
"0.52717745",
"0.52717745",
"0.51998657",
"0.51996726",
"0.5185852",
"0.51782054",
"0.5159896",
"0.51515055",
"0.51426286",
"0.51393527",
"0.51052547",
"0.5071669",
"0.5053148",
"0.5048023",
"0.503966",
"0.5033241"
]
| 0.60336745 | 0 |
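The save_organizations document above, like the other save_* collectors in the rows that follow, repeats one pattern: read a cached GraphQL end cursor from Redis, page through the connection 100 items at a time, write each page to the datastore, and cache the last cursor for the next run. The sketch below is a minimal, hypothetical distillation of that pattern, not code from the original collector; `redis_conn`, `fetch_page`, `write_page`, and `connection_path` are illustrative stand-ins.

```python
def paginate_connection(redis_conn, cache_key, fetch_page, write_page, connection_path):
    """Page through a GraphQL connection, resuming from a cursor cached in Redis."""
    end_cursor = redis_conn.get(cache_key)
    if end_cursor:
        # Cached cursors are stored raw; quote them so they can be spliced
        # into a GraphQL argument string, mirroring the collectors above.
        end_cursor = '"{}"'.format(end_cursor.decode('utf-8'))
        page = fetch_page(first=100, after=end_cursor)
    else:
        page = fetch_page(first=100)

    if not page:
        return False

    while True:
        connection = page
        for key in connection_path:          # e.g. ('data', 'user', 'organizations')
            connection = connection[key]
        if not connection['edges']:
            break

        write_page(page)                     # persist this page of results

        page_info = connection['pageInfo']
        if page_info['hasNextPage']:
            page = fetch_page(first=100, after=page_info['endCursor'])
        else:
            # Remember where collection stopped so the next run can resume.
            redis_conn.set(cache_key, page_info['endCursor'])
            break
    return True
```

Each save_* method in these rows is effectively this loop specialized to a single connection (organizations, pinnedRepositories, publicKeys, pullRequests) plus the index and doc_type it writes under.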
Saves a list of repositories this user has pinned to their profile. | def save_pinned_repositories(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':pinnedRepositories:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
        pinned_repositories = user.pinnedRepositories(first=100, # usually more like 6, but we want all possible
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}')
else:
        pinned_repositories = user.pinnedRepositories(first=100,
orderBy='{direction: DESC, field: CREATED_AT}')
if not pinned_repositories:
return False
while True:
if pinned_repositories['data']['user']['pinnedRepositories']['edges']:
index = ''.join(['gh_pinned_repositories-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubPinnedRepositories',
document=pinned_repositories,
login=user.login,
path=path)
has_next_page = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['hasNextPage']
end_cursor = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['endCursor']
if has_next_page:
                pinned_repositories = user.pinnedRepositories(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}')
else:
# Cache the end_cursor where we last collected data
                self.redis.set(''.join(['gh:', user.login, ':pinnedRepositories:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstream_repo(project)\n if upperstream_repo is not None:\n _ownered_project.append((upperstream_repo, upperstream_repo + \"(Upperstream of %s)\" % project))\n\n User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())\n\n # mongoDB don't support key value contains '.'\n for i in range(len(_ownered_project)):\n _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])\n User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))\n\n flash('Refresh your own GitHub repositories list successfully!', 'success')\n return redirect(url_for('main.load_from_github'))",
"def save_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n repositories = u.repositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n repositories = u.repositories(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not repositories:\n return False\n\n while True:\n if repositories['data']['user']['repositories']['edges']:\n index = ''.join(['gh_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubRepositories',\n document=repositories,\n login=user.login,\n path=path)\n has_next_page = repositories['data']['user']['repositories']['pageInfo']['hasNextPage']\n end_cursor = repositories['data']['user']['repositories']['pageInfo']['endCursor']\n if has_next_page:\n repositories = u.repositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':repositories:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def save_starred_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':starredRepositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n starred_repositories = u.starredRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: STARRED_AT}')\n else:\n starred_repositories = u.starredRepositories(first=100,\n orderBy='{direction: DESC, field: STARRED_AT}')\n\n if not starred_repositories:\n return False\n\n while True:\n try:\n if starred_repositories['data']['user']['starredRepositories']['edges']:\n index = ''.join(['gh_starred_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubStarredRepositories',\n document=starred_repositories,\n login=user.login,\n path=path)\n has_next_page = starred_repositories['data']['user']['starredRepositories']['pageInfo']['hasNextPage']\n end_cursor = starred_repositories['data']['user']['starredRepositories']['pageInfo']['endCursor']\n if has_next_page:\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n starred_repositories = u.starredRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: STARRED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':starredRepositories:endCursor']), end_cursor)\n break\n else:\n break\n except TypeError as e:\n self.logger.error('GithubStarredRepositories', u.login, e)\n break\n\n return True",
"def save_repositories_contributed_to(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositoriesContributedTo:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not repositories_contributed_to:\n return False\n\n while True:\n if repositories_contributed_to['data']['user']['repositoriesContributedTo']['edges']:\n index = ''.join(['gh_repositories_contributed_to-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubRepositoriesContributedTo',\n document=repositories_contributed_to,\n login=user.login,\n path=path)\n has_next_page = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['hasNextPage']\n end_cursor = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['endCursor']\n if has_next_page:\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':repositoriesContributedTo:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def process(self):\n\n manager_list = []\n\n with Manager() as manager:\n manager_list = manager.list()\n processes = []\n\n for data in self.repos:\n process = Process(\n target=self.get_repos_lists, args=(data, manager_list)\n )\n process.start()\n processes.append(process)\n\n for proc in processes:\n proc.join()\n\n manager_list = list(manager_list)\n\n for element in manager_list:\n domains, ips = self.__separate_domains_from_ip(element)\n Settings.domains.extend(domains)\n Settings.ips.extend(ips)\n\n del domains, ips\n\n Settings.domains = Helpers.List(Settings.domains).format()\n Settings.ips = Helpers.List(Settings.ips).format()\n\n Helpers.Dict(self.repos).to_json(Settings.repositories_file)\n del Settings.repositories\n\n Generate()\n Compress()\n Deploy()\n Clean()",
"def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val",
"def update_repos():\n with open(repolist_file, \"r\") as repofile:\n repolist = repofile.readlines()\n for idx in xrange(len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[i]',l):\n repodir = clone_dir + \"/\" + os.path.basename(l)\n git(\"fetch\", \"--all\", cwd = repodir)\n pass",
"def set_pinned_by_index(self, ids, pinned=True):\n\n if isinstance(ids, (int, long)):\n ids = [ids]\n\n ids = [int(id) for id in ids]\n\n update_result = self.update_many({ '$and': [{ pair_data.SEQUENCE: { '$in': ids } }, { pair_data.PROPERTIES + '.' + pair_data.DISABLED: False }] }, \n { '$set': { pair_data.PROPERTIES + '.' + pair_data.PINNED: pinned }})\n\n return update_result.matched_count == update_result.modified_count and update_result.matched_count > 0",
"def save_users(users):\n with open(STORAGE_PATH, \"wb\") as fp:\n pickle.dump(users, fp)",
"def save_users(self):\n\n User.user_list.append(self)",
"def handle_bookmark(user_id, project_id, status):\n user = user_collection.find_one({\"_id\": user_id})\n bookmark_list = user[\"bookmarks\"]\n if status:\n bookmark_list.append(project_id)\n else:\n bookmark_list.remove(project_id)\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"bookmarks\": bookmark_list,\n }\n },\n upsert=False,\n )",
"def add_to_repository(user):\n try:\n repository.add(user)\n except (KeyError, DuplicateIndexError):\n raise",
"def save(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.fileList ) )\n f.close()\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.userList ) )\n f.close()",
"def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]",
"def user_repositories(self, host: (str), user: (str)) -> Any:\n return search_api(\"user_repositories\", host, user)",
"def test_pinned_ordering(self):\n # Add test projects.\n add_project(title='Title 1', description='Project 1', pinned=False)\n add_project(title='Title 2', description='Project 2', pinned=True)\n add_project(title='Title 3', description='Project 3', pinned=False)\n add_project(title='Title 4', description='Project 4', pinned=True)\n add_project(title='Title 5', description='Project 5', pinned=False)\n\n # Make sure pinned projects are retrieved before unpinned.\n projects = Project.objects.all()\n for index, project in enumerate(projects):\n if index > 0 and not projects[index-1].pinned and project.pinned:\n self.fail('Unpinned project retrieved before pinned project.')",
"def pinboard(auth_token):\n\n\n existing_links = requests.get('https://api.pinboard.in/v1/posts/all', params={\n 'auth_token': auth_token,\n 'format': 'json',\n }).json()\n existing_links = set(link['href'] for link in existing_links)\n\n entries = get_entries()\n unsynced_entries = [e for e in entries if urlify(e) not in existing_links]\n\n print(f\"{len(unsynced_entries)} entries to sync...\")\n\n def show_item(entry):\n if entry == None:\n return \"\"\n return (entry.title or urlify(entry))[:50]\n\n with click.progressbar(unsynced_entries, item_show_func=show_item, label='Exporting to Pinboard...') as bar:\n for entry in bar:\n url = urlify(entry)\n title = entry.title or url\n\n time.sleep(3)\n\n res = requests.get('https://api.pinboard.in/v1/posts/add', params={\n 'auth_token': auth_token,\n 'url': url,\n 'description': title,\n 'extended': entry.summary,\n 'toread': 'no' if entry.read else 'yes',\n 'replace': 'no',\n 'dt': entry.time_added.isoformat(),\n 'tags': [slugify(tag) for tag in entry.tags],\n 'format': 'json',\n })\n\n result_code = res.json()['result_code']\n if result_code != 'done' and result_code != 'item already exists':\n click.echo(f\"Failed to add {title} with result code '{result_code}'.\", err=True)",
"def clone_repos():\n with open(repolist_file, \"r+\") as repofile:\n repolist = repofile.readlines()\n for idx in range(0,len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[^\\six#]',l):\n # clone repo\n repo = l\n if not git(\"clone\", \"--mirror\", repo, cwd = clone_dir):\n continue\n # mark as cloned\n repo = \"i {0}\\n\".format(repo)\n repolist[idx] = repo\n repofile.seek(0)\n repofile.truncate(0)\n repofile.flush()\n repofile.writelines(repolist)\n pass",
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def save_user_unions(*args):\n return _ida_hexrays.save_user_unions(*args)",
"def save(self):\n\n err = C.git_remote_save(self._remote)\n check_error(err)",
"def save_users(user):\n user.save_user()",
"def list_repos(self):\n return sorted(self.user_con.list_repos())",
"def test_create_repositories_by_username_by_repo_slug_commit_by_node_statuses_build(self):\n pass",
"def updateGrabberListFromGitHub(self):\n url = \"https://raw.githubusercontent.com/swharden/QRSSplus/master/grabbers.csv\"\n headers = {'User-Agent': 'Wget/1.12 (linux-gnu)'}\n req = urllib2.Request(url, headers=headers)\n r = urllib2.urlopen(req, timeout=3)\n raw = r.read()\n raw = raw.split(\"\\n\")\n raw = [x.strip() for x in raw]\n raw = [x for x in raw if len(x)]\n raw = \"\\n\".join(raw)\n f = open(\"grabbers.csv\", 'w')\n f.write(raw)\n f.close()\n self.log(\"Downloaded the latest grabbers.csv\")",
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def saveEditorsList(self, editors):\n for editor in editors:\n ok = editor.saveFile()\n if ok:\n self.setEditorName(editor, editor.getFileName())",
"def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r"
]
| [
"0.5855746",
"0.580509",
"0.5551184",
"0.5105534",
"0.49644792",
"0.4959064",
"0.49568492",
"0.4902213",
"0.48758385",
"0.4837906",
"0.4787995",
"0.47868484",
"0.47789016",
"0.4749357",
"0.47052976",
"0.4705086",
"0.46817786",
"0.46698788",
"0.4640423",
"0.4619886",
"0.46059602",
"0.46015152",
"0.45991313",
"0.45942512",
"0.45662627",
"0.453525",
"0.45330137",
"0.45217612",
"0.45141804",
"0.45002514"
]
| 0.66172725 | 0 |
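One detail the collectors above share but never explain is why the cursor read back from Redis is wrapped in double quotes before the first call, while the cursor taken from pageInfo inside the loop is passed as-is. A plausible reading, and it is only an assumption about the unnamed GraphQL client, is that the client splices argument values verbatim into a query string, so a cached cursor has to arrive as a quoted GraphQL string literal. The `build_query` helper and the cursor value below are purely hypothetical, sketched only to illustrate that idea.

```python
def build_query(field, first, after=None):
    # Splice arguments verbatim into a GraphQL query string (illustrative only).
    args = 'first: {}'.format(first)
    if after is not None:
        args += ', after: {}'.format(after)  # `after` must already be a quoted string literal
    return '{{ user {{ {}({}) {{ edges {{ node {{ id }} }} }} }} }}'.format(field, args)

cursor = 'Y3Vyc29yOjEwMA=='  # made-up raw cursor, as it would sit in Redis
print(build_query('pinnedRepositories', 100, after='"{}"'.format(cursor)))
# { user { pinnedRepositories(first: 100, after: "Y3Vyc29yOjEwMA==") { edges { node { id } } } } }
```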
Saves a list of public keys associated with this user. | def save_public_keys(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':publicKeys:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
        public_keys = user.publicKeys(first=100, after=end_cursor)
else:
        public_keys = user.publicKeys(first=100)
if not public_keys:
return False
while True:
if public_keys['data']['user']['publicKeys']['edges']:
index = ''.join(['gh_public_keys-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubPublicKeys',
document=public_keys,
login=user.login,
path=path)
has_next_page = public_keys['data']['user']['publicKeys']['pageInfo']['hasNextPage']
end_cursor = public_keys['data']['user']['publicKeys']['pageInfo']['endCursor']
if has_next_page:
                public_keys = user.publicKeys(first=100, after=end_cursor)
else:
# Cache the end_cursor where we last collected data
                self.redis.set(''.join(['gh:', user.login, ':publicKeys:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_keys(self, save_path=DEFAULT_KEY_PATH):\n try:\n with open(f'{save_path}/id_elgamal', 'w') as f:\n f.write(self.keys['private'])\n with open(f'{save_path}/id_elgamal.pub', 'w') as f:\n f.write(self.keys['public']['p'] + '\\n')\n f.write(self.keys['public']['g'] + '\\n')\n f.write(self.keys['public']['y'] + '\\n')\n debug_message('Saving complete!')\n return 1\n except Exception:\n debug_message(f'Saving error! ({Exception})')\n return 0",
"def update_keys(user_id):\n\n if not request.json:\n abort(400)\n\n new_pub_keys = request.json[\"public_keys\"]\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n db_pub_keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?;\", [user_id, PK_STATUS_OK]):\n db_pub_keys.append(row[0])\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n\n to_add = []\n to_revoke = []\n\n # Put the keys not present in the database in the list of keys to add\n for new_key in new_pub_keys:\n if(new_key not in db_pub_keys):\n to_add.append((user_id, new_key, PK_STATUS_OK))\n # Put the keys not in the new list in the list of keys to revoke\n for db_key in db_pub_keys:\n if(db_key not in new_pub_keys):\n to_revoke.append((PK_STATUS_REVOKED, user_id, db_key))\n\n try:\n db.executemany('INSERT INTO public_keys (username, public_key, status) VALUES (?,?,?);', to_add)\n db.executemany('UPDATE public_keys SET status=? WHERE username=? AND public_key=?;', to_revoke)\n db_conn.commit()\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n\n return jsonify({'status':True})",
"def get_public_keys():\n return public_keys",
"def get_keys(self):\n return list(self.public_keys.keys())",
"def upload_public_key():\n log('Adicionando chave publica no servidor', green)\n ssh_file = '~/.ssh/id_rsa.pub'\n target_path = '~/.ssh/uploaded_key.pub'\n put(ssh_file, target_path)\n run('echo `cat ~/.ssh/uploaded_key.pub` >> ~/.ssh/authorized_keys && rm -f ~/.ssh/uploaded_key.pub')",
"def public_keys(self) -> pulumi.Input[Sequence[pulumi.Input['ContainerServiceSshPublicKeyArgs']]]:\n return pulumi.get(self, \"public_keys\")",
"def save(self, save_dir):\n path = os.path.join(save_dir, self.name + \".pem\")\n with open(path, \"wb\") as f:\n f.write(self.public_key)",
"def putPubs(self, key, val):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\") # convert str to bytes\n return self.putVal(self.pubs, key, val)",
"def _save_keys(self) -> None:\n algorithm = self.algorithm_combobox.currentText()\n filename = AesKeyGenerator(algorithm).save_session_key()\n msg_success(f\"Created keys as {filename}\")",
"def public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LinuxProfilePropertiesPublicKeysArgs']]]]:\n return pulumi.get(self, \"public_keys\")",
"def export_pubkeys(keyids, homedir=None, timeout=GPG_TIMEOUT):\n public_key_dict = {}\n for gpg_keyid in keyids:\n public_key = export_pubkey(gpg_keyid, homedir=homedir, timeout=timeout)\n keyid = public_key[\"keyid\"]\n public_key_dict[keyid] = public_key\n\n return public_key_dict",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def write_keys(path, keys):\n p_keys = pickle.dumps(keys)\n b_keys = base64.b64encode(p_keys)\n with open(path, \"wb+\") as walletfile:\n walletfile.write(b_keys)",
"def getPublicKeys(self):\n ks = {}\n for filename in os.listdir(self.dataRoot):\n if filename[:9] == 'ssh_host_' and filename[-8:]=='_key.pub':\n try:\n k = keys.Key.fromFile(\n os.path.join(self.dataRoot, filename))\n t = common.getNS(k.blob())[0]\n ks[t] = k\n except Exception as e:\n log.msg('bad public key file %s: %s' % (filename, e))\n return ks",
"def store_public_key(public_key,filename):\n\twith open(str(filename) +'_pub_key.pem','wb') as fin:\n\t\tpem = public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t\t)\n\t\tfin.write(pem)",
"def post(self):\n user = users.get_current_user()\n if not user or not users.is_current_user_admin():\n self.abort(400)\n key_data = self.request.POST.get('default_public_key').value\n email = self.request.POST.get('email', '')\n key_name = \"default encryption key\"\n key_description = \"This is the default encryption key used when no user encryption key found.\"\n is_default_key = True\n if len(email) > 0:\n is_default_key = False\n user = users.User(email)\n key_name = \"encryption key for %s\" % user.nickname()\n key_description = \"The encryption key used for encrypting data uploaded by %s\" % user.nickname()\n is_success = KeyUtils.save_publickey(key_data, key_name, key_description, is_default_key, user) \n self.response.write({'status' : 'success' if is_success else 'failure'})",
"def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))",
"def load_keys(self, keys: List[Union[str, jwcrypto.jwk.JWK]] = None):\n if keys:\n for key in keys:\n if isinstance(key, jwcrypto.jwk.JWK):\n self.jwk_set.add(key)\n logger.info(\"Added {0} key {1}\".format(key.key_type, key.key_id))\n else:\n pem = open(key, \"rb\").read()\n\n jwk_obj = jwcrypto.jwk.JWK.from_pem(pem)\n self.jwk_set.add(jwk_obj)\n logger.info(\"Added {0} key {1}\".format(jwk_obj.key_type, jwk_obj.key_id))",
"def get_public_keys(self):\n return self.control_connection.call('get_agents_publickeys')",
"def get_keys(user_id):\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n keys = []\n try:\n for row in db.execute(\"SELECT public_key FROM public_keys WHERE username=? AND status=?\", [user_id, PK_STATUS_OK]):\n keys.append({\"public\": row[0]})\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n if(keys == []):\n abort(404)\n return jsonify({'user':{'username':user_id, 'keys':keys}})",
"def put_keys(set_name, keys, value, send_key):\n\tfor key in keys:\n\t\tlib.write_record(set_name, key, [\"value\"], [value], send_key)",
"def _gpg_keys(self) -> ListKeys:\n return self.gpg.list_keys()",
"def __init__(__self__, *,\n public_keys: pulumi.Input[Sequence[pulumi.Input['ContainerServiceSshPublicKeyArgs']]]):\n pulumi.set(__self__, \"public_keys\", public_keys)",
"def save(self):\n users = User.getall()\n users[self.username] = dict(self)\n return self.db().put(self.udb, users)",
"def save_users(users):\n with open(STORAGE_PATH, \"wb\") as fp:\n pickle.dump(users, fp)",
"def save(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.fileList ) )\n f.close()\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n f = open( settings_path, 'w' )\n f.write( simplejson.dumps( self.userList ) )\n f.close()",
"def setPubs(self, key, val):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\") # convert str to bytes\n return self.setVal(self.pubs, key, val)",
"def send_keys(self, element, keys):\n pass",
"def public_key(self, public_key):\n\n self._public_key = public_key",
"def saveUsersProfiles_(self, plist):\r\n \r\n LogInfo(u\"Saving update profiles with PublicationDate %@\", plist[u\"PublicationDate\"])\r\n if not plist.writeToFile_atomically_(self.userUpdateProfilesPath, False):\r\n LogError(u\"Failed to write %@\", self.userUpdateProfilesPath)"
]
| [
"0.6537619",
"0.6313641",
"0.5977645",
"0.57419777",
"0.5697117",
"0.56217366",
"0.55947566",
"0.55806047",
"0.55734",
"0.55458343",
"0.5535723",
"0.5477158",
"0.5443913",
"0.5440483",
"0.5410733",
"0.54072666",
"0.5363117",
"0.535979",
"0.535871",
"0.5317353",
"0.52932185",
"0.52221304",
"0.51862353",
"0.5182071",
"0.51707524",
"0.5169577",
"0.5156464",
"0.51360214",
"0.5093526",
"0.5092528"
]
| 0.6494126 | 1 |
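To make the pagination sketch from a few rows back concrete, here is a short usage example with in-memory fakes for Redis and the GraphQL call. It runs one collection pass over two canned pages and checks that the final end cursor is cached under the same style of key the collectors use. `FakeRedis`, `make_fake_fetch`, `make_page`, and the page contents are hypothetical test scaffolding, not part of the original code.

```python
class FakeRedis:
    """Just enough of the Redis client used above: get returns bytes, set stores a string."""
    def __init__(self):
        self.store = {}

    def get(self, key):
        value = self.store.get(key)
        return value.encode('utf-8') if value is not None else None

    def set(self, key, value):
        self.store[key] = value


def make_fake_fetch(pages):
    """Return a fetch_page callable that serves canned GraphQL-style pages."""
    def fetch_page(first, after=None):
        index = 0 if after is None else int(after.strip('"')) + 1
        return pages[index] if index < len(pages) else None
    return fetch_page


def make_page(cursor, has_next, n_edges):
    return {'data': {'user': {'publicKeys': {
        'edges': [{}] * n_edges,
        'pageInfo': {'hasNextPage': has_next, 'endCursor': cursor},
    }}}}


pages = [make_page('0', True, 100), make_page('1', False, 37)]
redis_conn = FakeRedis()
written = []

assert paginate_connection(redis_conn, 'gh:someuser:publicKeys:endCursor',
                           make_fake_fetch(pages), written.append,
                           ('data', 'user', 'publicKeys')) is True
assert len(written) == 2                                            # both pages were written
assert redis_conn.store['gh:someuser:publicKeys:endCursor'] == '1'  # cursor cached for next run
```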
Saves a list of pull requests associated with this user. | def save_pull_requests(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':pullRequests:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
        pull_requests = user.pullRequests(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}')
else:
        pull_requests = user.pullRequests(first=100,
orderBy='{direction: DESC, field: CREATED_AT}')
if not pull_requests:
return False
while True:
if pull_requests['data']['user']['pullRequests']['edges']:
index = ''.join(['gh_pull_requests-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubPullRequests',
document=pull_requests,
login=user.login,
path=path)
has_next_page = pull_requests['data']['user']['pullRequests']['pageInfo']['hasNextPage']
end_cursor = pull_requests['data']['user']['pullRequests']['pageInfo']['endCursor']
if has_next_page:
                pull_requests = user.pullRequests(first=100,
after=end_cursor,
orderBy='{direction: DESC, field: CREATED_AT}')
else:
# Cache the end_cursor where we last collected data
                self.redis.set(''.join(['gh:', user.login, ':pullRequests:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(self):\n resp = yield self.client.request(\n self.repo.base_path + \"/pulls\", params={\n \"title\": self.title,\n \"head\": self.head,\n \"base\": self.base,\n \"body\": self.body,\n \"maintainer_can_modify\": self.maintainer_can_modify\n },\n method=\"POST\")\n self.c = resp.data\n self.after_sync()\n self.num = self.c[\"number\"]\n raise gen.Return(self)",
"def pull_requests_model(self, entry_info, repo_id):\n github_url = entry_info['given']['github_url']\n\n logging.info('Beginning collection of Pull Requests...\\n')\n logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\\n')\n record_model_process(self, repo_id, 'pull_requests')\n\n owner, repo = self.get_owner_repo(github_url)\n\n url = (f'https://api.github.com/repos/{owner}/{repo}/pulls?state=all&' +\n 'direction=asc&per_page=100&page={}')\n\n # Get pull requests that we already have stored\n # Set pseudo key (something other than PK) to \n # check dupicates with\n table = 'pull_requests'\n table_pkey = 'pull_request_id'\n update_col_map = {'pr_src_state': 'state'} \n duplicate_col_map = {'pr_src_id': 'id'}\n\n #list to hold pull requests needing insertion\n prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, \n where_clause='WHERE repo_id = {}'.format(repo_id),\n value_update_col_map={'pr_augur_contributor_id': float('nan')})\n\n # Discover and remove duplicates before we start inserting\n logging.info(\"Count of pull requests needing update or insertion: \" + str(len(prs)) + \"\\n\")\n\n for pr_dict in prs:\n\n pr = {\n 'repo_id': repo_id,\n 'pr_url': pr_dict['url'],\n 'pr_src_id': pr_dict['id'],\n 'pr_src_node_id': None,\n 'pr_html_url': pr_dict['html_url'],\n 'pr_diff_url': pr_dict['diff_url'],\n 'pr_patch_url': pr_dict['patch_url'],\n 'pr_issue_url': pr_dict['issue_url'],\n 'pr_augur_issue_id': None,\n 'pr_src_number': pr_dict['number'],\n 'pr_src_state': pr_dict['state'],\n 'pr_src_locked': pr_dict['locked'],\n 'pr_src_title': pr_dict['title'],\n 'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']),\n 'pr_body': pr_dict['body'],\n 'pr_created_at': pr_dict['created_at'],\n 'pr_updated_at': pr_dict['updated_at'],\n 'pr_closed_at': pr_dict['closed_at'],\n 'pr_merged_at': pr_dict['merged_at'],\n 'pr_merge_commit_sha': pr_dict['merge_commit_sha'],\n 'pr_teams': None,\n 'pr_milestone': pr_dict['milestone']['title'] if pr_dict['milestone'] else None,\n 'pr_commits_url': pr_dict['commits_url'],\n 'pr_review_comments_url': pr_dict['review_comments_url'],\n 'pr_review_comment_url': pr_dict['review_comment_url'],\n 'pr_comments_url': pr_dict['comments_url'],\n 'pr_statuses_url': pr_dict['statuses_url'],\n 'pr_meta_head_id': None,\n 'pr_meta_base_id': None,\n 'pr_src_issue_url': pr_dict['issue_url'],\n 'pr_src_comments_url': pr_dict['comments_url'], # NOTE: this seems redundant\n 'pr_src_review_comments_url': pr_dict['review_comments_url'], # this too\n 'pr_src_commits_url': pr_dict['commits_url'], # this one also seems redundant\n 'pr_src_statuses_url': pr_dict['statuses_url'],\n 'pr_src_author_association': pr_dict['author_association'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API'\n }\n\n if pr_dict['flag'] == 'need_insertion':\n logging.info(f'PR {pr_dict[\"id\"]} needs to be inserted\\n')\n\n result = self.db.execute(self.pull_requests_table.insert().values(pr))\n logging.info(f\"Added Pull Request: {result.inserted_primary_key}\")\n self.pr_id_inc = int(result.inserted_primary_key[0])\n\n elif pr_dict['flag'] == 'need_update':\n result = self.db.execute(self.pull_requests_table.update().where(\n self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr))\n logging.info(\"Updated tuple in the pull_requests table with existing pr_src_id: {}\".format(\n pr_dict['id']))\n self.pr_id_inc = pr_dict['pkey']\n\n else:\n logging.info(\"PR does not need to be inserted. 
Fetching its id from DB\")\n pr_id_sql = s.sql.text(\"\"\"\n SELECT pull_request_id FROM pull_requests\n WHERE pr_src_id={}\n \"\"\".format(pr_dict['id']))\n\n self.pr_id_inc = int(pd.read_sql(pr_id_sql, self.db).iloc[0]['pull_request_id'])\n\n self.query_labels(pr_dict['labels'], self.pr_id_inc)\n self.query_pr_events(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_pr_comments(owner, repo, pr_dict['number'], self.pr_id_inc)\n self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc)\n self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc)\n\n logging.info(f\"Inserted PR data for {owner}/{repo}\")\n self.results_counter += 1\n\n register_task_completion(self, entry_info, repo_id, 'pull_requests')",
"def get_pull_requests():\n pull_requests = []\n url_base = f\"https://github.com/{GITHUB_OWNER}/{GITHUB_REPO}/pull/\"\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n pulls = repo.get_pulls(base=\"main\", state=\"closed\")\n last_release_date = repo.get_latest_release().published_at\n for pull in pulls:\n if not pull.draft and pull.closed_at > last_release_date and pull.merged:\n log_line = f\"* {pull.title} [#{pull.number}]({url_base}{pull.number})\"\n pull_requests.append(log_line)\n return pull_requests",
"def do_the_pulls(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp_dir:\n path = os.path.join(tmp_dir, \"{}_{}_pulls.txt\".format(repo_id, user_id)\n )\n\n # the first request for pull\n the_url = get_initial_url_pulls(user_id, repo_id)\n resp_obj = requests.get(the_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n\n # prase the initial request.\n rsp_json = json.loads(resp_obj.text)\n print(\"the len of resp is {}\".format(len(rsp_json)))\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n\n # subsequent requests for pull\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n pull_requests = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for a_pull_request in pull_requests:\n print(a_pull_request, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))",
"def create_pull_requests(self, repos, key, msrp, summary, cred_hash, qa_title):\n response = {'status': True, 'data': []}\n\n for repo in repos:\n repo_name = repo['repositoryName']\n reviewed_branch = repo['reviewedBranch']\n base_branch = repo['baseBranch']\n\n json_data = {\n \"title\": qa_title,\n \"description\": summary,\n \"state\": \"OPEN\",\n \"open\": True,\n \"closed\": False,\n \"fromRef\": {\n \"id\": f\"refs/heads/{reviewed_branch}\",\n \"repository\": {\n \"slug\": repo_name,\n \"name\": None,\n \"project\": {\n \"key\": self.code_cloud_api.project_name\n }\n }\n },\n \"toRef\": {\n \"id\": f\"refs/heads/{base_branch}\",\n \"repository\": {\n \"slug\": repo_name,\n \"name\": None,\n \"project\": {\n \"key\": self.code_cloud_api.project_name\n }\n }\n },\n \"locked\": False,\n \"reviewers\": [],\n \"links\": {\"self\":[None]}\n }\n\n url = f'{self.code_cloud_api.branch_api}/{repo_name}/pull-requests'\n pull_response = self.code_cloud_api.post_json(\n url=url, \n json_data=json_data, \n cred_hash=cred_hash\n )\n\n if not pull_response['status']:\n response['data'].append({\n 'error': pull_response['data']['errors'][0]['message'],\n 'repo': repo_name\n })\n else:\n response['data'].append({\n 'link': pull_response['data']['links']['self'][0]['href'],\n 'repo': repo_name\n })\n\n return response",
"def pullrequests(self):\r\n return pullrequests.PullRequests(self)",
"async def run(self) -> list:\n issues = self.adapter.get_sprint_board_issues()\n pull_requests = await self.adapter.get_pull_requests(issues)\n return pull_requests",
"def post(self):\n parser = reqparse.RequestParser()\n for arg in self.REQUIRED_ARGUMENTS:\n parser.add_argument(arg)\n parser.add_argument('token') # token is also required but it is checking separately\n args = parser.parse_args()\n\n if not args.get('token'):\n return {'message': 'Missing authorization token'}, 401\n\n if all(args.get(key) for key in self.REQUIRED_ARGUMENTS):\n repo_owner, repo_name = args['repository'].split('/') # full repository name format : 'owner/repo_name'\n\n post_data = {\"title\": args.get('title'),\n \"body\": args.get('body') or \"This is a pull request.\",\n \"head\": '{}:{}'.format(repo_owner, args['changeset']),\n # Regards to Github API documentation, changeset is the branch name\n \"base\": args.get('base')}\n\n headers = {'Authorization': 'Basic ' + args.get('token')}\n r = requests.post(flask.current_app.config['GITHUB_API_CREATE_PULL_REQUEST'].format(owner=repo_owner, repos=repo_name),\n data=json.dumps(post_data), headers=headers)\n\n if r.status_code == 201:\n number = r.json().get('number')\n resp = self._request_reviews(args.get('token'), repo_owner, repo_name, number, args.get('reviewers'))\n\n return resp.json(), r.status_code\n\n return r.json(), r.status_code\n else:\n missing_args = set(self.REQUIRED_ARGUMENTS) - {key for key in args.keys() if args.get(key)}\n return {'message': 'Missing required arguments : ' + ','.join(sorted(list(missing_args)))}, 422",
"def post(self, request, framework):\n self._logger.debug(\"WebhookScoreBot post entered.\")\n\n pr_data = request.data.get(\"pull_request\")\n if not pr_data:\n self._logger.error(f\"Invalid pull request data received: {pr_data}\")\n return response.Response(status=status.HTTP_400_BAD_REQUEST)\n\n pr_url = pr_data.get(\"html_url\")\n if not pr_url:\n self._logger.error(f\"Invalid pull request url received: {pr_data}\")\n return response.Response(status=status.HTTP_400_BAD_REQUEST)\n\n user = pr_data.get(\"user\")\n if user:\n login = user.get(\"login\")\n self._logger.info(f\"Username associated with PR: {login}\")\n\n # Drop excluded repos\n excluded_repos = ScorebotConfig.objects.filter(config=\"excluded_repos\")\n excluded_repos = excluded_repos.values()[0][\"value\"].split(\",\") if excluded_repos else []\n\n try:\n repo = pr_url.split(\"/pull\")[0]\n repo = repo[repo.rfind(\"/\")+1:]\n\n except Exception:\n self._logger.error(f\"Invalid pull request url received: {pr_url}\")\n return response.Response(status=status.HTTP_400_BAD_REQUEST)\n\n if repo in excluded_repos:\n self._logger.info(f\"Repo {repo} found in excluded repos list\")\n return response.Response(status=status.HTTP_200_OK)\n\n # Otherwise queue pull request data for processing\n self._logger.info(f\"Queuing action: {pr_url} for pull request\")\n response_status = self._queue_pull_request_data(pr_url, framework)\n\n return response.Response(status=response_status)",
"def handle_contribution(user_id, project_id, status):\n user = user_collection.find_one({\"_id\": user_id})\n contribution_list = user[\"contributions\"]\n if status:\n contribution_list.append(project_id)\n else:\n contribution_list.remove(project_id)\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"contributions\": contribution_list,\n }\n },\n upsert=False,\n )",
"def pull_request_commits_model(self, task_info, repo_id):\n\n # query existing PRs and the respective url we will append the commits url to\n pr_url_sql = s.sql.text(\"\"\"\n SELECT DISTINCT pr_url, pull_requests.pull_request_id\n FROM pull_requests--, pull_request_meta\n WHERE repo_id = {}\n \"\"\".format(repo_id))\n urls = pd.read_sql(pr_url_sql, self.db, params={})\n\n for pull_request in urls.itertuples(): # for each url of PRs we have inserted\n commits_url = pull_request.pr_url + '/commits?page={}'\n table = 'pull_request_commits'\n table_pkey = 'pr_cmt_id'\n duplicate_col_map = {'pr_cmt_sha': 'sha'}\n update_col_map = {}\n\n # Use helper paginate function to iterate the commits url and check for dupes\n pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey, \n where_clause=\"where pull_request_id = {}\".format(pull_request.pull_request_id))\n\n for pr_commit in pr_commits: # post-pagination, iterate results\n if pr_commit['flag'] == 'need_insertion': # if non-dupe\n pr_commit_row = {\n 'pull_request_id': pull_request.pull_request_id,\n 'pr_cmt_sha': pr_commit['sha'],\n 'pr_cmt_node_id': pr_commit['node_id'],\n 'pr_cmt_message': pr_commit['commit']['message'],\n # 'pr_cmt_comments_url': pr_commit['comments_url'],\n 'tool_source': self.tool_source,\n 'tool_version': self.tool_version,\n 'data_source': 'GitHub API',\n }\n result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row))\n logging.info(f\"Inserted Pull Request Commit: {result.inserted_primary_key}\\n\")\n\n register_task_completion(self, task_info, repo_id, 'pull_request_commits')",
"def push_rspecs(host, auth, rspecs):\n for rspec in rspecs:\n description = rspec[\"fields\"][\"description\"]\n click.echo(f\"Pushing {rspec['key']} \", err=True)\n data = {\n \"update\": {\n \"description\": [\n {\n \"set\": description\n }\n ],\n }\n }\n result = requests.put(\n f\"{host}/rest/api/latest/issue/{rspec['key']}\",\n json=data,\n auth=auth\n )\n result.raise_for_status()",
"def get_open_pull_requests(self, required_labels=None):\n json = self.get_all('repos/%(owner)s/%(repo)s/pulls' % {\n 'owner': self.repo_owner,\n 'repo': self.repo_name,\n }, params={\n 'direction': 'asc',\n })\n required_labels = required_labels or []\n\n prs = []\n for pr_data in json:\n # Construct the pull request and issue data structures\n pr_id = pr_data['number']\n issue_obj = self.get_issue(pr_id)\n pr_obj = PullRequest(pr_id, pr_data['title'], pr_data['body'], pr_data['head']['ref'],\n pr_data['html_url'], pr_data['created_at'], pr_data['updated_at'], issue_obj)\n\n # Check if the PR survives the filters\n include_pr = True\n for required_label in required_labels:\n if not pr_obj.has_label(required_label):\n include_pr = False\n break\n if include_pr:\n prs.append(pr_obj)\n\n return prs",
"def get_pulls(self):\n url = self.base_url + 'pulls'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()",
"def update(self):\n resp = yield self.client.request(\n \"{}/pulls/{}\".format(self.repo.base_path, self.num),\n params={\n \"title\": self.title,\n \"body\": self.body,\n \"state\": self.state,\n \"base\": self.base,\n \"maintainer_can_modify\": self.maintainer_can_modify,\n }, method=\"PATCH\")\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)",
"def write_submission_to_db(cls, file_name, obj_list):\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n query = \"DELETE FROM `submission`;\"\n c.execute(query)\n\n for obj in obj_list:\n params = [obj.send_date, obj.name, obj.grade, obj.github_link, obj.student_id]\n c.execute(\"INSERT INTO submission (send_date, grade, name, github_link, student_id) VALUES (?, ?, ?, ?, ?)\", params)\n conn.commit()\n conn.close()",
"def post_to_github(report, user=None, pw=None, proxies=None):\n proxies = proxies or dict()\n # Determine authentication method. No username or password search for\n # configuration file with GITHUB section\n if not user and not pw:\n # Find configuration file\n cfg = ConfigParser()\n cfgs = cfg.read(['web.cfg', '.web.cfg',\n os.path.expanduser('~/.web.cfg'),\n 'qs.cfg', '.qs.cfg',\n os.path.expanduser('~/.qs.cfg')])\n if cfgs:\n # Grab login information\n try:\n user = cfg.get('GITHUB', 'user')\n pw = cfg.get('GITHUB', 'pw')\n except (NoOptionError, NoSectionError):\n logger.debug('No GITHUB section in configuration file '\n 'with user and pw entries')\n # Grab proxy information if we will be using web.cfg\n if (user or pw) and not proxies:\n try:\n proxy_name = cfg.get('GITHUB', 'proxy')\n logger.debug(\"Using proxy host %s\", proxy_name)\n proxies = {'https': proxy_name}\n except NoOptionError:\n logger.debug(\"No proxy information found\")\n # No valid configurations\n else:\n logger.debug('No \"web.cfg\" file found')\n # Manually ask if we didn't get the username or password already\n if not user:\n user = input('Github Username: ')\n if not pw:\n pw = getpass.getpass('Password for GitHub Account {}: '\n ''.format(user))\n # Our url to create issues via POST\n url = 'https://api.github.com/repos/pcdshub/Bug-Reports/issues'\n # Create the body of the template\n env = Environment(loader=PackageLoader('hutch_python'),\n trim_blocks=True, lstrip_blocks=True)\n template = env.get_template('issue.template')\n body = template.render(report)\n # Requests session\n session = requests.Session()\n session.auth = (user, pw)\n session.proxies.update(proxies)\n issue = {'title': report['title'],\n 'body': body,\n 'assignee': None,\n 'milestone': None,\n 'labels': []} # TODO: Determine hutch to create issue for\n # Post to GitHub\n r = session.post(url, simplejson.dumps(issue))\n if r.status_code == 201:\n logger.info(\"Succesfully created GitHub issue\")\n else:\n logger.exception(\"Could not create GitHub issue. HTTP Status Code: %s\",\n r.status_code)",
"def post_list(self, request, **kwargs):\n\t\tself.method_check(request, allowed=['post'])\n\t\tself.is_authenticated(request)\n\n\t\tif not request.user or not request.user.is_authenticated():\n\t\t\treturn self.create_response(request, { 'success': False, 'error_message': 'You are not authenticated, %s' % request.user })\n\n\t\tdata = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\n\t\t# Get the slide node that the user is answering before creation of the submission -- this also needs further checks (e.g. can they even submit to this slide yet?)\n\t\ttry:\n\t\t\tslide_node = Slide.objects.get(pk=data.get(\"slide\"))\n\t\texcept ObjectDoesNotExist as e:\n\t\t\treturn self.create_response(request, {\n\t\t\t\t'success': False,\n\t\t\t\t'error': e\n\t\t\t})\n\n\t\t# Ensuring that the user is who s/he says s/he is, handled by user objs. auth.\n\t\ttry:\n\t\t\tuser_node = AppUser.objects.get(username=data.get(\"user\"))\n\t\texcept ObjectDoesNotExist as e:\n\t\t\t# Is it possible this could occur if the user passes authentication?\n\t\t\treturn self.create_response(request, {\n\t\t\t\t'success': False,\n\t\t\t\t'error': e\n\t\t\t})\n\n\t\t# some validation in the manager class of the model\n\t\t# on errors which are not caught in the manager class, the node will still be created because save is called (too?) soon\n\t\t# look into django.db.models.Model save method for saving behaviour on error?!\n\t\tnode = Submission.objects.create(\n\t\t\tvalue = data.get(\"value\"),\n\t\t\tstarted = data.get(\"started\"),\n\t\t\tfinished = data.get(\"finished\")\n\t\t)\n\t\tif node is None :\n\t\t\t# in case an error wasn't already raised \t\t\t\n\t\t\traise ValidationError('Something went wrong with the submission creation.')\n\t\n\t\t# Form the connections from the new Submission node to the existing slide and user nodes\n\t\tnode.slide = slide_node\n\t\tnode.user = user_node\n\n\t\t# create the body\n\t\tbody = json.loads(request.body) if type(request.body) is str else request.body\n\t\t# data = body.clone()\n\n\t\t# Check to see if the user answered the question correctly or not\n\t\tnode.save()\n\n\t\treturn self.create_response(request, body)",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def update(ID, **updates):\n # Filter out any None values.\n review_updates = {k:v for k,v in updates.items() if v}\n\n if len(review_updates) > 0:\n data = json_encode(review_updates)\n gh_request('POST', '/repos/:user/:repo/pulls/:id', uri_vars={'id': ID}, body=data)\n printers.print_review_updated()",
"def post_pr_review(owner, repo,\n commit_sha, pull_number,\n message, event='COMMENT'):\n review = {\n 'commit_id': commit_sha,\n 'body': message,\n 'event': event,\n }\n\n res = post(GIT_PULL_REVIEW_URL.format(host=host_api,\n owner=owner,\n repo=repo,\n pull_number=pull_number),\n json=review, auth=auth)\n assert res.status_code == 200, f'Got non 201 status, ' \\\n f'error message: {res.content}'",
"async def remind_about_pull_requests(self, issues: list) -> None:\n author = deepcopy(self.blocks['author'])\n author['elements'][1]['text'] = self.version\n starting_blocks = [\n self.blocks['header'],\n author,\n self.blocks['divider'],\n ]\n message = {'blocks': deepcopy(starting_blocks)}\n tasks = []\n for issue in issues:\n pull_requests = self._create_pull_requests_descriptions(issue['pull_requests'])\n if pull_requests:\n title = deepcopy(self.blocks['title'])\n title['text']['text'] = f':bender: *[{issue[\"key\"]}] {issue[\"title\"]}*'\n message['blocks'].extend([title] + pull_requests + [self.blocks['divider']])\n\n if len(message['blocks']) > 45:\n tasks.append(asyncio.create_task(self.send_message(message)))\n message = {'blocks': deepcopy(starting_blocks)}\n\n if not tasks:\n tasks = [asyncio.create_task(self.send_message(message))]\n\n set_key_in_redis('slack-known-user-ids', self.known_user_ids)\n await asyncio.gather(*tasks)",
"def save_issues(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n last_run = self.redis.get('ghc_last_run').decode('utf-8')\n if last_run is None:\n last_run = '2004-01-01' # pull everything\n\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':issues:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n issues = u.issues(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n issues = u.issues(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not issues:\n return False\n\n while True:\n if issues['data']['user']['issues']['edges']:\n index = ''.join(['gh_issues-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubIssues',\n document=issues,\n login=user.login,\n path=path)\n has_next_page = issues['data']['user']['issues']['pageInfo']['hasNextPage']\n end_cursor = issues['data']['user']['issues']['pageInfo']['endCursor']\n if has_next_page:\n issues = u.issues(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}',\n filterBy='{ since: \"'+last_run+'\" }')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':issues:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def post_list(self, request, **kwargs):\n deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\n # Force this in an ugly way, at least should do \"reverse\"\n deserialized[\"user\"] = \"/api/v1/user/%s/\" % request.user.id\n bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized))\n self.is_valid(bundle, request)\n updated_bundle = self.obj_create(bundle, request=request)\n return HttpCreated(location=self.get_resource_uri(updated_bundle))",
"def main(assignee, browse, force, file, message, issue, base, head):\n # Above is copy/pasted from `man hub`\n\n branch_ready, error_msg = current_branch_is_pushed()\n if not branch_ready:\n if force:\n click.echo(\"force-opening not yet supported\")\n else:\n raise Exception(error_msg)\n\n assignment_label = get_assignment_label()\n if assignment_label is None:\n raise Exception(\"No label with the text 'review' and without the text 'self' found\")\n\n if not validate_assignee(assignee):\n raise Exception(\"No assignee named {} found\".format(assignee))\n\n if not message and not file:\n message = get_message()\n\n issue_number = create_pull_request(browse, force, file, message, issue, base, head)\n\n if not label_and_assign(issue_number, assignment_label, assignee):\n raise Exception(\"Failed to mark issue {issue_number} with label {label} and assign {assignee}\".format(\n issue_number=issue_number,\n label=assignment_label,\n assignee=assignee\n ))\n\n click.echo('PR opened!')",
"def submit_sr(cls, user):\r\n sub_ids = cls.user_subreddits(user, False)\r\n srs = Subreddit._byID(sub_ids, True,\r\n return_dict = False)\r\n srs = [s for s in srs if s.can_submit(user) or s.name == g.default_sr]\r\n\r\n # Add the discussion subreddit manually. Need to do this because users\r\n # are not subscribed to it.\r\n try:\r\n discussion_sr = Subreddit._by_name('discussion')\r\n if discussion_sr._id not in sub_ids and discussion_sr.can_submit(user):\r\n srs.insert(0, discussion_sr)\r\n except NotFound:\r\n pass\r\n try:\r\n meetup_sr = Subreddit._by_name('meetups')\r\n if meetup_sr in srs:\r\n srs.remove(meetup_sr)\r\n except NotFound:\r\n pass\r\n\r\n srs.sort(key=lambda a:a.title)\r\n return srs",
"def process_pull_requests(git_client, repo_id, pull, ignore_words, ignore_extensionless_files=True) -> [dict, list, list]:\n processed_changes = {}\n commits = git_client.get_pull_request_commits(repo_id, pull.pull_request_id)\n\n ignored_commits = []\n processed_commits = []\n\n for commit in commits:\n commit_text = f'{commit.comment}, author: {color.Fore.BLUE}{color.Style.BRIGHT}{commit.author.name}{color.Style.RESET_ALL}'\n ignore_commit = False\n for word in ignore_words:\n if commit.comment.lower().find(word) != -1:\n ignore_commit = True\n break\n\n if not ignore_commit:\n changes = git_client.get_changes(commit.commit_id, repo_id).changes\n for change in changes:\n file_name = change['item']['path']\n\n if not '.' in file_name and ignore_extensionless_files:\n continue\n\n counter = processed_changes.get(file_name)\n if not counter:\n counter = 0\n counter +=1\n processed_changes[file_name] = counter\n processed_commits.append(commit_text)\n else:\n ignored_commits.append(commit_text)\n\n return processed_changes, processed_commits, ignored_commits",
"def AddReviewers(host, change, add=None):\n if not add:\n return\n if isinstance(add, basestring):\n add = (add,)\n path = '%s/reviewers' % _GetChangePath(change)\n for r in add:\n body = {'reviewer': r}\n jmsg = FetchUrlJson(host, path, reqtype='POST', body=body, ignore_404=False)\n return jmsg",
"def post_on_github(params, logger=None):\r\n\r\n try:\r\n logger.debug(\"[*] Trying to upload file(s) {f} in {r} for handle {h}\".format(\r\n f=params[\"to_be_uploaded_file_list\"], r=params[\"repo\"], h=params[\"user\"]))\r\n\r\n g = Github(params[\"user\"], params[\"password\"])\r\n\r\n repo = g.get_user().get_repo(params[\"repo\"])\r\n file_list = params[\"to_be_uploaded_file_list\"]\r\n\r\n file_names = [x.rsplit(\"/\", 1)[1] for x in file_list]\r\n if params[\"commit_message\"] is None:\r\n commit_message = 'KML-file update {}'.format(\r\n randint(0, 100) * randint(0, 100) / randint(1, 100))\r\n else:\r\n commit_message = params[\"commit_message\"]\r\n\r\n master_ref = repo.get_git_ref('heads/' + str(params[\"branch\"]))\r\n master_sha = master_ref.object.sha\r\n base_tree = repo.get_git_tree(master_sha)\r\n\r\n element_list = list()\r\n for i, entry in enumerate(file_list):\r\n with open(entry) as input_file:\r\n data = input_file.read()\r\n element = InputGitTreeElement(\r\n file_names[i], '100644', 'blob', data)\r\n element_list.append(element)\r\n\r\n tree = repo.create_git_tree(element_list, base_tree)\r\n parent = repo.get_git_commit(master_sha)\r\n\r\n commit = repo.create_git_commit(commit_message, tree, [parent])\r\n master_ref.edit(commit.sha)\r\n except Exception as e:\r\n logger.critical(\"Exception: {}\".format(e))\r\n return False\r\n\r\n logger.info(\"[*] Uploading successful!\")\r\n return True",
"def pull(args):\n do_all_projects(args + [\"pull\"])"
]
| [
"0.550293",
"0.54397523",
"0.5235431",
"0.5223959",
"0.5181559",
"0.512005",
"0.51147157",
"0.50955063",
"0.50473064",
"0.48162797",
"0.48110226",
"0.4776085",
"0.47734773",
"0.47709897",
"0.4756326",
"0.47487405",
"0.47319442",
"0.47248226",
"0.46943775",
"0.46660092",
"0.46495166",
"0.46459937",
"0.46443927",
"0.4627021",
"0.46262145",
"0.4624306",
"0.4587671",
"0.4587427",
"0.45856065",
"0.45825285"
]
| 0.62108105 | 0 |
Saves a list of repositories that the user owns. | def save_repositories(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositories:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
            repositories = user.repositories(first=100,
                                             after=end_cursor,
                                             orderBy='{direction: DESC, field: CREATED_AT}')
else:
            repositories = user.repositories(first=100,
                                             orderBy='{direction: DESC, field: CREATED_AT}')
if not repositories:
return False
while True:
if repositories['data']['user']['repositories']['edges']:
index = ''.join(['gh_repositories-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubRepositories',
document=repositories,
login=user.login,
path=path)
has_next_page = repositories['data']['user']['repositories']['pageInfo']['hasNextPage']
end_cursor = repositories['data']['user']['repositories']['pageInfo']['endCursor']
if has_next_page:
                    repositories = user.repositories(first=100,
                                                     after=end_cursor,
                                                     orderBy='{direction: DESC, field: CREATED_AT}')
else:
# Cache the end_cursor where we last collected data
                    self.redis.set(''.join(['gh:', user.login, ':repositories:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstream_repo(project)\n if upperstream_repo is not None:\n _ownered_project.append((upperstream_repo, upperstream_repo + \"(Upperstream of %s)\" % project))\n\n User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())\n\n # mongoDB don't support key value contains '.'\n for i in range(len(_ownered_project)):\n _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])\n User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))\n\n flash('Refresh your own GitHub repositories list successfully!', 'success')\n return redirect(url_for('main.load_from_github'))",
"def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val",
"def saveEditorsList(self, editors):\n for editor in editors:\n ok = editor.saveFile()\n if ok:\n self.setEditorName(editor, editor.getFileName())",
"def repositories(db):\n repos = list()\n repos.append(RepositoryFactory(full_name='python', language='Python'))\n repos.append(RepositoryFactory(full_name='ruby', language='Ruby'))\n repos.append(RepositoryFactory(full_name='java', language='Java'))\n db.session.commit()\n return repos",
"def save_users(user):\n user.save_user()",
"def save_starred_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':starredRepositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n starred_repositories = u.starredRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: STARRED_AT}')\n else:\n starred_repositories = u.starredRepositories(first=100,\n orderBy='{direction: DESC, field: STARRED_AT}')\n\n if not starred_repositories:\n return False\n\n while True:\n try:\n if starred_repositories['data']['user']['starredRepositories']['edges']:\n index = ''.join(['gh_starred_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubStarredRepositories',\n document=starred_repositories,\n login=user.login,\n path=path)\n has_next_page = starred_repositories['data']['user']['starredRepositories']['pageInfo']['hasNextPage']\n end_cursor = starred_repositories['data']['user']['starredRepositories']['pageInfo']['endCursor']\n if has_next_page:\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n starred_repositories = u.starredRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: STARRED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':starredRepositories:endCursor']), end_cursor)\n break\n else:\n break\n except TypeError as e:\n self.logger.error('GithubStarredRepositories', u.login, e)\n break\n\n return True",
"def save_user_unions(*args):\n return _ida_hexrays.save_user_unions(*args)",
"def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)",
"def save_changes(self, objs):\n # Save to the database and possibly write tags.\n for ob in objs:\n if ob._dirty:\n self._log.debug('saving changes to {}', ob)\n ob.try_sync(ui.should_write(), ui.should_move())",
"def save_pinned_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':pinnedRepositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n pinned_repositories = u.pinnedRepositories(first=100, # usually more like 6, but we want all possible\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n pinned_repositories = u.pinnedRepositories(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not pinned_repositories:\n return False\n\n while True:\n if pinned_repositories['data']['user']['pinnedRepositories']['edges']:\n index = ''.join(['gh_pinned_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubPinnedRepositories',\n document=pinned_repositories,\n login=user.login,\n path=path)\n has_next_page = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['hasNextPage']\n end_cursor = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['endCursor']\n if has_next_page:\n pinned_repositories = u.pinnedRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':pinnedRepositories:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url",
"def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories",
"def repos(request):\n # Clean up garbage created by buggy edits\n bad_branch_keys = models.Branch.query(models.Branch.owner == None).fetch(\n 100, keys_only=True)\n if bad_branch_keys:\n ndb.delete_multi(bad_branch_keys)\n repo_map = {}\n for repo in models.Repository.query().fetch(1000, batch_size=100):\n repo_map[repo.key] = repo\n branches = []\n for branch in models.Branch.query().fetch(2000, batch_size=100):\n repo_key = branch.repo_key\n if repo_key in repo_map:\n branch.repository = repo_map[repo_key]\n branches.append(branch)\n branches.sort(key=lambda b: map(\n unicode.lower, (b.repository.name, b.category, b.name)))\n return respond(request, 'repos.html', {'branches': branches})",
"def save_users(self):\n\n User.user_list.append(self)",
"def save_repositories_contributed_to(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositoriesContributedTo:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not repositories_contributed_to:\n return False\n\n while True:\n if repositories_contributed_to['data']['user']['repositoriesContributedTo']['edges']:\n index = ''.join(['gh_repositories_contributed_to-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubRepositoriesContributedTo',\n document=repositories_contributed_to,\n login=user.login,\n path=path)\n has_next_page = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['hasNextPage']\n end_cursor = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['endCursor']\n if has_next_page:\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':repositoriesContributedTo:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def save_all(self):\n self.save_client_list_to_file()\n self.save_project_list_to_file()",
"def update_repos():\n with open(repolist_file, \"r\") as repofile:\n repolist = repofile.readlines()\n for idx in xrange(len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[i]',l):\n repodir = clone_dir + \"/\" + os.path.basename(l)\n git(\"fetch\", \"--all\", cwd = repodir)\n pass",
"def list_repositories(self):\n data = self._get_all_data('/user/repos')\n return [repo['full_name'] for repo in data]",
"def save_committees(event, committees):\n for committee in committees:\n name = committee.name\n organization = Organization.objects.get(id=committee.id)\n entity_type = \"organization\"\n new_committee = EventParticipant(\n name=name,\n event=event,\n organization=organization,\n entity_type=entity_type\n )\n new_committee.save()",
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def save_users(users):\n with open(STORAGE_PATH, \"wb\") as fp:\n pickle.dump(users, fp)",
"def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r",
"def manage_owners():\n\n owner_data = request.get_json(force=True)\n return _get_owner_service().create_owner(owner_data)",
"def add_to_repository(user):\n try:\n repository.add(user)\n except (KeyError, DuplicateIndexError):\n raise",
"def create_or_update_repository():\n user = get_jwt_identity()\n data = request.get_json()\n if data is None:\n raise ApiException(422, \"No data.\")\n data, errors = __repositorySchema.load(data)\n if errors:\n return jsonify({'error': errors}), 422\n if 'id' not in data or data['id'] is None:\n data['owner_id'] = user['id']\n repository = Repository.create(data)\n app.db.session.add(repository)\n else:\n repository = Repository.query.get_by_id(data['id'], user)\n repository.update(data)\n app.db.session.commit()\n data = __repositorySchema.dump(repository).data\n return jsonify(data)",
"def __add_repo(repo_name, url):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n entry = [(repo_name, url)]\n\n sql = ('INSERT INTO repos (repo_name, url)'\n 'VALUES (?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return 0",
"def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response",
"def process(self):\n\n manager_list = []\n\n with Manager() as manager:\n manager_list = manager.list()\n processes = []\n\n for data in self.repos:\n process = Process(\n target=self.get_repos_lists, args=(data, manager_list)\n )\n process.start()\n processes.append(process)\n\n for proc in processes:\n proc.join()\n\n manager_list = list(manager_list)\n\n for element in manager_list:\n domains, ips = self.__separate_domains_from_ip(element)\n Settings.domains.extend(domains)\n Settings.ips.extend(ips)\n\n del domains, ips\n\n Settings.domains = Helpers.List(Settings.domains).format()\n Settings.ips = Helpers.List(Settings.ips).format()\n\n Helpers.Dict(self.repos).to_json(Settings.repositories_file)\n del Settings.repositories\n\n Generate()\n Compress()\n Deploy()\n Clean()",
"def _save_clicked(self, info):\n\n assignment = self._validate(info)\n if assignment is None:\n return\n\n # Update the data in the database.\n try:\n get_permissions_manager().policy_manager.policy_storage.set_assignment(assignment.user_name, [r.name for r in assignment.roles])\n\n info.ui.dispose()\n except PolicyStorageError, e:\n self._ps_error(e)"
]
| [
"0.6054562",
"0.5576589",
"0.5434746",
"0.53765297",
"0.5325816",
"0.5315701",
"0.528101",
"0.5267747",
"0.5260667",
"0.5243365",
"0.52378726",
"0.52375865",
"0.5204801",
"0.51619995",
"0.51211464",
"0.51200926",
"0.510265",
"0.50712204",
"0.50696415",
"0.5054528",
"0.4969991",
"0.49544743",
"0.49510762",
"0.4950422",
"0.49336076",
"0.49234983",
"0.49072996",
"0.49055183",
"0.48955274",
"0.48936418"
]
| 0.6030274 | 1 |
Saves a list of repositories that the user recently contributed to other than their own. | def save_repositories_contributed_to(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositoriesContributedTo:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
            repositories_contributed_to = user.repositoriesContributedTo(first=100,
                                                                         after=end_cursor,
                                                                         orderBy='{direction: DESC, field: CREATED_AT}')
else:
            repositories_contributed_to = user.repositoriesContributedTo(first=100,
                                                                         orderBy='{direction: DESC, field: CREATED_AT}')
if not repositories_contributed_to:
return False
while True:
if repositories_contributed_to['data']['user']['repositoriesContributedTo']['edges']:
index = ''.join(['gh_repositories_contributed_to-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubRepositoriesContributedTo',
document=repositories_contributed_to,
login=user.login,
path=path)
has_next_page = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['hasNextPage']
end_cursor = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['endCursor']
if has_next_page:
                    repositories_contributed_to = user.repositoriesContributedTo(first=100,
                                                                                 after=end_cursor,
                                                                                 orderBy='{direction: DESC, field: CREATED_AT}')
else:
# Cache the end_cursor where we last collected data
                    self.redis.set(''.join(['gh:', user.login, ':repositoriesContributedTo:endCursor']), end_cursor)
break
else:
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n repositories = u.repositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n repositories = u.repositories(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not repositories:\n return False\n\n while True:\n if repositories['data']['user']['repositories']['edges']:\n index = ''.join(['gh_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubRepositories',\n document=repositories,\n login=user.login,\n path=path)\n has_next_page = repositories['data']['user']['repositories']['pageInfo']['hasNextPage']\n end_cursor = repositories['data']['user']['repositories']['pageInfo']['endCursor']\n if has_next_page:\n repositories = u.repositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':repositories:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstream_repo(project)\n if upperstream_repo is not None:\n _ownered_project.append((upperstream_repo, upperstream_repo + \"(Upperstream of %s)\" % project))\n\n User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())\n\n # mongoDB don't support key value contains '.'\n for i in range(len(_ownered_project)):\n _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])\n User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))\n\n flash('Refresh your own GitHub repositories list successfully!', 'success')\n return redirect(url_for('main.load_from_github'))",
"def save_starred_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':starredRepositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n starred_repositories = u.starredRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: STARRED_AT}')\n else:\n starred_repositories = u.starredRepositories(first=100,\n orderBy='{direction: DESC, field: STARRED_AT}')\n\n if not starred_repositories:\n return False\n\n while True:\n try:\n if starred_repositories['data']['user']['starredRepositories']['edges']:\n index = ''.join(['gh_starred_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubStarredRepositories',\n document=starred_repositories,\n login=user.login,\n path=path)\n has_next_page = starred_repositories['data']['user']['starredRepositories']['pageInfo']['hasNextPage']\n end_cursor = starred_repositories['data']['user']['starredRepositories']['pageInfo']['endCursor']\n if has_next_page:\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n starred_repositories = u.starredRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: STARRED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':starredRepositories:endCursor']), end_cursor)\n break\n else:\n break\n except TypeError as e:\n self.logger.error('GithubStarredRepositories', u.login, e)\n break\n\n return True",
"def save_changes(self, objs):\n # Save to the database and possibly write tags.\n for ob in objs:\n if ob._dirty:\n self._log.debug('saving changes to {}', ob)\n ob.try_sync(ui.should_write(), ui.should_move())",
"def saveEditorsList(self, editors):\n for editor in editors:\n ok = editor.saveFile()\n if ok:\n self.setEditorName(editor, editor.getFileName())",
"def save_all(self):\n self.save_client_list_to_file()\n self.save_project_list_to_file()",
"def handle_contribution(user_id, project_id, status):\n user = user_collection.find_one({\"_id\": user_id})\n contribution_list = user[\"contributions\"]\n if status:\n contribution_list.append(project_id)\n else:\n contribution_list.remove(project_id)\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"contributions\": contribution_list,\n }\n },\n upsert=False,\n )",
"def save_users(self):\n\n User.user_list.append(self)",
"def update_repos():\n with open(repolist_file, \"r\") as repofile:\n repolist = repofile.readlines()\n for idx in xrange(len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[i]',l):\n repodir = clone_dir + \"/\" + os.path.basename(l)\n git(\"fetch\", \"--all\", cwd = repodir)\n pass",
"def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))",
"def fetch_repos(self):\n for repo in self.json_repos['repos']:\n title = str(repo[\"title\"])\n repo_url = str(repo['repo'])\n self.repos[title] = repo_url",
"def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))",
"def save_data_for_later(self):\n if self.pr.is_merged and self.pr.base_branch == self.pr.config.devBranch:\n msg = DATA_SAVE_MERGED.format(title=self.pr.title, desc=self.pr.description, pr=self.pr.link_pretty,\n by=self.pr.opened_by_slack)\n write_to_file_from_top(self.pr.config.releaseItemsFilePath, msg)\n with open(self.pr.config.releaseItemsFileMergedBy, \"a+\") as f:\n name = \"<@{0}>\".format(self.pr.opened_by_slack)\n existing_names = f.read()\n if name not in existing_names:\n f.write(name + \", \")\n LOG.debug(msg + ' added unique names to file ' + self.pr.config.releaseItemsFileMergedBy)\n f.close()",
"def __saveRecent(self):\n Preferences.Prefs.rsettings.setValue(recentNameFiles, self.recent)\n Preferences.Prefs.rsettings.sync()",
"def save_pinned_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':pinnedRepositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n pinned_repositories = u.pinnedRepositories(first=100, # usually more like 6, but we want all possible\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n pinned_repositories = u.pinnedRepositories(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not pinned_repositories:\n return False\n\n while True:\n if pinned_repositories['data']['user']['pinnedRepositories']['edges']:\n index = ''.join(['gh_pinned_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubPinnedRepositories',\n document=pinned_repositories,\n login=user.login,\n path=path)\n has_next_page = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['hasNextPage']\n end_cursor = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['endCursor']\n if has_next_page:\n pinned_repositories = u.pinnedRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':pinnedRepositories:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val",
"def add_to_repository(user):\n try:\n repository.add(user)\n except (KeyError, DuplicateIndexError):\n raise",
"def save_all(self):\n data = []\n for key, albums in self.albums_to_update.items():\n self.albums_to_save[key] += albums\n with wait_cursor(self._parent):\n for artist, albumdata in self.albums_to_save.items():\n if not albumdata:\n continue\n artistid = self.artist_map[artist]\n data = []\n for name, year, key, is_live, tracks in albumdata:\n if key == 'X':\n key = 0\n data.append((key, name, year, is_live, tracks))\n albums = dmla.update_albums_by_artist(artistid, data)\n albums_map_lookup = {build_album_name(x): x.id for x in albums}\n for c_name, value in self.albums_map[artist].items():\n a_name, id = value\n try:\n test = albums_map_lookup[a_name]\n except KeyError:\n continue\n if id != test:\n self.albums_map[artist][c_name] = (a_name, test)\n self.albums_to_save.clear()\n self.albums_to_update.clear()\n self._parent.albums_map = self.albums_map\n self._parent.albums_map.update({x: {} for x, y in self.albums_map.items()\n if not y})\n ## self.last_handled = None\n save_appdata([self._parent.artist_map, self._parent.albums_map])\n self.refresh_screen(self.artist_list.currentIndex())",
"def updateGrabberListFromGitHub(self):\n url = \"https://raw.githubusercontent.com/swharden/QRSSplus/master/grabbers.csv\"\n headers = {'User-Agent': 'Wget/1.12 (linux-gnu)'}\n req = urllib2.Request(url, headers=headers)\n r = urllib2.urlopen(req, timeout=3)\n raw = r.read()\n raw = raw.split(\"\\n\")\n raw = [x.strip() for x in raw]\n raw = [x for x in raw if len(x)]\n raw = \"\\n\".join(raw)\n f = open(\"grabbers.csv\", 'w')\n f.write(raw)\n f.close()\n self.log(\"Downloaded the latest grabbers.csv\")",
"def update_from_logins(self, logins):\n # Now add contributors using cache (new GitHub contributors) with known email or orcid that isn't present\n for login in logins:\n # Check against contribution threshold, and not bot\n if not self.include_contributor(login):\n continue\n\n cache = self.cache.get(login) or {}\n email = cache.get(\"email\")\n orcid = cache.get(\"orcid\")\n\n # We can only add completely new entries that don't already exist\n if (email != None or orcid != None) and (\n email not in self.email_lookup and orcid not in self.orcid_lookup\n ):\n bot.info(f\" Updating {login}\")\n parts = (cache.get(\"name\") or login).split(\" \")\n entry = {\"@type\": \"Person\", \"givenName\": parts[0]}\n\n # Add the last name if it's defined\n if len(parts) > 1:\n entry[\"familyName\"] = \" \".join(parts[1:])\n\n if email != None:\n entry[\"email\"] = email\n if orcid != None:\n entry[\"@id\"] = \"https://orcid.org/%s\" % orcid\n self.lookup.append(entry)",
"def add_addon_repository(self, repo: str) -> None:\n if repo in self._data[ATTR_ADDONS_CUSTOM_LIST]:\n return\n\n self._data[ATTR_ADDONS_CUSTOM_LIST].append(repo)",
"def _add_missing_repositories(frozen_repos, ret, **kwargs):\n repos = __salt__[\"pkg.list_repos\"](**kwargs)\n missing_repos = set(frozen_repos) - set(repos)\n for repo in missing_repos:\n try:\n # In Python 2 we cannot do advance destructuring, so we\n # need to create a temporary dictionary that will merge\n # all the parameters\n _tmp_kwargs = frozen_repos[repo].copy()\n _tmp_kwargs.update(kwargs)\n __salt__[\"pkg.mod_repo\"](repo, **_tmp_kwargs)\n ret[\"repos\"][\"add\"].append(repo)\n log.info(\"Added missing repository %s\", repo)\n except Exception as e: # pylint: disable=broad-except\n msg = \"Error adding %s repository: %s\"\n log.error(msg, repo, e)\n ret[\"comment\"].append(msg % (repo, e))",
"def saveAllEditors(self):\n for editor in self.editors:\n ok = editor.saveFile()\n if ok:\n self.setEditorName(editor, editor.getFileName())\n \n # restart autosave timer\n if self.autosaveInterval > 0:\n self.autosaveTimer.start(self.autosaveInterval * 60000)",
"def save_users(user):\n user.save_user()",
"def add_all(self, top_repo_path):\n my_output = subprocess.check_output([\"git\", \"add\", \"-A\"], cwd=top_repo_path)\n return my_output",
"def repos(request):\n # Clean up garbage created by buggy edits\n bad_branch_keys = models.Branch.query(models.Branch.owner == None).fetch(\n 100, keys_only=True)\n if bad_branch_keys:\n ndb.delete_multi(bad_branch_keys)\n repo_map = {}\n for repo in models.Repository.query().fetch(1000, batch_size=100):\n repo_map[repo.key] = repo\n branches = []\n for branch in models.Branch.query().fetch(2000, batch_size=100):\n repo_key = branch.repo_key\n if repo_key in repo_map:\n branch.repository = repo_map[repo_key]\n branches.append(branch)\n branches.sort(key=lambda b: map(\n unicode.lower, (b.repository.name, b.category, b.name)))\n return respond(request, 'repos.html', {'branches': branches})",
"def save(self):\n\n for i in self.bots:\n try:\n i.save()\n except Exception, ex:\n handle_exception()",
"def _save_sync_list(self):\n\t\tfp = open(self.sync_file, 'w')\n\t\tself.sync_list.write(fp)\n\t\tfp.close()",
"def save_committees(event, committees):\n for committee in committees:\n name = committee.name\n organization = Organization.objects.get(id=committee.id)\n entity_type = \"organization\"\n new_committee = EventParticipant(\n name=name,\n event=event,\n organization=organization,\n entity_type=entity_type\n )\n new_committee.save()",
"def save_users(user_list):\n with open(user_path, \"w\") as outfile:\n json.dump(user_list, outfile)"
]
| [
"0.5901712",
"0.5878179",
"0.5528805",
"0.5410074",
"0.5305616",
"0.5223621",
"0.520857",
"0.51607317",
"0.51521593",
"0.51481813",
"0.5140866",
"0.51395434",
"0.5116052",
"0.5105078",
"0.50949955",
"0.5020492",
"0.5018968",
"0.5012153",
"0.49822968",
"0.49732697",
"0.49666265",
"0.49468276",
"0.49298418",
"0.49294245",
"0.49247524",
"0.49178404",
"0.48896083",
"0.4886949",
"0.488583",
"0.4857644"
]
| 0.60250443 | 0 |
Saves a list of repositories that the user has starred. | def save_starred_repositories(self, user, path=None):
# Redis has an end_cursor if we've collected this data before
end_cursor = self.redis.get(''.join(['gh:', user.login, ':starredRepositories:endCursor']))
if end_cursor:
end_cursor = end_cursor.decode('utf-8')
end_cursor = ''.join(['"', end_cursor, '"'])
            starred_repositories = user.starredRepositories(first=100,
                                                            after=end_cursor,
                                                            orderBy='{direction: DESC, field: STARRED_AT}')
else:
            starred_repositories = user.starredRepositories(first=100,
                                                            orderBy='{direction: DESC, field: STARRED_AT}')
if not starred_repositories:
return False
while True:
try:
if starred_repositories['data']['user']['starredRepositories']['edges']:
index = ''.join(['gh_starred_repositories-', self.timestamp])
self._write_to_datastore(index=index,
doc_type='GithubStarredRepositories',
document=starred_repositories,
login=user.login,
path=path)
has_next_page = starred_repositories['data']['user']['starredRepositories']['pageInfo']['hasNextPage']
end_cursor = starred_repositories['data']['user']['starredRepositories']['pageInfo']['endCursor']
if has_next_page:
end_cursor = ''.join(['"', end_cursor, '"'])
                        starred_repositories = user.starredRepositories(first=100,
                                                                        after=end_cursor,
                                                                        orderBy='{direction: DESC, field: STARRED_AT}')
else:
# Cache the end_cursor where we last collected data
                        self.redis.set(''.join(['gh:', user.login, ':starredRepositories:endCursor']), end_cursor)
break
else:
break
except TypeError as e:
                self.logger.error('GithubStarredRepositories', user.login, e)
break
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"async def stars(self, ctx: commands.Context, stars: int):\n self.stars = stars\n await self._update_db()\n\n await ctx.send(\n f\"Done.Now this server needs `{stars}` :star: to appear on the starboard channel.\"\n )",
"def get_starred_files_by_username(self, username):\n starred_files = super(UserStarredFilesManager, self).filter(\n email=username, org_id=-1)\n\n ret = []\n repo_cache = {}\n for sfile in starred_files:\n # repo still exists?\n if repo_cache.has_key(sfile.repo_id):\n repo = repo_cache[sfile.repo_id]\n else:\n try:\n repo = seafile_api.get_repo(sfile.repo_id)\n except SearpcError:\n continue\n if repo is not None:\n repo_cache[sfile.repo_id] = repo\n else:\n sfile.delete()\n continue\n\n # file still exists?\n file_id = ''\n size = -1\n if sfile.path != \"/\":\n try:\n file_id = seafile_api.get_file_id_by_path(sfile.repo_id,\n sfile.path)\n # size = seafile_api.get_file_size(file_id)\n except SearpcError:\n continue\n if not file_id:\n sfile.delete()\n continue\n\n f = StarredFile(sfile.org_id, repo, file_id, sfile.path,\n sfile.is_dir, 0) # TODO: remove ``size`` from StarredFile\n ret.append(f)\n\n '''Calculate files last modification time'''\n files_list = []\n for sfile in ret:\n if sfile.is_dir:\n continue\n ele = (sfile.repo.id, sfile.path, sfile.file_id)\n files_list.append(ele)\n\n files_dict_with_last_modified = FileLastModifiedInfo.objects.get_files_last_modified(files_list)\n\n for sfile in ret:\n key = \"%s|%s|%s\" % (sfile.repo.id, sfile.path, sfile.file_id)\n if files_dict_with_last_modified.has_key(key):\n sfile.last_modified = files_dict_with_last_modified[key]\n else:\n # Should never reach here\n pass\n\n ret.sort(lambda x, y: cmp(y.last_modified, x.last_modified))\n\n return ret",
"def ajax_add_star(request, pk):\n report = get_object_or_404(Report, pk=pk)\n user = request.user\n if user in report.starred.all():\n added = False\n report.starred.remove(request.user)\n else:\n added = True\n report.starred.add(request.user)\n return HttpResponse(added)",
"def set_stars():\n prod_id = int(request.vars.prod_id)\n logger.info(\"changing stars on prod_id {%s}\" %prod_id)\n rating = int(request.vars.rating)\n logger.info(\"auth.user from api: %s\"%auth.user.email )\n db.stars.update_or_insert(\n (db.stars.prod_id == prod_id) & (db.stars.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n rating = rating\n )\n new_avg = calc_avg_rating(prod_id)\n return response.json(dict(new_avg=new_avg))",
"def get_starred_repos(org_list):\n print(\"\\nGetting repositories starred by members.\")\n jsonMembersStarred_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'html_url',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting starred repositories of', member['login'])\n jsonStarred = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/starred?per_page=100\")\n for repo in jsonStarred:\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersStarred_list.append(repo)\n generate_csv(\"starred-list\", jsonMembersStarred_list, columns_list)",
"def unstar(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid in account.stars:\n account.stars[:] = [i for i in account.stars if i != keyid]\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def save_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n repositories = u.repositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n repositories = u.repositories(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not repositories:\n return False\n\n while True:\n if repositories['data']['user']['repositories']['edges']:\n index = ''.join(['gh_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubRepositories',\n document=repositories,\n login=user.login,\n path=path)\n has_next_page = repositories['data']['user']['repositories']['pageInfo']['hasNextPage']\n end_cursor = repositories['data']['user']['repositories']['pageInfo']['endCursor']\n if has_next_page:\n repositories = u.repositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':repositories:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def handle_contribution(user_id, project_id, status):\n user = user_collection.find_one({\"_id\": user_id})\n contribution_list = user[\"contributions\"]\n if status:\n contribution_list.append(project_id)\n else:\n contribution_list.remove(project_id)\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"contributions\": contribution_list,\n }\n },\n upsert=False,\n )",
"def give_star_to_many(request, from_employee_id):\n if request.method == 'POST':\n serializer_bulk = StarBulkSerializer(data=request.data)\n errors = []\n stars_added = 0\n if serializer_bulk.is_valid():\n text = (request.data['text'] if 'text' in request.data.keys() else None)\n from_user = get_object_or_404(Employee, pk=from_employee_id)\n category = get_object_or_404(Category, pk=request.data['category'])\n keyword = get_object_or_404(Keyword, pk=request.data['keyword'])\n\n # Create data object to save\n data = {\"category\": category.id,\n \"keyword\": keyword.id,\n \"text\": text,\n \"from_user\": from_user.id}\n\n for user_pk in request.data['to_users']:\n data.update({\"to_user\": int(user_pk)})\n serializer = StarSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n stars_added += 1\n\n # Add points\n to_user = get_object_or_404(Employee, pk=user_pk)\n from_user.add_stars_given(1)\n from_user.save()\n\n current_level = to_user.level\n\n # Add points to to_user according category weight\n if from_user.position:\n weight = from_user.position.weight\n else:\n weight = 1\n\n to_user.add_stars(weight)\n message = config.RECOMMENDATION_MESSAGE % (weight, from_user.first_name, from_user.last_name)\n send_push_notification(to_user, message)\n to_user.evaluate_level()\n to_user.save()\n\n # Add activity log if user level up\n if to_user.level != current_level:\n message = config.LEVEL_UP_TEXT % (to_user.first_name, to_user.last_name, to_user.level)\n activity = Activity.objects.create(text=message, to_user=to_user)\n activity.save()\n\n else:\n errors.append(serializer.errors)\n else:\n errors.append(serializer_bulk.errors)\n\n if len(errors) == 0:\n content = {'detail': config.SUCCESSFULLY_STARS_ADDED}\n return Response(content, status=status.HTTP_201_CREATED)\n else:\n stars_results = {\"stars_added\": stars_added}\n detail = {'detail': errors}\n content = stars_results.copy()\n content.update(detail)\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)",
"def save_repositories_contributed_to(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':repositoriesContributedTo:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not repositories_contributed_to:\n return False\n\n while True:\n if repositories_contributed_to['data']['user']['repositoriesContributedTo']['edges']:\n index = ''.join(['gh_repositories_contributed_to-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubRepositoriesContributedTo',\n document=repositories_contributed_to,\n login=user.login,\n path=path)\n has_next_page = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['hasNextPage']\n end_cursor = repositories_contributed_to['data']['user']['repositoriesContributedTo']['pageInfo']['endCursor']\n if has_next_page:\n repositories_contributed_to = u.repositoriesContributedTo(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':repositoriesContributedTo:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstream_repo(project)\n if upperstream_repo is not None:\n _ownered_project.append((upperstream_repo, upperstream_repo + \"(Upperstream of %s)\" % project))\n\n User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())\n\n # mongoDB don't support key value contains '.'\n for i in range(len(_ownered_project)):\n _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])\n User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))\n\n flash('Refresh your own GitHub repositories list successfully!', 'success')\n return redirect(url_for('main.load_from_github'))",
"def add_star_team(client_id, team_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the indexed name of the team.\n\t\tteam_indexed_name = session.query(Team.indexed_name)\\\n\t\t\t\t.filter(Team.id == team_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the team.\n\t\tstarred_team = StarredTeam(user_id=client_id,\n\t\t\t\tteam_id=team_id,\n\t\t\t\tindexed_name=team_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_team)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this team.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the team.\n\tsession.execute(Teams.update()\n\t\t\t.where(Team.id == team_id)\n\t\t\t.values({Team.num_stars: Team.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(MatchOpponent.match_id, Match)\\\n\t\t\t.join(Match, MatchOpponent.match_id == Match.id)\\\n\t\t\t.filter(MatchOpponent.team_id == team_id, MatchOpponent.is_streamed == True)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\t\n\tsession.commit()",
"def addstar(starname):\n try:\n Star.create(name=starname)\n except IntegrityError:\n print(('Star {0} already in database. Record not created, but can be updated.'.format(starname)))",
"def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))",
"def add_to_repository(user):\n try:\n repository.add(user)\n except (KeyError, DuplicateIndexError):\n raise",
"def _save_ballot_shares(\n self,\n guardian_id: GuardianId,\n guardians_ballot_shares: Dict[BallotId, Optional[DecryptionShare]],\n ) -> None:\n for ballot_id, guardian_ballot_share in guardians_ballot_shares.items():\n shares = self._ballot_shares.get(ballot_id)\n if shares is None:\n shares = {}\n if guardian_ballot_share is not None:\n shares[guardian_id] = guardian_ballot_share\n self._ballot_shares[ballot_id] = shares",
"def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))",
"def save_pinned_repositories(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':pinnedRepositories:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n pinned_repositories = u.pinnedRepositories(first=100, # usually more like 6, but we want all possible\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n pinned_repositories = u.pinnedRepositories(first=100,\n orderBy='{direction: DESC, field: CREATED_AT}')\n\n if not pinned_repositories:\n return False\n\n while True:\n if pinned_repositories['data']['user']['pinnedRepositories']['edges']:\n index = ''.join(['gh_pinned_repositories-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubPinnedRepositories',\n document=pinned_repositories,\n login=user.login,\n path=path)\n has_next_page = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['hasNextPage']\n end_cursor = pinned_repositories['data']['user']['pinnedRepositories']['pageInfo']['endCursor']\n if has_next_page:\n pinned_repositories = u.pinnedRepositories(first=100,\n after=end_cursor,\n orderBy='{direction: DESC, field: CREATED_AT}')\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':pinnedRepositories:endCursor']), end_cursor)\n break\n else:\n break\n\n return True",
"async def post_starred(bot, config, message, reaction_emoji, user):\r\n\t\treacts = message.reactions\r\n\t\tmod_starred = False\r\n\t\tstarlist = None\r\n\t\tstarcount = None\r\n\t\tstarcount_reached = False\r\n\t\t# if the post is older than a week, don't bother putting it on the board\r\n\t\tif (datetime.datetime.now() - message.created_at).total_seconds() > 604800:\r\n\t\t\treturn\r\n\t\t# check if the poster of the starred message is blacklisted from the starboard\r\n\t\tif message.author.id in config[\"starboard\"][\"blacklisted_users\"]:\r\n\t\t\treturn\r\n\t\t# count the number of stars a post has\r\n\t\tfor react in reacts:\r\n\t\t\tif react.emoji == config[\"starboard\"][\"emoji\"]:\r\n\t\t\t\tstarlist = [x async for x in react.users()]\r\n\t\t\t\tstarcount = len(starlist)\r\n\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\treturn\r\n\t\t# check if the star count was reached\r\n\t\ttry:\r\n\t\t\t# if there's a star requirement for a specific channel, and the starred message is in that channel,\r\n\t\t\t# check if the star count surpasses the requirement for that channel\r\n\t\t\tif starcount >= config[\"starboard\"][\"star_amounts\"][message.channel.name]:\r\n\t\t\t\tstarcount_reached = True\r\n\t\t# if there isn't a channel-specific star count this message must follow,\r\n\t\texcept KeyError:\r\n\t\t\t# just check to see if it meets the global requirement\r\n\t\t\tif starcount >= config[\"starboard\"][\"star_amounts\"][\"global\"]:\r\n\t\t\t\tstarcount_reached = True\r\n\t\t# check if a mod starred the post\r\n\t\tfor reactor in starlist:\r\n\t\t\tif Starboard.modcheck(bot, config, reactor) and config[\"starboard\"][\"role_override\"] == \"true\":\r\n\t\t\t\tstarcount_reached = True\r\n\t\t\t\tbreak\r\n\t\t# anti-self-star code\r\n\t\tif message.author.id == user.id:\r\n\t\t\tawait message.remove_reaction(reaction_emoji, message.author)\r\n\t\t\t# count the number of self-star alerts out of the last 50 messages\r\n\t\t\tcounter = 0\r\n\t\t\tasync for message in message.channel.history(limit=50):\r\n\t\t\t\tif \"IS A THOT AND SELF-STARRED THEIR MEME\" in message.content:\r\n\t\t\t\t\tcounter += 1\r\n\t\t\t# if there's been less than three, send a self-star alert\r\n\t\t\t# this is to prevent spam from CERTAIN THOTS THAT LOVE SPAMMING IT\r\n\t\t\tif counter < 3:\r\n\t\t\t\tselfstar_alert = '🚨 🚨 ' + user.mention + ' IS A THOT AND SELF-STARRED THEIR MEME 🚨 🚨'\r\n\t\t\t\tawait message.channel.send(selfstar_alert)\r\n\t\t\treturn\r\n\t\tif starcount_reached and message.author.id != bot.user.id:\r\n\t\t\tawait Starboard.post_to_starboard(bot, message, starcount)",
"def save_driver():\n driver_json = request.json\n drivers.append(driver_json)\n\n return jsonify({\"message\": \"The object was saved successfully\"})",
"def update_shares(self):\n self.nb_shares = self.shares.count()\n self.save()",
"def item_starred(self, item):\n self.update_item(item)",
"def update_db(self, force_scan):\n ## Get ignore repo from ignore_fname\n if exists(self.ignore_fname):\n f2 = open(self.ignore_fname,'r')\n self.ignore = [line.replace('\\n', '') for line in f2.readlines()]\n f2.close()\n else:\n self.ignore = []\n\n ## Get repos list\n if force_scan or not exists(self.fname):\n f = open(self.fname, 'w')\n self.repos = self.scan(f)\n f.close()\n else:\n f = open(self.fname, 'r')\n self.repos = [line.replace('\\n', '') for line in f.readlines()]\n f.close()\n self.clean_db()",
"def add_rating(user, item, rating):\n users.append(user)\n items.append(item)\n ratings.append(rating)",
"def is_starred(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'star')\r\n\r\n return http.Request('GET', url), resource.parse_boolean",
"def save_all(self):\n self.save_client_list_to_file()\n self.save_project_list_to_file()",
"async def add_starboard(self, ctx):\n channel = await ctx.get_text_channel(embed=CustomEmbeds.add(author=\"Channel\",\n description=\"Send a channel to add it to the starboard!\"))\n emotes = await ctx.get_emotes(embed=CustomEmbeds.add(author=\"Emotes\",\n description=\"React with emotes and then click ✅ to add them to the starboard.\"))\n threshold = await ctx.get_int(embed=CustomEmbeds.add(author=\"Add a Threshold\",\n description=\"Send message with the minimum number of reactions for it to be added to the starboard.\"))\n\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n if guild_starboards is None:\n starboard_len = 0\n else:\n starboard_len = len(guild_starboards.get(\"starboards\"))\n\n starboard = Starboard(index=starboard_len,\n channel=channel,\n emotes=emotes,\n threshold=threshold)\n\n await self.db_add_starboard(ctx.guild, starboard.serialize())\n await ctx.send(embed=CustomEmbeds.confirm(author=\"Starboard Added\", description=f\"ID: {starboard_len}\\n\"\n f\"Channel: {channel.mention}\\n\"\n f\"Emotes: {' '.join(emotes)}\\n\"\n f\"Threshold: {threshold}\"))"
]
| [
"0.6128304",
"0.5736715",
"0.5736715",
"0.5705222",
"0.5658904",
"0.5468062",
"0.5434972",
"0.5356213",
"0.5105929",
"0.50867456",
"0.49527746",
"0.49316972",
"0.49227825",
"0.49065986",
"0.4899718",
"0.48933232",
"0.4852997",
"0.482119",
"0.47673383",
"0.47423387",
"0.47314525",
"0.4715679",
"0.4714063",
"0.4704843",
"0.47011468",
"0.46959752",
"0.46940073",
"0.4683189",
"0.46775195",
"0.4671331"
]
| 0.63887346 | 0 |
Find the given endpoint for the given domain. Only scan HTML elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument. | def discoverEndpoint(domain, endpoint, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
if test_urls:
ronkyuu.URLValidator(message='invalid domain URL')(domain)
if content:
result = {'status': requests.codes.ok,
'headers': None,
'content': content
}
else:
r = requests.get(domain, verify=validateCerts)
result = {'status': r.status_code,
'headers': r.headers
}
# check for character encodings and use 'correct' data
if 'charset' in r.headers.get('content-type', ''):
result['content'] = r.text
else:
result['content'] = r.content
for key in endpoint:
result.update({key: set()})
result.update({'domain': domain})
if result['status'] == requests.codes.ok:
if 'link' in r.headers:
all_links = r.headers['link'].split(',', 1)
for link in all_links:
if ';' in link:
href, rel = link.split(';')
url = urlparse(href.strip()[1:-1])
if url.scheme in ('http', 'https') and rel in endpoint:
result[rel].add(url)
all_links = BeautifulSoup(result['content'], _html_parser, parse_only=SoupStrainer(**look_in)).find_all('link')
for link in all_links:
rel = link.get('rel', None)[0]
if rel in endpoint:
href = link.get('href', None)
if href:
url = urlparse(href)
if url.scheme == '' or url.netloc == '':
url = urlparse(urljoin(domain, href))
if url.scheme in ('http', 'https'):
result[rel].add(url)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discoverTokenEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):\n return discoverEndpoint(domain, ('token_endpoint',), content, look_in, test_urls, validateCerts)",
"def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)",
"def discoverMicropubEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):\n return discoverEndpoint(domain, ('micropub',), content, look_in, test_urls, validateCerts)",
"def _scrape_find(self, req, instruction, description, then):\n if 'find' not in instruction:\n raise InvalidInstructionError(\"Missing regex\")\n\n tags = req.tags\n find_sub = Substitution(instruction['find'], tags)\n replace_unsubbed = instruction.get('replace', '$0')\n name_sub = Substitution(instruction.get('name'), tags)\n tag_match_sub = Substitution(instruction.get('tag_match'), tags)\n join_sub = Substitution(instruction.get('join', None), tags)\n ignore_case = instruction.get('case_insensitive', False)\n multiline = instruction.get('multiline', False)\n dot_matches_all = instruction.get('dot_matches_all', True)\n\n # Default to full range\n min_match_raw = instruction.get('min_match', 0)\n max_match_raw = instruction.get('max_match', -1)\n match_raw = instruction.get('match', None)\n\n # Use single match if it was defined\n min_match_sub = Substitution(min_match_raw if match_raw is None else match_raw, tags)\n max_match_sub = Substitution(max_match_raw if match_raw is None else match_raw, tags)\n\n substitutions = [find_sub, name_sub, min_match_sub, max_match_sub, tag_match_sub,\n join_sub]\n # Parameterize input if it was supplied\n if 'input' in instruction:\n input_sub = Substitution(instruction['input'], tags)\n substitutions.append(input_sub)\n if not len(input_sub.missing_tags):\n input = input_sub.result\n else:\n input = req.input\n\n missing_tags = Substitution.add_missing(*substitutions)\n if len(missing_tags):\n return MissingTags(req, missing_tags)\n\n try:\n min_match = int(min_match_sub.result)\n max_match = int(max_match_sub.result)\n except ValueError as e:\n return Failed(req, \"Min_match '%s' or max_match '%s' is not an int: %s\" % (\n min_match_sub.result, max_match_sub.result, e))\n\n join = join_sub.result\n\n # Python counts a little differently\n single_match = min_match == max_match or join != None\n max_match = None if max_match == -1 else max_match + 1\n\n # Default to regex as string\n name = name_sub.result if name_sub.result else None\n\n tag_match = tag_match_sub.result\n tags = req.tags\n\n try:\n regex = Regex(find_sub.result, ignore_case, multiline, dot_matches_all,\n replace_unsubbed)\n\n # This lets through max_match = None, which is OK for generator\n if min_match > -1 and max_match > -1:\n subs = regex.substitutions(input, min_match, max_match)\n # Negative values mean we can't utilize the generator, sadly...\n else:\n subs = [s for s in regex.substitutions(input)][min_match:max_match]\n\n # Join subs into a single result.\n if join:\n subs = [join.join(subs)]\n\n greenlets = []\n replaced_subs = []\n # Call children once for each substitution, using it as input\n # and with a modified set of tags.\n for i, s_unsubbed in enumerate(subs):\n\n fork_tags = InheritedDict(tags)\n\n # Ensure we can use tag_match in children\n if tag_match:\n fork_tags[tag_match] = str(i)\n\n # Fail out if unable to replace.\n s_sub = Substitution(s_unsubbed, fork_tags)\n if s_sub.missing_tags:\n return MissingTags(req, s_sub.missing_tags)\n else:\n s_subbed = s_sub.result\n replaced_subs.append(s_subbed)\n\n # actually modify our available tags if it was 1-to-1\n if single_match and name is not None:\n tags[name] = s_subbed\n\n # The tag_match name is chosen in instruction, so it's OK\n # to propagate it -- no pollution risk\n if tag_match:\n tags[tag_match] = str(i)\n\n if name is not None:\n fork_tags[name] = s_subbed\n\n if then:\n child_scraper = Scraper(session=self._session, force_all=self._force_all, pool=self._pool)\n 
greenlets.append(child_scraper.scrape_async(then,\n id=req.id,\n tags=fork_tags,\n input=s_subbed,\n uri=req.uri))\n else:\n greenlets.append(None)\n except PatternError as e:\n return Failed(req, \"'%s' failed because of %s\" % (instruction['find'], e))\n\n if len(greenlets) == 0:\n return Failed(req, \"No matches for '%s', evaluated to '%s'\" % (\n instruction['find'], find_sub.result))\n\n #gevent.joinall(greenlets)\n\n # Build Results with responses from greenlets, substitute in tags\n results = []\n for i, replaced_sub in enumerate(replaced_subs):\n g = greenlets[i]\n if g == None:\n child_resps = []\n elif self._pool:\n child_resps = g.get()\n else:\n child_resps = g\n results.append(Result(replaced_sub, child_resps))\n\n return DoneFind(req, name, description, results)",
"def search(search_domains=None, format=None):\n # Try to parse out the list of domains\n try:\n qry_domains = map(\n tools.parse_domain,\n search_domains.split(',')\n )\n except:\n app.logger.info('Unable to decode valid domains from path')\n return flask.redirect('/error/1')\n\n if format is None:\n return html_render(qry_domains, search_domains)\n elif format == 'json':\n return json_render(qry_domains)\n elif format == 'csv':\n return csv_render(qry_domains)\n else:\n flask.abort(400, 'Unknown export format')",
"def search(url, depth, handler, html_rendering=False):\n if html_rendering and not QTWEBENGINE:\n print(\"QtWebEngine package not found, do not render\")\n html_rendering = False\n \n searchTask = URL(url)\n if not searchTask.valid:\n print(f\"Invalid url {url}\")\n sys.exit(1)\n\n n = Node(searchTask)\n n.visit(depth, handler, html_rendering=html_rendering)",
"def find(self, selector=\"*\", containing=None, clean=False, first=False,\n _encoding=None):\n\n # Convert a single containing into a list.\n if isinstance(containing, str):\n containing = [containing]\n if not isinstance(selector, str):\n raise TypeError(\"Expected string, got %r\" % type(selector))\n\n encoding = _encoding or self.encoding\n elements = [\n Element(element=found, url=self.url, default_encoding=encoding)\n for found in self.pq(selector)\n ]\n\n if containing:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n if any([c.lower() in element.full_text.lower() for c in containing]):\n elements.append(element)\n\n elements.reverse()\n\n # Sanitize the found HTML.\n if clean:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))\n elements.append(element)\n\n if first and len(elements) > 0:\n return elements[0]\n else:\n return elements",
"def __call__(self, *paths):\n\n for item in self.site.items:\n if item.is_page() and item.match(*paths):\n yield item",
"def search_content_by_tag_and_type_get(self, currentpage, head, itemsperpage, locale, tag, type):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Content/SearchContentByTagAndType/{tag}/{type}/{locale}/\"))",
"def search_by_email(self, request, **kwargs):\n self.method_check(request, allowed=['get'])\n self.throttle_check(request)\n\n keyword = request.GET['keyword']\n members = Member.objects.filter(email__icontains=keyword)\n\n bundles = []\n\n for member in members:\n bundle = self.build_bundle(obj=member, request=request)\n bundles.append(self.full_dehydrate(bundle, for_list=True))\n\n return self.create_response(request, bundles)",
"def match_endpoint(self, method, request):\n if \"?\" in request:\n raise InvalidRequest()\n all_endpoints = self.config.endpoints()\n match_str = \"%s %s\" % (method, request)\n matched_endpoints = set()\n # Note: fnmatch.filter seemed to be broken when trying to do exaclty this.\n for endpoint in all_endpoints:\n if fnmatch.fnmatch(match_str, endpoint):\n matched_endpoints.add(endpoint)\n return matched_endpoints",
"def _find_in_xml(self, pattern, element=None, namespace=Xmlns_path):\n el = self._xml if element is None else element\n return el.find('.//' + namespace + pattern)",
"def _find_filter(keyword):\n db = get_service_collection()\n result = db.find({\"name\": {\"$regex\": keyword}})\n service_endpoint = ''\n for item in result:\n service_endpoint = item[\"value\"][\"url\"]\n break\n return service_endpoint",
"def search(self, address='', url=True):\n baseurl = 'https://www.redfin.com/'\n try:\n self.driver.get(baseurl)\n if not address:\n print(f'---- testing {self.driver.current_url}')\n return None\n search_input = self.driver.find_element_by_xpath(\n '//input[@type=\"search\"]')\n search_input.send_keys(address)\n search_btn = self.driver.find_element_by_xpath(\n '//button[@data-rf-test-name=\"searchButton\"]')\n search_btn.click()\n self.driver.find_element_by_xpath(\n '//span[@itemprop=\"streetAddress\"]')\n result = self.driver.current_url\n self.detail_statu = True\n self.log.debug('---- Property page : %s', result)\n if url:\n return result\n except NoSuchElementException as e:\n self.log.info('---- No such element for : \"%s\"', address)\n return None\n except Exception as e:\n self.log.error('---- Search Error : %s', e)\n result = 'None'\n if url:\n return result",
"async def search(cls, url_path: str) -> Dict[str, Any]:\n\n url = urljoin(cls.Meta.root_url, url_path.lstrip('/'))\n\n async with ClientSession() as session:\n async with session.get(url) as response:\n return await cls.from_html(await response.read())",
"def url_for(self, attr=None, filter_value=None,\r\n service_type='network', endpoint_type='publicURL'):\r\n\r\n catalog = self.catalog['access'].get('serviceCatalog', [])\r\n matching_endpoints = []\r\n for service in catalog:\r\n if service['type'] != service_type:\r\n continue\r\n\r\n endpoints = service['endpoints']\r\n for endpoint in endpoints:\r\n if not filter_value or endpoint.get(attr) == filter_value:\r\n matching_endpoints.append(endpoint)\r\n\r\n if not matching_endpoints:\r\n raise exceptions.EndpointNotFound()\r\n elif len(matching_endpoints) > 1:\r\n raise exceptions.AmbiguousEndpoints(\r\n matching_endpoints=matching_endpoints)\r\n else:\r\n if endpoint_type not in matching_endpoints[0]:\r\n raise exceptions.EndpointTypeNotFound(type_=endpoint_type)\r\n\r\n return matching_endpoints[0][endpoint_type]",
"def find_end_point(source_website):\n\n try:\n # get the source website\n r = requests.get(source_website)\n except MissingSchema:\n raise MissingSchema(\"Source website was malformed; could not complete request: {0}\".format(source_website))\n\n # find tags with the rel=\"webmention\", indicating that it links to a webmention endpoint\n search_result = re.search('rel\\ *=\\ *\\\"*webmention\\\"*.*href\\ *=\\\"\\ *(.*)\\\"', r.content)\n if search_result:\n # if there is a result to the regular expression...\n # pick up the captured regular expression group which corresponds to the href url.\n search_result = search_result.group(1)\n if type(search_result) == list:\n # if there were multiple webmention tags, pick the first for the url\n return search_result[0]\n else:\n # return the webmention url\n return search_result\n else:\n # if we couldn't find any rel=\"webmention\" tags, there's not endpoint; return None\n return None",
"def search(self, pattern):\n raise NotImplementedError()",
"def search(query):\n\tprint('-> \tSeraching -> {}'.format(query))\n\told_html = driver.find_element_by_tag_name('html').text\n\ts = driver.find_element_by_name('q')\n\ts.send_keys(query)\n\ts.send_keys(Keys.ENTER) \n\treturn wait_for(old_html)",
"def contains_sale_pattern(content):\n regs = ['domain.*?\\\\.com for sale', '\\\\.com is for sale', 'buy\\s.*?this domain' 'domain name.*?for sale', 'domain sale',\n \"Below are sponsored listings for goods and services related to: <span class=\\\"htext\\\">\\S*</span>\",\n \"HugeDomains.com(.*?)is for sale\", \"domain is for sale\", \"the domain \\S+ is for sale. To purchase, call Afternic.com\",\n \"<a\\s?rel=\\\"nofollow\\\"\\s?href=\\\"https:\\/\\/www\\.hover\\.com\\/\\?source=parked\\\">\",\n \"<meta name=\\\"description\\\" content=\\\"Domain registriert bei united-domains\\.de\\\">\",\n \"<meta name=\\\"description\\\" content=\\\"\\S+ is a brandable business domain name for sale!\\\">\",\n \"<h2 class=\\\"txt-no-case\\\"><span id=\\\"txt-main-domain-name\\\" style=\\\"\\\">\\S+<\\/span><strong class=\\\"text-for-sale\\\">is for sale!<\\/strong><\\/h2>\"]\n for reg in regs:\n p = re.compile(reg, re.IGNORECASE | re.DOTALL)\n if p.search(content):\n debug(reg)\n return hit(reg)\n\n texts = [\"domain has recently been listed in the marketplace\", \"domain may be for sale\", \"Domain Brokerage providing Global Naming Assets Solutions\"]\n for text in texts:\n if text in content:\n debug(text)\n return hit(text)\n\n return False",
"def getContentList(self, content, index=-1):\n try:\n if index == -1: # this is a return for a single instance site\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n else: # this is the return for a multisite\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def find_page_part(page_list, regex, before, after):\n for x in page_list:\n if is_regex_in_string(regex, x):\n return between(before, after, x)",
"def search(self, url, size, contenthash, aggregate=False):\n raise Exception('Empty stub ContainerManager.search called!')",
"def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)",
"def crawl_and_scan(start_url, options=[], same_domain=True):\n vulns = {}\n start_url = url_prepare(start_url)\n sitemap = crawler.spider(start_url, same_domain=same_domain)\n if len(sitemap) > 0:\n print(str(len(sitemap)) + ' urls detected')\n for url in sitemap:\n # progress logging\n print(str(list(sitemap).index(url)) + '/' + str(len(sitemap)) + ' ' + url)\n vulns[url] = scan(url, options)\n return vulns\n return False",
"def search(wiki, pattern):\n wiki.search_tags(pattern)",
"def search(self, url_or_path='', **kwargs):\n url_or_path = '{}/@search'.format(url_or_path.strip('/'))\n return self.request.get(url_or_path, params=kwargs).json()",
"def find_any(self, endpoint, user=None):\n private_emissaries = self.find_private(endpoint, user)\n if private_emissaries:\n return private_emissaries\n else:\n return self.find_public(endpoint)",
"def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })",
"def search(subdomain):\n\tsub = searchByDomain(subdomain)\n\tif sub :\n\t\tresults = sub\n\t\tfor row in sub:\n\t\t\tlogger.log('INFO',row.subdomain)\n\t\tlogger.log(\"STATS\", f'We have found {len(sub)} subdomains ! Happy Hacking $_$')\n\telse:\n\t\tlogger.log('WARNING',f'[-] {subdomain} NOT FOUND.')"
]
| [
"0.568334",
"0.54719615",
"0.54551363",
"0.54303116",
"0.52735686",
"0.50578773",
"0.50293565",
"0.49629194",
"0.49056768",
"0.48959708",
"0.4888849",
"0.48617947",
"0.4861624",
"0.48373106",
"0.4803633",
"0.47973126",
"0.47466186",
"0.46863022",
"0.46846643",
"0.46615353",
"0.46358696",
"0.461446",
"0.46111703",
"0.45939308",
"0.4580046",
"0.45735875",
"0.45700946",
"0.4554023",
"0.4551074",
"0.45257536"
]
| 0.6853434 | 0 |
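
The positive document in the row above scans both the HTTP Link header and `<link>` elements for the requested rel values, resolving relative hrefs against the page URL. Below is a minimal, self-contained sketch of that idea, assuming `requests` and `BeautifulSoup` are available; the function name, default rel, and timeout are illustrative and not part of the dataset's library.

```python
# Minimal sketch of rel-endpoint discovery (illustrative; not the dataset's exact code).
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


def find_rel_endpoints(page_url, rels=("webmention",), timeout=10):
    """Return {rel: set of absolute URLs} found in the Link header and <link> tags."""
    found = {rel: set() for rel in rels}
    resp = requests.get(page_url, timeout=timeout)
    if resp.status_code != requests.codes.ok:
        return found

    # 1) HTTP Link header, e.g.  Link: <https://example.com/wm>; rel="webmention"
    for link in requests.utils.parse_header_links(resp.headers.get("link", "")):
        rel = link.get("rel", "")
        if rel in found and link.get("url"):
            found[rel].add(urljoin(page_url, link["url"]))

    # 2) <link rel="..." href="..."> elements in the HTML body
    soup = BeautifulSoup(resp.text, "html.parser")
    for tag in soup.find_all("link", href=True):
        for rel in tag.get("rel", []):
            if rel in found:
                found[rel].add(urljoin(page_url, tag["href"]))

    return found
```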
Find the micropub endpoint for the given domain. Only scan HTML elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument. | def discoverMicropubEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
return discoverEndpoint(domain, ('micropub',), content, look_in, test_urls, validateCerts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search(wiki, pattern):\n wiki.search_tags(pattern)",
"def search(self, phrase, content=False, username=None, ct=10, page=0):\r\n page = int(page) + 1\r\n\r\n with WIX.searcher() as search:\r\n fields = ['description', 'extended', 'tags']\r\n\r\n if content:\r\n fields.append('readable')\r\n\r\n parser = qparser.MultifieldParser(fields,\r\n schema=WIX.schema,\r\n group=qparser.OrGroup)\r\n qry = parser.parse(phrase)\r\n\r\n try:\r\n res = search.search_page(qry, page, pagelen=int(ct))\r\n except ValueError, exc:\r\n raise(exc)\r\n\r\n if res:\r\n qry = Bmark.query.filter(\r\n Bmark.bid.in_([r['bid'] for r in res])\r\n )\r\n\r\n if username:\r\n qry = qry.filter(Bmark.username == username)\r\n\r\n qry = qry.options(joinedload('hashed'))\r\n\r\n return qry.all()\r\n else:\r\n return []",
"def getContentList(self, webcontent):\n try:\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, webcontent)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def find(self, selector=\"*\", containing=None, clean=False, first=False,\n _encoding=None):\n\n # Convert a single containing into a list.\n if isinstance(containing, str):\n containing = [containing]\n if not isinstance(selector, str):\n raise TypeError(\"Expected string, got %r\" % type(selector))\n\n encoding = _encoding or self.encoding\n elements = [\n Element(element=found, url=self.url, default_encoding=encoding)\n for found in self.pq(selector)\n ]\n\n if containing:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n if any([c.lower() in element.full_text.lower() for c in containing]):\n elements.append(element)\n\n elements.reverse()\n\n # Sanitize the found HTML.\n if clean:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))\n elements.append(element)\n\n if first and len(elements) > 0:\n return elements[0]\n else:\n return elements",
"def LookIn(play, item):\r\n\tspk(\"You look in %s and find\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookiner(item)\r\n\telse:\r\n\t\tspk(\"Nothing\")",
"def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)",
"def getContentList(self, content, index=-1):\n try:\n if index == -1: # this is a return for a single instance site\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n else: # this is the return for a multisite\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def wikigg(self, irc, msg, args, wiki_subdomain, searchquery):\n baseurl = f'https://{wiki_subdomain}.wiki.gg/api.php'\n self._wiki(irc, baseurl, searchquery)",
"def search_for_meme(self, search):\n cursor = self.conn.cursor()\n cursor.execute(f\"select * from memes where lower(meme_name) like ?\", (f'%{search}%', ))\n results = cursor.fetchall()\n cursor.close()\n return results",
"def _scan_pwm_by_strand(self, pwm, strand):\n if strand == '+':\n sequences = self.sequences\n elif strand == '-':\n sequences = self.sequences_rc\n else:\n raise ValueError(f\"invalid strand option: {strand!r}\")\n score_cutoff = pwm.cutoffs[self.p_value]\n sliding_scores = sliding_motif_score(\n pwm.matrix.tolist(), pwm.length, pwm.max_raw_score, sequences)\n sites = pick_motif_sites(\n sliding_scores, score_cutoff, self.seq_starts, strand=strand)\n if self.remove_dup:\n sites = deduplicate_motif_sites(sites, pwm.length)\n return sites",
"def re_findall(text, website_source):\n pattern = re.compile(text)\n my_page = website_source\n my_match = pattern.findall(my_page, re.S)\n return my_match",
"def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)",
"def search(request):\n if 'q' in request.GET:\n term = request.GET['q']\n story_list = Story.objects.filter(Q(title__contains=term)|Q(markdown_content__contains=term))\n heading = \"Search results\"\n return render_to_response(\"cms/story_list.html\",locals())",
"def test_searchWildcard(self):\n self.assertFalse(\n self.server.search_UID([b'2:3'], self.seq, self.msg, (1, 1234)))\n # 2:* should get translated to 2:<max UID> and then to 1:2\n self.assertTrue(\n self.server.search_UID([b'2:*'], self.seq, self.msg, (1, 1234)))\n self.assertTrue(\n self.server.search_UID([b'*'], self.seq, self.msg, (1, 1234)))",
"def perform_search(self):\n\n self.implicitly_wait(5)\n html_element = self.find_element_by_xpath(\n '/html/body').get_attribute('outerHTML')\n soup = Scraper(html_element)\n target = soup.find_search_field()\n\n for elem in target:\n for attr, value in elem.items():\n placeholder = self.find_elements_by_css_selector(\n f'input[{attr}=\"{value}\"]'\n )\n for element in placeholder:\n try:\n element.send_keys(self.keywords)\n element.send_keys(Keys.RETURN)\n print(colored(':: Placeholder fullfilled ::', 'green'))\n return\n except:\n print(\n colored('Can\\'t type inside the search input', 'yellow'))",
"def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)",
"async def wikipedia(self, ctx, *args):\n if args[0] == 'random':\n search_string = wp.random()\n else:\n search_string = ' '.join(args)\n try:\n page = wp.page(search_string)\n await ctx.send(page.url)\n self.logger.info(misolog.format_log(ctx, f\"\"))\n except wp.exceptions.DisambiguationError as error:\n await ctx.send(f\"```{str(error)}```\")\n self.logger.info(misolog.format_log(ctx, f\"Disambiguation page\"))",
"def on_page_content(self, html, page, **kwargs):\n\n # Search for {% msx \"title.rom\" %}\n\n # markdown = re.sub(r\"\\{\\%(\\s)*msx(.*?)\\%\\}\",\n # webmsx_snippet(),\n # markdown,\n # flags=re.IGNORECASE)\n\n try:\n game_data = page.meta.get(\"MSX\") or page.meta.get(\"msx\")\n if game_data:\n game = \"\"\n machine = \"MSX1\"\n if \"game\" in game_data:\n game = game_data[\"game\"]\n game = game.replace(chr(34), \"\")\n game = game.replace(chr(39), \"\")\n self.config[\"game\"] = game\n if \"machine\" in game_data:\n machine = game_data[\"machine\"].upper()\n machine = machine.replace(chr(34), \"\")\n machine = machine.replace(chr(39), \"\")\n self.config[\"machine\"] = machine\n \n # breakpoint()\n html = webmsx_snippet(self.config) + html\n\n except KeyError:\n pass\n\n return html",
"def getContentList(self, webcontent, index):\n try:\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, webcontent)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def get_cuisine_search_pages(cuisine, page):\n link = SEARCH_URL.format(page, cuisine)\n cuisine_recipe_links = get_content_from_dynamic_url(link)\n if not cuisine_recipe_links:\n print \"no content for:\", link\n return None\n soup_search = BeautifulSoup(cuisine_recipe_links)\n return soup_search.find_all(\"h2\", {\"class\": \"node-title\"})",
"def search(subdomain):\n\tsub = searchByDomain(subdomain)\n\tif sub :\n\t\tresults = sub\n\t\tfor row in sub:\n\t\t\tlogger.log('INFO',row.subdomain)\n\t\tlogger.log(\"STATS\", f'We have found {len(sub)} subdomains ! Happy Hacking $_$')\n\telse:\n\t\tlogger.log('WARNING',f'[-] {subdomain} NOT FOUND.')",
"def search(self, pattern):\n raise NotImplementedError()",
"async def wiki(self, ctx, *, parse: str):\n parse = parse.split(' ', 1)\n anti = [\"antibirth\", \"anti\"]\n rev = [\"revelations\", \"rev\"]\n subdomain = \"antibirth\" if parse[0] in anti else \"tboirevelations\" if parse[0] in rev \\\n else \"bindingofisaacrebirth\"\n parse = ' '.join(parse) if subdomain == \"bindingofisaacrebirth\" else parse[1]\n page = requests.get(f\"https://{subdomain}.gamepedia.com/index.php?search={parse}\")\n if \"search\" in page.url:\n soup = BeautifulSoup(page.content, 'html.parser')\n if re.sub(r'\\W+', '', parse.lower()) == \\\n re.sub(r'\\W+', '', soup.find(class_=\"unified-search__result__title\").get(\"data-title\").lower()):\n await ctx.send(soup.find(class_=\"unified-search__result__title\").get(\"href\"))\n else:\n await ctx.send(f\"I couldn't find an exact match. Here is a link to this query's search page. {page.url}\")\n else: await ctx.send(page.url)",
"def __call__(self, *paths):\n\n for item in self.site.items:\n if item.is_page() and item.match(*paths):\n yield item",
"def post(self, request, custom_url: str) -> render:\n\n c_user = request.user\n\n context = self.get_menu_context('music', 'Музыка')\n context['matching'] = True\n if not request.POST.get('query'):\n context['matching'] = False\n context['music_list'] = c_user.profile.get_music_list()\n return render(request, 'music/search.html', context)\n query = request.POST.get('query')\n search_fields = ['title', 'artist']\n context['c_matches'] = Music.objects.filter(search_filter(search_fields, query))\n\n return render(request, 'music/search.html', context)",
"def run_search(term, imdb_page, debug = False):\n\n # confirm function call\n if debug:\n print(\"run_search()\")\n\n # scrub search term for imdb\n formatted_term = \"+\".join(term.split())\n\n # add page information to search term\n if imdb_page > 0:\n page_specifier = f\"&start={ (imdb_page * 50) + 1 }\"\n else:\n page_specifier = \"\"\n\n # get BeautifulSoup data for search term\n search_string = \"https://www.imdb.com/search/title?title=\" + formatted_term + \"&title_type=tv_series\" + page_specifier\n if debug:\n print(f\"search_string: {search_string}\")\n search_soup = bs4.BeautifulSoup(requests.get(search_string).text, features=\"html.parser\")\n\n #get max page\n if imdb_page < 1:\n\n # identify element that states range and number of results\n desc = search_soup.select(\".desc\")[0]\n span = desc.select(\"span\")[0].contents[0][0:-8]\n\n # get number of results\n if span[:8] == \"1-50 of \":\n span = span[8:]\n try:\n result_num = float(span)\n except:\n result_num = 0\n\n # calculate max_pages\n max_pages = int(ceil(result_num / 5))\n if debug:\n print(result_num)\n print(max_pages)\n\n else:\n max_pages = None;\n\n # get valid pages for no_results\n low = imdb_page * 10;\n high = low + 9\n page_range = [low, high]\n\n # cultivate return list\n links = search_soup.select(\"h3 > a\")\n\n if debug:\n print(links)\n\n search_results = []\n\n print(len(links))\n\n for i in range(len(links)):\n if debug:\n print(f\"result: {i}\")\n\n try:\n show_div = links[i]\n except:\n break\n s = (show_div.contents[0], show_div.get(\"href\"))\n search_results.append(s)\n\n if debug:\n print(f\"search results length: {len(search_results)}\")\n\n return {\"results\": search_results, \"max\": max_pages, \"range\": page_range}",
"def assert_in_html(member, container, **kwargs):\n member = markupsafe.escape(member)\n return assert_in(member, container, **kwargs)",
"def fandom(self, irc, msg, args, wiki_subdomain, searchquery):\n baseurl = f'https://{wiki_subdomain}.fandom.com/api.php'\n self._wiki(irc, baseurl, searchquery)",
"def just_in():\n soup = abcradionational.get_soup(URL + \"/podcasts\")\n \n playable_podcast = abcradionational.get_playable_podcast(soup)\n \n items = abcradionational.compile_playable_podcast(playable_podcast)\n\n\n return items",
"def execute_search(query, pagelen=1, type_=\"magnet\"):\n tasks = []\n loop = asyncio.get_event_loop()\n\n for page in range(0, pagelen):\n future = asyncio.ensure_future(\n search_magnets(query, page + 1, type_))\n tasks.append(future)\n\n loop.run_until_complete(asyncio.wait(tasks))\n\n for task in tasks:\n for magnet in task.result():\n yield magnet"
]
| [
"0.5157872",
"0.48303917",
"0.47415784",
"0.47138998",
"0.4580432",
"0.45471004",
"0.45214573",
"0.45160756",
"0.44645557",
"0.44120982",
"0.44019017",
"0.43961054",
"0.4391695",
"0.43534493",
"0.43461645",
"0.4341786",
"0.43401173",
"0.4313143",
"0.42990834",
"0.42850435",
"0.4255603",
"0.42438695",
"0.422952",
"0.42249143",
"0.42023265",
"0.41995174",
"0.41989183",
"0.41868815",
"0.4172027",
"0.41670227"
]
| 0.6394313 | 0 |
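
The row above covers the case where the HTML content has already been fetched and is passed in directly. The sketch below shows that path in isolation: scan a given HTML string for rel="micropub" `<link>` targets and resolve relative hrefs against the page URL. Function and variable names are illustrative, not part of the dataset's library.

```python
# Sketch of the content-argument path: scan already-fetched HTML for micropub endpoints.
from urllib.parse import urljoin

from bs4 import BeautifulSoup


def micropub_endpoints(html, base_url):
    """Return the set of absolute micropub endpoint URLs declared via <link> tags."""
    endpoints = set()
    for tag in BeautifulSoup(html, "html.parser").find_all("link", href=True):
        if "micropub" in tag.get("rel", []):
            endpoints.add(urljoin(base_url, tag["href"]))
    return endpoints


html = '<html><head><link rel="micropub" href="/micropub"></head></html>'
print(micropub_endpoints(html, "https://example.com/"))
# -> {'https://example.com/micropub'}
```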
Find the token endpoint for the given domain. Only scan HTML elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument. | def discoverTokenEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
return discoverEndpoint(domain, ('token_endpoint',), content, look_in, test_urls, validateCerts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search(wiki, pattern):\n wiki.search_tags(pattern)",
"def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)",
"def get_token(html, pattern):\n result = pattern.search(html)\n if result:\n return result.group(1)\n else:\n error('Failed to find token')\n return None",
"def _scrape_find(self, req, instruction, description, then):\n if 'find' not in instruction:\n raise InvalidInstructionError(\"Missing regex\")\n\n tags = req.tags\n find_sub = Substitution(instruction['find'], tags)\n replace_unsubbed = instruction.get('replace', '$0')\n name_sub = Substitution(instruction.get('name'), tags)\n tag_match_sub = Substitution(instruction.get('tag_match'), tags)\n join_sub = Substitution(instruction.get('join', None), tags)\n ignore_case = instruction.get('case_insensitive', False)\n multiline = instruction.get('multiline', False)\n dot_matches_all = instruction.get('dot_matches_all', True)\n\n # Default to full range\n min_match_raw = instruction.get('min_match', 0)\n max_match_raw = instruction.get('max_match', -1)\n match_raw = instruction.get('match', None)\n\n # Use single match if it was defined\n min_match_sub = Substitution(min_match_raw if match_raw is None else match_raw, tags)\n max_match_sub = Substitution(max_match_raw if match_raw is None else match_raw, tags)\n\n substitutions = [find_sub, name_sub, min_match_sub, max_match_sub, tag_match_sub,\n join_sub]\n # Parameterize input if it was supplied\n if 'input' in instruction:\n input_sub = Substitution(instruction['input'], tags)\n substitutions.append(input_sub)\n if not len(input_sub.missing_tags):\n input = input_sub.result\n else:\n input = req.input\n\n missing_tags = Substitution.add_missing(*substitutions)\n if len(missing_tags):\n return MissingTags(req, missing_tags)\n\n try:\n min_match = int(min_match_sub.result)\n max_match = int(max_match_sub.result)\n except ValueError as e:\n return Failed(req, \"Min_match '%s' or max_match '%s' is not an int: %s\" % (\n min_match_sub.result, max_match_sub.result, e))\n\n join = join_sub.result\n\n # Python counts a little differently\n single_match = min_match == max_match or join != None\n max_match = None if max_match == -1 else max_match + 1\n\n # Default to regex as string\n name = name_sub.result if name_sub.result else None\n\n tag_match = tag_match_sub.result\n tags = req.tags\n\n try:\n regex = Regex(find_sub.result, ignore_case, multiline, dot_matches_all,\n replace_unsubbed)\n\n # This lets through max_match = None, which is OK for generator\n if min_match > -1 and max_match > -1:\n subs = regex.substitutions(input, min_match, max_match)\n # Negative values mean we can't utilize the generator, sadly...\n else:\n subs = [s for s in regex.substitutions(input)][min_match:max_match]\n\n # Join subs into a single result.\n if join:\n subs = [join.join(subs)]\n\n greenlets = []\n replaced_subs = []\n # Call children once for each substitution, using it as input\n # and with a modified set of tags.\n for i, s_unsubbed in enumerate(subs):\n\n fork_tags = InheritedDict(tags)\n\n # Ensure we can use tag_match in children\n if tag_match:\n fork_tags[tag_match] = str(i)\n\n # Fail out if unable to replace.\n s_sub = Substitution(s_unsubbed, fork_tags)\n if s_sub.missing_tags:\n return MissingTags(req, s_sub.missing_tags)\n else:\n s_subbed = s_sub.result\n replaced_subs.append(s_subbed)\n\n # actually modify our available tags if it was 1-to-1\n if single_match and name is not None:\n tags[name] = s_subbed\n\n # The tag_match name is chosen in instruction, so it's OK\n # to propagate it -- no pollution risk\n if tag_match:\n tags[tag_match] = str(i)\n\n if name is not None:\n fork_tags[name] = s_subbed\n\n if then:\n child_scraper = Scraper(session=self._session, force_all=self._force_all, pool=self._pool)\n 
greenlets.append(child_scraper.scrape_async(then,\n id=req.id,\n tags=fork_tags,\n input=s_subbed,\n uri=req.uri))\n else:\n greenlets.append(None)\n except PatternError as e:\n return Failed(req, \"'%s' failed because of %s\" % (instruction['find'], e))\n\n if len(greenlets) == 0:\n return Failed(req, \"No matches for '%s', evaluated to '%s'\" % (\n instruction['find'], find_sub.result))\n\n #gevent.joinall(greenlets)\n\n # Build Results with responses from greenlets, substitute in tags\n results = []\n for i, replaced_sub in enumerate(replaced_subs):\n g = greenlets[i]\n if g == None:\n child_resps = []\n elif self._pool:\n child_resps = g.get()\n else:\n child_resps = g\n results.append(Result(replaced_sub, child_resps))\n\n return DoneFind(req, name, description, results)",
"def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)",
"def getContentList(self, webcontent):\n try:\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, webcontent)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)",
"def find_auth_token(document_html):\n search_result = re.search(AUTH_TOKEN_REGEX, document_html)\n if search_result:\n return search_result.group('auth_token')",
"def _find_reg(self, reg_str, content):\n reg_find = re.findall(reg_str, content)\n assert reg_find is not None, \"ERROR: Could not extract any content, check regex string\"\n return reg_find",
"def contains_sale_pattern(content):\n regs = ['domain.*?\\\\.com for sale', '\\\\.com is for sale', 'buy\\s.*?this domain' 'domain name.*?for sale', 'domain sale',\n \"Below are sponsored listings for goods and services related to: <span class=\\\"htext\\\">\\S*</span>\",\n \"HugeDomains.com(.*?)is for sale\", \"domain is for sale\", \"the domain \\S+ is for sale. To purchase, call Afternic.com\",\n \"<a\\s?rel=\\\"nofollow\\\"\\s?href=\\\"https:\\/\\/www\\.hover\\.com\\/\\?source=parked\\\">\",\n \"<meta name=\\\"description\\\" content=\\\"Domain registriert bei united-domains\\.de\\\">\",\n \"<meta name=\\\"description\\\" content=\\\"\\S+ is a brandable business domain name for sale!\\\">\",\n \"<h2 class=\\\"txt-no-case\\\"><span id=\\\"txt-main-domain-name\\\" style=\\\"\\\">\\S+<\\/span><strong class=\\\"text-for-sale\\\">is for sale!<\\/strong><\\/h2>\"]\n for reg in regs:\n p = re.compile(reg, re.IGNORECASE | re.DOTALL)\n if p.search(content):\n debug(reg)\n return hit(reg)\n\n texts = [\"domain has recently been listed in the marketplace\", \"domain may be for sale\", \"Domain Brokerage providing Global Naming Assets Solutions\"]\n for text in texts:\n if text in content:\n debug(text)\n return hit(text)\n\n return False",
"def find(self, selector=\"*\", containing=None, clean=False, first=False,\n _encoding=None):\n\n # Convert a single containing into a list.\n if isinstance(containing, str):\n containing = [containing]\n if not isinstance(selector, str):\n raise TypeError(\"Expected string, got %r\" % type(selector))\n\n encoding = _encoding or self.encoding\n elements = [\n Element(element=found, url=self.url, default_encoding=encoding)\n for found in self.pq(selector)\n ]\n\n if containing:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n if any([c.lower() in element.full_text.lower() for c in containing]):\n elements.append(element)\n\n elements.reverse()\n\n # Sanitize the found HTML.\n if clean:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))\n elements.append(element)\n\n if first and len(elements) > 0:\n return elements[0]\n else:\n return elements",
"def __call__(self, doc):\n matches = self.matcher(doc)\n spans = [] # keep spans here to merge them later\n for match_id, start, end in matches:\n span = doc[start : end]\n for token in span:\n token._.set(self._is_url, True)\n token.lemma_ = get_domain(token.text)\n spans.append(span)\n\n return doc",
"def getContentList(self, content, index=-1):\n try:\n if index == -1: # this is a return for a single instance site\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n else: # this is the return for a multisite\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def __call__(self, doc):\n matches = self.matcher(doc)\n spans = [] # keep spans here to merge them later\n for match_id, start, end in matches:\n span = doc[start : end]\n for token in span:\n token._.set(self._is_email_addr, True)\n spans.append(span)\n\n return doc",
"def re_findall(text, website_source):\n pattern = re.compile(text)\n my_page = website_source\n my_match = pattern.findall(my_page, re.S)\n return my_match",
"def find_page_part(page_list, regex, before, after):\n for x in page_list:\n if is_regex_in_string(regex, x):\n return between(before, after, x)",
"def search(self, word):",
"def scan(self):\n\t\tif self.links:\n\t\t\tlogging.info(\"Scanning for CSRF Protection...\")\n\t\t\tfor link in self.links:\n\t\t\t\t#only focus on those that have input parameters\n\t\t\t\tinputs = link.getInputs()\n\t\t\t\turl = link.getUrl().strip()\n\t\t\t\tif inputs:\n\t\t\t\t\tcontent = \"\"\n\t\t\t\t\tfor input_tag in inputs:\n\t\t\t\t\t\tname = input_tag.get(\"name\")\n\t\t\t\t\t\tif name:\n\t\t\t\t\t\t\tcontent+=name\n\n\t\t\t\t\tif content:\n\t\t\t\t\t\tcontent = content.lower().strip()\n\t\t\t\t\t\tself.has_csrf_token(content,url)\n\n\t\t\t\t#get based url?\n\t\t\t\tif \"?\" in url:\n\t\t\t\t\turl = url.lower()\n\t\t\t\t\tself.has_csrf_token(url,url,False)",
"def _find_in_xml(self, pattern, element=None, namespace=Xmlns_path):\n el = self._xml if element is None else element\n return el.find('.//' + namespace + pattern)",
"def search(self, pattern):\n raise NotImplementedError()",
"def search(self, query: str):\n from googlesearch import search\n from urllib.error import HTTPError\n search_successful = False\n result = None\n\n # top level domains for the google search\n tld_array = [\"com\", \"co.in\", \"co.za\", \"co.uk\", \"co.de\", \"co.id\"]\n # the index of the top level domains to start off with\n tld_index = 0\n\n # if getting too many requests, change tld to co.in and com, co.za\n while not search_successful:\n try:\n urls = search(query, tld=tld_array[tld_index], num=1, stop=1, pause=2,\n # domains=[\"\"],\n user_agent=\"GoogleSearchBotThing/1.0\")\n for url in urls:\n result = url\n\n search_successful = True\n except HTTPError as error:\n tld_index = (tld_index + 1) % len(tld_array)\n printer = General(self.verbosity)\n printer.pprint(8, \"Too many requests from TLD. Switching to\", tld_array[tld_index], error)\n search_successful = False\n pass\n return result",
"def getContentList(self, webcontent, index):\n try:\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, webcontent)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None",
"def domain_search(self, terms, page=1, exclude='', max_length=25, min_length=1, has_hyphen='true',\n has_number='true', active_only='false', deleted_only='false', anchor_left='false',\n anchor_right='false'):\n params = {'query':terms,\n 'page':page,\n 'exclude_query':exclude,\n 'max_length':max_length,\n 'min_legnth':min_length,\n 'has_hyphen':has_hyphen,\n 'has_number':has_number,\n 'active_only':active_only,\n 'deleted_only':deleted_only,\n 'anchor_left':anchor_left,\n 'anchor_right':anchor_right}\n return self.apiquery('/v2/domain-search/', params=params)",
"def search_content_with_text_get(self, ctype, currentpage, head, locale, searchtext, source, tag):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Content/Search/{locale}/\"))",
"def ask_for_token(domain):\n found = False\n\n # Keep asking until a valid authentication token has been entered by the user\n while not found:\n token = input(u\"\\nEnter authentication token (see 'Setup' section on https://github.com/perslev/CanvasSync for details):\\n$ \")\n found = helpers.validate_token(domain, token)\n\n return token",
"def findall(self, regexp):\n\n if \"_token_searcher\" not in self.__dict__:\n self._token_searcher = TokenSearcher(self)\n\n hits = self._token_searcher.findall(regexp)\n hits = [\" \".join(h) for h in hits]\n print(tokenwrap(hits, \"; \"))",
"def search(self, word):\n return self.helper(word, self.root)",
"def search(self, word):\n return self.helper(word, self.root)",
"def search(self, term):",
"def search_content_by_tag_and_type_get(self, currentpage, head, itemsperpage, locale, tag, type):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Content/SearchContentByTagAndType/{tag}/{type}/{locale}/\"))"
]
| [
"0.521796",
"0.50288016",
"0.49580178",
"0.4880799",
"0.47114867",
"0.4708841",
"0.46950248",
"0.46874154",
"0.46816665",
"0.4663791",
"0.46451175",
"0.46395028",
"0.46025386",
"0.4575456",
"0.45328408",
"0.45127285",
"0.44772387",
"0.44075775",
"0.43700045",
"0.43473896",
"0.43321565",
"0.4328082",
"0.43270886",
"0.43113545",
"0.42981964",
"0.42912272",
"0.42795706",
"0.42795706",
"0.42329752",
"0.42227626"
]
| 0.5688619 | 0 |
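
The token-endpoint row above is a thin wrapper that fixes the rel tuple and forwards to the generic discovery routine. A complementary sketch, assuming only `requests`: collect several IndieAuth-style rels (token_endpoint plus, as an assumption, authorization_endpoint) in one pass over the HTTP Link header without parsing the body. Names and defaults are illustrative.

```python
# Sketch: read rel targets straight from the HTTP Link header (no HTML parsing).
import requests


def rels_from_link_header(url, wanted=("token_endpoint", "authorization_endpoint")):
    """Return {rel: set of URLs} advertised in the response's Link header."""
    found = {rel: set() for rel in wanted}
    resp = requests.head(url, allow_redirects=True, timeout=10)
    for link in requests.utils.parse_header_links(resp.headers.get("link", "")):
        # A single Link entry may carry several space-separated rel values.
        for rel in link.get("rel", "").split():
            if rel in found and link.get("url"):
                found[rel].add(link["url"])
    return found
```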
Create and enqueue a future. enqueue(args, resolve) -> source_id; resolve(source, resolve_args) -> None | def source_create (self, resolve, cancel, enqueue, args = None):
future, source = FutureSourcePair ()
def resolve_internal (*resolve_args):
self.sources.discard (source)
resolve (source, *resolve_args)
return False # remove from event loop
if cancel:
def cancel_cont (result, error):
GLib.source_remove (source_id)
self.sources.discard (source)
source.TrySetCanceled ()
cancel.Await ().OnCompleted (cancel_cont)
source_id = enqueue (*(args + (resolve_internal,))) if args else enqueue (resolve_internal)
self.sources.add (source)
return future | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def async(self, *args, **kwargs):\n uid = str(uuid.uuid4())\n message = {\n 'uid': uid,\n 'name': self.name,\n 'args': self.serializer.serialize(args),\n 'kwargs': self.serializer.serialize(kwargs),\n }\n Model = get_queue_model(self.queue_name)\n Model.objects.enqueue(message)\n return FutureResult(uid, self)",
"def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, string_types) and f.__module__ == '__main__':\n raise ValueError('Functions from the __main__ module cannot be processed '\n 'by workers')\n\n # Detect explicit invocations, i.e. of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = kwargs.pop('timeout', None)\n description = kwargs.pop('description', None)\n result_ttl = kwargs.pop('result_ttl', None)\n ttl = kwargs.pop('ttl', None)\n depends_on = kwargs.pop('depends_on', None)\n at_front = kwargs.pop('at_front', False)\n meta = kwargs.pop('meta', None)\n\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs' # noqa\n args = kwargs.pop('args', None)\n kwargs = kwargs.pop('kwargs', None)\n\n return self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl, ttl=ttl,\n description=description, depends_on=depends_on,\n at_front=at_front, meta=meta)",
"def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, basestring) and f.__module__ == '__main__':\n raise ValueError(\n 'Functions from the __main__ module cannot be processed '\n 'by workers.')\n\n # Detect explicit invocations, i.e. of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = None\n result_ttl = None\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.' # noqa\n timeout = kwargs.pop('timeout', None)\n args = kwargs.pop('args', None)\n result_ttl = kwargs.pop('result_ttl', None)\n kwargs = kwargs.pop('kwargs', None)\n\n job = yield self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl)\n defer.returnValue(job)",
"def enqueue_call(self, func, args=None, kwargs=None, timeout=None, result_ttl=None): #noqa\n timeout = timeout or self._default_timeout\n job = Job.create(func, args, kwargs, connection=self.connection,\n result_ttl=result_ttl, status=Status.QUEUED)\n yield self.enqueue_job(job, timeout=timeout)\n defer.returnValue(job)",
"def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()",
"def enqueue(self, name):\n pass",
"def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()",
"def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()",
"def enqueue_call(self, func, args=None, kwargs=None, timeout=None,\n result_ttl=None, ttl=None, description=None,\n depends_on=None, at_front=False, meta=None):\n # Create job in memory\n timeout = timeout or self._default_timeout\n job = self.connection.mkjob(func=func,\n args=args, kwargs=kwargs,\n result_ttl=result_ttl, ttl=ttl,\n status=JobStatus.DEFERRED,\n description=description,\n depends_on=depends_on, timeout=timeout,\n origin=self.name, meta=meta)\n\n job.save()\n if self._async:\n job._enqueue_or_deferr(at_front=at_front)\n else:\n assert len(job._unfinished_parents()) == 0\n job.perform()\n\n return job",
"def enqueue(self, fn):\n self.queue.put(fn)",
"def _defer(queues, func, funcArgs, funcKwargs, countdown=None, eta=None,\n taskName=None, target=None, transactional=False, retry_options=None, parent=None):\n payload = _serialize(func, funcArgs, funcKwargs)\n\n queueName = random.choice(queues)\n\n # We track which function is called so that it appears clearly in the App admin dash-board.\n # Note: if it's a class method, we only track the method name and not the class name.\n url = \"/_cb/deferred/%s/%s\" % (getattr(func, '__module__', ''), getattr(func, '__name__', ''))\n\n headers = {\"Content-Type\": \"application/octet-stream\"}\n\n try:\n task = taskqueue.Task(payload=payload, target=target, url=url, headers=headers,\n countdown=countdown, eta=eta, name=taskName, retry_options=retry_options)\n except taskqueue.TaskTooLargeError:\n logging.info('Task Too Large. Storing payload in the data store')\n key = yield _DeferredTaskEntity(data=payload, parent=parent).put_async()\n payload = _serialize(_run_from_datastore, [key], {})\n task = taskqueue.Task(payload=payload, target=target, url=url, headers=headers,\n countdown=countdown, eta=eta, name=taskName, retry_options=retry_options)\n\n ret = yield task.add_async(queueName, transactional=transactional)\n raise ndb.Return(ret)",
"def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)",
"def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)",
"def load(self, *args, **kwargs):\n self._queue.put(tuple([args, kwargs]))",
"def enqueue(self,\n name,\n action=None,\n method=None,\n wait_url=None,\n wait_url_method=None,\n workflow_sid=None,\n **kwargs):\n return self.append(Enqueue(\n name,\n action=action,\n method=method,\n wait_url=wait_url,\n wait_url_method=wait_url_method,\n workflow_sid=workflow_sid,\n **kwargs\n ))",
"def enqueue(self, func):\n self.queue.put(func)",
"async def _register(self, name, source):\n self._last[name] = {}\n\n self._srcTaskList[name] = asyncio.create_task(\n self._monitor(name, source)\n )",
"async def enqueue_job(\n self,\n func_name: str,\n *args,\n queue: str = None,\n unique=False,\n timeout_seconds=0,\n expire_seconds=0,\n **kwargs,\n ):\n queue = queue or self.DEFAULT_QUEUE\n if self._concurrency_enabled:\n redis = self.redis or await self.get_redis()\n main_logger.debug(\"%s.%s → %s\", self.name, func_name, queue)\n return await self.job_future(\n redis,\n queue,\n func_name,\n unique,\n timeout_seconds,\n expire_seconds,\n *args,\n **kwargs,\n )\n\n main_logger.debug(\"%s.%s → %s (called directly)\", self.name, func_name, queue)\n data = self.job_class.encode(\n class_name=self.name,\n func_name=func_name,\n unique=unique,\n timeout_seconds=timeout_seconds,\n expire_seconds=expire_seconds,\n args=args,\n kwargs=kwargs,\n )\n j = self.job_class(data, queue_name=queue)\n await getattr(self, j.func_name).direct(*j.args, **j.kwargs)\n return j",
"def queue_buildrequest(event):\n get().build_queue.put(event)",
"def runQueueEnqueue(self):\n raise NotImplementedError",
"def apply(self, job_name, args=(), kwargs={}, **opts):\n queue = QueueType(\n self.get_queue_name(job_name), connection=self.connection, is_async=False\n )\n return queue.enqueue_call(\n job_name,\n args=args,\n kwargs=kwargs,\n result_ttl=0,\n timeout=DEFAULT_JOB_TIMEOUT,\n **opts,\n )",
"def do(self, f, *args, **kwArgs):\n self.queue.put((f, args, kwArgs))",
"def putting_on_queue(*args):\n results.put(main_func(*args))",
"def _submit_to_queue(self, script_file):",
"def async(func, *args, **kwargs):\n keywords = kwargs.copy()\n opt_keys = ('group', 'save', 'sync', 'cached', 'priority', 'chain', 'broker', 'uid')\n d_options = keywords.pop('d_options', None)\n # get an id\n tag = uuid()\n # build the task package\n task = {'id': tag[1],\n 'name': tag[0],\n 'func': func,\n 'args': args}\n # push optionals\n for key in opt_keys:\n if d_options and key in d_options:\n task[key] = d_options['key']\n elif key in keywords:\n task[key] = keywords.pop(key)\n # broker\n broker = keywords.pop('broker', get_broker())\n # group\n if task.get('uid', False):\n task['id'] = task['uid']\n # overrides\n if 'cached' not in task and Conf.CACHED:\n task['cached'] = Conf.CACHED\n if 'sync' not in task and Conf.SYNC:\n task['sync'] = Conf.SYNC\n if 'priority' not in task or task['priority'] is None:\n task['priority'] = Conf.PRIORITY\n # finalize\n task['kwargs'] = keywords\n task['started'] = datetime.now()\n # sign it\n pack = signing.PickleSerializer.dumps(task)\n # sync\n if task.get('sync', False):\n return _sync(pack)\n # push it\n ret = broker.enqueue(task['id'], pack, task['priority'])\n logger.debug('Pushed {}'.format(task['id']))\n return ret, task['id']",
"def enqueue(self,value):\n pass",
"def push(self, *args, **kwargs):\n self.queue.put((args, kwargs))",
"def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next",
"def enqueue(self, payload):\n self.message_count += 1\n self.buffer.append(payload)\n \n if len(self.buffer) > self.chunk_size:\n self.submit_lambdas()\n \n # Avoid spurious wakeups if possible\n if self.next_deadline is None:\n self.waker.set()",
"def __call__(self, *args, **kw):\n return Task(self, **self.__options)(*args, **kw)"
]
| [
"0.65122193",
"0.60749143",
"0.5991675",
"0.5934187",
"0.5817398",
"0.5680583",
"0.56742793",
"0.56742793",
"0.5626921",
"0.56141484",
"0.5594981",
"0.5588243",
"0.5585966",
"0.5519959",
"0.5494343",
"0.5458945",
"0.54240227",
"0.53948534",
"0.5388359",
"0.5381443",
"0.53267014",
"0.53169495",
"0.5316809",
"0.5315079",
"0.5289222",
"0.5279135",
"0.52697974",
"0.5247052",
"0.5237206",
"0.5231815"
]
| 0.6898187 | 0 |
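For illustration, the top-scored negatives above wrap an RQ-style Queue.enqueue that accepts either an implicit call or the explicit args=/kwargs= form it detects. A minimal usage sketch, assuming the rq and redis packages at a version matching the wrapper shown (where timeout= is accepted directly) and a reachable Redis server; the module, function, and URL names are hypothetical:

# jobs.py -- the job must live in an importable module; the wrapper above
# explicitly rejects functions defined in __main__.
def count_words(url, retries=1):
    return len(url.split("/")) + retries

# caller.py
from redis import Redis
from rq import Queue
from jobs import count_words

q = Queue(connection=Redis())
# implicit form: positional/keyword args are forwarded to the job
job_a = q.enqueue(count_words, "http://example.com/page")
# explicit form detected by the wrapper: args/kwargs passed by name
job_b = q.enqueue(count_words, args=("http://example.com/page",),
                  kwargs={"retries": 2}, timeout=30)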
Reads a .tab file into a 2D array. Separates meta info from data. | def readTab(file_name):
data = []
meta = []
l=0
for line in open(file_name):
if l<3:
meta.append(line.strip("\n").split("\t"))
else:
if len(line.strip("\n").split("\t")) == len(meta[0]):
data.append(line.strip("\n").split("\t"))
l += 1
return (meta, data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def storeTabDelimitedFile(inputFile):\n\n\n list0 = []\n with open(inputFile, 'r') as f:\n newlist = f.readlines()\n #print(newlist)\n for i in range(len(newlist)):\n #newlist[i] = newlist[i].strip('\\t')\n newlist[i] = newlist[i].strip('\\n') # this makes the matrix easier to read as i will not be converting back to original format\n x = newlist[i].split('\\t') # everytime a tab appears in a lines string, the string is split and is storing data in a list\n list0.append(x)\n print(list0) # list0 is the matrix as it contains sublists and elements that can be individually accessed",
"def parse_blasttab(fhandle):\n retval = []\n for line in fhandle.readlines():\n splitline = line.split(\"\\t\")\n data = splitline[:2] # First two columns are strings\n data += [float(_) for _ in splitline[2:]] # The rest are numeric\n return retval",
"def numpy_read_features(path):\n import numpy\n # read table as a structured array (each row is a tuple)\n feature_array = numpy.genfromtxt(path, delimiter='\\t', names=True, dtype=None)\n source = feature_array['source']\n target = feature_array['target']\n status = feature_array['status']\n feature_names = numpy.array(feature_array.dtype.names[3: ])\n features = feature_array[feature_names]\n # convert from structured array to normal ndarray\n features = features.view((numpy.float, len(features.dtype.names)))\n return source, target, status, features, feature_names",
"def tabdes(filename, body):\n # XXX checksums ignored\n head = Struct(\"!BiHBxxxB\")\n body = Struct(body)\n # foot = Struct(\"!4s\")\n\n data = []\n with open(filename, \"rb\") as f:\n buffer = f.read()\n _, _, count, length, _ = head.unpack_from(buffer, 0)\n offset = head.size\n for i in range(count):\n row = body.unpack_from(buffer, offset)\n data.append(row)\n offset += body.size\n else:\n print(\"read %d rows\" % len(data))\n # offset = 2 ** 16 - foot.size\n # _, foot.unpack_from(buffer, offset))\n return data",
"def read_in_meta_table(metatable):\n obj, redshift, obj_type, \\\n my_peak, ebv = np.loadtxt(metatable, unpack=True, dtype=str, delimiter=' ')\n redshift = np.asarray(redshift, dtype=float)\n my_peak = np.asarray(my_peak, dtype=float)\n ebv = np.asarray(ebv, dtype=float)\n\n return obj, redshift, obj_type, my_peak, ebv",
"def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data",
"def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n\n return x, y",
"def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels",
"def _read_annot_ctab_new_format(fobj, ctab_version):\n assert hasattr(fobj, 'read')\n\n dt = _ANNOT_DT\n # This code works with a file version == 2, nothing else\n if ctab_version != 2:\n raise Exception('Unrecognised .annot file version (%i)', ctab_version)\n # maximum LUT index present in the file\n max_index = np.fromfile(fobj, dt, 1)[0]\n ctab = np.zeros((max_index, 5), dt)\n # orig_tab string length + string\n length = np.fromfile(fobj, dt, 1)[0]\n np.fromfile(fobj, \"|S%d\" % length, 1)[0] # Orig table path\n # number of LUT entries present in the file\n entries_to_read = np.fromfile(fobj, dt, 1)[0]\n names = list()\n for _ in xrange(entries_to_read):\n # index of this entry\n idx = np.fromfile(fobj, dt, 1)[0]\n # structure name length + string\n name_length = np.fromfile(fobj, dt, 1)[0]\n name = np.fromfile(fobj, \"|S%d\" % name_length, 1)[0]\n names.append(name)\n # RGBT\n ctab[idx, :4] = np.fromfile(fobj, dt, 4)\n\n return ctab, names",
"def _read_annot_ctab_old_format(fobj, n_entries):\n assert hasattr(fobj, 'read')\n\n dt = _ANNOT_DT\n # orig_tab string length + string\n length = np.fromfile(fobj, dt, 1)[0]\n orig_tab = np.fromfile(fobj, '>c', length)\n orig_tab = orig_tab[:-1]\n names = list()\n ctab = np.zeros((n_entries, 5), dt)\n for i in xrange(n_entries):\n # structure name length + string\n name_length = np.fromfile(fobj, dt, 1)[0]\n name = np.fromfile(fobj, \"|S%d\" % name_length, 1)[0]\n names.append(name)\n # read RGBT for this entry\n ctab[i, :4] = np.fromfile(fobj, dt, 4)\n\n return ctab, names",
"def load_file(filename):\n f_data = []\n # open the data-set file\n file = open(filename, \"r\")\n for line in file:\n row = line.strip() # a row in the file\n f_data.append(row) # append it to the 2D array\n\n return f_data",
"def parse_tab_outfile(busco):\n with open(busco) as file:\n return file.read().split(\"\\n\")",
"def loadArray(fname):\n fh = open(fname,'r')\n try:\n x = int(fh.readline())\n except:\n raise Exception(\"couldn't read first dimension\")\n try:\n y = int(fh.readline())\n except:\n raise Exception(\"couldn't read second dimension\")\n raw = np.loadtxt(fh)\n if raw.shape[0] != x*y:\n raise Exception(\"number of rows of data=\",raw.shape[0],\" not equal to product of dimensions:\",x,y)\n if len(raw.shape) > 1:\n data = raw[:,0] + raw[:,1]*1j\n else:\n data = raw[:]\n data.shape = (x,y)\n fh.close()\n return data",
"def Read_t_file(file_name):\n t=[]\n \n with open(file_name,'r') as reader:\n temp=reader.readline().strip().split()[-1].split('-')\n t.append(temp[0])\n t.append(temp[1])\n for line in reader.readlines():\n t.append(line.strip().split()[-1].split('-')[-1])\n \n return np.array(t,dtype=np.float32)",
"def readfile(filepath, mode, separator='\\t', datatype=float, skiprows=1):\n if mode == 'df':\n data = pd.read_table(filepath, separator, engine='python')\n if mode == 'npa':\n data = np.loadtxt(filepath, dtype=datatype, skiprows=skiprows)\n return data",
"def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S",
"def readData(fname):\n pd = pandas.read_csv(fname)\n return [numpy.array(pd[colname]) for colname in pd.columns[1:]]",
"def read_flow(filename):\n f = open(filename, 'rb')\n magic = np.fromfile(f, np.float32, count=1)\n data2d = None\n\n if 202021.25 != magic:\n print 'Magic number incorrect. Invalid .flo file'\n raise ValueError\n else:\n w = np.fromfile(f, np.int32, count=1)[0]\n h = np.fromfile(f, np.int32, count=1)[0]\n #print \"Reading %d x %d flo file\" % (h, w)\n data2d = np.fromfile(f, np.float32, count=2 * w * h)\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (h, w, 2))\n f.close()\n return data2d",
"def read_external_data(fname,sep='\t',coma=False,bn=False,header=0):\n\tf = open(fname,\"r\")\n\tLines = f.readlines()[header:]\n\tN = len(Lines)\n\tnVal = len(Lines[N-1].split(sep)) # using last line as reference for number of cloumns\n\tA = np.zeros((N,nVal))\n\tfor line in range(N):\n\t\tif coma:\n\t\t\tLines[line] = Lines[line].replace(',' , '.')\n\t\tif bn:\n\t\t\tLines[line] = Lines[line].replace('\\n' , '')\n\t\tA[line] = np.array(Lines[line].split(sep))\n\tf.close()\n\treturn A.transpose()",
"def read_torque_table(table):\n f = open(table)\n lines = f.readlines()\n f.close()\n out = np.zeros((61, 136))\n line_num = 0\n for line in lines:\n fields = line.split()\n out[line_num, :] = [float(field) for field in fields]\n line_num = line_num + 1\n return out",
"def openfile(filename):\n Data = np.genfromtxt(filename, delimiter = \",\")\n data = [[]]\n for i in range(np.shape(Data)[0]):\n #Stores information row-by-row\n data.append(Data[i][0:])\n return data",
"def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array",
"def read_array(filename, separator=','):\n dtype = np.dtype([('id','S12'),\n ('views','int32'),\n ('location','S140'),\n ('comments','int32'),\n ('tags_n','int32'),\n ('favorites','int32'),\n ('make','S50'),\n ('model','S100')])\n cast = np.cast\n data = [[] for dummy in xrange(len(dtype))]\n f = open(filename, 'r')\n lines = f.readlines()\n for line in lines[1:-100]:\n fields = line.strip().split(separator)\n for i, number in enumerate(fields):\n data[i].append(number)\n for i in xrange(len(dtype)):\n data[i] = cast[dtype[i]](data[i])\n return np.rec.array(data, dtype=dtype)",
"def readMatrix(file):\n file1 = open(file, \"r\")\n rawData = file1.readlines()\n file1.close() \n \n n = round(len(rawData[0])/2) \n \n matrix2D = [[None for x in range(n)] for y in range(n)] \n \n j = 0\n for line in rawData: \n i = 0 \n for element in line:\n if element != \" \":\n if i == n:\n break\n matrix2D[j][i] = element\n i+= 1 \n j+= 1 \n \n return matrix2D",
"def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels",
"def tcv2array(path):\n a = []\n with open(path) as tsvfile:\n reader = csv.reader(tsvfile, delimiter='\\t')\n for row in reader:\n if row:\n if row[0][0] != '#':\n a.append(row)\n return a",
"def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data",
"def read_array(self, filename):\n extension = filename.split('.')[-1] # Get file extension\n if extension == 'mat':\n array = sci.loadmat(filename)\n elif extension == 'npy':\n array = np.load(filename)\n else:\n print('Error!!! Unrecognised file type for read_array()')\n array = None\n return array",
"def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")",
"def parse_file_into_array(filename, separator):\n arr = []\n with open(filename) as file:\n for row in file.read().splitlines():\n try:\n row_arr = [float(cell) for cell in row.split(separator)]\n if 'winequality' in filename:\n row_arr[-1] = 1 if row_arr[-1] > 5 else 0 # convert to binary classification\n elif 'breast-cancer' in filename:\n row_arr[-1] = 1 if row_arr[-1] == 4 else 0 # convert to binary classification\n except ValueError:\n continue\n arr.append(row_arr)\n return arr"
]
| [
"0.6211566",
"0.60512006",
"0.5980201",
"0.595952",
"0.59593946",
"0.5899865",
"0.58882284",
"0.58542913",
"0.58366424",
"0.5757578",
"0.5756274",
"0.57214826",
"0.5718835",
"0.5707263",
"0.56501734",
"0.5639782",
"0.5633153",
"0.5614347",
"0.5608434",
"0.5604423",
"0.55811155",
"0.55679667",
"0.5564706",
"0.5553996",
"0.5550546",
"0.55258644",
"0.5500758",
"0.5487323",
"0.5473254",
"0.5472333"
]
| 0.75938237 | 0 |
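For illustration, a minimal sketch of the readTab document above in use; it restates the helper with a context manager so the file handle is closed, writes a small hypothetical .tab file, and shows how the first three lines become meta while rows matching the header width become data:

def readTab(file_name):
    meta, data = [], []
    with open(file_name) as handle:
        for line_number, line in enumerate(handle):
            fields = line.strip("\n").split("\t")
            if line_number < 3:
                meta.append(fields)            # first three lines hold meta info
            elif len(fields) == len(meta[0]):
                data.append(fields)            # keep only rows matching the header width
    return meta, data

if __name__ == "__main__":
    sample = "example.tab"                     # hypothetical file name and contents
    with open(sample, "w") as out:
        out.write("gene\tsample1\tsample2\n")
        out.write("class\tcontrol\ttreated\n")
        out.write("units\tcount\tcount\n")
        out.write("BRCA1\t12\t30\n")
    meta, data = readTab(sample)
    print(meta)   # [['gene', 'sample1', 'sample2'], ['class', 'control', 'treated'], ['units', 'count', 'count']]
    print(data)   # [['BRCA1', '12', '30']]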
Check if elem is a county name. | def isCountyName(elem):
return (elem.attrib['k'] == "tiger:county") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_in_county(p):\n try:\n float(p[0:1])\n return True\n except ValueError:\n return False",
"def identifyCounty(line):\n matches = re.findall('[a-zA-Z]', line)\n if len(matches) > 0 and ''.join(matches) != \"Total\":\n return True",
"def is_cuisine(elem):\n return elem.attrib['k'] == 'cuisine'",
"def is_named(ucs):\n try:\n return bool(unicodedata.name(ucs))\n except ValueError:\n return False",
"def is_street_name(elem):\n return elem.attrib['k'] == \"addr:street\"",
"def is_street_name(elem):\n return (elem.attrib['k'] == \"addr:street\")",
"def is_street_name(elem):\n\n return (elem.attrib[\"k\"] == \"addr:street\") or (elem.attrib[\"k\"] == \"addr:street_1\")",
"def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county",
"def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))",
"def get_county_by_name(self, name):\n raise NotImplementedError()",
"def is_postal_code(elem):\n return 'post' in elem.attrib['k']",
"def is_ucsc_reference_name(name):\n return (normalize_reference_name(name) in normalized_ucsc_reference_names)",
"def test_get_county_name(self):\n dag = self.dagbag.get_dag(self.dag_id)\n extract_task = dag.get_task('extract')\n resp = self.extract.getCountyNamesFromAPI()\n self.assertIsNotNone(resp)\n self.assertEqual(type(resp), list)",
"def xd_element(name):\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name",
"def test_check_only_one_fontName(self):\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)",
"def _is_name_type(self, type_id):\n return type_id == self.name_type",
"def contains(name):",
"def test_is_an_element_caseinsensitive_name():\n for el in roentgen.elements['name']:\n assert is_an_element(el.upper())\n assert is_an_element(el.lower())\n assert is_an_element(el.capitalize())",
"def is_an_oak(name):\n if 'quercus' in name.lower():\n return True\n else:\n return False",
"def test_city_country(self):\n formatted_name = make_formatted_name('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')",
"def is_calibration_tag_for_name(ins, exp, run, name='dark') :\n for attr in run_attributes(ins, exp, run) :\n if attr['class'] == 'Calibrations' and attr['name'] == name : return True\n return False",
"def test_name(self):\n self.assertEqual(type(City.name), str)",
"def has_any(self, name):\n counter = 0\n for element in self.cards:\n if name in str(element):\n counter += 1\n\n if counter > 0:\n return True\n else:\n return False",
"def test_city_country_population(self):\n formatted_name = make_formatted_name('santiago', 'chile', 5000000)\n self.assertEqual(formatted_name, 'Santiago, Chile - population 5000000')",
"def test_city_country(self):\n formatted_name = city_country('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')",
"def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')",
"def is_name(self, cell):\n tokens = self._cell_tokenizer.tokenize(cell.get_text())\n cell_type = self._get_token_type(self._cell_parser.parse(tokens))\n if cell_type == 'NAME':\n return True\n return False",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def has_name(self):\n return self.unpack_word(0x2) != 0"
]
| [
"0.6464266",
"0.6227803",
"0.6202538",
"0.61037445",
"0.60834086",
"0.6082506",
"0.59991944",
"0.5927276",
"0.5760006",
"0.5758046",
"0.56099755",
"0.55842495",
"0.55784076",
"0.55266464",
"0.5466257",
"0.54466796",
"0.5416121",
"0.53680265",
"0.53628933",
"0.52790076",
"0.526509",
"0.5259199",
"0.52418435",
"0.52413553",
"0.52296907",
"0.52258164",
"0.5224218",
"0.5208673",
"0.51989985",
"0.5188252"
]
| 0.8547739 | 0 |
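For illustration, a minimal sketch showing the isCountyName check above applied to OSM-style tag elements built with xml.etree.ElementTree; the tag values are hypothetical:

import xml.etree.ElementTree as ET

def isCountyName(elem):
    return elem.attrib['k'] == "tiger:county"

county_tag = ET.Element("tag", attrib={"k": "tiger:county", "v": "Travis, TX"})
street_tag = ET.Element("tag", attrib={"k": "addr:street", "v": "Main Street"})
print(isCountyName(county_tag))  # True
print(isCountyName(street_tag))  # False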
True if this NestedInteger holds a single integer, rather than a nested list. | def isInteger(self):
return isinstance(self.value, int) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isInteger(self):\n pass",
"def isInteger(self):\n return self._is_int",
"def isInteger(self):\n return _libsbml.ASTNode_isInteger(self)",
"def isInteger(self):",
"def isInteger(self):",
"def isinteger(self):\n return self.den == 1",
"def is_int(self):\n return self.value_type in (int, arrow.JuArrow)",
"def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()",
"def _is_integer(x):\n return (not isinstance(x, (bool, np.bool))) and \\\n isinstance(x, (numbers.Integral, int, np.int, np.long, long)) # no long type in python 3",
"def is_sequence_of_int(items):\n return all(isinstance(item, int) for item in items)",
"def is_int(self): \n return (self._den == 1)",
"def isInteger(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64",
"def isInteger(data):\n\ttry:\n\t\tfrom types import LongType, IntType\n\t\tif type(data) == LongType or type(data) == IntType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(int(0)):\n\t\t\treturn True\n\treturn False",
"def is_integer(i):\n import numpy as np\n if isinstance(i, (int, long)):\n return True\n if isinstance(i, float):\n return (i).is_integer()\n if issubclass(type(i), np.integer):\n return i\n else:\n return False",
"def DataIsInteger(self):\n return self.data_type in (\n definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,\n definitions.REG_QWORD)",
"def is_simple(self) -> bool:\n return self.data in ('int', 'bool', 'float', 'str')",
"def is_int(self, size=None):\n return False",
"def is_intscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n int,\r\n np.int8,\r\n np.int16,\r\n np.int32,\r\n np.int64,\r\n np.uint8,\r\n np.uint16,\r\n np.uint32,\r\n np.uint64,\r\n ))",
"def is_integer(value: Union[float, np.ndarray]) -> bool:\n if type(value) == np.ndarray:\n for entry in value:\n result = Comparator.is_integer(entry)\n if not result:\n return False\n return True\n else:\n value = abs(value)\n value -= int(value)\n if value > 0.5:\n return Comparator.is_close_to_zero(1 - value)\n return Comparator.is_close_to_zero(value)",
"def of_type(self, a):\n return type(a) == type(self.one)",
"def is_int(self):\n return self.v & 1 != 0",
"def is_one(self) -> bool:\n return self.field.one == self",
"def is_structure_of_integers(type_spec: computation_types.Type) -> bool:\n py_typecheck.check_type(type_spec, computation_types.Type)\n if isinstance(type_spec, computation_types.TensorType):\n py_typecheck.check_type(type_spec.dtype, tf.dtypes.DType) # pytype: disable=attribute-error\n return type_spec.dtype.is_integer # pytype: disable=attribute-error\n elif isinstance(type_spec, computation_types.StructType):\n return all(\n is_structure_of_integers(v)\n for _, v in structure.iter_elements(type_spec)\n )\n elif isinstance(type_spec, computation_types.FederatedType):\n return is_structure_of_integers(type_spec.member)\n else:\n return False",
"def is_int(x):\n # boolean are subclasses of integers in Python, so explicitly exclude them\n return isinstance(x, (int, np.integer)) and not isinstance(x, bool)",
"def is_int(value):\n return isinstance(value, int)",
"def is_integer_type(self):\n raise exceptions.NotImplementedError()",
"def is_integer(matrix):\n return numpy.issubdtype(matrix.dtype, numpy.integer)",
"def _is_primitive(val):\n\n prims = [int, float, str, bool]\n for prim in prims:\n if isinstance(val, prim):\n return True\n return False",
"def is_one(self, a):\n return a == self.one",
"def _is_integer_like(input):\n if _is_boolean_like(input):\n return True\n if type(input) is int:\n return True\n if isinstance(input, _ScalarConstant):\n if input.dtype in _int_like_types:\n return True\n return False"
]
| [
"0.68816566",
"0.68761814",
"0.68001324",
"0.6668329",
"0.6668329",
"0.6667199",
"0.65634376",
"0.653184",
"0.63604957",
"0.63311446",
"0.6296856",
"0.61985886",
"0.6147023",
"0.6113862",
"0.6112268",
"0.6085722",
"0.60296875",
"0.5978817",
"0.59638053",
"0.5953098",
"0.5942244",
"0.59256554",
"0.592517",
"0.5909241",
"0.58998376",
"0.58388954",
"0.5833294",
"0.5777632",
"0.5774019",
"0.57674515"
]
| 0.7202063 | 0 |
Set this NestedInteger to hold a nested list and add a nested integer elem to it. | def add(self, elem: 'NestedInteger'):
if self.value is None:
self.value = [elem]
elif self.isInteger():
self.value = [NestedInteger(self.value), elem]
else:
self.value = [*self.value, elem] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, elem):\n assert self._is_int is False\n self._list.append(elem)",
"def __init__(self, value=None):\n if isinstance(value, Iterable):\n self.value = [NestedInteger(v) for v in value]\n elif isinstance(value, NestedInteger):\n self.value = value.value\n else:\n self.value = value",
"def __init__(self, nestedList):\n self.idx = -1\n self.elements = []\n self.unNest(nestedList)",
"def __init__(self, nestedList):\n self.stack = [[nestedList, 0]]",
"def __init__(self, nestedList):\n self.curr = nestedList\n self.idx = 0",
"def __init__(self, nestedList):\n\n self.nestedList = nestedList",
"def __init__(self, nestedList):\n self.nums = []\n self.parse(nestedList)\n self.index = 0\n if self.nums:\n self.hasn = True",
"def __add(self, element):\n\t\tif element.value == None:\n\t\t\telement.value = self._last_value\n\t\t\tself._last_value += 1\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself._last_value = element.value + 1\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\n\t\tself.elements.append(element)",
"def set(self, value):\n if value is None:\n self.value = [] if self.list else None\n else:\n value = self.cast(value)\n if self.list:\n self.value.append(value)\n else:\n self.value = value",
"def __set__(self, instance, value):\n # Run process for the nested field type for each value in list\n instance._values[self.name] = [self.field.process(v) for v in value]",
"def __add__(self, element):\r\n self.elements += element",
"def add(self, value):\n if self._element_type is None:\n self._element_type = type(value)\n # check validation\n if isinstance(value, np.generic):\n # value = np.asscalar(value) # deprecated in numpy v1.16\n value = value.item()\n if not isinstance(value, self._element_type):\n raise TypeError(\n \"A %s parameter is expected, but received: %s\" % (str(self._element_type), str(type(value))))\n if value in self._innercontainer:\n warnings.warn(\"Adding element %s has already in the collection, skip.\" % (value.__str__()),\n category=RepeatElementWarning,\n stacklevel=3)\n else:\n self._innercontainer.append(value)\n return self",
"def add_to_list(self):\n list_item = self.entry_0.get()\n try: \n int(list_item)\n self.my_list.append(int(list_item))\n except:\n self.entry_0.delete(0, 'end')\n print(self.my_list)\n return self.my_list",
"def appendList(self, subnode_list):\n for subnode in subnode_list:\n self.append(subnode)",
"def __iadd__(self, other):\n if not isinstance(other, IDStruct):\n raise TypeError(\"other is not of type IDStruct\")\n for left, right in other:\n self.add(left, right)\n # retain debug information\n self.transfer_debug(left, other)\n return self",
"def add_number_back_list(self, target:list, value:int, position:int)->None:\n target[position].append(value)",
"def __init__(self, nestedList):\n self.flatten_list = []\n for item in nestedList:\n self._dfs(item)\n self.counter = 0",
"def add(self, element):\n if isinstance(element, list):\n for e in element:\n if isinstance(e, Label):\n self.labels.append(e)\n else:\n self.elements.append(e)\n else:\n if isinstance(element, Label):\n self.labels.append(element)\n else:\n self.elements.append(element)\n self._bb_valid = False\n return self",
"def add(self, item):\r\n self.root = self.recurse_add(self.root, item)",
"def add(self, value):\n # check validation\n assert (isinstance(value, tuple))\n if len(value) == 1:\n value = [(value[0], i) for i in range(self._label_size)]\n return self.update(value)\n elif len(value) == 2:\n if isinstance(value[1], collections.Iterable):\n for item in value[1]:\n if item >= self._label_size:\n raise ValueError(\"Index %s is out of bound %s\" % (str(item), str(self._label_size)))\n else:\n if value[1] >= self._label_size:\n raise ValueError(\"Index %s is out of bound %s\" % (str(value[1]), str(self._label_size)))\n else:\n raise ValueError(\"A tuple with 1 or 2 elements is expected, but received: %s\" % str(value))\n if value in self._innercontainer:\n warnings.warn(\"Adding element %s has already in the collection, skip.\" % (value.__str__()),\n category=RepeatElementWarning,\n stacklevel=3)\n else:\n self._innercontainer.add(value)\n return self",
"def __iadd__(self, other):\n self.children.append(other)\n return self",
"def __init__(self, nestedList):\n self.stack = []\n for i in range(-1, len(nestedList) * -1 - 1, -1):\n print \"original \", nestedList[i]\n self.stack.append(nestedList[i])",
"def __init__(self, *args):\n this = _libsbml.new_IdList(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __set__(self, obj, value):\r\n if self.item_type in (int, long):\r\n item_type = (int, long)\r\n elif self.item_type in (str, unicode):\r\n item_type = (str, unicode)\r\n else:\r\n item_type = self.item_type\r\n if isinstance(value, item_type):\r\n value = [value]\r\n elif value == None: # Override to allow them to set this to \"None\" to remove everything\r\n value = []\r\n return super(ListProperty, self).__set__(obj,value)",
"def __set__(self,obj,val):\n self._check_bounds(val)\n super(List,self).__set__(obj,val)",
"def __Set_NumList(self,Num=None):\n DefaultIndexCounter=1e5\n \n if Num == None:\n if len(self.__IndList)==0:\n IndexCounter = DefaultIndexCounter\n \n else:\n IndexMax = max([max([abs(x) for x in Ind]) for Ind in self.__IndList])\n IndexCounter = int(max(DefaultIndexCounter,IndexMax)+0.1)\n\n else:\n IndexCounter = Num\n \n self.__IndexCounter=IndexCounter\n \n self.__NumList=[self.__IndToNum(Ind) for Ind in self.__IndList]",
"def num(self, num: List[int]):\n\n self._num = num",
"def __iadd__(self, obj):\n if not vedo.utils.is_sequence(obj):\n obj = [obj]\n for a in obj:\n if a:\n self.AddPart(a)\n return self",
"def setSublist(self, sublist):\n if self.cursor:\n self.cursor.sublist = sublist",
"def addList(self, numberType):\n\n newList = List(numberType)\n self.lists.append(newList)\n return newList.id"
]
| [
"0.63542014",
"0.6237883",
"0.60864973",
"0.60203195",
"0.59469235",
"0.580032",
"0.576964",
"0.5499853",
"0.5478583",
"0.5472345",
"0.53685707",
"0.52807355",
"0.522851",
"0.5194707",
"0.5173737",
"0.512586",
"0.511907",
"0.5111081",
"0.5087188",
"0.5053078",
"0.50245285",
"0.5023796",
"0.50180966",
"0.5003378",
"0.49951726",
"0.49802986",
"0.4977826",
"0.49764565",
"0.4967148",
"0.4963656"
]
| 0.7977217 | 0 |
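For illustration, a minimal sketch of the add document above; the constructor is an assumption (it simply stores the given value), and the example shows how a holder of a single integer is promoted to a nested list when an element is added:

class NestedInteger:
    def __init__(self, value=None):
        self.value = value                      # assumed constructor: store as-is

    def isInteger(self):
        return isinstance(self.value, int)

    def add(self, elem: 'NestedInteger'):
        if self.value is None:
            self.value = [elem]
        elif self.isInteger():
            # promote the held integer to a nested list, then append elem
            self.value = [NestedInteger(self.value), elem]
        else:
            self.value = [*self.value, elem]

holder = NestedInteger(7)
holder.add(NestedInteger(8))
print(holder.isInteger())                       # False: it now holds a nested list
print([ni.value for ni in holder.value])        # [7, 8]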
Set this NestedInteger to hold a single integer equal to value. | def setInteger(self, value: int):
self.value = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setInteger(self, value):\n assert self._is_int is True\n self._value = value",
"def setInteger(self, value):",
"def setInteger(self, value):",
"def x(self, value=None):\n if isinstance(value, (int, float)):\n self[0] = value\n else:\n if value is not None:\n raise TypeError(\"Cannot be set to {}\".format(type(value)))\n return self[0]",
"def set_value(self, value):\n if self.value:\n raise ValueError(\"Already has a Value:\", self)\n\n self.value = value\n\n if self.value != 0:\n self.possible = None\n self.solved = True",
"def setValue(self, value):\n super().setValue(int(value * self._precision))",
"def value(self, value: Optional[int] = None) -> Optional[int]:\n ...",
"def set_value(self, value):\n self.value = value\n return self",
"def setValue(self, value):\r\n # Clamp values to [0,1]\r\n self.__value = max(0, min(value, 1))",
"def __init__(self, value=None):\n if isinstance(value, Iterable):\n self.value = [NestedInteger(v) for v in value]\n elif isinstance(value, NestedInteger):\n self.value = value.value\n else:\n self.value = value",
"def assign(self, value):\n generator = (bits for bits in self.data if bits.is_null)\n if isinstance(value, int):\n series_of_bits = next(generator, _bits_sentinel)\n series_of_bits.assign(value)\n elif isinstance(value, typing.Iterable):\n for value, bits in zip(value, generator):\n bits.assign(value)\n return self",
"def set_value(self, value: Hashable):\n\t\tself._value = value\n\t\tself._potential_values.clear()",
"def value(self, value):\n\n self._value = value",
"def value(self, value):\n\n self._value = value",
"def value(self, value):\n\n self._value = value",
"def value(self, value):\n\n self._value = value",
"def value(self, value):\n\n self._value = value",
"def value(self, value):\n\n self._value = value",
"def set_value(self,x):\n self._value = x",
"def set_value(self,x):\n self._value = x",
"def value(self, value):\n\n\t\tself.__value = value",
"def set_value(self, value):\n self.value = value",
"def set_value(self, value):\n self.value = value",
"def set_value(self, value):\n self.value = value",
"def value(self, value):\n self._update_value(value)",
"def set_val(self, value):\n self._value = value if value is not None else deepcopy(self._default)\n\n return self",
"def setValue(self, value):\n self._value = value",
"def integer(self, integer):\n\n self._integer = integer",
"def set_single(self) -> None:\n self._has_single = True",
"def set_value (self):\n raise NotImplementedError"
]
| [
"0.675394",
"0.6561978",
"0.6561978",
"0.63029945",
"0.61325914",
"0.59337676",
"0.59266293",
"0.5895697",
"0.589371",
"0.58535343",
"0.57826245",
"0.5781725",
"0.5777232",
"0.5777232",
"0.5777232",
"0.5777232",
"0.5777232",
"0.5777232",
"0.5767548",
"0.5767548",
"0.57450086",
"0.57432514",
"0.57432514",
"0.57432514",
"0.57051975",
"0.5703608",
"0.56485325",
"0.56386644",
"0.5623296",
"0.56154114"
]
| 0.6733747 | 1 |
The single integer that this NestedInteger holds, if it holds a single integer. Return None if this NestedInteger holds a nested list. | def getInteger(self):
return self.value if self.isInteger() else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get(self) -> int:\n while self.stack: \n data, i = self.stack.pop()\n if i+1 < len(data): self.stack.append((data, i+1)) #backtracking point \n if data[i].isInteger(): return data[i].getInteger()\n if not data[i].getList(): continue #empty list \n self.stack.append((data[i].getList(), 0)) #push nested list on stack\n return None",
"def id(self):\n if (len(self.value) > 1) and isinstance(self.value[1], int):\n return self.value[1]\n return -1",
"def fetchone(self):\r\n debug.write(\"[SourceRPG] Fetching one object value\", 2)\r\n result = self.cursor.fetchone()\r\n debug.write(\"The initial value is: %s\" % (result,), 2)\r\n if hasattr(result, \"__iter__\"):\r\n debug.write(\"Attribute has iterable, test to see if there's only 1 value\", 2)\r\n if len(result) == 1:\r\n trueResults = result[0]\r\n if isinstance(trueResults, long):\r\n trueResults = int(trueResults)\r\n return trueResults\r\n \r\n else:\r\n debug.write(\"Single result attribute\", 2)\r\n trueResults = []\r\n for trueResult in result:\r\n if isinstance(trueResult, long):\r\n trueResult = int(trueResult)\r\n trueResults.append(trueResult)\r\n return trueResults\r\n\r\n if isinstance(result, long):\r\n result = int(result)\r\n debug.write(\"Result: %s\" % result, 3)\r\n debug.write(\"[SourceRPG] Returning the value of the object\", 2)\r\n return result",
"def scalar(self):\n try:\n ret = self.one()\n if not isinstance(ret, collections_abc.Sequence):\n return ret\n return ret[0]\n except orm_exc.NoResultFound:\n return None",
"def getList(self):\n return self.value if not self.isInteger() else None",
"def _get(self, value):\n if self.array is not None:\n len_array = self._len()\n if len_array == 0:\n return False\n elif value < len_array and value >= 0:\n return int(self.array[value])\n print(\">>> List is None\")\n return None",
"def one(self):\n return next(iter(self), None)",
"def getInteger(self):",
"def getInteger(self):",
"def first(self):\n return self.head and self.head.value or None",
"def one_or_none(self):\n return self._iter().one_or_none()",
"def getValue(self) -> Optional[int]:\n return self.__value",
"def first_value(self):\n if not self.is_empty():\n return self.data[self.head]\n return None",
"def get_first(self) -> object:\n if self.root is None: # If tree is empty\n return None\n\n return self.root.value # Returning root value",
"def getInteger(self):\n pass",
"def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n return None\n\n # return\n return self.root.value",
"def find_one(self):\n num = quotient = 0\n while num < self._len:\n chunk = self.data[quotient]\n if chunk & self.one_mask:\n remainder = 0\n while remainder < self.width and num < self._len:\n item = (chunk >> remainder) & 3\n if item == PC_ONE:\n return num\n remainder += 2\n num += 1\n else:\n num += (self.width >> 1)\n quotient += 1\n return None",
"def _get_optional_int32(serialized_all_simple):\n holder = test_pb2.AllSimple()\n holder.ParseFromString(serialized_all_simple)\n return holder.optional_int32",
"def get_val(self):\n if self.val_obj is None:\n return None\n else:\n return self.val_obj.val",
"def getSubintinfo(self,value):\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n return None",
"def getInteger(self):\n assert self._is_int is True\n return self._value",
"def __index__(self):\n return int(self)",
"def one(self):\n return self._iter().one()",
"def getValue(self):\n \n if len(self._possibilities) is 1:\n \n return self._possibilities.copy().pop()\n \n else:\n \n return None",
"def __init__(self, value=None):\n if isinstance(value, Iterable):\n self.value = [NestedInteger(v) for v in value]\n elif isinstance(value, NestedInteger):\n self.value = value.value\n else:\n self.value = value",
"def GetFirstVisibleItem(self):\r\n\r\n id = self.GetRootItem()\r\n if not id:\r\n return id\r\n\r\n while id:\r\n if self.IsVisible(id):\r\n return id\r\n id = self.GetNext(id)\r\n\r\n return None",
"def getInteger(self):\n return _libsbml.ASTNode_getInteger(self)",
"def getCurrentValue(self) -> Optional[int]:\n try:\n return int(self.text())\n except ValueError:\n return None",
"def _parse_int(node, key):\n element = node.get(key)\n if element is not None:\n return int(element)\n else:\n return None",
"def getDbIntNone(self, db, key):\n val = self.getDbStrNone(db, key)\n if val != None:\n return int(val)\n else:\n return None"
]
| [
"0.6750461",
"0.58068335",
"0.5793719",
"0.5776481",
"0.5714846",
"0.5592736",
"0.55416214",
"0.5507665",
"0.5507665",
"0.5411828",
"0.538694",
"0.5385691",
"0.53667647",
"0.53541833",
"0.53436327",
"0.53330284",
"0.53313774",
"0.5331091",
"0.53203326",
"0.53196067",
"0.5303703",
"0.5287787",
"0.5278316",
"0.52570474",
"0.5250408",
"0.52454656",
"0.51894444",
"0.518048",
"0.5170191",
"0.5168965"
]
| 0.64898604 | 1 |
The nested list that this NestedInteger holds, if it holds a nested list. Return None if this NestedInteger holds a single integer. | def getList(self):
return self.value if not self.isInteger() else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get(self) -> int:\n while self.stack: \n data, i = self.stack.pop()\n if i+1 < len(data): self.stack.append((data, i+1)) #backtracking point \n if data[i].isInteger(): return data[i].getInteger()\n if not data[i].getList(): continue #empty list \n self.stack.append((data[i].getList(), 0)) #push nested list on stack\n return None",
"def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()",
"def get_nested_sum():\n l_int = [1,2,[], 3,[4,[], 5,[6]],[7],[8,9], 10,[[],11]]\n print 'Sum:', nested_sum(l_int) \n return",
"def __init__(self, value=None):\n if isinstance(value, Iterable):\n self.value = [NestedInteger(v) for v in value]\n elif isinstance(value, NestedInteger):\n self.value = value.value\n else:\n self.value = value",
"def nested_list_size(inputs: Sequence[Any]) -> List[int]:\n if hasattr(inputs, \"tensors\"):\n return nested_list_size(inputs.tensors) # type: ignore\n if isinstance(inputs[0], dict):\n return nested_list_size(list(inputs[0].items()))\n if hasattr(inputs[0], \"size\") and callable(inputs[0].size):\n return list(inputs[0].size())\n if isinstance(inputs, (list, tuple)):\n return nested_list_size(inputs[0])\n return []",
"def getSublist(self):\n if self.cursor:\n return self.cursor.sublist\n return None",
"def getInteger(self):\n return self.value if self.isInteger() else None",
"def _get(self, value):\n if self.array is not None:\n len_array = self._len()\n if len_array == 0:\n return False\n elif value < len_array and value >= 0:\n return int(self.array[value])\n print(\">>> List is None\")\n return None",
"def get_list_of_int2(self):\n pass",
"def __init__(self, nestedList):\n self.nums = []\n self.parse(nestedList)\n self.index = 0\n if self.nums:\n self.hasn = True",
"def getSubintinfo(self,value):\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n return None",
"def add(self, elem: 'NestedInteger'):\n if self.value is None:\n self.value = [elem]\n elif self.isInteger():\n self.value = [NestedInteger(self.value), elem]\n else:\n self.value = [*self.value, elem]",
"def max_list_iter(int_list: Optional[List]) -> Optional[int]:\r\n if int_list is None:\r\n raise ValueError\r\n elif len(int_list) == 0:\r\n return None\r\n elif len(int_list) == 1:\r\n return int_list[0]\r\n else:\r\n maxVal = int_list[0]\r\n for value in int_list:\r\n if value > maxVal:\r\n maxVal = value\r\n return value",
"def flatten(nested_num_list):\n lst=[]\n\n for element in nested_num_list:\n if type(element) == type([]):\n element = flatten(element)\n for el in element:\n lst.append(el)\n else: \n lst.append(element) # element is not a list\n \n\n return lst",
"def _remove_nested_list(obj: Union[Tuple[int, int, int], List]) -> Any:\r\n if isinstance(obj, Tuple):\r\n return obj\r\n else:\r\n lst = []\r\n for sublist in obj:\r\n temp = _remove_nested_list(sublist)\r\n if isinstance(temp, Tuple):\r\n lst.append(temp)\r\n else:\r\n lst.extend(temp)\r\n return lst",
"def inner_part_of_list_of_list(self) -> AnnotationWrapper:\n return AnnotationWrapper(\n re.search(AnnotationWrapper.inner_part_of_list_of_list_re,\n self.data).group(1))",
"def get_depth(self):\r\n if isinstance(self.ttype, TypedListType):\r\n return self.ttype.get_depth() + 1\r\n else:\r\n return 0",
"def inner_part_of_list(self) -> AnnotationWrapper:\n return AnnotationWrapper(\n re.search(AnnotationWrapper.inner_part_of_list_re,\n self.data).group(1))",
"def depthSum(self, nestedList: List[NestedInteger]) -> int:\n final_sum = 0\n def dfs(nlist,depth):\n nonlocal final_sum\n #no base case\n \n #logic\n for ele in nlist:\n if ele.isInteger():\n #add the value to the sum\n final_sum += ele.getInteger() * depth\n else:\n dfs(ele.getList(),depth+1)\n dfs(nestedList,1)\n return final_sum",
"def list_value(self) -> global___Expression.RepeatedValue:",
"def max_list_iter(int_list): # must use iteration not recursion\n if int_list == []:\n return None\n elif int_list == None:\n raise ValueError\n max_int = int_list[0]\n \n for i in int_list:\n if i > max_int:\n max_int = i\n return max_int",
"def getValue(self):\n \n if len(self._possibilities) is 1:\n \n return self._possibilities.copy().pop()\n \n else:\n \n return None",
"def _get_nested(nested_dict, field):\n print(nested_dict, field)\n keys = field.split('.')\n current = nested_dict\n for k in keys:\n print('key', k, 'current', current)\n # return None for nested fields without a value in this doc\n if isinstance(current, list):\n # this list could contain anything. skip objects not containing `k`.\n return [x[k] for x in current if x.get(k) is not None]\n if not k in current:\n current = None\n break\n current = current[k]\n return current",
"def test_get_depth(self):\r\n myType = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n\r\n myManualNestedType = TypedListType(TypedListType(\r\n TypedListType(myType)))\r\n\r\n self.assertTrue(myManualNestedType.get_depth() == 3)",
"def unstructured(self):\n for leaf in self.sequences.values():\n if type(leaf) is list:\n for item in leaf:\n if item is None:\n continue\n yield item\n else:\n if leaf is None:\n continue\n yield leaf",
"def getInteger(self):",
"def getInteger(self):",
"def __init__(self, nestedList):\n self.curr = nestedList\n self.idx = 0",
"def ElementType(self) -> _n_0_t_1:",
"def _validate_nested_list_type(self, name, obj, nested_level, *args):\n if nested_level <= 1:\n self._validate_list_type(name, obj, *args)\n else:\n if obj is None:\n return\n if not isinstance(obj, list):\n raise TypeError(self.__class__.__name__ + '.' + name + ' contains value of type ' +\n type(obj).__name__ + ' where a list is expected')\n for sub_obj in obj:\n self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)"
]
| [
"0.69028354",
"0.62758094",
"0.58172405",
"0.55986816",
"0.5480824",
"0.5474453",
"0.5436319",
"0.5422447",
"0.5390378",
"0.5235933",
"0.51452607",
"0.5142659",
"0.5120265",
"0.5072609",
"0.50632876",
"0.50073016",
"0.49925464",
"0.49601611",
"0.495557",
"0.49379557",
"0.49373382",
"0.49369055",
"0.49332264",
"0.4926034",
"0.49175552",
"0.487154",
"0.487154",
"0.4847245",
"0.484572",
"0.48440284"
]
| 0.63026613 | 1 |
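For illustration, a sketch tying together the isInteger, setInteger, getInteger, and getList documents from the rows above; only the constructor is an assumption (it stores the given value unchanged):

class NestedInteger:
    def __init__(self, value=None):
        self.value = value                     # assumed constructor

    def isInteger(self):
        return isinstance(self.value, int)

    def setInteger(self, value: int):
        self.value = value

    def getInteger(self):
        return self.value if self.isInteger() else None

    def getList(self):
        return self.value if not self.isInteger() else None

ni = NestedInteger()
ni.setInteger(42)
print(ni.isInteger(), ni.getInteger(), ni.getList())    # True 42 None

ni = NestedInteger([NestedInteger(1), NestedInteger(2)])
print(ni.isInteger(), ni.getInteger())                  # False None
print([item.getInteger() for item in ni.getList()])     # [1, 2]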
Do activation scale calibration on the given light_graph | def main(light_graph,
calibration_data,
hw_specs,
sw_config,
sim_params,
nodes_to_calibrate):
with graph_collection.GraphCollection() as graph_coll:
# Create calibration graph
hist_coll = graph_coll.histogram_collection()
convert_to_calib_graph = (convert_to_activation_scale_calibration_graph.
ConvertToActivationScaleCalibrationGraph(
nodes_to_calibrate,
sw_config,
hist_coll))
calib_graph = convert_to_calib_graph.process_transforms(light_graph)
runner = histogram_graph_runner.HistogramGraphRunner(calib_graph,
hw_specs,
sw_config,
sim_params,
graph_coll)
runner.run(calibration_data)
# Get scales data
logging.info("-Computing Scales")
activation_scales_data = get_scales_data(hist_coll,
nodes_to_calibrate,
convert_to_calib_graph,
sw_config)
return activation_scales_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _RunCalibration(self, graph_key, gdef, input_data, config):\n return self._RunGraph(graph_key, gdef, input_data, config, 30)",
"def late_gradient_fusion():\n pass",
"def apply_calibration(self, cal):\n\n n_edges = len(self.channels) + 1\n channel_edges = np.linspace(-0.5, self.channels[-1] + 0.5, num=n_edges)\n self.bin_edges_kev = cal.ch2kev(channel_edges)",
"def early_gradient_fusion():\n pass",
"def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = 
(R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset",
"def activate_1(self, image):\n result = float()\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n result += (image.get_col(i, j)/31) * self.weights[i][j]\n\n self.output = self.activate_2(result)",
"def gain_standardization(self):\r\n \"\"\"\r\n load all gain factors from any hm stage (gains are identical for all SHM stages)\r\n \"\"\"\r\n gain_factors = []\r\n for i in range(self.number_of_paths):\r\n value = self.data_of_hm_cycle['coupon']['path_data'][0][0][0][i][4][0][0]\r\n gain_factors.append(value)\r\n gain_factors = np.array(gain_factors)\r\n gains_factor_new_dim = gain_factors[np.newaxis, ...]\r\n matrix_gains_2d = np.repeat(gains_factor_new_dim, self.signal_length, axis=0).T\r\n matrix_of_gains = matrix_gains_2d[:, :, np.newaxis]\r\n\r\n \"\"\"\r\n divide all signals by the gain factors such that all gains are standardized to one\r\n \"\"\"\r\n for i in range(self.num_of_hm_stages):\r\n entries = i*self.number_of_paths\r\n hm_cycle_set = self.sensor_data_flattened_[entries : entries + self.number_of_paths]\r\n divided_data = np.divide(hm_cycle_set, matrix_of_gains)\r\n self.sensor_data_flattened_[entries : entries + self.number_of_paths] = divided_data\r\n self.sensor_data_original_shape_[i, :, :, :] = divided_data\r\n\r\n return",
"def calibrate_image(image, tl = 0.5):\n for y in range(len(image)):\n for x in range(len(image[y])):\n # Convert the exiting pixel color to HLS model.\n h, l, s = rgb_to_hls(image[y, x])\n\n # We convert the HLS model back to RGB, but using our target \n # brightness value `tl`.\n r, g, b = colorsys.hls_to_rgb(h, tl, s)\n \n # Set the new pixel color.\n image[y][x] = [r * 255, g * 255, b * 255]\n\n return image",
"def effective_resistance_weighting(adjacency_matrix, callback):\n\n\tn = adjacency_matrix.shape[0]\n\toptimal_laplacian = fast_linear_averaging(n, adjacency_matrix)[\"laplacian\"]\n\toptimal_lambda2 = second_smallest_eigenvalue(optimal_laplacian)\n\n\tlaplacian = adjacency_to_laplacian(adjacency_matrix)\n\n\teffective_resistance_matrix = compute_effective_resistances(laplacian)\n\tmasked_effective_resistance_matrix = effective_resistance_matrix * adjacency_matrix\n\tprint(\"Masked ERM: \", masked_effective_resistance_matrix)\n\n\ttransformed_matrix = callback(masked_effective_resistance_matrix)\n\n\n\tresistance_row_sums = np.sum(transformed_matrix, 1)\n\tmax_row_sum = np.amax(resistance_row_sums)\n\tprint(\"Max row sum: \", max_row_sum)\n\n\t# Normalize effective_resistance_matrix so rows sum to 1\n\tlcp_matrix = transformed_matrix / (max_row_sum + 1e-5)\n\tfor i in range(n):\n\t\tlcp_matrix[i, i] = 1. - np.sum(lcp_matrix[i, :])\n\toutput_laplacian = np.identity(n) - lcp_matrix\n\tprint(lcp_matrix)\n\n\tprint(\"Final lambda2: \", second_smallest_eigenvalue(output_laplacian))\n\tprint(\"Optimal lambda2: \", optimal_lambda2)\n\n\tprint(\"Final lambda_max: \", largest_eigenvalue(output_laplacian))\n\tprint(\"Optimal lambda_max: \", largest_eigenvalue(optimal_laplacian))\n\n\t#print(\"Spectrum of optimal laplacian: \", spectrum(optimal_laplacian))\n\t#print(\"Spectrum of output laplacian: \", spectrum(output_laplacian))\n\n\tdraw_graph_from_adjacency(adjacency_matrix)\n\tdraw_graph_from_laplacian(output_laplacian)\n\tdraw_graph_from_laplacian(optimal_laplacian)",
"def calculate_attribution(self, model_instance, input_graph):\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.model_instance = model_instance\n \n tmp_graph = copy.deepcopy(input_graph).to(self.device)\n node_feats = tmp_graph.ndata.pop('h').to(self.device)\n \n self.model_instance.to(self.device)\n if self.att_type == 'integrated_gradients':\n att_function = IntegratedGradients(self._model_forward)\n self.attr_weights = att_function.attribute(\n node_feats, \n additional_forward_args=tmp_graph, \n internal_batch_size=len(node_feats),\n n_steps = 100)\n elif self.att_type == 'input_gradient':\n att_function = InputXGradient(self._model_forward)\n self.attr_weights = att_function.attribute(\n node_feats, \n additional_forward_args=tmp_graph,)\n elif self.att_type == 'attention':\n edge_feats = tmp_graph.edata.pop('e').to('cuda')\n _, self.att_weights = self.model_instance(\n tmp_graph, node_feats, edge_feats, get_node_weight=True)\n return self.att_weights[0].to('cpu').detach().numpy()\n \n self.attributes = np.multiply(\n node_feats.to('cpu').numpy(), self.attr_weights.to('cpu').numpy())\n self.attributes[self.attributes < 0] = 0\n \n self.node_weights = self.normalize_node_weights(self.attributes.sum(axis=1))",
"def _UpdateGradient(self):\n self.mol.GetGradient('analytic')",
"def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))",
"def scale_edge_weights(graph):\n g = graph.copy()\n original_weights = []\n for edge in g.edges():\n original_weights.append(g.edges()[edge]['weight'])\n scaler = MinMaxScaler(feature_range=(.5,12))\n new_weights = scaler.fit_transform(np.array(original_weights).reshape(-1,1)).flatten()\n for i,edge in enumerate(g.edges()):\n g.edges()[edge]['weight'] = new_weights[i]\n return g",
"def create_signal_which_maximizes_activation(model, layer, filt, input_size,\n lr=0.1, opt_steps=100,\n upscaling_steps=5,\n upscaling_factor=2.0,\n color='black'):\n mpl.rcParams['text.color'] = color\n mpl.rcParams['axes.labelcolor'] = color\n mpl.rcParams['xtick.color'] = color\n mpl.rcParams['ytick.color'] = color\n\n img_var = torch.randn((1, 1, int(input_size * ((1 / upscaling_factor)**upscaling_steps))))\n activations = SaveFeatures(list(model.children())[layer])\n optimizer = torch.optim.Adam(\n [img_var.requires_grad_()], lr=lr, weight_decay=1e-6)\n loss_history = []\n\n for step in range(upscaling_steps + 1):\n for n in range(opt_steps):\n optimizer.zero_grad()\n model(img_var)\n loss = -activations.features[:, filt].mean()\n loss_history.append(loss)\n loss.backward()\n optimizer.step()\n\n if step < upscaling_steps:\n img_var = torch.nn.functional.interpolate(\n img_var, scale_factor=upscaling_factor, mode='linear')\n\n plt.figure(figsize=(20, 4))\n plt.plot(img_var.clone().detach().numpy()[0, 0])\n plt.title(\"Input which maximizes activation of layer: conv_{}, filter: {}\".format(\n layer + 1, filt), fontsize=22)\n plt.show()\n\n return img_var",
"def update_threshold(self, activation_time):\n \n if self.model_type == 'linear':\n if(activation_time > 0):\n self.curr_threshold = (self.threshold +\n (1-self.threshold)*(1-activation_time/\n (self.t_max)))\n else:\n self.curr_threshold = self.threshold\n elif self.model_type == 'quadratic':\n if(activation_time > 0):\n self.curr_threshold = (self.threshold +\n (1-self.threshold)*(1-(activation_time/\n (self.t_max))**2))\n else:\n self.currthreshold = self.threshold",
"def _apply_gradient_decay():\n parameter_not_included = ['seg_emb', 'query_key_bias', 'query_emb_bias', 'query_seg_bias']\n num_layers = len(xlnet_base._net.transformer_cells)\n for (i, layer_parameters) in enumerate(xlnet_base._net.transformer_cells):\n layer_params = layer_parameters.collect_params()\n for key, value in layer_params.items():\n skip = False\n for pn in parameter_not_included:\n if pn in key:\n skip = True\n if skip:\n continue\n if value.grad_req != 'null':\n for arr in value.list_grad():\n arr *= args.layerwise_decay**(num_layers - i - 1)",
"def recalibrate_scores(weibull_model, img_layer_act, alpharank=10):\n\n num_labels = 10\n\n # Sort index of activations from highest to lowest.\n ranked_list = np.argsort(img_layer_act)\n ranked_list = np.ravel(ranked_list)\n ranked_list = ranked_list[::-1]\n\n # Obtain alpha weights for highest -> lowest activations.\n alpha_weights = [((alpharank + 1) - i) / float(alpharank) for i in range(1, alpharank + 1)]\n ranked_alpha = np.zeros(num_labels)\n for i in range(0, len(alpha_weights)):\n ranked_alpha[ranked_list[i]] = alpha_weights[i]\n\n # Calculate OpenMax probabilities\n openmax_penultimate, openmax_penultimate_unknown = [], []\n for categoryid in range(num_labels):\n label_weibull = weibull_model[str(categoryid)]['weibull_model'] # Obtain the corresponding Weibull model.\n label_mav = weibull_model[str(categoryid)]['mean_vec'] # Obtain MAV for specific class.\n img_dist = spd.euclidean(label_mav, img_layer_act)/200. + spd.cosine(label_mav, img_layer_act)\n\n weibull_score = label_weibull.w_score(img_dist)\n\n modified_layer_act = img_layer_act[0][categoryid] * (1 - weibull_score * ranked_alpha[categoryid]) # Revise av.\n openmax_penultimate += [modified_layer_act] # Append revised av. to a total list.\n openmax_penultimate_unknown += [img_layer_act[0][categoryid] - modified_layer_act] # A.v. 'unknown unknowns'.\n\n openmax_closedset_logit = np.asarray(openmax_penultimate)\n openmax_openset_logit = np.sum(openmax_penultimate_unknown)\n\n # Transform the recalibrated penultimate layer scores for the image into OpenMax probability.\n openmax_probab = compute_open_max_probability(openmax_closedset_logit, openmax_openset_logit)\n\n return openmax_probab",
"def pre_fit(\n self,\n signals,\n labels,\n initial_kernel_fct,\n upper_bound_similarity,\n lower_bound_dissimilarity,\n gamma,\n ):\n self.initial_kernel_fct = initial_kernel_fct\n self.u = upper_bound_similarity\n self.l = lower_bound_dissimilarity\n self.gamma = gamma\n\n self.training_samples, self.constrains = self.get_training_samples_and_constains(signals, labels)\n\n self.G = initial_kernel_fct(self.training_samples, self.training_samples)\n\n self.G_hat = self.compute_bregman()\n\n self.G_inv = np.linalg.inv(self.G) # Problem HERE\n\n # print(f\"self.G_inv @ self.G: {self.G_inv @ self.G}, must be equal to the identity\")\n\n self.G_core = self.G_inv @ (self.G_hat - self.G) @ self.G_inv",
"def recalibrate(self, batch_id, closure):\n loss = closure()\n #print(\"recal loss:\", loss)\n\n m = self.nbatches\n self.recalibration_i += 1\n\n if self.epoch >= self.vr_from_epoch:\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n gk = p.grad.data.double()\n\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg']\n m2 = param_state['m2']\n #pdb.set_trace()\n\n # Online mean/variance calcuation from wikipedia\n delta = gk - gavg\n gavg.add_(1.0/self.recalibration_i, delta)\n delta2 = gk - gavg\n m2.add_((delta*delta2).type_as(m2))\n\n param_state['running_mean'].zero_().add_(gavg)\n param_state['running_cov'].zero_().add_(1.0/self.nbatches, m2.double())\n\n #########\n gktbl[batch_id, :] = p.grad.data.cpu().clone()\n\n return loss",
"def smooth_activation(layer_norm, linear, activation_scales):\n if not isinstance(linear.weight, np.ndarray):\n linear_weight = linear.weight.numpy()\n activation_scales = activation_scales.numpy()\n else:\n linear_weight = linear.weight\n\n weight_scales = np.amax(np.absolute(linear_weight), axis=0)\n weight_scales = np.maximum(weight_scales, 1e-5)\n\n activation_scales = activation_scales.astype(weight_scales.dtype)\n\n scales = np.sqrt(activation_scales / weight_scales)\n scales = np.maximum(scales, 1e-5)\n\n if not isinstance(linear.weight, np.ndarray):\n import torch\n\n scales = torch.from_numpy(scales)\n\n layer_norm.gamma /= scales\n layer_norm.beta /= scales\n\n linear.weight *= scales.reshape(1, -1)",
"def apply(self, fgraph):\r\n pass",
"def linalg_solve(self):\n self._assert_problem_is_valid()\n if self._background is None:\n print(self.__class__.background.__doc__)\n raise SilSubProblemError(\n \"Must specify a background spectrum for linalg_solve \"\n + \"(see above).\"\n )\n\n if isinstance(self._target_contrast, str):\n raise SilSubProblemError(\n \".linalg_solve() does not support 'max' or 'min' contrast \"\n + \"modes. Please specify a target_contrast.\"\n )\n\n if self._target_contrast is None:\n print(self.__class__.target_contrast.__doc__)\n raise SilSubProblemError(\n \"Must specify target_contrast for linalg_solve \"\n + \"(see above).\"\n )\n\n # Get a copy of the list of photoreceptors\n receptors = self.observer.photoreceptors.copy()\n for receptor in self.ignore:\n if receptor is not None:\n receptors.remove(receptor)\n\n # Get only the action spectra we need\n sss = self.observer.action_spectra[receptors]\n bg_spds = self.predict_multiprimary_spd(self.background, nosum=True)\n\n # Primary to sensor matrix\n A = sss.T.dot(bg_spds)\n\n # An array for requested contrasts\n requested_contrasts = np.zeros(len(receptors))\n\n # Plug target_contrast into the right place\n (target_indices,) = np.where(np.in1d(receptors, self.target))\n requested_contrasts[target_indices] = self.target_contrast\n\n # Scale requested values from percentage to native units as a function\n # of the background spectrum (divide by 2?)\n requested_contrasts = A.sum(axis=1).mul(requested_contrasts)\n\n # Get inverse function\n if A.shape[0] == A.shape[1]: # Square matrix, use inverse\n inverse_function = np.linalg.inv\n else: # Use pseudo inverse\n inverse_function = np.linalg.pinv\n\n # Inverse\n A1 = pd.DataFrame(inverse_function(A.values), A.columns, A.index)\n\n # TODO: Why does dividing by two give the expected contrast, and not\n # dividing by two gives roughly double!?\n solution = A1.dot(requested_contrasts) / 2 + self.background\n\n # print(f\"receptors: {receptors}\")\n # print(f\"requested: {requested_contrasts}\")\n\n if all([s > b[0] and s < b[1] for s in solution for b in self.bounds]):\n return solution\n else:\n raise ValueError(\n \"Solution is out of gamut, lower target contrast.\"\n )",
"def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return",
"def updateGraph(self):\n self.initUnits()\n v = self.units.copy()\n v_old = v.copy() * 100 # initial value so it will skip the first break\n for step in range(self.numCycles): # for total number of cycles\n # keep the old version of v for paralel updating\n # if v_old and v every element differnce < 0.001, then stop\n if np.all(np.abs(v_old - v) < 0.001):\n break\n # assign to v_old v from the previous step\n v_old = v.copy()\n for i in range(self.graph.n): # for every unit in the graph\n if i not in self.graph.observed: # if the unit is not a special fixed value s\n net = np.dot(v_old, self.graph.c[i]) # compute total flow to the unit\n if net > 0:\n gradient = net*(self.min_max[1]-v_old[i])\n else:\n gradient = net*(v_old[i]-self.min_max[0])\n v[i] = v_old[i]*(1-self.decay) + gradient\n # should this be after every unit update, or after the whole graph updates ??\n v = np.where(v>1, self.min_max[1], v)\n v = np.where(v<-1,self.min_max[0],v)\n self.units = v",
"def rescale(self):\n # forecast on real data, don't need this anymore\n pass",
"def conv_init(conv, act='linear'):\r\n n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels\r\n conv.weight.data.normal_(0, math.sqrt(2. / n))",
"def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale",
"def _doCalibration(self):\n self._cmdCalibration(2)",
"def __initilization(self,node_set):\n \n print \"*********************************\"\n \n for x in node_set:\n x.node_vol=np.transpose(np.matrix([cmath.exp(0), cmath.exp(complex(0,math.pi*2/3)), cmath.exp(complex(0,-math.pi*2/3))]))\n \n print \"Forward/Backward Algorithm Initialization Done!\""
]
| [
"0.5870315",
"0.5327208",
"0.5292108",
"0.5278136",
"0.51503456",
"0.5125558",
"0.5125329",
"0.5116323",
"0.50977707",
"0.5058684",
"0.504306",
"0.5031749",
"0.4979374",
"0.49528605",
"0.49525857",
"0.49299765",
"0.4891175",
"0.48627597",
"0.48579732",
"0.4849806",
"0.4846908",
"0.48322684",
"0.48298818",
"0.48237148",
"0.48188218",
"0.48153812",
"0.47920233",
"0.47680366",
"0.47540897",
"0.47528642"
]
| 0.73384583 | 0 |
Method to send message to asset_index websocket channel. Asset Index (request): Retrieve a list of all available underlyings and the corresponding contract types and duration boundaries. If the user is logged in, only the assets available for that user's landing company will be returned. | def __call__(self, landing_company: Optional[str] = None, passthrough: Optional[Any] = None, req_id: Optional[int] = None):
data = {
"asset_index": int(1)
}
if landing_company:
data['landing_company'] = str(landing_company)
return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def api_asset_list():\n return jsonify(app.bank.to_list()), 200",
"def _asset_index(request, course_key):\r\n course_module = modulestore().get_course(course_key)\r\n\r\n return render_to_response('asset_index.html', {\r\n 'context_course': course_module,\r\n 'asset_callback_url': reverse_course_url('assets_handler', course_key)\r\n })",
"def index():\n try:\n # Retrieve a list of active clients from the BancBox API for \n # the right side bar.\n active_clients = api.get_active_clients()\n except Exception, e:\n active_clients = []\n logger.error('Error retrieving active clients: %s', e)\n return render_template('index.html', active_clients=active_clients)",
"def sendIndex(self):\n self.updateIndex()\n outpkg = json.dumps(self.serverindex)\n self.send(outpkg)",
"def index():\n current_userid = session[\"user_id\"]\n userbalance = get_userbal(db, current_userid)\n userstocks = get_userstock(db, current_userid)\n stocklist = get_stocklist(db, stocksid=True, prices=True)\n if request.method == \"GET\" and current_userid:\n return render_template(\"index.html\", userbalance=usd(userbalance),\n userstocks=userstocks, buystocks=stocklist)\n else:\n return apology(\"TODO\")",
"def index():\n stocks = db.execute(\"SELECT Symbol, Company, SUM(NumberOfShares) AS Shares, UnitPrice, SUM(TotalPrice) AS TotalPrice FROM \"\n \"portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n\n symbol = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n\n balance = cash[0][\"cash\"]\n grandTotal = 0\n for stock in stocks:\n grandTotal = grandTotal + stock[\"TotalPrice\"]\n\n grandTotal = grandTotal + balance\n\n return render_template(\"index.html\", stockList=stocks, cash=balance, totalAssets=grandTotal, currentUser=session.get(\"user_id\"))",
"def index():\n user_id = session[\"user_id\"]\n portfolio_table = port(user_id, db)\n \n if not isinstance(portfolio_table, dict): \n return apology(\"Error in portfolio\")\n \n return render_template(\"portfolio.html\",\n shares_list = portfolio_table[\"shares\"],\n cash = portfolio_table[\"cash\"],\n total = portfolio_table[\"total\"])",
"def GET(self):\n listing = sandbox.list(sandbox_name)\n return render.index(listing=listing)",
"def index():\n symbols = db.execute(\"SELECT symbol FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n companies = db.execute(\"SELECT company FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n get_shares = db.execute(\"SELECT SUM(shares) FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n shares = [share['SUM(shares)'] for share in get_shares]\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n return render_template(\"index.html\", symbols_companies_shares=zip(symbols, companies, shares), lookup=lookup, cash=cash)",
"def list_assets(request):\n user_assets = Asset.objects.filter(user=request.user, deleted=False).all()\n\n json_assets = ASSET_LIST_RESOURCE.to_json(dict(\n user_id=request.user.id,\n next_page_token=uuid.uuid4(),\n assets=user_assets\n ))\n request_format = request.GET.get('format', '')\n if request_format.lower() == 'json':\n return partial_json_response(request, json_assets)\n else:\n render_data = {'resource': json.dumps(json_assets)}\n render_data.update(csrf(request))\n return render('index.html', render_data)",
"def index():\n from managers import banks_manager, rates_manager\n\n # acquiring number of all banks in DB\n banks_count = banks_manager.get_banks_count()\n\n # acquiring last update time\n updated = rates_manager.get_last_update_time()\n last_update = None if updated is None else updated.strftime('%Y %d %b, %H:%M')\n\n # acquiring list of banks with best USD selling rate\n usd_selling_rate = rates_manager.get_usd_selling_min_rate()\n usd_selling_banks = banks_manager.get_usd_selling_best_rate_banks()\n\n # acquiring list of banks with best EUR selling rate\n eur_selling_rate = rates_manager.get_eur_selling_min_rate()\n eur_selling_banks = banks_manager.get_eur_selling_best_rate_banks()\n\n # acquiring list of banks with best USD buying rate\n usd_buying_rate = rates_manager.get_usd_buying_max_rate()\n usd_buying_banks = banks_manager.get_usd_buying_best_rate_banks()\n\n # acquiring list of banks with best EUR buying rate\n eur_buying_rate = rates_manager.get_eur_buying_max_rate()\n eur_buying_banks = banks_manager.get_eur_buying_best_rate_banks()\n\n # initializing banks data map\n data_map = {\n 'usd_selling_rate': usd_selling_rate,\n 'eur_selling_rate': eur_selling_rate,\n 'usd_buying_rate': usd_buying_rate,\n 'eur_buying_rate': eur_buying_rate,\n 'usd_selling_banks': usd_selling_banks,\n 'eur_selling_banks': eur_selling_banks,\n 'usd_buying_banks': usd_buying_banks,\n 'eur_buying_banks': eur_buying_banks\n }\n return render_template(\"index.html\", title='Home', banks_count=banks_count, last_updated=last_update, data=data_map)",
"def index():\n # query database to get cash on hand\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n\n # query database to get current holdings from transactions list\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n\n # assign names and totals for stocks\n for stock in stocks:\n stock_lookup = lookup(stock[\"symbol\"])\n stock[\"name\"] = stock_lookup[\"name\"]\n stock[\"total\"] = stock[\"shares\"] * stock_lookup[\"price\"]\n\n stocks[:] = [stock for stock in stocks if stock.get(\"shares\") > 0]\n\n totals = user_cash + sum([stock[\"total\"] for stock in stocks])\n\n return render_template(\"index.html\", user_cash=user_cash, stocks=stocks, total=totals, usd=usd)",
"def index():\n\n # Create lists containing values for the table\n symbols = []\n names = []\n shares = []\n totals = []\n prices = []\n\n # Query database for the current amount of cash and stocks\n cash = db.execute(\"SELECT cash FROM users WHERE id = :username\", username=session[\"user_id\"] )[0][\"cash\"]\n stocks = db.execute(\"SELECT * FROM summary WHERE id = :username\", username=session[\"user_id\"] )\n grand = cash\n\n # Append to the lists from the database\n for item in stocks:\n symbol = item[\"symbol\"]\n symbols.append(symbol)\n names.append(lookup(symbol)[\"name\"])\n share = db.execute(\"SELECT shares FROM summary WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=symbol)[0][\"shares\"]\n shares.append(share)\n prices.append(lookup(symbol)[\"price\"])\n total = int(share) * lookup(symbol)[\"price\"]\n totals.append(total)\n grand += total\n\n # Obtain list length\n length = len(symbols)\n\n # Direct users to the index page\n return render_template(\"index.html\", symbols = symbols, length = length, cash=cash, names = names, shares = shares, totals = totals, prices = prices, grand = grand)",
"def index():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Select information from shares table for logged in user\n SHARES = db.execute(\"SELECT * FROM shares WHERE user_id = ?\", user_id)\n\n # Initialise OVERALL_TOTAL variable\n OVERALL_TOTAL = 0\n\n # Obtain latest share's price from API and update database\n for share in SHARES:\n QUOTED = lookup(share[\"symbol\"])\n new_shares_total = share[\"shares_count\"] * QUOTED[\"price\"]\n OVERALL_TOTAL += new_shares_total\n db.execute(\"UPDATE shares SET price = ? AND total = ? WHERE user_id = ? AND symbol = ?\",\n QUOTED[\"price\"], new_shares_total, user_id, share[\"symbol\"])\n share[\"price\"] = QUOTED[\"price\"]\n share[\"total\"] = new_shares_total\n\n # Check how much cash the user currently has\n CASH = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n\n # Calculate final value for OVERALL_TOTAL\n OVERALL_TOTAL += CASH\n\n return render_template(\"index.html\", shares=SHARES, cash=CASH, overall_total=OVERALL_TOTAL)",
"def index():\n return 'Thanks for using the Bird Stats API.'",
"def index():\n\n # Build out pulling from the transaction history\n\n userId = session[\"user_id\"]\n cashRaw = db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"]\n cash = usd(cashRaw)\n totalValue = float(cashRaw)\n\n sharesOwned = db.execute(f\"SELECT * FROM transactions WHERE user_id={userId}\")\n\n #Update prices\n for share in sharesOwned:\n symbol = share[\"symbol\"]\n currentPrice = lookup(symbol)[\"price\"]\n db.execute(f\"UPDATE transactions SET current_price = '{currentPrice}' WHERE user_id='{userId}' AND symbol='{symbol}'\")\n\n index = db.execute(f\"SELECT symbol, SUM(shares), name, AVG(current_price) FROM transactions WHERE user_id='{userId}' GROUP BY symbol HAVING SUM(shares)>0\")\n\n for share in index:\n symbol = share[\"symbol\"]\n count = share[\"SUM(shares)\"]\n price = lookup(symbol)[\"price\"]\n value = count * price\n totalValue += value\n\n totalValue = usd(totalValue)\n\n return render_template(\"index.html\", cash=cash, index=index, totalValue=totalValue)",
"def index(request):\n username = request.session.get('username')\n try:\n index_card = IndexCard.objects.get(username=username)\n except:\n index_card = None\n service_inf_elements = get_elements_by_component_name('Service Infrastructure')\n hard_inf_elements = get_elements_by_component_name('Hard Infrastructure')\n built_env_elements = get_elements_by_component_name('Built Environment')\n template = loader.get_template('crppindexcard/index.html')\n context = RequestContext(request, {\n 'index_card': index_card,\n 'username': username,\n 'service_inf_elements': service_inf_elements,\n 'hard_inf_elements':hard_inf_elements,\n 'built_env_elements':built_env_elements,\n })\n return HttpResponse(template.render(context))",
"def index(owner):\n result = logic.resource.list_by_owner(owner)\n return jsonify(list(result))",
"def get(self):\n if 'wipe' in self.request.path_url:\n InboundRequest.wipe()\n self.response.write('Db wiped')\n return\n\n # log out last 10 requests\n data = InboundRequest.get_latest()\n\n resultList = []\n for item in data:\n headers = ''\n header_list = item.request_headers.split('\\r\\n')\n\n # try format if json\n body = item.request_body\n try:\n json_body = json.loads(item.request_body)\n body = json.dumps(json_body, indent=2, separators=(',', ': '))\n except:\n #logging.info('not in json')\n pass\n\n resultfields = {'ts': item.time_stamp, 'host_url': item.request_host_url, 'path': item.request_path,\n 'query_params': item.request_query_params, 'headers': header_list, 'body': body}\n resultList.append(resultfields)\n\n template_values = {\n 'number_returned': len(data),\n 'results': resultList\n }\n self.render_template('index.html', template_values)",
"def index():\n def getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def index(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"index\"), kwargs)",
"def get_achievement_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/achievement/index', region, **filters)",
"def index():\n\n badge_name = current_app.config.get(\"ACCREDITATION_BADGE\")\n if badge_name and not Path(public_bp.static_folder, badge_name).is_file():\n LOG.warning(f'No file with name \"{badge_name}\" in {public_bp.static_folder}')\n badge_name = None\n\n return render_template(\"public/index.html\", version=__version__, accred_badge=badge_name)"
]
| [
"0.5347752",
"0.5298236",
"0.52792937",
"0.5228874",
"0.52224773",
"0.5209268",
"0.51474935",
"0.5125685",
"0.5113717",
"0.50968444",
"0.5081047",
"0.5077979",
"0.5067293",
"0.50664",
"0.5046824",
"0.50325316",
"0.5027318",
"0.49806237",
"0.49736387",
"0.49670604",
"0.49498305",
"0.49498305",
"0.49498305",
"0.49498305",
"0.49498305",
"0.49498305",
"0.49498305",
"0.49498305",
"0.4940514",
"0.4936901"
]
| 0.6206942 | 0 |
Test ability to create an empty page | def test_create_page(self):
self.assertEqual(self.client.get(reverse('home')).status_code, 404)
page = Page.objects.create(**_page_data)
self.assertEqual(page.title, _page_data['title'])
self.assertEqual(page.page_type, _page_data['page_type'])
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('page_settings', response.context_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_page_list_unauthorised(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 2)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2})",
"def test_create_page_with_main_box(self):\n\n main_block = PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('text', response.context)\n self.assertIn('title', response.context)\n self.assertIn('image', response.context)",
"def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()",
"def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()",
"def test_program_page():\n program = ProgramFactory.create(page=None)\n assert program.page is None\n page = ProgramPageFactory.create(program=program)\n assert program.page == page",
"def test_no_such_page(self):\n path = '/no/such/page'\n with self.app.test_client() as client:\n client.get(path)\n self.assertContext('current_page', self.app.get_page(path))",
"def test_home_view_without_data(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('42 Coffee Cups Test Assignment', response.content)\n self.assertIn('No data', response.content)\n self.assertNotIn('Andrei', response.content)\n self.assertNotIn('Herasko', response.content)",
"def create_page(self):",
"def test_course_page():\n course = CourseFactory.create(page=None)\n assert course.page is None\n page = CoursePageFactory.create(course=course)\n assert course.page == page",
"def test_empty_data(self, client):\n url = reverse('users:create')\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' in str(response.content)",
"def test_create_page_with_help_box(self):\n\n button = PageButton.objects.create(**_button_data)\n help_block = PageHelpBoxBlock.objects.create(\n button=button, **_help_box_data)\n page = Page.objects.create(help_box_block=help_block, **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(page.help_box_block.button, button)\n self.assertIn('text', response.context)\n self.assertIn('button', response.context)",
"def test_empty_featured_pages(self, client, site, homepage):\n # no about or consult page: nothing rendered\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"cdhpages/snippets/featured_pages.html\")\n # only about page: nothing rendered\n about = ContentPage(title=\"about\", slug=\"about\", body=\"\")\n homepage.add_child(instance=about)\n homepage.save()\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"cdhpages/snippets/featured_pages.html\")\n # only consults page is live: nothing rendered\n about.unpublish()\n consult = ContentPage(title=\"consult\", slug=\"consult\")\n homepage.add_child(instance=consult)\n homepage.save()\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"cdhpages/snippets/featured_pages.html\")\n # both pages live; should render\n about.save_revision().publish()\n response = client.get(homepage.relative_url(site))\n assertTemplateUsed(response, \"cdhpages/snippets/featured_pages.html\")",
"def test_view_new_user(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['data']), 0)\n self.assertContains(response, '<h2>Hello there</h2>')",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_dashboard_no_courses(self):\r\n self.auth_page.visit()\r\n self.dashboard_page.visit()",
"def test_create_page_with_header(self):\n\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n\n soc_link = PageHeadSocLink.objects.create(**_head_soc_link_data)\n\n header = PageHeaderBlock.objects.create(**_header_data)\n header.main_menus_elements.add(link_1, link_2, link_3)\n header.soc_links.add(soc_link)\n\n page = Page.objects.create(header_block=header, **_page_data)\n\n self.assertEqual(page.header_block, header)\n self.assertEqual(page.header_block.main_menus_elements.count(), 3)\n self.assertEqual(link_3.drop_links.count(), 2)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('logo', response.context)\n self.assertTrue(response.context['logo'].endswith('.jpg'))\n\n self.assertIn('auth_menu', response.context)\n self.assertFalse(response.context['auth_menu'])\n\n self.assertIn('soc_links', response.context)\n self.assertIn(soc_link, response.context['soc_links'])\n\n self.assertIn('d_d_menu', response.context)\n self.assertIn(link_2, response.context['d_d_menu'])",
"def test_empty_ui(self):",
"def test_new_equipment_page(self):\n create_user()\n login(self.app, 'me1', 'password')\n\n response = self.app.get('/new_equipment', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n response_text = response.get_data(as_text=True)\n self.assertIn('New Equipment', response_text)\n self.assertIn('Name', response_text)\n self.assertIn('Quantity', response_text)\n self.assertIn('Submit', response_text)\n\n self.assertNotIn('Calendar ', response_text)\n self.assertNotIn('Logout', response_text)\n self.assertNotIn('Login', response_text)\n self.assertNotIn('Sign up', response_text)",
"def test_chooser_no_perms(self):\n root_page = Page.objects.get(pk=2)\n root_page.add_child(instance=NewsIndex(\n title='News', slug='news'))\n root_page.add_child(instance=SecondaryNewsIndex(\n title='Secondary News', slug='secondary-news'))\n\n response = self.client.get(reverse('wagtailnews:choose'))\n self.assertEqual(response.status_code, 403)",
"def test_empty_category_dashboard(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1995')\n self.assertIn(b'Create a Recipe Category', rv.data)",
"def test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template",
"def test_empty(self):\n pass",
"def test_indexPage(self):\n LOGGER.debug(\"XXX: test_indexPage entered\")\n LOGGER.debug(\"XXX: %s\", settings.TEMPLATES)\n self.post.status = 'publish'\n self.post.save()\n response = self.client.get('/blog/')\n self.assertContains(response, self.post.title)\n # how to debug the loading stuff\n # except TemplateDoesNotExist, e:\n # for tmpl, msg in e.tried:\n # LOGGER.debug(\"XXX Tried '%s'\", tmpl.name)",
"def test_new(self):\n result = self.client.get('/home/new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New Home', result.data)",
"def test_create_record_empty(client):\n response = client.simulate_post('/page/create_record')\n assert response.status_code == 400",
"def test_create_page_with_whatyouneed_block(self):\n\n what_you_need_block = PageWhatYouNeedBlock.objects.create(\n **_whatyouneed_block_data)\n Page.objects.create(what_you_need_block=what_you_need_block,\n **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('title', response.context)\n self.assertIn('left_column_title', response.context)\n self.assertIn('is_touch', response.context)\n self.assertIn('is_colour', response.context)\n self.assertIn('middle_column_title', response.context)\n self.assertIn('middle_column_label', response.context)\n self.assertIn('middle_column_caption', response.context)\n self.assertIn('right_column_title', response.context)",
"def test_page(self):\n result = self.test_client.page\n\n assert result == 1",
"def test_lesson_list_empty(client, auth_user, init_database, add_data):\n response = client.post(url_for('root.index'),data=dict(email='[email protected]',password='password'))\n # try to get home\n response = client.get(url_for('lessons.list'))\n assert response.status_code == 200\n assert b'list-group-item-action disabled' in response.data #part of the table with lessons",
"def test_home_view_one_object(self):\n self.create_obj()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('42 Coffee Cups Test Assignment', response.content)\n self.assertIn('Name', response.content)\n self.assertIn('Last name', response.content)\n self.assertIn('Date of birth', response.content)\n self.assertIn('bio', response.content)\n self.assertIn('Email', response.content)\n self.assertIn('Jabber', response.content)\n self.assertIn('Andrei', response.content)\n self.assertIn('Herasko', response.content)\n self.assertIn('Feb. 23, 1998', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('ander2299', response.content)"
]
| [
"0.7020648",
"0.69859916",
"0.6949012",
"0.68393856",
"0.68185186",
"0.6812438",
"0.6794315",
"0.6736821",
"0.67351717",
"0.6716539",
"0.66805714",
"0.6664649",
"0.66362906",
"0.6611464",
"0.6551422",
"0.6523061",
"0.6522048",
"0.65214545",
"0.6510145",
"0.65027374",
"0.6481382",
"0.6470341",
"0.6460958",
"0.64259064",
"0.6425337",
"0.64128214",
"0.64069253",
"0.64059144",
"0.6399937",
"0.6397814"
]
| 0.7788985 | 0 |
Test ability to create a page with a full header | def test_create_page_with_header(self):
link_1 = PageHeadDropMenu.objects.create\
(internal_link='home', **_head_link_data)
link_2 = PageHeadDropMenu.objects.create(
external_link=_external_link, **_head_link_data)
link_3 = PageHeadDropMenu.objects.create(**_head_link_data)
link_element_1 = PageHeadDropMenuElement.objects.create(
internal_link='home', **_head_drop_link_data)
link_element_2 = PageHeadDropMenuElement.objects.create(
external_link=_external_link, **_head_drop_link_data)
link_3.drop_links.add(link_element_1, link_element_2)
soc_link = PageHeadSocLink.objects.create(**_head_soc_link_data)
header = PageHeaderBlock.objects.create(**_header_data)
header.main_menus_elements.add(link_1, link_2, link_3)
header.soc_links.add(soc_link)
page = Page.objects.create(header_block=header, **_page_data)
self.assertEqual(page.header_block, header)
self.assertEqual(page.header_block.main_menus_elements.count(), 3)
self.assertEqual(link_3.drop_links.count(), 2)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('logo', response.context)
self.assertTrue(response.context['logo'].endswith('.jpg'))
self.assertIn('auth_menu', response.context)
self.assertFalse(response.context['auth_menu'])
self.assertIn('soc_links', response.context)
self.assertIn(soc_link, response.context['soc_links'])
self.assertIn('d_d_menu', response.context)
self.assertIn(link_2, response.context['d_d_menu']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_header(self):\n sel = self.selenium\n # Load the Create Shelter page\n sel.open(\"/eden/cr/shelter/create\")\n # Check that the location is currently blank\n self.check_blank()",
"def create_page(self):",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)",
"def test_create_page_with_main_box(self):\n\n main_block = PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('text', response.context)\n self.assertIn('title', response.context)\n self.assertIn('image', response.context)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)",
"def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template",
"def test_create_page_with_whatyouneed_block(self):\n\n what_you_need_block = PageWhatYouNeedBlock.objects.create(\n **_whatyouneed_block_data)\n Page.objects.create(what_you_need_block=what_you_need_block,\n **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('title', response.context)\n self.assertIn('left_column_title', response.context)\n self.assertIn('is_touch', response.context)\n self.assertIn('is_colour', response.context)\n self.assertIn('middle_column_title', response.context)\n self.assertIn('middle_column_label', response.context)\n self.assertIn('middle_column_caption', response.context)\n self.assertIn('right_column_title', response.context)",
"def test_about_layout(testapp):\n response = testapp.get('/about', status=200)\n html = response.html\n assert 'Chaitanya' in html.find(\"h1\").text",
"def test_page_intro(self, client, projects_landing_page):\n # create link page for project list\n sponsored_projects = LinkPage(\n title=\"Sponsored Projects\", link_url=\"projects/sponsored\"\n )\n projects_landing_page.add_child(instance=sponsored_projects)\n # create a snippet for the sponsored projects page\n PageIntro.objects.create(\n page=sponsored_projects, paragraph=\"<i>test content</i>\"\n )\n\n # visit and check that it renders\n response = client.get(reverse(\"projects:sponsored\"))\n assertContains(response, \"<i>test content</i>\")",
"def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)",
"def test_page_content(self, client, site, homepage):\n response = client.get(homepage.get_url())\n assertTemplateUsed(response, \"cdhpages/home_page.html\")\n assertContains(response, homepage.body[0].value.source)",
"def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)",
"def test_home_view_one_object(self):\n self.create_obj()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('42 Coffee Cups Test Assignment', response.content)\n self.assertIn('Name', response.content)\n self.assertIn('Last name', response.content)\n self.assertIn('Date of birth', response.content)\n self.assertIn('bio', response.content)\n self.assertIn('Email', response.content)\n self.assertIn('Jabber', response.content)\n self.assertIn('Andrei', response.content)\n self.assertIn('Herasko', response.content)\n self.assertIn('Feb. 23, 1998', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('[email protected]', response.content)\n self.assertIn('ander2299', response.content)",
"def test_page():\n app = create_ctfd()\n with app.app_context():\n\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\")\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def Header(page):\n style = {\n \"background-color\": \"var(--red_color)\",\n \"color\": \"black\",\n \"text-shadow\": \"0 0 10px #ffffff\"\n }\n\n button1 = html.A(id=\"btn1box\", children=[\n html.Button(\"OVERVIEW\", id=\"btn1\", className=\"btn\")\n ], href=\"/dashboard/overview\")\n\n button2 = html.A(id=\"btn2box\", children=[\n html.Button(\"BOOK\", id=\"btn2\", className=\"btn\")\n ], href=\"/dashboard/book\")\n\n button3 = html.A(id=\"btn3box\", children=[\n html.Button(\"WORD\", id=\"btn3\", className=\"btn\")\n ], href=\"/dashboard/word\")\n\n if page == \"overview\":\n button1.children[0].style = style\n if page == \"book\":\n button2.children[0].style = style\n if page == \"word\":\n button3.children[0].style = style\n\n return html.Div(id=\"header\", children=[\n get_title(),\n get_subtitle(),\n button1,\n button2,\n button3\n ])",
"def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertIn(\n b'<h1>42 Coffee Cups Test Assignment</h1>',\n response.content)",
"def test_page_content(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assertContains(response, \"<p>content of the landing page</p>\")",
"def page_test():\n # get user-name and access rights from IAM\n\n html = \"<h3>Hello world! 2</h3>\"\n return html",
"def page_header(self, request, title):\n request['body'].append('''\\\n<html>\n <head><title>%s</title></head>\n <style type=\"text/css\">\n * {\n font-family: verdana,sans-serif;\n }\n body {\n width: 50em;\n margin: 1em;\n }\n div {\n padding: .5em;\n }\n table {\n margin: none;\n padding: none;\n }\n .alert {\n border: 1px solid #e7dc2b;\n background: #fff888;\n }\n .error {\n border: 1px solid #ff0000;\n background: #ffaaaa;\n }\n #verify-form {\n border: 1px solid #777777;\n background: #dddddd;\n margin-top: 1em;\n padding-bottom: 0em;\n }\n </style>\n <body>\n <h1>%s</h1>\n <p>\n This example consumer uses the <a\n href=\"http://openid.schtuff.com/\">Python OpenID</a> library. It\n just verifies that the URL that you enter is your identity URL.\n </p>\n''' % (title, title))",
"def _verify_page(self):",
"def test_home_content(self):\n bs = self.get_soup(baseUrl)\n self.assertOneExists(bs, \"#page_discover\")",
"def test_index_layout(testapp):\n response = testapp.get('/', status=200)\n html = response.html\n assert 'SPACE' in html.find(\"h1\").text",
"def test_create_journal_route_has_heading(self):\n self.client.login(username='mike', password='password')\n response = self.client.get(reverse_lazy('create_journal'))\n self.assertIn(b'Create Journal Entry', response.content)",
"def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()",
"def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))",
"def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)",
"def initialPage():\n\treturn header() + footer()",
"def test_create_page_with_footer(self):\n\n footer_block = PageFooterBlock.objects.create(**_footer_block_data)\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n footer_block.top_links.add(link_1, link_2, link_3)\n contact_address = Address.objects.create(**_address_block)\n footer_block.contact_address.add(contact_address)\n footer_block.a_link.add(link_element_1)\n\n Page.objects.create(footer_block=footer_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('top_links', response.context)\n self.assertIn('contact', response.context)\n self.assertIn('top_contacts', response.context)\n self.assertIn('bot_copy_right', response.context)\n self.assertIn('bot_link', response.context)\n self.assertIn('bot_back_top', response.context)"
]
| [
"0.71193373",
"0.69215447",
"0.67508227",
"0.6622254",
"0.65518856",
"0.6475695",
"0.642205",
"0.6361855",
"0.63545746",
"0.63482505",
"0.63463175",
"0.6337883",
"0.63279295",
"0.6293536",
"0.62900555",
"0.62784123",
"0.6273762",
"0.6235984",
"0.6225211",
"0.62113404",
"0.61894464",
"0.61880165",
"0.6186403",
"0.61750257",
"0.6140026",
"0.61070514",
"0.6104736",
"0.60859334",
"0.605888",
"0.60581696"
]
| 0.77367085 | 0 |
Test ability to create a page with a help box | def test_create_page_with_help_box(self):
button = PageButton.objects.create(**_button_data)
help_block = PageHelpBoxBlock.objects.create(
button=button, **_help_box_data)
page = Page.objects.create(help_box_block=help_block, **_page_data)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertEqual(page.help_box_block.button, button)
self.assertIn('text', response.context)
self.assertIn('button', response.context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_help_menu(run):\n out, _err = run(dork.cli.help_menu)\n assert 'Help' in out, 'Help wasnt found'",
"def test_createExplicitHelp(self):\n self.assertSuccessStatus(self._makeConfig(None), [\"create\", \"--help\"])\n self.assertSpacelessEqual(self._createHelpText, sys.stdout.getvalue())",
"def test_helpful_page_view(self):\n target_url = url_for('dashboard.helpful_pages')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)",
"def create_page(self):",
"def test_anon_page_admin(self):\n self._make_quick_page()\n\n dropdown_menu_btn = self._get_one('.dropdown-toggle')\n show_opts_btn = self._get_one('.options-btn')\n options_menu = self._get_one('.pageOptions')\n\n self.assertTrue(dropdown_menu_btn.is_displayed())\n dropdown_menu_btn.click()\n self.assertTrue(show_opts_btn.is_displayed())\n\n # Something weird related to auto-open options menu.\n # Maybe this conditional will fix it? <:)\n if not options_menu.is_displayed():\n show_opts_btn.click()\n self.assertTrue(options_menu.is_displayed())\n\n settings_btn = self._find_one_with_text('li.tabs-tab', 'Settings')\n self.assertTrue(settings_btn.is_displayed())\n settings_btn.click()\n\n title_input = self._get_one('input.title')\n title_input.clear()\n TITLE = 'A Title Most Titular'\n title_input.send_keys(TITLE)\n\n # Make it published, so that our anon viewer can access it\n published_input = self._get_one('.published')\n published_input.click()\n\n # TODO: better solution. need to wait for autosave\n # This may have to wait for request-queuing in the Backbone model.\n import time; time.sleep(1) \n\n # Now pretend we're someone else\n self.selenium.delete_all_cookies()\n self.selenium.refresh()\n for cookie in self.selenium.get_cookies():\n self.assertFalse(cookie['name'].startswith('claim'))\n\n # make sure we aren't admins\n self.assertFalse(self._find('.dropdown-toggle')) \n\n # check that we got the new title\n self.assertEqual(self.selenium.title, TITLE)",
"def test_ui_menu(test):\n assert hl.test_help_ui_menu(test) == test",
"def test_createImplicitHelp(self):\n self.assertSuccessStatus(self._makeConfig(None), [\"create\"])\n self.assertSpacelessEqual(self._createHelpText, sys.stdout.getvalue())",
"def testHelp(self):\n self.widget.manager = QtWidgets.QWidget()\n self.widget.manager.showHelp = MagicMock()\n self.widget.displayHelp()\n self.assertTrue(self.widget.manager.showHelp.called_once())\n args = self.widget.manager.showHelp.call_args\n self.assertIn('sld_calculator_help.html', args[0][0])",
"def quick_test():\n do_command('Help: Command=Help')\n do_command('Help: Command=\"GetInfo\"')\n #do_command('SetPreference: Name=GUI/Theme Value=classic Reload=1')",
"def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)",
"def test_59_help_api(self):\r\n Fixtures.create()\r\n url = \"/help/api\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a help api page\"\r\n assert \"API Help\" in res.data, err_msg",
"def test_handle_help(self):\r\n ret, code = self.testcommand.handle(\"project help\", user)\r\n self.assertEqual(ret, self.testcommand.get_help())\r\n self.assertEqual(code, 200)",
"def test_edit_tool_page(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.edit_tool_page(TOOLNAME,TOOLPAGEDATA)",
"def test_handle_help(self):\n ret, code = self.testcommand.handle(\"team help\", user)\n self.assertEqual(ret, self.testcommand.get_help())\n self.assertEqual(code, 200)",
"def _help_dialogue(self):\n webbrowser.open('https://github.com/ldrumm/yubikey-totp-gui/wiki')",
"def helper():\n \n import webbrowser, os.path\n \n path = os.path.splitext(__file__)[0]\n helpspec = \"file://\" + path + os.path.sep + \\\n \"markdown.html\"\n \n # webbrowser.open seems not to work well\n browser = webbrowser.get()\n if not browser.open_new(helpspec):\n print((\"Help file not found:\" + helpspec))",
"def helper():\n \n import webbrowser, os.path\n \n path = os.path.splitext(__file__)[0]\n helpspec = \"file://\" + path + os.path.sep + \\\n \"markdown.html\"\n \n # webbrowser.open seems not to work well\n browser = webbrowser.get()\n if not browser.open_new(helpspec):\n print((\"Help file not found:\" + helpspec))",
"def test_help(self):\n run_nbgrader([\"quickstart\", \"--help-all\"])",
"def about_page_test(self):\r\n # default for ENABLE_MKTG_SITE is False.\r\n self.assertEquals(self.get_about_page_link(), \"//localhost:8000/courses/mitX/101/test/about\")",
"def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)",
"def test_help_route():\n response = client.get(\"/\")\n assert response.status_code == 200\n assert response.json() == {\n \"repositories\": f\"{DOMAIN_NAME}/repositories\",\n \"developers\": f\"{DOMAIN_NAME}/developers\",\n \"docs\": f\"{DOMAIN_NAME}/docs\",\n \"redoc\": f\"{DOMAIN_NAME}/redoc\",\n }",
"def test_about_page(self):\n response = self.testapp.get('/about')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.content_type, 'text/html')\n response.mustcontain('Community Guidelines', 'Lifecycle', 'License')",
"def testOnHelp(self):\n webbrowser.open = MagicMock()\n\n # invoke the tested method\n self.widget.onHelp()\n\n # see that webbrowser open was attempted\n webbrowser.open.assert_called_once()",
"def test_questions_page(self):\n # import pdb\n # pdb.set_trace()\n\n result = self.client.get('/questions')\n self.assertIn('<h2>Submit A Question</h2>', result.data)\n\n print \"DONE WITH QUESTIONS PAGE CHECK\"",
"def test_about_view(self):\n print 'Running %s ...' % getName()\n# test that URL resolves to correct views function \n found = resolve('/sequencelistings/about/')\n self.assertEqual(found.func, views.about)\n \n self.sequenceListingFixture.create_sequence_instance(self.sequenceListing)\n \n response = self.client.get(reverse('sequencelistings:about'))\n self.assertEqual(response.status_code, 200)\n \n# test that the page returns expected html contents\n self.assertContains(response, 'About')\n self.assertContains(response, 'only for information purposes')",
"def test_59_help_tos(self):\r\n url = \"/help/terms-of-use\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a TOS page\"\r\n assert \"Terms for use\" in res.data, err_msg",
"def test_help(self):\n rc, stdout, _, msg = OIM().request('--help')\n self.assertEqual(rc, 0, \"Bad return code when requesting help\\n%s\" % msg)\n self.assert_(re.search(r'[Uu]sage:', stdout), msg)",
"def test_documentation_path_links(self):\r\n main_page = DogMainPage(self.driver)\r\n dog_page = main_page.navigate_documentation()\r\n # Switch to 'List all breeds' tab\r\n all_breeds_page = dog_page.switch_tab(dog_page.ALL_BREEDS)\r\n all_breeds_expected = all_breeds_page.get_expected_header()\r\n all_breeds_header = all_breeds_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(all_breeds_expected, all_breeds_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (all_breeds_expected, all_breeds_header)))\r\n # Switch to 'Random image' tab\r\n random_page = dog_page.switch_tab(dog_page.RANDOM)\r\n random_expected_header = random_page.get_expected_header()\r\n random_header = random_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(random_expected_header, random_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (random_expected_header, random_header)))\r\n # Switch to 'By breed' tab\r\n breed_page = dog_page.switch_tab(dog_page.BREED)\r\n breed_expected_header = breed_page.get_expected_header()\r\n breed_header = breed_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(breed_expected_header, breed_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (breed_expected_header, breed_header)))\r\n # Switch to 'By sub-breed' tab\r\n sub_breed_page = dog_page.switch_tab(dog_page.SUB_BREED)\r\n sub_expected_header = sub_breed_page.get_expected_header()\r\n sub_header = sub_breed_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(sub_expected_header, sub_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (sub_expected_header, sub_header)))",
"def page_test():\n # get user-name and access rights from IAM\n\n html = \"<h3>Hello world! 2</h3>\"\n return html",
"def test_aboutpage_view(self):\n response = self.client.get(url_for('about'))\n self.assertEqual(response.status_code, 200)"
]
| [
"0.6690504",
"0.6667255",
"0.664372",
"0.6599873",
"0.6584894",
"0.65563786",
"0.652604",
"0.6522679",
"0.6467151",
"0.6451444",
"0.64280933",
"0.6410487",
"0.64081675",
"0.63884115",
"0.63809764",
"0.63509667",
"0.63509667",
"0.6337427",
"0.6329947",
"0.6302536",
"0.6284349",
"0.6279236",
"0.62563306",
"0.6253489",
"0.6208211",
"0.61836654",
"0.6169669",
"0.6138207",
"0.61374485",
"0.6109906"
]
| 0.79399395 | 0 |
Test ability to create a page with main box | def test_create_page_with_main_box(self):
main_block = PageMainBlock.objects.create(**_main_block_data)
Page.objects.create(main_block=main_block, **_page_data)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('text', response.context)
self.assertIn('title', response.context)
self.assertIn('image', response.context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_page(self):",
"def test_create_page_with_help_box(self):\n\n button = PageButton.objects.create(**_button_data)\n help_block = PageHelpBoxBlock.objects.create(\n button=button, **_help_box_data)\n page = Page.objects.create(help_box_block=help_block, **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(page.help_box_block.button, button)\n self.assertIn('text', response.context)\n self.assertIn('button', response.context)",
"def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()",
"def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)",
"def test_anon_page_admin(self):\n self._make_quick_page()\n\n dropdown_menu_btn = self._get_one('.dropdown-toggle')\n show_opts_btn = self._get_one('.options-btn')\n options_menu = self._get_one('.pageOptions')\n\n self.assertTrue(dropdown_menu_btn.is_displayed())\n dropdown_menu_btn.click()\n self.assertTrue(show_opts_btn.is_displayed())\n\n # Something weird related to auto-open options menu.\n # Maybe this conditional will fix it? <:)\n if not options_menu.is_displayed():\n show_opts_btn.click()\n self.assertTrue(options_menu.is_displayed())\n\n settings_btn = self._find_one_with_text('li.tabs-tab', 'Settings')\n self.assertTrue(settings_btn.is_displayed())\n settings_btn.click()\n\n title_input = self._get_one('input.title')\n title_input.clear()\n TITLE = 'A Title Most Titular'\n title_input.send_keys(TITLE)\n\n # Make it published, so that our anon viewer can access it\n published_input = self._get_one('.published')\n published_input.click()\n\n # TODO: better solution. need to wait for autosave\n # This may have to wait for request-queuing in the Backbone model.\n import time; time.sleep(1) \n\n # Now pretend we're someone else\n self.selenium.delete_all_cookies()\n self.selenium.refresh()\n for cookie in self.selenium.get_cookies():\n self.assertFalse(cookie['name'].startswith('claim'))\n\n # make sure we aren't admins\n self.assertFalse(self._find('.dropdown-toggle')) \n\n # check that we got the new title\n self.assertEqual(self.selenium.title, TITLE)",
"def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)",
"def test_registered_user_can_create_project(self):\n user = self._create_user({\"username\":\"user2\",\"email\":\"[email protected]\"})\n testproject = self._create_comicsite_in_admin(user,\"user1project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\")\n \n self._test_page_can_be_viewed(user,testpage1)\n self._test_page_can_be_viewed(self.root,testpage1)",
"def test_page():\n app = create_ctfd()\n with app.app_context():\n\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\")\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test_show_on_homepage(self) -> None:\n self.assert_show_on_homepage(apps.wakeup.main.Controller)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in",
"def test():\n test_app()\n test_pagebrowser()",
"def test_admin_user_login(browser):\n login_page = LoginPage(browser)\n login_page.start()\n\n login_page.login(email=\"[email protected]\",\n password=\"admin1234\")\n admin_page = AdminPage(browser)\n admin_page.start()\n\n # Check that admin has sufficient right to access all functionality \n assert admin_page.get_side_bar_menu_item(AdminPage.REPORTS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.REWARDS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.CAMPAIGNS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.LOYALTIES_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.TRANSACTION_RULES_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.MERCHANTS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.CUSTOMER_MANAGEMENT_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.SETTINGS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.BUSINESS_INTELLIGENCE_MENU_ITEM).size != 0",
"def test_create_page_with_whatyouneed_block(self):\n\n what_you_need_block = PageWhatYouNeedBlock.objects.create(\n **_whatyouneed_block_data)\n Page.objects.create(what_you_need_block=what_you_need_block,\n **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('title', response.context)\n self.assertIn('left_column_title', response.context)\n self.assertIn('is_touch', response.context)\n self.assertIn('is_colour', response.context)\n self.assertIn('middle_column_title', response.context)\n self.assertIn('middle_column_label', response.context)\n self.assertIn('middle_column_caption', response.context)\n self.assertIn('right_column_title', response.context)",
"def test_chooser_no_perms(self):\n root_page = Page.objects.get(pk=2)\n root_page.add_child(instance=NewsIndex(\n title='News', slug='news'))\n root_page.add_child(instance=SecondaryNewsIndex(\n title='Secondary News', slug='secondary-news'))\n\n response = self.client.get(reverse('wagtailnews:choose'))\n self.assertEqual(response.status_code, 403)",
"def test_create_page_with_header(self):\n\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n\n soc_link = PageHeadSocLink.objects.create(**_head_soc_link_data)\n\n header = PageHeaderBlock.objects.create(**_header_data)\n header.main_menus_elements.add(link_1, link_2, link_3)\n header.soc_links.add(soc_link)\n\n page = Page.objects.create(header_block=header, **_page_data)\n\n self.assertEqual(page.header_block, header)\n self.assertEqual(page.header_block.main_menus_elements.count(), 3)\n self.assertEqual(link_3.drop_links.count(), 2)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('logo', response.context)\n self.assertTrue(response.context['logo'].endswith('.jpg'))\n\n self.assertIn('auth_menu', response.context)\n self.assertFalse(response.context['auth_menu'])\n\n self.assertIn('soc_links', response.context)\n self.assertIn(soc_link, response.context['soc_links'])\n\n self.assertIn('d_d_menu', response.context)\n self.assertIn(link_2, response.context['d_d_menu'])",
"def test_empty_ui(self):",
"def test_create(self):\n self.app\n pass",
"def test_fac_admin_page(self):\n self.login(self.fac_admin.user.username)\n self._got_to_fac_admin_page()\n self.check_page_title(self.admin_config.get('FAC_ADMIN').get('PAGE_TITLE'))\n self.check_page_contains_ids(self.admin_config.get('FAC_ADMIN').get('ADMIN_LINKS'))",
"def test_verify_main_screen_elements(self):\n\n test_name = sys._getframe().f_code.co_name\n\n log.info(\"###### TEST EXECUTION STARTED :: \" + test_name + \" ######\")\n\n with allure.step(\"Verify Main Screen Elements\"):\n result = self.main_page.verify_main_screen_elements()\n self.exe_status.mark_final(test_step=test_name, result=result)",
"def test_draft_unit_page_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n html = self.get_page_html(draft_unit)\r\n self.validate_html_for_add_buttons(html)",
"def test01_cloudspace_page_basic_elements(self):\n self.lg('check page url & title')\n self.assertEqual(self.driver.title, 'CBGrid - Virtual Machines')\n self.assertIn('cbgrid/Virtual%20Machines', self.driver.current_url)\n self.lg('check navigation bar')\n self.assertEqual(self.get_navigation_bar(self.navigation_bar), ['Cloud Broker','Virtual Machines'])\n self.lg('check page title')\n self.assertEqual(self.get_text('page title'), 'Virtual Machines')\n self.lg('check \"show records per page\" list')\n self.assertTrue(self.element_is_enabled('table_machines_selector'))",
"def create_page(self, parent):\n ui = self.edit_traits(parent=parent, kind=\"subpanel\")\n return ui.control",
"def test_landing_screen(self):\n # Checking current Screen(Login screen)\n self.assert_wait_no_except('//ScreenManager[@current]', timeout=15, value='login')\n # Dragging from sent to PROS: to NOTE:\n self.drag(\n '''//Login//Screen//ContentHead[1][@section_name=\\\"PROS:\\\"]''',\n '''//Login//Screen//ContentHead[0][@section_name=\\\"NOTE:\\\"]'''\n )\n # Assert the checkbox is rendered\n self.assertExists(\n '//Login//Screen[@name=\\\"check_screen\\\"]//AnchorLayout[1]/Check[@active=false]', timeout=5\n )\n # Clicking on the checkbox\n self.cli.wait_click(\n '//Login//Screen[@name=\\\"check_screen\\\"]//AnchorLayout[1]/Check', timeout=5\n )\n # Checking Status of checkbox after click\n self.assertExists(\n '//Login//Screen[@name=\\\"check_screen\\\"]//AnchorLayout[1]/Check[@active=true]', timeout=5\n )\n # Checking the Proceed Next button is rendered or not\n self.assertExists(\n '''//Login//Screen[@name=\\\"check_screen\\\"]'''\n '''//MDFillRoundFlatIconButton[@text=\\\"Proceed Next\\\"]''', timeout=5\n )\n # Clicking on Proceed Next Button to redirect to \"random\" screen\n self.cli.wait_click(\n '''//Login//Screen[@name=\\\"check_screen\\\"]'''\n '''//MDFillRoundFlatIconButton[@text=\\\"Proceed Next\\\"]''', timeout=5\n )\n self.assertExists(\"//ScreenManager[@current=\\\"random\\\"]\", timeout=5)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template",
"def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()",
"def test_new_equipment_page(self):\n create_user()\n login(self.app, 'me1', 'password')\n\n response = self.app.get('/new_equipment', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n response_text = response.get_data(as_text=True)\n self.assertIn('New Equipment', response_text)\n self.assertIn('Name', response_text)\n self.assertIn('Quantity', response_text)\n self.assertIn('Submit', response_text)\n\n self.assertNotIn('Calendar ', response_text)\n self.assertNotIn('Logout', response_text)\n self.assertNotIn('Login', response_text)\n self.assertNotIn('Sign up', response_text)",
"def test_guest_writing(self):\n from main.models import PERMISSIONS_CHOICES as PC\n page = Page.objects.create(\n owner=self.user,\n published=True,\n text_writability = PC.PUBLIC\n )\n self._test_insert_textitem(page, 'lorem ipsum')",
"def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)",
"def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data"
]
| [
"0.6924539",
"0.6889655",
"0.68546546",
"0.67670345",
"0.67154676",
"0.6582398",
"0.6547919",
"0.64649904",
"0.6284628",
"0.62567896",
"0.62402797",
"0.62172043",
"0.62079114",
"0.61982745",
"0.61932427",
"0.61767715",
"0.6131917",
"0.61279565",
"0.61247087",
"0.6123573",
"0.6105454",
"0.61032873",
"0.610086",
"0.6098104",
"0.60931456",
"0.60781825",
"0.60603917",
"0.6051658",
"0.60504776",
"0.60463107"
]
| 0.78971374 | 0 |
Test ability to create a page with main footer | def test_create_page_with_footer(self):
footer_block = PageFooterBlock.objects.create(**_footer_block_data)
link_1 = PageHeadDropMenu.objects.create\
(internal_link='home', **_head_link_data)
link_2 = PageHeadDropMenu.objects.create(
external_link=_external_link, **_head_link_data)
link_3 = PageHeadDropMenu.objects.create(**_head_link_data)
link_element_1 = PageHeadDropMenuElement.objects.create(
internal_link='home', **_head_drop_link_data)
link_element_2 = PageHeadDropMenuElement.objects.create(
external_link=_external_link, **_head_drop_link_data)
link_3.drop_links.add(link_element_1, link_element_2)
footer_block.top_links.add(link_1, link_2, link_3)
contact_address = Address.objects.create(**_address_block)
footer_block.contact_address.add(contact_address)
footer_block.a_link.add(link_element_1)
Page.objects.create(footer_block=footer_block, **_page_data)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('top_links', response.context)
self.assertIn('contact', response.context)
self.assertIn('top_contacts', response.context)
self.assertIn('bot_copy_right', response.context)
self.assertIn('bot_link', response.context)
self.assertIn('bot_back_top', response.context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should not be shown to admin users\"\r\n assert dom.find(id='footer_links') is None, err_msg\r\n self.signout()",
"def test_00_footer(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to anonymous users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to authenticated users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Footer links should be shown to admin users\"\r\n assert dom.find(id='footer_links') is not None, err_msg\r\n self.signout()",
"def test_create_page_with_main_box(self):\n\n main_block = PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('text', response.context)\n self.assertIn('title', response.context)\n self.assertIn('image', response.context)",
"def initialPage():\n\treturn header() + footer()",
"def create_page(self):",
"def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)",
"def footer(self):\n pass",
"def footer(self):\n pass",
"def WriteFooter(self):\n return",
"def test_create_page_with_header(self):\n\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n\n soc_link = PageHeadSocLink.objects.create(**_head_soc_link_data)\n\n header = PageHeaderBlock.objects.create(**_header_data)\n header.main_menus_elements.add(link_1, link_2, link_3)\n header.soc_links.add(soc_link)\n\n page = Page.objects.create(header_block=header, **_page_data)\n\n self.assertEqual(page.header_block, header)\n self.assertEqual(page.header_block.main_menus_elements.count(), 3)\n self.assertEqual(link_3.drop_links.count(), 2)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('logo', response.context)\n self.assertTrue(response.context['logo'].endswith('.jpg'))\n\n self.assertIn('auth_menu', response.context)\n self.assertFalse(response.context['auth_menu'])\n\n self.assertIn('soc_links', response.context)\n self.assertIn(soc_link, response.context['soc_links'])\n\n self.assertIn('d_d_menu', response.context)\n self.assertIn(link_2, response.context['d_d_menu'])",
"def footer_html():\n note_div = html.Div(\n [\n dcc.Markdown(\n \"This website uses natural language processing (NLP) to power search on a set of research papers related to COVID-19.\"\n \" It was created by the team behind [Matscholar](https://www.matscholar.com), a research effort led by the [HackingMaterials](https://hackingmaterials.lbl.gov), \"\n \" [Persson](https://perssongroup.lbl.gov), and [Ceder](https://ceder.berkeley.edu) research\"\n \" groups at Lawrence Berkeley National Lab.\"\n \" The virus icon in our logo was made by Freepik from www.flaticon.com\",\n className=\"column is-half is-size-6\"\n )\n ],\n className=\"columns is-centered\"\n\n )\n\n common_footer_style = \"has-text-weight-bold\"\n\n about_matscholar = html.A(\n \"About Matscholar\",\n href=\"https://github.com/materialsintelligence/matscholar-web\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n privacy_policy = html.A(\n \"Privacy Policy\",\n href=\"https://www.iubenda.com/privacy-policy/55585319\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n submit_feedback = html.A(\n \"Matscholar Forum\",\n href=\"https://discuss.matsci.org/c/matscholar\",\n target=\"_blank\",\n className=common_footer_style,\n )\n\n footer_link_tree = html.Div(\n [\n about_matscholar,\n html.Span(\" | \"),\n privacy_policy,\n html.Span(\" | \"),\n submit_feedback,\n ]\n )\n\n footer_copyright = html.Div(\n html.Span(\"Copyright © 2019 - Materials Intelligence\")\n )\n\n footer = html.Div(\n [note_div, footer_link_tree, footer_copyright],\n id=\"footer_container\",\n className=\"content has-text-centered\",\n )\n\n footer_container = html.Div(footer)\n return footer_container",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_about_layout(testapp):\n response = testapp.get('/about', status=200)\n html = response.html\n assert 'Chaitanya' in html.find(\"h1\").text",
"def test_page_content(self, client, site, homepage):\n response = client.get(homepage.get_url())\n assertTemplateUsed(response, \"cdhpages/home_page.html\")\n assertContains(response, homepage.body[0].value.source)",
"def test_page_intro(self, client, projects_landing_page):\n # create link page for project list\n sponsored_projects = LinkPage(\n title=\"Sponsored Projects\", link_url=\"projects/sponsored\"\n )\n projects_landing_page.add_child(instance=sponsored_projects)\n # create a snippet for the sponsored projects page\n PageIntro.objects.create(\n page=sponsored_projects, paragraph=\"<i>test content</i>\"\n )\n\n # visit and check that it renders\n response = client.get(reverse(\"projects:sponsored\"))\n assertContains(response, \"<i>test content</i>\")",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template",
"def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()",
"def footer(self, **args):\n return self.pageConfig['footer'] % self.pageConfig",
"def test_correct_main_page_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'main.html')",
"def test_create_page_with_whatyouneed_block(self):\n\n what_you_need_block = PageWhatYouNeedBlock.objects.create(\n **_whatyouneed_block_data)\n Page.objects.create(what_you_need_block=what_you_need_block,\n **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('title', response.context)\n self.assertIn('left_column_title', response.context)\n self.assertIn('is_touch', response.context)\n self.assertIn('is_colour', response.context)\n self.assertIn('middle_column_title', response.context)\n self.assertIn('middle_column_label', response.context)\n self.assertIn('middle_column_caption', response.context)\n self.assertIn('right_column_title', response.context)",
"def writeFooter(self):\n pass",
"def test_page_content(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assertContains(response, \"<p>content of the landing page</p>\")",
"def test_homepage_has_correct_buttons_showing_when_not_logged_in(testapp):\n html = testapp.get('/').html\n logout = html.find(class_=\"navbar-right\").text\n create = html.find(href=\"http://localhost/journal/new-entry\")\n assert logout == '\\n Login\\n'\n assert not create",
"def test_write_page_margins_footer(self):\n\n self.worksheet.set_footer(margin=0.5)\n self.worksheet._write_page_margins()\n\n exp = \"\"\"<pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.5\"/>\"\"\"\n got = self.fh.getvalue()\n\n self.assertEqual(got, exp)",
"def test_page():\n app = create_ctfd()\n with app.app_context():\n\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\")\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test_legal_page(self):\n response = self.client.get(\"/legal/\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/legal.html\")",
"def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)",
"def test_about_page(self):\n response = self.testapp.get('/about')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.content_type, 'text/html')\n response.mustcontain('Community Guidelines', 'Lifecycle', 'License')",
"def gen_footer():\n return '</body></html>'",
"def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data"
]
| [
"0.73257196",
"0.72786623",
"0.65961885",
"0.65096605",
"0.6436854",
"0.62778157",
"0.6275215",
"0.6275215",
"0.6235414",
"0.621966",
"0.60667896",
"0.606618",
"0.60153687",
"0.6010727",
"0.6007516",
"0.5964707",
"0.5950918",
"0.59390473",
"0.589547",
"0.58854747",
"0.5876067",
"0.5871783",
"0.586691",
"0.5859113",
"0.58141226",
"0.5799248",
"0.57954866",
"0.5794906",
"0.57815737",
"0.5755665"
]
| 0.8047665 | 0 |
Test ability to create a page with main what you need block | def test_create_page_with_whatyouneed_block(self):
what_you_need_block = PageWhatYouNeedBlock.objects.create(
**_whatyouneed_block_data)
Page.objects.create(what_you_need_block=what_you_need_block,
**_page_data)
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertIn('title', response.context)
self.assertIn('left_column_title', response.context)
self.assertIn('is_touch', response.context)
self.assertIn('is_colour', response.context)
self.assertIn('middle_column_title', response.context)
self.assertIn('middle_column_label', response.context)
self.assertIn('middle_column_caption', response.context)
self.assertIn('right_column_title', response.context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_page_with_main_box(self):\n\n main_block = PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('text', response.context)\n self.assertIn('title', response.context)\n self.assertIn('image', response.context)",
"def create_page(self):",
"def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()",
"def test_anon_page_admin(self):\n self._make_quick_page()\n\n dropdown_menu_btn = self._get_one('.dropdown-toggle')\n show_opts_btn = self._get_one('.options-btn')\n options_menu = self._get_one('.pageOptions')\n\n self.assertTrue(dropdown_menu_btn.is_displayed())\n dropdown_menu_btn.click()\n self.assertTrue(show_opts_btn.is_displayed())\n\n # Something weird related to auto-open options menu.\n # Maybe this conditional will fix it? <:)\n if not options_menu.is_displayed():\n show_opts_btn.click()\n self.assertTrue(options_menu.is_displayed())\n\n settings_btn = self._find_one_with_text('li.tabs-tab', 'Settings')\n self.assertTrue(settings_btn.is_displayed())\n settings_btn.click()\n\n title_input = self._get_one('input.title')\n title_input.clear()\n TITLE = 'A Title Most Titular'\n title_input.send_keys(TITLE)\n\n # Make it published, so that our anon viewer can access it\n published_input = self._get_one('.published')\n published_input.click()\n\n # TODO: better solution. need to wait for autosave\n # This may have to wait for request-queuing in the Backbone model.\n import time; time.sleep(1) \n\n # Now pretend we're someone else\n self.selenium.delete_all_cookies()\n self.selenium.refresh()\n for cookie in self.selenium.get_cookies():\n self.assertFalse(cookie['name'].startswith('claim'))\n\n # make sure we aren't admins\n self.assertFalse(self._find('.dropdown-toggle')) \n\n # check that we got the new title\n self.assertEqual(self.selenium.title, TITLE)",
"def test_create_page(self):\n self.assertEqual(self.client.get(reverse('home')).status_code, 404)\n\n page = Page.objects.create(**_page_data)\n\n self.assertEqual(page.title, _page_data['title'])\n self.assertEqual(page.page_type, _page_data['page_type'])\n\n response = self.client.get(reverse('home'))\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('page_settings', response.context_data)",
"def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in",
"def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)",
"def test_create_page_with_help_box(self):\n\n button = PageButton.objects.create(**_button_data)\n help_block = PageHelpBoxBlock.objects.create(\n button=button, **_help_box_data)\n page = Page.objects.create(help_box_block=help_block, **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(page.help_box_block.button, button)\n self.assertIn('text', response.context)\n self.assertIn('button', response.context)",
"def test_page():\n app = create_ctfd()\n with app.app_context():\n\n gen_page(app.db, title=\"Title\", route=\"this-is-a-route\", html=\"This is some HTML\")\n\n with app.test_client() as client:\n r = client.get('/this-is-a-route')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n ...\n # test that you're getting a template",
"def test_registered_user_can_create_project(self):\n user = self._create_user({\"username\":\"user2\",\"email\":\"[email protected]\"})\n testproject = self._create_comicsite_in_admin(user,\"user1project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\")\n \n self._test_page_can_be_viewed(user,testpage1)\n self._test_page_can_be_viewed(self.root,testpage1)",
"def test_create_page_with_header(self):\n\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n\n soc_link = PageHeadSocLink.objects.create(**_head_soc_link_data)\n\n header = PageHeaderBlock.objects.create(**_header_data)\n header.main_menus_elements.add(link_1, link_2, link_3)\n header.soc_links.add(soc_link)\n\n page = Page.objects.create(header_block=header, **_page_data)\n\n self.assertEqual(page.header_block, header)\n self.assertEqual(page.header_block.main_menus_elements.count(), 3)\n self.assertEqual(link_3.drop_links.count(), 2)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('logo', response.context)\n self.assertTrue(response.context['logo'].endswith('.jpg'))\n\n self.assertIn('auth_menu', response.context)\n self.assertFalse(response.context['auth_menu'])\n\n self.assertIn('soc_links', response.context)\n self.assertIn(soc_link, response.context['soc_links'])\n\n self.assertIn('d_d_menu', response.context)\n self.assertIn(link_2, response.context['d_d_menu'])",
"def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template",
"def test_create_page_with_footer(self):\n\n footer_block = PageFooterBlock.objects.create(**_footer_block_data)\n link_1 = PageHeadDropMenu.objects.create\\\n (internal_link='home', **_head_link_data)\n link_2 = PageHeadDropMenu.objects.create(\n external_link=_external_link, **_head_link_data)\n link_3 = PageHeadDropMenu.objects.create(**_head_link_data)\n link_element_1 = PageHeadDropMenuElement.objects.create(\n internal_link='home', **_head_drop_link_data)\n link_element_2 = PageHeadDropMenuElement.objects.create(\n external_link=_external_link, **_head_drop_link_data)\n link_3.drop_links.add(link_element_1, link_element_2)\n footer_block.top_links.add(link_1, link_2, link_3)\n contact_address = Address.objects.create(**_address_block)\n footer_block.contact_address.add(contact_address)\n footer_block.a_link.add(link_element_1)\n\n Page.objects.create(footer_block=footer_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('top_links', response.context)\n self.assertIn('contact', response.context)\n self.assertIn('top_contacts', response.context)\n self.assertIn('bot_copy_right', response.context)\n self.assertIn('bot_link', response.context)\n self.assertIn('bot_back_top', response.context)",
"def _verify_page(self):",
"def test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})",
"def page_test():\n # get user-name and access rights from IAM\n\n html = \"<h3>Hello world! 2</h3>\"\n return html",
"def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)",
"def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()",
"def test_correct_main_page_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'main.html')",
"def test_legal_page(self):\n response = self.client.get(\"/legal/\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/legal.html\")",
"def test_page_change_view(self):\n user = self._create_user({\"username\":\"user3\",\"email\":\"[email protected]\"})\n anotheruser = self._create_random_user(startname=\"another_user_\")\n testproject = self._create_comicsite_in_admin(user,\"user3project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\") \n url = reverse(\"admin:comicmodels_page_change\",\n args=[testpage1.pk])\n\n self._test_url_can_be_viewed(user,url) \n self._test_url_can_be_viewed(self.root,url)\n #TODO: The permissions are not correct, https://github.com/comic/comic-django/issues/306\n #self._test_url_can_not_be_viewed(anotheruser,url)",
"def create_page_in_admin(comicsite,title,content=\"testcontent\",permission_lvl=\"\"):\n \n if permission_lvl == \"\":\n permission_lvl = Page.ALL\n \n page_admin = PageAdmin(Page,admin.site)\n page = Page.objects.create(title=title,\n comicsite=comicsite,\n html=content,\n permission_lvl=permission_lvl)\n page_admin.first_save(page)\n return page",
"def test_chooser_no_perms(self):\n root_page = Page.objects.get(pk=2)\n root_page.add_child(instance=NewsIndex(\n title='News', slug='news'))\n root_page.add_child(instance=SecondaryNewsIndex(\n title='Secondary News', slug='secondary-news'))\n\n response = self.client.get(reverse('wagtailnews:choose'))\n self.assertEqual(response.status_code, 403)",
"def test_public_pages_load(self):\r\n pages = (\r\n reverse('login'),\r\n reverse('signup'),\r\n )\r\n for page in pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, 200)",
"def test_page_content(self, client, site, homepage):\n response = client.get(homepage.get_url())\n assertTemplateUsed(response, \"cdhpages/home_page.html\")\n assertContains(response, homepage.body[0].value.source)",
"def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)",
"def test_property_page(self):\n self.property_page.proceed_to_property_page()\n\n \"\"\"Step2 - Check rooms section\n Exp2 - Property page opened \"\"\"\n self.property_page.check_rooms_section()\n\n \"\"\"Step3 - Check other section\n Exp3 - Each item works well \"\"\"\n self.property_page.check_other_section()",
"def test_first_page_passes(self):\n\n self.page.open_site(PageLocators.PREVIOUS_LINK)\n self.page.fill_all_fields()\n self.page.send_the_data()",
"def test_static_tab(self):\r\n # From the course info page, navigate to the static tab\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Test Static Tab')\r\n self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))"
]
| [
"0.7925535",
"0.71040845",
"0.70405143",
"0.6776286",
"0.67402023",
"0.6727634",
"0.6711986",
"0.6697805",
"0.6588746",
"0.65794003",
"0.65505695",
"0.6517028",
"0.6492822",
"0.6410374",
"0.6395514",
"0.6354633",
"0.63398767",
"0.6336393",
"0.630439",
"0.62780046",
"0.62673783",
"0.6260018",
"0.62444365",
"0.6219324",
"0.6194907",
"0.61624986",
"0.61586577",
"0.6157006",
"0.6132133",
"0.61247677"
]
| 0.74322563 | 1 |
Returns search results of the query obtained in request args. It returns four separate variables containing results for artists, music, albums and records. | def search():
if not request.vars.search_term:
redirect(URL('index'))
term = request.vars.search_term
origterm = term
term = term.replace(' ','|')
artists = db.executesql("select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('"+term+"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('"+term+"') order by rank desc limit 20;")
albums = db.executesql("select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('"+term+"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('"+term+"') order by rank desc limit 20;")
songs = db.executesql("select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%"+origterm+"%%') limit 20;")
return dict(songs=songs, albums=albums, artists=artists) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_artists():\n return query_multiple(request.args, artist_search, \\\n artist_filter, Artist, artists_schema)",
"def get(self):\n mb = MusicbrainzClient()\n query = self.get_argument('q')\n artists, tracks = yield [mb.search_artists(query),\n mb.search_tracks(query)]\n data = {\n 'artists': [\n {\n 'id': artist['id'],\n 'artist': artist['name'],\n 'note': artist.get('disambiguation', '')\n }\n for artist in artists['artist-list']\n ],\n 'tracks': [\n {\n 'id': track['id'],\n 'title': track['title'],\n 'artist': track['artist-credit-phrase']\n }\n for track in tracks['recording-list']\n ]\n }\n self.finish(data)",
"def search_artists():\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive. (DONE)\n\n response = request.form.get('search_term', '')\n response = response.lower()\n\n artists = db.session.query(Artist).filter(Artist.name.ilike('%' + response + '%')).all()\n results = []\n \n for a in artists:\n print(a.name)\n results.append({\n 'id': a.id,\n 'name' : a.name\n })\n\n response={\n \"count\": len(results),\n \"data\": results\n }\n\n return render_template(\n \"pages/search_artists.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )",
"def search(self):\n query = self.get_request_arg(\"query\")\n if query:\n album = self.ctrl.library.search(query)\n return self.resp_from_data(album)\n return self.resp_from_data(\n {\"message\": \"No query parameters specified\"}, 400)",
"def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })",
"def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def search_results(request):\n #key\n\n user_input = request.GET['q']\n\n people_objs = Person.objects.filter(Q(last__contains=user_input) | Q(\n first__contains=user_input))\n document_objs = Document.objects.filter(title__contains=user_input)\n folder_objs = Folder.objects.filter(full__contains=user_input)\n organization_objs = Organization.objects.filter(Q(name__contains=user_input)|Q(\n location__contains=user_input))\n obj_dict = {\n 'people_objs': people_objs,\n 'document_objs': document_objs,\n 'folder_objs': folder_objs,\n 'organization_objs': organization_objs,\n 'query': user_input,\n }\n response = render(request, 'search_results.jinja2', obj_dict)\n return response",
"def do_search(arg):\n result = {'count': 0, 'time': 0, 'records': []}\n try:\n uri, q, k, m = arg\n dqp = Pyro.core.getProxyForURI(uri)\n scoresLen,results,indocids,exdocids = dqp.search(q, k, m)\n result=(scoresLen,results,indocids,exdocids)\n except Exception as e:\n print \"Exception:\", e\n return result",
"def fetch_search_results (self, search_str, list_from=0, list_to=10):\n # properly encode the search string\n encoded_search_string = quote(search_str)\n\n paths = [\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'titles', ['id', 'length', 'name', 'trackIds', 'requestId']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', ['id', 'length', 'name', 'trackIds', 'requestId']]\n ]\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Search results')",
"def search(\n\tlog,\n\tverbose,\n\tquiet,\n\tusername,\n\tdevice_id,\n\tyes,\n\tfilters\n):\n\n\tconfigure_logging(verbose - quiet, username, log_to_file=log)\n\n\tlogger.info(\"Logging in to Google Music\")\n\tmc = google_music.mobileclient(username, device_id=device_id)\n\n\tif not mc.is_authenticated:\n\t\tsys.exit(\"Failed to authenticate client.\")\n\n\tsearch_results = filter_songs(mc.songs(), filters)\n\tsearch_results.sort(\n\t\tkey=lambda song: (\n\t\t\tsong.get('artist', ''),\n\t\t\tsong.get('album', ''),\n\t\t\tsong.get('trackNumber', 0)\n\t\t)\n\t)\n\n\tif search_results:\n\t\tresult_num = 0\n\t\ttotal = len(search_results)\n\t\tpad = len(str(total))\n\n\t\tconfirm = (\n\t\t\tyes\n\t\t\tor input(f\"\\nDisplay {len(search_results)} results? (y/n) \") in (\"y\", \"Y\")\n\t\t)\n\n\t\tif confirm:\n\t\t\tfor result in search_results:\n\t\t\t\tresult_num += 1\n\n\t\t\t\ttitle = result.get('title', \"<empty>\")\n\t\t\t\tartist = result.get('artist', \"<empty>\")\n\t\t\t\talbum = result.get('album', \"<empty>\")\n\t\t\t\tsong_id = result['id']\n\n\t\t\t\tlogger.info(\n\t\t\t\t\tf\"{result_num:>{pad}}/{total} {title} -- {artist} -- {album} ({song_id})\"\n\t\t\t\t)\n\telse:\n\t\tlogger.info(\"No songs found matching query\")\n\n\tmc.logout()\n\tlogger.info(\"All done!\")",
"def initiateSearch(search_context, text):\n searches = []\n log.msg(\"Searching: %s\" % text)\n\n if not isinstance(text, dict) :\n \n artistStart = text.find(SearchKeys.ARTIST)\n titleStart = text.find(SearchKeys.TITLE)\n albumStart = text.find(SearchKeys.ALBUM)\n\n if max(artistStart, titleStart, albumStart) != -1:\n d = dict()\n \n def pullout(text2):\n if text2.find(SearchKeys.ARTIST) != -1:\n text2 = text2[0:text2.find(SearchKeys.ARTIST)]\n\n if text2.find(SearchKeys.ALBUM) != -1:\n text2 = text2[0:text2.find(SearchKeys.ALBUM)]\n\n if text2.find(SearchKeys.TITLE) != -1:\n text2 = text2[0:text2.find(SearchKeys.TITLE)]\n return text2.strip()\n\n if artistStart != -1:\n d['artist'] = pullout(text[artistStart+len(SearchKeys.ARTIST) : len(text)])\n\n if titleStart != -1:\n d['title'] = pullout(text[titleStart+len(SearchKeys.TITLE) : len(text)])\n \n if albumStart != -1:\n d['album'] = pullout(text[albumStart+len(SearchKeys.ALBUM) : len(text)])\n log.msg(\"Parsed Search: \" + str(d))\n text = d\n\n # Fire off search in parallel\n for key, mediasrc in __mediaSources.items():\n log.msg(\"\\tSending Search Request to %s\" % key)\n searches.append(deferToThread(mediasrc.search, text))\n\n # When all searches return combine them. The lists will be\n # returned as a list of a touples consisting of a sucess/failure\n # boolean followed by the results returned by the individual source\n dl = DeferredList(searches)\n\n def sendResults(results):\n \"\"\"\n Combines the results returned by the deferred list\n into master_result and passes it to all the registered\n controllers.\n \"\"\"\n log.msg(\"Search Returned from all sources\")\n master_result = []\n for status, result in results:\n if status:\n master_result += result\n\n for key, mediactr in __controllers.items():\n log.msg(\"\\tSending Result to %s\" % key)\n mediactr.searchCompleted(search_context, master_result)\n\n dl.addCallback(sendResults)",
"def show_results():\n\n\tuser_query = request.args.get(\"search\")\n\tsearch_activity = SearchActivity(user_id=session.get('user_id'), search_query=user_query, datetime = datetime.now())\n\n\tdb.session.add(search_activity)\n\tdb.session.commit()\n\tsearch_items_not_filtered_list = user_search(user_query)\n\tfound_items = []\n\t\n\tfor item in search_items_not_filtered_list:\n\t\tTaxonomy_obj = db.session.query(Taxonomy).filter(Taxonomy.path.like(\"%Food%\")).filter_by(category_node=item[u'categoryNode']).all()\n\t\tfor obj in Taxonomy_obj:\n\t\t\tif item[u'categoryNode'] == obj.category_node:\t\n\t\t\t\tfound_items.append({\n\t\t\t\t\t\"name\": item.get(u'name', \"\"), \n\t\t\t\t\t\"item_id\": item.get(u'itemId', \"\"),\n\t\t\t\t\t\"category\": item.get(u'categoryPath', \"\"), \n\t\t\t\t\t\"sale_price\": format(item.get(u'salePrice', \"\"), \".2f\"), \n\t\t\t\t\t\"description\": unescape(item.get(u'shortDescription', \"\")), \n\t\t\t\t\t\"customer_rating_img\": item.get(u'customerRatingImage', \"\"),\n\t\t\t\t\t\"thumbnail_image\": item.get(u'thumbnailImage', \"\")\n\t\t\t\t\t})\n\t\t\t\t\n\treturn render_template(\"searchresults.html\", found_items=found_items)",
"def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))",
"def get_results_for(t_client, search_q):\n results = t_client.search(q=\"#\"+search_q)\n\n # This can be refactored\n return [\n {\n \"author\": \"@%s\" % t.from_user,\n \"text\": t.text,\n \"id\": t.id,\n \"date_h\": t.created_at.strftime(\"%H:%M:%S %d/%m/%Y\"),\n \"date\": time.mktime(t.created_at.timetuple()),\n } for t in results\n ]",
"def get_data(inp):\n movies = __get_movies(inp)\n series = __get_series(inp)\n\n exist_title(movies, series)\n is_response_larger_than_max_results(movies, series)\n\n search_dict = {}\n\n if movies['Response'] != 'False':\n for movie in movies['Search']:\n search_dict.update({'movie': __get_title_info(movie['imdbID'])})\n\n if series['Response'] != 'False':\n for show in series['Search']:\n search_dict.update({'series': __get_title_info(show['imdbID'])})\n\n return search_dict",
"def search_results(request):\n\n query = None\n results_list = ()\n filtered_products = Product.objects.filter(discontinued=False)\n filtered_programmes = Programme.objects.filter(discontinued=False)\n filtered_posts = Post.objects.filter(status=1)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.warning(request,\n \"You didn't enter any search criteria!\")\n return redirect(reverse('search_results'))\n\n products = filtered_products.filter(Q(name__icontains=query) | Q(\n description__icontains=query))\n programmes = filtered_programmes.filter(Q(\n name__icontains=query) | Q(description__icontains=query))\n posts = filtered_posts.filter(Q(title__icontains=query))\n\n results_list = list(chain(products, programmes, posts))\n\n context = {\n 'search_term': query,\n 'results_list': results_list,\n }\n\n return render(request, 'home/search.html', context)",
"def search(\n self, query, callback=None,\n track_offset=0, track_count=20,\n album_offset=0, album_count=20,\n artist_offset=0, artist_count=20,\n playlist_offset=0, playlist_count=20,\n search_type=None):\n return spotify.Search(\n query=query, callback=callback,\n track_offset=track_offset, track_count=track_count,\n album_offset=album_offset, album_count=album_count,\n artist_offset=artist_offset, artist_count=artist_count,\n playlist_offset=playlist_offset, playlist_count=playlist_count,\n search_type=search_type)",
"def results():\n\n queryName = request.form['query']\n queryStars = request.form['stars']\n \n datasource = DataSource()\n listOfRestaurantNames = datasource.searchRestaurantsByNameAndMinimumStars(queryName, queryStars)\n restaurants = datasource.generateRestaurantObjects(listOfRestaurantNames[:15])\n\n return render_template('results.html', restaurants=restaurants)",
"def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)",
"def multiQuery(self, query, limit):\n try:\n results = self.sp.search(query, limit)\n resultLists = results['tracks']['items']\n return resultLists\n except spotipy.SpotifyException as se:\n self.authenticate()\n return self.multiQuery(query, limit)",
"def search_results():\n search = False\n if session['patron']:\n search = False\n try:\n page = int(request.args.get('page', 1))\n except ValueError:\n page = 1\n\n search_criteria = request.args.get('search')\n patron_id = session['patron']\n session['search_criteria'] = search_criteria\n\n if search_criteria != '':\n print \"do a search\"\n list_of_books = booksearch.search(search_criteria, patron_id)\n pagination = Pagination(page=page, \n total=len(list_of_books), \n search=search, \n record_name='list_of_books')\n return render_template('book_list.html', search=search_criteria,\n list_of_books=list_of_books,\n pagination=pagination,\n )\n else:\n flash(\"Please enter an author or a title.\")\n return render_template('index.html')",
"def search_json(request):\n query = request.GET.get('q')\n books = []\n authors = []\n sections = []\n if len(query) >= 3:\n for book in Book.objects.filter(title__icontains=query):\n books.append({\n 'title': book.title,\n 'url': book.get_absolute_url(),\n })\n for author in Author.objects.filter(name__icontains=query):\n authors.append({\n 'title': author.name,\n 'url': author.get_absolute_url(),\n })\n for section in Section.objects.filter(title__icontains=query):\n sections.append({\n 'title': section.title,\n 'url': section.get_absolute_url(),\n })\n\n return JsonResponse({\n 'results': {\n 'books': {\n 'name': 'Books',\n 'results': books,\n },\n 'authors': {\n 'name': 'Authors',\n 'results': authors,\n },\n 'sections': {\n 'name': 'Sections',\n 'results': sections,\n },\n }\n })",
"def results(self):\n\n return self._search_resut",
"def query(self):\n query_url = self.get_query_url()\n logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)",
"def getResults():",
"def parse_search_results (self, response_data):\n search_results = {}\n raw_search_results = response_data['value']['videos']\n for entry_id in raw_search_results:\n if self._is_size_key(key=entry_id) == False:\n # fetch information about each show & build up a proper search results dictionary\n show = self.parse_show_list_entry(id=entry_id, entry=raw_search_results[entry_id])\n show[entry_id].update(self.parse_show_information(id=entry_id, response_data=self.fetch_show_information(id=entry_id, type=show[entry_id]['type'])))\n search_results.update(show)\n return search_results",
"def search(request):\n template = 'tracks.html'\n search_by = request.GET.get('search_by')\n if search_by != 'genres':\n search_dict = {search_by + '__icontains': request.GET.get('lookup')}\n tracks_list = Tracks.objects.filter(**search_dict)\n else:\n gen_list = [x.strip() for x in request.GET.get('lookup').split(',')]\n id_list = Genres.objects.filter(genre__in=gen_list).values_list('id', flat=True)\n tracks_list = Tracks.objects.filter(genres__in=id_list).distinct()\n context = {'track_list': tracks_list, 'call': 'search', }\n return render(request, template, context)",
"def search(self, query):",
"def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results"
]
| [
"0.6995513",
"0.6946495",
"0.69276816",
"0.6712662",
"0.66553944",
"0.6650486",
"0.66414595",
"0.66204256",
"0.66038275",
"0.65670973",
"0.65214497",
"0.65137446",
"0.64969873",
"0.6442194",
"0.6438711",
"0.6370827",
"0.6355204",
"0.6339323",
"0.6334207",
"0.63236696",
"0.63199663",
"0.63081115",
"0.6283248",
"0.625959",
"0.6243985",
"0.61976314",
"0.61953115",
"0.6173638",
"0.6173467",
"0.6161487"
]
| 0.73612744 | 0 |
This action is responsible for displaying all the information related to an artist | def artist():
if not request.vars.id:
redirect(URL('index'))
id = request.vars.id
artistname = db.executesql("select m1.name from artist_name as m1, artist as m2 where m1.id = m2.name and m2.id = "+id+";")
urls = db.executesql("select distinct(m2.url) from l_artist_url m1, url m2 where m2.id = m1.entity1 and m1.entity0 = "+id+";")
discography = db.executesql("select m4.name,m5.name,m3.id,m6.count from artist_credit_name m1, artist_credit m2,release_group m3,release_name m4, release_group_primary_type m5,rel_group_count m6 where m4.id = m3.name and m3.artist_credit = m2.id and m2.id = m1.artist_credit and m5.id = m3.type and m6.id = m3.id and m1.artist = "+id+";")
links = []
wiki = ""
for url in urls:
if "wikipedia" in url[0]:
wiki = url[0]
else:
links.append(url[0])
return dict(discography=discography, wiki=wiki, links=links, artistname=artistname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def artists():\n # TODO: replace with real data returned from querying the database (DONE)\n artists = Artist.query.group_by(Artist.id, Artist.name).all()\n\n data = []\n\n for a in artists :\n data.append({\n 'id' : a.id,\n 'name' : a.name\n })\n\n return render_template(\"pages/artists.html\", artists=data)",
"def display_all():\n results = artwork_db.get_all_artwork()\n for artist in results:\n print(artist)",
"async def arts(self, ctx: BBContext, artist: Optional[discord.Member] = None):\n\n if artist:\n query = f'SELECT url, artist_name FROM {TABLE_ARTS} WHERE artist_id = $1 LIMIT 20'\n args = [query, artist.id]\n else:\n query = f'SELECT url, artist_name FROM {TABLE_ARTS} ORDER BY random() LIMIT 20'\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n\n view = ArtsPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)",
"def artist_page(name=None):\n\n # regular expressions: always almost never not a good idea\n match = search(r\"^artist-\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}$\", name)\n\n if match:\n id = name\n artist_data = generate_artist_data(id, use_id=True)\n else:\n artist_data = generate_artist_data(name)\n id = artist_data[\"id\"]\n\n track_data = generate_track_data(id)\n similarity_data = generate_similar_artists(id)\n\n return render_template(\"artist.html\",\n data=artist_data, tdata=track_data, sim=similarity_data,\n pygmented={\n # \"adata\": highlight(str(artist_data), JsonLexer(), HtmlFormatter()),\n # \"tdata\": highlight(str(track_data), JsonLexer(), HtmlFormatter()),\n #\"sdata\": highlight(str(similarity_data), JsonLexer(), HtmlFormatter())\n \"adata\": str(artist_data),\n \"tdata\": str(track_data),\n \"sdata\": str(similarity_data)\n },\n json_uuid=url_for(\"static\",\n filename=\"json/\" + get_mood_json(track_data))\n )",
"def get_artists():\n return query_multiple(request.args, artist_search, \\\n artist_filter, Artist, artists_schema)",
"async def get_artist(self, artist_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getArtist\", extra_query={\"id\": artist_id})",
"def get_all_artists(self):\n self.all_artists = get_artist_list()\n self.artists = self.all_artists\n self.artist_names = [x.get_name() for x in self.artists]\n self.artist_ids = [x.id for x in self.artists]",
"def _artist_info(self, artist):\n url = (FANART_URL + self._url_encode(artist))\n try:\n parsed_json = self._get_json(url)\n self.__artist_id = parsed_json['artists'][0]['idArtist']\n self.__artist_name = parsed_json['artists'][0]['strArtist']\n self.__artist_aka = parsed_json['artists'][0]['strArtistAlternate']\n self.__rec_label = parsed_json['artists'][0]['strLabel']\n self.__rec_label_id = parsed_json['artists'][0]['idLabel']\n self.__year_formed = parsed_json['artists'][0]['intFormedYear']\n self.__year_born = parsed_json['artists'][0]['intBornYear']\n self.__year_died = parsed_json['artists'][0]['intDiedYear']\n self.__disbanded = parsed_json['artists'][0]['strDisbanded']\n self.__style = parsed_json['artists'][0]['strStyle']\n self.__genre = parsed_json['artists'][0]['strGenre']\n self.__mood = parsed_json['artists'][0]['strMood']\n self.__website = parsed_json['artists'][0]['strWebsite']\n self.__facebook = parsed_json['artists'][0]['strFacebook']\n self.__twitter = parsed_json['artists'][0]['strTwitter']\n self.__biography_en = parsed_json['artists'][0]['strBiographyEN']\n if self.__biography_en is not None:\n self.__biography_en = (parsed_json['artists'][0]\n ['strBiographyEN'].encode('utf-8'))\n self.__biography_de = parsed_json['artists'][0]['strBiographyDE']\n if self.__biography_de is not None:\n self.__biography_de = (parsed_json['artists'][0]\n ['strBiographyDE'].encode('utf-8'))\n self.__biography_fr = parsed_json['artists'][0]['strBiographyFR']\n if self.__biography_fr is not None:\n self.__biography_fr = (parsed_json['artists'][0]\n ['strBiographyFR'].encode('utf-8'))\n self.__biography_cn = parsed_json['artists'][0]['strBiographyCN']\n if self.__biography_cn is not None:\n self.__biography_cn = (parsed_json['artists'][0]\n ['strBiographyCN'].encode('utf-8'))\n self.__biography_it = parsed_json['artists'][0]['strBiographyIT']\n if self.__biography_it is not None:\n self.__biography_it = (parsed_json['artists'][0]\n ['strBiographyIT'].encode('utf-8'))\n self.__biography_jp = parsed_json['artists'][0]['strBiographyJP']\n if self.__biography_jp is not None:\n self.__biography_jp = (parsed_json['artists'][0]\n ['strBiographyJP'].encode('utf-8'))\n self.__biography_ru = parsed_json['artists'][0]['strBiographyRU']\n if self.__biography_ru is not None:\n self.__biography_ru = (parsed_json['artists'][0]\n ['strBiographyRU'].encode('utf-8'))\n self.__biography_es = parsed_json['artists'][0]['strBiographyES']\n if self.__biography_es is not None:\n self.__biography_es = (parsed_json['artists'][0]\n ['strBiographyES'].encode('utf-8'))\n self.__biography_pt = parsed_json['artists'][0]['strBiographyPT']\n if self.__biography_pt is not None:\n self.__biography_pt = (parsed_json['artists'][0]\n ['strBiographyPT'].encode('utf-8'))\n self.__biography_se = parsed_json['artists'][0]['strBiographySE']\n if self.__biography_se is not None:\n self.__biography_se = (parsed_json['artists'][0]\n ['strBiographySE'].encode('utf-8'))\n self.__biography_nl = parsed_json['artists'][0]['strBiographyNL']\n if self.__biography_nl is not None:\n self.__biography_nl = (parsed_json['artists'][0]\n ['strBiographyNL'].encode('utf-8'))\n self.__biography_hu = parsed_json['artists'][0]['strBiographyHU']\n if self.__biography_hu is not None:\n self.__biography_hu = (parsed_json['artists'][0]\n ['strBiographyHU'].encode('utf-8'))\n self.__biography_no = parsed_json['artists'][0]['strBiographyNO']\n if self.__biography_no is not None:\n self.__biography_no = (parsed_json['artists'][0]\n 
['strBiographyNO'].encode('utf-8'))\n self.__biography_il = parsed_json['artists'][0]['strBiographyIL']\n if self.__biography_il is not None:\n self.__biography_il = (parsed_json['artists'][0]\n ['strBiographyIL'].encode('utf-8'))\n self.__biography_pl = parsed_json['artists'][0]['strBiographyPL']\n if self.__biography_pl is not None:\n self.__biography_pl = (parsed_json['artists'][0]\n ['strBiographyPL'].encode('utf-8'))\n self.__gender = parsed_json['artists'][0]['strGender']\n self.__members = parsed_json['artists'][0]['intMembers']\n self.__country = parsed_json['artists'][0]['strCountry']\n self.__country_code = parsed_json['artists'][0]['strCountryCode']\n self.__artist_thumb = parsed_json['artists'][0]['strArtistThumb']\n self.__artist_logo = parsed_json['artists'][0]['strArtistLogo']\n self.__fanart = parsed_json['artists'][0]['strArtistFanart']\n self.__fanart2 = parsed_json['artists'][0]['strArtistFanart2']\n self.__fanart3 = parsed_json['artists'][0]['strArtistFanart3']\n self.__banner = parsed_json['artists'][0]['strArtistBanner']\n self.__music_brainz_id = (parsed_json['artists'][0]\n ['strMusicBrainzID'])\n self.__last_fm_chart = parsed_json['artists'][0]['strLastFMChart']\n self.__locked = parsed_json['artists'][0]['strLocked']\n return {'artist_id': self.__artist_id,\n 'artist': self.__artist_name,\n 'artist_aka': self.__artist_aka,\n 'rec_label': self.__rec_label,\n 'rec_label_id': self.__rec_label_id,\n 'year_formed': self.__year_formed,\n 'year_born': self.__year_born,\n 'year_died': self.__year_died,\n 'disbanded': self.__disbanded,\n 'style': self.__style,\n 'genre': self.__genre,\n 'mood': self.__mood,\n 'website': self.__website,\n 'facebook': self.__facebook,\n 'twitter': self.__twitter,\n 'biography_en': self.__biography_en,\n 'biography_de': self.__biography_de,\n 'biography_fr': self.__biography_fr,\n 'biography_cn': self.__biography_cn,\n 'biography_it': self.__biography_it,\n 'biography_jp': self.__biography_jp,\n 'biography_ru': self.__biography_ru,\n 'biography_es': self.__biography_es,\n 'biography_pt': self.__biography_pt,\n 'biography_se': self.__biography_se,\n 'biography_nl': self.__biography_nl,\n 'biography_hu': self.__biography_hu,\n 'biography_no': self.__biography_no,\n 'biography_il': self.__biography_il,\n 'biography_pl': self.__biography_pl,\n 'gender': self.__gender,\n 'members': self.__members,\n 'country': self.__country,\n 'country_code': self.__country_code,\n 'artist_thumb': self.__artist_thumb,\n 'artist_logo': self.__artist_logo,\n 'fanart': self.__fanart,\n 'fanart2': self.__fanart2,\n 'fanart3': self.__fanart3,\n 'banner': self.__banner,\n 'music_brainz_id': self.__music_brainz_id,\n 'last_fm_chart': self.__last_fm_chart,\n 'locked': self.__locked\n }\n except:\n return 0",
"def featured(request):\n\n artist = Artist.objects.all()\n\n \n\n context = {\n 'artist': artist,\n }\n\n return render(request, 'artist/featured.html', context)",
"def artist(self, q, page=None):\r\n return self.get('artist', q, page)",
"def artist():\n\n if request.method == \"GET\":\n return render_template(\"/artist.html\")\n\n else:\n # initialise the variables from the hidden html form input\n type = request.form.get(\"type\")\n url = request.form.get(\"url\")\n thumb = request.form.get(\"thumb\")\n\n # Authorization header to be embedded into the url \n headers = {\n 'Authorization': 'Discogs token=mqjXUBBzjnqrjUkKFIrOPAmlEZsGoDXjkRZgnRIR'\n }\n\n # search the database for artist information\n artists = requests.get(\"%s\" % url, headers=headers)\n artist = artists.json()\n\n # set variable if user is selecting pagination\n goto = request.form.get(\"goto\")\n\n if goto == None:\n\n # search the database for artists releases\n releases = requests.get(\"%s/releases?per_page=50\" % url, headers=headers)\n release = releases.json()\n\n # retreiving useful data\n data = release[\"releases\"]\n pagination = release[\"pagination\"]\n pages = pagination[\"pages\"]\n page = pagination[\"page\"]\n \n\n return render_template(\"/artist.html\",artist=artist, data=data, artistThumb=thumb, page=page, pages=pages, pagination=pagination, type=type, url=url, thumb=thumb)\n\n else:\n\n # search the database for artists releases goto page\n releases = requests.get(\"%s\" % goto, headers=headers)\n release = releases.json()\n\n # retreiving useful data\n data = release[\"releases\"]\n pagination = release[\"pagination\"]\n pages = pagination[\"pages\"]\n page = pagination[\"page\"]\n \n\n return render_template(\"/artist.html\",artist=artist, data=data, artistThumb=thumb, page=page, pages=pages, pagination=pagination, type=type, url=url, thumb=thumb)",
"async def artists(self, ctx: BBContext):\n\n query = \"\"\"SELECT DISTINCT artist_name, COUNT(*)\n FROM extras.arts\n WHERE artist_name IS NOT NULL\n GROUP BY artist_name\n ORDER BY COUNT(*) DESC\"\"\"\n\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n view = ArtsLeaderboardPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)",
"def get_artists(self):\n contents = self.ctrl.library.get_artists()\n return self.resp_from_data(contents)",
"def search_artists():\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive. (DONE)\n\n response = request.form.get('search_term', '')\n response = response.lower()\n\n artists = db.session.query(Artist).filter(Artist.name.ilike('%' + response + '%')).all()\n results = []\n \n for a in artists:\n print(a.name)\n results.append({\n 'id': a.id,\n 'name' : a.name\n })\n\n response={\n \"count\": len(results),\n \"data\": results\n }\n\n return render_template(\n \"pages/search_artists.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )",
"def artist_link(self, obj):\n if not obj.artist:\n return None\n url = reverse(\n 'admin:music_publisher_artist_change', args=[obj.artist.id])\n link = '<a href=\"{}\">{}</a>'.format(url, obj.artist)\n return mark_safe(link)",
"def artist(self, artist_id, **kwargs):\n _id = self._get_artist_id(artist_id)\n # pylint: disable=no-member\n return self._get(API.ARTIST.value.format(id=_id), **kwargs)",
"def get_artist_list():\n return list(dmla.list_artists())",
"def artist_related_artists(self, artist_id, **kwargs):\n _id = self._get_artist_id(artist_id)\n # pylint: disable=no-member\n return self._get(API.ARTIST_RELATED_ARTISTS.value.format(id=_id), **kwargs)",
"def get_artist(self):\n return self._artist",
"def show_artist(artist_id):\n\n result = db.session.query(Artist).filter(Artist.id == artist_id)\n result = result[0]\n\n past_shows_count = 0\n upcoming_shows_count = 0\n\n past_shows = []\n upcoming_shows = []\n\n all_shows = Shows.query.all()\n\n print(all_shows)\n\n for show in all_shows:\n if show.artist_id == result.id:\n show_time = datetime.strptime(show.start_time, '%Y-%m-%d %H:%M:%S')\n if show_time > datetime.now() :\n upcoming_shows.append(show)\n else: \n past_shows.append(show)\n \n past_shows_count = len(past_shows)\n upcoming_shows_count = len(upcoming_shows)\n\n resdata = {\n \"id\": result.id,\n \"name\": result.name,\n \"genres\": json.loads(result.genres),\n \"city\": result.city,\n \"state\": result.state,\n \"phone\": result.phone,\n \"website\": result.website,\n \"facebook_link\": result.facebook_link,\n \"seeking_venue\": result.seeking_venue,\n \"seeking_description\": result.seeking_description,\n \"image_link\": result.image_link,\n \"past_shows\": past_shows,\n \"upcoming_shows\": upcoming_shows,\n \"past_shows_count\": past_shows_count,\n \"upcoming_shows_count\": upcoming_shows_count,\n }\n\n data = list(filter(lambda d: d[\"id\"] == artist_id, [resdata]))[0]\n return render_template(\"pages/show_artist.html\", artist=data)",
"def add_artists(self, params):\n artists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for a in artists:\n if 'artist:' + a in n:\n names.append(n)\n\n self.add_playlist(names)",
"def get_artists(self, offset=None):\n return self.__get('artists')",
"def _get_artist(self):\n artists = FileRecord.query(FileRecord.artist).distinct().filter(\n FileRecord.album == self.name).filter(\n FileRecord.year == self.year).all()\n\n if len(artists) > 1:\n return \"Various Artists\"\n elif len(artists) == 1:\n return artists[0][0]\n else:\n return \"(Unknown)\"",
"def get_artist(artist_id):\n return query_single(artist_id, Artist, artist_schema)",
"def display_artist_complete_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_all_artwork_from_one_artist(artist_name)\n for piece in results:\n print(piece)\n else:\n print('Sorry, no artwork from this artist to display ')",
"def test_artist(self):\n a = self.d.artist(1)\n self.assertEqual(a.name, 'Persuader, The')",
"def create_artist_submission():\n\n # called upon submitting the new artist listing form\n # TODO: insert form data as a new Venue record in the db, instead\n # TODO: modify data to be the data object returned from db insertion\n\n try:\n name = request.form.get(\"name\")\n city = request.form.get(\"city\")\n state = request.form.get(\"state\")\n phone = request.form.get(\"phone\")\n imageLink = request.form.get(\"image_link\")\n genres = request.form.getlist(\"genres\")\n facebookLink = request.form.get(\"facebook_link\")\n website = request.form.get(\"website\")\n seeking_venue = request.form.get(\"seeking_venue\")\n seeking_description = request.form.get(\"seeking_description\")\n \n artist_to_add = Artist(\n name=name,\n city=city,\n state=state,\n phone=phone,\n image_link=imageLink,\n genres=genres,\n facebook_link=facebookLink,\n website=website,\n seeking_venue=seeking_venue,\n seeking_description=seeking_description,\n )\n\n db.session.add(artist_to_add)\n db.session.commit()\n\n # on successful db insert, flash success\n flash(\"Artist \" + request.form[\"name\"] + \" was successfully listed!\")\n\n except:\n flash(\"An error occurred. Artist \" + name + \" could not be listed.\")\n db.session.rollback()\n finally:\n db.session.close()\n\n return render_template(\"pages/home.html\")",
"def __add_artist(self, artist, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(artist['id']),\n\t\t\t'name' : artist['name'].lower(),\n\t\t\t'is_verified' : artist['is_verified'],\n\t\t\t'url' : artist['url'],\n\t\t\t'songs' : genius_api.get_artist_songs_id(artist['id'], artist_name=artist['name'])\n\t\t\t}\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.artists.insert_one(entry)",
"def menu_python_for_artists(self, event=None):\n self.link('http://spe.pycs.net/stories/6.html')",
"def __repr__(self):\n return f'<Artist {self.id} {self.name}>'"
]
| [
"0.7046416",
"0.6953733",
"0.6649395",
"0.66083264",
"0.657852",
"0.65465295",
"0.6491085",
"0.64804745",
"0.64677274",
"0.64607227",
"0.64456445",
"0.6401886",
"0.63846064",
"0.6378521",
"0.6368321",
"0.6331579",
"0.6211678",
"0.62032557",
"0.6153709",
"0.61417043",
"0.61027473",
"0.6089153",
"0.6078868",
"0.6070855",
"0.6054406",
"0.6045691",
"0.6038256",
"0.60255015",
"0.6024254",
"0.6012469"
]
| 0.78098094 | 0 |
Extract consecutive ners from the result of CoreNLPNERTagger | def merge_ners(tokens):
ners = list()
merged_tokens = list()
candid_entity = list()
keep = False
prev_tag = 'O'
for i, (token, tag) in enumerate(tokens):
if keep:
if tag not in IGNORE_NER_TAG:
candid_entity.append(token)
keep = True
else:
# ner ends in prev step
merged_tokens.append(candid_entity)
merged_tokens.append(token)
ners.append((candid_entity, prev_tag))
keep = False
else:
if tag not in IGNORE_NER_TAG:
# new ner
candid_entity = list()
candid_entity.append(token)
keep = True
else:
# not ner token
merged_tokens.append(token)
prev_tag = tag
return ners, merged_tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_ngrams(self, sequence):\n sequence = self.prefix + sequence + self.suffix\n for i, event in enumerate(sequence[self.n:], self.n):\n yield event, sequence[i-self.n: i]",
"def extract_nps(text, annotation):\n np_starts = [i for i in range(len(annotation)) if annotation[i] == 'B-NP']\n np_indexes = []\n for s in np_starts:\n i = 1\n while s+i < len(annotation) and annotation[s + i] == 'I-NP':\n i += 1\n np_indexes.append((s, s + i))\n return [' '.join(text[s:e]) for s, e in np_indexes]",
"def get_nerspos(tokens, ners):\n pos_list = list()\n for ner in ners:\n pos = get_nerpos(tokens, ner)\n pos_list.append(pos)\n\n return pos_list",
"def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs",
"def get_ngrams(seq, n):\n return",
"def get_nerpos(tokens, ner):\n\n loc = list()\n for i, token in enumerate(tokens):\n if token == ner:\n loc.append(i)\n return loc",
"def _sentence_segmenter(paragr):\n # this is relatively high because we are only looking for sentences that\n # will have subject and object\n MIN_SENTLENGTH = 100\n MAX_SENTLENGTH = 512\n\n # sentence termination pattern used in sentence_segmenter(paragr)\n terpat = re.compile('[\\.\\?!]\\s+[A-Z\\\"]')\n\n # source: LbjNerTagger1.11.release/Data/KnownLists/known_title.lst from\n # University of Illinois with editing\n ABBREV_LIST = ['mrs.', 'ms.', 'mr.', 'dr.', 'gov.', 'sr.', 'rev.', 'r.n.',\n 'pres.', 'treas.', 'sect.', 'maj.', 'ph.d.', 'ed. psy.',\n 'proc.', 'fr.', 'asst.', 'p.f.c.', 'prof.', 'admr.',\n 'engr.', 'mgr.', 'supt.', 'admin.', 'assoc.', 'voc.',\n 'hon.', 'm.d.', 'dpty.', 'sec.', 'capt.', 'c.e.o.',\n 'c.f.o.', 'c.i.o.', 'c.o.o.', 'c.p.a.', 'c.n.a.', 'acct.',\n 'llc.', 'inc.', 'dir.', 'esq.', 'lt.', 'd.d.', 'ed.',\n 'revd.', 'psy.d.', 'v.p.', 'senr.', 'gen.', 'prov.',\n 'cmdr.', 'sgt.', 'sen.', 'col.', 'lieut.', 'cpl.', 'pfc.',\n 'k.p.h.', 'cent.', 'deg.', 'doz.', 'Fahr.', 'Cel.', 'F.',\n 'C.', 'K.', 'ft.', 'fur.', 'gal.', 'gr.', 'in.', 'kg.',\n 'km.', 'kw.', 'l.', 'lat.', 'lb.', 'lb per sq in.', 'long.',\n 'mg.', 'mm.,, m.p.g.', 'm.p.h.', 'cc.', 'qr.', 'qt.', 'sq.',\n 't.', 'vol.', 'w.', 'wt.']\n\n sentlist = []\n # controls skipping over non-terminal conditions\n searchstart = 0\n terloc = terpat.search(paragr)\n while terloc:\n isok = True\n if paragr[terloc.start()] == '.':\n if (paragr[terloc.start() - 1].isupper() and\n paragr[terloc.start() - 2] == ' '):\n isok = False # single initials\n else:\n # check abbreviations\n loc = paragr.rfind(' ', 0, terloc.start() - 1)\n if loc > 0:\n if paragr[loc + 1:terloc.start() + 1].lower() in ABBREV_LIST:\n isok = False\n if paragr[:terloc.start()].count('(') != paragr[:terloc.start()].count(')'):\n isok = False\n if paragr[:terloc.start()].count('\"') % 2 != 0:\n isok = False\n if isok:\n if (len(paragr[:terloc.start()]) > MIN_SENTLENGTH and\n len(paragr[:terloc.start()]) < MAX_SENTLENGTH):\n sentlist.append(paragr[:terloc.start() + 2])\n paragr = paragr[terloc.end() - 1:]\n searchstart = 0\n else:\n searchstart = terloc.start() + 2\n\n terloc = terpat.search(paragr, searchstart)\n\n # add final sentence\n if (len(paragr) > MIN_SENTLENGTH and len(paragr) < MAX_SENTLENGTH):\n sentlist.append(paragr)\n\n return sentlist",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def ngrams_(text, n):\n return zip(*[text[i:] for i in range(n)])",
"def extract_entities(event):\n # TODO The text should probably already be tagged and tokenized before this step\n tree = ne_chunk(event.pos_tagged)\n entities = set([])\n\n people = tree.subtrees(lambda x: x.node == \"PERSON\")\n for person in people:\n entities.add(\" \".join([leaf[0] for leaf in person.leaves()]))\n\n places = tree.subtrees(lambda x: x.node == \"GPE\")\n for place in places:\n entities.add(\" \".join([leaf[0] for leaf in place.leaves()]))\n\n organizations = tree.subtrees(lambda x: x.node == \"ORGANIZATION\")\n for org in organizations:\n entities.add(\" \".join([leaf[0] for leaf in org.leaves()]))\n \n return entities",
"def entity_groups(self):\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups",
"def entity_groups(self):\n entities = self.entities()\n if not entities:\n return None\n non_ent = self.opts.get('non_ent', 'O')\n groups = []\n idx = 0\n while idx < len(entities):\n ner_tag = entities[idx]\n # Check for entity tag\n if ner_tag != non_ent:\n # Chomp the sequence\n start = idx\n while (idx < len(entities) and entities[idx] == ner_tag):\n idx += 1\n groups.append((self.slice(start, idx).untokenize(), ner_tag))\n else:\n idx += 1\n return groups",
"def collect_ners(self, ents):\r\n collected_ners = []\r\n for token in ents:\r\n if token.label_ in self.ners_label:\r\n collected_ners.append(token.text + '/' + token.label_)\r\n return collected_ners",
"def __tagsToNgrams__(self):\n bigrams = defaultdict(int)\n trigrams = defaultdict(int)\n for tags in self.getTags():\n tags = list(tags)\n for i in range(2):\n tags.insert(0, BEGIN)\n for k in range(2, len(tags)):\n trigrams[tuple(tags[k-2:k+1])] += 1\n bigrams[tuple(tags[k-1:k+1])] += 1\n return bigrams, trigrams",
"def ngrams(iterable, n=1):\n return zip(*(iterable[i:] for i in range(n)))",
"def getTags(number=None):",
"def build_ngrams(tokens, n=2):\n ngrams = zip(*(islice(group, idx, None) for idx, group in enumerate(tee(tokens, n))))\n return ngrams",
"def iter_ngrams(self, sentence, n):\n return [tuple(sentence[i : i+n]) for i in range(len(sentence)-n+1)]",
"def __iter__(self):\n return self.ngrams()",
"def combine_persons(sent_tagged):\n persons = []\n i = 0\n while i < len(sent_tagged) - 3:\n if sent_tagged[i][1] == 'PERSON':\n if sent_tagged[i+1][1] == 'PERSON':\n if sent_tagged[i+2][1] == 'PERSON':\n persons.append(sent_tagged[i][0].lower() + \n \" \" + sent_tagged[i+1][0].lower() + \n \" \" + sent_tagged[i+2][0].lower())\n i+=1\n else:\n persons.append(sent_tagged[i][0].lower() + \n \" \" + sent_tagged[i+1][0].lower())\n i+=1 \n else:\n persons.append(sent_tagged[i][0].lower())\n i+=1 \n return(persons)",
"def sent_tagged(novel_text):\n novel = []\n novel_tagged = NER_tagger.tag(word_tokenize(novel_text))\n novel_sent_tokenized = sent_tokenize(novel_text)\n novel_tokenized = [word_tokenize(novel_sent_tokenized[i]) for i in range(len(novel_sent_tokenized))]\n sent_length = [len(novel_tokenized[i]) for i in range(len(novel_sent_tokenized))]\n \n a = 0\n b = 0\n for i in sent_length:\n b += i\n novel.append(novel_tagged[a:b])\n a = b\n \n return(novel)",
"def tokenize(self):\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1",
"def create_ner_tags_dict():\r\n global ne_tags_set, ner_to_id, ne_tags, id_to_ner\r\n\r\n ne_tags = list(ne_tags_set) + ['[CLS]', '[SEP]']\r\n ne_tags.sort()\r\n id_to_ner = {idx: tag for idx, tag in enumerate(ne_tags)}\r\n ner_to_id = {tag: idx for idx, tag in enumerate(ne_tags)}\r\n print(f'Total NER tag size: {len(ne_tags)}; Tags: {ne_tags}')",
"def basic_ner(sentence, power = True, locations = False):\n res, previous_word = [], None\n mod_sentence = re.sub(r'\\s+', ' ', re.sub(r'[^\\w\\s]', ' ', sentence)).strip().split(' ')\n for word in mod_sentence:\n if word.isalpha() and word[0] == word[0].upper() and (locations or word not in ner_lists.all_location_names):\n res.append(word)\n previous_word = word \n else:\n previous_word = None\n if nltk_enabled:\n tagged_sentence = pos_tag(mod_sentence)\n for word, tag in tagged_sentence:\n if word in res and 'N' not in tag:\n res.remove(word)\n return list_powerset(res) if power else res",
"def entities(self):\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]",
"def entities(self):\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]",
"def generate_ngrams(iterable, n):\n return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])",
"def group(seq):\n pass # replace with your solution",
"def convert_to_t5_format(nlp, texts):\n\n inputs = []\n outputs = []\n original_texts = []\n\n for text, doc in zip(texts, nlp.pipe(texts, n_process=-1)):\n\n pairs = set()\n\n for chunk in doc.noun_chunks:\n if chunk.text == text:\n continue\n input_ = text[0 : chunk.start_char] + \"<extra_id_0> \" + text[chunk.end_char + 1 :]\n output = \"<extra_id_0> \" + chunk.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n left_edge_i = token.left_edge.i\n right_edge_i = token.right_edge.i\n chunk_length = right_edge_i - left_edge_i + 1\n if chunk_length / len(doc) > 0.5 or chunk_length > 10: # if chunk is too long, just skip it\n continue\n\n input_ = str(doc[:left_edge_i]) + \" <extra_id_0> \" + str(doc[right_edge_i + 1 :])\n output = \"<extra_id_0> \" + str(doc[left_edge_i : right_edge_i + 1]) + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for token in doc:\n if token.pos_ in [\"NOUN\", \"PRON\", \"PROPN\"]: # we don't want to mask parts of noun chunks\n continue\n input_ = str(doc[: token.i]) + \" <extra_id_0> \" + str(doc[token.i + 1 :])\n output = \"<extra_id_0> \" + token.text + \" <extra_id_1> </s>\"\n\n pairs.add((input_.strip(), output))\n\n for (input_, output) in pairs:\n inputs.append(input_)\n outputs.append(output)\n original_texts.append(text)\n\n return inputs, outputs, original_texts",
"def ngrams(text, n):\n grams = zip(*[text[i:] for i in range(n)])\n return [''.join(gram) for gram in grams]"
]
| [
"0.5992052",
"0.58803827",
"0.57412916",
"0.57153046",
"0.56501865",
"0.56453776",
"0.55940616",
"0.5551463",
"0.54952145",
"0.5488924",
"0.54851663",
"0.54851663",
"0.54396814",
"0.53640854",
"0.5351113",
"0.5312324",
"0.5291228",
"0.52684474",
"0.52250415",
"0.5201601",
"0.51974916",
"0.5181774",
"0.5173056",
"0.5166415",
"0.51550156",
"0.51550156",
"0.5142436",
"0.51353985",
"0.5131579",
"0.51310396"
]
| 0.59793997 | 1 |
Load freebase entity dictionary from saved dict | def load_freebase_entity(path="../data/freebase/dict.txt"):
logger.info('Loading freebase entity dictionary...')
name2id = dict()
id2name = dict()
with open(path, 'r', buffering=1024 * 1024 * 100) as f:
for line in f:
tokens = line.split('\t')
_name = tokens[0].strip()
_id = tokens[1].strip()
name2id[_name] = _id
id2name[_id] = _name
logger.info('Successfully loaded {} entities from file'.format(len(name2id)))
return name2id, id2name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_dict(cls, dikt) -> 'GnbrEntity':\n return util.deserialize_model(dikt, cls)",
"def load_dict(self, dct):\n pass",
"def load(self):\n\n args = self.id, self.name\n self.loader.session.logger.debug(\"loading CDR%d (%r)\", *args)\n cursor = self.loader.dictionary_cursor\n cursor.execute(self.DICTIONARY_INSERT, self.entry)\n for alias in self.aliases:\n cursor.execute(self.ALIAS_INSERT, alias)\n self.loader.dictionary_conn.commit()",
"def _load(self, load_dict):\n self._data_ = load_dict",
"def _load(self) -> dict:\n raise NotImplementedError()",
"def _to_load_dict(self, d: dict) -> dict:\n return _to_bucket_dict(d)",
"async def load(self) -> Dict[str, Dict]:\n raise NotImplementedError()",
"def __load(self) -> Dict:\n return dict()",
"def load_key():",
"def from_dict(cls: T, source: dict[str, Any], connection: Connection) -> T:\n return super(Entity, cls).from_dict(source=source, connection=connection)",
"def loadFromDict(self, d):\n self._data = d\n self._doDefaults()",
"def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data",
"def _load(self, load_dict):\n try:\n self.v_protocol = load_dict.pop(PickleParameter.PROTOCOL)\n except KeyError:\n # For backwards compatibility\n dump = next(load_dict.values())\n self.v_protocol = PickleParameter._get_protocol(dump)\n for key in load_dict:\n val = load_dict[key]\n self._data[key] = pickle.loads(val)",
"def _to_entity(self):\n obj_dict = copy.deepcopy(vars(self))\n exclude_from_indexes = ()\n try:\n exclude_from_indexes = self._exclude_from_indexes_\n except AttributeError:\n pass\n\n try:\n key = self.key\n except AttributeError:\n key = CustomKey(self.__class__.__name__)\n\n entity = datastore.Entity(key=key, exclude_from_indexes=exclude_from_indexes)\n for dict_key, dict_val in obj_dict.copy().items():\n if dict_val is not None:\n if isinstance(dict_val, BaseModel):\n # If the value is an instance of BaseModel, convert the instance\n # into a \"dotted\" dictionary compatible with NDB entities.\n del obj_dict[dict_key]\n obj_dict.update(dict_val.dottify(dict_key))\n if isinstance(dict_val, list) and len(dict_val) > 0 and isinstance(dict_val[0], BaseModel):\n # if the value is a list of BaseModel objects\n dotted_dict_list = []\n dotted_dict = dict()\n for i, val in enumerate(dict_val):\n dotted_dict_list.append(val.dottify(dict_key))\n for dict_ in dotted_dict_list:\n for k, v in dict_.items():\n temp_val = dotted_dict.get(k) or []\n temp_val.append(v)\n dotted_dict[k] = temp_val\n del obj_dict[dict_key]\n obj_dict.update(dotted_dict)\n else:\n # if the value is False-y i.e. the key has not been set in the object,\n # delete the key from the object\n del obj_dict[dict_key]\n entity.update(obj_dict)\n return entity",
"def from_dict(cls, dikt) -> 'BundleData':\n return util.deserialize_model(dikt, cls)",
"def _packaged_dict_for_entity(rt):\n entity = rt.entity\n return {u'entity_id': entity.id,\\\n u'name': entity.aggregation_paths['_geo'][-1]}",
"def _load_entity(client, entity_type, entity_id, parent_key=None):\n\n key = _load_key(client, entity_type, entity_id, parent_key)\n entity = client.get(key)\n log('retrieved entity: ' + entity_type + ' for ID: ' + str(entity_id))\n return entity",
"def load_lookup_dicto():\n with open(LOOKUP_PATH, 'rb') as f:\n lookup_dicto = pickle.load(f)\n return lookup_dicto",
"def load_id_dict(self) -> None:\n sys.stdout.write(\"Loading identifier dictionaries...\\n\")\n assert os.path.exists(self.mapping_file)\n with open(self.mapping_file, 'r') as f:\n self.forward_map, self.backward_map = json.load(f)\n self.forward_map = {int(k): v for k, v in self.forward_map.items()}",
"def load(cls, data):\n if isinstance(data, dict):\n print('>>> dict')\n else:\n print('>>> obj')\n # cls_fields = fields(cls)\n init()",
"def _load_dict(local_path):\n with open(local_path, \"rb\") as f:\n d = pickle.load(f)\n print(f\"SidechainNet was loaded from {local_path}.\")\n return d",
"def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n dump = load_dict[\"data\"]\n self._data = pickle.loads(dump)\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n try:\n self.v_protocol = load_dict[PickleParameter.PROTOCOL]\n except KeyError:\n # For backwards compatibility\n self.v_protocol = PickleParameter._get_protocol(dump)\n\n if \"explored_data\" in load_dict:\n explore_table = load_dict[\"explored_data\"]\n\n name_col = explore_table[\"idx\"]\n\n explore_list = []\n for name_id in name_col:\n arrayname = self._build_name(name_id)\n loaded = pickle.loads(load_dict[arrayname])\n explore_list.append(loaded)\n\n self._explored_range = explore_list\n self._explored = True\n\n self._default = self._data\n self._locked = True",
"def load_entities():\n # TODO dynamic look into entities folder\n return ['location']",
"def load_synthetic_db_fields(self, db_entity):\n for field in self.synthetic_fields:\n objclasses = object_base.VersionedObjectRegistry.obj_classes(\n ).get(self.fields[field].objname)\n\n objclass = objclasses[0]\n synth_db_objs = db_entity.get(field, None)\n\n # NOTE(namnh): synth_db_objs can be list, dict, empty list\n if isinstance(self.fields[field], fields.DictOfObjectsField):\n dict_entity_object = {key: objclass.load_object(value)\n for key, value in synth_db_objs.items()}\n setattr(self, field, dict_entity_object)\n elif isinstance(self.fields[field], fields.ListOfObjectsField):\n entities_object = [objclass.load_object(entity)\n for entity in synth_db_objs]\n setattr(self, field, entities_object)\n else:\n # At this moment, this field is an ObjectField.\n entity_object = objclass.load_object(synth_db_objs)\n setattr(self, field, entity_object)\n self.obj_reset_changes([fields])",
"def load(self) -> None:\n data = get_dictionary()\n if 'error' in data:\n quit()\n self.data = data",
"def _load(self):\n self.logger.debug(\"Loading from persistence\")\n # load whole item from persistence\n data = self._persistence.load(self.id(), default={})\n if not data:\n return\n\n try:\n self.persistence_deserialize(data)\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n for persisted_var in self.persisted_values():\n if persisted_var in data:\n self.logger.debug(\"Loaded value {} for attribute {}\".format(\n data[persisted_var], persisted_var))\n # Set the loaded value to the attribute on this class\n setattr(self, persisted_var, data[persisted_var])\n except:\n # log exception while loading and let it continue\n self.logger.exception(\n \"Failed to deserialize block with data: {}\".format(data))",
"def reload(self):\n\n dict_of_dicts = {}\n classes = {\n \"BaseModel\": BaseModel,\n \"User\": User,\n \"Amenity\": Amenity,\n \"City\": City,\n \"Place\": Place,\n \"Review\": Review,\n \"State\": State}\n\n try:\n temp_dict = {}\n with open(self.__file_path, \"r\") as r:\n dict_of_dicts = json.load(r)\n for k, v in dict_of_dicts.items():\n if v['__class__'] in classes:\n temp_dict[k] = classes[v['__class__']](**v)\n self.__objects = temp_dict\n except Exception:\n pass",
"def load_storage_model(self, data: dict) -> None:\n if \"parent_key\" in data:\n self.parent_key = (\n data[\"parent_key\"] if data[\"parent_key\"] is not None else NoKey\n )\n del data[\"parent_key\"]\n\n self.data = self.storage_model.parse_obj(data)",
"def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass",
"def from_datastore(entity):\n if not entity:\n return None\n if isinstance(entity, builtin_list):\n entity = entity.pop()\n\n entity['id'] = entity.key.name\n return entity"
]
| [
"0.64202756",
"0.6255847",
"0.62532294",
"0.61488426",
"0.59458196",
"0.59139955",
"0.58941555",
"0.5862503",
"0.58609146",
"0.5860008",
"0.5859102",
"0.5848029",
"0.5788074",
"0.57809716",
"0.5733735",
"0.5718896",
"0.5706553",
"0.5698764",
"0.5692765",
"0.56742126",
"0.56615496",
"0.56502473",
"0.56211776",
"0.5601984",
"0.55537057",
"0.5553511",
"0.5537815",
"0.5493246",
"0.54893893",
"0.5478396"
]
| 0.7637444 | 0 |
Return position of ner in list of tokens | def get_nerpos(tokens, ner):
loc = list()
for i, token in enumerate(tokens):
if token == ner:
loc.append(i)
return loc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nerspos(tokens, ners):\n pos_list = list()\n for ner in ners:\n pos = get_nerpos(tokens, ner)\n pos_list.append(pos)\n\n return pos_list",
"def get_head_pos( head, ngram ):\n try:\n tokens = ngram.split( ' ' )\n return str([ i for i, t in enumerate( tokens ) if t.startswith( head + \"/\" )][0] + 1 )\n except ValueError:\n return None",
"def _get_text_positions_tokenless(self, node, padded):\n # type: (ast.AST, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if sys.version_info[:2] < (3, 8):\n raise AssertionError(\"This method should only be called internally after checking supports_tokenless()\")\n\n if isinstance(node, ast.Module):\n # Modules don't have position info, so just return the range of the whole text.\n # The token-using method does something different, but its behavior seems weird and inconsistent.\n # For example, in a file with only comments, it only returns the first line.\n # It's hard to imagine a case when this matters.\n return (1, 0), self._line_numbers.offset_to_line(len(self._text))\n\n if not hasattr(node, 'lineno'):\n return (1, 0), (1, 0)\n\n assert node # tell mypy that node is not None, which we allowed up to here for compatibility\n\n decorators = getattr(node, 'decorator_list', [])\n if decorators:\n # Function/Class definition nodes are marked by AST as starting at def/class,\n # not the first decorator. This doesn't match the token-using behavior,\n # or inspect.getsource(), and just seems weird.\n start_node = decorators[0]\n else:\n start_node = node\n\n if padded and last_stmt(node).lineno != node.lineno:\n # Include leading indentation for multiline statements.\n start_col_offset = 0\n else:\n start_col_offset = self._line_numbers.from_utf8_col(start_node.lineno, start_node.col_offset)\n\n start = (start_node.lineno, start_col_offset)\n\n # To match the token-using behaviour, we exclude trailing semicolons and comments.\n # This means that for blocks containing multiple statements, we have to use the last one\n # instead of the actual node for end_lineno and end_col_offset.\n end_node = last_stmt(node)\n end_lineno = cast(int, end_node.end_lineno)\n end_col_offset = cast(int, end_node.end_col_offset)\n end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)\n end = (end_lineno, end_col_offset)\n\n return start, end",
"def listPosition(word):\n if len(word) == 1: return 1\n pos = 0\n for c in set(word):\n if c < word[0]:\n letters = list(word)\n letters.remove(c)\n pos += arrangements(letters)\n pos += listPosition(word[1:])\n return pos",
"def getIndices(self, tokens):\n tokenTxt, posTxt = attachTokens(tokens)\n if tokenTxt in self.tokenIndices:\n tokenIdx = self.tokenIndices[tokenTxt]\n else:\n tokenIdx = self.tokenIndices[unk]\n if posTxt in self.posIndices:\n posIdx = self.posIndices[posTxt]\n else:\n posIdx = self.posIndices[unk]\n return tokenIdx, posIdx",
"def convert_idx(text, tokens):\n current = 0\n spans = []\n for token in tokens:\n current = text.find(token, current) # Find position of 1st occurrence; start search from 'current' \n if current < 0:\n raise Exception(f\"Token '{token}' cannot be found\")\n spans.append((current, current + len(token)))\n current += len(token) # next search start from the token afterwards\n return spans",
"def get_context_position_in_tokenized_input(tokenized_row, i, pad_on_right):\r\n # List that holds for each index (up to the lenght of the tokenized input sequence)\r\n # 1 if its corresponding token is a context's token, 0 if it's a question's token\r\n # (the contrair if pad_on_right is true). Null for the special tokens.\r\n sequence_ids = tokenized_row.sequence_ids(i)\r\n\r\n # Start context's token's index inside the input sequence.\r\n token_start_index = sequence_ids.index(1 if pad_on_right else 0)\r\n\r\n # End context's token's index inside the input sequence.\r\n token_end_index = len(sequence_ids) - list(reversed(sequence_ids)).index(1 if pad_on_right else 0)\r\n\r\n return Position(start=token_start_index, end=token_end_index)",
"def GetCurrentToken(tokens, pos):\n i = 0\n while i < len(tokens):\n if pos > tokens[i].start and pos < tokens[i].end:\n return tokens[i]\n if pos < tokens[i].start:\n return tokens[i-1] if i > 0 else None\n i += 1\n\n return tokens[len(tokens)-1] if tokens else None",
"def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches",
"def append_position_to_token_list(token_list):\r\n return [PositionToken(value.content, value.gd, index, index+1) for (index, value) in enumerate(token_list)]",
"def get_lexpos(self):\n return self.lexer.get_lexpos()",
"def whereis_token(self, tid, silent=False):\n tk = self.get_token(tid)\n if tk:\n rs = tk.position()\n else:\n rs = None\n if not silent:\n msg = \"Token %s position is %s\" % (tid, rs)\n self.parser.status(msg)\n return rs",
"def treepos(self, tree):\n if tree is None:\n raise ValueError(\"Parse tree not available\")\n stack = [tree]\n treepos = []\n\n wordnum = 0\n while True:\n # tree node:\n if isinstance(stack[-1], Tree):\n # Select the next child.\n if len(treepos) < len(stack):\n treepos.append(0)\n else:\n treepos[-1] += 1\n # Update the stack.\n if treepos[-1] < len(stack[-1]):\n stack.append(stack[-1][treepos[-1]])\n else:\n # End of node's child list: pop up a level.\n stack.pop()\n treepos.pop()\n # word node:\n else:\n if wordnum == self.wordnum:\n return tuple(treepos[: len(treepos) - self.height - 1])\n else:\n wordnum += 1\n stack.pop()",
"def index(self, word):\n return self.tokens.index(word)",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if getattr(node, \"_broken_positions\", None):\n # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.\n return (1, 0), (1, 0)\n\n if supports_tokenless(node):\n return self._get_text_positions_tokenless(node, padded)\n\n return self.asttokens.get_text_positions(node, padded)",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def find_special_token_index(identified_concepts: IdentifiedConcepts, special_token: str):\n for i in range(len(identified_concepts.ordered_concepts)):\n concept = identified_concepts.ordered_concepts[i]\n if concept.name == special_token:\n return i\n return -1",
"def _num_tokens_in_direction(obs, player, row, col, row_add, col_add):\n player_tokens = obs[player]\n r, c = row, col\n num_tokens = -1\n\n # while we are still in bounds and the location belongs to the player.\n while (0 <= r < len(player_tokens) and 0 <= c < len(player_tokens[0]) and\n player_tokens[r, c] == 1):\n r += row_add\n c += col_add\n num_tokens += 1\n return num_tokens",
"def num_tokens(self, index):\r\n raise NotImplementedError",
"def get_words_position(self, words: List[Word]) -> Tuple[int, int]:\n start: int = self.get_word_postion(words[0])[0]\n end: int = self.get_word_postion(words[-1])[1]\n return start, end",
"def correct_token_begin_position(self, tokens, text):\n beginning = 0\n for token in tokens:\n token.text_begin = text.find(token.text_content, beginning) \n beginning += len(token.text_content)\n return tokens",
"def get_pos(sent):\n tokens = get_tokens(sent)\n return [tag for (token, tag) in nltk.pos_tag(tokens)]",
"def _tokens_to_index(self,tokens):\n wids = []\n for tk in tokens:\n if tk in self.wtoi.keys():\n wids.append(self.wtoi[tk])\n else:\n wids.append(1) # <UNK>\n for _ in range(self.sentence_max_length - len(wids)):\n wids.append(0)\n if len(wids) > self.sentence_max_length:\n wids = wids[:self.sentence_max_length]\n return wids",
"def get_pos(data, regex, cur, ignore_cur=False, count=0):\n # List of the *positions* of the found patterns.\n matches = [m.start() for m in re.finditer(regex, data[cur:])]\n pos = -1\n if count:\n if len(matches) > count - 1:\n if ignore_cur and matches[0] == 0:\n if len(matches) > count:\n pos = matches[count]\n else:\n pos = matches[count - 1]\n elif matches:\n if ignore_cur and matches[0] == 0:\n if len(matches) > 1:\n pos = matches[1]\n else:\n pos = matches[0]\n return pos",
"def pos_lettre(mot, lettre):\n lpos=[]\n for i in range(len(mot)):\n if lettre == mot[i]:\n lpos += [i]\n return lpos",
"def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos",
"def get_leader(tokens, token_index, size):\n\n leader = []\n for offset in range(size):\n leader_index = (token_index - 1) - offset\n leader.append(tokens[leader_index] if leader_index >= 0 else None)\n\n return tuple(leader)"
]
| [
"0.68481386",
"0.64260495",
"0.6337191",
"0.6327533",
"0.6311592",
"0.63036317",
"0.62495536",
"0.62290925",
"0.6140072",
"0.6086388",
"0.5966749",
"0.59169406",
"0.59167016",
"0.590555",
"0.5892167",
"0.58879244",
"0.58879244",
"0.58879244",
"0.58823013",
"0.58707553",
"0.5818958",
"0.5811951",
"0.58037704",
"0.57988733",
"0.57955647",
"0.5784268",
"0.5776468",
"0.5766012",
"0.57558733",
"0.5750165"
]
| 0.81861734 | 0 |
Return positions of NERs in a list of tokens | def get_nerspos(tokens, ners):
pos_list = list()
for ner in ners:
pos = get_nerpos(tokens, ner)
pos_list.append(pos)
return pos_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nerpos(tokens, ner):\n\n loc = list()\n for i, token in enumerate(tokens):\n if token == ner:\n loc.append(i)\n return loc",
"def get_positions(token, docs):\n\n all_matches = [token]\n for doc in docs:\n matches = []\n if token in doc:\n indexes = [i for i, x in enumerate(doc) if x == token]\n matches += [docs.index(doc), len(indexes), indexes]\n if matches:\n all_matches.append(matches)\n return all_matches",
"def convert_idx(text, tokens):\n current = 0\n spans = []\n for token in tokens:\n current = text.find(token, current) # Find position of 1st occurrence; start search from 'current' \n if current < 0:\n raise Exception(f\"Token '{token}' cannot be found\")\n spans.append((current, current + len(token)))\n current += len(token) # next search start from the token afterwards\n return spans",
"def getIndices(self, tokens):\n tokenTxt, posTxt = attachTokens(tokens)\n if tokenTxt in self.tokenIndices:\n tokenIdx = self.tokenIndices[tokenTxt]\n else:\n tokenIdx = self.tokenIndices[unk]\n if posTxt in self.posIndices:\n posIdx = self.posIndices[posTxt]\n else:\n posIdx = self.posIndices[unk]\n return tokenIdx, posIdx",
"def token_positions(separation):\n offsets = (-separation, 0, separation)\n for x_pos in offsets:\n for y_pos in offsets:\n yield x_pos, y_pos",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if getattr(node, \"_broken_positions\", None):\n # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.\n return (1, 0), (1, 0)\n\n if supports_tokenless(node):\n return self._get_text_positions_tokenless(node, padded)\n\n return self.asttokens.get_text_positions(node, padded)",
"def get_pos(sent):\n tokens = get_tokens(sent)\n return [tag for (token, tag) in nltk.pos_tag(tokens)]",
"def listPosition(word):\n if len(word) == 1: return 1\n pos = 0\n for c in set(word):\n if c < word[0]:\n letters = list(word)\n letters.remove(c)\n pos += arrangements(letters)\n pos += listPosition(word[1:])\n return pos",
"def _get_text_positions_tokenless(self, node, padded):\n # type: (ast.AST, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if sys.version_info[:2] < (3, 8):\n raise AssertionError(\"This method should only be called internally after checking supports_tokenless()\")\n\n if isinstance(node, ast.Module):\n # Modules don't have position info, so just return the range of the whole text.\n # The token-using method does something different, but its behavior seems weird and inconsistent.\n # For example, in a file with only comments, it only returns the first line.\n # It's hard to imagine a case when this matters.\n return (1, 0), self._line_numbers.offset_to_line(len(self._text))\n\n if not hasattr(node, 'lineno'):\n return (1, 0), (1, 0)\n\n assert node # tell mypy that node is not None, which we allowed up to here for compatibility\n\n decorators = getattr(node, 'decorator_list', [])\n if decorators:\n # Function/Class definition nodes are marked by AST as starting at def/class,\n # not the first decorator. This doesn't match the token-using behavior,\n # or inspect.getsource(), and just seems weird.\n start_node = decorators[0]\n else:\n start_node = node\n\n if padded and last_stmt(node).lineno != node.lineno:\n # Include leading indentation for multiline statements.\n start_col_offset = 0\n else:\n start_col_offset = self._line_numbers.from_utf8_col(start_node.lineno, start_node.col_offset)\n\n start = (start_node.lineno, start_col_offset)\n\n # To match the token-using behaviour, we exclude trailing semicolons and comments.\n # This means that for blocks containing multiple statements, we have to use the last one\n # instead of the actual node for end_lineno and end_col_offset.\n end_node = last_stmt(node)\n end_lineno = cast(int, end_node.end_lineno)\n end_col_offset = cast(int, end_node.end_col_offset)\n end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)\n end = (end_lineno, end_col_offset)\n\n return start, end",
"def append_position_to_token_list(token_list):\r\n return [PositionToken(value.content, value.gd, index, index+1) for (index, value) in enumerate(token_list)]",
"def pos_lettre(mot, lettre):\n lpos=[]\n for i in range(len(mot)):\n if lettre == mot[i]:\n lpos += [i]\n return lpos",
"def prepocess_pos_tagged_texts(tweet_tokens):\n return [TextPreprocessor.additional_text_preprocessing_with_pos(json.loads(t)) for t in tweet_tokens]",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n raise NotImplementedError",
"def _tokens_to_index(self,tokens):\n wids = []\n for tk in tokens:\n if tk in self.wtoi.keys():\n wids.append(self.wtoi[tk])\n else:\n wids.append(1) # <UNK>\n for _ in range(self.sentence_max_length - len(wids)):\n wids.append(0)\n if len(wids) > self.sentence_max_length:\n wids = wids[:self.sentence_max_length]\n return wids",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def _genPosTags(self, tagged):\n return [pos for (token, pos) in tagged]",
"def other_indices(mentions, toks):\r\n indices = []\r\n for i, tok in enumerate(toks):\r\n for m in mentions:\r\n if m['start'] <= tok['start'] and tok['end'] <= m['end']:\r\n indices.append(i)\r\n break\r\n return indices",
"def get_tokens(self):\n\t\treturn self.get_starttokens() + self.get_endtokens()",
"def tokens_to_idxs(self, token_seqs, lexicon):\n idx_seqs = [[lexicon[token] if token in lexicon else lexicon['<UNK>'] for \n token in token_seq] for token_seq in token_seqs]\n return idx_seqs",
"def treeposition2offsetPosition(subTrPosList, tr):\n\t\toffsetList = []\n\t\tcnt = 0\n\t\tfor pos in subTrPosList:\n\t\t\tpar = tr[pos]\n\t\t\twhile par != tr:\n\t\t\t\tfor i in xrange(par.parent_index()):\n\t\t\t\t\tif isinstance(par.parent()[i], nltk.ParentedTree):\n\t\t\t\t\t\tcnt += len(par.parent()[i].leaves())\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint >> debug_log, tr\n\t\t\t\tpar = par.parent()\n\n\t\t\tlabel = ''\n\t\t\tstart = False\n\t\t\tfor char in tr[pos].node:\n\t\t\t\tif not start:\n\t\t\t\t\tif char not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': \n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tstart = True\n\t\t\t\t\t\tlabel += char\n\t\t\t\telse:\n\t\t\t\t\tif char not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': \n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tlabel += char\n\t\t\toffsetList.append((cnt, cnt+len(tr[pos].leaves()), label)) \n\t\t\tcnt = 0\n\t\treturn offsetList",
"def _get_token_spans(text, tokens):\n current = 0\n spans = []\n for token in tokens:\n current = text.find(token, current)\n if current < 0:\n print('Token {} cannot be found'.format(token))\n raise Exception()\n spans.append((current, current + len(token)))\n current += len(token)\n return spans",
"def extract_positions(lines):\n positions = []\n for line in lines:\n position = [int(s) for s in re.findall(r'-?\\d+', line)]\n positions.append(position)\n return positions",
"def correct_token_begin_position(self, tokens, text):\n beginning = 0\n for token in tokens:\n token.text_begin = text.find(token.text_content, beginning) \n beginning += len(token.text_content)\n return tokens",
"def seq_positions(seq, codon):\n\n positions = []\n i = 0\n\n while codon in seq[i:]:\n pos = seq.find(codon, i)\n positions.append(pos)\n i = pos + 1\n positions.sort()\n return positions",
"def vectorize_pos_tags(self, tokens):\n\n pos_counts = defaultdict(float)\n for token in tokens:\n pos_counts[token.pos] += 1.0\n return pos_counts",
"def get_words_position(self, words: List[Word]) -> Tuple[int, int]:\n start: int = self.get_word_postion(words[0])[0]\n end: int = self.get_word_postion(words[-1])[1]\n return start, end",
"def _find_tokens(doc, text):\n word_tokens, pos_tags = zip(*nltk.pos_tag(nltk.word_tokenize(text)))\n\n offset = 0\n tokens, missing = [], []\n for token, pos_tag in zip(word_tokens, pos_tags):\n while offset < len(text) and (text[offset] == '\\n' or text[offset] == ' '):\n if text[offset] == '\\n':\n tokens.append(Token(doc, offset, offset + 1, 'NL', get_shape_category_simple('\\n'), '\\n'))\n offset += 1\n pos = text.find(token, offset, offset + max(50, len(token)))\n if pos > -1:\n if missing:\n start = tokens[-1].end if len(tokens) > 1 else 0\n for m in missing:\n while text[start] in [' ', '\\n']:\n if text[start] == '\\n':\n tokens.append(Token(doc, start, start + 1, 'NL', get_shape_category_simple('\\n'), '\\n'))\n start += 1\n length = len(m[0]) if m[0] not in ['\\'\\'', '``'] else 1\n tokens.append(Token(doc, start, start + length, m[1], get_shape_category_simple(m[0]), m[0]))\n start = start + length\n missing = []\n tokens.append(Token(doc, pos, pos + len(token), pos_tag, get_shape_category_simple(token), token))\n offset = pos + len(token)\n else:\n missing.append((token, pos_tag))\n LOGGER.debug('Token \"{}\" not found'.format(token))\n return tokens",
"def _parse_indices(self):\n v_name = self.prior_token\n v_indices = []\n\n while self.token in (',', '('):\n v_indices.append(self._parse_index(v_name))\n\n return v_indices",
"def to_indices(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:\n if isinstance(tokens, list):\n return [self._token_to_idx[tkn] if tkn in self._token_to_idx else self._token_to_idx[self._unknown_token]\n for tkn in tokens]\n else:\n return self._token_to_idx[tokens] if tokens in self._token_to_idx else \\\n self._token_to_idx[self._unknown_token]",
"def pos(self):\n if 'pos' not in self.annotators:\n return None\n return [t[self.POS] for t in self.data]"
]
| [
"0.8392064",
"0.69574887",
"0.6598042",
"0.6516395",
"0.6503853",
"0.63557386",
"0.6345",
"0.6334957",
"0.63280684",
"0.62971956",
"0.6194977",
"0.61768335",
"0.61675954",
"0.61302745",
"0.6104622",
"0.60178024",
"0.60013366",
"0.5981349",
"0.5938564",
"0.59180295",
"0.5915765",
"0.5891043",
"0.5867725",
"0.5867397",
"0.5866744",
"0.5821618",
"0.5820358",
"0.5814763",
"0.58099407",
"0.578766"
]
| 0.78989506 | 1 |
Get an appropriate OTP for the current Vault version under test. | def get_generate_root_otp():
if vault_version_ge("1.10.0"):
test_otp = "BMjzW3wAsEzINXCM05Wbas3u9zSl"
elif vault_version_ge("1.0.0"):
test_otp = "ygs0vL8GIxu0AjRVEmJ5jLCVq8"
else:
test_otp = "RSMGkAqBH5WnVLrDTbZ+UQ=="
return test_otp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def otp_generate(request):\n phone = request.GET.get('phone', None)\n otp = UserAuth(phone).generate_otp()\n return Response(\n {\n 'success': True,\n 'phone': phone,\n 'otp': otp\n }\n )",
"def generate_otp(email):\n\tprint \"generate_otp\"\n\totp_key = pyotp.random_base32()\n\ttotp = pyotp.TOTP(otp_key)\n\n\t# Data for generating user specific QR code\n\tqrcode_data = totp.provisioning_uri(email)\n\tprint \"otp_key = \", otp_key\n\tprint \"qrcode_data = \", qrcode_data\n\n\treturn (otp_key, qrcode_data)",
"def test_fetch_otp(self):\n otp = self.api.fetch_otp()\n self.assertIn('code', otp)",
"def get_otp_code(self, otp_name, value_override=None):\n params = {'name': otp_name}\n if value_override is not None:\n params['override'] = value_override\n return self._jadeRpc('get_otp_code', params)",
"def get_time_based_otp(self, otp_passcode: Optional[str] = None):\n if otp_passcode:\n self.set_time_based_otp(otp_passcode)\n if not self._totp:\n raise TOTPNotSetError(TOTPNotSetError.ERROR_MSG)\n return self._totp.now()",
"def verify_otp(request: Request, body: VerifyOTPIn, db: Session = Depends(get_db)):\n mgr = LoginManager()\n mgr.verify_otp(db, body.identifier, body.code)\n request.session[\"access_token\"] = secrets.token_hex(16)\n return {\"status\": \"OK\"}",
"def createOTP():\n\t code = []\n\t for i in range(6):\n\t\t code.append(random.randint(0,9))\n\t return \"\".join(str(code) for c in code)",
"def get_token():\n global vault_token\n global vault_token_time\n current_app.logger.info('************* GET TOKEN METHOD **************')\n return 'root'\n if validate_token():\n vault_duration = None\n try:\n auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN')\n current_app.logger.info('*********** Auth Type: ' + auth_type)\n if auth_type == 'TOKEN':\n vault_token = current_app.config.get('VAULT_AUTH_TOKEN')\n elif auth_type == 'USERPASS':\n vault_token, vault_duration = authenticate_userpass()\n elif auth_type == 'LDAP':\n vault_token, vault_duration = authenticate_ldap()\n elif auth_type == 'CERT':\n vault_token, vault_duration = authenticate_certificate()\n elif auth_type == 'GCP':\n vault_token, vault_duration = authenticate_gcp()\n elif auth_type == 'APPROLE':\n vault_token, vault_duration = authenticate_approle()\n else:\n current_app.logger.info('Vault: VAULT_AUTH not configured correctly.')\n raise RuntimeError('Vault: VAULT_AUTH not configured correctly.')\n if vault_duration is not None:\n vault_token_time = datetime.datetime.now() + datetime.timedelta(seconds=int(vault_duration))\n \n current_app.logger.info('*********** TOKEN: ' + vault_token) \n\n except ConnectionError as ConnError:\n current_app.logger.info('Vault: There was an error while connecting to Vault server.')\n raise ConnError\n\n return vault_token",
"def otp_verify(request):\n phone = request.GET.get('phone', None)\n otp = request.GET.get('otp', None)\n verified, user_exists, auth_token, user_id = UserAuth(phone).verify_otp(otp)\n return Response(\n {\n 'phone': phone,\n 'success': verified,\n 'is_registered': user_exists,\n 'auth_token': auth_token,\n 'user_id': user_id\n }\n )",
"def get_counter_based_otp(\n self,\n counter: int,\n otp_passcode: Optional[str] = None,\n ):\n if otp_passcode:\n self.set_counter_based_otp(otp_passcode)\n if not self._hotp:\n raise HOTPNotSetError(HOTPNotSetError.ERROR_MSG)\n return self._hotp.at(counter)",
"def __initialize_totp(self) -> pyotp.totp.TOTP:\n return pyotp.totp.TOTP(self.user.totp_secret)",
"def get(self, request):\n\n email_phone = request.GET.get('email_phone')\n if email_phone is None:\n return unsuccessful_response(message='set phone or email into query param!', status=200)\n\n code = generate_otp()\n\n request_json = {\n 'email_phone': email_phone,\n 'code': code\n }\n\n otp_serialized = OtpSerializer(data=request_json)\n if not otp_serialized.is_valid():\n return validate_error(otp_serialized)\n otp_serialized.save()\n\n send_email(\"Two step verification\", \"Here is your code for verification {0}\".format(code), email_phone)\n\n response_json = {\n 'status': True,\n 'message': 'otp successfully sent to user',\n 'data': {}\n }\n\n return Response(response_json, status=200)",
"def get_otp(self, key):\n packed = self.pack()\n obj = AES.new(key, AES.MODE_ECB)\n ciphertext = obj.encrypt(packed)\n return ciphertext",
"def get_ot_api_key() -> str|None:\n # Reuse the API key's value if we've already obtained it.\n if settings.OT_API_KEY is not None:\n return settings.OT_API_KEY\n\n if settings.DEV_MODE or settings.UNIT_TEST_MODE:\n # In dev or unit test mode, pull the API key from a local file.\n try:\n with open(f'{settings.ROOT_DIR}/ot_api_key.txt', 'r') as f:\n settings.OT_API_KEY = f.read().strip()\n return settings.OT_API_KEY\n except:\n logging.info('No key found locally for the Origin Trials API.')\n return None\n else:\n # If in staging or prod, pull the API key from the project secrets.\n from google.cloud.secretmanager import SecretManagerServiceClient\n client = SecretManagerServiceClient()\n name = (f'{client.secret_path(settings.APP_ID, \"OT_API_KEY\")}'\n '/versions/latest')\n response = client.access_secret_version(request={'name': name})\n if response:\n settings.OT_API_KEY = response.payload.data.decode(\"UTF-8\")\n return settings.OT_API_KEY\n return None",
"def version():\n with settings(hide('running', 'warnings'), warn_only=True):\n res = local('vagrant --version', capture=True)\n if res.failed:\n return None\n line = res.splitlines()[-1]\n version = re.match(r'Vagrant (?:v(?:ersion )?)?(.*)', line).group(1)\n return tuple(_to_int(part) for part in version.split('.'))",
"def check_otp(email, otp_code):\n\tprint \"Inside check_otp\"\n\totp_key, qrcode_data = get_otp_key(email)\n\tprint \"DEBUG qrcode_data: \", qrcode_data\n\ttotp = pyotp.TOTP(otp_key)\n\n\tprint \"otp_code = \", otp_code\n\tprint \"otp_key = \", otp_key\n\tprint \"totp.now() = \", totp.now()\n\tprint \"TOTP provisioning_uri = \", totp.provisioning_uri(email) \n\n\tif totp.verify(otp_code):\n\t\tprint \"totp.verify() = True\"\n\t\treturn True\n\tprint \"totp.verify() = False\"\n\treturn False",
"def yubikey_public_id(otp: str) -> str:\n\n return otp[:12]",
"def generate_verification_code():\n new_ver_code = str(random.randint(1000000, 9999999))\n return new_ver_code",
"def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')",
"def use_mfa_secret_from_vault(\n self, vault_name: str, vault_key: str, mode: OTPMode = OTPMode.TIME\n ):\n secrets = Vault().get_secret(vault_name)\n if mode == OTPMode.TIME:\n self.set_time_based_otp(secrets[vault_key])\n elif mode == OTPMode.COUNTER:\n self.set_counter_based_otp(secrets[vault_key])",
"def __get_version_seq_typing():\n\n try:\n cli = [\"seq_typing.py\", \"--version\"]\n p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)\n stdout = p.communicate()[0]\n\n version = stdout.splitlines()[0].split()[-1].decode(\"utf8\")\n except Exception as e:\n logger.debug(e)\n version = \"undefined\"\n\n return version",
"def get_default_os_version(self, nGuestOsType):\n\t\treturn call_sdk_function('PrlApi_GetDefaultOsVersion', nGuestOsType)",
"def get(self, request):\n\n email_phone = request.GET.get('email_phone')\n if email_phone is None:\n return unsuccessful_response(message='set phone or email into query param!', status=200)\n\n code = generate_otp()\n\n request_json = {\n 'email_phone': email_phone,\n 'code': code\n }\n\n otp_serialized = OtpSerializer(data=request_json)\n if not otp_serialized.is_valid():\n return validate_error(otp_serialized)\n otp_serialized.save()\n\n email_body = \"Hi there,You're almost set! Verify your email by enter this code: {0}\".format(code)\n send_email('Verification Code', email_body, email_phone)\n\n response_json = {\n 'status': True,\n 'message': 'otp successfully sent to user',\n 'data': {}\n }\n\n return Response(response_json, status=200)",
"def get_tgpio_version(self):\r\n return self._arm.get_tgpio_version()",
"def req_display_otp(self):\n\n ret = self.ui_auth.create_new_one_time_pwd()\n if ret is not None:\n self.error_msg_queue_list.append(ret)",
"def get_version():\n\n version_string = version_from_versioneer()\n\n if not version_string:\n version_string = version_from_pip()\n\n return version_string",
"def yubikey_otp_to_serial(otp: str) -> int | None:\n\n if not is_valid_yubikey_format(otp):\n return None\n\n token = 'cccc' + otp[:12]\n\n toggle = False\n keep = 0\n\n bytesarray = []\n\n for char in token:\n n = ALPHABET.index(char)\n\n toggle = not toggle\n\n if toggle:\n keep = n\n else:\n bytesarray.append((keep << 4) | n)\n\n value = 0\n\n # in Java, shifts on integers are masked with 0x1f using AND\n # https://docs.oracle.com/javase/specs/jls/se8/html/jls-15.html#jls-15.19\n mask_value = 0x1f\n\n for i in range(0, 8):\n shift = (4 - 1 - i) * 8\n value += (bytesarray[i] & 255) << (shift & mask_value)\n\n return value",
"def set_time_based_otp(self, otp_passcode: str):\n self._totp = TOTP(otp_passcode)",
"def get_version():\n click.echo(get_current_version_number())",
"async def request_apptoken(self) -> Optional[str]:\n # get fresh app token and activate it\n apptoken = await self.get_application_token_from_server()\n temptoken = await self.get_temp_token()\n if await self.activate_application_token(\n apptoken=apptoken, temptoken=temptoken\n ):\n return apptoken\n return None"
]
| [
"0.58393794",
"0.5716151",
"0.56288546",
"0.56154954",
"0.55863196",
"0.55287445",
"0.5509557",
"0.542242",
"0.53520805",
"0.5223635",
"0.52086926",
"0.5172018",
"0.5132177",
"0.50504476",
"0.5044505",
"0.50082654",
"0.50049675",
"0.4994672",
"0.49763277",
"0.49655774",
"0.4932501",
"0.49323663",
"0.49062273",
"0.49015445",
"0.49014023",
"0.48956951",
"0.48898444",
"0.48618314",
"0.48581883",
"0.4826583"
]
| 0.79360807 | 0 |
Load test config file data for use by various test cases. | def load_config_file(filename):
test_data_path = get_config_file_path(filename)
with open(test_data_path) as f:
test_data = f.read()
return test_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_test_data(self):\n self._save_test_data()",
"def test_load_from_file(self):\n cf = ConfigFile()\n cf.load_from_file(TestConfigFile.TEST_CONFIG)\n\n self.assertEqual(4, len(cf))\n self.assertEqual(cf[\"key1\"], \"val1\")\n self.assertEqual(cf[\"key2\"], \"val2\")\n self.assertEqual(cf[\"key3\"], \"val3\")\n self.assertEqual(cf[\"key4\"], \"val4\")",
"def setUp(self):\n with open(SRC_PATH + \"configs/etl_config.json\", \"r\") as f:\n self.config = json.loads(f.read())\n self.spark = SparkBuilder(\"test\").build_sc()\n self.test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../tests/test_data/')",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def setUp(self):\n self.test_data = self.read_data('test_data/clients.txt')",
"def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )",
"def load_config(test = False, config_file = \"config.json\"):\n\n global config\n with open(config_file, \"r\") as f:\n config = json.load(f)\n print(\"config:\", config)\n\n for key in config:\n # Check if every configuration is set\n if config[key]==\"\":\n print(\"Please complete the config.json first!\")\n sys.exit(1)\n else:\n config[\"default-k\"] = int(config[\"default-k\"])\n if test:\n config[\"default-suffix\"] = config[\"test-suffix\"]\n config[\"default-table\"] = \"knn_\"+config['test-suffix']\n config[\"data-width\"] = 3\n config[\"data-dir\"] += \"_test\"\n else:\n config[\"default-suffix\"] = config[\"suffix\"]\n config[\"default-table\"] = \"knn_\" + config[\"suffix\"]\n config[\"data-width\"] = int(config[\"data-width\"])\n\n print(\"Configuration Check success\")",
"def load_test_config(root_fs) -> Config:\n with root_fs.open('config.yaml') as fd:\n return Config.parse(fd)",
"def test_load_config_safe(self):\n self.__test_load_config_safe(\".scuba.yml\")",
"def setUp(self):\n\n self.test_data_path = 'testing/test_data/'",
"def _setup_configfiles(self, Testboard):\n\n # Delete all root files which are already in the directory\n root_files = glob.glob(Testboard.testdir+'/*.root')\n for f in root_files:\n os.remove(f)\n # Change testboard name\n\tif Testboard.DTB and os.path.isfile(Testboard.testdir + \"/tb\"):\n self._config_file_content_substitute(Testboard.testdir + \"/tb\", {\"id\":Testboard.address})\n else:\n self._config_file_content_substitute(Testboard.testdir + \"/configParameters.dat\", {\"testboardName\":Testboard.address})\n\n # Get test specific config parameters (if available)\n params = ()\n try:\n params = self.init.items(\"Test \" + self.test.testname)\n except:\n return\n for par in params:\n file = par[0]\n if '.cfg' in file:\n section,pair = par[1].split(':')\n key,value = pair.split('=')\n config_file = BetterConfigParser()\n config_file.read(Testboard.testdir + \"/\" + file)\n config_file.set(section,key,value)\n write_file = open(Testboard.testdir + \"/\" + file, 'write')\n config_file.write(write_file)\n write_file.close()\n continue\n # Check for valid keys that represent config files\n elif \"testParameters\" in file or \"dacParameters\" in file or \"configParameters\" in file:\n pass\n elif \"tbmParameters\" in file or \"tbParameters\" in file:\n pass\n else:\n continue\n\n encoded_keys = par[1].split(\",\")\n keys = {}\n for key in encoded_keys:\n key = key.split(\"=\", 2)\n if len(key) != 2:\n continue\n keys[key[0]] = key[1]\n if len(file) < 4 or file[-4:] != \".dat\":\n file += \".dat\"\n self._config_file_content_substitute(Testboard.testdir + \"/\" + file, keys)",
"def test_load_from_cache(self, mock_config_file):\n config_instance, working_dir = mock_config_file\n configuration = Configuration()\n\n assert configuration.config['core']['import_demo_on_first_login'] is False\n assert configuration.config['environment']['repo_url'] == \\\n [\"https://github.com/gigantum/base-images-testing.git\"]\n assert configuration.config['git']['working_directory'] == configuration.app_workdir",
"def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')",
"def load_config(self):\n pass",
"def get_test_config() -> Config:\n # overwrite some settings for unit tests\n args = dict(\n datapath=os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata')),\n debug=True\n )\n return Config(**args)",
"def load_testing_data(self) -> List[np.ndarray]:\n input_data = self._load_set(config.TEST_DIR, False)\n return input_data",
"def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')",
"def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)",
"def test_site_load_local_file(self):\n with patch('__builtin__.open', mock_open(read_data=self.test_config)) as mock_file:\n test_config = ef_site_config.EFSiteConfig().load_from_local_file()\n self.assertEqual(test_config[\"ENV_ACCOUNT_MAP\"][\"test\"], \"testaccount\")",
"def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()",
"def test_load(yaml_config_file):\n config = Config()\n config.load(PATH_FILE_CONFIG)\n assert config.backup_root_directory == yaml_config_file.backup\n assert config.docker_compose_wordpress_project_directory == yaml_config_file.docker_compose_wordpress_project",
"def configure_test(self, test, config_json):\n pass",
"def test_config_from_file(self):\n parser = Parser()\n args = parser.parser.parse_args(['-c'])\n if args.config:\n config = Config()\n config.config_file = \"./config\"\n config.config = test_config\n config.config_from_file()\n self.assertTrue(config.config)\n os.remove(config.config_file)",
"def _load_test_configs(filename, required_keys):\n # type: (str, List[str]) -> List[Dict]\n with open(filename, 'r') as f:\n tests = json.loads(f.read())\n _validate_test_configs(tests, filename, required_keys)\n return tests",
"def test_load_configuration_loads_main_file():\n config.load_configuration(main_configuration_path)\n assert config.get('test.nested.path.value') == 'test value'",
"def test_loads_a_config_file(self):\n from test.resources import config\n self.assertIsInstance(config, type(sys))\n self.assertIsNotNone(config.example)\n self.assertEqual(config.example.config_option, 'config-value')",
"def fixture_example_data():\n import_example_data()",
"def setUp(self):\n \n with open(\"config_script.json\", \"r\") as json_data:\n config_data = json.load(json_data)\n\n logging.basicConfig(\n filename=config_data[\"filename_logging\"], \n filemode=config_data[\"filemode_logging\"],\n level=config_data[\"level_logging\"], \n format=config_data[\"format_logging\"])\n\n self.database = DB_Worker() \n self.info_list = GoodInfoList()\n self.file_goods = FileWork()\n self.file_data = self.file_goods.select_path_file(\"test\")\n\n if len(self.file_data) > 0:\n self.info_list.get_from_file(self.file_data)",
"def load_data_conf(self):\n data_file = select_file(os.getcwd())\n if data_file is not None:\n self.load_tab(data_file)\n else:\n msg_window('please select valid data config file')",
"def setUpConfig(self):\n pass"
]
| [
"0.7346483",
"0.7241617",
"0.7041901",
"0.7021762",
"0.69928503",
"0.69685555",
"0.6907617",
"0.68597746",
"0.68017393",
"0.6799866",
"0.67664844",
"0.6723095",
"0.6710996",
"0.6676708",
"0.66698784",
"0.66493666",
"0.6597196",
"0.6592818",
"0.6589089",
"0.65828204",
"0.6575638",
"0.6573127",
"0.6568257",
"0.65601957",
"0.65467566",
"0.6536119",
"0.6530503",
"0.65253544",
"0.65152013",
"0.6502171"
]
| 0.7834851 | 0 |
Get the path to a config file under the "tests/config_files" directory, i.e., the directory containing self-signed certificates, configuration files, etc. that are used for various tests. | def get_config_file_path(filename):
# Use __file__ to derive a path relative to this module's location which points to the tests data directory.
relative_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "config_files"
)
return os.path.join(os.path.abspath(relative_path), filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path",
"def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()",
"def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME",
"def get_configuration_file():\n path = os.path.abspath(os.curdir)\n while path != os.sep:\n config_path = os.path.join(path, CONFIG_FILE_NAME)\n if os.path.exists(config_path):\n return config_path\n path = os.path.dirname(path)\n return None",
"def configPath(self):\n return os.path.dirname(__file__)",
"def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath",
"def get_config_file_path(config_file: str) -> str:\n\n if not isinstance(config_file, str):\n raise ValueError(\"value for 'config_file' of 'parse_ini' must be of type str\")\n\n if len(config_file) == 0:\n raise ValueError(f\"value for 'config_file' can't be empty\")\n\n base_dir = os.sep.join(__file__.split(os.sep)[0:-3])\n if config_file[0] != os.sep:\n config_file = f\"{base_dir}{os.sep}{config_file}\"\n\n return os.path.realpath(config_file)",
"def get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 'config.txt'))\n return scs_config_file",
"def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)",
"def getConfigFile(self):\n if not self.__args.configfile:\n msg = \"not set configfile\"\n self.__logger.error(msg)\n return \"\"\n cf = os.getcwd() + os.sep + self.__args.configfile\n if not os.path.exists(self.__args.configfile):\n msg = \"file \" + cf + \" not exist!\"\n self.__logger.error(msg)\n return \"\"\n return cf",
"def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None",
"def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')",
"def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)",
"def _build_config_file_path(cls, filename):\n if os.path.exists(filename):\n return filename\n res = os.path.join(os.path.dirname(__file__), '..', 'config', filename)\n if not os.path.exists(res):\n raise ValueError(\"requested config file %s does not exist!\" % filename)\n return res",
"def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file",
"def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname",
"def config_path_fixture(fixtures_dir: Path) -> Path:\n _file_path = fixtures_dir / \"config.json\"\n return _file_path",
"def _get_config_path(config_arg: Optional[str]) -> Path:\n if config_arg:\n config_file = Path(config_arg)\n elif os.environ.get(ENV_VAR_FOR_CONFIG_FILE_PATH):\n config_file = Path(os.environ[ENV_VAR_FOR_CONFIG_FILE_PATH])\n else:\n config_file = None\n\n if not config_file or not config_file.is_file():\n logging.fatal(f\"Config file not found: {config_file}\")\n sys.exit(1)\n return config_file",
"def _findconfigfile():\n\n # A ordered list of possible config files\n configfiles = [\"~/.githubhooksrc\",\n \"/etc/githubhooks\"]\n\n for configfile in configfiles:\n if os.path.isfile(os.path.expanduser(configfile)):\n return os.path.expanduser(configfile)\n\n # No valid config file found\n print \"ERROR: No valid config file found in any of the following locations:\"\n for configfile in configfiles:\n print \" - %s\" % configfile\n sys.exit(1)",
"def get_test_config_dir(*add_path):\n return os.path.join(os.path.dirname(__file__), \"testing_config\", *add_path)",
"def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')",
"def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")",
"def __setup_config_file_abspath():\n if \"APPDATA\" in os.environ:\n basedir = os.environ[\"APPDATA\"]\n elif \"HOME\" in os.environ:\n basedir = os.environ[\"HOME\"]\n else:\n raise AssertionError(\"APPDATA or HOME env vars must be defined \"\n \"to store config file\")\n abs_dir_path = os.path.join(\n basedir, TestManager.APPDATA_SUBDIRECTORY_NAME)\n os.makedirs(abs_dir_path, exist_ok=True, mode=0o660)\n return os.path.join(abs_dir_path, ConfigManager.CONFIG_FILE_NAME)",
"def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;",
"def config_file(self):\n return self[CONFIG_FILE_KEY]",
"def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)",
"def get_production_config_file_path(path: pathlib.Path) -> pathlib.Path:\n return get_production_config_dir_path(path) / \"config.py\"",
"def get_config_dir():\n return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))",
"def get_config(_config_file):\n ''' script absolute location '''\n abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\n if _config_file[0] not in ('/', '~'):\n if os.path.isfile(os.path.join(abs_path, _config_file)):\n config_path = os.path.join(abs_path, _config_file)\n else:\n raise IOError('Failed to find config file')\n else:\n if os.path.isfile(_config_file):\n config_path = _config_file\n else:\n raise IOError('Failed to find config file')\n\n with open(config_path) as cjson:\n config_data = json.load(cjson)\n # config must not be empty:\n if len(config_data) > 0:\n return config_data\n else:\n raise Exception('Failed to load config file')",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)"
]
| [
"0.80562377",
"0.8025532",
"0.7936335",
"0.7636513",
"0.759419",
"0.75863504",
"0.7586312",
"0.74758565",
"0.7409882",
"0.7362901",
"0.7351157",
"0.73110783",
"0.725994",
"0.725583",
"0.71291775",
"0.70951337",
"0.7093658",
"0.7087657",
"0.708164",
"0.7075927",
"0.70503074",
"0.70314413",
"0.7029806",
"0.70295566",
"0.696609",
"0.69528794",
"0.69497156",
"0.69491494",
"0.6933524",
"0.6923357"
]
| 0.8256649 | 0 |
Decode a newly generated root token via Vault CLI. | def decode_generated_root_token(encoded_token, otp):
command = ["vault"]
if vault_version_ge("0.9.6"):
# before Vault ~0.9.6, the generate-root command was the first positional argument
# afterwards, it was moved under the "operator" category
command.append("operator")
command.extend(
[
"generate-root",
"-address",
"https://127.0.0.1:8200",
"-tls-skip-verify",
"-decode",
encoded_token,
"-otp",
otp,
]
)
process = subprocess.Popen(
**get_popen_kwargs(args=command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
)
stdout, stderr = process.communicate()
logging.debug('decode_generated_root_token stdout: "%s"' % str(stdout))
if stderr != "":
logging.error("decode_generated_root_token stderr: %s" % stderr)
try:
# On the off chance VAULT_FORMAT=json or such is set in the test environment:
new_token = json.loads(stdout)["token"]
except ValueError:
new_token = stdout.replace("Root token:", "")
new_token = new_token.strip()
return new_token | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def meraki_vault_r_secret(mount, path):\n read_secret_result = client.secrets.kv.v1.read_secret(path=meraki_vault_path, mount_point=vault_mount_point)\n api_token = read_secret_result['data']['token']\n return api_token",
"def vault_auth():\n # Check if vault is sealed\n if client.sys.is_sealed() == True:\n # if the vault is SEALED, UNSEAL IT using the unseal_key\n unseal_response = client.sys.submit_unseal_key(vault_unseal_key)\n\n # [Uncomment line below only if you want to generate a new API token for the application your ROOT admin registered]\n # Keep in mind you need Application Role ID and Secret ID\n client_data = client.auth_approle(vault_role_id, vault_secret_id)\n # print(client_data['auth']['client_token'])\n\n # Authenticate against the VAULT using the new CLIENT TOKEN conatained in the new dict object\n client.token = client_data['auth']['client_token']",
"def get_token():\n global vault_token\n global vault_token_time\n current_app.logger.info('************* GET TOKEN METHOD **************')\n return 'root'\n if validate_token():\n vault_duration = None\n try:\n auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN')\n current_app.logger.info('*********** Auth Type: ' + auth_type)\n if auth_type == 'TOKEN':\n vault_token = current_app.config.get('VAULT_AUTH_TOKEN')\n elif auth_type == 'USERPASS':\n vault_token, vault_duration = authenticate_userpass()\n elif auth_type == 'LDAP':\n vault_token, vault_duration = authenticate_ldap()\n elif auth_type == 'CERT':\n vault_token, vault_duration = authenticate_certificate()\n elif auth_type == 'GCP':\n vault_token, vault_duration = authenticate_gcp()\n elif auth_type == 'APPROLE':\n vault_token, vault_duration = authenticate_approle()\n else:\n current_app.logger.info('Vault: VAULT_AUTH not configured correctly.')\n raise RuntimeError('Vault: VAULT_AUTH not configured correctly.')\n if vault_duration is not None:\n vault_token_time = datetime.datetime.now() + datetime.timedelta(seconds=int(vault_duration))\n \n current_app.logger.info('*********** TOKEN: ' + vault_token) \n\n except ConnectionError as ConnError:\n current_app.logger.info('Vault: There was an error while connecting to Vault server.')\n raise ConnError\n\n return vault_token",
"def test_decode_token():\n pass",
"def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])",
"def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token",
"def main():\t\n\t# read in short term auth\n\tf = open('./input.txt', 'r')\n\tshort_term_auth = f.read()\n\n\tlong_term_access_token = long_term_token(short_term_auth)\n\tprint(long_term_access_token)\n\tprint('run program like normal now')",
"def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])",
"def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload",
"def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token",
"def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])",
"def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)",
"def deserialize_tokens():\n\ttry:\n\t\twith open(config.TOKENPATH, \"r+\") as f:\n\t\t\tcontext = f.read()\n\t\t\tres = eval(context)\n\t\t\t# load into memory\n\t\t\treturn res[\"access_token\"], res[\"refresh_token\"]\n\texcept:\n\t\t# unexcept token format\n\t\tfrom common import ApplicationException\n\t\traise ApplicationException(\"authorization file is broken, please run init\")",
"def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"",
"def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n traceback.print_exc(file=sys.stderr)",
"def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])",
"def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"",
"def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"",
"def decrypt(key, input_token):\n try:\n target = decrypt_string(input_token.strip(), key=key)\n except InvalidToken:\n click.echo('Error: Token is invalid')\n sys.exit(1)\n\n click.echo('The decrypted result is: ', nl=False)\n click.echo(click.style(target, fg='blue'))",
"def test_TreebankTokenReader():",
"def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, os.environ.get('SECRET', 'test'))\n return \"\", payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\", None\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\", None",
"def decode_request(self, data):\n return decode_jwt(data[\"jwt\"], data[\"cert_name\"], self.node.node_name, self.node.id)",
"def decode_auth_token(auth_token):\n if len(auth_token) != 139:\n return \"Invalid token. Please log in again.\"\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'",
"def decode_token_appengine(credentials, token, verify=False):\n return _decode_token(credentials, token, False)",
"def token(uncapped_token: Contract):\n return uncapped_token",
"def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken",
"def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded",
"def decode_token(self, token: str, max_age: int) -> Optional[object]:\n try:\n return self.serializer.loads(token, max_age)\n except (BadSignature, SignatureExpired) as e:\n return None",
"def decode(self, *args, **kwargs):\n return self.tokenizer.decode(*args, **kwargs)",
"def decode(self, *args, **kwargs):\n return self.tokenizer.decode(*args, **kwargs)"
]
| [
"0.59399545",
"0.5710058",
"0.56865376",
"0.5683078",
"0.5500785",
"0.5490273",
"0.5472639",
"0.5260887",
"0.5217738",
"0.521536",
"0.5193939",
"0.5115839",
"0.51024985",
"0.5093198",
"0.50621367",
"0.5052655",
"0.5043405",
"0.5037708",
"0.503704",
"0.49572414",
"0.49545163",
"0.49511185",
"0.4947781",
"0.49454632",
"0.49429142",
"0.49191523",
"0.49088123",
"0.49029386",
"0.4901814",
"0.4901814"
]
| 0.75455594 | 0 |
Helper method to add `encoding='utf-8'` to subprocess.Popen. | def get_popen_kwargs(**popen_kwargs):
popen_kwargs["encoding"] = "utf-8"
return popen_kwargs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_terminal_encoding(encoding='utf_8'):\n sys.stdin = codecs.getreader(encoding)(sys.stdin)\n sys.stdout = codecs.getwriter(encoding)(sys.stdout)\n sys.stderr = codecs.getwriter(encoding)(sys.stderr)",
"def defaultProcessOutputEncodingDecider(context, executable, **forfutureuse):\n\treturn __DEFAULT_PROCESS_ENCODING # stdout encoding will be None unless in a terminal",
"def exec_command(*cmdargs, **kwargs):\n encoding = kwargs.pop('encoding', None)\n out = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, **kwargs).communicate()[0]\n # Python 3 returns stdout/stderr as a byte array NOT as string.\n # Thus we need to convert that to proper encoding.\n\n if is_py3:\n if encoding:\n out = out.decode(encoding)\n else:\n # If no encoding is given, assume we're reading filenames from stdout\n # only because it's the common case.\n out = os.fsdecode(out)\n\n return out",
"def set_encoding(cls, encoding):\n import sys\n default_stdout = sys.stdout\n default_stderr = sys.stderr\n #importlib.reload(sys)\n #sys.setdefaultencoding(encoding)\n sys.stdout = default_stdout\n sys.stderr = default_stderr",
"def exec_command_rc(*cmdargs, **kwargs):\n # 'encoding' keyword is not supported for 'subprocess.call'.\n # Remove it thus from kwargs.\n if 'encoding' in kwargs:\n kwargs.pop('encoding')\n return subprocess.call(cmdargs, **kwargs)",
"def decodeCommandLine(self, cmdline):\n if isinstance(cmdline, unicode):\n return cmdline\n codec = getattr(sys.stdin, 'encoding', None) or sys.getdefaultencoding()\n return unicode(cmdline, codec)",
"def test_sendUnicodeCommand(self):\n self.p.sendCommand(\"CMD\", (\"param\\u00b9\", \"param\\u00b2\"))\n self.check(b\"CMD param\\xc2\\xb9 param\\xc2\\xb2\\r\\n\")",
"def encoding(options):\n pass",
"def setencoding():\r\n encoding = \"ascii\" # Default value set by _PyUnicode_Init()\r\n if 0:\r\n # Enable to support locale aware default string encodings.\r\n import locale\r\n loc = locale.getdefaultlocale()\r\n if loc[1]:\r\n encoding = loc[1]\r\n if 0:\r\n # Enable to switch off string to Unicode coercion and implicit\r\n # Unicode to string conversion.\r\n encoding = \"undefined\"\r\n if encoding != \"ascii\":\r\n # On Non-Unicode builds this will raise an AttributeError...\r\n sys.setdefaultencoding(encoding) # Needs Python Unicode build !\r",
"def _encode(line, output_file, encoding=None):\n # Convert string to Unicode\n if not isinstance(line, text_type):\n try:\n line = text_type(line)\n except UnicodeDecodeError:\n line = b(line).decode('utf-8')\n\n # Choose output encoding\n if not encoding:\n # choose between terminal's and system's preferred encodings\n if output_file.isatty():\n encoding = getattr(output_file, 'encoding', None)\n encoding = encoding or locale.getpreferredencoding()\n\n # Convert string from Unicode to the output encoding\n return line.encode(encoding)",
"def exec_command_all(*cmdargs, **kwargs):\n proc = subprocess.Popen(cmdargs, bufsize=-1, # Default OS buffer size.\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n # Waits for subprocess to complete.\n out, err = proc.communicate()\n # Python 3 returns stdout/stderr as a byte array NOT as string.\n # Thus we need to convert that to proper encoding.\n if is_py3:\n encoding = kwargs.get('encoding')\n if encoding:\n out = out.decode(encoding)\n err = err.decode(encoding)\n else:\n # If no encoding is given, assume we're reading filenames from stdout\n # only because it's the common case.\n out = os.fsdecode(out)\n err = os.fsdecode(err)\n\n\n return proc.returncode, out, err",
"def __init__(self, mock_responses=None):\n super(CLIHelper, self).__init__()\n self.mock_responses = mock_responses\n self.preferred_encoding = locale.getpreferredencoding()",
"def set_utf8_locale():\n lang, encoding = locale.getlocale()\n if encoding != 'UTF-8':\n locale.setlocale(locale.LC_CTYPE, (lang, 'UTF-8'))",
"def fix_unicode_encode_error(cls, safe=False):\n from .path9 import Path\n from .file9 import File\n from .print9 import Print\n lockfile = Path.combine(Path.commands(), \".windows_codepage_lock\")\n if File.exist(lockfile):\n cp = cls.get_cmd_code_page()\n if not safe:\n raise IOError(f\"Cannot use codepage 65001, continue using {cp}, \"\n f\"you can set other by Windows.set_cmd_code_page\")\n return cp\n previous_codepage = cls.get_cmd_code_page()\n try:\n if previous_codepage != 65001:\n cls.set_cmd_code_page(65001)\n import os\n with Print.s_print_lock:\n command = r'''python3 -c \"print('йЙ\\r', end='')\"'''\n Print(\"йЙ\\r\", end=\"\")\n Print(\" \\r\", end=\"\")\n os.system(command)\n Print(\" \\r\", end=\"\")\n return cls.get_cmd_code_page()\n except Exception:\n if int(previous_codepage) >= 0:\n if previous_codepage != 65001:\n cls.set_cmd_code_page(previous_codepage)\n else:\n cls.set_cmd_code_page(437)\n Print(\" \\r\", end=\"\")\n from .os9 import OS\n OS._cyrillic_support = False\n File.create(lockfile)\n if not safe:\n raise IOError(f\"Cannot use codepage 65001, returning to {previous_codepage}, \"\n f\"you can set other by Windows.set_cmd_code_page\")\n return previous_codepage",
"def _run_cmd(args, cwd):\n p = subprocess.Popen(\n args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=cwd)\n streams = tuple(s.decode('latin1').strip() for s in p.communicate())\n for stream_content in streams:\n print(stream_content)\n return (streams) + (p.returncode,)",
"def popen_text(cmd):\n return Popen(cmd, stdout=PIPE, universal_newlines=True)",
"def test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))",
"def unicode2utf8(arg):\n\n try:\n if isinstance(arg, unicode):\n return arg.encode('utf-8')\n except NameError:\n pass # Python 3\n return arg",
"def console_encode(s):\n if self.encoding.lower() != \"utf-8\":\n return s.encode(self.encoding, \"replace\").decode(self.encoding)\n return s",
"def popen(self, args, bufsize=0, stdin=None, stdout=None, stderr=None, cwd=None, env=None, tty=False, compress=False): \n return subprocess.Popen(args, bufsize=bufsize, cwd=cwd, env=env, stdin=stdin, stdout=stdout, stderr=stderr)",
"def force_utf8(text):\n if isinstance(text, binary_type):\n return text\n else:\n return text.encode('utf-8')",
"def run_command(cmd):\n if env.PY2 and isinstance(cmd, unicode):\n cmd = cmd.encode(sys.getfilesystemencoding())\n\n # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of\n # the subprocess is set incorrectly to ascii. Use an environment variable\n # to force the encoding to be the same as ours.\n sub_env = dict(os.environ)\n encoding = output_encoding()\n if encoding:\n sub_env['PYTHONIOENCODING'] = encoding\n\n proc = subprocess.Popen(\n cmd,\n shell=True,\n env=sub_env,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output, _ = proc.communicate()\n status = proc.returncode\n\n # Get the output, and canonicalize it to strings with newlines.\n if not isinstance(output, str):\n output = output.decode(output_encoding())\n output = output.replace('\\r', '')\n\n return status, output",
"def cmd(args):\n return subprocess.check_output(args).decode(\"utf-8\")",
"def command(cmd: list, stdin: str):\n proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE)\n out, err = proc.communicate(stdin.encode(\"utf-8\"))\n exit = proc.wait()\n return out.decode(\"utf-8\")",
"def make_utf8_env():\n global _CACHED_ENV\n if not _CACHED_ENV:\n # LANG are in the form of <language>[.<encoding>[@<modifier>]]\n # We want to replace the \"encoding\" part with UTF-8\n lang_re = re.compile('\\.([^@]*)')\n\n env = os.environ.copy()\n lang = env.get('LANG', DEFAULT_LANG)\n if lang_re.search(lang):\n lang = lang_re.sub('.UTF-8', lang)\n else:\n lang = DEFAULT_LANG\n\n env['LANG'] = lang\n _CACHED_ENV = env\n return _CACHED_ENV",
"def set_encoding(self, encoding, asset=None):\n self._set_property('pc:encoding', encoding, asset)",
"def set_encoding(self, encoding):\n\n self._encoding = encoding",
"def run_cmd( command ):\n return subprocess.check_output( command ).decode( \"utf-8\" )",
"def subprocess_attach_stdin(cmd, shell=False):\n # type: (str, bool) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, stdin=subprocess.PIPE)",
"def _get_environ(args=None):\n if args is not None:\n if not isinstance(args, list):\n args = [args]\n args += ['>', 'nul', '&&']\n args += ['set']\n\n popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = popen.communicate()\n if popen.returncode != 0:\n raise subprocess.CalledProcessError(popen.returncode, ' '.join(args),\n output=stderr.decode('cp866'))\n result = {}\n for line in stdout.decode('cp866').splitlines():\n key, value = line.strip().split('=', 1)\n try:\n result[str(key).upper()] = str(value)\n except UnicodeEncodeError:\n pass\n return result"
]
| [
"0.64557153",
"0.6064261",
"0.5924003",
"0.58920264",
"0.5797352",
"0.57451856",
"0.5647091",
"0.5566803",
"0.5552143",
"0.54971397",
"0.5480613",
"0.5451589",
"0.5397502",
"0.53734374",
"0.5352288",
"0.5268057",
"0.5194355",
"0.51533914",
"0.5115416",
"0.5112865",
"0.50740886",
"0.50405335",
"0.5006767",
"0.50045633",
"0.4996671",
"0.4969082",
"0.49661952",
"0.4965011",
"0.49637732",
"0.49446452"
]
| 0.6455326 | 1 |
Input file_path, save model weights into a file of given format. | def save_weights(self, file_path, format=None):
_save_weights(self, file_path, format) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, weights_file):\r\n \r\n self.model.save_weights(weights_file)",
"def save_weights_file(self, file_path, file_name):\n\n # Join the path with the file name and append the extension (h5)\n path = join(file_path, \"{}.h5\".format(file_name))\n\n # Store the weights\n self._cnn_model.save_weights(path)",
"def save_weights(self, file):\n self.model.save_weights(file)\n return",
"def save_weights(self, filepath, overwrite=True):\n self.model.save_weights(filepath=filepath, overwrite=overwrite,\n save_format=None)",
"def save_model(self, file_name):\n\t\tself.model.save_weights(file_name)",
"def save_model_weights(self, filename):\n self.model.save_weights(filename)",
"def save(self, filename):\n self.model.save_weights(filename)",
"def save_weights(self, path):\n Path(path).mkdir(parents=True, exist_ok=True)\n self.model.save_weights(path)",
"def save_weights(self, the_path):\n torch.save(self.model.state_dict(), the_path)",
"def save_weights(self, path: str):\n torch.save(self.state_dict(), path)",
"def save_weights(self, path: str):\n torch.save(self.state_dict(), path)",
"def save_weights_and_monitoring_data(self, file_path):\n self.model.save_weights(file_path + '_' + self.energy_system.electricity_pricing_model + '.h5')\n print(\"\\nsaved weights of the policy network to disk.\\n\")\n\n file_name_end = self.energy_system.electricity_pricing_model + '.pkl'\n with open('./stored_data/monitoring/policy_network/training_average_electricity_cost_in_euros_' + file_name_end, 'wb') as f:\n pickle.dump(self.training_average_electricity_cost_in_euros, f)\n with open('./stored_data/monitoring/policy_network/training_average_reward_' + file_name_end, 'wb') as f:\n pickle.dump(self.training_average_reward, f)\n print(\"saved policy monitoring data to disk.\\n\")",
"def save(self, filename, overwrite=False):\n self.model.save_weights(filename, overwrite=overwrite)",
"def save_model_weight(model, filename):\n weights = model.get_weights()\n with open (filename, 'wb') as obj:\n pkl.dump(weights, obj, pkl.HIGHEST_PROTOCOL)",
"def save_model_weights(model, output_path):\n output_dir = os.path.dirname(output_path)\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n \n model.save_weights(output_path)",
"def save(self, file_path):\n self.model.save(file_path)",
"def save(self, file_path):\n self.model.save(file_path)",
"def save(self, file_path):\n self.model.save(file_path)",
"def write_keras_model_to_file(keras_model, file_path):\n keras_model.save(file_path)",
"def save(self, weight_path, vocab_path, output_path, binary=False):\n w2v_save_model(weight_path.encode(\"ascii\"),\n vocab_path.encode(\"ascii\"), \n output_path.encode(\"ascii\"),\n self.minCount, binary)\n excpt = check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])",
"def write_workflow_file(workflow_model, file_path):\n WorkflowWriter().write(workflow_model, file_path)",
"def save_model(self, path='./model_checkpoint', name='tf_model'):\n json_config = self._model.to_json()\n with open(os.path.join(path, name + '.json'), 'w') as json_file:\n json_file.write(json_config)\n weights_path = os.path.join(path, name + '_weights.h5')\n self._model.save_weights(weights_path)",
"def save_model(self, dir_path):\n np.savez(\n dir_path + os.path.sep + \"weights.npz\", W1=self.W1, W2=self.W2, W3=self.W3\n )\n np.savez(\n dir_path + os.path.sep + \"biases.npz\", b1=self.b1, b2=self.b2, b3=self.b3\n )",
"def save_weights(self, filepath, overwrite=True):\n if h5py is None:\n raise ImportError('`save_weights` requires h5py.')\n # If file exists and should not be overwritten:\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n with h5py.File(filepath, 'w') as f:\n save_weights_to_hdf5_group(f, self.layers)",
"def save(self, fpath):\n logging.info(\"Saving agent with filepath={}\".format(fpath))\n self.agent.save_weights(fpath, overwrite=True)",
"def generate_weights(self, weights_path: str) -> None:\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n weights = self.weights_for_doc(doc_path)\n save_path = f\"{weights_path}/{file}\"\n np.save(save_path, weights)",
"def save(self, model_path: str) -> None:\n metadata_string = json.dumps({ \"classes\": self.classes })\n with open(os.path.join(model_path, \"metadata.json\"), \"w\") as metadata_file:\n metadata_file.write(metadata_string)\n with self.graph.as_default():\n with self.session.as_default():\n self.model.save_weights(os.path.join(model_path, \"weights.h5\"))",
"def saveModel(self, modelFilepath):\n\n if self.memory is None:\n raise Exception('Memory not constructed - cannot save model')\n \n saveDict = {\n 'memorySize' : self.memorySize,\n 'windowSize' : self.windowSize,\n 'optimizer' : self.optimizer,\n 'inputDimension' : self.inputDimension,\n 'encoderStateSize' : self.encoderStateSize,\n 'lstmStateSize' : self.lstmStateSize,\n 'memory' : self.memory,\n 'q' : self.q,\n 'gruEncoder' : self.gruEncoder.get_weights(),\n 'lstm' : self.lstm.get_weights(),\n 'W' : self.W.read_value(),\n 'A' : self.memOut.A.read_value(),\n 'b' : self.b.read_value()\n }\n\n fl = open(modelFilepath, 'wb')\n pickle.dump(saveDict, fl)\n fl.close()",
"def save_model_weights(model, filename, verbose=1, cp_folder=\"\"):\n if verbose:\n print(f\"\\n -> Saving weights to {os.path.join(cp_folder, filename)}\\n\")\n torch.save(model.state_dict(), os.path.join(cp_folder, filename))",
"def save_weight(model):\n file = h5py.File(WEIGHT_SAVE, 'w')\n weight = model.get_weights()\n for i in range(len(weight)):\n file.create_dataset('weight' + str(i), data=weight[i])\n file.close()"
]
| [
"0.7974793",
"0.79393816",
"0.7818803",
"0.76390755",
"0.748463",
"0.7457383",
"0.7365727",
"0.7359829",
"0.7352166",
"0.7283044",
"0.7283044",
"0.7068052",
"0.7067362",
"0.70322907",
"0.7007359",
"0.7005004",
"0.7005004",
"0.7005004",
"0.69427085",
"0.6855376",
"0.68439704",
"0.6841068",
"0.68371755",
"0.68157053",
"0.6747557",
"0.6736263",
"0.6726216",
"0.6722221",
"0.6713208",
"0.66978735"
]
| 0.8305987 | 0 |
Add a LayerNode for this layer given input_tensors, output_tensors. | def _add_node(self, input_tensors, output_tensors):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_layer(inputs, in_size, out_size, n_layer, activation_function=None, ):\r\n layer_name = \"layer%s\" % n_layer\r\n with tf.name_scope(layer_name):\r\n with tf.name_scope(\"Weights\"):\r\n Weights = tf.Variable(tf.random_normal([in_size, out_size]), name=\"W\")\r\n tf.summary.histogram(layer_name + \"/Weight\", Weights)\r\n with tf.name_scope(\"Biases\"):\r\n biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name=\"b\")\r\n tf.summary.histogram(layer_name + \"/Biases\", biases)\r\n with tf.name_scope(\"Wx_plus_b\"):\r\n Wx_plus_b = tf.matmul(inputs, Weights) + biases\r\n if activation_function is None:\r\n outputs = Wx_plus_b\r\n else:\r\n outputs = activation_function(Wx_plus_b, )\r\n tf.summary.histogram(layer_name + \"/output\", outputs)\r\n return outputs",
"def add_layer(inputs, in_size, out_size, activation_function=None):\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n baises = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n Wx_plus_b = tf.matmul(inputs, Weights) + baises\n if activation_function is None:\n out_puts = Wx_plus_b\n else:\n out_puts = activation_function(Wx_plus_b)\n return out_puts",
"def add_layer(self, layer):\r\n\r\n if not isinstance(layer, Layer):\r\n raise ValueError('layer must be a Layer object')\r\n\r\n # the first layer added define the input dimension of the neural network\r\n if len(self.layers) == 0:\r\n self.input_dimension = layer.get_num_input()\r\n self.topology.append(layer.get_num_input())\r\n # the new layer must have an input dimension equal\r\n # to the number of units in the last layer added\r\n elif layer.get_num_input() != self.output_dimension:\r\n raise ValueError(\r\n \"The number of input for this new layer must be equal to previous layer\")\r\n\r\n self.topology.append(layer.get_num_unit())\r\n\r\n # the last layer inserted define the output dimension\r\n self.output_dimension = layer.get_num_unit()\r\n\r\n self.layers.append(layer)",
"def add(self, layer):\n if len(self.layers) == 0:\n if not layer.n_inputs:\n raise Exception('Need to have n_inputs for layer.')\n else:\n layer.n_inputs = self.layers[-1].units\n self.layers.append(layer)",
"def add_layer(self, nodes_labels, connection_func=None, inter_connection_func=None, layer_params=None):\n nodes_indices, edges = self.add_nodes(nodes_labels), []\n if len(self.layers) > 0 and connection_func is not None:\n edges += [(i1, i2) for i1, i2 in itt.product(self.layers[-1][\"nodes\"], nodes_indices)\n if connection_func(self.node_labels[i1], self.node_labels[i2])]\n if inter_connection_func is not None:\n edges += [(i1, i2) for i1, i2 in itt.permutations(nodes_indices, r=2)\n if inter_connection_func(self.node_labels[i1], self.node_labels[i2])]\n self.layers.append({\"nodes\": nodes_indices, \"nb\": len(self.layers), **layer_params})\n self.graph.add_edges_from(edges)",
"def add_layer(self, func, *args, **kwargs):\n scope_name = self.name + '_layer' + str(self.layer_count)\n with tf.variable_scope(scope_name, reuse=self.reuse):\n self.last_layer = func(self.last_layer, *args, **kwargs)\n self.layer_seq += [self.last_layer]\n pass\n self.layer_count += 1\n return self.last_layer",
"def _add_layer(self, layer_dict, layer_name, input_layers, merge_mode=None, share_params_with=None):\n util.colorprint(layer_name, 'teal')\n \n layer_dict = dict(layer_dict)\n util.colorprint(layer_dict, 'red')\n \n if share_params_with is not None:\n print \"Warning: ignoring share_params_with\"\n \n layer_options = layer_dict[\"options\"]\n layer=None\n if layer_dict[\"type\"]==\"conv2d\":\n #TODO: remove below\n nb_filter, nb_row, nb_col = 3,3,3\n layer = keras.layers.convolutional.Convolution2D(nb_filter, nb_row, nb_col, **layer_options)\n elif layer_dict[\"type\"]==\"dense\":\n dim = layer_dict[\"output_dim\"]\n # del layer_options[\"output_dim\"]\n layer = keras.layers.core.Dense(dim, **layer_options) \n else:\n print \"Ursol Major\"\n RaiseError()\n # TODO: one of the layers is a string\n if isinstance(input_layers, list):\n #this means that there is input from a loop to this layer\n self.model.add_node(layer, name=layer_name, inputs=input_layers, merge_mode=merge_mode)\n else:\n self.model.add_node(layer, name=layer_name, input=input_layers)\n\n return layer_name",
"def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)",
"def add_layer(self, layer):\n self.__layers.append(layer)",
"def new_layer(self, nodes, inputs, alpha=0.1):\n weights = [[random.uniform(-0.1, 0.1) for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.append(Layer(weights, alphas))",
"def construct_layer(\n self,\n input_layer: \"NeuralNetworkLayer\",\n output_layer: \"NeuralNetworkLayer\",\n **kwargs\n ):\n # Add Nodes\n for node_number in range(self.num_nodes):\n node_object = Circle(\n radius=self.node_radius,\n color=self.node_color,\n stroke_width=self.node_stroke_width,\n )\n self.node_group.add(node_object)\n # Space the nodes\n # Assumes Vertical orientation\n for node_index, node_object in enumerate(self.node_group):\n location = node_index * self.node_spacing\n node_object.move_to([0, location, 0])\n # Create Surrounding Rectangle\n self.surrounding_rectangle = SurroundingRectangle(\n self.node_group,\n color=self.rectangle_color,\n fill_color=self.rectangle_fill_color,\n fill_opacity=1.0,\n buff=self.layer_buffer,\n stroke_width=self.rectangle_stroke_width,\n )\n self.surrounding_rectangle.set_z_index(1)\n # Add the objects to the class\n self.add(self.surrounding_rectangle, self.node_group)\n\n self.construct_activation_function()\n super().construct_layer(input_layer, output_layer, **kwargs)",
"def _add_layer(self, layer_dict, name, id, input_layers, merge_mode=None):\n \n layer_options = layer_dict[\"keras_options\"]\n layer=None\n if layer_dict[\"type\"]==\"conv2d\":\n #TODO: remove below\n nb_filter, nb_row, nb_col = 3,3,3\n layer = keras.layers.convolutional.Convolution2D(nb_filter, nb_row, nb_col, **layer_options)\n elif layer_dict[\"type\"]==\"dense\":\n layer = keras.layers.core.Dense(**layer_options) \n else:\n print \"ur sol\"\n sys.exit(0)\n # TODO: one of the layers is a string\n if isinstance(input_layers, list):\n #this means that there is input from a loop to this layer\n merged_name = \" * \".join(layers_to_merge) #TODO-this assumes all merges are multiplication \n self.model.add_node(layer, name=layer_name, inputs=input_layers, merge_mode=merge_mode)\n else:\n layer_name = self._label(name, id)\n self.model.add_node(layer, name=layer_name, input=input_layers)\n\n return layer_name",
"def add_dense_layer(self, input_layer, hyperparams, func='relu', bn=True):\n W = self._weight_variable(shape=hyperparams[0])\n b = self._bias_variable(shape=hyperparams[1])\n x_ravel = tf.reshape(input_layer, shape=[-1, hyperparams[0][0]])\n if bn:\n return self._batch_normalize(\n self._nonlinearity(func)(tf.matmul(x_ravel, W) + b))\n elif not bn:\n return self._nonlinearity(func)(tf.matmul(x_ravel, W) + b)",
"def add_layer(self, num_nodes, transfer_function=\"Linear\"):\r\n self.weights.append(np.random.randn(self.input_dimension, num_nodes))\r\n self.biases.append(np.random.randn(num_nodes))\r\n self.transferfunction.append(transfer_function)\r\n self.input_dimension = num_nodes",
"def add_layer(self, in_dim, out_dim, activation: Module or None, i=None):\n i = i or len(self.modules)\n self.modules.insert(i, Linear(in_dim=in_dim, out_dim=out_dim, activation=activation))",
"def add_layer(self, layer):\n\n self._layers.append(layer)",
"def add_layer(self, layer_pos, lay_dims, init_w_function, init_a_function, dropout, drop_prob, batch_norm):\n\n # If not within feasible bounds, return\n if layer_pos < 0 or layer_pos >= self.number_hidden_layers:\n return\n\n # We create the new layer and add it to the network descriptor\n self.dims = np.insert(self.dims, layer_pos, lay_dims)\n self.init_functions = np.insert(self.init_functions, layer_pos, init_w_function)\n self.act_functions = np.insert(self.act_functions, layer_pos, init_a_function)\n\n # Finally the number of hidden layers is updated\n self.number_hidden_layers = self.number_hidden_layers + 1\n if not (isinstance(self.batch_norm, tuple) or self.batch_norm.shape[0] == 0):\n self.batch_norm = np.insert(self.batch_norm, layer_pos, batch_norm)\n if not (isinstance(self.dropout, tuple) or self.dropout.shape[0] == 0):\n self.dropout = np.insert(self.dropout, layer_pos, dropout)\n self.dropout_probs = np.insert(self.dropout_probs, layer_pos, drop_prob)",
"def add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)",
"def addLayer(self, layer):\n self.layers.append(layer)",
"def _add_replacement_layer(layer_node):\n self._config['layers'].append(layer_node.layer)\n layer_name = layer_node.layer['config']['name']\n # TODO(b/184603494): Remove weight map structure from model_transformer.\n if layer_node.weights:\n self._layer_weights_map[layer_name] = layer_node.weights\n if layer_node.names_and_weights:\n self._layer_names_and_weights_map[\n layer_name] = layer_node.names_and_weights\n if layer_node.metadata:\n self._layer_metadata_map[layer_name] = layer_node.metadata\n if self.candidate_layers:\n self.candidate_layers.add(layer_name)\n\n for input_layer in layer_node.input_layers:\n _add_replacement_layer(input_layer)",
"def fc_layer(input_layer, nodes_input, nodes_output, name_scope, final_layer=False):\n W = tf.get_variable(name=name_scope + 'W', shape=[nodes_input, nodes_output],\n initializer=tf.truncated_normal_initializer())\n b = tf.get_variable(name=name_scope + 'b', shape=[nodes_output], initializer=tf.constant_initializer(0))\n\n if final_layer:\n return tf.matmul(input_layer, W) + b # no activation\n else:\n return tf.nn.relu(tf.matmul(input_layer, W) + b) # relu activation\n # return tf.sigmoid(tf.matmul(input_layer, W) + b) # sigmoid activation",
"def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)",
"def add(self, layer):\n layer.set_dtype(self.dtype)\n self.layers = np.append(self.layers, layer)",
"def add_block(self, _input, growth_rate, layers_per_block):\n output = _input\n for layer in range(layers_per_block):\n with tf.variable_scope(\"layer_%d\" % layer):\n output = self.add_internal_layer(output, growth_rate)\n return output",
"def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer",
"def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass",
"def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass",
"def add_read_out_layer(self, input_layer):\n input_layer_m = int(input_layer.get_shape()[1])\n W = self._weight_variable(shape=[input_layer_m, self._n_class])\n b = self._bias_variable(shape=[self._n_class])\n\n return tf.matmul(input_layer, W) + b",
"def _add_output(self, node_entries):\n\n for node_entry in node_entries:\n for node_type, output_name in zip(node_entry[\"types\"], node_entry[\"output_names\"]):\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]\n output = onnx.helper.make_tensor_value_info(\n output_name, dtype, shape=get_node_shape(node_type)\n )\n self._mc.add_outputs([output])",
"def add_layer(self, layer):\n assert isinstance(layer, torch.nn.Module)\n setattr(self, 'layer'+str(self._layer_counter), layer)\n self._layer_counter += 1\n # layer indexing : layer 0 is closest to input"
]
| [
"0.6593948",
"0.6312318",
"0.6243013",
"0.62338334",
"0.6224892",
"0.5977068",
"0.592607",
"0.5898218",
"0.58804685",
"0.5858943",
"0.58344376",
"0.57562846",
"0.5721505",
"0.5678528",
"0.56682277",
"0.56655985",
"0.5646037",
"0.56409556",
"0.56349766",
"0.55707383",
"0.55651325",
"0.5496356",
"0.54851025",
"0.54248405",
"0.54090524",
"0.53841025",
"0.53841025",
"0.53746635",
"0.53600043",
"0.5357641"
]
| 0.7810959 | 0 |
Returns all trainable weights. Returns a list of all trainable parameters. | def trainable_weights(self):
self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))
return self._trainable_weights | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weights_lst(self):\n assert self.sess is not None, \"Model has not been fitted yet!\"\n return self.sess.run(self.W_lst)",
"def get_weights(self):\n return [self.W]",
"def get_weights(self):\n return [self.W]",
"def get_weights(self):\n return []",
"def get_weights(self):\n return self.nn.get_param_values()",
"def get_weight_list(self) -> List[float]:\n return self._weight_list",
"def get_trainable_weights(model):\n trainable_weights = []\n for layer in model.layers:\n # trainable_weights += keras.engine.training.collect_trainable_weights(layer)\n trainable_weights += layer.trainable_weights\n return trainable_weights",
"def get_weights(self):\n return self.model.get_weights()",
"def get_weights(self):\n return self.model.get_weights()",
"def get_weights(self):\n return self.weights",
"def get_weights(self):\n return self.weights",
"def get_weights(self):\r\n return self.weights",
"def get_weights(self):\r\n return self.weights",
"def gather_trainable_weights(trainable, sub_layers, extra_variables):\n if not trainable:\n return []\n weights = []\n for layer in sub_layers:\n weights += layer.trainable_weights\n trainable_extra_variables = [\n v for v in extra_variables if v.trainable]\n return weights + trainable_extra_variables",
"def get_weights(self):\n return self._weights",
"def get_weights(self):\n return self._weights",
"def get_weights(self):\n return self._weights",
"def get_weights(self):\n weights = []\n for layer in self.layers:\n weights += layer.weights\n return K.batch_get_value(weights)",
"def get_weights(self):\n return self.weights\n #print(W)",
"def get_weights(self):\n return self.__weights",
"def nontrainable_weights(self):\n return list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def get_weights(self):\n return [self.w, self.b]",
"def weights(self):\n return [x.numpy() for x in self.core.w]",
"def weights(self) -> List[float]:",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]",
"def weights(self):\n return self.__weights"
]
| [
"0.7333756",
"0.70962805",
"0.70962805",
"0.70862687",
"0.7070256",
"0.6915223",
"0.68708634",
"0.6868791",
"0.6868791",
"0.679045",
"0.679045",
"0.6756826",
"0.6756826",
"0.675156",
"0.673351",
"0.673351",
"0.673351",
"0.6731417",
"0.6691864",
"0.66902167",
"0.668324",
"0.66728544",
"0.66728544",
"0.66728544",
"0.66728544",
"0.66707045",
"0.66705585",
"0.6625488",
"0.662382",
"0.66185975"
]
| 0.76837504 | 0 |
Returns all untrainable weights. Returns a list of all untrainable weights. | def nontrainable_weights(self):
return list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_weights(self):\n return []",
"def trainable_weights(self):\n self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))\n return self._trainable_weights",
"def get_weights(self):\n return [self.W]",
"def get_weights(self):\n return [self.W]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]",
"def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]",
"def weights_lst(self):\n assert self.sess is not None, \"Model has not been fitted yet!\"\n return self.sess.run(self.W_lst)",
"def get_weights_without_biases(self, layer_number):\r\n return self.weights[layer_number]",
"def get_weights(self):\n return self._weights",
"def get_weights(self):\n return self._weights",
"def get_weights(self):\n return self._weights",
"def get_weights(self):\n return self.model.get_weights()",
"def get_weights(self):\n return self.model.get_weights()",
"def weights(self):\n return [x.numpy() for x in self.core.w]",
"def get_weights(self):\n return self.weights",
"def get_weights(self):\n return self.weights",
"def GetWeights(self) -> numpy.ndarray:\n return numpy.concatenate(list(\n weight_layer.GetWeights() for weight_layer in self._weight_layers))",
"def get_weights(self):\n # First part is iterating over hidden weights. Then append the output weight.\n return [self.hidden_layers[i].get_weight() for i in range(self.depth)] + \\\n [self.output_weight.cpu().detach().numpy()]",
"def get_weights(self):\n weights = {}\n for idx, layer in enumerate(self.model.layers):\n if len(layer.get_weights())>0:\n weights[idx] = layer.get_weights()[0]\n else:\n weights[idx] = [] \n return weights",
"def get_weights(self):\r\n return self.weights",
"def get_weights(self):\r\n return self.weights",
"def get_weights(self):\n return [self.w, self.b]",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def get_weights(self):\n # First part is iterating over hidden weights. Then append the output weight.\n return [self.hidden_layers[i].get_weight().cpu().detach().numpy() for i in range(self.depth)] + \\\n [self.output_weight.cpu().detach().numpy()]",
"def get_weights(self):\n return self.forcing_term.weights_.ravel()"
]
| [
"0.7384943",
"0.7170098",
"0.70482403",
"0.70482403",
"0.6928857",
"0.69194895",
"0.69194895",
"0.69194895",
"0.6848169",
"0.6782915",
"0.6730147",
"0.6730147",
"0.6730147",
"0.6663011",
"0.6663011",
"0.6640928",
"0.6623843",
"0.6623843",
"0.6608877",
"0.65747327",
"0.65710324",
"0.6552081",
"0.6552081",
"0.65514004",
"0.65174305",
"0.65174305",
"0.65174305",
"0.65174305",
"0.65109086",
"0.6494564"
]
| 0.7795881 | 0 |
Return the default form class used for user registration. | def get_form_class(self, request):
return RegistrationForm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_form_class(self, request):\n return RegistrationForm",
"def get_form_class(self):\n return self.form_class",
"def get_form_class(self):\n if self.form_class:\n return self.form_class\n else:\n raise ImproperlyConfigured(\n \"在定义类视图%s的时候,你必须明确指定一个form_class.\"%self.__class__.__name__)",
"def get_form_class(self):\r\n return modelform_factory(self.model)",
"def get_form(self, form_class=None):\n\t\tif form_class is None:\n\t\t\tform_class = self.get_form_class()\n\t\treturn form_class(self.request.user, **self.get_form_kwargs())",
"def get_token_form_class(self):\n from two_factor.forms import AuthenticationTokenForm\n\n return AuthenticationTokenForm",
"def get_form_class():\n return RazorPaymentForm",
"def get_form_class(self):\n form_options = self.get_form_options()\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the base_form_class from the model.\n # If that is not defined, use WagtailAdminModelForm.\n model_form_class = getattr(self.model, \"base_form_class\", WagtailAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n return get_form_for_model(\n self.model,\n form_class=base_form_class,\n **form_options,\n )",
"def get_form():\n global form_class\n from fluent_comments import appsettings\n\n if form_class is None:\n if appsettings.FLUENT_COMMENTS_FORM_CLASS:\n from django.utils.module_loading import import_string\n\n form_class = import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)\n else:\n from fluent_comments.forms import FluentCommentForm\n\n form_class = FluentCommentForm\n\n return form_class",
"def get_form_class(self, form_key):\n return self.get_form_classes()[form_key]",
"def get_form_class(self):\n\t\treturn formset_factory(super(FormsetMixin, self).get_form_class(), **self.get_formset_kwargs())",
"def get_default_form(self, display=False):\n form_selector = display_form_selector if display else county_form_selector\n return form_selector.get_combined_form_class(counties=[self.county.slug])",
"def get_form_class(self):\n return get_review_form(review=self.get_object(), user=self.request.user)",
"def get_form_classes(self):\n return {\n **self.form_classes\n }",
"def get_form(self, form_class):\n return form_class(**self.get_form_kwargs())",
"def get_form(self, form_class=None):\n if form_class is None:\n form_class = self.get_form_class()\n return form_class(\n token=self.request.session.get('token', False),\n aiid=self.kwargs['aiid'],\n **self.get_form_kwargs()\n )",
"def get_form(self):\n kwargs = {\n \"instance\": self.profile if self.form_object == \"profile\" else self.user,\n \"prefix\": self.name,\n }\n\n if self.request.method == \"POST\":\n return self.form_class(self.request.POST, self.request.FILES, **kwargs)\n else:\n return self.form_class(**kwargs)",
"def signup_form(request):\n return {'signup_form': UserForm()}",
"def _get_bulk_change_form_class(self):\n return BulkChangeFormWizardHandlerPluginsForm",
"def _get_bulk_change_form_class(self):\n return BulkChangeFormElementPluginsForm",
"def get_form_class(self):\n login_try_count = self.request.session.get('login_try_count', 0)\n\n # If the form has been submitted...\n if self.request.method == \"POST\":\n self.request.session['login_try_count'] = login_try_count + 1\n\n if login_try_count >= 20:\n return CaptchaAuthenticationForm\n\n return super(LoginView, self).get_form_class()",
"def register_form(self):\n f = Form()\n self.forms = f\n return f",
"def _get_bulk_change_form_class(self):\n return BulkChangeFormHandlerPluginsForm",
"def get_form_class(self):\n \n \"\"\"\n Construct a form class that has all the fields and formsets named in\n the children of this edit handler. \n \"\"\"\n if not hasattr(self, 'model'):\n raise AttributeError(\n '%s is not bound to a model yet. Use `.bind_to(model=model)` '\n 'before using this method.' % self.__class__.__name__)\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the rai_base_form_class from the model.\n # If that is not defined, use RAIAdminModelForm.\n model_form_class = getattr(self.model, 'rai_base_form_class',\n RAIAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n formsets = self.required_formsets()\n\n form_class = rai_modelform_factory(\n self.decorator.get_rai_model(),\n form_class=base_form_class,\n fields=self.required_internal_fields(),\n formsets=formsets,\n widgets=self.widget_overrides())\n form_class.readonly_fields = self.readonly_fields()\n return form_class",
"def name(self) -> Text:\n\n return \"user_form\"",
"def get_form_class(self):\n if self.survey.get_requires_payment():\n return AuthorizenetSurveyPurchaseForm\n return super(AuthorizenetSurveyPurchaseCreate, self).get_form_class()",
"def get_form(self, form_class):\n if self.get_locked_form(form_class):\n return None\n return form_class(**self.get_form_kwargs())",
"def get_basic_form(self):\n return self.basic_form",
"def _form_for_type(request, C, defn, add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form",
"def get_form(self, form_class=None):\n # 设置初始值\n if self.request.method == \"GET\":\n return SecondMenuModelForm(initial={'menu': self.menu_obj})\n else:\n # post提交的时候,不要忘记设置data\n return SecondMenuModelForm(data=self.request.POST)"
]
| [
"0.7956627",
"0.770102",
"0.74282926",
"0.72233987",
"0.71631217",
"0.7097246",
"0.7082445",
"0.696311",
"0.692484",
"0.6767896",
"0.67418265",
"0.66456175",
"0.66020036",
"0.64767134",
"0.64665145",
"0.64210325",
"0.63494134",
"0.6314577",
"0.62687373",
"0.6240669",
"0.6233663",
"0.6218351",
"0.6175523",
"0.6134979",
"0.6127915",
"0.611254",
"0.5986515",
"0.5926572",
"0.58976483",
"0.5886312"
]
| 0.8111784 | 0 |
Get a history of locations | def get_historic_location(): # noqa: E501
db = PostgresDB()
historial = db.get_locations()
if "Error" in historial:
return jsonify(msg=historial)
if len(historial) > 0:
data = {"historial" : []}
for row in historial:
data['historial'].append(
{
"id": row[0],
"name": row[1]
}
)
return jsonify(data), 200
else:
return '', 204 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def historic(self) -> dict:\n return await self._request(\n \"get\", \"https://www.asthmaforecast.com/api/forecast/historic/asthma\"\n )",
"def get_historic_data(self):\n\n historic_market_events = []\n\n return historic_market_events",
"def History(self):\n return self.historydict.get('history', [])",
"def get_all_locations(self):",
"def history():",
"def get_history(self):\n return self.history",
"def get_locations(self, **values):\n Location = self.env[\"stock.location\"]\n return Location.browse()",
"def history(self):\n return self.info['history']",
"def get_locations():\n return STATUS['locations']",
"def create_demo_location_history() -> geopandas.GeoDataFrame:\n np.random.seed(123)\n\n time = pd.date_range(start=datetime.fromtimestamp(1624241116), end=datetime.now(), freq=\"1min\").values\n\n center_point = (-36.875990410695394, 174.76398830024274)\n lat = np.random.normal(loc=center_point[0], scale=0.01, size=len(time))\n lon = np.random.normal(loc=center_point[1], scale=0.01, size=len(time))\n\n geometry = [Point(lon, lat) for lon, lat in zip(lon, lat)]\n return geopandas.GeoDataFrame(pd.DataFrame(dict(time=time, lat=lat, lon=lon)), geometry=geometry)",
"def get_all_locations():\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))",
"def QueryHistory(self):\n return []",
"def gethistory(itemID):\n\n return harvest(GET_HISTORIC_VALUE_URL, itemID)",
"def historical():\n\n return {\n 'page': 'historical',\n }",
"def get_history(self):\n return self.__history[:]",
"def orders_history(self): \n return(self._d_orders['history'])",
"def svn_fs_history_location(*args):\r\n return _fs.svn_fs_history_location(*args)",
"def fetch_history(*args, **kwargs):\n return collect_history(*args, **kwargs)",
"def history(self):\n return self._history",
"def history(self):\n return self._history",
"def get_historical_data_copy(self):\n return copy.deepcopy(self._historical_data)",
"def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))",
"def history(self, maxresults=9999999, mindate=None):\n server = self._server.resource(self.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate, accountID=self.accountID)",
"def get_states(hass, utc_point_in_time, entity_ids=None, run=None, filters=None):\n if run is None:\n run = recorder.run_information_from_instance(hass, utc_point_in_time)\n\n # History did not run before utc_point_in_time\n if run is None:\n return []\n\n with session_scope(hass=hass) as session:\n return _get_states_with_session(\n hass, session, utc_point_in_time, entity_ids, run, filters\n )",
"def temporal_database():\n return TimeHistory()",
"def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')",
"def locations(self):\r\n return Locations(self)",
"def get_history(self, symbol, limit=1000, offset=0):\r\n return self.api.get_history(self.account, symbol, limit, offset)",
"def get_gis_historical_data():\n logging.info(\"Generating baseline reference and historical weather data.\")\n \n # Initialising function variables\n fake = Faker()\n geolocator = Nominatim()\n config_data = get_config()\n locations = config_data[\"location\"]\n \n # Check if there are no duplicate locations in the config.yaml file.\n if len(locations) != len(set(locations)):\n logging.error(\"Duplicate location found. Please check config.yaml file.\")\n raise ValueError\n \n # Initialise pandas dataframe column name for baseline reference\n # and historical data.\n df_ref = pd.DataFrame(columns=[\"Location\", \"Latitude\"\n ,\"Longitude\", \"Elevation\"\n ,\"Timezone\"])\n df_hist = pd.DataFrame(columns=[\"Location\", \"Date\"\n ,\"Month\", \"Temperature_Min\"\n ,\"Temperature_Max\", \"Humidity\"\n ,\"Pressure\"])\n \n # Generate weather data for each location.\n for idx, loc in enumerate(locations):\n \n logging.info(\"Retrieving geolocation data for {}.\".format(loc))\n \n # Retrieving geolocation data from geopy library.\n loc_data = geolocator.geocode(loc)\n \n logging.info(\"Check if the location {} is valid.\".format(loc))\n if loc_data is None:\n logging.error(\"Invalid location value supplied ({}). Please check config.yaml file.\".format(loc))\n raise ValueError\n logging.info(\"The location {} is valid.\".format(loc))\n \n city = get_city(loc)\n lat = loc_data.latitude\n lon = loc_data.longitude\n \n # Retrieving elevation data for the location.\n elev = get_elevation_data(lat, lon)\n \n for month in range(1, 13):\n \n logging.info(\"Retrieving {} weather data for month {}.\".format(loc, month))\n \n for sample in range(config_data[\"gis\"][\"sampling_number\"]):\n \n temp_min = None\n temp_max = None\n humidity = None\n pressure = None\n \n while temp_min is None or temp_max is None or humidity is None or pressure is None:\n \n year = random.randint(config_data[\"gis\"][\"year_start\"], config_data[\"gis\"][\"year_end\"])\n\n _, last_day = calendar.monthrange(year, month)\n\n datetime_start = datetime.datetime(year, month, 1)\n datetime_end = datetime.datetime(year, month, last_day)\n\n date_gen = fake.date_time_between_dates(datetime_start=datetime_start\n ,datetime_end=datetime_end)\n\n forecast = forecastio.load_forecast(config_data[\"forecastio_api_key\"]\n ,lat\n ,lon\n ,time=date_gen\n ,units=\"si\")\n\n historical_data = forecast.json[\"daily\"][\"data\"][0]\n \n timezone = forecast.json.get(\"timezone\", None)\n temp_min = historical_data.get(\"temperatureMin\", None)\n temp_max = historical_data.get(\"temperatureMax\", None)\n humidity = historical_data.get(\"humidity\", None) * 100\n pressure = historical_data.get(\"pressure\", None)\n \n df_temp_hist = pd.Series(dict(zip(df_hist.columns\n ,[city, date_gen\n ,date_gen.month, temp_min\n ,temp_max, humidity\n ,pressure])))\n \n df_hist = df_hist.append(df_temp_hist, ignore_index=True)\n \n df_temp_ref = pd.Series(dict(zip(df_ref.columns\n ,[city, lat\n ,lon, elev\n ,timezone])))\n df_ref = df_ref.append(df_temp_ref, ignore_index=True)\n \n logging.info(\"Generating position to consolidate latitude, longitude and elevation data\")\n df_pos = df_ref[[\"Latitude\", \"Longitude\", \"Elevation\"]].round(2)\n df_pos[\"Elevation\"] = df_pos[\"Elevation\"].astype(int) \n df_ref[\"Position\"] = df_pos.astype(str).apply(lambda x: \",\".join(x), axis=1)\n \n logging.info(\"Saving baseline reference data.\")\n df_ref.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n 
,file_name=config_data[\"gis\"][\"output_base_reference_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline reference data.\")\n\n logging.info(\"Saving baseline historical data.\")\n df_hist.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_historical_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline historical data.\")",
"def history(self, maxresults=None, mindate=None):\n server = self._server._server.resource(self._server.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate,\n accountID=self._server.accountID, librarySectionID=self.sectionKey)"
]
| [
"0.64075005",
"0.6306349",
"0.61227405",
"0.60741436",
"0.5974619",
"0.5956443",
"0.59440225",
"0.5868649",
"0.58360744",
"0.5833078",
"0.5829477",
"0.5814553",
"0.5808685",
"0.5795159",
"0.5782138",
"0.57627606",
"0.5699749",
"0.568246",
"0.5676575",
"0.5676575",
"0.56725854",
"0.5639999",
"0.56286067",
"0.5619502",
"0.5613151",
"0.5594146",
"0.5583746",
"0.55793494",
"0.5574321",
"0.5572516"
]
| 0.77446115 | 0 |
Convert from keras to tf | def keras_to_tensorflow(
keras_model,
output_dir: Path,
model_name,
out_prefix="output_",
log_tensorboard=True,
):
if not output_dir.exists():
output_dir.mkdir(parents=True, exist_ok=True)
output_dir: str = str(output_dir)
out_nodes = []
for i in range(len(keras_model.outputs)):
out_nodes.append(out_prefix + str(i + 1))
tf.identity(keras_model.output[i], out_prefix + str(i + 1))
sess = K.get_session()
init_graph = sess.graph.as_graph_def()
main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
if log_tensorboard:
import_pb_to_tensorboard.import_to_tensorboard(
os.path.join(output_dir, model_name), output_dir
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert(self, example):\n tf_example = _convert_to_tf_example(example, self.tokenizer, self.rules,\n self.config, self.max_sizes)\n return tf_example",
"def convert_to_tf_record(_):\n\n mnist = input_data.read_data_sets(\n \"/tmp/tensorflow/mnist/input_data\",\n reshape=False\n )\n\n convert_to(mnist.validation, 'validation', FLAGS.data_directory)\n convert_to(mnist.train, 'train', FLAGS.data_directory, num_shards=10)\n convert_to(mnist.test, 'test', FLAGS.data_directory)",
"def model(inputs, is_training):\n\n\n if data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n\n #localize network to generate the transformation parameters\n # raw_inputs = inputs\n\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 32, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer=tf.variance_scaling_initializer())\n\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 64, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer = tf.variance_scaling_initializer())\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n # inputs = tf.layers.flatten(inputs = inputs)\n\n # inputs = tf.layers.dense(inputs = inputs, units = 128)\n # print(inputs.shape)\n # trans_parameters = tf.layers.dense(inputs = inputs, units = 6)\n # print(trans_parameters.shape)\n # inputs = stn(input_fmap = raw_inputs, theta = trans_parameters, out_dims = [60, 60])\n\n\n\n #embedding network\n inputs = conv2d_fixed_padding(inputs = inputs, filters = 64, kernel_size = 7, strides = 2, data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_conv')\n\n inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'SAME', data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n inputs = block_layer(inputs = inputs, filters = 64, block_fn = block_fn, blocks = layers[0], strides = 1, \n is_training = is_training, name = 'blcok_layer1', data_format = data_format)\n print('height:', inputs.shape[1])\n\n #attention module\n #input_fmap = inputs\n # inputs = tf.reshape(inputs, (-1, 64))\n #inputs = tf.layers.dense(inputs = inputs, units = 32, activation = tf.tanh)\n\n #inputs = tf.reshape(inputs, [-1, 32])\n #inputs = tf.layers.dense(inputs = inputs, units = 1, activation = tf.sigmoid)\n\n #attention_para = tf.reshape(inputs, [-1, 21, 21, 1])\n\n \n #inputs = tf.multiply(input_fmap, attention_para)\n\n inputs = block_layer(inputs = inputs, filters = 128, block_fn = block_fn, blocks = layers[1], strides = 2,\n is_training = is_training, name = 'block_layer2', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 256, block_fn = block_fn, blocks = layers[2], strides = 2, \n is_training = is_training, name = 'block_layer3', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 512, block_fn = block_fn, blocks = layers[3], strides = 2, \n is_training = is_training, name = 'block_layer4', data_format = data_format)\n\n print('height:', inputs.shape)\n inputs = batch_norm_relu(inputs, is_training, data_format)\n \n inputs = tf.layers.average_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'VALID', data_format = data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = tf.identity(inputs, 'final_avg_pool')\n\n inputs = tf.layers.flatten(inputs = 
inputs)\n\n #TODO\n inputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n print(inputs.shape)\n outputs = tf.identity(inputs, 'final_dense')\n\n return outputs",
"def predict_from(self, inputs, to_layers):",
"def _from_numpy(array):\n return tf.constant(array)",
"def create_model(self):\n\t\twith tf.name_scope(\"input\"):\n\t\t\tself.user_embedding = tf.get_variable(\"user_embed\", \n\t\t\t\t[self.user_size, self.embed_size], dtype=tf.float32)\n\t\t\tself.item_embedding = tf.get_variable(\"item_embed\", \n\t\t\t\t[self.item_size, self.embed_size], dtype=tf.float32)\n\t\t\tself.user_embed = tf.nn.embedding_lookup(\n\t\t\t\t\t\t\tself.user_embedding, self.user)\n\t\t\tself.item_embed = tf.nn.embedding_lookup(\n\t\t\t\t\t\t\tself.user_embedding, self.item)\n\t\twith tf.name_scope(\"fusion\"):\n\t\t\tself.user_fusion_add = self.user_embed + self.user_feature\n\t\t\tself.item_fusion_add = self.item_embed + self.item_feature\n\n\t\t\tself.user_fusion = tf.layers.dense(inputs=self.user_fusion_add,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='user_fusion')\n\t\t\tself.item_fusion = tf.layers.dense(inputs=self.item_fusion_add,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='item_fusion')\n\n\t\twith tf.name_scope(\"attention\"):\n\t\t\tself.feature_all = tf.concat([\n\t\t\t\t\t\tself.user_fusion, self.item_fusion], -1)\n\t\t\tself.att_layer1 = tf.layers.dense(inputs=self.feature_all,\n\t\t\t\t\t\t\t\tunits=1,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='att_layer1')\n\t\t\tself.att_layer2 = tf.layers.dense(inputs=self.att_layer1,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='att_layer2')\n\t\t\tself.att_weights = tf.nn.softmax(self.att_layer2, \n\t\t\t\t\t\t\t\taxis=-1, name='att_softmax')\n\n\t\twith tf.name_scope(\"prediction\"):\n\t\t\tself.interact = self.att_weights*self.user_fusion*self.item_fusion\n\t\t\tself.interact1 = tf.layers.dense(inputs=self.interact,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='interact1')\n\t\t\tself.interact1 = tf.nn.dropout(self.interact1, self.dropout)\n\t\t\tself.prediction = tf.layers.dense(inputs=self.interact,\n\t\t\t\t\t\t\t\tunits=1,\n\t\t\t\t\t\t\t\tactivation=None,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='prediction')\n\t\t\tself.prediction = tf.reshape(self.prediction, [-1])",
"def _model_output(inputs, data_format):\n if data_format == 'channels_first':\n return tf.transpose(a=inputs, perm=[0, 2, 3, 1])\n else:\n return inputs",
"def keras_model_functional_for_tf2():\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n x = tf.keras.layers.BatchNormalization(momentum=.3, epsilon=.65)(x, training=True)\n with tf.compat.v1.variable_scope(\"scope_1\"):\n x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.4, epsilon=.25)(x, training=False)\n x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)\n x = tf.keras.layers.BatchNormalization(momentum=.5, epsilon=.35)(x, training=False)\n x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"keras_model_functional\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model",
"def model_initializer():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n # model.add(tf.keras.layers.Dense(128, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(64, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(32, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n\n model.compile(optimizer='rmsprop',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model",
"def gensim_to_keras(model):\n layer = model.wv.get_keras_embedding()\n return (layer)",
"def decode_tensorflow(self, encoded_chunks: tf.Tensor) -> tf.Tensor:",
"def convert_example(example, use_bfloat16=False):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val",
"def get_model_tweetonly(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size)\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n outputs_fin = outputs[-1]\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs]",
"def model_with_dtype_int():\n\n input_1 = tf.keras.Input(shape=(8, 8, 3,), dtype=tf.int32)\n input_2 = tf.keras.Input(shape=(8, 8, 3,), dtype=tf.float32)\n x = tf.cast(input_1, tf.float32)\n x = tf.add(x, input_2)\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"model_with_dtype_int\")(x)\n return outputs",
"def _convert_example(example, use_bfloat16):\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val",
"def create_model(input_shape):\n h, w, c = input_shape\n\n\n # %%%\n inpt = tf.placeholder(tf.float32, (None,h,w,c), 'attributes')\n inpt_flattenned = tf.contrib.layers.flatten(inpt) #tf.reshape(inpt, [-1, h*w*c])\n H1 = tf.layers.dense(inpt_flattenned, units=200, activation=tf.sigmoid)\n H2 = tf.layers.dense(H1, units=20, activation=tf.sigmoid)\n encoding = tf.layers.dense(H2, units=latent_space_size, activation=tf.nn.sigmoid)\n H22 = tf.layers.dense(encoding, units=20, activation=tf.sigmoid)\n H11 = tf.layers.dense(H22, units=200, activation=tf.nn.sigmoid)\n dec_flt = tf.layers.dense(H11, units=h*w*c)\n decode = tf.reshape(dec_flt, [-1, h,w,c])\n# The results which were the most pleasing to the eye were achieved with the following loss function, 2 hidden layers and the latent spaces of tens of dimentions. \n# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=dec_flt, labels=tf.sigmoid(inpt_flattenned)))\n cost = tf.reduce_mean(tf.square(dec_flt-inpt_flattenned))\n # %%%\n\n model = {'cost': cost,\n 'input': input,\n 'enc': encoding,\n 'dec': decode\n }\n return model",
"def convert_keras_to_tflite(model,\n output_path,\n custom_objects=None,\n is_quantized=True,\n inference_type=None,\n inference_input_type=None,\n input_quant_params=(-128., 255.)):\n if custom_objects is None:\n custom_objects = {}\n\n if not compat.is_v1_apis():\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n else:\n _, keras_file = tempfile.mkstemp(\".h5\")\n tf.keras.models.save_model(model, keras_file)\n converter = tf.lite.TFLiteConverter.from_keras_model_file(\n keras_file, custom_objects=custom_objects)\n\n if is_quantized:\n if not compat.is_v1_apis():\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n else:\n converter.inference_type = tf.lite.constants.INT8\n converter.inference_input_type = tf.lite.constants.FLOAT\n # TODO(tfmot): should be able to make everything use the\n # same inference_type in TF 1.X tests.\n if inference_type:\n converter.inference_type = inference_type\n if inference_input_type:\n converter.inference_input_type = inference_input_type\n\n input_arrays = converter.get_input_arrays()\n converter.quantized_input_stats = {\n input_arrays[0]: input_quant_params\n } # mean, std_dev values for float to quantized int8 values.\n\n tflite_model = converter.convert()\n\n if output_path is not None:\n with open(output_path, \"wb\") as f:\n f.write(tflite_model)\n\n return tflite_model",
"def build_model(\n data_tensor,\n reuse,\n training,\n output_shape,\n data_format='NHWC'):\n if isinstance(output_shape, list):\n output_shape = output_shape[-1]\n elif isinstance(output_shape, dict):\n output_shape = output_shape['output']\n # norm_moments_training = training # Force instance norm\n # normalization_type = 'no_param_batch_norm_original'\n # output_normalization_type = 'batch_norm_original_renorm'\n output_normalization_type = 'instance_norm'\n data_tensor, long_data_format = tf_fun.interpret_data_format(\n data_tensor=data_tensor,\n data_format=data_format)\n\n # Build model\n with tf.variable_scope('vgg', reuse=reuse):\n vgg = vgg16.Vgg16(\n vgg16_npy_path='/media/data_cifs/clicktionary/pretrained_weights/vgg16.npy')\n vgg(rgb=data_tensor, train=training, ff_reuse=reuse)\n\n with tf.variable_scope('fgru', reuse=reuse):\n # Get side weights\n h2_rem = [\n vgg.conv1_2,\n vgg.conv2_2,\n vgg.conv3_3,\n vgg.conv4_3,\n vgg.conv5_3]\n res_act = []\n for idx, h in enumerate(h2_rem):\n res = normalization.apply_normalization(\n activity=h,\n name='output_norm1_%s' % idx,\n normalization_type=output_normalization_type,\n data_format=data_format,\n training=training,\n trainable=training,\n reuse=reuse)\n res_act += [tf.image.resize_bilinear(\n res,\n data_tensor.get_shape().as_list()[1:3],\n align_corners=True)]\n\n activity = tf.layers.conv2d(\n tf.concat(res_act, -1),\n filters=output_shape,\n kernel_size=(1, 1),\n padding='same',\n data_format=long_data_format,\n name='out',\n activation=None,\n trainable=training,\n use_bias=True,\n reuse=reuse)\n\n if long_data_format is 'channels_first':\n activity = tf.transpose(activity, (0, 2, 3, 1))\n extra_activities = {} # idx: v for idx, v in enumerate(hs_0)}\n if activity.dtype != tf.float32:\n activity = tf.cast(activity, tf.float32)\n return activity, extra_activities",
"def tf_linear_model(input_shape, output_shape):\n return keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=input_shape),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(output_shape),\n tf.keras.layers.Softmax()\n ])",
"def get_model(input_shape, output_len):\n\n model = tf.keras.Sequential()\n\n model.add(tf.keras.layers.Conv2D(24, 5, data_format='channels_last',\n padding='same', activation='relu',\n input_shape=input_shape))\n model.add(tf.keras.layers.MaxPool2D(2))\n\n model.add(tf.keras.layers.Conv2D(\n 48, 5, activation='relu', padding='same'))\n model.add(tf.keras.layers.MaxPool2D(2))\n\n model.add(tf.keras.layers.Conv2D(\n 48, 5, activation='relu', padding='same'))\n\n model.add(tf.keras.layers.MaxPool2D(2))\n\n model.add(tf.keras.layers.Flatten())\n\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n\n model.add(tf.keras.layers.Dense(\n output_len, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n metrics=['accuracy'], optimizer='SGD')\n\n return model",
"def create_model(input_tensor, mode, hyper_params):\n model = {}\n with tf.variable_scope('vgg16') as scope:\n net = tf.cast(input_tensor[\"image\"], dtype=tf.float32, name=\"input/cast\")\n model[\"image\"] = net\n mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n net = net - mean\n model[\"image-normalized\"] = net\n\n net = _create_conv2_block(model, net, filters=64, layer_number=1)\n net = _create_conv2_block(model, net, filters=128, layer_number=2)\n net = _create_conv3_block(model, net, filters=256, layer_number=3)\n net = _create_conv3_block(model, net, filters=512, layer_number=4)\n net = _create_conv3_block(model, net, filters=512, layer_number=5)\n print(net.get_shape())\n\n if not hyper_params.vgg16.encoder_only:\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(7, 7), strides=(1, 1), name=\"fc1\", activation=tf.nn.relu)\n model[\"vgg16/fc1\"] = net\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(1, 1), strides=(1, 1), name=\"fc2\", activation=tf.nn.relu)\n model[\"vgg16/fc2\"] = net\n net = tf.layers.conv2d(inputs=net, filters=1000, kernel_size=(1, 1), strides=(1, 1), name=\"logits\", activation=None)\n model[\"logits\"] = net\n net = tf.nn.softmax(net)\n model[\"probs\"] = net\n return model",
"def _encode(self, input_dict):\n\n source_sequence, src_length = input_dict['source_tensors']\n\n training = (self._mode == \"train\")\n dropout_keep_prob = self.params['dropout_keep_prob'] if training else 1.0\n regularizer = self.params.get('regularizer', None)\n data_format = self.params.get('data_format', 'channels_last')\n bn_momentum = self.params.get('bn_momentum', 0.99)\n bn_epsilon = self.params.get('bn_epsilon', 1e-3)\n\n input_layer = tf.expand_dims(source_sequence, axis=-1) # BTFC\n # print(\"<<< input :\", input_layer.get_shape().as_list())\n\n batch_size = input_layer.get_shape().as_list()[0]\n freq = input_layer.get_shape().as_list()[2]\n\n # supported data_formats:\n # BTFC = channel_last (legacy)\n # BCTF = channel_first(legacy)\n # BFTC\n # BCFT\n\n if data_format=='channels_last' or data_format=='BTFC':\n layout = 'BTFC'\n dformat = 'channels_last'\n elif data_format=='channels_first' or data_format=='BCTF':\n layout = 'BCTF'\n dformat = 'channels_first'\n elif data_format=='BFTC':\n layout = 'BFTC'\n dformat = 'channels_last'\n elif data_format=='BCFT':\n layout = 'BCFT'\n dformat = 'channels_first'\n else:\n print(\"WARNING: unsupported data format: will use channels_last (BTFC) instead\")\n layout = 'BTFC'\n dformat = 'channels_last'\n\n #input_layer is BTFC\n\n if layout == 'BCTF':\n top_layer = tf.transpose(input_layer, [0, 3, 1, 2])\n elif layout == 'BFTC':\n top_layer = tf.transpose(input_layer, [0, 2, 1, 3])\n elif layout == 'BCFT':\n top_layer = tf.transpose(input_layer, [0, 3, 2, 1])\n else:\n top_layer = input_layer\n\n # print(\"<<< pre-conv:\", top_layer.get_shape().as_list())\n\n # ----- Convolutional layers ---------------------------------------------\n conv_layers = self.params['conv_layers']\n\n for idx_conv in range(len(conv_layers)):\n ch_out = conv_layers[idx_conv]['num_channels']\n kernel_size = conv_layers[idx_conv]['kernel_size'] # [T,F] format\n strides = conv_layers[idx_conv]['stride'] # [T,F] format\n padding = conv_layers[idx_conv]['padding']\n\n if padding == \"VALID\":\n src_length = (src_length - kernel_size[0] + strides[0]) // strides[0]\n freq = (freq - kernel_size[1] + strides[1]) // strides[1]\n else:\n src_length = (src_length + strides[0] - 1) // strides[0]\n freq = (freq + strides[1] -1) // strides[1]\n\n if layout == 'BFTC' or layout == 'BCFT':\n kernel_size = kernel_size[::-1]\n strides = strides[::-1]\n # print(kernel_size, strides)\n\n top_layer = conv_bn_actv(\n layer_type=\"conv2d\",\n name=\"conv{}\".format(idx_conv + 1),\n inputs=top_layer,\n filters=ch_out,\n kernel_size=kernel_size,\n activation_fn=self.params['activation_fn'],\n strides=strides,\n padding=padding,\n regularizer=regularizer,\n training=training,\n data_format=dformat,\n bn_momentum=bn_momentum,\n bn_epsilon=bn_epsilon,\n )\n # print(idx_conv, \"++++\", top_layer.get_shape().as_list())\n\n # convert layout --> BTFC\n # if data_format == 'channels_first':\n # top_layer = tf.transpose(top_layer, [0, 2, 3, 1])\n\n if layout == 'BCTF': # BCTF --> BTFC\n top_layer = tf.transpose(top_layer, [0, 2, 3, 1])\n elif layout == 'BFTC': # BFTC --> BTFC\n top_layer = tf.transpose(top_layer, [0, 2, 1, 3])\n elif layout == 'BCFT': # BCFT --> BTFC\n top_layer = tf.transpose(top_layer, [0, 3, 2, 1])\n\n\n # print(\">>> post-conv:\", top_layer.get_shape().as_list())\n\n # reshape to [B, T, FxC]\n f = top_layer.get_shape().as_list()[2]\n c = top_layer.get_shape().as_list()[3]\n fc = f * c\n top_layer = tf.reshape(top_layer, [batch_size, -1, fc])\n\n # ----- RNN 
---------------------------------------------------------------\n num_rnn_layers = self.params['num_rnn_layers']\n if num_rnn_layers > 0:\n rnn_cell_dim = self.params['rnn_cell_dim']\n rnn_type = self.params['rnn_type']\n if self.params['use_cudnn_rnn']:\n # reshape to [B, T, C] --> [T, B, C]\n rnn_input = tf.transpose(top_layer, [1, 0, 2])\n if self.params['rnn_unidirectional']:\n direction = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION\n else:\n direction = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION\n\n if rnn_type == \"cudnn_gru\" or rnn_type == \"gru\":\n # pylint: disable=no-member\n rnn_block = tf.contrib.cudnn_rnn.CudnnGRU(\n num_layers=num_rnn_layers,\n num_units=rnn_cell_dim,\n direction=direction,\n dropout=1.0 - dropout_keep_prob,\n dtype=rnn_input.dtype,\n name=\"cudnn_gru\",\n )\n elif rnn_type == \"cudnn_lstm\" or rnn_type == \"lstm\":\n # pylint: disable=no-member\n rnn_block = tf.contrib.cudnn_rnn.CudnnLSTM(\n num_layers=num_rnn_layers,\n num_units=rnn_cell_dim,\n direction=direction,\n dropout=1.0 - dropout_keep_prob,\n dtype=rnn_input.dtype,\n name=\"cudnn_lstm\",\n )\n else:\n raise ValueError(\n \"{} is not a valid rnn_type for cudnn_rnn layers\".format(\n rnn_type)\n )\n top_layer, state = rnn_block(rnn_input)\n top_layer = tf.transpose(top_layer, [1, 0, 2])\n else:\n rnn_input = top_layer\n multirnn_cell_fw = tf.nn.rnn_cell.MultiRNNCell(\n [rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,\n dropout_keep_prob=dropout_keep_prob)\n for _ in range(num_rnn_layers)]\n )\n if self.params['rnn_unidirectional']:\n top_layer, state = tf.nn.dynamic_rnn(\n cell=multirnn_cell_fw,\n inputs=rnn_input,\n sequence_length=src_length,\n dtype=rnn_input.dtype,\n time_major=False,\n )\n else:\n multirnn_cell_bw = tf.nn.rnn_cell.MultiRNNCell(\n [rnn_cell(rnn_cell_dim=rnn_cell_dim, layer_type=rnn_type,\n dropout_keep_prob=dropout_keep_prob)\n for _ in range(num_rnn_layers)]\n )\n top_layer, state = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=multirnn_cell_fw, cell_bw=multirnn_cell_bw,\n inputs=rnn_input,\n sequence_length=src_length,\n dtype=rnn_input.dtype,\n time_major=False\n )\n # concat 2 tensors [B, T, n_cell_dim] --> [B, T, 2*n_cell_dim]\n top_layer = tf.concat(top_layer, 2)\n # -- end of rnn------------------------------------------------------------\n\n if self.params['row_conv']:\n channels = top_layer.get_shape().as_list()[-1]\n top_layer = row_conv(\n name=\"row_conv\",\n input_layer=top_layer,\n batch=batch_size,\n channels=channels,\n activation_fn=self.params['activation_fn'],\n width=self.params['row_conv_width'],\n regularizer=regularizer,\n training=training,\n data_format=data_format,\n bn_momentum=bn_momentum,\n bn_epsilon=bn_epsilon,\n )\n\n # Reshape [B, T, C] --> [B*T, C]\n c = top_layer.get_shape().as_list()[-1]\n top_layer = tf.reshape(top_layer, [-1, c])\n\n # --- hidden layer with clipped ReLU activation and dropout---------------\n top_layer = tf.layers.dense(\n inputs=top_layer,\n units=self.params['n_hidden'],\n kernel_regularizer=regularizer,\n activation=self.params['activation_fn'],\n name='fully_connected',\n )\n outputs = tf.nn.dropout(x=top_layer, keep_prob=dropout_keep_prob)\n\n # reshape from [B*T,A] --> [B, T, A].\n # Output shape: [batch_size, n_steps, n_hidden]\n outputs = tf.reshape(\n outputs,\n [batch_size, -1, self.params['n_hidden']],\n )\n\n return {\n 'outputs': outputs,\n 'src_length': src_length,\n }",
"def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)",
"def _build_keras_model(tf_transform_output: tft.TFTransformOutput,\n hidden_units: List[int] = None,\n learning_rate: float = 0.01) -> tf.keras.Model:\n numeric_columns = [\n tf.feature_column.numeric_column(transformed_name(key), shape=())\n for key in NUMERIC_FEATURE_KEYS\n ]\n \n categorical_columns = [\n tf.feature_column.categorical_column_with_vocabulary_file(\n transformed_name(key), \n vocabulary_file=tf_transform_output.vocabulary_file_by_name(\n vocab_filename=key), \n dtype=tf.dtypes.string,\n default_value=None, \n num_oov_buckets=0)\n for key in CATEGORICAL_FEATURE_KEYS\n ]\n \n indicator_columns = [\n tf.feature_column.indicator_column(categorical_column)\n for categorical_column in categorical_columns\n ]\n \n \n model = dnn_regressor(\n input_columns=numeric_columns + indicator_columns,\n dnn_hidden_units=hidden_units or [16, 16, 16],\n learning_rate=learning_rate)\n return model",
"def list_to_backend_type(data: List) -> TTensor:",
"def get_model():\n model = tf.keras.models.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\"adam\", \"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n return model",
"def gensim_to_keras(model):\n return model.wv.get_keras_embedding()",
"def get_model():\n\n # Create a convolutional neural network\n model = tf.keras.models.Sequential([\n\n tf.keras.layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), # Convolutional layer with 32 filters of a 3 x 3 kernel\n\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), # Max-pooling layer with a 2 x 2 pool size\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"), # Convolutional layer with 64 filters of a 3 x 3 kernel\n \n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), # Max-pooling layer with a 2 x 2 pool size\n\n tf.keras.layers.Flatten(), # Flatten units\n\n tf.keras.layers.Dense(256, activation=\"relu\"), # Hidden layer with 256 neurons\n\n tf.keras.layers.Dropout(0.25), # Dropout layer with a rate of 0.25\n\n tf.keras.layers.Dense(NUM_CATEGORIES, activation=\"softmax\") # Output layer with an output unit for each image category\n ])\n\n # Compile model\n model.compile(\n optimizer=\"adam\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n\n return model",
"def to_tensor(self): \n raise NotImplementedError",
"def keras_model_fn_cpu(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n with tf.device('/cpu:0'):\n ## build model\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.LSTM(lstm_hs,recurrent_activation = 'sigmoid', return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.GRU(gru_hs, reset_after = True, recurrent_activation = 'sigmoid', return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n\n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer=ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics=['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)])\n return model"
]
| [
"0.6639992",
"0.6630255",
"0.6283206",
"0.6280251",
"0.6261455",
"0.61814857",
"0.61665124",
"0.6141866",
"0.6130877",
"0.6106019",
"0.60859215",
"0.60848236",
"0.6076101",
"0.60558754",
"0.60113984",
"0.6006671",
"0.59937596",
"0.59848094",
"0.5983833",
"0.5973008",
"0.59620005",
"0.59556806",
"0.59508324",
"0.59466463",
"0.5936793",
"0.5935574",
"0.5929721",
"0.59193814",
"0.59140253",
"0.59115714"
]
| 0.67439127 | 0 |
As the method name suggests, this returns the up-to-date station information. | def get_current_station_info(cls, dbsession):
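        # Subquery: the id of the most recent usage record for each station.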
sub = dbsession.query(UsageData.station_id, func.max(UsageData.id).label('max_update')).group_by(
UsageData.station_id).subquery()
return dbsession.query(
UsageData.last_update,
UsageData.available_bike_stands, UsageData.available_bikes).join(sub, and_(
sub.c.max_update == UsageData.id)).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_station_boroughs(self):\\",
"def show_info(self):\n\n print(\"Querying the station...\")\n val = getvalues(self.station, '', fixed_format)\n\n print('Fine Offset station settings:')\n print('%s: %s' % ('local time'.rjust(30),\n time.strftime('%Y.%m.%d %H:%M:%S %Z',\n time.localtime())))\n print('%s: %s' % ('polling mode'.rjust(30), self.station.polling_mode))\n\n slist = {'values':[], 'minmax_values':[], 'settings':[],\n 'display_settings':[], 'alarm_settings':[]}\n for x in sorted(val.keys()):\n if type(val[x]) is dict:\n for y in val[x].keys():\n label = x + '.' + y\n s = fmtparam(label, val[x][y])\n slist = stash(slist, s)\n else:\n s = fmtparam(x, val[x])\n slist = stash(slist, s)\n for k in ('values', 'minmax_values', 'settings',\n 'display_settings', 'alarm_settings'):\n print('')\n for s in slist[k]:\n print(s)",
"def status(self, station=1):\n return self.statuslist()[station][2]",
"def stations():\n\n return station_list",
"def get_station(station_id):\n return STATIONS.station_details_for(station_id)",
"def prism_station_details(self, station_id):\n if self.cursor is None: #singleton pattern\n db_conn = MySQLdb.connect(host=\"kp-amps-director1\",\n user=\"amps\", passwd=\"amps\", db=\"AMPS\")\n self.cursor = db_conn.cursor()\n if station_id.isdigit() and int(station_id[0]) in range(0, 10):\n selectstatement = 'SELECT latitude,longitude,elevation ' \\\n 'FROM StationLocation WHERE stationId = \"%s\"' % (station_id)\n else:\n selectstatement = \\\n 'SELECT latitude,longitude,elevation ' \\\n 'FROM StationLocation, Station ' \\\n 'WHERE StationLocation.stationId = Station.stationId and ' \\\n 'Station.icaoId = \"%s\"' % (station_id[1:])\n try:\n self.cursor.execute(selectstatement)\n first_result = self.cursor.fetchone()\n except Exception: #what sorts should this be?\n return None\n if not first_result:\n return None\n try:\n result = {LATITUDE:first_result[0],\n LONGITUDE:first_result[1],\n ELEVATION:first_result[2]}\n return result\n except Exception: #what types of error should this be?\n return None",
"def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp",
"def get_station_info(config_dict):\n stn_info = dict()\n if config_dict is not None:\n if 'Station' in config_dict:\n stn_info['location'] = weeutil.weeutil.list_as_string(config_dict['Station'].get('location'))\n stn_info['latitude'] = config_dict['Station'].get('latitude')\n stn_info['longitude'] = config_dict['Station'].get('longitude')\n stn_info['altitude'] = config_dict['Station'].get('altitude')\n if 'station_type' in config_dict['Station']:\n stn_info['station_type'] = config_dict['Station']['station_type']\n if stn_info['station_type'] in config_dict:\n stn_info['driver'] = config_dict[stn_info['station_type']]['driver']\n if 'StdReport' in config_dict:\n stn_info['units'] = get_unit_info(config_dict)\n\n return stn_info",
"def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}",
"def weatherstation_info_command(station_id):\n station = stations.get_station(station_id)\n print(station)",
"def Fetch_station(long, lat, y):\r\n global ddf\r\n dmin = 1000000\r\n rs = 0\r\n i=0\r\n for i in range(len(ddf[y])):\r\n #Calculate the distance between zip code location and weather station location\r\n dnew = Distance_orthonormique(ddf[y]['LON'][i], ddf[y]['LAT'][i], long, lat)\r\n\r\n if(dmin > dnew):\r\n #If the last smaller distance is superior than the current distance :\r\n #the new smaller distance is the current distance\r\n dmin = dnew\r\n rs = i\r\n\r\n #rs = index dataframe weather station\r\n #ddf[y]['STATION NAME'][rs] = Weather station name\r\n #round(dmin, 2) = Distance between weather station and zip code\r\n \r\n return rs, ddf[y]['STATION NAME'][rs], round(dmin,2)",
"def get_stations(self):\n return self.__request('stations')['stations']",
"def update(self):\n url = '/weather/summary?version=2&lat={}&lon={}' \\\n .format(self.lat, self.lon)\n self.result = self.api.get(url)['weather']['summary'][0]",
"def station_stats(df):",
"def station_details_for(self, station_id):\n if not self.enabled:\n return None\n station_id_upper = station_id.upper()\n if not self.is_loaded():\n self.reload()\n if not station_id_upper in self.known_stations:\n prism_station = self.prism_station_details(station_id_upper)\n self.known_stations[station_id_upper] = prism_station\n return self.known_stations[station_id_upper]",
"def get_last_location_weather(location=\"Ljubljana\"):\n address = \"http://www.arso.gov.si/vreme/napovedi%20in%20podatki/vreme_avt.html\"\n page = requests.get(address)\n html = page.content\n\n parsed_html = BeautifulSoup(html)\n\n station_data = parsed_html.find(\"table\", attrs={\"class\": \"online\"})\n station_names = [x.string for x in station_data.findAll(\"td\", attrs={\"class\": \"onlineimena\"})]\n station_data = [x for x in station_data.findAll(\"tr\")]\n print station_names\n datad = namedtuple(\"location_weather\", (\"temperature\",\n \"humidity\",\n \"wind_direction\",\n \"wind_speed\",\n \"wind_gusts\",\n \"air_pressure\",\n \"rain\",\n \"sun_radiation\"))\n for i, station_name in enumerate(station_names):\n if station_name.lower() == location.lower():\n print station_name\n data = [x.string for x in station_data[i + 2].findAll(\"td\")]\n data = data[1:]\n d = datad(data[0], data[1], slo_to_eng_compass(data[2]), data[3], data[5], *data[7:])\n return d",
"def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations",
"def station_list() -> List[Dict]:\n return STATIONS",
"def update_stationlist(time_res='hourly',dbase_dir='dbase'):\r\n\r\n \r\n dwd_abbr = {'air_temperature': 'TU',\r\n 'cloud_type': 'CS', \r\n 'cloudiness': 'N',\r\n 'dew_point' : 'TD',\r\n 'extreme_temperature': 'TX',\r\n 'extreme_wind': 'FX',\r\n 'precipitation': 'RR',\r\n 'pressure': 'P0',\r\n 'soil_temperature': 'EB',\r\n 'solar': 'ST',\r\n 'sun': 'SD',\r\n 'visibility': 'VV',\r\n 'wind': 'FF',\r\n 'wind_synop': 'F'\r\n }\r\n \r\n # lets start\r\n print('Updating station list')\r\n \r\n # create output directory if not existing\r\n \r\n if not os.path.exists(dbase_dir):\r\n os.makedirs(dbase_dir)\r\n \r\n #check whether we have an up-to-date-station-list-already\r\n try:\r\n stations_network_old=[s for s in os.listdir(dbase_dir) if 'dwd_station_network' in s][0]\r\n datetime_network=datetime.date(datetime.strptime(re.findall('\\d+',stations_network_old)[0],'%Y%m%d'))\r\n #update if more than 24hours\r\n dt_today=datetime.date(datetime.now())\r\n if (dt_today-datetime_network)<timedelta(days=1):\r\n print('DWD network list is up-to-date, no update needed')\r\n filename_stations=dbase_dir+'\\\\'+stations_network_old\r\n return filename_stations\r\n else:\r\n print('DWD network list neeeds to be updated')\r\n os.remove(dbase_dir+'\\\\'+stations_network_old)\r\n except:\r\n print('DWD network list neeeds to be updated')\r\n pass\r\n \r\n \r\n # header\r\n stations_network=pd.DataFrame()\r\n \r\n # connect to ftp server and go to the folder\r\n \r\n # Connect to the Server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #change to subfolder\r\n ftp.cwd('/climate_environment/CDC/observations_germany/climate/' + time_res +'/')\r\n #get dwd categories\r\n dwd_categories=ftp.nlst()\r\n #loop through the subfolders to get the station lists\r\n for category in dwd_categories:\r\n print('retrieve stationlist for', category)\r\n #try to get historical data\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/historical/'\r\n ftp.cwd(dir_path)\r\n except Exception as e:\r\n print(e, 'try to download category', category, 'from other folder')\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/'\r\n ftp.cwd(dir_path)\r\n except:\r\n print('Category', category, 'could not have been downloaded')\r\n pass\r\n #retrieve the stationlist\r\n stationlist = []\r\n # try to retrieve file\r\n retrieved=False\r\n filename=dwd_abbr[category]+'_Stundenwerte_Beschreibung_Stationen.txt'\r\n while not retrieved:\r\n try:\r\n ftp.retrlines(\"RETR \" + filename, stationlist.append)\r\n #ftp.retrbinary(\"RETR \" + filestr, stationlist.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(dir_path)\r\n #remove first two lines\r\n stationlist=stationlist[2:]\r\n #delete uncessary blanks\r\n stationlist=[re.sub(' +', ' ', station.rstrip()) for station in stationlist]\r\n #split the list\r\n stationlist=[station.split(\" \")[:7] for station in stationlist]\r\n #read as dataframe\r\n dfstations=pd.DataFrame(stationlist,columns=['STATIONS_ID','date_start','date_end','height','geo_lat','geo_lon','name'])\r\n #add true information to category\r\n dfstations[category]=True\r\n \r\n stations_network=stations_network.append(dfstations,sort=False,ignore_index=True)\r\n #A=[sub.split(\" \") for sub in stationlist] \r\n \r\n #replace all Na by False\r\n stations_network[stations_network.isna()]=0 \r\n #aggregate\r\n 
stations_network=stations_network.groupby(['STATIONS_ID'],as_index=False).agg('max')\r\n #replace zero by False in order to have pure boolean data\r\n stations_network.replace(0,False,inplace=True)\r\n #fix the error with station 14138 and 05614 and 07325, which does not have pressure cord\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='05614','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='07325','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='01572','pressure']=False\r\n #for temperature the same\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','air_temperature']=False\r\n #save to database writing the time as well\r\n filename_stations=dbase_dir+'\\\\dwd_station_network_'+datetime.now().strftime('%Y%m%d')+'.csv'\r\n stations_network.to_csv(filename_stations,index=False)\r\n \r\n print('Updating station list...finished')\r\n \r\n return filename_stations",
"def getInfo():\n\tcity_list_url = 'https://airnow.gov/index.cfm?action=airnow.local_state&stateid=5'\n\taq_info_url = 'https://airnow.gov/index.cfm?action=airnow.local_city&mapcenter=0&cityid='\n\tcity_list = []\n\tcity_name_list = []\n\tgetCityList(city_list, city_name_list, city_list_url)\n\tinfo = []\n\tgetAllAQInfo(city_list, aq_info_url, info)\n\t\n\t# for i in info:\n\t# \tprint(i)\n\treturn info",
"def get_operational_forecasts(self):\n\n # Real time ensemble data:\n # https://www.ftp.ncep.noaa.gov/data/nccf/com/ens_tracker/prod/\n\n # If forecasts dict already exist, simply return the dict\n try:\n self.forecast_dict\n return self.forecast_dict\n except:\n pass\n\n # Follow HURDAT procedure\n if self.source == \"hurdat\":\n\n # Get storm ID & corresponding data URL\n storm_id = self.dict['operational_id']\n storm_year = self.dict['year']\n if storm_year <= 2006:\n storm_id = self.dict['id']\n if storm_year < 1954:\n msg = \"Forecast data is unavailable for storms prior to 1954.\"\n raise RuntimeError(msg)\n\n # Error check\n if storm_id == '':\n msg = \"No NHC operational data is available for this storm.\"\n raise RuntimeError(msg)\n\n # Check if archive directory exists for requested year, if not redirect to realtime directory\n url_models = f\"https://ftp.nhc.noaa.gov/atcf/archive/{storm_year}/a{storm_id.lower()}.dat.gz\"\n if requests.get(url_models).status_code != 200:\n url_models = f\"https://ftp.nhc.noaa.gov/atcf/aid_public/a{storm_id.lower()}.dat.gz\"\n\n # Retrieve model data text\n if requests.get(url_models).status_code == 200:\n request = urllib.request.Request(url_models)\n response = urllib.request.urlopen(request)\n sio_buffer = BytesIO(response.read())\n gzf = gzip.GzipFile(fileobj=sio_buffer)\n data = gzf.read()\n content = data.splitlines()\n content = [(i.decode()).split(\",\") for i in content]\n content = [i for i in content if len(i) > 10]\n response.close()\n else:\n raise RuntimeError(\n \"No operational model data is available for this storm.\")\n\n # Follow JTWC procedure\n else:\n\n url_models_noaa = f\"https://www.ssd.noaa.gov/PS/TROP/DATA/ATCF/JTWC/a{self.id.lower()}.dat\"\n url_models_ucar = f\"http://hurricanes.ral.ucar.edu/repository/data/adecks_open/{self.year}/a{self.id.lower()}.dat\"\n\n # Retrieve model data text\n try:\n content = read_url(url_models_noaa, split=True, subsplit=False)\n except:\n try:\n content = read_url(\n url_models_ucar, split=True, subsplit=False)\n except:\n raise RuntimeError(\n \"No operational model data is available for this storm.\")\n content = [i.split(\",\") for i in content]\n content = [i for i in content if len(i) > 10]\n\n # Iterate through every line in content:\n forecasts = {}\n for line in content:\n\n # Get basic components\n lineArray = [i.replace(\" \", \"\") for i in line]\n try:\n basin, number, run_init, n_a, model, fhr, lat, lon, vmax, mslp, stype, rad, windcode, neq, seq, swq, nwq = lineArray[\n :17]\n use_wind = True\n except:\n basin, number, run_init, n_a, model, fhr, lat, lon, vmax, mslp, stype = lineArray[\n :11]\n use_wind = False\n\n # Check init time is within storm time range\n run_init_dt = dt.strptime(run_init, '%Y%m%d%H')\n if run_init_dt < self.dict['time'][0] - timedelta(hours=6) or run_init_dt > self.dict['time'][-1] + timedelta(hours=6):\n continue\n \n # Skip erroneous lines\n try:\n if int(fhr) > 240:\n continue\n except:\n continue\n\n # Enter into forecast dict\n if model not in forecasts.keys():\n forecasts[model] = {}\n if run_init not in forecasts[model].keys():\n forecasts[model][run_init] = {\n 'init': run_init_dt, 'fhr': [], 'lat': [], 'lon': [], 'vmax': [], 'mslp': [], 'type': [], 'windrad': []\n }\n\n # Format lat & lon\n fhr = int(fhr)\n if \"N\" in lat:\n lat_temp = lat.split(\"N\")[0]\n lat = round(float(lat_temp) * 0.1, 1)\n elif \"S\" in lat:\n lat_temp = lat.split(\"S\")[0]\n lat = round(float(lat_temp) * -0.1, 1)\n if \"W\" in lon:\n lon_temp = 
lon.split(\"W\")[0]\n lon = round(float(lon_temp) * -0.1, 1)\n elif \"E\" in lon:\n lon_temp = lon.split(\"E\")[0]\n lon = round(float(lon_temp) * 0.1, 1)\n\n # Format vmax & MSLP\n if vmax == '':\n vmax = np.nan\n else:\n vmax = int(vmax)\n if vmax < 10 or vmax > 300:\n vmax = np.nan\n if mslp == '':\n mslp = np.nan\n else:\n mslp = int(mslp)\n if mslp < 1:\n mslp = np.nan\n\n # Format wind radii\n if use_wind:\n try:\n rad = int(rad)\n if rad in [0, 35]:\n rad = 34\n neq = np.nan if windcode == '' else int(neq)\n seq = np.nan if windcode in ['', 'AAA'] else int(seq)\n swq = np.nan if windcode in ['', 'AAA'] else int(swq)\n nwq = np.nan if windcode in ['', 'AAA'] else int(nwq)\n except:\n rad = 34\n neq = np.nan\n seq = np.nan\n swq = np.nan\n nwq = np.nan\n else:\n rad = 34\n neq = np.nan\n seq = np.nan\n swq = np.nan\n nwq = np.nan\n\n # Add forecast data to dict if forecast hour isn't already there\n if fhr not in forecasts[model][run_init]['fhr']:\n if model in ['OFCL', 'OFCI'] and fhr > 120:\n pass\n else:\n if lat == 0.0 and lon == 0.0:\n continue\n forecasts[model][run_init]['fhr'].append(fhr)\n forecasts[model][run_init]['lat'].append(lat)\n forecasts[model][run_init]['lon'].append(lon)\n forecasts[model][run_init]['vmax'].append(vmax)\n forecasts[model][run_init]['mslp'].append(mslp)\n forecasts[model][run_init]['windrad'].append(\n {rad: [neq, seq, swq, nwq]})\n\n # Get storm type, if it can be determined\n if stype in ['', 'DB'] and vmax != 0 and not np.isnan(vmax):\n stype = get_storm_type(vmax, False)\n forecasts[model][run_init]['type'].append(stype)\n else:\n ifhr = forecasts[model][run_init]['fhr'].index(fhr)\n forecasts[model][run_init]['windrad'][ifhr][rad] = [\n neq, seq, swq, nwq]\n\n # Save dict locally\n self.forecast_dict = forecasts\n\n # Return dict\n return forecasts",
"def stations_dict(self):\n return self.__stations_dict",
"async def current(self) -> dict:\n return await self._request(\n \"get\", \"https://www.asthmaforecast.com/api/forecast/current/asthma\"\n )",
"def update(self):\n if self.last_update and (\n self.last_update + timedelta(hours=1)\n > datetime.utcnow().replace(tzinfo=dt_util.UTC)\n ):\n return # Not time to update yet; data is only hourly\n\n for row in self.current_observations():\n if row.get(\"Station\") == self._station_id:\n api_fields = {\n col_heading: (standard_name, dtype)\n for standard_name, (\n _,\n _,\n _,\n col_heading,\n dtype,\n ) in SENSOR_TYPES.items()\n }\n self.data = {\n api_fields.get(col_heading)[0]: api_fields.get(col_heading)[1](\n v.replace(\",\", \".\")\n )\n for col_heading, v in row.items()\n if col_heading in api_fields and v\n }\n break\n else:\n raise ValueError(f\"No weather data for station {self._station_id}\")",
"def location(self):\r\n try:\r\n return self.data['location']\r\n except KeyError:\r\n return self.data['station_name']",
"def update(self):\n url = '/weather/current/minutely' \\\n '?version=2&lat={}&lon={}&city={}&county={}&village={}' \\\n .format(self.lat, self.lon, self.city, self.county, self.village)\n self.result = self.api.get(url)['weather']['minutely'][0]",
"def dates(self):\n #{{{ function to return start and end times for a station\n return self.wfdates.keys()",
"def xmlRootGetStation(root, whenCreated):\n lineCode = root.findtext(\"{http://trackernet.lul.co.uk}Line\")\n lineName = root.findtext(\"{http://trackernet.lul.co.uk}LineName\")\n stationElement = root.find(\"{http://trackernet.lul.co.uk}S\")\n code = stationElement.get(\"Code\")\n name = stationElement.get(\"N\")\n stationObj = station.Station(code, name, lineCode, lineName)\n xmlStationGetPlatforms(stationObj, stationElement, whenCreated)\n return stationObj",
"def update(self):\n if not self.should_update():\n return\n try:\n two_hour_forecast = \"https://www.nea.gov.sg/api/WeatherForecast/forecast24hrnowcast2hrs/\" + \\\n str(int(time.time()))\n two_hour_result = requests.get(two_hour_forecast, timeout=10).json()\n if (two_hour_result is not None and two_hour_result[\"Channel2HrForecast\"] is not None and\n two_hour_result[\"Channel2HrForecast\"][\"Item\"] is not None and\n two_hour_result[\"Channel2HrForecast\"][\"Item\"][\"WeatherForecast\"] is not None and\n two_hour_result[\"Channel2HrForecast\"][\"Item\"][\"WeatherForecast\"][\"Area\"] is not None):\n self._data = two_hour_result[\"Channel2HrForecast\"][\"Item\"][\"WeatherForecast\"][\"Area\"]\n\n if (two_hour_result is not None and two_hour_result[\"Channel24HrForecast\"] is not None and\n two_hour_result[\"Channel24HrForecast\"][\"Main\"] is not None):\n self._today_data = two_hour_result[\"Channel24HrForecast\"][\"Main\"]\n\n four_day_forecast = \"https://www.nea.gov.sg/api/Weather4DayOutlook/GetData/\" + \\\n str(int(time.time()))\n self._forecast_data = requests.get(four_day_forecast, timeout=10).json()\n\n self.last_updated = dt_util.utcnow()\n return\n\n except ValueError as err:\n _LOGGER.error(\"Check NEA %s\", err.args)\n self._data = None\n self._forecast_data = None\n self._today_data = None\n raise",
"def event_info_data(event, station):\n origin = event.preferred_origin() or event.origins[0]\n latter = origin.latitude\n lonter = origin.longitude\n startev = origin.time\n depth = origin.depth * 0.001\n\n # set station and channel information\n if station == 'FUR':\n net_s = 'GR'\n sta_s = 'FUR'\n loc_s = ''\n chan2 = 'BHE'\n chan3 = 'BHN'\n chan4 = 'BHZ'\n\n # broadband station signal\n acE = download_data(startev, net_s, sta_s, loc_s, chan2)\n acN = download_data(startev, net_s, sta_s, loc_s, chan3)\n acZ = download_data(startev, net_s, sta_s, loc_s, chan4)\n ac = Stream(traces=[acE[0], acN[0], acZ[0]])\n\n for ca in [ac[0], ac[1], ac[2]]:\n ca.stats.coordinates = AttribDict()\n ca.stats.coordinates['longitude'] = 11.275\n ca.stats.coordinates['latitude'] = 48.163\n ca.stats['starttime'] = startev - 180\n ca.stats['sampling_rate'] = 20.\n\n # theoretical event backazimuth and distance\n baz = gps2dist_azimuth(latter, lonter, ac[0].stats.coordinates.latitude,\n ac[0].stats.coordinates.longitude)\n # great circle distance\n gcdist = locations2degrees(latter, lonter,\n ac[0].stats.coordinates.latitude,\n ac[0].stats.coordinates.longitude)\n\n return latter, lonter, depth, startev, ac, baz, gcdist, \\\n net_s, chan2, chan3, chan4, sta_s, loc_s"
]
| [
"0.66889495",
"0.6452908",
"0.62583035",
"0.62394136",
"0.619208",
"0.6112521",
"0.60889035",
"0.6042705",
"0.6017993",
"0.5981389",
"0.59698784",
"0.5953793",
"0.59492654",
"0.591016",
"0.59028894",
"0.589664",
"0.58665407",
"0.5864966",
"0.58616376",
"0.5854664",
"0.5849501",
"0.58329463",
"0.5792934",
"0.57900023",
"0.5782455",
"0.5775528",
"0.5770754",
"0.5760692",
"0.5758757",
"0.57122177"
]
| 0.71660936 | 0 |
Returns a list of bikes for a provided weekday and station, averaged per hour, so there are 24 results. | def get_bikes_for_weekday(cls, dbsession, weekday, station_id):
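        # Result starts with a header row; one (hour, avg bikes, avg stands) tuple per hour follows.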
station = [("Time", "Available Bikes", "Available Stands")]
station_data = dbsession.query(func.hour(cls.last_update),
func.avg(cls.available_bikes),
func.avg(cls.available_bike_stands)) \
.filter(cls.station_id == station_id,
func.weekday(cls.last_update) == weekday) \
.group_by(func.hour(cls.last_update)) \
.all()
        # This section parses the query result into a readable list.
        # From the docs: extend() appends the contents of seq to the list.
if station_data:
station.extend([(a, float(b), float(c)) for a, b, c in station_data])
else:
station.extend([(0,0,0)])
return station | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bikes_for_week(cls, dbsession, station_id):\n station = [(\"Day\", \"Available Bikes\")]\n station_data = dbsession.query(func.weekday(cls.last_update),\n func.avg(cls.available_bikes)) \\\n .filter(cls.station_id == station_id) \\\n .group_by(func.weekday(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0,0)])\n\n return station",
"def day_avg(rows, day):\r\n\r\n available_bikes = []\r\n available_bike_stands = []\r\n\r\n for row in rows:\r\n # Convert Unix stamp to Numbers 0-6, representing Sunday to Saturday\r\n unix_stamp = time.localtime(int(row[\"last_update\"]))\r\n weekday = int(time.strftime(\"%w\", unix_stamp))\r\n if weekday == day:\r\n available_bikes.append(row[\"available_bikes\"])\r\n available_bike_stands.append(row[\"available_bike_stands\"])\r\n\r\n day_avg_bikes = int(round((sum(available_bikes) / len(available_bikes)), 0))\r\n day_avg_bike_stands = int(round((sum(available_bike_stands) / len(available_bike_stands)), 0))\r\n\r\n return day_avg_bikes, day_avg_bike_stands",
"def week_chart(station_id):\r\n\r\n engine = get_db()\r\n # According to the parameter:station_id\r\n # select the occupancy of the corresponding station from the database.\r\n sql = \"SELECT available_bikes, available_bike_stands, last_update FROM STATION where number={};\".format(station_id)\r\n rows = engine.execute(sql).fetchall()\r\n\r\n week_average_bikes = []\r\n week_average_stands = []\r\n\r\n # The values 0 - 6 in the list day represent the days from Sunday to Saturday\r\n days = [0, 1, 2, 3, 4, 5, 6]\r\n for day in days:\r\n # Invoking the function:day_avg, calculate the average occupancy on a single day, and then add it to the list\r\n week_average_bikes.append(day_avg(rows, day)[0])\r\n week_average_stands.append(day_avg(rows, day)[1])\r\n daily = jsonify(week_average_bikes=week_average_bikes, week_average_stands=week_average_stands)\r\n return daily",
"def get_daily(station_id):\n dailydata = db.session.query(func.avg(DublinBike.available_bike)) \\\n .filter(DublinBike.number == station_id) \\\n .group_by(func.dayofweek(DublinBike.localtime)) \\\n .order_by(func.dayofweek(DublinBike.localtime)) \\\n .all()\n return jsonify([\n {'day': i,\n 'available_bike': float(dailydata[i][0])\n } for i in range(7)\n ])",
"def get_hourly(station_id):\n hourdata = db.session.query(func.avg(DublinBike.available_bike)) \\\n .filter(DublinBike.number == station_id) \\\n .group_by(extract('hour', DublinBike.localtime)) \\\n .order_by(extract('hour', DublinBike.localtime)) \\\n .all()\n return jsonify([\n {'hour': i,\n 'available_bike': float(hourdata[i][0])\n } for i in range(24)\n ])",
"def get_day_average(fsym, tsym, e='all', try_conversion=True, \n avgType='HourVWAP', UTCHourDiff=0):\n\n\t# build url\n\turl = build_url('dayAvg', fsym=fsym, tsym=tsym, e=e, \n\t try_conversion=try_conversion, avgType=avgType, \n\t UTCHourDiff=UTCHourDiff)\n\n\t# http request\n\tr = requests.get(url)\n\n\t# decode to json\n\tdata = r.json()\n\n\t# remove 'ConversionType' information\n\t#del data['ConversionType']\n\t\n\treturn {fsym: data}",
"def mean_by_airline_dow(flights):\n\n return ...",
"def bikes_prediction(station_id,time_hour):\r\n\r\n # get the data through openWeather api\r\n r = requests.get('http://api.openweathermap.org/data/2.5/forecast?appid=9511c6f09bf671d3bd65bf650197234f&q=Dublin')\r\n weathers = r.json()\r\n\r\n weather_detalis = weathers[\"list\"]\r\n temp = weather_detalis[0]['main']['temp']\r\n wind = weather_detalis[0]['wind']['speed']\r\n main = weather_detalis[0]['weather'][0]['main']\r\n weather_Drizzle = 0\r\n weather_Rain = 0\r\n if main == 'Drizzle':\r\n weather_Drizzle = 1\r\n elif main == 'Rain':\r\n weather_Rain = 1\r\n f2 = pd.DataFrame(np.array([station_id, time_hour, temp, wind, weather_Drizzle, weather_Rain])).T\r\n models = {}\r\n # open the folder of model\r\n with open('static/pickle/'+str(station_id) + \".pkl\", \"rb\") as handle:\r\n models[station_id] = pickle.load(handle)\r\n available_bikes_prediction = models[station_id].predict(f2).round()[0]\r\n return jsonify(bp=available_bikes_prediction)",
"def mean_time_weekday_view(user_id):\n #import pdb; pdb.set_trace()\n data = get_data()\n if user_id not in data:\n log.debug('User %s not found!', user_id)\n return []\n\n weekdays = group_by_weekday(data[user_id])\n result = [(calendar.day_abbr[weekday], mean(intervals))\n for weekday, intervals in weekdays.items()]\n\n return result",
"def tafs(station, hours_before_now=24, most_recent=True):\n return aviation_weather('tafs', station, hours_before_now, most_recent)",
"def group_by_weekday(items):\n result = [[], [], [], [], [], [], []] # one list for every day in week\n for date in items:\n start = items[date]['start']\n end = items[date]['end']\n result[date.weekday()].append(interval(start, end))\n return result",
"def get_station_boroughs(self):\\",
"def avg_based_on_forecast(city):\n wparams = { 'city': city,\n 'key': WEATHERBIT_API_KEY\n }\n resp = requests.get(WEATHERBIT_FORECAST_URL, params=wparams)\n alltemps = [farenheit(x['temp']) for x in json.loads(resp.text)['data']]\n return round(sum(alltemps) / len(alltemps))",
"def mean_time_weekday_view(user_id=None):\n data = get_data()\n if not user_id:\n raise abort(400)\n\n if user_id not in data:\n log.debug('User %s not found!', user_id)\n return []\n\n weekdays = group_by_weekday(data[user_id])\n result = [(calendar.day_abbr[weekday], mean(intervals))\n for weekday, intervals in weekdays.items()]\n return result",
"def averages(data, bbox):\n\n # load mapbox\n nb, sb, eb, wb = bbox\n G = ox.graph_from_bbox(nb, sb, eb, wb)\n dist = 0.0001\n edges = ox.utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)\n edges['index'] = range(1, len(edges)+1)\n\n all_data = dict()\n for index, row in data.iterrows():\n date = datetime.fromtimestamp(row['time'])\n print(date)\n if date not in all_data:\n all_data[date] = [row]\n else:\n all_data[date].append(row)\n\n rows = []\n for key, value in all_data.items():\n # get closest point on each segment\n lng = value['long']\n lat = data['lat']\n ne, dist = ox.distance.nearest_edges(G, lng, lat, return_dist=True)\n print(ne)\n \n rows.append({\"\"})",
"def average_tod(self,d,feed_tod,feed_weights,mask): \n\n frequency = d['level1/spectrometer/frequency'][...]\n # This becomes (nBands, 64) \n self.frequency = np.mean(np.reshape(frequency,(frequency.shape[0],frequency.shape[1]//16,16)) ,axis=-1)\n all_tod = np.zeros((8, feed_tod.shape[-1]))\n all_weights=np.zeros((8, feed_tod.shape[-1]))\n all_frequency=np.zeros((8))\n for ichan, (flow,fhigh) in enumerate(zip(np.arange(8)+26,np.arange(8)+27)):\n sel = ((self.frequency >= flow) & (self.frequency < fhigh))\n top = np.sum(feed_tod[sel,:]*feed_weights[sel,:],axis=0)\n bot = np.sum(feed_weights[sel,:],axis=0)\n all_tod[ichan,:] = top/bot\n all_weights[ichan,:] = bot\n all_frequency[ichan] = (fhigh+flow)/2.\n \n diff = all_tod[:,mask]\n N = int(diff.shape[1]//2*2)\n diff = (diff[:,:N:2]-diff[:,1:N:2])\n auto = stats.MAD(diff.T)\n\n amean_rms = np.sqrt(1./np.nanmedian(all_weights[:,mask],axis=1))\n\n # Add the weighted average uncertainties to the auto-rms uncertainties\n all_weights = 1./(1./all_weights + auto[:,None]**2)\n\n return all_tod, all_weights, auto, all_frequency",
"def rideshare_avg_cost_cents(self, since):\n query = \"\"\"\n SELECT\n week as date,\n CAST(\n SUM(n_trips * avg_cost_no_tip_cents)\n / SUM(n_trips)\n as INTEGER) as value\n FROM rideshare\n WHERE week >= ?\n GROUP BY date\n \"\"\"\n cur = self.con.cursor()\n cur.execute(query, (since,))\n rows = rows_to_dicts(cur, cur.fetchall())\n return rows",
"def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)",
"def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations",
"def forecast_means(data):\n\t# collect dates\n\tdate_keys = [x.date() for x in list(data)]\n\t# filter out full days\n\tdays = set([x for x in date_keys if date_keys.count(x) == 8])\n\t# group temperature by dates from the filtered list\n\ttemps_grouped = map(lambda x: [v for (k, v) in data.items() if x == k.date()], list(sorted(days)))\n\t# return a dictionary with dates and mean temperature\n\treturn dict([(x, round(statistics.mean(y), 2)) for x, y in zip(list(sorted(days)), list(temps_grouped))])",
"def find_average(self):\n df = self.find_top_seven_routes()\n # Find the total of the frequency of the top 7 traveled routes\n total =df.sort_values('Frequency', ascending=False).Frequency[:7].sum()\n # Calculate the average by dividing each frequency by the total\n df['average'] = df['Frequency'] / total\n\n return df",
"def generate_averages(self, pickle_path='data/days_statistics.pickle', override_pickle=False):\n\t\tif os.path.isfile(pickle_path) and not override_pickle:\n\t\t\twith open(pickle_path, 'rb') as input_file:\n\t\t\t\tself.averages_weekday, self.averages_weekend = pickle.load(input_file)\n\t\telse:\n\t\t\tn_weekday = list()\n\t\t\tsums_weekday = list()\n\t\t\tn_weekend = list()\n\t\t\tsums_weekend = list()\n\t\t\tfor month in range(12):\n\t\t\t\tself.averages_weekday.append([])\n\t\t\t\tself.averages_weekend.append([])\n\t\t\t\tn_weekday.append([])\n\t\t\t\tsums_weekday.append([])\n\t\t\t\tn_weekend.append([])\n\t\t\t\tsums_weekend.append([])\n\t\t\t\tfor i in range(288):\n\t\t\t\t\tself.averages_weekday[month].append(0)\n\t\t\t\t\tself.averages_weekend[month].append(0)\n\t\t\t\t\tn_weekday[month].append(0)\n\t\t\t\t\tsums_weekday[month].append(0)\n\t\t\t\t\tn_weekend[month].append(0)\n\t\t\t\t\tsums_weekend[month].append(0)\n\n\t\t\tfor day in self.days:\n\t\t\t\tts = datetime.strptime(day.data['time'].iloc[0], '%Y-%m-%d %H:%M:%S')\n\t\t\t\tif ts.strftime('%Y-%m-%d') not in self.bad_dates:\n\t\t\t\t\tfor index, row in day.data.iterrows():\n\t\t\t\t\t\tmonth = row['month']-1\n\t\t\t\t\t\tday_id = self.get_list_id(row['hour'], row['minute'])\n\t\t\t\t\t\tif row['day_of_week'] < 5:\n\t\t\t\t\t\t\tsums_weekday[month][day_id] += int(row['pool'])\n\t\t\t\t\t\t\tn_weekday[month][day_id] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsums_weekend[month][day_id] += int(row['pool'])\n\t\t\t\t\t\t\tn_weekend[month][day_id] += 1\n\n\t\t\tfor month in range(12):\n\t\t\t\tfor i in range(288):\n\t\t\t\t\tif n_weekday[month][i] > 0:\n\t\t\t\t\t\tself.averages_weekday[month][i] = sums_weekday[month][i]/n_weekday[month][i]\n\t\t\t\t\tif n_weekend[month][i] > 0:\n\t\t\t\t\t\tself.averages_weekend[month][i] = sums_weekend[month][i]/n_weekend[month][i]\n\n\t\t\twith open(pickle_path, 'wb') as f:\n\t\t\t\tpickle.dump([self.averages_weekday, self.averages_weekend], f)",
"def hourly():\n\n averages = []\n interactions = 0\n\n # What's the 24 for?\n for i in range(25):\n try:\n hfile = open(common.stats_path + '/hourly-average/' + str(i))\n data = hfile.read()\n\n if i == 24:\n interactions = int(data)\n else:\n averages.append(int(data))\n\n hfile.close()\n except IOError:\n if i < 24:\n averages.append(0)\n\n return {'averages': averages, 'interactions': interactions}",
"def get_btcoin_day_data(self):\n cursor = self.__connect().cursor()\n limit = (str(int(time.time() - 24*60*60)),)\n hashdata = []\n rewarddata = []\n summ = 0\n for row in cursor.execute('SELECT * from btcoin where key > ? ORDER BY key ASC', limit):\n date = int(row[0])\n hashrate = str(row[1])\n hashrate = self.convert_hashrate_to_float(hashrate)\n summ = summ + hashrate\n reward = float(row[2])\n hashdata.append([date, hashrate])\n rewarddata.append([date, reward])\n cursor.close()\n self.__disconnect()\n if len(hashdata) != 0:\n hashaverage = summ / len(hashdata)\n return (hashaverage, hashdata, rewarddata)\n else:\n return (-1, hashdata, rewarddata)",
"def road_day_hour(dataframe,list_roads,day,hour):\n list_pred = []\n\n for road in list_roads:\n inter = dataframe[(dataframe['routes'] == road) \\\n & (dataframe['day_of_the_week'] == day) \\\n & (dataframe['hour'] == hour)]\n result = inter['collision_severity'].mean()\n list_pred.append(result)\n dict_pred = {'names':list_roads,'collision_severity':list_pred}\n df_pred = pd.DataFrame(dict_pred)\n\n return df_pred",
"def get_average_for_month_at_time(self, month, hour, minute, weekend):\n\t\tif weekend:\n\t\t\treturn self.averages_weekend[month][self.get_list_id(hour, minute)]\n\t\telse:\n\t\t\treturn self.averages_weekday[month][self.get_list_id(hour, minute)]",
"def get_data(self):\n data = list(IgnitionRow.objects.all().order_by('-pub_date')[:self.num_ticks].values())\n two_hours = data[::-1] # The most recent two hours of data\n# print([elem['avg_pot_5'] for elem in two_hours])\n# avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) / (int(key) / 100) for elem in two_hours]\n avg_pot_data = [[float(elem['avg_pot_{}'.format(key)]) for elem in two_hours] \n \tfor key in self.keys]\n# print(avg_pot_data[0][-5:])\n avg_pot_data = [[max(min(elem, 100),0) for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n avg_pot_data = [[elem if elem != 100 else 0 for elem in arr] for arr in avg_pot_data] # Assume a max pot size of 2000 BBs\n# print(avg_pot_data[0][-5:])\n return avg_pot_data",
"def get_data_for_day(i,t0):\n t0 = UTCDateTime(t0)\n\n # open clients\n client = FDSNClient(\"GEONET\")\n client_nrt = FDSNClient('https://service-nrt.geonet.org.nz')\n \n daysec = 24*3600\n data_streams = [[2, 5], [4.5, 8], [8,16]]\n names = ['rsam','mf','hf']\n\n # download data\n datas = []\n try:\n site = client.get_stations(starttime=t0+i*daysec, endtime=t0 + (i+1)*daysec, station='WIZ', level=\"response\", channel=\"HHZ\")\n except FDSNNoDataException:\n pass\n\n try:\n WIZ = client.get_waveforms('NZ','WIZ', \"10\", \"HHZ\", t0+i*daysec, t0 + (i+1)*daysec)\n \n # if less than 1 day of data, try different client\n if len(WIZ.traces[0].data) < 600*100:\n raise FDSNNoDataException('')\n except ObsPyMSEEDFilesizeTooSmallError:\n return\n except FDSNNoDataException:\n try:\n WIZ = client_nrt.get_waveforms('NZ','WIZ', \"10\", \"HHZ\", t0+i*daysec, t0 + (i+1)*daysec)\n except FDSNNoDataException:\n return\n\n # process frequency bands\n WIZ.remove_sensitivity(inventory=site)\n data = WIZ.traces[0].data\n ti = WIZ.traces[0].meta['starttime']\n # round start time to nearest 10 min increment\n tiday = UTCDateTime(\"{:d}-{:02d}-{:02d} 00:00:00\".format(ti.year, ti.month, ti.day))\n ti = tiday+int(np.round((ti-tiday)/600))*600\n N = 600*100 # 10 minute windows in seconds\n Nm = int(N*np.floor(len(data)/N))\n for data_stream, name in zip(data_streams, names):\n filtered_data = bandpass(data, data_stream[0], data_stream[1], 100)\n filtered_data = abs(filtered_data[:Nm])\n datas.append(filtered_data.reshape(-1,N).mean(axis=-1)*1.e9)\n\n # compute dsar\n data = cumtrapz(data, dx=1./100, initial=0)\n data -= np.mean(data)\n j = names.index('mf')\n mfd = bandpass(data, data_streams[j][0], data_streams[j][1], 100)\n mfd = abs(mfd[:Nm])\n mfd = mfd.reshape(-1,N).mean(axis=-1)\n j = names.index('hf')\n hfd = bandpass(data, data_streams[j][0], data_streams[j][1], 100)\n hfd = abs(hfd[:Nm])\n hfd = hfd.reshape(-1,N).mean(axis=-1)\n dsar = mfd/hfd\n datas.append(dsar)\n names.append('dsar')\n\n # write out temporary file\n datas = np.array(datas)\n time = [(ti+j*600).datetime for j in range(datas.shape[1])]\n df = pd.DataFrame(zip(*datas), columns=names, index=pd.Series(time))\n df.to_csv('_tmp/_tmp_fl_{:05d}.dat'.format(i), index=True, index_label='time')",
"def test_basic_daily_mean(self):\n self.testInst.bounds = self.bounds1\n ans = avg.mean_by_day(self.testInst, 'dummy4')\n assert np.all(ans == 86399 / 2.0)\n\n return",
"def fig_average(f, power, bad, gooddays, ncomp, key=''):\n\n c11 = power.c11\n c22 = power.c22\n cZZ = power.cZZ\n cPP = power.cPP\n bc11 = bad.c11\n bc22 = bad.c22\n bcZZ = bad.cZZ\n bcPP = bad.cPP\n\n if ncomp == 2:\n ccs = [cZZ, cPP]\n bcs = [bcZZ, bcPP]\n title = ['Average HZ, Station: '+key,\n 'Average HP, Station: '+key]\n elif ncomp == 3:\n ccs = [c11, c22, cZZ]\n bcs = [bc11, bc22, bcZZ]\n title = ['Average H1, Station: '+key,\n 'Average H2, Station: '+key,\n 'Average HZ, Station: '+key]\n else:\n ccs = [c11, c22, cZZ, cPP]\n bcs = [bc11, bc22, bcZZ, bcPP]\n title = ['Average H1, Station: '+key,\n 'Average H2, Station: '+key,\n 'Average HZ, Station: '+key,\n 'Average HP, Station: '+key]\n\n # Extract only positive frequencies\n faxis = f > 0\n\n plt.figure()\n for i, (cc, bc) in enumerate(zip(ccs, bcs)):\n ax = plt.subplot(ncomp, 1, i+1)\n ax.semilogx(\n f[faxis], utils.smooth(np.log(cc)[faxis], 50), 'k', lw=0.5)\n if np.sum(~gooddays) > 0:\n ax.semilogx(\n f[faxis], utils.smooth(np.log(bc)[faxis], 50), 'r', lw=0.5)\n ax.set_title(title[i], fontdict={'fontsize': 8})\n if i == len(ccs)-1:\n plt.xlabel('Frequency (Hz)', fontdict={'fontsize': 8})\n plt.tight_layout()\n\n return plt"
]
| [
"0.77892643",
"0.71780616",
"0.69516826",
"0.67270637",
"0.6636598",
"0.6216508",
"0.59292334",
"0.56938154",
"0.5609472",
"0.554261",
"0.55170375",
"0.5512873",
"0.546571",
"0.53780615",
"0.53184694",
"0.5302574",
"0.52739406",
"0.52701545",
"0.52644885",
"0.5257001",
"0.52560484",
"0.52453166",
"0.521423",
"0.51778686",
"0.50541556",
"0.5025394",
"0.5013763",
"0.5000242",
"0.4993352",
"0.4987535"
]
| 0.84364945 | 0 |
finds days where there was wet weather. | def findWetWeatherDays(self, dbsession, today):
    wetDays = dbsession.query(self.dt).filter(or_(self.weather_description == "light rain", self.weather_description == "moderate rain")).all()
    # if one of those wet days falls on today's weekday, return it;
    # otherwise fall through and return the first wet day found.
    for i in range(len(wetDays)):
        if today == wetDays[i][0].weekday():
            return wetDays[i][0]
    return wetDays[0][0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_typical_days(weather_data, cfg):\n settings = cfg['settings']\n # Flag to determine if any holidays have been found:\n interpolation_freq = pd.Timedelta(settings['intervall'])\n flag_holidays_found = False\n\n # --- Season --------------------------------------------------------------\n # The 'season' (transition, summer or winter) is defined by the daily\n # average of the ambient temperature.\n\n # Resample ambient temperatures in DataFrame to days and take mean\n tamb_avg_list = weather_data['TAMB'].resample('D', label='right',\n closed='right').mean()\n\n # Write the daily mean values to all original time steps\n tamb_avg_list = tamb_avg_list.reindex(weather_data.index)\n tamb_avg_list.fillna(method='backfill', inplace=True)\n\n season_list = []\n\n # The VDI 4655 default heat limit is 15°C (definition of summer days).\n # For low- and zero-energy houses, the average daily temperatures have\n # to be adapted to the actual conditions. (see VDI 4655, page 15)\n Tamb_heat_limit = settings.get('Tamb_heat_limit', 15) # °C\n\n # Read through list of temperatures line by line and apply the definition\n for tamb_avg in tamb_avg_list:\n if tamb_avg < 5:\n season_list.append('W') # Winter\n elif tamb_avg > Tamb_heat_limit:\n season_list.append('S') # Summer\n else:\n season_list.append('U') # Übergang (Transition)\n\n # Alternative season determination method:\n # From 'BDEW Standardlastprofile':\n season_list_BDEW = get_season_list_BDEW(weather_data)\n\n # Save the results in the weather_data DataFrame\n weather_data['TAMB_d'] = tamb_avg_list\n if settings.get('use_BDEW_seasons', False) is False:\n weather_data['season'] = season_list\n elif settings.get('use_BDEW_seasons', False) is True:\n weather_data['season'] = season_list_BDEW\n weather_data['season'].replace(to_replace={'Winter': 'W',\n 'Sommer': 'S',\n 'Übergangszeit': 'U'},\n inplace=True)\n\n # Store the BDEW seasons separately\n weather_data['season_BDEW'] = season_list_BDEW\n\n steps_per_day = 24 / (interpolation_freq.seconds / 3600.0)\n settings['steps_per_day'] = steps_per_day\n logger.debug('Number of days in winter: ' +\n str(season_list.count('W')/steps_per_day))\n logger.debug('Number of days in summer: ' +\n str(season_list.count('S')/steps_per_day))\n logger.debug('Number of days in transition: ' +\n str(season_list.count('U')/steps_per_day))\n\n # Use https://pypi.org/project/holidays/ for holiday-detection\n used_holidays = []\n if settings.get('holidays'):\n country = settings['holidays'].get('country', 'DE')\n province = settings['holidays'].get('province', None)\n used_holidays = holidays.country_holidays(country, subdiv=province)\n\n # Read through list of days line by line and see what kind of day they are.\n # Problem: In the weather data, the bins are labeled on the 'right'\n # (Each time stamp describes the interval before). Therefore the time stamp\n # midnight (00:00:00) describes the last interval of the day before.\n # However, asking for the weekday of a midnight time stamp gives the name\n # of the next day. 
Thus the resulting list of weekdays is shifted by one\n # time step.\n weekdays_list = []\n weekdays_list_BDEW = []\n for date_obj in weather_data.index:\n if date_obj.dayofweek == 6: # 6 equals Sunday\n weekdays_list.append('S')\n weekdays_list_BDEW.append('Sonntag')\n elif date_obj in used_holidays:\n weekdays_list.append('S')\n weekdays_list_BDEW.append('Sonntag')\n flag_holidays_found = True\n elif date_obj.dayofweek == 5: # 5 equals Saturday\n weekdays_list.append('W')\n weekdays_list_BDEW.append('Samstag')\n else:\n weekdays_list.append('W')\n weekdays_list_BDEW.append('Werktag')\n\n # Solution to problem: We take the first list entry, then add the rest of\n # the list minus the very last entry.\n weather_data['weekday'] = [weekdays_list[0]] + weekdays_list[:-1]\n weather_data['weekday_BDEW'] = [weekdays_list_BDEW[0]] + \\\n weekdays_list_BDEW[:-1]\n\n # Print a warning, if necessary\n if flag_holidays_found is False:\n logger.warning('Warning! No holidays were found for the chosen time!')\n\n # --- Cloud cover amount --------------------------------------------------\n ccover_avg_list = weather_data['CCOVER'].resample('D', label='right',\n closed='right').mean()\n ccover_avg_list = ccover_avg_list.reindex(weather_data.index)\n ccover_avg_list.fillna(method='backfill', inplace=True)\n # The interpolation to 15min may cause a slight difference of daily means\n # compared to 60min, in rare cases shifting from >5.0 to <5.0.\n # Rounding to the first decimal place may prevent this issue.\n ccover_avg_list = ccover_avg_list.round(decimals=1)\n\n # Read through list of cloud cover line by line and apply the definition\n cloudy_list = []\n for ccover_avg in ccover_avg_list:\n if (ccover_avg < 5.0):\n cloudy_list.append('H')\n else:\n cloudy_list.append('B')\n\n weather_data['cloudy'] = cloudy_list\n\n # Combine the gathered information from season, weekday and cloudyness\n # into one 'typtag' key\n weather_data['typtag'] = weather_data['season'] + \\\n weather_data['weekday'] + weather_data['cloudy']\n\n # For summer days, the VDI 4655 makes no distinction in terms of cloud\n # amount. So we need to replace 'heiter' and 'bewölkt' with 'X'\n typtage_replace = {'typtag':\n {'SWH': 'SWX', 'SWB': 'SWX', 'SSH': 'SSX', 'SSB': 'SSX'}\n }\n weather_data.replace(to_replace=typtage_replace, inplace=True)",
"def _do_checkWeather(self, mjd, w, config):\n # Convert mjd to the relevant time units of the weather dates.\n time = (mjd - config['sim_start'] + config['%s_start' %(w)]) * _day2sec\n # And wrap the time, if we need to. \n time = time % self.maxtime[w]\n # Find the observations which are closest in time to our requested time.\n time_order = (abs(self.dates[w] - time)).argsort()\n date1 = self.dates[w][time_order[0]]\n date2 = self.dates[w][time_order[1]]\n weather1 = self.weather[w][time_order[0]]\n weather2 = self.weather[w][time_order[1]]\n # Do interpolation for weather at this particular time.\n weather = (weather2 - weather1) / (date2 - date1) * (time - date1) + weather1\n return weather, weather1",
"def getTodaysWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tweather = {} \n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\t\t\n\t\t# Getting todays weather data and populating the dictionary\n\t\tif fio.has_daily() is True and fio.has_hourly() is True:\n\t\t daily = FIODaily.FIODaily(fio)\n\t\t hourly = FIOHourly.FIOHourly(fio)\n\t\t for day in xrange(0, 1):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(daily.get_day(day)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\tif item == \"sunsetTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"sunriseTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\tfor item in hourly.get_hour(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[\"current\"] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"town\"] = self.helper.getCoords(keyword)[2]\n\t\telse:\n\t\t\treturn 'No Todays data'\n\n\t\treturn weather",
"def _checkWeather(self, mjd, w, config):\n # Convert mjd to the relevant time units of the weather dates. \n time = (mjd - config['sim_start'] + config['%s_start' %(w)]) * _day2sec\n # And wrap the time, if we need to. \n time = time % self.maxtime[w]\n # Then use numpy interp to find the weather values for all of our times (works for single values or for arrays). \n # Requires that the 'weather' times are monotonically increasing, but this is true. (and checked when weather read in).\n # Find the *interpolated* weather values\n values = numpy.interp(time, self.dates[w], self.weather[w])\n return values",
"def forecast_weather(self):\n pass",
"def what_night_is_it():\n d = datetime.datetime.utcnow() - datetime.timedelta(7 / 24 + 0.5)\n tonight = int(d.strftime('%Y%m%d'))\n return tonight",
"def calculate_days(self):\n tweet_time = self.data['created_at']\n birthday = self.data['user']['created_at']\n my_dates = {\"Jan\": 1, \"Feb\": 2, \"Mar\": 3, \"Apr\": 4, \"May\": 5, \"Jun\": 6, \"Jul\": 7, \"Aug\": 8, \"Sep\": 9, \"Oct\": 10,\n \"Nov\": 11, \"Dec\": 12}\n # This could have easily been cast into one of the numerous datetime function's immediately, however\n # it was causing a major slowdown to the program and so the below was a quick fix.\n ######################################################################\n # NOTICE: IF SOMETHING BREAKS THIS IS MOST LIKELY TO BE WHAT IT IS #\n ######################################################################\n tweet_time2 = [my_dates[tweet_time[4:7]], int(tweet_time[8:10]), int(tweet_time[26:])]\n birthday2 = [my_dates[birthday[4:7]], int(birthday[8:10]), int(birthday[26:])]\n first = date(tweet_time2[2], tweet_time2[0], tweet_time2[1])\n second = date(birthday2[2], birthday2[0], birthday2[1])\n final = first - second\n days = final.days\n follows = self.data['user']['followers_count']\n favorites = self.data['user']['favourites_count']\n statuses = self.data['user']['statuses_count']\n favpd = favorites/days\n folpd = follows/days\n statpd = statuses/days\n return {\"days\": final.days, \"folpd\": folpd, \"favpd\": favpd, \"statpd\": statpd}",
"def get_number_days(self):\r\n return 1",
"def getDailyWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tdaily_weather = []\n\t\tweather = {}\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\t# Getting 4-day forecast, storing each day's data in a dictionary and\n\t\t# storing each dictionary in an array\n\t\tif fio.has_daily() is True:\n\t\t\tdaily = FIODaily.FIODaily(fio)\n\t\t\tfor day in xrange(0, 4):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\t\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tdaily_weather.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No Daily data'\n\t\treturn daily_weather",
"def get_seven_days_stat(cls):\n return cls.get_specified_days_stat(7)",
"def find_streekday_(self):\n # streekdays pattern repeats every 1280 years:\n epoch = self.year % 1280\n # ...and all 40 years if we ignore the 128-year rule.\n subepoch = epoch % 40\n year_offset = None\n if subepoch in (2,4,21,23):\n year_offset = 0\n elif subepoch in (6,8,25,27):\n year_offset = 1\n elif subepoch in (10,12,29,31):\n year_offset = 2\n elif subepoch in (14,16,33,35):\n year_offset = 3\n elif subepoch in (18,20,37,39):\n year_offset = 4\n elif subepoch in (1,3,22,24):\n year_offset = 5\n elif subepoch in (5,7,26,28):\n year_offset = 6\n elif subepoch in (9,11,30,32): \n year_offset = 7\n elif subepoch in (13,15,34,36):\n year_offset = 8\n elif subepoch in (17,19,38,0):\n year_offset = 9\n year_offset -= math.floor((epoch-1) / 128)\n # another -1 because 0-W-1 is mudday = index 0.\n day_offset = (self.day_in_year + year_offset - 1) % 10\n return day_offset",
"def day(self):\n return 0",
"def day(self):\n return 0",
"def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]",
"def get_day():\n return handle_invalid_inputs(question_4, days)",
"def get_zakopane_daily_weather():\n zakopane = FiveDaysWeatherForecast(location.get(\"zakopane\", \"\"))\n zakopane_weather_detail = zakopane.get_weather_details()\n zakopane_daily_weather_detail = []\n for data in zakopane_weather_detail:\n zakopane_daily_weather_detail.append(data)\n return zakopane_daily_weather_detail",
"def cold_days(templist: list) -> int:\r\n days = 0\r\n for temp in templist:\r\n if temp < 0:\r\n days += 1\r\n return days",
"def _test_find_day(self, days):\n msg = \"Find day in list of %d elements\" % len(days)\n for d in range(0, len(days)):\n self._test_giod(days, days[d], 0,\n d, msg)\n self._test_giod(days, days[d], 1,\n d, msg + \" (next = 1)\")\n self._test_giod(days, days[d], \"next = -1\",\n d, msg)",
"def get_valid_days(self, pid: str = None):\n\n if pid is not None and pid in self.wearables:\n wlist = [self.wearables[pid]]\n else:\n wlist = self.wearables.values()\n\n r = {}\n for wearable in wlist:\n invalid_days = self.get_invalid_days(wearable.get_pid())[wearable.get_pid()]\n all_days = set(wearable.data[wearable.get_experiment_day_col()].unique())\n r[wearable.get_pid()] = all_days - invalid_days\n return r",
"def get_weather(self):\n return self.__weather",
"def compute_real_days(self):\n if (self.end_date > date.today()):\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, date.today())\n else:\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, self.end_date)",
"def days(self) -> Optional[int]:\n return pulumi.get(self, \"days\")",
"def week_days(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"week_days\")",
"def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip",
"def test_get_index_of_day(self):\n days = [\"01.07.2013\",\n \"05.07.2013\",\n \"09.07.2013\",\n \"14.07.2013\",\n \"19.07.2013\"]\n # Find the days\n self._test_find_day(days)\n # Search for a day that is not part of the list\n # 1. A day before the first entry\n self._test_giod(days, \"01.01.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"01.01.2013\", 1,\n 0, \"Find a date before days withe next = 1\")\n self._test_giod(days, \"01.01.2013\", 1,\n 0, \"Find a date before days withe next = -1\")\n # 2. A day after the last entry\n self._test_giod(days, \"01.12.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"01.12.2013\", 1,\n 4, \"Find a date after days with next = 1\")\n self._test_giod(days, \"01.12.2013\", -1,\n 4, \"Find a date after days with next = -1\")\n # 3. A day in the middle\n self._test_giod(days, \"06.07.2013\", 0,\n -1, \"Find not existing day in list\")\n self._test_giod(days, \"06.07.2013\", 1,\n 2, \"Find a date after days with next = 1\")\n self._test_giod(days, \"06.07.2013\", -1,\n 1, \"Find a date after days with next = -1\")",
"def day(self):\n return self._days",
"def getHourlyWind(self, keyword):\n\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, \"f\", \"wind\")\n\t\twind_values = [] # Array that will contain all the wind data\n\t\twind_data = {} # Dictionary of wind data\n\n\t\t# Getting humidity data\n\t\tfor data in weather_data:\n\t\t\twind_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\twind_data[\"y\"] = float(data[\"wind\"].split(\" \")[1])\n\t\t\twind_values.append(wind_data)\n\t\t\twind_data = {}\n\n\t\treturn wind_values",
"def get_weather(phenny, input):\n import wunderground\n \n report_type = 'conditions'\n\n unicode_input = unicode(input)\n if unicode_input[1:8] == 'weather':\n location_str = unicode_input[9:]\n elif unicode_input[1:3] == 'w ':\n location_str = unicode_input[3:]\n try:\n json_data = wunderground.format_json(location_str, input.weather_API, report_type)\n output_results(phenny, json_data)\n except Exception, e:\n print e\n phenny.say('Could not find results for \"%s\", please reword the search and try again.' % location_str)",
"def get_weather_report(takeoff,weather):\n # HINT: Looping through the dictionary is VERY slow because it is so large\n # You should convert the takeoff time to an ISO string and search for that first.\n # Only loop through the dictionary as a back-up if that fails.\n \n # Search for time in dictionary\n # As fall back, find the closest time before takeoff\n \n from dateutil.parser import parse\n \n result = []\n takeofftime = takeoff.isoformat()\n \n if takeofftime in weather.keys():\n result = weather[takeofftime]\n \n elif takeofftime not in weather.keys():\n weatherlist = list(weather.keys())\n count = len(weatherlist)\n for m in weatherlist[::-1]:\n if m < takeofftime:\n result = weather[m]\n \n else: \n result = None\n \n \n return result",
"def flag_day_without_diary(self):\n\n for wearable in self.wearables.values():\n tst = wearable.get_total_sleep_time_per_day(based_on_diary=True)\n # Gets the experiment days with 0 total sleep time (i.e., no diary entry)\n invalid_days = set(tst[tst[\"hyp_diary_sleep\"] == 0].index)\n # Flag them as invalid\n if len(invalid_days):\n wearable.data.loc[\n wearable.data[wearable.get_experiment_day_col()].isin(\n invalid_days), self.invalid_col] |= InvCode.FLAG_DAY_WITHOUT_DIARY"
]
| [
"0.6345522",
"0.62267625",
"0.620805",
"0.6172952",
"0.6154723",
"0.6078286",
"0.59555215",
"0.5860357",
"0.5847305",
"0.5832294",
"0.5819031",
"0.5799269",
"0.5799269",
"0.57958776",
"0.5793687",
"0.57477695",
"0.57152104",
"0.570199",
"0.57016325",
"0.5699181",
"0.5681354",
"0.56812596",
"0.56728905",
"0.56591123",
"0.5652579",
"0.564846",
"0.5640469",
"0.5631017",
"0.5624224",
"0.56214917"
]
| 0.7885582 | 0 |
For two participants, at most one channel can be opened | def test_max_1_channel(
token_network: Contract, get_accounts: Callable, create_channel: Callable
) -> None:
(A, B) = get_accounts(2)
create_channel(A, B)
with pytest.raises(TransactionFailed, match="TN/open: channel exists for participants"):
token_network.functions.openChannel(A, B).call()
with pytest.raises(TransactionFailed, match="TN/open: channel exists for participants"):
token_network.functions.openChannel(B, A).call() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def single_channel():\n return True",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if \"user\" in chan and chan['user'] == user:\n return True\n return False",
"def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False",
"def test_channel_list1():\n reset_data()\n user1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"[email protected]\")\n owner1 = auth_register(\"123eff45\", \"xxx\", \"yyyy\", email=\"[email protected]\")\n channel1_1 = channels_create(owner1['token'], \"channel1\", True)['channel_id']\n channel_join(user1['token'], channel1_1)\n channel_list1 = channels_list(user1['token'])\n channels = [channel['channel_id'] for channel in channel_list1]\n assert channels == [channel1_1]\n print(\"=========pass test1 : only one channel in channel_list========\")",
"def test_channel_join():\n\n # Clear the data structure\n clear_v1()\n \n\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", True)\n \n \n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])\n\n # Black box testing version in waiting\n # Check if the user is successfully added to the channel data frame\n assert channels_list_v2(auth_token2) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }",
"def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass",
"def open_channel(self):\n # LOGGER.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_task_open)\n self._connection.channel(on_open_callback=self.on_channel_ctrl_open)",
"def joinedChannel(self, channel, users):\n pass",
"def test_channel_join_already_in_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n with pytest.raises(AccessError):\n channel_join(user['token'], userchannel_id['channel_id'])",
"def test_channel_join_normal_case():\n \n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', True) \n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def test_open_via_channel(testchannel, callit):\n\n channel = testchannel.channel() if callit else testchannel.channel\n\n with channel as t:\n assert t.state == ChannelState.open\n\n assert testchannel.state == ChannelState.closed",
"def test_channel_join_private_global():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", False)\n\n\n # Global DREAM owner attempt to join a private channel \n channel_join_v2(auth_token1, channel_id1[\"channel_id\"])\n\n # Check if the global owner successfully join private channel\n assert channels_list_v2(auth_token1) == {\n 'channels': [\n \t{\n \t\t'channel_id': 1, # channel id start at 1 or 0 is worth checking ? It's currently start at 1.\n \t\t'name': 'Chill Soc',\n \t}\n ],\n }",
"def test_channel_join_private_owner():\n clear()\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n userchannel_id = channels_create(user['token'], 'userchannel', False)\n channel_join(joiner['token'], userchannel_id['channel_id']) \n randChannel_details = channel_details(user['token'], userchannel_id['channel_id'])\n assert(randChannel_details['all_members'] == [\n {\n 'u_id' : user['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n },\n {\n 'u_id' : joiner['u_id'],\n 'name_first' : 'first',\n 'name_last' : 'last',\n 'profile_img_url': ''\n }\n ])",
"def test_channel_join_except_repetitive():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token2, \"Chill Soc\", True)\n\n\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])",
"def connect(self):\n\n label = self.scope[\"url_route\"][\"kwargs\"][\"label\"]\n self.user = self.scope[\"user\"]\n\n try:\n room = Relationship.objects.get(label=label)\n except Relationship.DoesNotExist:\n log.warning('No relationship have this label=%s', label)\n self.close()\n return\n except Exception as error:\n log.error(\"建立聊天室channel時發生錯誤: %s\" % error)\n self.close()\n return\n\n if not (room.client == self.user or room.performer == self.user):\n log.warning(\n '%s try to connect to the relationship that not belog to him', self.user)\n self.close()\n return\n\n self.scope[\"room\"] = room\n # Accept the incoming connection\n self.accept()\n\n async_to_sync(self.channel_layer.group_add)(\n \"chat\" + str(label), self.channel_name)",
"def test_new_channel(self):\n pattern = \"test.?.foo.?\"\n name1 = channel_layer.new_channel(pattern)\n self.assertIsInstance(name1, six.text_type)\n # Send a message and make sure new_channel on second pass changes\n channel_layer.send(name1, {\"value\": \"blue\"})\n name2 = channel_layer.new_channel(pattern)\n # Make sure the two ?s are replaced by the same string\n bits = name2.split(\".\")\n self.assertEqual(bits[1], bits[3], \"New channel random strings don't match\")\n # Make sure we can consume off of that new channel\n channel, message = channel_layer.receive_many([name1, name2])\n self.assertEqual(channel, name1)\n self.assertEqual(message, {\"value\": \"blue\"})",
"def open_channel(self):\n self.logger.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)",
"def open_channel(self):\n logger.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)",
"def is_channel(self):\n return True",
"def open_channel(self):\n self.logger.info('creating channel')\n self._connection.channel(on_open_callback=self.on_channel_opened)",
"async def managechannels(self, ctx:commands.Context):",
"def test_one_channel(self):\n\n # Add channel\n self.create_channel()\n\n # There should be one channel\n response = self.client.get('/api/channels/')\n self.assertEquals(response.json['total-channels'], 1)\n self.assertEquals(len(response.json['channels']), 1)",
"def test_open_alreadyopen(testchannel, state):\n\n testchannel._state = state\n with pytest.raises(ChannelOpenError):\n testchannel.open()",
"def test_channel_join_except_private():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channel_id1 = channels_create_v2(auth_token1, \"Chill Soc\", False)\n \n with pytest.raises(AccessError):\n channel_join_v2(auth_token2, channel_id1[\"channel_id\"])",
"def channel(self):\n raise NotImplementedError",
"async def joinchannel(self, ctx: commands.Context, *channels: str):\n for channel in channels:\n channel_query = self._channel_query(channel)\n\n if channel_query == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n channel = self.bot.get_channel(channel_query.id)\n guild = self.bot.get_guild(SERVER_ID)\n member = guild.get_member(ctx.author.id)\n\n if channel == None:\n await ctx.send(f\"Unable to join {channel}.\")\n continue\n\n # Don't let a user join the channel again if they are already in it.\n if channel.permissions_for(member).is_superset(JOINED_PERMISSIONS):\n await ctx.send(f\"You're already a member of {channel}.\")\n continue\n\n await channel.set_permissions(member, read_messages=True, reason=\"UQCSbot added.\")\n join_message = await channel.send(f\"{member.display_name} joined {channel.mention}\")\n await join_message.add_reaction(\"👋\")\n await ctx.send(f\"You've joined {channel.mention}.\")",
"def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"[email protected]\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"[email protected]\", \"555555\", \"harry\", \"potter\")\n\n auth_token1 = auth_dict1[\"token\"]\n auth_token2 = auth_dict2[\"token\"]\n\n channels_create_v2(auth_token1, \"Chill Soc\", True)\n invalid_channel = 50\n \n with pytest.raises(InputError):\n channel_join_v2(auth_token2, invalid_channel)",
"def test_channel_join_invalid_channel():\n clear()\n user = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n joiner = auth_register('[email protected]', '123abc!@#', 'first', 'last')\n channels_create(user['token'], 'userchannel', True)\n invalid_id = 0\n with pytest.raises(InputError):\n channel_join(joiner['token'], invalid_id)",
"def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))",
"def _open(self):\n if self.channel is None:\n self.channel = self.transport.open_session()\n\n return self.channel"
]
| [
"0.68654567",
"0.6490941",
"0.6464989",
"0.63985145",
"0.6327629",
"0.6311468",
"0.6309975",
"0.6291697",
"0.6276977",
"0.62615794",
"0.6125024",
"0.6120147",
"0.60907483",
"0.6057845",
"0.6023134",
"0.59909755",
"0.5980023",
"0.5967421",
"0.5960192",
"0.5923796",
"0.59115165",
"0.58993405",
"0.587587",
"0.5872713",
"0.58579516",
"0.5837372",
"0.5833866",
"0.5815155",
"0.5806764",
"0.5802371"
]
| 0.6518068 | 1 |
getParticipantsHash() behaves as get_participants_hash | def test_participants_hash(token_network: Contract, get_accounts: Callable) -> None:
(A, B) = get_accounts(2)
AB_hash = get_participants_hash(A, B)
assert token_network.functions.getParticipantsHash(A, B).call() == AB_hash
assert token_network.functions.getParticipantsHash(B, A).call() == AB_hash | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hash(self, composition):\n return",
"def __hash__(self):\n return hash((self.member_role, self.member_type, self.member_email))",
"def get_hash(self):\r\n return",
"def get_hash(self):\n return freeze_dict(self.get_hash_params())",
"def get_hash(self):\n return self.__hash",
"def test_participants_hash_equal(token_network: Contract, get_accounts: Callable) -> None:\n (A,) = get_accounts(1)\n\n with pytest.raises(ValueError):\n get_participants_hash(A, A)\n with pytest.raises(TransactionFailed, match=\"TN: identical addresses\"):\n token_network.functions.getParticipantsHash(A, A).call()",
"def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())",
"def hash_key(self):",
"def hash(self) -> bytes:",
"def get_data_hash(args):\n pass",
"def hash(self) -> str:\r\n ...",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))",
"def __hash__(self):\n return hash((self.get_first_name() + self.get_last_name() + self.get_birth_date()))",
"def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()",
"def _generate_hash(\n cls,\n recipient_id: str,\n email_subject: str,\n email_body: str\n ) -> str:\n hash_value = utils.convert_to_hash(\n recipient_id + email_subject + email_body,\n 100)\n\n return hash_value",
"def HashValue(self) -> _n_0_t_3[_n_0_t_9]:",
"def __hash__(self):\n return hash(self.hash)",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def __hash__(self):\n x = xxhash.xxh64()\n x.update(self.puzzle)\n return x.intdigest()",
"def current_hash(self):",
"def innerHash(self) -> str:\r\n\r\n return self.__inner_hash",
"def __Hash(self):\n return self._Hash()",
"def HashAlgorithm(self) -> _n_7_t_0:",
"def get_hash(data, exclude: Iterable[str] = tuple()) -> str:\n if isinstance(data, dict):\n data = {k: v for k, v in data.items() if k not in exclude}\n data_str = srsly.json_dumps(data, sort_keys=True).encode(\"utf8\")\n return hashlib.md5(data_str).hexdigest()",
"def hash(self):\n raise NotImplementedError() # To be subclassed",
"def __hash__(self):\n hash_value = 0\n \n # avatar\n hash_value ^= hash(self.avatar)\n \n # boosts_since\n boosts_since = self.boosts_since\n if (boosts_since is not None):\n hash_value ^= hash(boosts_since)\n \n # flags\n hash_value ^= self.flags\n \n # joined_at\n joined_at = self.joined_at\n if (joined_at is not None):\n hash_value ^= hash(joined_at)\n \n # nick\n nick = self.nick\n if (nick is not None):\n hash_value ^= hash(nick)\n \n # pending\n hash_value ^= self.pending\n \n # role_ids\n role_ids = self.role_ids\n if (role_ids is not None):\n hash_value ^= len(role_ids) << 4\n for role_id in role_ids:\n hash_value ^= role_id\n \n # timed_out_until\n timed_out_until = self.timed_out_until\n if (timed_out_until is not None):\n hash_value ^= hash(timed_out_until)\n \n return hash_value",
"def __hash__(self):\n return self.to_hash()",
"def unique_hash(self):\n raise NotImplementedError(\"unique_hash Method not implemented\")",
"def __hash__(self):\n return hash(self.joined())"
]
| [
"0.63765395",
"0.63318187",
"0.6321352",
"0.6252258",
"0.6228718",
"0.6136036",
"0.613001",
"0.61032784",
"0.6090188",
"0.60368407",
"0.59734964",
"0.5966896",
"0.5864445",
"0.58352035",
"0.58331984",
"0.5813834",
"0.57882416",
"0.57848483",
"0.57782173",
"0.5753588",
"0.5749111",
"0.57335836",
"0.5733332",
"0.57179487",
"0.57039016",
"0.56979007",
"0.5695509",
"0.56871516",
"0.56831",
"0.5677731"
]
| 0.7115849 | 0 |
getParticipantsHash() behaves as get_participants_hash on equal addresses | def test_participants_hash_equal(token_network: Contract, get_accounts: Callable) -> None:
(A,) = get_accounts(1)
with pytest.raises(ValueError):
get_participants_hash(A, A)
with pytest.raises(TransactionFailed, match="TN: identical addresses"):
token_network.functions.getParticipantsHash(A, A).call() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_participants_hash(token_network: Contract, get_accounts: Callable) -> None:\n (A, B) = get_accounts(2)\n\n AB_hash = get_participants_hash(A, B)\n assert token_network.functions.getParticipantsHash(A, B).call() == AB_hash\n assert token_network.functions.getParticipantsHash(B, A).call() == AB_hash",
"def __hash__(self):\n return hash((self.member_role, self.member_type, self.member_email))",
"def __hash__(self):\n return hash((self.get_first_name() + self.get_last_name() + self.get_birth_date()))",
"def get_hash(self):\r\n return",
"def hash(self) -> bytes:",
"def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))",
"def HashValue(self) -> _n_0_t_3[_n_0_t_9]:",
"def __hash__(self):\n return hash(self.joined())",
"def _generate_hash(\n cls,\n recipient_id: str,\n email_subject: str,\n email_body: str\n ) -> str:\n hash_value = utils.convert_to_hash(\n recipient_id + email_subject + email_body,\n 100)\n\n return hash_value",
"def get_hash(self, composition):\n return",
"def hash(self) -> str:\r\n ...",
"def hash_key(self):",
"def hashcode(o):",
"def get_hash(self):\n return self.__hash",
"def __hash__(self):\n return hash((self.begin, self.end))",
"def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())",
"def __hash__(self):\n return hash((self._start, self._end, self._name, self._value))",
"def __hash__(self):\n return hash((self._nele, self._m_s))",
"def __hash__(self) -> int:\n return hash(tuple(self.name,))",
"def get_hash(self):\n return freeze_dict(self.get_hash_params())",
"def deep_hash(obj):\n pass",
"def __hash__(self):\n return hash((super().__hash__(), tuple(self.regions)))",
"def __hash__(self) -> int:",
"def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()",
"def __hash__(self):\n return hash(self.hash)",
"def hash_function(input_tuple):\n return hash(input_tuple)",
"def __hash__(self):\n return 31 * hash(self.head_vertex) + hash(self.tail_vertex)",
"def __hash__(self):\n return self.to_hash()",
"def get_hash(data, exclude: Iterable[str] = tuple()) -> str:\n if isinstance(data, dict):\n data = {k: v for k, v in data.items() if k not in exclude}\n data_str = srsly.json_dumps(data, sort_keys=True).encode(\"utf8\")\n return hashlib.md5(data_str).hexdigest()",
"def get_data_hash(args):\n pass"
]
| [
"0.70927805",
"0.6258063",
"0.6066081",
"0.6022483",
"0.59809077",
"0.5974033",
"0.5940762",
"0.5915285",
"0.5876238",
"0.58748066",
"0.5854341",
"0.5794211",
"0.57636255",
"0.57451165",
"0.57385176",
"0.5730055",
"0.5713905",
"0.5703602",
"0.5702778",
"0.56973827",
"0.56960404",
"0.5687619",
"0.5685792",
"0.5682598",
"0.5678037",
"0.5675012",
"0.56666964",
"0.56608677",
"0.56592953",
"0.5646645"
]
| 0.6733924 | 1 |
Adds a vdd rail at the top of the cell | def route_vdd_rail(self):
# adds the rail across the width of the cell
vdd_position = vector(0, self.height - self.m1_width)
self.add_rect(layer="metal1",
offset=vdd_position,
width=self.width,
height=self.m1_width)
pmos_pin = self.upper_pmos2_inst.get_pin("S")
# center of vdd rail
vdd_pos = vector(pmos_pin.cx(), vdd_position.y + 0.5*self.m1_width)
self.add_path("metal1", [pmos_pin.uc(), vdd_pos])
# Add the M1->M2->M3 stack at the left edge
self.add_via_center(layers=("metal1", "via1", "metal2"),
offset=vdd_pos.scale(0,1))
self.add_via_center(layers=("metal2", "via2", "metal3"),
offset=vdd_pos.scale(0,1))
self.add_layout_pin_rect_center(text="vdd",
layer="metal3",
offset=vdd_pos.scale(0,1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_border(self):\n top = TopWallCell(self)\n left = SideWallCell(self, False)\n right = SideWallCell(self, True)\n for col in range(self._columns):\n self.cell_at(col, self._rows - 1, top)\n for row in range(self._rows):\n self.cell_at(0, row, left)\n self.cell_at(self._columns - 1, row, right)",
"def connect_rug(self):\n for index, value in self.df.loc[\n self.df[f\"highlight_{self.y}\"] == 1\n ].iterrows():\n color = (\n self.fgcolors[0]\n if self.df.loc[index, self.obs] == 0\n else self.fgcolors[1]\n )\n self.ax.hlines(\n y=value[f\"order_{self.y}\"],\n xmin=value[f\"order_{self.x}\"],\n xmax=len(self.df) + self.pad,\n color=color,\n alpha=self.con_alpha,\n zorder=3,\n lw=1.5,\n )\n # vlines for the x-axis.\n for index, value in self.df.loc[\n self.df[f\"highlight_{self.x}\"] == 1\n ].iterrows():\n color = (\n self.fgcolors[0]\n if self.df.loc[index, self.obs] == 0\n else self.fgcolors[1]\n )\n self.ax.vlines(\n x=value[f\"order_{self.x}\"],\n ymin=value[f\"order_{self.y}\"],\n ymax=0 - self.pad,\n color=color,\n alpha=self.con_alpha,\n zorder=3,\n lw=1.5,\n )\n return self",
"def line():\n tt.left(90)\n tt.down()\n tt.forward(50)\n tt.up()\n tt.right(90)\n tt.forward(10)\n tt.right(90)\n tt.forward(50)\n tt.left(90)",
"def add(topcell, subcell, center=(0,0)):\n topcell.add(gdspy.CellReference(subcell, origin=center))",
"def front_wall(self):\n self.place = \"bed\"\n print(\"You are infront of the bed.\"\n \"You look under it and find a notebook.\")\n nb = Notebook('notebook')\n nb.clue()",
"def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]",
"def rug(self):\n self.rax_y = self.ax.inset_axes(bounds=[0.97, 0, 0.03, 1], zorder=-1)\n for index, value in self.df[f\"order_{self.y}\"].items():\n if self.df.loc[index, f\"highlight_{self.y}\"] == 1:\n color_set = self.fgcolors\n zorder = 1\n else:\n color_set = self.bgcolors\n zorder = 0\n color = (\n color_set[0]\n if self.df.loc[index, self.obs] == 0\n else color_set[1]\n )\n self.rax_y.hlines(\n y=value,\n xmin=0,\n xmax=len(self.df),\n color=color,\n alpha=0.5,\n lw=3,\n zorder=zorder,\n )\n self.rax_y.margins(0.02)\n self.rax_y.axis(\"off\")\n # And the x-rug.\n self.rax_x = self.ax.inset_axes(bounds=[0, 0, 1, 0.03], zorder=-1)\n for index, value in self.df[f\"order_{self.x}\"].items():\n if self.df.loc[index, f\"highlight_{self.x}\"] == 1:\n color_set = self.fgcolors\n zorder = 1\n else:\n color_set = self.bgcolors\n zorder = 0\n color = (\n color_set[0]\n if self.df.loc[index, self.obs] == 0\n else color_set[1]\n )\n self.rax_x.vlines(\n x=value,\n ymin=0,\n ymax=len(self.df),\n color=color,\n alpha=0.5,\n lw=3,\n zorder=zorder,\n )\n self.rax_x.margins(0.02)\n self.rax_x.axis(\"off\")\n # Set some space for the rugs.\n xpad, ypad = (0 - self.pad, len(self.df) + self.pad)\n self.ax.set_xlim(xpad, ypad)\n self.ax.set_ylim(xpad, ypad)\n self.rax_y.set_ylim(xpad, ypad)\n self.rax_x.set_xlim(xpad, ypad)\n\n return self",
"def addBL(self):\n self.parent.copyCurrentWinState(self.pltw)\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()",
"def add_move(self, column, side):\n row = 0\n # Find the first occupied cell in this column\n while (row < self.height and self.data[row][column] == \" \"):\n row = row + 1\n # Now add the new peg to the row above it\n row = row - 1\n self.data[row][column] = side",
"def top_code(self, code):\n self.code_for_top = (self.y, code)\n self.y += self.unit*1.5",
"def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top",
"def rokadaV(self, color):\n if(color == 'w'):\n self.data[7][3] = 'wr'\n self.data[7][2] = 'wk'\n self.data[7][4] = '.'\n self.data[7][0] = '.'\n self.kralj_beli_koriscen = True\n self.previous_positions = [7, 2]\n\n else:\n self.data[0][3] = 'br'\n self.data[0][2] = 'bk'\n self.data[0][4] = '.'\n self.data[0][0] = '.'\n self.kralj_crni_koriscen = True\n self.previous_positions = [0, 2]",
"def put_dashboard(self):\n self.sd.putNumber(\"vision_y\", self.range_finder.getDistance())",
"def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False",
"def set_top_border(self, val):\n self.tborder = val",
"def __init__(self):\n super(StandardArrowHead, self).__init__()\n self._length = 10\n self._width = 0.4",
"def update_playhead(self, height):\r\n self.playhead_bar.setLine(0, 0, 0, -height)",
"def add_single_enclosure(self, track):\n pin = self.convert_track_to_pin(track)\n (ll,ur) = pin.rect\n self.cell.add_rect(layer=self.get_layer(track.z),\n offset=ll,\n width=ur.x-ll.x,\n height=ur.y-ll.y)",
"def setColorBarPositionHoriz(pos):\n dislin.vkxbar(pos)",
"def _add_across_and_down(self) -> None:\n numbering = self.puzzle.clue_numbering()\n small_nums = str.maketrans('1234567890', '₁₂₃₄₅₆₇₈₉₀')\n self.clues['across'] = []\n for i in numbering.across:\n row = int(i['cell']/numbering.width)\n col = i['cell'] % numbering.width\n self.cells[(row, col)].num = str(i['num']).translate(small_nums)\n self.clues['across'].append(str(i['num'])+\". \"+i['clue'])\n\n self.clues['down'] = []\n for i in numbering.down:\n row = int(i['cell']/numbering.width)\n col = i['cell'] % numbering.width\n self.cells[(row, col)].num = str(i['num']).translate(small_nums)\n self.clues['down'].append(str(i['num'])+\". \"+i['clue'])",
"def vline(self, x, y, height, color):\n self.rect(x, y, 1, height, color, fill=True)",
"def crown(self):\n self.crowned = True",
"def _render_vertical(self, gc, lx, ly, rx, ry, mx, my):\n mx = lx + (rx - lx) / 2.\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_v(gc, lx, ly, rx, mx, my)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_v(gc, lx, ly, rx, mx, my)",
"def draw_next_column(self):\n self.xPos += self.XCOLUMNSKIP + self.XCOLUMNSEP\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]",
"def moveDown(self):\n currentRow = self.getCurrentRow()\n if currentRow < (self.jobRow.rowCount() - 1):\n rowData = self.removeRow()\n self.insertRow(currentRow + 1, rowData)\n self.layers.insert(currentRow + 1, rowData)\n self.updateDependLabels()",
"def _place_down_row_( self, a_widget, columnspan, rowspan, sticky = None ):\n #print( \"_place_down_row_ still need to make sticky stick !!\")\n if sticky is None:\n sticky = self.sticky\n #rint( f\"_place_down_row_ row = {self.ix_row} col = {self.ix_col}\" )\n a_widget.grid( row = self.ix_row,\n column = self.ix_col,\n rowspan = rowspan,\n sticky = sticky, )\n\n self.ix_row += rowspan\n if ( self.max > 0 ) and ( self.ix_row >= self.max ):\n print( f\"hit max row {self.max}\" )\n self.ix_col += 1\n self.ix_row = 0",
"def arrow_head(self, diag, bbox, tags, size=0.5):\n x, y = bbox.midright()\n x += 0.1\n coords = [[x, y], [x+size, y+size], [x+size, y-size], [x, y]]\n item = self.canvas.line(coords, tags=tags, **self.lineoptions)\n return item",
"def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)",
"def __add_node(self):\n next_pos = self.__get_next_tail_pos(self.__snake[-1])\n next_d = self.__snake[-1].direct()\n node = Snake(self, direction=next_d, size=self.__cell_edge, position=next_pos)\n self.__snake.append(node)",
"def header(self) -> NoReturn:\n self.set_x(self.t_margin + self.b_margin)\n self.ln(self.line_height)"
]
| [
"0.5719361",
"0.54055804",
"0.5304635",
"0.53003937",
"0.52842367",
"0.5275856",
"0.5258827",
"0.5232115",
"0.51538235",
"0.51155037",
"0.50955796",
"0.5081745",
"0.506918",
"0.5068402",
"0.50647056",
"0.50580543",
"0.5054077",
"0.50488394",
"0.50478745",
"0.49961856",
"0.49480549",
"0.4937923",
"0.4926986",
"0.491327",
"0.49132282",
"0.49063152",
"0.48972055",
"0.48885354",
"0.48881587",
"0.48743048"
]
| 0.6683286 | 0 |
Create both the upper_pmos and lower_pmos in the module | def create_ptx(self):
self.lower_pmos_inst=self.add_inst(name="lower_pmos",
mod=self.pmos)
self.connect_inst(["bl", "en", "br", "vdd"])
self.upper_pmos1_inst=self.add_inst(name="upper_pmos1",
mod=self.pmos)
self.connect_inst(["bl", "en", "vdd", "vdd"])
self.upper_pmos2_inst=self.add_inst(name="upper_pmos2",
mod=self.pmos)
self.connect_inst(["br", "en", "vdd", "vdd"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)",
"def place_ptx(self):\n\n # Compute the other pmos2 location, but determining offset to overlap the\n # source and drain pins\n self.overlap_offset = self.pmos.get_pin(\"D\").ll() - self.pmos.get_pin(\"S\").ll()\n \n # adds the lower pmos to layout\n #base = vector(self.width - 2*self.pmos.width + self.overlap_offset.x, 0)\n self.lower_pmos_position = vector(self.bitcell.get_pin(self.bitcell_bl).lx(),\n self.pmos.active_offset.y)\n self.lower_pmos_inst.place(self.lower_pmos_position)\n\n # adds the upper pmos(s) to layout\n ydiff = self.pmos.height + 2*self.m1_space + contact.poly.width\n self.upper_pmos1_pos = self.lower_pmos_position + vector(0, ydiff)\n self.upper_pmos1_inst.place(self.upper_pmos1_pos)\n\n upper_pmos2_pos = self.upper_pmos1_pos + self.overlap_offset\n self.upper_pmos2_inst.place(upper_pmos2_pos)",
"def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)",
"def modules():",
"def _createModuleObj(self):\n ModuleOutputSolnDomain.__init__(self)",
"def _set_boron_ppm_positions(self):\n \n #################################################################################################################################################\n # Sets the boron impurity values in the active fuel region in each autofilled element position\n self.fuel_ppm_positions ={\n 'C___1':'7.6 ppm', 'B___1':'7.6 ppm', 'A___1':'7.6 ppm',\n 'C___2':'7.6 ppm', 'B___2':'7.6 ppm', 'A___2':'7.6 ppm',\n 'C___3':'7.6 ppm', 'B___3':'7.6 ppm', 'A___3':'7.6 ppm',\n }",
"def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")",
"def fix_pms(title, pmra, pmdec):\n pm_dict = {\n 'AquII':[-0.252, 0.011],\n 'BooI':[-0.554, -1.111],\n 'BooII':[-2.686, -0.53],\n 'CVenI':[-0.159, -0.067],\n 'CVenII':[-0.342, -0.473],\n 'CarI':[0.485, 0.131],\n 'CarII':[1.867, 0.082],\n 'CarIII':[3.046, 1.565],\n 'CBerI':[0.471, -1.716],\n 'CraI':[-0.045, -0.165],\n 'CraII':[-0.184, -0.106],\n 'DraI':[-0.012, -0.158],\n 'DraII':[1.242, 0.845],\n 'EriII':[0.159, 0.372],\n 'FnxI':[0.374, -0.401],\n 'GruI':[-0.261, -0.437],\n 'HerI':[-0.297, -0.329],\n 'HorI':[0.891, -0.55],\n 'HyaII':[-0.416, 0.134],\n 'HyiI':[3.733, -1.605],\n 'LeoI':[-0.086, -0.128],\n 'LeoII':[-0.025, -0.173],\n 'LeoIV':[-0.59, -0.449],\n 'LeoV':[-0.097, -0.628],\n 'PhxI':[0.079, -0.049],\n 'PisII':[-0.108, -0.586],\n 'RetII':[2.398, -1.319],\n 'SgrI':[-2.736, -1.357],\n 'SclI':[0.084, -0.133],\n 'Seg1':[-1.697, -3.501],\n 'Seg2':[1.656, 0.135],\n 'SxtI':[-0.438, 0.055],\n 'TriII':[0.588, 0.554],\n 'TucII':[0.91, -1.159],\n 'TucIII':[-0.025, -1.661],\n 'UMaI':[-0.683, -0.72],\n 'UMaII':[1.691, -1.902],\n 'UMiI':[-0.184, 0.082],\n 'Wil1':[0.199, -1.342]\n }\n try:\n pmra, pmdec = pm_dict[title]\n return pmra, pmdec\n except KeyError:\n return pmra, pmdec",
"def init_P_PHM_GIVEN_PHI():\n global P_PHM_GIVEN_PHI\n for i in INTERFACE_LEVEL_ACTIONS: # ui\n P_PHM_GIVEN_PHI[i] = collections.OrderedDict()\n for j in INTERFACE_LEVEL_ACTIONS: # um\n if i == j:\n # try to weight the true command more for realistic purposes. Can be offset by using a high UM_GIVEN_UI_NOISE\n P_PHM_GIVEN_PHI[i][j] = 1.0\n else:\n # P_PHM_GIVEN_PHI[i][j] = np.random.random()*UM_GIVEN_UI_NOISE#IF UM_GIVEN_UI_NOISE is 0, then the p(um|ui) is a deterministic mapping\n P_PHM_GIVEN_PHI[i][j] = 0.0\n\n delta_dist = np.array(P_PHM_GIVEN_PHI[i].values())\n uniform_dist = (1.0 / len(INTERFACE_LEVEL_ACTIONS)) * np.ones(len(INTERFACE_LEVEL_ACTIONS))\n blended_dist = (1 - PHM_GIVEN_PHI_NOISE) * delta_dist + PHM_GIVEN_PHI_NOISE * uniform_dist # np.array\n for index, j in enumerate(INTERFACE_LEVEL_ACTIONS):\n P_PHM_GIVEN_PHI[i][j] = blended_dist[index]",
"def gen_module(root_path, walls_height=3, floor_thickness=.3):\n levels = [gen_level(root_path + lv.rstrip() + '/')(\n floor_thickness = floor_thickness,\n walls_height = walls_height)\n for lv in os.popen('ls ' + root_path)]\n \n walls_hpc = []\n windows_hpc = []\n doors_hpc = []\n handrails_hpc = []\n floors_hpc = []\n stairs_foots = []\n lv = 0\n for walls, windows, doors, handrails, floor, stair_foot in levels:\n level_height = walls_height * lv\n \n walls_hpc.append(T(3)(level_height)(walls))\n windows_hpc.append(T(3)(level_height)(windows))\n doors_hpc.append(T(3)(level_height + floor_thickness)(doors))\n handrails_hpc.append(T(3)(level_height)(handrails))\n floors_hpc.append(T(3)(level_height)(floor))\n \n stairs_foots.append(stair_foot+[level_height])\n \n lv += 1\n \n walls_hpc = UNION(walls_hpc)\n windows_hpc = UNION(windows_hpc)\n doors_hpc = STRUCT(doors_hpc)\n handrails_hpc = UNION(handrails_hpc)\n floors_hpc = UNION(floors_hpc)\n \n cubes_hpc = []\n stairs_hpc = []\n for i in range(0, len(stairs_foots), 2):\n stair, cube = gen_stairs(stairs_foots[i], stairs_foots[i+1])\n cubes_hpc.append(cube)\n stairs_hpc.append(T(3)(floor_thickness)(stair))\n \n stairs_hpc = STRUCT(stairs_hpc)\n \n cubes_hpc = T(3)(floor_thickness)(STRUCT(cubes_hpc))\n floors_hpc = DIFFERENCE([floors_hpc, cubes_hpc])\n \n return STRUCT([\n SKEL_1(walls_hpc),\n windows_hpc,\n doors_hpc,\n handrails_hpc,\n floors_hpc,\n stairs_hpc])",
"def _make_modules(is_train):\n return {\n 'conversion': functools.partial(\n conversion, is_train=is_train, is_extrapolation=False),\n 'time': functools.partial(time, is_train=is_train),\n }",
"def _createModuleObj(self):\n # Create the SWIG module object to provide access to the C++ object.\n ModuleUniformVelModel.__init__(self)\n return",
"def _createModuleObj(self):\n ModuleFaultCohesiveKin.__init__(self)\n return",
"def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)",
"def _createModuleObj(self):\n ModuleTimeWeakening.__init__(self)\n return",
"def get_alm_ps(mapp):\n alm = hp.sphtfunc.map2alm(mapp)\n powerSpec = hp.sphtfunc.anafast(mapp)\n ellArr = np.arange(len(powerSpec))\n return alm, powerSpec, ellArr",
"def set_pipes(lower_pipes, upper_pipes):\n Bird.upper_pipes = upper_pipes\n Bird.lower_pipes = lower_pipes",
"def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)",
"def make_pm_maps(input_file, input_pm_file, output_file, num_cones, num_bins=80, titles=None, mincount=0, maxcount=40, cut=None):\n # get titles for each subplot and dwarf proper motions\n titles, dwarf_pmra, dwarf_pmdec, = load_dwarf_info(input_file, titles)\n\n # load stellar pm values\n ra, dec, pmra, pmdec, parallax, parallax_error = load_gaia_search_info(input_pm_file)\n\n # from table 2 in\n # if titles is not None:\n # titles = fix_names(titles)\n # for i, title, dpmra, dpmdec in enumerate(zip(titles, dwarf_pmra, dwarf_pmdec)):\n # dwarf_pmra[i], dwarf_pmdec[i] = fix_pms(title, dpmra, dpmdec)\n # # dwarf_pmra[5] = 1.81\n # # dwarf_pmra[8] = -1.21\n # # dwarf_pmra[11] = 0.22\n # # dwarf_pmdec[5] = 0.14\n # # dwarf_pmdec[8] = -0.92\n # # dwarf_pmdec[11] = -1.41\n\n # set fig size and shape\n d = len(titles)\n rows = 3\n cols = int(np.ceil(d/rows))\n fig, axs = plot_setup(rows, cols, d)\n max_count = [0, 0]\n\n # plot each dwarf in separate subplots\n for ax, title, dwarfpmra, dwarfpmdec, *data in zip(axs, titles, dwarf_pmra, dwarf_pmdec, ra, dec, pmra, pmdec, parallax, parallax_error):\n counts, xedges, yedges, im = pm_histogram(fig, ax, data, title, dwarf_pmra=dwarfpmra, dwarf_pmdec=dwarfpmdec, cut=cut)\n\n # make labels across all subplots\n universal_plot_labels(fig, r\"Proper motion, right ascension [mas/yr]\", r\"Proper motion, declination [mas/yr]\")\n\n # add a universal colorbar, change cmap in hist2d above\n # fig.colorbar(im, ax=axs.ravel().tolist())\n\n fig.savefig(output_file, bbox_inches='tight')",
"def __init__(self, pinForward1, pinBackward1,pinForward2, pinBackward2):\n\n self.pinForward1 = pinForward1\n self.pinBackward1 = pinBackward1\n self.pinForward2 = pinForward2\n self.pinBackward2 = pinBackward2\n\n GPIO.setup(self.pinForward1, GPIO.OUT)\n GPIO.setup(self.pinBackward1, GPIO.OUT)\n GPIO.setup(self.pinForward2, GPIO.OUT)\n GPIO.setup(self.pinBackward2, GPIO.OUT)\n\n self.pwm_forward1 = GPIO.PWM(self.pinForward1, 100)\n self.pwm_backward1 = GPIO.PWM(self.pinBackward1, 100)\n self.pwm_forward2 = GPIO.PWM(self.pinForward2, 100)\n self.pwm_backward2 = GPIO.PWM(self.pinBackward2, 100)\n \n self.pwm_forward1.start(0)\n self.pwm_backward1.start(0)\n self.pwm_forward2.start(0)\n self.pwm_backward2.start(0)",
"def _createModuleObj(self):\n ModuleTimeHistory.__init__(self)",
"def __init__(self, module):\n super(SequenceWise, self).__init__()\n self.module = module",
"def __init__(self, module):\n super(SequenceWise, self).__init__()\n self.module = module",
"def __init__(self, module):\n super(SequenceWise, self).__init__()\n self.module = module",
"def _setup_pma(self) -> None:\n\n uncacheable_range = [\n AddrRange(dev.pio_addr, size=dev.pio_size)\n for dev in self._on_chip_devices + self._off_chip_devices\n ]\n\n # PCI\n uncacheable_range.append(AddrRange(0x2F000000, size=\"16MB\"))\n uncacheable_range.append(AddrRange(0x30000000, size=\"256MB\"))\n uncacheable_range.append(AddrRange(0x40000000, size=\"512MB\"))\n\n # TODO: Not sure if this should be done per-core like in the example\n for cpu in self.get_processor().get_cores():\n cpu.get_mmu().pma_checker = PMAChecker(\n uncacheable=uncacheable_range\n )",
"def setUp(self):\n\n self.thresholds = np.array([276, 277], dtype=np.float32)\n self.rain_name = \"probability_of_falling_rain_level_above_surface\"\n self.snow_name = \"probability_of_falling_snow_level_below_surface\"\n\n rain_prob = np.array(\n [\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n ],\n dtype=np.float32,\n )\n self.rain_prob_cube = set_up_probability_cube(\n rain_prob, self.thresholds, variable_name=self.rain_name\n )\n\n snow_prob = np.array(\n [\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n ],\n dtype=np.float32,\n )\n self.snow_prob_cube = set_up_probability_cube(\n snow_prob, self.thresholds, variable_name=self.snow_name\n )\n\n high_prob = np.array(\n [\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n ],\n dtype=np.float32,\n )\n self.high_prob_cube = set_up_probability_cube(\n high_prob, self.thresholds, variable_name=self.snow_name\n )",
"def MODULES(self):\n pass",
"def biopythonMM(pwmFileName,genomeDict,mpbsDict,scoringMethod,tempLocation,pseudocounts=0.1,bitscore=12.0,fpr=0.01,precision=10**4,highCutoff=0.7,functionalDepth=0.9):\n \n # Reading PWM\n pwm = readPwmFile(pwmFileName,tempLocation,pseudocounts)\n pwmName = pwmFileName.split(\"/\")[-1].split(\".\")[0]\n pwmLen = len(pwm)\n\n # Evaluating threshold\n pwmThreshold = 0.0\n if(scoringMethod == \"bitscore\"):\n pwmThreshold = bitscore\n elif(scoringMethod == \"fpr\"):\n sd = Motif.ScoreDistribution(pwm,precision=precision)\n pwmThreshold = sd.threshold_fpr(fpr)\n elif(scoringMethod == \"boyle\"):\n maxScore = pwm.max_score()\n minScore = 0.0 # TODO Boyle's rule is not suited for negative values.\n pwmThreshold = min(highCutoff*maxScore,functionalDepth*(maxScore-minScore))\n else:\n sys.stderr.write(\"Choose a valid scoring method.\\n\")\n sys.exit(0)\n\n # Creating aditional parameters\n chrList = constants.getChromList(reference=[mpbsDict])\n tempMpbsDict = dict([(e,[]) for e in chrList])\n maxValue = -99.0\n\n # Iterating on chromosomes\n for chrName in chrList:\n\n # Reading genome\n sequence = genomeDict[chrName]\n\n # Performing biopython's motif matching\n for pos, score in pwm.search_pwm(sequence,threshold=pwmThreshold):\n if(score > maxValue): maxValue = score\n if(pos >= 0): tempMpbsDict[chrName].append([pos,pos+pwmLen,pwmName,score,\"+\"])\n else: tempMpbsDict[chrName].append([-pos,-pos+pwmLen,pwmName,score,\"-\"])\n\n # Update scores - new scores are within [0,1000]\n for chrName in chrList:\n for e in tempMpbsDict[chrName]:\n mpbsDict[chrName].append([e[0],e[1],e[2],int(1000*(e[3]-pwmThreshold)/(maxValue-pwmThreshold)),e[4]])\n \n return 0",
"def __init__(self, *args):\n _hypre.HypreBoomerAMG_swiginit(self, _hypre.new_HypreBoomerAMG(*args))",
"def test_wrapper_processing(self):\r\n result = mib2pysnmp('conpot/tests/data/VOGON-POEM-MIB.mib')\r\n self.assertTrue('mibBuilder.exportSymbols(\"VOGON-POEM-MIB\"' in result,\r\n 'mib2pysnmp did not generate the expected output. Output: {0}'.format(result))"
]
| [
"0.62417173",
"0.5595662",
"0.5592487",
"0.5253398",
"0.5209395",
"0.5176703",
"0.5076419",
"0.5046861",
"0.50380796",
"0.49966943",
"0.4981465",
"0.49621493",
"0.49106213",
"0.49073905",
"0.49046108",
"0.48944435",
"0.48900783",
"0.48836777",
"0.4832847",
"0.48023075",
"0.4794829",
"0.47702727",
"0.47702727",
"0.47702727",
"0.47509047",
"0.47324458",
"0.47238111",
"0.47132906",
"0.47123253",
"0.47039276"
]
| 0.626209 | 0 |
Place both the upper_pmos and lower_pmos to the module | def place_ptx(self):
        # Compute the other pmos2 location by determining the offset to overlap the
# source and drain pins
self.overlap_offset = self.pmos.get_pin("D").ll() - self.pmos.get_pin("S").ll()
# adds the lower pmos to layout
#base = vector(self.width - 2*self.pmos.width + self.overlap_offset.x, 0)
self.lower_pmos_position = vector(self.bitcell.get_pin(self.bitcell_bl).lx(),
self.pmos.active_offset.y)
self.lower_pmos_inst.place(self.lower_pmos_position)
# adds the upper pmos(s) to layout
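        # space the upper row one pmos height up, plus room for the poly contact and a metal1 space on each side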
ydiff = self.pmos.height + 2*self.m1_space + contact.poly.width
self.upper_pmos1_pos = self.lower_pmos_position + vector(0, ydiff)
self.upper_pmos1_inst.place(self.upper_pmos1_pos)
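        # shift the second upper pmos by overlap_offset so its source lands on the first upper pmos's drain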
upper_pmos2_pos = self.upper_pmos1_pos + self.overlap_offset
self.upper_pmos2_inst.place(upper_pmos2_pos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modules():",
"def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)",
"def import_mm_order_functions():\n global check_mm_order, check_mm_equal\n global check_mm_half_order, check_mm_in_g_x0\n from mmgroup.mm_order import check_mm_order as f\n check_mm_order = f\n from mmgroup.mm_order import check_mm_equal as f\n check_mm_equal = f\n from mmgroup.mm_order import check_mm_half_order as f\n check_mm_half_order = f\n from mmgroup.mm_order import check_mm_in_g_x0 as f\n check_mm_in_g_x0 = f",
"def create_ptx(self):\n\n self.lower_pmos_inst=self.add_inst(name=\"lower_pmos\",\n mod=self.pmos)\n self.connect_inst([\"bl\", \"en\", \"br\", \"vdd\"])\n\n self.upper_pmos1_inst=self.add_inst(name=\"upper_pmos1\",\n mod=self.pmos)\n self.connect_inst([\"bl\", \"en\", \"vdd\", \"vdd\"])\n\n self.upper_pmos2_inst=self.add_inst(name=\"upper_pmos2\",\n mod=self.pmos)\n self.connect_inst([\"br\", \"en\", \"vdd\", \"vdd\"])",
"def MODULES(self):\n pass",
"def _set_boron_ppm_positions(self):\n \n #################################################################################################################################################\n # Sets the boron impurity values in the active fuel region in each autofilled element position\n self.fuel_ppm_positions ={\n 'C___1':'7.6 ppm', 'B___1':'7.6 ppm', 'A___1':'7.6 ppm',\n 'C___2':'7.6 ppm', 'B___2':'7.6 ppm', 'A___2':'7.6 ppm',\n 'C___3':'7.6 ppm', 'B___3':'7.6 ppm', 'A___3':'7.6 ppm',\n }",
"def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)",
"def copie_modules(self):\n print \"copie du module necessaire\"\n if version_os[\"OS\"] == \"Ubuntu\":\n self.exec_cmd(\"cp -rpdf /lib/modules/%s %s/lib/modules/\" % (self.xenmgtconf[\"KERNEL_UBUNTU\"].split(\"/boot/vmlinuz-\")[1],self.rep_vhosts_vm))\n if version_os[\"OS\"] == \"Debian\":\n self.exec_cmd(\"cp -rpdf /lib/modules/%s %s/lib/modules/\" % (self.xenmgtconf[\"KERNEL_DEBIAN\"].split(\"/boot/vmlinuz-\")[1],self.rep_vhosts_vm))\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp -rpdf /lib/modules/%s %s/lib/modules/\" % (self.xenmgtconf[\"KERNEL_CENTOS\"].split(\"/boot/vmlinuz-\")[1],self.rep_vhosts_vm))",
"def set_pipes(lower_pipes, upper_pipes):\n Bird.upper_pipes = upper_pipes\n Bird.lower_pipes = lower_pipes",
"def pre_move_hook(self, from_module, to_module):\n raise NotImplementedError()",
"def calc_pos_mod(nmodule):\n pass",
"def process_module_list(self, modules):",
"def _reset_module(m):\n raise NotImplementedError",
"def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return",
"def test_wrapper_processing(self):\r\n result = mib2pysnmp('conpot/tests/data/VOGON-POEM-MIB.mib')\r\n self.assertTrue('mibBuilder.exportSymbols(\"VOGON-POEM-MIB\"' in result,\r\n 'mib2pysnmp did not generate the expected output. Output: {0}'.format(result))",
"def set_bootmodules(self, modules):\n raise NotImplementedError",
"def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width",
"def __init__(self):\n self.modules = {}",
"def patch_arr_pm(self):\n self._copyin('build_scripts/file.rb',\n '/usr/lib/ruby/gems/1.8/gems/arr-pm-0.0.7/lib/arr-pm/file.rb')\n self._copyin('build_scripts/rpm.rb',\n '/usr/lib/ruby/gems/1.8/gems/fpm-0.4.24/lib/fpm/package/rpm.rb')",
"def biopythonMM(pwmFileName,genomeDict,mpbsDict,scoringMethod,tempLocation,pseudocounts=0.1,bitscore=12.0,fpr=0.01,precision=10**4,highCutoff=0.7,functionalDepth=0.9):\n \n # Reading PWM\n pwm = readPwmFile(pwmFileName,tempLocation,pseudocounts)\n pwmName = pwmFileName.split(\"/\")[-1].split(\".\")[0]\n pwmLen = len(pwm)\n\n # Evaluating threshold\n pwmThreshold = 0.0\n if(scoringMethod == \"bitscore\"):\n pwmThreshold = bitscore\n elif(scoringMethod == \"fpr\"):\n sd = Motif.ScoreDistribution(pwm,precision=precision)\n pwmThreshold = sd.threshold_fpr(fpr)\n elif(scoringMethod == \"boyle\"):\n maxScore = pwm.max_score()\n minScore = 0.0 # TODO Boyle's rule is not suited for negative values.\n pwmThreshold = min(highCutoff*maxScore,functionalDepth*(maxScore-minScore))\n else:\n sys.stderr.write(\"Choose a valid scoring method.\\n\")\n sys.exit(0)\n\n # Creating aditional parameters\n chrList = constants.getChromList(reference=[mpbsDict])\n tempMpbsDict = dict([(e,[]) for e in chrList])\n maxValue = -99.0\n\n # Iterating on chromosomes\n for chrName in chrList:\n\n # Reading genome\n sequence = genomeDict[chrName]\n\n # Performing biopython's motif matching\n for pos, score in pwm.search_pwm(sequence,threshold=pwmThreshold):\n if(score > maxValue): maxValue = score\n if(pos >= 0): tempMpbsDict[chrName].append([pos,pos+pwmLen,pwmName,score,\"+\"])\n else: tempMpbsDict[chrName].append([-pos,-pos+pwmLen,pwmName,score,\"-\"])\n\n # Update scores - new scores are within [0,1000]\n for chrName in chrList:\n for e in tempMpbsDict[chrName]:\n mpbsDict[chrName].append([e[0],e[1],e[2],int(1000*(e[3]-pwmThreshold)/(maxValue-pwmThreshold)),e[4]])\n \n return 0",
"def setup_module():\n common_setup_module()",
"def test_module(self):\n pass",
"def add_mapping_rules(header, symbols_map, includes_map):\n symbols_map += [(header.classname, header.classname)]\n for include in header.get_private_headers():\n includes_map += [(header.modulename, include, header.classname)]",
"def setmodule(self, module, priority='project'):\n\t\tself._assert_mutability()\n\t\tif isinstance(module, six.string_types):\n\t\t\tmodule = import_module(module)\n\t\tfor key in dir(module):\n\t\t\tif key.isupper():\n\t\t\t\tself.set(key, getattr(module, key), priority)",
"def test_handle_collisions_with_base_module_rpms(mock_grft, mock_get_session):\n mmd = load_mmd(read_staged_data(\"formatted_testmodule.yaml\"))\n xmd = mmd.get_xmd()\n xmd[\"mbs\"][\"buildrequires\"][\"platform\"][\"koji_tag\"] = \"module-el-build\"\n xmd[\"mbs\"][\"buildrequires\"][\"python\"] = {\"koji_tag\": \"module-python27\"}\n xmd[\"mbs\"][\"buildrequires\"][\"bash\"] = {\"koji_tag\": \"module-bash\"}\n mmd.set_xmd(xmd)\n\n bm_rpms = {\n \"bash-completion-1:2.7-5.el8.noarch\",\n \"bash-0:4.4.19-7.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.x86_64\",\n \"python3-ldap-0:3.1.0-4.el8.aarch64\",\n \"python3-ldap-0:3.1.0-4.el8.x86_64\",\n }\n non_bm_rpms = {\n \"bash-0:4.4.20-1.el8.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.aarch64\",\n \"python2-tools-0:2.7.18-1.module+el8.1.0+3568+bbd875cb.x86_64\",\n }\n mock_grft.side_effect = [bm_rpms, non_bm_rpms]\n\n default_modules.handle_collisions_with_base_module_rpms(mmd, [\"aarch64\", \"x86_64\"])\n\n mock_get_session.assert_called_once()\n xmd_mbs = mmd.get_xmd()[\"mbs\"]\n assert set(xmd_mbs[\"ursine_rpms\"]) == {\n \"bash-0:4.4.19-7.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.aarch64\",\n \"python2-tools-0:2.7.16-11.el8.x86_64\",\n }\n assert mock_grft.call_count == 2\n # We can't check the calls directly because the second argument is a set converted to a list,\n # so the order can't be determined ahead of time.\n first_call = mock_grft.mock_calls[0][1]\n assert first_call[0] == mock_get_session.return_value\n assert first_call[1] == [\"module-el-build\"]\n assert first_call[2] == [\"aarch64\", \"x86_64\"]\n\n second_call = mock_grft.mock_calls[1][1]\n assert second_call[0] == mock_get_session.return_value\n assert set(second_call[1]) == {\"module-bash\", \"module-python27\"}\n assert second_call[2] == [\"aarch64\", \"x86_64\"]",
"def position_modules_fast(self, data, out=None):\n return self._snapped().position_modules(data, out=out)",
"def __init__(self, verbose):\n self.modules = maus_cpp.globals.get_monte_carlo_mice_modules()\n self.verbose = verbose",
"def precedence(cls, order, module=None):\n if module is None:\n module = sys._getframe(1).f_globals['__name__']\n key = (cls, module)\n cls.precedence_map.setdefault(key, [])\n cls.precedence_map[key].append(order)",
"def __init__(self, pinForward, pinBackward, pinControlStraight,pinLeft, pinRight, pinControlSteering):\n\n self.pinForward = pinForward\n self.pinBackward = pinBackward\n self.pinControlStraight = pinControlStraight\n self.pinLeft = pinLeft\n self.pinRight = pinRight\n self.pinControlSteering = pinControlSteering\n GPIO.setup(self.pinForward, GPIO.OUT)\n GPIO.setup(self.pinBackward, GPIO.OUT)\n GPIO.setup(self.pinControlStraight, GPIO.OUT)\n\n GPIO.setup(self.pinLeft, GPIO.OUT)\n GPIO.setup(self.pinRight, GPIO.OUT)\n GPIO.setup(self.pinControlSteering, GPIO.OUT)\n\n self.pwm_forward = GPIO.PWM(self.pinForward, 100)\n self.pwm_backward = GPIO.PWM(self.pinBackward, 100)\n self.pwm_forward.start(0)\n self.pwm_backward.start(0)\n\n self.pwm_left = GPIO.PWM(self.pinLeft, 100)\n self.pwm_right = GPIO.PWM(self.pinRight, 100)\n self.pwm_left.start(0)\n self.pwm_right.start(0)\n\n GPIO.output(self.pinControlStraight,GPIO.HIGH) \n GPIO.output(self.pinControlSteering,GPIO.HIGH)",
"def Mirrorprocs(p1, p2):\n return False"
]
| [
"0.5711919",
"0.56773573",
"0.5484845",
"0.5373545",
"0.5269169",
"0.5117529",
"0.503132",
"0.5020452",
"0.49931383",
"0.49220088",
"0.49029332",
"0.4861574",
"0.47709423",
"0.4747098",
"0.47383958",
"0.47374594",
"0.47188666",
"0.47152808",
"0.4713286",
"0.4705685",
"0.4705429",
"0.4690904",
"0.46889728",
"0.4660971",
"0.46592972",
"0.46575075",
"0.4644319",
"0.463176",
"0.4623482",
"0.46211258"
]
| 0.58079857 | 0 |
Connects the upper and lower pmos together | def connect_poly(self):
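    # start the vertical poly strap at the lower-left corner of the lower pmos gate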
offset = self.lower_pmos_inst.get_pin("G").ll()
# connects the top and bottom pmos' gates together
ylength = self.upper_pmos1_inst.get_pin("G").ll().y - offset.y
self.add_rect(layer="poly",
offset=offset,
width=self.poly_width,
height=ylength)
    # connects the poly of the two upper pmos devices with a horizontal strap
offset = offset + vector(0, ylength - self.poly_width)
xlength = self.upper_pmos2_inst.get_pin("G").lx() - self.upper_pmos1_inst.get_pin("G").lx() + self.poly_width
self.add_rect(layer="poly",
offset=offset,
width=xlength,
height=self.poly_width) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_poly(self):\n # connect pmos1 poly\n nmos_gate = (self.nmos_position1 \n + self.nmos.poly_positions[0]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n for i in range(len(self.pmos.poly_positions)):\n pmos_gate = (self.pmos_position1 \n + self.pmos.poly_positions[i]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n mid1 = [pmos_gate.x, pmos_gate.y - drc[\"poly_to_active\"]]\n self.add_path(\"poly\", [nmos_gate, mid1, pmos_gate])\n\n # connect pmos2 poly\n nmos_gate = vector(self.nmos_position2[0] \n + self.nmos.poly_positions[0].x\n + 0.5 * drc[\"minwidth_poly\"], \n self.nmos_position1.y \n + self.nmos.poly_positions[0].y)\n for i in range(len(self.pmos.poly_positions)):\n pmos_gate = (self.pmos_position2\n + self.pmos.poly_positions[i]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n mid1 = vector(pmos_gate.x,\n nmos_gate.y + self.nmos.height \n + drc[\"poly_to_active\"])\n self.add_path(\"poly\", [nmos_gate, mid1, pmos_gate])",
"def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))",
"def connect(ends):\n d = np.diff(ends, axis=0)[0]\n j = np.argmax(np.abs(d))\n D = d[j]\n aD = np.abs(D)\n return ends[0] + (np.outer(np.arange(aD + 1), d) + (aD >> 1)) // aD",
"def crossOver(self, _other):\n # find the mid-point\n pivot = len(self.genes) / 2\n # swap the values around the mid-point\n newChrom1 = self.genes[0:pivot] + _other.genes[pivot:len(_other.genes)]\n newChrom2 = _other.genes[0:pivot] + self.genes[pivot:len(self.genes)]\n # return a tuple with the two new chromosomes\n return (newChrom1, newChrom2)",
"def joins((u,v,o)):\r\n return { W : ((u,v), (u-1,v)),\r\n S : ((u,v), (u,v-1)) }[o]",
"def place_ptx(self):\n\n # Compute the other pmos2 location, but determining offset to overlap the\n # source and drain pins\n self.overlap_offset = self.pmos.get_pin(\"D\").ll() - self.pmos.get_pin(\"S\").ll()\n \n # adds the lower pmos to layout\n #base = vector(self.width - 2*self.pmos.width + self.overlap_offset.x, 0)\n self.lower_pmos_position = vector(self.bitcell.get_pin(self.bitcell_bl).lx(),\n self.pmos.active_offset.y)\n self.lower_pmos_inst.place(self.lower_pmos_position)\n\n # adds the upper pmos(s) to layout\n ydiff = self.pmos.height + 2*self.m1_space + contact.poly.width\n self.upper_pmos1_pos = self.lower_pmos_position + vector(0, ydiff)\n self.upper_pmos1_inst.place(self.upper_pmos1_pos)\n\n upper_pmos2_pos = self.upper_pmos1_pos + self.overlap_offset\n self.upper_pmos2_inst.place(upper_pmos2_pos)",
"def center_flows(L_wprime, U_wprime, L_w3, U_w3, L_overlap, U_overlap):\n # examine every possible point\n current_dist_to_edge = -1\n point = (0,0)\n #print(\"w3 range: [{}, {}]\".format(L_w3, U_w3))\n #print(\"w' range: [{}, {}]\".format(L_wprime, U_wprime))\n #print(\"overlap range: [{},{}]\".format(L_overlap, U_overlap))\n for y in range(L_w3, U_w3 + 1):\n #print(\"y={}\".format(y))\n LH_bound = max(L_wprime, L_overlap - y)\n #print(\"LH bound = {}\".format(LH_bound))\n RH_bound = min(U_wprime, U_overlap - y)\n #print(\"RH bound = {}\".format(RH_bound))\n for x in range(LH_bound, RH_bound + 1):\n # w3 UB: 0x + 1y - U_w3 = 0\n # w3 LB: 0x + 1y - L_w3 = 0\n # wprime UB: 1x + 0y - U_wprime\n # wprime LB: 1x + 0y - L_wprime\n # wprime + w3 UB: 1x + 1y - U_wprime,wk\n # wprime + w3 LB: 1x + 1y - L_wprime,wk\n dist_to_edge = min(distance_point_to_line(x, y, 0, -1, U_w3), #0x-1y+U_w3=0\n distance_point_to_line(x, y, 0, -1, L_w3), #0x-1y+L_w3=0\n # -1x + 0y + U_wprime = 0\n distance_point_to_line(x, y, -1, 0, U_wprime),\n # -1x + 0y + L_wprime = 0\n distance_point_to_line(x, y, -1, 0, L_wprime),\n # -1x - 1y + U_overlap = 0\n distance_point_to_line(x, y, -1, -1, U_overlap),\n # -1 x - y + L_overlap = 0\n distance_point_to_line(x, y, -1, -1, L_overlap))\n if dist_to_edge > current_dist_to_edge:\n #print(\"At point ({},{}), distance to edge increased from {} to {}.\"\\\n # .format(x,y,current_dist_to_edge,dist_to_edge))\n current_dist_to_edge = dist_to_edge\n point = (x,y)\n return(point)",
"def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)",
"def connect_pp(self, p1, p2, color):\n\t\t\n\t\tdelta_p = p1 - p2\n\t\tmax_x, max_y = self.wh - P(1,1)\n\n\t\tif delta_p.y == 0:\n\t\t\tleft_p, right_p = (p1,p2) if p1.x < p2.x else (p2,p1)\n\t\t\tif left_p.x == 0 and right_p.x == max_x:\n\t\t\t\tleft_p = right_p\n\t\t\tself[left_p].paint('r', color)\n\t\telif delta_p.x == 0:\n\t\t\ttop_p, bottom_p = (p1,p2) if p1.y < p2.y else (p2,p1)\n\t\t\tif top_p.y == 0 and bottom_p.y == max_y:\n\t\t\t\ttop_p = bottom_p\n\t\t\tself[top_p].paint('d', color)",
"def _generate_throats(self):\n logger.info(\"Define connections between pores\")\n #Np = self._Np\n pts = self['pore.coords']\n Np = len(pts)\n #Generate 6 dummy domains to pad onto each face of real domain\n #This prevents surface pores from making long range connections to each other\n\n x,y,z = self[\"pore.coords\"].T\n if x.max() > self._Lx:\n Lx = x.max()*1.05\n else:\n Lx = self._Lx\n if y.max() > self._Ly:\n Ly = y.max()*1.05\n else:\n Ly = self._Ly\n if z.max() > self._Lz:\n Lz = z.max()*1.05\n else:\n Lz = self._Lz\n\n #Reflect in X = Lx and 0\n Pxp = pts.copy()\n Pxp[:,0]=(2*Lx-Pxp[:,0])\n Pxm= pts.copy()\n Pxm[:,0] = Pxm[:,0]*(-1)\n #Reflect in Y = Ly and 0\n Pyp = pts.copy()\n Pyp[:,1]=(2*Ly-Pxp[:,1])\n Pym = pts.copy()\n Pym[:,1] = Pxm[:,1]*(-1)\n #Reflect in Z = Lz and 0\n Pzp = pts.copy()\n Pzp[:,2]=(2*Lz-Pxp[:,2])\n Pzm = pts.copy()\n Pzm[:,2] = Pxm[:,2]*(-1)\n #Add dummy domains to real domain\n pts = np.vstack((pts,Pxp,Pxm,Pyp,Pym,Pzp,Pzm)) #Order important for boundary logic\n #Perform tessellation\n logger.debug(\"Beginning tessellation\")\n Tri = sptl.Delaunay(pts)\n logger.debug(\"Converting tessellation to adjacency matrix\")\n adjmat = sprs.lil_matrix((Np,Np),dtype=int)\n for i in sp.arange(0,sp.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n #this used to be vectorize, but it stopped working...change in scipy?\n for j in Tri.simplices[i]:\n if j < Np:\n adjmat[j,Tri.simplices[i][Tri.simplices[i]<Np]] = 1\n #Remove duplicate (lower triangle) and self connections (diagonal)\n #and convert to coo\n adjmat = sprs.triu(adjmat,k=1,format=\"coo\")\n logger.debug(\"Conversion to adjacency matrix complete\")\n self['throat.conns']=sp.vstack((adjmat.row, adjmat.col)).T\n self['pore.all'] = np.ones(len(self['pore.coords']), dtype=bool)\n self['throat.all'] = np.ones(len(self['throat.conns']), dtype=bool)\n\n # Do Voronoi diagram - creating voronoi polyhedra around each pore and save vertex information\n self._vor = Voronoi(pts)\n all_vert_index = sp.ndarray(Np,dtype=object)\n for i,polygon in enumerate(self._vor.point_region[0:Np]):\n if -1 not in self._vor.regions[polygon]:\n all_vert_index[i]=dict(zip(self._vor.regions[polygon],self._vor.vertices[self._vor.regions[polygon]]))\n\n \" Add throat vertices by looking up vor.ridge_dict \"\n throat_verts = sp.ndarray(len(self[\"throat.conns\"]),dtype=object)\n for i,(p1,p2) in enumerate(self[\"throat.conns\"]):\n try:\n throat_verts[i]=dict(zip(self._vor.ridge_dict[(p1,p2)],self._vor.vertices[self._vor.ridge_dict[(p1,p2)]]))\n except KeyError:\n try:\n throat_verts[i]=dict(zip(self._vor.ridge_dict[(p2,p1)],self._vor.vertices[self._vor.ridge_dict[(p2,p1)]]))\n except KeyError:\n print(\"Throat Pair Not Found in Voronoi Ridge Dictionary\")\n\n self['pore.vert_index']=all_vert_index\n self['throat.vert_index']=throat_verts\n logger.debug(sys._getframe().f_code.co_name+\": End of method\")",
"def connect_tx(self, M1_track):\n # the first pmos drain to Vdd\n for i in range(len(self.pmos.active_contact_positions)):\n contact_pos = self.pmos_position1 + self.pmos.active_contact_positions[i]\n if i % 2 == 0:\n correct = self.pmos.active_contact.second_layer_position.scale(1,0) \n drain_posistion = contact_pos + correct \n height = self.vdd_position.y - drain_posistion.y\n self.add_rect(layer=\"metal1\",\n offset=drain_posistion,\n width=drc[\"minwidth_metal1\"],\n height=height)\n else:\n # source to pmos2\n correct = (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(self.pmos.active_contact.second_layer_width,\n 0).scale(0.5,0))\n source_position = contact_pos + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n\n # the second pmos\n for i in range(len(self.pmos.active_contact_positions)):\n if i % 2 == 0:\n # source to pmos2\n pmos_active =self.pmos_position2+self.pmos.active_contact_positions[i]\n correct= (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(0.5 * self.pmos.active_contact.second_layer_width,0))\n source_position = pmos_active + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n # two nmos source to gnd\n source_posistion1 = (self.nmos_position1\n + self.nmos.active_contact_positions[0]\n + self.nmos.active_contact.second_layer_position.scale(1,0))\n height = self.gnd_position.y - source_posistion1.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion1,\n width=drc[\"minwidth_metal1\"],\n height=height)\n\n source_posistion2 = (self.nmos_position2\n + self.nmos.active_contact_positions[1]\n + self.nmos.active_contact.second_layer_position.scale(1,0)) \n height = self.gnd_position.y - source_posistion2.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion2,\n width=drc[\"minwidth_metal1\"],\n height=height)",
"def bone_pairs(self):\n return ((0, 3), (1, 4), (2, 5), (10, 13), (11, 14), (12, 15))",
"def __create_connections(self):\n \"\"\"\n When adding diagonals, each node adds only diagonals to nodes below it.\n This prevents a case where two nodes add diagonals with each other, s.t. both diagonals are added.\n \"\"\"\n # top left corner:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((0, 1)).left)\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 0)).up)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((1, 1)).up)\n else:\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 1)).left)\n # top row:\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((0, wi + 1)).left)\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((0, wi - 1)).right)\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi + 1)).left)\n # top right corner:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((0, -2)).right)\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -1)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((1, -2)).up)\n else:\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -2)).right)\n # middle rows:\n for hi in range(1, self.height - 1):\n # left node\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi, 1)).left)\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 0)).up)\n self.add_connection(self.get_junc((hi, 0)).up, self.get_junc((hi - 1, 0)).down)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi + 1, 1)).up)\n else:\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 1)).left)\n # middle nodes\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi, wi + 1)).left)\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi, wi - 1)).right)\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi)).up)\n self.add_connection(self.get_junc((hi, wi)).up, self.get_junc((hi - 1, wi)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi + 1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi + 1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi + 1)).left)\n # right node:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi, -2)).right)\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -1)).up)\n self.add_connection(self.get_junc((hi, -1)).up, self.get_junc((hi - 1, -1)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, -1)).left, 
self.get_junc((hi + 1, -2)).up)\n else:\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -2)).right)\n # bottom left corner:\n self.add_connection(self.get_junc((-1, 0)).right, self.get_junc((-1, 1)).left)\n self.add_connection(self.get_junc((-1, 0)).up, self.get_junc((-2, 0)).down)\n # bottom row\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((-1, wi)).right, self.get_junc((-1, wi + 1)).left)\n self.add_connection(self.get_junc((-1, wi)).left, self.get_junc((-1, wi - 1)).right)\n self.add_connection(self.get_junc((-1, wi)).up, self.get_junc((-2, wi)).down)\n # bottom right corner:\n self.add_connection(self.get_junc((-1, -1)).left, self.get_junc((-1, -2)).right)\n self.add_connection(self.get_junc((-1, -1)).up, self.get_junc((-2, -1)).down)",
"def connect_pmos(self, pmos_pin, bit_pin):\n\n ll_pos = vector(min(pmos_pin.lx(),bit_pin.lx()), pmos_pin.by())\n ur_pos = vector(max(pmos_pin.rx(),bit_pin.rx()), pmos_pin.uy())\n\n width = ur_pos.x-ll_pos.x\n height = ur_pos.y-ll_pos.y\n self.add_rect(layer=\"metal2\",\n offset=ll_pos,\n width=width,\n height=height)",
"def domainConnect(self,left,right,Nc):\n if Nc == 0:\n return\n else:\n domainLength = right - left\n # Tirage des arets aleatoires\n possible_pairs = np.vstack(np.triu_indices(domainLength,k=2)).T\n Nl = len(possible_pairs) #Nl = int((domainLength-2)*(domainLength-1)/2)\n selected = left + possible_pairs[np.random.choice(Nl,size=Nc,replace=False)].T\n self.connect(selected)\n # Color the domain\n self.colors[left:right] = ['g']*(right-left)",
"def sinks(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if partner == self.right:\n partner -= 1\n else:\n partner = self.left + (self.rank - self.midpoint)\n if partner == self.midpoint:\n partner -= 1\n\n return {partner}",
"def add_bitline_contacts(self):\n\n stack=(\"metal1\", \"via1\", \"metal2\")\n pos = self.lower_pmos_inst.get_pin(\"S\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.lower_pmos_inst.get_pin(\"D\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.upper_pmos1_inst.get_pin(\"S\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.upper_pmos2_inst.get_pin(\"D\").center()\n self.add_contact_center(layers=stack,\n offset=pos)",
"def connect_lines(horizontal_lines, vertical_lines):\n horizontal = []\n vertical = []\n\n for x1,y1,x2,y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1,v_y1,v_x2,v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1,y1,x2,y2))\n\n for x1,y1,x2,y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1,h_y1,h_x2,h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1,y1,x2,y2))\n\n return (horizontal, vertical)",
"def add_adj_nodes(self):\n\n for x, row in enumerate(self.grid):\n for y, cell in enumerate(row):\n if x-1 >= 0:\n cell.above = self.grid[x-1][y]\n if y+1 < len(self.grid[0]):\n cell.right = self.grid[x][y+1]\n if x+1 < len(self.grid):\n cell.below = self.grid[x+1][y]\n if y-1 >= 0:\n cell.left = self.grid[x][y-1]",
"def _format_subgraph_angle(m, top):\n (sort_by_n_connections, _) = _get_sorted_by_n_connections(m)\n ends = sorted(\n [sort_by_n_connections[0], sort_by_n_connections[1]],\n key=lambda x: top.get_index(x),\n )\n middle = sort_by_n_connections[2]\n return (\n top.get_index(ends[0]),\n top.get_index(middle),\n top.get_index(ends[1]),\n )",
"def connect_well_contacts(self):\n well_tap_length = self.height - self.nwell_contact_position.y\n xoffset = (self.nwell_contact_position.x \n + self.nwell_contact.second_layer_position.x \n - self.nwell_contact.first_layer_position.x)\n offset = [xoffset, self.nwell_contact_position.y]\n self.add_rect(layer=\"metal1\",\n offset=offset,\n width=drc[\"minwidth_metal1\"],\n height=well_tap_length)\n\n offset = (self.pwell_contact_position.scale(1,0)\n + self.pwell_contact.second_layer_position.scale(1,0)\n - self.pwell_contact.first_layer_position.scale(1,0))\n well_tap_length = self.pwell_contact_position.y\n self.add_rect(layer=\"metal1\",\n offset=offset,\n width=drc[\"minwidth_metal1\"],\n height=well_tap_length)",
"def connect_portals(self):\n portal_coords = [tuple(coord)\n for coord in np.argwhere(self.maze.isalpha())]\n for portal_coord in portal_coords:\n\n portal_x = portal_coord[2]\n portal_y = portal_coord[1]\n portal_z = portal_coord[0]\n\n x_on_left = portal_x <= 3\n x_on_right = portal_x >= self.rim_x\n x_on_outside = x_on_left or x_on_right\n\n y_on_left = portal_y <= 3\n y_on_right = portal_y >= self.rim_y\n y_on_outside = y_on_left or y_on_right\n\n if x_on_outside or y_on_outside:\n portal_type = \"upward\"\n else:\n portal_type = \"downward\"\n\n for other_portal_coord in portal_coords:\n other_x = other_portal_coord[2]\n other_y = other_portal_coord[1]\n if (other_x == portal_x) and (other_y == portal_y):\n continue\n elif self.maze[portal_coord] == self.maze[other_portal_coord]:\n other_z = other_portal_coord[0]\n\n # Look for a the correspondig portal with a z-coord 1 lower\n if portal_type == \"upwards\":\n if portal_z == other_z + 1:\n self.graph.add_edge(\n portal_coord, other_portal_coord)\n\n # Look for a the correspondig portal with a z-coord 1 higher\n elif portal_type == \"downward\":\n if portal_z == other_z - 1:\n self.graph.add_edge(\n portal_coord, other_portal_coord)",
"def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], [102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]",
"def extraDomainConnect(self,Nc):\n if Nc == 0:\n return\n else:\n possible_pairs = np.vstack(np.triu_indices(self.numMonomers,k=2)).T.tolist()\n for l,r in self.TADs:\n length = r-l\n for i in np.arange(l,r): \n for j in range(length-i):\n possible_pairs.remove([i,i+2+j])\n \n possible_pairs = np.array(possible_pairs)\n \n selected = possible_pairs[np.random.choice(len(possible_pairs),size=Nc,replace=False)].T\n \n self.connect(selected)",
"def interDomainConnect(self,l1,r1,l2,r2,Nc):\n if Nc == 0:\n return\n else:\n x = np.arange(l1,r1)\n y = np.arange(l2,r2)\n possible_pairs = [[a,b] for a,b in product(x,y)]\n if r1 == l2:\n possible_pairs.pop((r1-l1-1)*(r2-l2))\n Nl = len(possible_pairs)\n selected = np.array(possible_pairs)[np.random.choice(Nl,size=Nc,replace=False)].T\n self.connect(selected)",
"def handle_diagonals_crossing_connections(self, conn: Connection):\n if not self.is_connection_diagonal(conn):\n return False\n j1 = self.get_junc_from_node(conn.me)\n j2 = self.get_junc_from_node(conn.other)\n # check if top-left to bottom-right diagonal of top-right to bottom-left diagonal\n indices_diff = (j1.indices.row - j2.indices.row, j1.indices.col - j2.indices.col)\n if indices_diff[0] == indices_diff[1]:\n # top-left to bottom-right\n top_left = j1 if indices_diff[0] == -1 else j2 # else diff is 1\n top_right = self.get_junc((top_left.indices.row, top_left.indices.col + 1))\n bottom_left = self.get_junc((top_left.indices.row + 1, top_left.indices.col))\n if self.are_juncs_connected(top_right, bottom_left):\n # print(conn, top_right, bottom_left, sep=\"\\n\")\n # we should remove the connection.\n if bottom_left.right.node_id in top_right.down.get_connections_ids():\n top_right.down.remove_connection_by_id(bottom_left.right.node_id)\n if bottom_left.up.node_id in top_right.left.get_connections_ids():\n top_right.left.remove_connection_by_id(bottom_left.up.node_id)\n else:\n # top-right to bottom-left\n top_right = j1 if indices_diff[0] == -1 else j2 # else diff is 1\n top_left = self.get_junc((top_right.indices.row, top_right.indices.col - 1))\n bottom_right = self.get_junc((top_right.indices.row + 1, top_right.indices.col))\n if self.are_juncs_connected(top_left, bottom_right):\n # print(conn, top_left, bottom_right, sep=\"\\n\")\n # we should remove the connection.\n if bottom_right.left.node_id in top_left.down.get_connections_ids():\n top_left.down.remove_connection_by_id(bottom_right.left.node_id)\n if bottom_right.up.node_id in top_left.right.get_connections_ids():\n top_left.right.remove_connection_by_id(bottom_right.up.node_id)",
"def add_cell_and_edges(self,nodes,**kws): \n for a,b in circular_pairs(nodes):\n j=self.nodes_to_edge(a,b)\n if j is None:\n self.add_edge(nodes=[a,b])\n return self.add_cell(nodes=nodes,**kws)",
"def contracting_channel_cross(m, n, W_upstream = 1., W_downstream = 0.75,\n L_1 = 5.0, L_2 = 2.0, L_3 = 10, origin = (0.0, 0.0)):\n\n import math\n\n from anuga.config import epsilon\n\n\n lenx = L_1 + L_2 + L_3\n leny = W_upstream\n deltax = lenx/float(m)\n deltay = leny/float(n)\n\n x1 = 0\n y1 = 0\n x2 = L_1\n y2 = 0\n x3 = L_1 + L_2\n y3 = (W_upstream - W_downstream)/2\n x4 = L_1 + L_2 + L_3\n y4 = y3\n x5 = x4\n y5 = y4 + W_downstream\n x6 = L_1 + L_2\n y6 = y5\n x7 = L_1\n y7 = W_upstream\n x8 = 0\n y8 = W_upstream\n a1 = 0\n a2 = (W_upstream - W_downstream)/(2*L_2)\n a3 = 1\n a4 = (W_downstream - W_upstream)/(L_2*W_upstream)\n\n # Dictionary of vertex objects\n vertices = {}\n points = []\n\n for i in range(m+1):\n x = deltax*i\n for j in range(n+1):\n y = deltay*j\n if x > L_1 and x <= (L_1 + L_2):\n y = a1 + a2*(x - L_1) + a3*y + a4*(x - L_1)*y\n elif x > L_1 + L_2:\n y = (W_upstream - W_downstream)/2 + deltay*j*W_downstream/W_upstream\n\n vertices[i,j] = len(points)\n points.append([x + origin[0], y + origin[1]])\n\n # Construct 4 triangles per element\n elements = []\n boundary = {}\n for i in range(m):\n for j in range(n):\n v1 = vertices[i,j+1]\n v2 = vertices[i,j]\n v3 = vertices[i+1,j+1]\n v4 = vertices[i+1,j]\n x = (points[v1][0]+points[v2][0]+points[v3][0]+points[v4][0])*0.25\n y = (points[v1][1]+points[v2][1]+points[v3][1]+points[v4][1])*0.25\n v5 = len(points)\n points.append([x, y])\n\n #Create left triangle\n if i == 0:\n boundary[(len(elements), 1)] = 'left'\n elements.append([v2,v5,v1])\n\n #Create bottom triangle\n if j == 0:\n boundary[(len(elements), 1)] = 'bottom'\n elements.append([v4,v5,v2])\n\n #Create right triangle\n if i == m-1:\n boundary[(len(elements), 1)] = 'right'\n elements.append([v3,v5,v4])\n\n #Create top triangle\n if j == n-1:\n boundary[(len(elements), 1)] = 'top'\n elements.append([v1,v5,v3])\n\n\n return points, elements, boundary",
"def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)",
"def phsp_edge(phsp, rtype1, rtype2):\n mr1_min, mr1_max = phsp.mass_sq_range[rtype1]\n mr1_space = np.linspace(mr1_min, mr1_max, 1000)\n mr2_mins, mr2_maxs = phsp.mr_sq_range(rtype2, mr1_space, rtype1)\n mr1 = np.concatenate([mr1_space, mr1_space[::-1]])\n mr2 = np.concatenate([mr2_mins, mr2_maxs[::-1]])\n return [mr1, mr2]"
]
| [
"0.5922714",
"0.5896698",
"0.5664321",
"0.5499741",
"0.54175603",
"0.53472805",
"0.5343195",
"0.5323919",
"0.5288541",
"0.5283576",
"0.5264222",
"0.5234746",
"0.5233384",
"0.5213443",
"0.5183542",
"0.5177952",
"0.51564336",
"0.515582",
"0.51377267",
"0.5134331",
"0.50970364",
"0.50934374",
"0.5038444",
"0.50179684",
"0.5014671",
"0.4989431",
"0.49785188",
"0.4975769",
"0.49732482",
"0.4967915"
]
| 0.6478284 | 0 |
Adds the en input rail, en contact/vias, and connects to the pmos | def route_en(self):
# adds the en contact to connect the gates to the en rail on metal1
offset = self.lower_pmos_inst.get_pin("G").ul() + vector(0,0.5*self.poly_space)
self.add_contact_center(layers=("poly", "contact", "metal1"),
offset=offset,
rotate=90)
# adds the en rail on metal1
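    # the rail spans the full cell width at the same y as the gate contact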
self.add_layout_pin_segment_center(text="en",
layer="metal1",
start=offset.scale(0,1),
end=offset.scale(0,1)+vector(self.width,0)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_connections(self):\n\t\t# Lorsque l'on choisi une devise dans la cbb\n\t\tself.cbb_devisesFrom.activated.connect(self.compute)\n\t\tself.cbb_devisesTo.activated.connect(self.compute)\n\t\t# Lorsque l'on change le montant dans la spn\n\t\tself.spn_montant.valueChanged.connect(self.compute)\n\t\tself.spn_montantConverti.valueChanged.connect(self.compute)\n\t\t\n\t\t# Lorsque l'on clique sur le bouton\n\t\tself.btn_inverser.clicked.connect(self.inverser_devise)",
"def _add_route(self, connections):\n route = ArduinoSwitchControlRoute(connections)\n if route.input.label not in self.routes:\n self.routes[route.input.label] = {route.output.label: [route]}\n elif route.output.label not in self.routes[route.input.label]:\n self.routes[route.input.label][route.output.label] = [route]\n else:\n self.routes[route.input.label][route.output.label].append(route)",
"def Connect(self, pre, post, w):\n self.connections.append([pre, post, w])",
"def connect(self):",
"def connect(self):",
"def route_input_A(self):\n xoffset = self.nmos.poly_positions[0].x\n # HACK: added 1.5, since we're going to rewrite this.\n yoffset = self.nmos_position1.y + drc[\"well_enclosure_active\"] + self.nmos.active_height + 1.5*self.poly_contact.height \n self.A_position = vector(xoffset, yoffset)\n # gate input\n offset = self.A_position - vector(0, 0.5 * self.poly_contact.width)\n self.add_contact(layers=(\"poly\", \"contact\", \"metal1\"),\n offset=offset,\n rotate=90)\n\n # connect gate input to tx gate\n offset = self.A_position - vector(self.poly_contact.first_layer_position.y,\n 0.5 * self.poly_contact.width)\n self.add_rect(layer=\"poly\",\n offset=offset,\n width=self.poly_contact.first_layer_position.y + drc[\"minwidth_poly\"],\n height=self.poly_contact.first_layer_width)\n # extend the metal to the boundary of the cell\n input_length = self.A_position.x\n offset = [0, self.A_position.y - 0.5 * drc[\"minwidth_metal1\"]]\n self.add_layout_pin(text=\"A\",\n layer=\"metal1\",\n offset=offset,\n width=input_length,\n height=drc[\"minwidth_metal1\"])",
"def add_pins(self):\n\n for bit in range(self.addr_size):\n self.add_pin(\"addr_{0}\".format(bit),\"INPUT\")\n \n self.add_pin(\"wl_en\", \"INPUT\")\n\n for bit in range(self.num_rows):\n self.add_pin(\"wl_{0}\".format(bit),\"OUTPUT\")\n \n self.add_pin(\"vdd\",\"POWER\")\n self.add_pin(\"gnd\",\"GROUND\")",
"def add_road(ccTremb):\n pass",
"def add_incoming_connection(intersection, road):\n intersection.add_incoming_connection(road)",
"def make_connections(self):\n try:\n self.datatype.currentIndexChanged.connect(self.set_datatype)\n self.dyad.valueChanged.connect(self.set_dyad)#\n self.vid_or_channel.valueChanged.connect(self.set_channel_or_vid)\n except Exception as e:\n QMessageBox.about(self, str(e))",
"def build(self, *args, **kwargs):\n\n # BGP routers\n\n as1ra = self.bgp('as1ra',['2001:1234:1::/64'])\n as2rb = self.bgp('as2rb',['2001:1234:2::/64'])\n as3rc = self.bgp('as3rc',['2001:1234:3::/64'])\n as4rd = self.bgp('as4rd',['2001:1234:4::/64'])\n\n # Set AS-ownerships\n\n self.addOverlay(AS(1, (as1ra,)))\n self.addOverlay(AS(2, (as2rb,)))\n self.addOverlay(AS(3, (as3rc,)))\n self.addOverlay(AS(4, (as4rd,)))\n\n # Inter-AS links\n\n self.addLink(as1ra, as2rb, \n params1={\"ip\": \"2001:12::a/64\"},\n params2={\"ip\": \"2001:12::b/64\"})\n self.addLink(as1ra, as3rc, \n params1={\"ip\": \"2001:13::a/64\"},\n params2={\"ip\": \"2001:13::c/64\"})\n self.addLink(as2rb, as3rc, \n params1={\"ip\": \"2001:23::b/64\"},\n params2={\"ip\": \"2001:23::c/64\"})\n self.addLink(as2rb, as4rd, \n params1={\"ip\": \"2001:24::c/64\"},\n params2={\"ip\": \"2001:24::d/64\"})\n\n # Add eBGP peering\n bgp_peering(self, as1ra, as2rb)\n bgp_peering(self, as1ra, as3rc)\n bgp_peering(self, as2rb, as3rc)\n bgp_peering(self, as2rb, as4rd)\n\n\n # hosts attached to the routers\n\n self.addLink(as1ra, self.addHost('h1'),\n params1={\"ip\": \"2001:1234:1::a/64\"},\n params2={\"ip\": \"2001:1234:1::1/64\"})\n self.addLink(as2rb, self.addHost('h2'),\n params1={\"ip\": \"2001:1234:2::b/64\"},\n params2={\"ip\": \"2001:1234:2::2/64\"})\n self.addLink(as3rc, self.addHost('h3'),\n params1={\"ip\": \"2001:1234:3::c/64\"},\n params2={\"ip\": \"2001:1234:3::1/64\"})\n self.addLink(as4rd, self.addHost('h4'),\n params1={\"ip\": \"2001:1234:4::d/64\"},\n params2={\"ip\": \"2001:1234:4::4/64\"})\n\n super(SimpleBGP, self).build(*args, **kwargs)",
"def setup():\n jails = jails_list()\n\n jail_start_stop('stop', 'all') # stop ALL jail befor other action\n\n# Read jail.conf file \n jcs = open(jailconf, 'r')\n jcs_list = []\n for i in jcs:\n jcs_list.append(i)\n jcs.close()\n\n print \" \" \n set_menu = ['JADM', 'Settings']\n bz = [[\"Bridge interface:\", bridge_int], [\"Main zfs:\", jzfs]]\n print tabulate(bz, set_menu)\n print \" \"\n \n ch_choise = ['bridge', 'zfs', '!'] \n while True:\n choise = raw_input(\"change (bridge|zfs|!):> \")\n \n if choise == 'bridge':\n print \" \"\n \n br_interface = []\n bridges_sys = []\n gw_ipaddr = []\n gw_number = 0\n for i in netifaces.interfaces():\n if \"bridge\" in i:\n bridges_sys.append(i)\n \n br_count = 0\n for x in bridges_sys:\n try:\n bripadd = netifaces.ifaddresses(x)[netifaces.AF_INET]\n except:\n brake\n for i in bripadd:\n br_interface.append([' ', ' ', i['addr'], i['netmask']])\n gw_ipaddr.append(i['addr'])\n br_count = br_count + 1\n br_interface[br_count - 1][1] = str(x)\n br_interface[br_count - 1][0] = str(gw_number)\n gw_number = gw_number + 1\n\n br_menu = [\"Number\", \"Bridge name\", \"Gateway IP Address\", \"Gatewy Network Mask\"]\n print tabulate(br_interface, br_menu)\n print \" \"\n \n while True:\n brid = raw_input(\"bridge number(old: %s):> \" % (bridge_int))\n if brid == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n \n try:\n int(brid)\n except ValueError:\n msg = \" ERROR: slecet valid Bridge number (%s - %s)!\" % (0, len(bridges_sys) - 1)\n log(msg)\n continue\n \n if int(brid) >= len(bridges_sys):\n msg = \" ERROR: slecet valid Bridge number (%s - %s)!\" % (0, len(bridges_sys) - 1)\n log(msg)\n continue\n \n brid = bridges_sys[int(brid)]\n# check if we use the same brige\n if bridge_int == brid:\n log(\" INFO: bridge interface was not changed\")\n return False\n \n # update $bridge in jail.conf\n for i in jcs_list:\n if \"$bridge\" in i:\n update_jcs = jcs_list.index(i)\n jcs_list[update_jcs] = '$bridge = \"%s\";\\n' % (brid)\n msg = \" WARNING: please modify all jails for new '%s' networks!\" % (brid)\n log(msg)\n break\n\n break\n break\n \n elif choise == 'zfs':\n print \" Check for ZFS zpools ...\"\n os.system(\"zpool list\")\n print \" \"\n os.system(\"zfs list\")\n log(\" WARNING: JADM will rename all existing jails zfs :WARNING\")\n print \" \"\n\n while True:\n chjzfs = raw_input(\"zpool/tank:> \")\n if chjzfs == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n if chjzfs == jzfs:\n msg = \" ERROR: '%s' is current zfs please choose different!\" % (chjzfs)\n log(msg)\n continue\n \n zfs = subprocess.check_output(\"zfs list -H -o name\", shell=True)\n zfs = zfs.split('\\n')\n if chjzfs in zfs:\n msg = \" INFO: We will use existing zpool/tank: %s\" % (chjzfs)\n log(msg)\n print \" WARNING: '%s' will be destroyed!\" % (chjzfs)\n yn = raw_input('use it anyway (yes):> ')\n if yn == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n# destroy existing ZFS\n if yn != \"yes\":\n log(\" INFO: Interrupted by user\")\n return False\n else:\n if os.WEXITSTATUS(os.system(\"zfs destroy -r %s\" % (chjzfs))) !=0:\n msg = \" ERROR:'%s' cannot be destroyed!\" % (chjzfs)\n log(msg)\n else:\n msg = \" WARNING:'%s' was destroyed!\" % (chjzfs)\n log(msg)\n ''''\n chjpath = subprocess.check_output('zfs list -H -o mountpoint %s' % chjzfs, shell = True)\n chjpath = chjpath.strip('\\n')\n# check if exsiting zfs tank have mount point\n if chjpath == 'none':\n print \" \"\n print \" WARNING: '%s' have '%s' for mount point\" % (chjzfs, chjpath)\n 
print \" WARNING: Please create mount point for '%s' or select different zroot/tank\" % chjzfs\n continue\n break\n '''\n if os.WEXITSTATUS(os.system(\"zfs create %s\" % (chjzfs))) != 0:\n print \" \"\n print \" ERROR: Please enter correct zfs!\"\n continue\n else:\n while True:\n chjpath = raw_input(\"%s mount point:> \" % (chjzfs))\n if chjpath == \"!\":\n log(\" INFO: Interrupted by user\")\n return False\n if chjpath == jpath:\n msg = \" ERROR: '%s' is current mount point please choose different!\" % (chjpath)\n log(msg)\n continue \n \n# check if $japth content '/' if not add it\n if chjpath[0] != '/':\n chjpath = \"/%s\" % chjpath\n if chjpath[-1] != '/':\n chjpath = \"%s/\" % chjpath\n \n# check if mount point exitst\n zfsmount = os.path.isdir(chjpath)\n if zfsmount == True:\n print \" \"\n print \" ERROR: %s mount point exist!\" % chjpath\n yn = raw_input('use it anyway (yes):> ')\n if 'yes' in yn:\n os.system('zfs set mountpoint=%s %s' % (chjpath, chjzfs))\n break\n else:\n continue\n else:\n os.system('zfs set mountpoint=%s %s' % (chjpath, chjzfs))\n break\n break\n\n# create BASE-RW\n if 'BASE' in jails[1]:\n if os.WEXITSTATUS(os.system(\"zfs create %s\" % (chjzfs+\"/BASE-RW\"))) != 0:\n msg = \" ERROR: '%s' cannot be created!\" % (chjzfs+\"/BASE-RW\")\n log(msg)\n return False\n else:\n if os.WEXITSTATUS(os.system('zfs set mountpoint=%s %s' % (chjpath + \"BASE-RW\", chjzfs+\"/BASE-RW\"))) != 0:\n msg = \" ERROR: '%s' cannot be created!\" % (chjpath + \"BASE-RW\")\n log(msg)\n return False\n else:\n msg = \" INFO: '%s' was created!\" % (chjzfs+\"/BASE-RW\")\n log(msg)\n \n# try to rename all jails\n for i in jails[1]:\n \n orgJZFS = jzfs+\"/\"+i\n orgJPATH = jpath + i\n \n newJZFS = chjzfs+\"/\"+i\n newJPATH = chjpath + i\n# zfs fix BASE-\n if 'BASE-' in i:\n\n orgJZFS = jzfs+\"/BASE-RW/\"+i\n orgJPATH = jpath + \"BASE-RW/\" +i\n \n newJZFS = chjzfs+\"/BASE-RW/\"+i\n newBJPATH = newJPATH\n newJPATH = chjpath + \"BASE-RW/\" + i\n \n# rename jaisl zfs\n if os.WEXITSTATUS(os.system(\"zfs rename %s %s\" % (orgJZFS, newJZFS))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % (orgJZFS, newJZFS)\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % (orgJZFS, newJZFS, jzfs)\n log(msg)\n else:\n# zfs fix BASE-SKE:ETON\n if i =='BASE':\n if os.WEXITSTATUS(os.system(\"zfs rename %s %s\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON'))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON')\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON', jzfs)\n log(msg)\n else:\n msg = \" INFO: '%s' was rename to '%s'\" % ( jzfs+\"/\"+ i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON')\n log(msg)\n \n print \" INFO: '%s' was rename to '%s'\" % (orgJZFS, newJZFS)\n# rename jails mountpoint\n if os.WEXITSTATUS(os.system('zfs set mountpoint=%s %s' % (newJPATH, newJZFS))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % (orgJPATH, newJPATH)\n log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % (orgJPATH, newJPATH, jpath)\n log(msg)\n else:\n# mount point fix BASE-SKELETON\n if i =='BASE':\n if os.WEXITSTATUS(os.system('zfs set mountpoint=%s %s' % (chjpath + i +'-SKELETON', chjzfs+\"/\"+i +'-SKELETON'))) != 0:\n msg = \" ERROR: '%s' cannot be renamed to '%s' - skipped!\" % (jpath + i +'-SKELETON', chjpath + i +'-SKELETON')\n 
log(msg)\n msg = \" WARNING: Please move manualy '%s' to '%s' before destroy '%s'\" % (jpath + i +'-SKELETON', chjpath + i +'-SKELETON', jzfs)\n log(msg)\n else:\n msg = \" INFO: '%s' was rename to '%s'\" % (jpath + i +'-SKELETON', chjpath + i +'-SKELETON')\n log(msg)\n# create mount folder for BASE- jail\n if 'BASE-' in i:\n os.system('mkdir -p %s/%s' % (newBJPATH, i))\n msg = (\" INFO: '%s/%s' was created\" % (newBJPATH, i))\n log(msg)\n \n# update BASE- jail mount.fstab and /etc/fstab\n fj = find_jail(i)\n jb = fj[0]\n je = fj[1]\n jcl = fj[2]\n\n dt = str(datetime.now()) \n jcs_list[jb+2] = '# modified on %s by ... JADM ...\\n' % (dt)\n\n# check if jail mark as BASE skeleton model and fix mount 'exec.prestart +=' local options\n os.system('echo \\\"%sBASE %s nullfs ro 0 0\\\" > %s/etc/fstab' % (chjpath, newBJPATH, newJPATH))\n os.system('echo \\\"%s %s%s/SROOT nullfs rw 0 0\\\" >> %s/etc/fstab' % (newJPATH, chjpath, i, newJPATH))\n# check if is vnet\n if 'vnet;' in jcs_list[jb+8]:\n jcs_list[jb+23] = 'mount.fstab=\"%s/etc/fstab\";\\n' % (newJPATH)\n else:\n jcs_list[jb+12] = 'mount.fstab=\"%s/etc/fstab\";\\n' % (newJPATH)\n \n msg = \" INFO: '%s' was rename to '%s'\" % (orgJPATH, newJPATH)\n log(msg)\n\n jzfsyes = \"\"\n jzfsyes = raw_input(\"destroy old zfs '%s' (yes only):> \" % (jzfs))\n if jzfsyes == \"yes\":\n if os.WEXITSTATUS(os.system(\"zfs destroy -r %s\" % (jzfs))) !=0:\n msg = \" ERROR:'%s' cannot be destroyed!\" % (jzfs)\n log(msg)\n else:\n os.system('chflags -R 0 %s' % jpath)\n os.system('rm -rf %s' % jpath)\n msg = \" WARNING:'%s' was destroyed!\" % (jzfs)\n log(msg)\n elif jzfsyes != \"yes\":\n msg = \" INFO: '%s' was keeped!\" % (jzfs)\n log(msg)\n\n# update $jedir in jail.conf\n for i in jcs_list:\n if \"$jzfs\" in i:\n update_jcs = jcs_list.index(i)\n jcs_list[update_jcs] = '$jzfs = \"%s\";\\n' % (chjzfs)\n break\n \n for i in jcs_list:\n if \"$jedir\" in i:\n update_jcs = jcs_list.index(i)\n jcs_list[update_jcs] = '$jedir = \"%s\";\\n' % (chjpath)\n break\n\n break\n \n elif choise == '!':\n log(\" INFO: Interrupted by user\")\n return False\n else:\n log(\" INFO: To change setting type 'bridge', 'zfs' or '!' for exit\")\n \n# check if jail.conf exist\n check_jailconf = os.path.isfile(jailconf)\n if check_jailconf == True:\n dt = datetime.now().strftime(\"%d_%m_%y_%I%M%S\")\n os.system(\"cp %s %s\" % (jailconf, jailconf+\".\"+dt))\n msg = \" INFO: make a backup: %s\" % (jailconf+\".\"+dt)\n log(msg)\n\n# write jail.conf file\n jcs = open(jailconf, 'w+')\n for i in jcs_list:\n jcs.write(i)\n jcs.close()\n\n def_vars() \n print \" \"\n set_menu = ['JADM', 'Settings']\n bz = [[\"Bridge interface:\", bridge_int], [\"Main zfs:\", jzfs]]\n print tabulate(bz, set_menu)\n \n# print and add to log file \n log(\" WARNING: Jadm SETUP was modified\")",
"def connect(self) -> None:",
"def connect(self, bus_controller, gui, message_sender, data_base):\n\n self.bus_controller = bus_controller\n self.__gui = gui\n self.__message_sender = message_sender\n self.data_base = data_base",
"def route_gnd(self):\n \n gnd_start = self.rbl_inv_inst.get_pin(\"gnd\").bc()\n gnd_end = vector(gnd_start.x, self.rbl_inst.uy()+2*self.m2_pitch)\n \n # Add a rail in M1 from bottom of delay chain to two above the RBL\n # This prevents DRC errors with vias for the WL\n dc_top = self.dc_inst.ur()\n self.add_segment_center(layer=\"metal1\",\n start=vector(gnd_start.x, dc_top.y),\n end=gnd_end)\n\n # Add a rail in M2 from RBL inverter to two above the RBL\n self.add_segment_center(layer=\"metal2\",\n start=gnd_start,\n end=gnd_end)\n \n # Add pin from bottom to RBL inverter\n self.add_layout_pin_center_segment(text=\"gnd\",\n layer=\"metal1\",\n start=gnd_start.scale(1,0),\n end=gnd_start)\n \n # Connect the WL pins directly to gnd\n gnd_pin = self.get_pin(\"gnd\").rc()\n for row in range(self.bitcell_loads):\n wl = \"wl[{}]\".format(row)\n pin = self.rbl_inst.get_pin(wl)\n start = vector(gnd_pin.x,pin.cy())\n self.add_segment_center(layer=\"metal1\",\n start=start,\n end=pin.lc())\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=start)\n\n # Add via for the delay chain\n offset = self.dc_inst.get_pins(\"gnd\")[0].bc() + vector(0.5*contact.m1m2.width,0.5*contact.m1m2.height)\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n\n # Add via for the inverter\n offset = self.rbl_inv_inst.get_pin(\"gnd\").bc() - vector(0,0.5*contact.m1m2.height)\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n\n # Connect the bitcell gnd pins to the rail\n gnd_pins = self.get_pins(\"gnd\")\n gnd_start = gnd_pins[0].ul()\n rbl_gnd_pins = self.rbl_inst.get_pins(\"gnd\")\n # Add L shapes to each vertical gnd rail\n for pin in rbl_gnd_pins:\n if pin.layer != \"metal2\":\n continue\n gnd_end = pin.uc()\n gnd_mid = vector(gnd_end.x, gnd_start.y)\n self.add_wire((\"metal1\",\"via1\",\"metal2\"), [gnd_start, gnd_mid, gnd_end])\n gnd_start = gnd_mid\n \n\n # Add a second gnd pin to the second delay chain rail. No need for full length.\n dc_gnd_offset = self.dc_inst.get_pins(\"gnd\")[1].ll()\n self.add_layout_pin(text=\"gnd\",\n layer=\"metal1\",\n offset=dc_gnd_offset.scale(1,0),\n width=self.m1_width,\n height=self.delay_chain_offset.y)",
"def addNeighbor(self, neighbor):",
"def connect_nodes(self):\n node1 = str(self.form.node1_text.toPlainText())\n node2 = str(self.form.node2_text.toPlainText())\n weight = str(self.form.weight_text.toPlainText())\n self.form.node1_text.clear()\n self.form.node2_text.clear()\n self.form.weight_text.clear()\n\n if not node1 or not node2 or not weight: \n self.show_dialog(\"Empty argument.\")\n return\n \n try:\n weight = int(weight)\n except:\n self.show_dialog(\"Weight should be an integer.\")\n return\n\n if self.G.has_edge(node1, node2):\n self.show_dialog(f\"Edge: {node1, node2} is already constructed.\")\n\n else:\n self.G.add_edge(node1, node2, weight=weight)\n self.form.plot_canvas.plot(self.G)",
"def connect_tx(self, M1_track):\n # the first pmos drain to Vdd\n for i in range(len(self.pmos.active_contact_positions)):\n contact_pos = self.pmos_position1 + self.pmos.active_contact_positions[i]\n if i % 2 == 0:\n correct = self.pmos.active_contact.second_layer_position.scale(1,0) \n drain_posistion = contact_pos + correct \n height = self.vdd_position.y - drain_posistion.y\n self.add_rect(layer=\"metal1\",\n offset=drain_posistion,\n width=drc[\"minwidth_metal1\"],\n height=height)\n else:\n # source to pmos2\n correct = (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(self.pmos.active_contact.second_layer_width,\n 0).scale(0.5,0))\n source_position = contact_pos + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n\n # the second pmos\n for i in range(len(self.pmos.active_contact_positions)):\n if i % 2 == 0:\n # source to pmos2\n pmos_active =self.pmos_position2+self.pmos.active_contact_positions[i]\n correct= (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(0.5 * self.pmos.active_contact.second_layer_width,0))\n source_position = pmos_active + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n # two nmos source to gnd\n source_posistion1 = (self.nmos_position1\n + self.nmos.active_contact_positions[0]\n + self.nmos.active_contact.second_layer_position.scale(1,0))\n height = self.gnd_position.y - source_posistion1.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion1,\n width=drc[\"minwidth_metal1\"],\n height=height)\n\n source_posistion2 = (self.nmos_position2\n + self.nmos.active_contact_positions[1]\n + self.nmos.active_contact.second_layer_position.scale(1,0)) \n height = self.gnd_position.y - source_posistion2.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion2,\n width=drc[\"minwidth_metal1\"],\n height=height)",
"def connect(self, dev_eui, app_eui, app_key):\n \n dev_eui = unhexlify(dev_eui)\n app_eui = unhexlify(app_eui)\n app_key = unhexlify(app_key)\n \n # Disable blue blinking and turn LED off\n LED.heartbeat(False)\n LED.off()\n\n # Initialize LoRa in LORAWAN mode\n self.lora = LoRa(mode = LoRa.LORAWAN)\n\n # Join a network using OTAA (Over the Air Activation)\n self.lora.join(activation = LoRa.OTAA, auth = (dev_eui, app_eui, app_key), timeout = 0)\n\n # Wait until the module has joined the network\n count = 0\n while not self.lora.has_joined():\n LED.blink(1, 2.5, 0xff0000)\n # print(\"Trying to join: \" , count)\n count = count + 1\n\n # Create a LoRa socket\n LED.blink(2, 0.1)\n self.s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n\n # Set the LoRaWAN data rate\n self.s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)\n\n # Make the socket non-blocking\n self.s.setblocking(False)\n\n # print (\"Joined! \", count)\n # print(\"Create LoRaWAN socket\")\n\n # Create a raw LoRa socket\n self.s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n self.s.setblocking(False)",
"def add_neighbor(self):\n self.fono += 1",
"def connectBus(self, bus):\n self.mem_side = bus.cpu_side_ports",
"def connect():",
"def msg_engine_connect(self,msg):\r\n log.debug('Adding edit() command to new engine')\r\n engname = msg.get_data()[0]\r\n\r\n #get the new engine interface\r\n app = wx.GetApp()\r\n console = app.toolmgr.get_tool('Console')\r\n eng = console.get_engine_console(engname)\r\n\r\n #When an engine is started add the edit() command\r\n eng.add_builtin(edit, 'edit')\r\n\r\n #add any set breakpoints to this engine's debugger\r\n for bpdata in self.bpoints:\r\n eng.debugger.set_breakpoint(bpdata)",
"def connect(self):\n try:\n # Port and packet handler set up\n self.port_handler = port_h.PortHandler(self.port_name)\n self.packet_handler = packet_h.PacketHandler(self.protocol_version)\n\n # Set up port and baud rate\n self.port_handler.openPort()\n self.port_handler.setBaudRate(self.baud_rate)\n self.__find_motors()\n except rospy.ROSInterruptException: pass\n\n self.running = True",
"def _connect_to_hardware(self):\n if False: # !!!TEMP:need to validate config...\n if len(self.config['ports']) > 1:\n self.log.fatal(\"only one slave com port is supported\")\n if len(self.config['ports']) == 0:\n self.log.warning(\"no communication port setted!\")\n return\n port = self.config['ports'][0]\n self.communicator = RaspSerialCommunicator(\n platform=self, port=port,\n baud=self.config['baud'])\n self.communicator = RaspSerialCommunicator(\n platform=self, port='/dev/ttyAMA0',\n baud=115200)",
"def port_setup(robot_name, num_cameras):\n\tglobal local_in_port\n\tglobal local_out_port\n\tglobal local_GPS_port\n\tglobal local_Dest_port\n\n\tglobal local_in_port_name\n\tglobal local_out_port_name\n\tglobal local_GPS_port_name\n\tglobal local_Dest_port_name\n\n\tglobal local_Radio_in_port\n\tglobal local_Radio_out_port\n\n\tglobal ors_in_port_name\n\tglobal ors_out_port_name\n\tglobal ors_GPS_port_name\n\tglobal ors_Dest_port_name\n\tglobal ors_Radio_in_port_name\n\tglobal ors_Radio_out_port_name\n\n\t# Define the names for all the ports\n\tport_prefix = \"/ors/robots/\" + robot_name + \"/\"\n\tlocal_port_prefix = \"/atrv_client/\" + robot_name + \"/\"\n\tview_prefix = \"/img/\" + robot_name + \"/\"\n\n\tors_in_port_name = port_prefix + \"in\"\n\tors_out_port_name = port_prefix + \"out\"\n\n\tors_Dest_port_name = port_prefix + \"Motion_Controller/in\"\n\tors_GPS_port_name = port_prefix + \"GPS/out\"\n\n\tors_Radio_out_port_name = port_prefix + \"Radio/out\"\n\tors_Radio_in_port_name = port_prefix + \"Radio/in\"\n\n\tlocal_in_port_name = local_port_prefix + \"in/\"\n\tlocal_out_port_name = local_port_prefix + \"out/\"\n\n\tlocal_GPS_port_name = local_port_prefix + \"GPS/in/\"\n\tlocal_Dest_port_name = local_port_prefix + \"Motion_Controller/out/\"\n\n\tlocal_Radio_in_port_name = local_port_prefix + \"Radio/in\"\n\tlocal_Radio_out_port_name = local_port_prefix + \"Radio/out\"\n\n\t# Start the yarp network connection\n\tyarp.Network.init()\n\n\t# Open the client ports\n\tlocal_in_port = yarp.BufferedPortBottle()\n\tlocal_in_port.open(local_in_port_name)\n\tlocal_out_port = yarp.BufferedPortBottle()\n\tlocal_out_port.open(local_out_port_name)\n\n\tlocal_GPS_port = yarp.BufferedPortBottle()\n\tlocal_GPS_port.open(local_GPS_port_name)\n\tlocal_Dest_port = yarp.BufferedPortBottle()\n\tlocal_Dest_port.open(local_Dest_port_name)\n\n\tlocal_Radio_out_port = yarp.BufferedPortBottle()\n\tlocal_Radio_out_port.open(local_Radio_out_port_name)\n\tlocal_Radio_in_port = yarp.BufferedPortBottle()\n\tlocal_Radio_in_port.open(local_Radio_in_port_name)\n\n\t# Connect the client ports to the simulator ports\n\tyarp.Network.connect (local_out_port_name, ors_in_port_name)\n\tyarp.Network.connect (ors_out_port_name, local_in_port_name)\n\n\tyarp.Network.connect (ors_GPS_port_name, local_GPS_port_name)\n\tyarp.Network.connect (local_Dest_port_name, ors_Dest_port_name)\n\n\tyarp.Network.connect (local_Radio_out_port_name, ors_Radio_in_port_name)\n\tyarp.Network.connect (ors_Radio_out_port_name, local_Radio_in_port_name)\n\n\n\t# Connect the cameras to yarpview windows\n\tprint (\" * Initializing yarpview windows.\")\n\tfor id in range(int(num_cameras)):\n\t\t# Build the name of the camera\n\t\tcamera_name = \"Camera{0}\".format(id+1)\n\n\t\t# Prepare the ports to be used\n\t\timg_view_port = view_prefix + camera_name\n\t\tatrv_camera_port = port_prefix + camera_name\n\n\t\tyarp.Network.connect (atrv_camera_port, img_view_port)",
"def connect(self):\n\t\tpass",
"def start(self):\n\n rospy.loginfo(self.name + \": Node started\")\n rospy.set_param(\"path_logger_active\", False)\n\n rospy.sleep(1)\n\n self.read_waypoints_pickle()\n rospy.loginfo(self.name + \": Global waypoints read from file\")\n\n while True:\n if self.uav_pose is None:\n rospy.loginfo(self.name + \": Waiting for UAV Pose\")\n self._rate_reached_waypoint.sleep()\n else:\n uav_pose_start = copy.copy(self.uav_pose) # copy is needed here, because uav_pose is mutable!\n rospy.loginfo(self.name + \": UAV Pose received\")\n break\n\n # Set mode to Offboard, Arm the UAV and takeoff to set altitude\n self._takeoff_procedure(uav_pose_start)\n rospy.sleep(1) # To prevent that takeoff goes directly into path following\n rospy.loginfo(self.name + ': Takeoff procedure finished')\n\n # Start publishing global waypoints\n uav_pose_after_takeoff = copy.copy(self.uav_pose)\n wp_global_previous_temp = Waypoint()\n wp_global_previous_temp.x_lat = uav_pose_after_takeoff.pose.position.x\n wp_global_previous_temp.y_long = uav_pose_after_takeoff.pose.position.y\n wp_global_previous_temp.z_alt = uav_pose_after_takeoff.pose.position.z\n wp_global_previous_temp = copy.copy(wp_global_previous_temp)\n self.waypoint_global_next = self.waypoint_global_all.waypoints[0]\n self.waypoint_global_previous = wp_global_previous_temp\n self._thread_waypoint_global.start()\n\n # Activate path logging node. Maybe not best coding practice to do this with a parameter and not a publish/\n # subscriber or service but the path logger was only needed to record test results\n rospy.set_param(\"path_logger_active\", True)\n\n # Starts forwarding the setpoints from the local planner\n self._thread_forward_local_setpoints.start()\n\n # Stops sending the takeoff waypoint. Between this and\n # sending the next waypoint from the local planner can be a maximum of .5 seconds, since waypoints have\n # to be published with >2Hz (PX4/MAVROS restriction)\n self._thread_takeoff_setpoint.do_run = False\n\n # Iterates over all global waypoints\n for wp_global_current in self.waypoint_global_all.waypoints:\n self.waypoint_global_next = wp_global_current\n self.waypoint_global_previous = wp_global_previous_temp\n rospy.loginfo(self.name + ': Published new global waypoint')\n\n while not self._is_at_position(self.uav_pose, wp_global_current, atol=self.tol_wp_reached) \\\n and not rospy.is_shutdown():\n self._rate_reached_waypoint.sleep()\n\n rospy.loginfo(self.name + ': Reached previous global waypoint')\n wp_global_previous_temp = copy.copy(wp_global_current)\n\n self.finished = True\n rospy.set_param(\"path_logger_active\", False)\n self._thread_forward_local_setpoints.do_run = False # Stops forwarding the setpoints from the local planner\n rospy.loginfo(self.name + ': Reached final global waypoint')\n rospy.sleep(10)\n return",
"def _add_lamp_outlets(self):\r\n lst = self.model.get_all_lamp_outlets()\r\n\r\n for itm in lst:\r\n self._add_lamp_outlet(itm)",
"def add(self, inp, pos):\n self.pos = pos\n self.para = list()\n # Call backend for dependency parsing.\n cabo = CabochaClient()\n cabo.add(self.proc.query(inp), self.pos)\n pool = [cabo.root]\n plist = [cabo.root]\n self.vlist = dict()\n # Use BFS to get a list of nodes.\n while pool:\n pid = pool.pop(0)\n for cid in cabo.childrenList[pid]:\n pool.append(cid)\n plist.insert(0, cid)\n # Add nodes using plist(from leaves to roots).\n for i in range(len(plist)):\n pid = plist[i]\n self._addChildren(pid, cabo.chunks)\n self._processPara()\n\n # Return here if self.autosub is False.\n if not self.autosub:\n return\n # If root has no subject, add omitted subject node.\n if self.G.nodes[cabo.chunks[cabo.root].main]['sub'] == '':\n omitted = CaboChunk(-1, cabo.root)\n omitted.main = \"省略される主体[{0}@{1}]\".format(self.pos, 0)\n omitted.func = \"(省略)\"\n omitted.type = 0\n omitted.pro = 7\n omitted.surface = \"省略される主体\"\n omitted.yomi = \"ショウリャクサレルシュゴ\"\n self._addNode(omitted)\n self._addEdge(omitted.main, cabo.chunks[cabo.root].main, label=\"(省略)主体\", etype=\"sub\")\n self.G.nodes[cabo.chunks[cabo.root].main]['sub'] = omitted.main\n # Add autosub\n for i in range(len(plist)):\n pid = plist[i]\n if cabo.chunks[pid].type in [1, 2] and self.G.nodes[cabo.chunks[pid].main]['sub']== \"\":\n self._addEdge(self.G.nodes[cabo.chunks[cabo.root].main]['sub'], cabo.chunks[pid].main, label=\"主体候補\", etype=\"autosub\")\n self.G.nodes[cabo.chunks[pid].main]['sub'] = self.G.nodes[cabo.chunks[cabo.root].main]['sub']"
]
| [
"0.56937957",
"0.5588377",
"0.5364039",
"0.53394985",
"0.53394985",
"0.5271092",
"0.5241046",
"0.5142711",
"0.5137675",
"0.512801",
"0.5114251",
"0.5101308",
"0.5082479",
"0.50810903",
"0.50637263",
"0.5054848",
"0.50368226",
"0.50169",
"0.50132143",
"0.5012345",
"0.49996084",
"0.49974918",
"0.49787015",
"0.4972965",
"0.49651182",
"0.4954637",
"0.49516633",
"0.49396792",
"0.4933926",
"0.4918604"
]
| 0.5722017 | 0 |
Adds both the bitline and bitline-bar to the module | def route_bitlines(self):
# adds the BL on metal 2
offset = vector(self.bitcell.get_pin(self.bitcell_bl).cx(),0) - vector(0.5 * self.m2_width,0)
self.add_layout_pin(text="bl",
layer="metal2",
offset=offset,
width=drc['minwidth_metal2'],
height=self.height)
# adds the BR on metal 2
offset = vector(self.bitcell.get_pin(self.bitcell_br).cx(),0) - vector(0.5 * self.m2_width,0)
self.add_layout_pin(text="br",
layer="metal2",
offset=offset,
width=drc['minwidth_metal2'],
height=self.height) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addToolBarButtons(self):",
"def addBarrelBlue(self, event):\n # let user draw second ROI\n ROI = RoiPoly(color='b') #let user draw ROI\n plt.show(block=False)\n mask = ROI.get_mask(self.greyimg)\n self.ROI += mask",
"def add_modules(self):\n # This is the threshold detect inverter on the output of the RBL\n self.rbl_inv_inst=self.add_inst(name=\"rbl_inv\",\n mod=self.inv,\n offset=self.rbl_inv_offset+vector(0,self.inv.width),\n rotate=270,\n mirror=\"MX\")\n self.connect_inst([\"bl[0]\", \"out\", \"vdd\", \"gnd\"])\n\n self.tx_inst=self.add_inst(name=\"rbl_access_tx\",\n mod=self.access_tx,\n offset=self.access_tx_offset,\n rotate=90)\n # D, G, S, B\n self.connect_inst([\"vdd\", \"delayed_en\", \"bl[0]\", \"vdd\"])\n # add the well and poly contact\n\n self.dc_inst=self.add_inst(name=\"delay_chain\",\n mod=self.delay_chain,\n offset=self.delay_chain_offset,\n rotate=90)\n self.connect_inst([\"en\", \"delayed_en\", \"vdd\", \"gnd\"])\n\n self.rbc_inst=self.add_inst(name=\"bitcell\",\n mod=self.replica_bitcell,\n offset=self.bitcell_offset,\n mirror=\"MX\")\n self.connect_inst([\"bl[0]\", \"br[0]\", \"delayed_en\", \"vdd\", \"gnd\"])\n\n self.rbl_inst=self.add_inst(name=\"load\",\n mod=self.rbl,\n offset=self.rbl_offset)\n self.connect_inst([\"bl[0]\", \"br[0]\"] + [\"gnd\"]*self.bitcell_loads + [\"vdd\", \"gnd\"])",
"def add_modules(self):\n # Bitcell array\n self.bitcell_array = factory.create(module_type=\"bitcell_array\",\n column_offset=1 + len(self.left_rbl),\n cols=self.column_size,\n rows=self.row_size)\n\n # Replica bitlines\n self.replica_columns = {}\n\n for port in self.all_ports:\n if port in self.left_rbl:\n # We will always have self.rbl[0] rows of replica wordlines below\n # the array.\n # These go from the top (where the bitcell array starts ) down\n replica_bit = self.rbl[0] - port\n column_offset = self.rbl[0]\n\n elif port in self.right_rbl:\n\n # We will always have self.rbl[0] rows of replica wordlines below\n # the array.\n # These go from the bottom up\n replica_bit = self.rbl[0] + self.row_size + port\n column_offset = self.rbl[0] + self.column_size + 1\n else:\n continue\n\n self.replica_columns[port] = factory.create(module_type=\"replica_column\",\n rows=self.row_size,\n rbl=self.rbl,\n column_offset=column_offset,\n replica_bit=replica_bit)\n\n # Dummy row\n self.dummy_row = factory.create(module_type=\"dummy_array\",\n cols=self.column_size,\n rows=1,\n # dummy column + left replica column\n column_offset=1 + len(self.left_rbl),\n mirror=0)\n\n # Dummy Row or Col Cap, depending on bitcell array properties\n col_cap_module_type = (\"col_cap_array\" if self.cell.end_caps else \"dummy_array\")\n self.col_cap_top = factory.create(module_type=col_cap_module_type,\n cols=self.column_size,\n rows=1,\n # dummy column + left replica column(s)\n column_offset=1 + len(self.left_rbl),\n mirror=0,\n location=\"top\")\n\n self.col_cap_bottom = factory.create(module_type=col_cap_module_type,\n cols=self.column_size,\n rows=1,\n # dummy column + left replica column(s)\n column_offset=1 + len(self.left_rbl),\n mirror=0,\n location=\"bottom\")\n\n # Dummy Col or Row Cap, depending on bitcell array properties\n row_cap_module_type = (\"row_cap_array\" if self.cell.end_caps else \"dummy_array\")\n\n self.row_cap_left = factory.create(module_type=row_cap_module_type,\n cols=1,\n column_offset=0,\n rows=self.row_size + self.extra_rows,\n mirror=(self.rbl[0] + 1) % 2)\n\n self.row_cap_right = factory.create(module_type=row_cap_module_type,\n cols=1,\n # dummy column\n # + left replica column(s)\n # + bitcell columns\n # + right replica column(s)\n column_offset=1 + len(self.left_rbl) + self.column_size + self.rbl[0],\n rows=self.row_size + self.extra_rows,\n mirror=(self.rbl[0] + 1) %2)",
"def addNonBarrelBlue(self, event):\n # let user draw second ROI\n ROI = RoiPoly(color='r') #let user draw ROI\n plt.show(block=False)\n mask = ROI.get_mask(self.greyimg)\n mask = mask*2\n self.ROI += mask",
"def addToolBarButtons(self):\n self.addNewCategoryButton()",
"def addBL(self):\n self.parent.copyCurrentWinState(self.pltw)\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()",
"def setBarType(bartype='vertical'):\n dislin.bartyp(bardict[bartype])",
"def _b_line_changed(self):\n self.bSpin.setValue(self.bLine.value())\n self._update_image()",
"def createToolBar(self):\n pass",
"def add_basic_block(self, basic_block):\n self.basic_blocks.append(basic_block)\n basic_block.function = self",
"def add_menu_bar(self):\n # File menu option to change difficulty level\n beginner_level_action = QtGui.QAction(QtGui.QIcon(\"\"), '&Beginner', self)\n beginner_level_action.setShortcut('Ctrl+B')\n beginner_level_action.setStatusTip('Set difficulty level to \"Beginner\" ')\n beginner_level_action.triggered.connect(functools.partial(self.change_game_level,\\\n DifficultyLevel.BeginnerLevel))\n\n # File menu option to change difficulty level\n intermediate_level_action = QtGui.QAction(QtGui.QIcon(\"\"), '&Intermediate', self)\n intermediate_level_action.setShortcut('Ctrl+I')\n intermediate_level_action.setStatusTip('Set difficulty level to \"Intermediate\" ')\n intermediate_level_action.triggered.connect(functools.partial(self.change_game_level,\\\n DifficultyLevel.IntermediateLevel))\n\n # File menu option to change difficulty level\n expert_level_action = QtGui.QAction(QtGui.QIcon(\"\"), '&Expert', self)\n expert_level_action.setShortcut('Ctrl+E')\n expert_level_action.setStatusTip('Set difficulty level to \"Expert\" ')\n expert_level_action.triggered.connect(functools.partial(self.change_game_level,\\\n DifficultyLevel.ExpertLevel))\n\n # File menu option \"About\" which gives information about game\n about_game_action = QtGui.QAction(QtGui.QIcon(\"\"), '&About', self)\n about_game_action.setShortcut('Ctrl+A')\n about_game_action.setStatusTip(\"Show Application's ABOUT box\")\n about_game_action.triggered.connect(self.about)\n\n # File menu option \"About\" which gives information about game\n game_help_action = QtGui.QAction(QtGui.QIcon(\"\"), '&Help', self)\n game_help_action.setShortcut('Ctrl+H')\n game_help_action.setStatusTip(\"Show game's help\")\n game_help_action.triggered.connect(self.game_help)\n\n # File Menu option to view the score.\n view_leaderboard_action = QtGui.QAction(QtGui.QIcon(\"\"), '&View Score', self)\n view_leaderboard_action.setShortcut('Ctrl+V')\n view_leaderboard_action.setStatusTip(\"View current game's leader board\")\n view_leaderboard_action.triggered.connect(self.showtopscores)\n\n # File Menu option for exit the game.\n exit_game_action = QtGui.QAction(QtGui.QIcon(\"exit.png\"), '&Exit', self)\n exit_game_action.setShortcut('Ctrl+Q')\n exit_game_action.setStatusTip('Exit application')\n exit_game_action.triggered.connect(QtGui.QApplication.quit)\n\n # create a menu bar and we need to add 2 menus\n # 1. File and 2. Help\n menubar = self.menuBar()\n file_menu = menubar.addMenu('&File')\n help_menu = menubar.addMenu('&Help')\n\n # Inside File menu create a submenu to change gane level\n # This sub menu has 3 actions (3 levels to choose from)\n change_level_sub_menu = file_menu.addMenu('&Game Level')\n change_level_sub_menu.addAction(beginner_level_action)\n change_level_sub_menu.addAction(intermediate_level_action)\n change_level_sub_menu.addAction(expert_level_action)\n\n # Add other actions in file menu after game level.\n file_menu.addAction(view_leaderboard_action)\n\n # Add seperator (visible line) before showing exit.\n file_menu.addSeparator().setText(\"Alignment\")\n file_menu.addAction(exit_game_action)\n\n # Add actions (sub menus) for help menu.\n help_menu.addAction(about_game_action)\n help_menu.addAction(game_help_action)",
"def _import_bh_(self):",
"def updateBar(self):\n pass",
"def add_bitline_contacts(self):\n\n stack=(\"metal1\", \"via1\", \"metal2\")\n pos = self.lower_pmos_inst.get_pin(\"S\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.lower_pmos_inst.get_pin(\"D\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.upper_pmos1_inst.get_pin(\"S\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.upper_pmos2_inst.get_pin(\"D\").center()\n self.add_contact_center(layers=stack,\n offset=pos)",
"def AddToolbar(self, name):\n if name == \"digitMap\":\n self.toolbars[name] = RDigitMapToolbar(self)\n \n self._mgr.AddPane(self.toolbars[name],\n wx.aui.AuiPaneInfo().\n Name(name).Caption(_(\"Map Toolbar\")).\n ToolbarPane().Top().\n LeftDockable(False).RightDockable(False).\n BottomDockable(False).TopDockable(True).\n CloseButton(False).Layer(2).Row(1).\n BestSize((self.toolbars[name].GetBestSize())))\n \n elif name == \"rdigit\":\n self.toolbars[name] = RDigitToolbar(parent = self, MapWindow = self.MapWindow,\n digitClass = RDigit, layerTree = self.mapManager)\n \n self._mgr.AddPane(self.toolbars[name],\n wx.aui.AuiPaneInfo().\n Name(\"rdigittoolbar\").Caption(_(\"Raster Digitizer Toolbar\")).\n ToolbarPane().Top().Row(1).\n LeftDockable(False).RightDockable(False).\n BottomDockable(False).TopDockable(True).\n CloseButton(False).Layer(0).\n BestSize((self.toolbars['rdigit'].GetBestSize()))) \n self.MapWindow.SetToolbar(self.toolbars[name])\n #self._mgr.GetPane('rdigittoolbar').Hide()",
"def activate_statusbar_icon_mode():\n pass",
"def __init__(self, ax, useblit=False, **lineprops):\n self.ax = ax\n self.canvas = ax.figure.canvas\n\n self.canvas.mpl_connect('motion_notify_event', self.onmove)\n self.canvas.mpl_connect('draw_event', self.clear)\n\n self.visible = True\n self.horizOn = True\n self.vertOn = True\n self.useblit = useblit\n\n self.lineh = ax.axhline(0, visible=False, **lineprops)\n self.linev = ax.axvline(0, visible=False, **lineprops)\n\n self.background = None\n self.needclear = False",
"def __init__(self, ax, labels, bw=None, bh=None, colors=None, actives=None):\n AxesWidget.__init__(self, ax)\n\n labels = copy.deepcopy(labels)\n\n labels.append(\"select all\")\n labels.append(\"unselect all\")\n print(\"colors\", colors)\n colors = colors+[\"#000000\"]*2\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_navigate(False)\n\n if actives is None:\n actives = [False] * len(labels)\n actives = actives+[False]*2\n\n if len(labels) > 1:\n dy = 1. / (len(labels) + 1)\n ys = np.linspace(1 - dy, dy, len(labels))\n else:\n dy = 0.25\n ys = [0.5]\n\n axcolor = ax.get_facecolor()\n\n self.labels = []\n self.lines = []\n self.rectangles = []\n\n lineparams = {'color': 'k', 'linewidth': 1.2,\n 'transform': ax.transAxes, 'solid_capstyle': 'butt'}\n\n for index, (y, label, active) in enumerate(zip(ys, labels, actives)):\n if colors is None:\n t = ax.text(0.25, y, label, transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='center')\n else:\n t = ax.text(0.25, y, label, transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='center', color=colors[index])\n\n if bw is None:\n w = dy / 2\n else:\n w = bw\n\n if bh is None:\n h = dy / 2\n else:\n h = bh\n\n x, y = 0.05, y - h / 2\n\n p = Rectangle(xy=(x, y), width=w, height=h, edgecolor='black',\n facecolor=axcolor, transform=ax.transAxes)\n\n l1 = Line2D([x, x + w], [y + h, y], **lineparams)\n l2 = Line2D([x, x + w], [y, y + h], **lineparams)\n\n l1.set_visible(active)\n l2.set_visible(active)\n self.labels.append(t)\n self.rectangles.append(p)\n self.lines.append((l1, l2))\n ax.add_patch(p)\n ax.add_line(l1)\n ax.add_line(l2)\n\n self.connect_event('button_press_event', self._clicked)\n\n self.cnt = 0\n self.observers = {}",
"def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)",
"def _export_bh_(cls, self):",
"def add_layout_pins(self):\n\n # All wordlines\n # Main array wl and bl/br\n for pin_name in self.all_wordline_names:\n pin_list = self.bitcell_array_inst.get_pins(pin_name)\n for pin in pin_list:\n self.add_layout_pin(text=pin_name,\n layer=pin.layer,\n offset=pin.ll().scale(0, 1),\n width=self.width,\n height=pin.height())\n\n # Replica wordlines (go by the row instead of replica column because we may have to add a pin\n # even though the column is in another local bitcell array)\n for (names, inst) in zip(self.rbl_wordline_names, self.dummy_row_replica_insts):\n for (wl_name, pin_name) in zip(names, self.dummy_row.get_wordline_names()):\n if wl_name in self.gnd_wordline_names:\n continue\n pin = inst.get_pin(pin_name)\n self.add_layout_pin(text=wl_name,\n layer=pin.layer,\n offset=pin.ll().scale(0, 1),\n width=self.width,\n height=pin.height())\n\n for pin_name in self.all_bitline_names:\n pin_list = self.bitcell_array_inst.get_pins(pin_name)\n for pin in pin_list:\n self.add_layout_pin(text=pin_name,\n layer=pin.layer,\n offset=pin.ll().scale(1, 0),\n width=pin.width(),\n height=self.height)\n\n # Replica bitlines\n if len(self.rbls) > 0:\n for (names, inst) in zip(self.rbl_bitline_names, self.replica_col_insts):\n pin_names = self.replica_columns[self.rbls[0]].all_bitline_names\n for (bl_name, pin_name) in zip(names, pin_names):\n pin = inst.get_pin(pin_name)\n self.add_layout_pin(text=bl_name,\n layer=pin.layer,\n offset=pin.ll().scale(1, 0),\n width=pin.width(),\n height=self.height)",
"def __init__(self):\n # This is border line, maybe need another structure to support this\n Relayer.__init__(self, interface.Power, DEFAULT_PRIORITIES)\n interface.Power.__init__(self)",
"def _add_hcolorbar(self, im, colormap=\"jet_r\", label=\"\"):\n pos = self.ax.get_position()\n cpos = [\n pos.x0 + 0.3 * pos.width,\n pos.y0 - 0.15 * pos.height,\n pos.width * 0.5,\n 0.02,\n ] # this list defines (left, bottom, width, height)\n cax = self.fig.add_axes(cpos)\n cb2 = self.fig.colorbar(\n im,\n cax=cax,\n cmap=colormap,\n spacing=\"uniform\",\n orientation=\"horizontal\",\n )\n cb2.set_label(label)\n return",
"def SetupToolBar( self ):\n tb = self.CreateToolBar( self.TBFLAGS )\n tsize = (24,24)\n tb.ToolBitmapSize = tsize\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize)\n tb.AddLabelTool(ID_OPEN, \"Open\", open_bmp, shortHelp=\"Open\", longHelp=\"Open a (c)Profile trace file\")\n tb.AddSeparator()\n# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)\n self.rootViewTool = tb.AddLabelTool(\n ID_ROOT_VIEW, _(\"Root View\"),\n wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),\n shortHelp=_(\"Display the root of the current view tree (home view)\")\n )\n self.rootViewTool = tb.AddLabelTool(\n ID_BACK_VIEW, _(\"Back\"), \n wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),\n shortHelp=_(\"Back to the previously activated node in the call tree\")\n )\n self.upViewTool = tb.AddLabelTool(\n ID_UP_VIEW, _(\"Up\"),\n wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),\n shortHelp=_(\"Go one level up the call tree (highest-percentage parent)\")\n )\n tb.AddSeparator()\n # TODO: figure out why the control is sizing the label incorrectly on Linux\n self.percentageViewTool = wx.CheckBox( tb, -1, _(\"Percent \") )\n self.percentageViewTool.SetToolTip( wx.ToolTip(_(\"Toggle display of percentages in list views\")) )\n tb.AddControl( self.percentageViewTool )\n wx.EVT_CHECKBOX( self.percentageViewTool, self.percentageViewTool.GetId(), self.OnPercentageView )\n \n self.packageViewTool = wx.CheckBox( tb, -1, _(\"File View \") )\n self.packageViewTool.SetToolTip( wx.ToolTip(_(\"Switch between call-hierarchy and package/module/function hierarchy\")) )\n tb.AddControl( self.packageViewTool )\n wx.EVT_CHECKBOX( self.packageViewTool, self.packageViewTool.GetId(), self.OnPackageView )\n tb.Realize()",
"def _add_color_menu(self):\n print 'adding color menu'\n self.menuBar.addcascademenu('Color', 'Color Atoms'); \n c_lambda = lambda: self.color_wireframe('cpk');\n self.menuBar.addmenuitem('Color Atoms','command','Color wireframes cpk', command=c_lambda, label='cpk')\n c_lambda = lambda: self.color_wireframe('type');\n self.menuBar.addmenuitem('Color Atoms','command','Color wireframes by type', command=c_lambda, label='type')\n c_lambda = lambda: self.color_wireframe('chain');\n self.menuBar.addmenuitem('Color Atoms','command','color wireframes by chain', command=c_lambda, label='chain')\n c_lambda = lambda: self.color_wireframe('hydrogen_type');\n self.menuBar.addmenuitem('Color Atoms','command','color wireframes by H type', command=c_lambda, label='H Type')\n \n self.menuBar.addcascademenu('Color', 'Color Trace')\n self.menuBar.addmenuitem('Color Trace','command','Color tubes by secondary', command=self.color_trace_by_secondary,label='secondary')\n self.menuBar.addmenuitem('Color Trace','command','Color tubes by type', command=self.color_tubes_type,label='type')\n self.menuBar.addmenuitem('Color Trace','command','Color tubes by chain', command=self.color_tubes_chain,label='chain')\n\n self.menuBar.addcascademenu('Color', 'Color Volumes')\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes cpk', command=self.color_volumes_cpk,label='cpk')\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by type', command=self.color_volumes_type,label='type')\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by chain', command=self.color_volumes_chain,label='chain')\n\n # create menu items for .features keys for atoms and residues\n if self.system != 'None' and self.system != None:\n key_store = {}\n key_store['atom'] = self.system.ProteinList[0].atoms[0].features.keys()\n key_store['residue'] = self.system.ProteinList[0].residues[0].features.keys()\n for run_type in ['atom', 'residue']:\n broken = 0\n for key in key_store[run_type]:\n for pol in self.system.ProteinList:\n if key == 'domain':\n self.print_domain_info(pol)\n normalized = 1\n # if the feature includes non-digits, pass. 
if it is all digits, see if \n # it is normalized\n if run_type == 'atom':\n item_list = pol.atoms\n elif run_type == 'residue':\n item_list = pol.residues\n same_val_count = 0\n try:\n item_list[0].features[key]\n except KeyError:\n continue\n else:\n first_val = item_list[0].features[key]\n for item in item_list:\n try:\n feature = item.features[key]\n except KeyError:\n print 'key error on %s, breaking'%(key)\n broken = 1\n break\n try:\n int(feature)\n except ValueError:\n print '%s not digit, breaking'%(feature)\n broken = 1\n break\n else:\n if feature != -1 and (feature < 0.0 or feature > 1.0):\n normalized = 0\n if feature == first_val:\n same_val_count += 1\n if same_val_count == len(item_list):\n print '%s all the same value; breaking'%(key)\n broken = 1\n break\n if key == 'domain':\n if item.features[key] == 0.0:\n item.features[key] = -1\n else:\n # if not normalized, make a new key called key+'_normalized', and swap the old\n # key with the new key to color by it\n old_key = copy.copy(key)\n if not normalized and (key+'_normalized' not in item.features.keys()):\n min_f = 1000000\n max_f = -1000000\n for item2 in item_list:\n feature = item2.features[key]\n if feature != -1:\n if feature < min_f:\n min_f = feature\n if feature > max_f:\n max_f = feature\n key = key + '_normalized'\n for item2 in item_list:\n if item2.features[old_key] != -1.0:\n d = (item2.features[old_key]-min_f) / (max_f-min_f+0.0)\n item2.features[key] = d\n else:\n item2.features[key] = -1.0\n if run_type == 'residue':\n c_lambda1 = lambda p=pol, k=key: self.color_trace_by_residue_feature(p, k)\n self.menuBar.addmenuitem('Color Trace','command','Color trace by res '+key, command=c_lambda1, label='%s %s'%(pol.chain_name, key))\n c_lambda2 = lambda p=pol, k=key: self.color_volume_by_residue_feature(p, k)\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by res '+key, command=c_lambda2, label='%s %s'%(pol.chain_name, key))\n c_lambda3 = lambda p=pol, k=key: self.color_atoms_by_residue_feature(p, k)\n self.menuBar.addmenuitem('Color Atoms','command','Color atoms by res '+key, command=c_lambda3, label='%s %s'%(pol.chain_name, key))\n elif run_type == 'atom':\n c_lambda1 = lambda p=pol, k=key: self.color_trace_by_atom_feature(p, k)\n self.menuBar.addmenuitem('Color Trace','command','Color trace by atom '+key, command=c_lambda1, label='%s %s'%(pol.chain_name, key))\n c_lambda2 = lambda p=pol, k=key: self.color_volume_by_atom_feature(p, k)\n self.menuBar.addmenuitem('Color Volumes','command','Color volumes by atom '+key, command=c_lambda2, label='%s %s'%(pol.chain_name, key))\n c_lambda3 = lambda p=pol, k=key: self.color_atoms_by_atom_feature(p, k)\n self.menuBar.addmenuitem('Color Atoms','command','Color atoms by atom '+key, command=c_lambda3, label='%s %s'%(pol.chain_name, key))\n key = old_key\n #broken = 1\n #break\n if broken:\n break",
"def setColorBarPositionHoriz(pos):\n dislin.vkxbar(pos)",
"def __iadd__ (self, module):\n self.Add (module)\n return self",
"def connect_pmos(self, pmos_pin, bit_pin):\n\n ll_pos = vector(min(pmos_pin.lx(),bit_pin.lx()), pmos_pin.by())\n ur_pos = vector(max(pmos_pin.rx(),bit_pin.rx()), pmos_pin.uy())\n\n width = ur_pos.x-ll_pos.x\n height = ur_pos.y-ll_pos.y\n self.add_rect(layer=\"metal2\",\n offset=ll_pos,\n width=width,\n height=height)",
"def __init__(self):\n self.label = \"Surface Generation\"\n self.alias = \"far77\"\n\n # List of tool classes associated with this toolbox\n self.tools = [LineToFar77]"
]
| [
"0.5931237",
"0.56883824",
"0.5411799",
"0.5363328",
"0.523951",
"0.5219392",
"0.52153605",
"0.52136797",
"0.5135922",
"0.50847983",
"0.5036947",
"0.50350106",
"0.5027019",
"0.5021388",
"0.5010233",
"0.4987663",
"0.49871254",
"0.49766114",
"0.49440458",
"0.49363413",
"0.48943728",
"0.4883316",
"0.48688272",
"0.48598957",
"0.48435947",
"0.48325127",
"0.48101503",
"0.47950354",
"0.47916237",
"0.4791239"
]
| 0.61381006 | 0 |
Adds contacts/vias from metal1 to metal2 for the bitlines | def add_bitline_contacts(self):
stack=("metal1", "via1", "metal2")
pos = self.lower_pmos_inst.get_pin("S").center()
self.add_contact_center(layers=stack,
offset=pos)
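    # Via at the drain of the lower pmos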
pos = self.lower_pmos_inst.get_pin("D").center()
self.add_contact_center(layers=stack,
offset=pos)
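    # Via at the source of the first upper pmos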
pos = self.upper_pmos1_inst.get_pin("S").center()
self.add_contact_center(layers=stack,
offset=pos)
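    # Via at the drain of the second upper pmos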
pos = self.upper_pmos2_inst.get_pin("D").center()
self.add_contact_center(layers=stack,
offset=pos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def route_bitlines(self):\n # adds the BL on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_bl).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"bl\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)\n\n # adds the BR on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_br).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"br\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)",
"def connect_tx(self, M1_track):\n # the first pmos drain to Vdd\n for i in range(len(self.pmos.active_contact_positions)):\n contact_pos = self.pmos_position1 + self.pmos.active_contact_positions[i]\n if i % 2 == 0:\n correct = self.pmos.active_contact.second_layer_position.scale(1,0) \n drain_posistion = contact_pos + correct \n height = self.vdd_position.y - drain_posistion.y\n self.add_rect(layer=\"metal1\",\n offset=drain_posistion,\n width=drc[\"minwidth_metal1\"],\n height=height)\n else:\n # source to pmos2\n correct = (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(self.pmos.active_contact.second_layer_width,\n 0).scale(0.5,0))\n source_position = contact_pos + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n\n # the second pmos\n for i in range(len(self.pmos.active_contact_positions)):\n if i % 2 == 0:\n # source to pmos2\n pmos_active =self.pmos_position2+self.pmos.active_contact_positions[i]\n correct= (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(0.5 * self.pmos.active_contact.second_layer_width,0))\n source_position = pmos_active + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n # two nmos source to gnd\n source_posistion1 = (self.nmos_position1\n + self.nmos.active_contact_positions[0]\n + self.nmos.active_contact.second_layer_position.scale(1,0))\n height = self.gnd_position.y - source_posistion1.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion1,\n width=drc[\"minwidth_metal1\"],\n height=height)\n\n source_posistion2 = (self.nmos_position2\n + self.nmos.active_contact_positions[1]\n + self.nmos.active_contact.second_layer_position.scale(1,0)) \n height = self.gnd_position.y - source_posistion2.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion2,\n width=drc[\"minwidth_metal1\"],\n height=height)",
"def connect_well_contacts(self):\n well_tap_length = self.height - self.nwell_contact_position.y\n xoffset = (self.nwell_contact_position.x \n + self.nwell_contact.second_layer_position.x \n - self.nwell_contact.first_layer_position.x)\n offset = [xoffset, self.nwell_contact_position.y]\n self.add_rect(layer=\"metal1\",\n offset=offset,\n width=drc[\"minwidth_metal1\"],\n height=well_tap_length)\n\n offset = (self.pwell_contact_position.scale(1,0)\n + self.pwell_contact.second_layer_position.scale(1,0)\n - self.pwell_contact.first_layer_position.scale(1,0))\n well_tap_length = self.pwell_contact_position.y\n self.add_rect(layer=\"metal1\",\n offset=offset,\n width=drc[\"minwidth_metal1\"],\n height=well_tap_length)",
"def route_input_B(self):\n xoffset = self.pmos.poly_positions[0].x \\\n + self.pmos_position2.x\n yoffset = self.A_position.y \\\n + max(drc[\"minwidth_metal2\"], self.poly_contact.second_layer_width) + drc[\"metal2_to_metal2\"]\n self.B_position = vector(xoffset, yoffset)\n offset = self.B_position - vector(0, 0.5 * self.poly_contact.width) \n self.add_contact(layers=(\"poly\", \"contact\", \"metal1\"),\n offset=offset,\n rotate=90)\n\n self.add_rect(layer=\"poly\",\n offset=offset,\n width=-(self.poly_contact.first_layer_position.y + drc[\"minwidth_poly\"]),\n height=self.poly_contact.first_layer_width)\n self.add_layout_pin(text=\"B\",\n layer=\"metal1\",\n offset=[0,\n self.B_position.y - 0.5 * drc[\"minwidth_metal1\"]],\n width=self.B_position.x,\n height=drc[\"minwidth_metal1\"])",
"def connect_pmos(self, pmos_pin, bit_pin):\n\n ll_pos = vector(min(pmos_pin.lx(),bit_pin.lx()), pmos_pin.by())\n ur_pos = vector(max(pmos_pin.rx(),bit_pin.rx()), pmos_pin.uy())\n\n width = ur_pos.x-ll_pos.x\n height = ur_pos.y-ll_pos.y\n self.add_rect(layer=\"metal2\",\n offset=ll_pos,\n width=width,\n height=height)",
"def add_bilink(self, nodeport_a, nodeport_b, bilink):",
"def connect(self):\n if self.pin_1.type == self.pin_2.type:\n self.pin_1.connected = True\n self.pin_2.connected = True\n else:\n raise InvalidPowerCombination(\"Not the same types\")",
"def connect_poly(self):\n # connect pmos1 poly\n nmos_gate = (self.nmos_position1 \n + self.nmos.poly_positions[0]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n for i in range(len(self.pmos.poly_positions)):\n pmos_gate = (self.pmos_position1 \n + self.pmos.poly_positions[i]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n mid1 = [pmos_gate.x, pmos_gate.y - drc[\"poly_to_active\"]]\n self.add_path(\"poly\", [nmos_gate, mid1, pmos_gate])\n\n # connect pmos2 poly\n nmos_gate = vector(self.nmos_position2[0] \n + self.nmos.poly_positions[0].x\n + 0.5 * drc[\"minwidth_poly\"], \n self.nmos_position1.y \n + self.nmos.poly_positions[0].y)\n for i in range(len(self.pmos.poly_positions)):\n pmos_gate = (self.pmos_position2\n + self.pmos.poly_positions[i]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n mid1 = vector(pmos_gate.x,\n nmos_gate.y + self.nmos.height \n + drc[\"poly_to_active\"])\n self.add_path(\"poly\", [nmos_gate, mid1, pmos_gate])",
"def make_m2_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m2_left_list = TSUGITE_list[0]\n m2_left_upper = m2_left_list[0]\n m2_left_middle = m2_left_list[1]\n m2_left_lower = m2_left_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[4]\n m2_KUMIKI_points2 = SHIGUCHI_list[5]\n\n m2_KUMIKI_points1.reverse()\n\n m2_left_upper.extend(m2_KUMIKI_points1)\n m2_left_upper.append(m2_left_upper[0])\n m2_left_upper_crv = rs.AddPolyline(m2_left_upper)\n\n m2_left_middle.extend(m2_KUMIKI_points1)\n m2_left_middle.append(m2_left_middle[0])\n m2_left_middle_crv = rs.AddPolyline(m2_left_middle)\n\n m2_left_lower.extend(m2_KUMIKI_points1)\n m2_left_lower.append(m2_left_lower[0])\n m2_left_lower_crv = rs.AddPolyline(m2_left_lower)\n\n m2_left_crvs = [m2_left_upper_crv, m2_left_middle_crv, m2_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n m2_right_list = TSUGITE_list[1]\n m2_right_upper = m2_right_list[0]\n m2_right_middle = m2_right_list[1]\n m2_right_lower = m2_right_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[0]\n m2_KUMIKI_points2 = SHIGUCHI_list[1]\n\n # Extend\n # material2\n m2_right_upper.reverse()\n m2_right_middle.reverse()\n m2_right_lower.reverse()\n\n # m2_KUMIKI_points1.reverse()\n\n m2_right_upper.extend(m2_KUMIKI_points1)\n m2_right_upper.append(m2_right_upper[0])\n m2_right_upper_crv = rs.AddPolyline(m2_right_upper)\n\n m2_right_middle.extend(m2_KUMIKI_points1)\n m2_right_middle.append(m2_right_middle[0])\n m2_right_middle_crv = rs.AddPolyline(m2_right_middle)\n\n m2_right_lower.extend(m2_KUMIKI_points1)\n m2_right_lower.append(m2_right_lower[0])\n m2_right_lower_crv = rs.AddPolyline(m2_right_lower)\n\n m2_right_crvs = [m2_right_upper_crv, m2_right_middle_crv, m2_right_lower_crv]\n\n return m2_left_crvs, m2_right_crvs",
"def route_output(self):\n self.Z_position = vector(self.width, self.A_position.y)\n # route nmos drain to Z\n nmos_contact = (self.nmos_position1 \n + self.nmos.active_contact_positions[1] \n + self.nmos.active_contact.second_layer_position\n + vector(self.nmos.active_contact.second_layer_width,\n 0).scale(0.5, 0))\n mid = [nmos_contact.x, self.A_position.y]\n self.add_path(\"metal1\", [self.Z_position, mid, nmos_contact])\n\n for i in range(len(self.pmos.poly_positions) + 1):\n if i % 2 == 1:\n # pmos2 drain to Z\n pmos_contact = (self.pmos_position2\n + self.pmos.active_contact_positions[i]\n + self.pmos.active_contact.second_layer_position.scale(1, 0)\n + vector(self.pmos.active_contact.second_layer_width,\n 0).scale(0.5, 0))\n offset = pmos_contact - vector(0.5 * self.m1m2_via.width, 0)\n self.add_via(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n mid = [pmos_contact.x, self.Z_position.y]\n self.add_wire((\"metal1\", \"via1\", \"metal2\"),\n [self.Z_position, mid, pmos_contact])\n\n self.add_layout_pin(text=\"Z\",\n layer=\"metal1\",\n offset=mid - vector(0,0.5*drc[\"minwidth_metal1\"]),\n width=self.Z_position.x-mid[0],\n height=drc[\"minwidth_metal1\"])",
"def head2head(self, atoms):\n\n c1, c2 = atoms.keys()\n c1_ndx, c2_ndx = atoms.values()\n\n chain1, chain2 = self.determine_chains([c1, c2])\n\n # to get indexing right\n c1_ndx -= self.monomer.indices[chain1]['C1']\n c2_ndx -= self.monomer.indices[chain2]['C2']\n\n types = {'chain1': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'ha'},\n 'chain2': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c3', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'hc', 'D4': 'hc'}}\n\n # update types\n reacted_types = {'chain1': {c1_ndx + self.monomer.indices[chain1][a]: types['chain1'][a] for a in\n types['chain1'].keys()},\n 'chain2': {c2_ndx + self.monomer.indices[chain2][a]: types['chain2'][a] for a in\n types['chain2'].keys()}}\n\n # bond between carbons\n bonds = [[c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C1'], 'carbon']]\n\n # dummy bonds - 1 new bond between dummy atoms and carbon\n bonds += [[c2_ndx + self.monomer.indices[chain2]['C4'], c2_ndx + self.monomer.indices[chain2]['D4'], 'dummy']]\n\n # define indices of left-over radicals\n radicals = [c1_ndx + self.monomer.indices[chain1]['C2']]\n\n chain1_impropers = ['C1'] # [1]\n chain2_impropers = ['C1', 'C4'] # [1, 2]\n rm_improper = []\n for c in chain1_impropers:\n rm_improper.append([c1_ndx + self.monomer.indices[chain1][x] for x in self.monomer.impropers[chain1][c]])\n for c in chain2_impropers:\n rm_improper.append([c2_ndx + self.monomer.indices[chain2][x] for x in self.monomer.impropers[chain2][c]])\n\n # define terminated atoms\n terminated = [c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C1'],\n c2_ndx + self.monomer.indices[chain2]['C2']] # C2 terminated for now even though still alkene\n\n return reacted_types, bonds, radicals, rm_improper, terminated",
"def add_conn(self, a1, a2):\n if self.use_pconn:\n raise ValueError(\"Can not add bonds to systems with pconn - well, we can fix this ;) \")\n self.conn[a1].append(a2)\n self.conn[a2].append(a1)\n d,v,imgi = self.get_distvec(a1,a2)\n self.pconn[a1].append(images[imgi])\n d,v,imgi = self.get_distvec(a2,a1)\n self.pconn[a2].append(images[imgi])\n logger.warning('pconn may not be properly updated!!!')\n return",
"def route_gnd(self):\n \n gnd_start = self.rbl_inv_inst.get_pin(\"gnd\").bc()\n gnd_end = vector(gnd_start.x, self.rbl_inst.uy()+2*self.m2_pitch)\n \n # Add a rail in M1 from bottom of delay chain to two above the RBL\n # This prevents DRC errors with vias for the WL\n dc_top = self.dc_inst.ur()\n self.add_segment_center(layer=\"metal1\",\n start=vector(gnd_start.x, dc_top.y),\n end=gnd_end)\n\n # Add a rail in M2 from RBL inverter to two above the RBL\n self.add_segment_center(layer=\"metal2\",\n start=gnd_start,\n end=gnd_end)\n \n # Add pin from bottom to RBL inverter\n self.add_layout_pin_center_segment(text=\"gnd\",\n layer=\"metal1\",\n start=gnd_start.scale(1,0),\n end=gnd_start)\n \n # Connect the WL pins directly to gnd\n gnd_pin = self.get_pin(\"gnd\").rc()\n for row in range(self.bitcell_loads):\n wl = \"wl[{}]\".format(row)\n pin = self.rbl_inst.get_pin(wl)\n start = vector(gnd_pin.x,pin.cy())\n self.add_segment_center(layer=\"metal1\",\n start=start,\n end=pin.lc())\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=start)\n\n # Add via for the delay chain\n offset = self.dc_inst.get_pins(\"gnd\")[0].bc() + vector(0.5*contact.m1m2.width,0.5*contact.m1m2.height)\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n\n # Add via for the inverter\n offset = self.rbl_inv_inst.get_pin(\"gnd\").bc() - vector(0,0.5*contact.m1m2.height)\n self.add_via_center(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n\n # Connect the bitcell gnd pins to the rail\n gnd_pins = self.get_pins(\"gnd\")\n gnd_start = gnd_pins[0].ul()\n rbl_gnd_pins = self.rbl_inst.get_pins(\"gnd\")\n # Add L shapes to each vertical gnd rail\n for pin in rbl_gnd_pins:\n if pin.layer != \"metal2\":\n continue\n gnd_end = pin.uc()\n gnd_mid = vector(gnd_end.x, gnd_start.y)\n self.add_wire((\"metal1\",\"via1\",\"metal2\"), [gnd_start, gnd_mid, gnd_end])\n gnd_start = gnd_mid\n \n\n # Add a second gnd pin to the second delay chain rail. No need for full length.\n dc_gnd_offset = self.dc_inst.get_pins(\"gnd\")[1].ll()\n self.add_layout_pin(text=\"gnd\",\n layer=\"metal1\",\n offset=dc_gnd_offset.scale(1,0),\n width=self.m1_width,\n height=self.delay_chain_offset.y)",
"def head2tail(self, atoms):\n\n c1, c2 = atoms.keys()\n c1_ndx, c2_ndx = atoms.values()\n\n chain1, chain2 = self.determine_chains([c1, c2])\n\n # to get indexing right\n c1_ndx -= self.monomer.indices[chain1]['C1']\n c2_ndx -= self.monomer.indices[chain2]['C2']\n\n types = {'chain1': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'ha'},\n 'chain2': {'C1': 'c3', 'C2': 'c3', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'hc',\n 'H4': 'ha', 'H5': 'ha', 'D1': 'hc'}}\n\n # update types\n reacted_types = {'chain1': {c1_ndx + self.monomer.indices[chain1][a]: types['chain1'][a] for a in\n types['chain1'].keys()},\n 'chain2': {c2_ndx + self.monomer.indices[chain2][a]: types['chain2'][a] for a in\n types['chain2'].keys()}}\n\n # bond between carbons. Format [c1, c2, type]\n bonds = [[c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2'], 'carbon']]\n\n # dummy bonds - 1 new bond between dummy atoms and carbon\n bonds += [[c2_ndx + self.monomer.indices[chain2]['C1'], c2_ndx + self.monomer.indices[chain2]['D1'], 'dummy']]\n\n # define indices of left-over radicals\n radicals = [c1_ndx + self.monomer.indices[chain1]['C2']]\n\n chain1_impropers = ['C1']\n chain2_impropers = ['C1', 'C2']\n rm_improper = []\n for c in chain1_impropers:\n rm_improper.append([c1_ndx + self.monomer.indices[chain1][x] for x in self.monomer.impropers[chain1][c]])\n for c in chain2_impropers:\n rm_improper.append([c2_ndx + self.monomer.indices[chain2][x] for x in self.monomer.impropers[chain2][c]])\n\n # define terminated atoms\n terminated = [c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2'], c2_ndx +\n self.monomer.indices[chain2]['C1']]\n\n return reacted_types, bonds, radicals, rm_improper, terminated",
"def makelinks(links, sp1, sp2):\n sp1_links = []\n sp2_links = []\n sp1_chrom = []\n sp2_chrom = []\n f = open(\"circos.{}-{}.links.txt\".format(sp1, sp2), 'w')\n with open(links, 'r') as link:\n for line in link:\n x = line.strip().split()\n species = x[0].split(\".\")[0]\n chrom = x[0].split(\".\")[1]\n orient = x[3]\n size = int(x[4])\n align_l = int(x[2])\n align_s = int(x[1])\n if orient == \"+\":\n start = align_s\n end = start + align_l\n elif orient == \"-\":\n start = size - align_s\n end = start - align_l\n else:\n print(\"\\nNo Direction indicated\".format(line))\n if species == sp1:\n sp1_links.append(\"{} {} {}\".format(chrom, start, end))\n sp1_chrom.append(chrom)\n elif species == sp2:\n sp2_links.append(\"{} {} {}\".format(chrom, start, end))\n sp2_chrom.append(chrom)\n [f.write(\"{} {}\\n\".format(i, j)) for i, j in zip(sp1_links, sp2_links)]\n f.close()\n\n return(sp1_chrom, sp2_chrom)",
"def add_connector(self):\n \n no = len(self.connectors)\n state = {}\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % no\n \n if len(self.connectors)>0:\n state = self.connectors[-1].get_state()\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % (no)\n else:\n if self.mount == self.MOUNT_THT:\n state[\"p_shape\"] = Con.SHAPE_HOLE\n elif self.mount == self.MOUNT_SMD:\n state[\"p_shape\"] = Con.SHAPE_PAD\n \n c = Con(no)\n c.set_state(state) \n \n self.sch_layers[\"pins\"].add(c.s_svg)\n self.pcb_layers[\"copper1\"].add(c.p_svg)\n self.connectors.append(c)",
"def _update_attributes(self,circuit_graph,name,lib_name,lib_graph, Gsub):\n #PnR can route primitive power but not subckt power\n if lib_name in self.all_lef:\n pg = []\n else:\n pg = self.pg\n G1 = circuit_graph\n num = len([key for key in Gsub\n if 'net' not in G1.nodes[key][\"inst_type\"]])\n # Define ports for subblock\n matched_ports = {}\n ports_weight = {}\n G2 = lib_graph.copy()\n for g1_n, g2_n in Gsub.items():\n if 'net' in G2.nodes[g2_n][\"inst_type\"]:\n if 'external' in G2.nodes[g2_n][\"net_type\"]:\n if num > 1 and g1_n in pg:\n # remove power connections\n G2=nx.relabel_nodes(G2,{g2_n:g1_n},copy=False)\n else:\n matched_ports[g2_n] = g1_n\n ports_weight[g2_n] = []\n for nbr in list(G2.neighbors(g2_n)):\n ports_weight[g2_n].append(G2.get_edge_data(g2_n, nbr)['weight'])\n else:\n G2.nodes[g2_n]['values'] = G1.nodes[g1_n]['values']\n G2.nodes[g2_n]['real_inst_type'] = G1.nodes[g1_n]['real_inst_type']\n return matched_ports,ports_weight,G2",
"def __init__(self,\n name,\n vertices_location,\n connectivity,\n connectivity_ids=None,\n connectivity_label=None,\n connectivity_label_metadata=None,\n connectivity_colormap = None,\n connector_size=2.6,\n global_deselect_alpha=0.2,\n global_select_alpha=1.0,\n skeleton_linewidth=2.0):\n super(Microcircuit, self).__init__( name )\n\n if not connectivity_ids is None:\n self.connectivity_ids = connectivity_ids\n\n if not connectivity_label is None:\n self.connectivity_labels = connectivity_label\n\n if not connectivity_label_metadata is None:\n\n for semanticdict in connectivity_label_metadata:\n # name needs to be based on convention, TODO: best from ontology id rather than string!\n # TODO: use microcircuit convention\n if semanticdict.has_key(\"name\"):\n name = semanticdict[\"name\"]\n if \"skeleton\" in name:\n self.con_skeleton = int(semanticdict[\"value\"])\n elif \"presynaptic\" in name:\n self.con_pre = int(semanticdict[\"value\"])\n elif \"postsynaptic\" in name:\n self.con_post = int(semanticdict[\"value\"])\n\n else:\n # TODO: default\n self.con_skeleton = 1\n self.con_pre = 2\n self.con_post = 3\n\n # selection stores integer ids from connectivity_selectionID\n # when selected\n self.skeleton_selection = []\n\n # use the connectivity labels to extract the connectivity for the skeletons\n self.index_skeleton = np.where(self.connectivity_labels == self.con_skeleton)[0]\n self.index_allpre = np.where(self.connectivity_labels == self.con_pre)[0]\n self.index_allpost = np.where(self.connectivity_labels == self.con_post)[0]\n \n self.vertices = vertices_location\n self.connectivity = connectivity\n\n connectivity_skeleton = self.connectivity[self.index_skeleton]\n self.vertices_skeleton = self.vertices[ connectivity_skeleton.ravel() ]\n \n # we have a simplified connectivity now\n self.connectivity_skeleton = np.array( range(len(self.vertices_skeleton)), dtype = np.uint32 )\n self.connectivity_skeleton = self.connectivity_skeleton.reshape( (len(self.connectivity_skeleton)/2, 2) )\n self.connectivity_ids_skeleton = self.connectivity_ids[ self.index_skeleton ]\n\n # look up the start and end vertex id\n # map these to _skeleton arrays, and further to actor???\n\n # colors for skeletons\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_skeleton ):\n self.connectivity_skeleton_colors = np.repeat(connectivity_colormap[self.con_skeleton], len(self.connectivity_skeleton), axis=0).astype( np.float32 )\n\n ##########\n # Incoming connectors\n ##########\n\n # extract the pre connectivity and create cones\n # store the indices for to be used to create the vector scatter\n # by itself, it represent implicitly the index used to select/deselect the vectors\n if len(self.index_allpre) == 0:\n if DEBUG:\n print \"no presynaptic connection\"\n self.pre_actor = None\n else:\n self.vertices_pre = self.vertices[ connectivity[self.index_allpre].ravel() ]\n self.pre_p1 = self.vertices_pre[::2, :] # data is NOT copied here\n self.pre_p2 = self.vertices_pre[1::2, :]\n pren = len(self.index_allpre)\n r1 = np.ones( pren, dtype = np.float32 ) * connector_size\n r2 = np.zeros( pren, dtype = np.float32 )\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_pre ):\n preval = np.ones( pren, dtype = np.dtype(type(self.con_pre)) ) * self.con_pre\n else:\n preval = None\n self.pre_actor = VectorScatter( \"PreConnector\", self.pre_p1, self.pre_p2, r1, r2, values = preval,\n resolution = 8, colormap = connectivity_colormap )\n # 
len(self.index_pre) = len(self.pre_p1) = len(preval)\n\n ##########\n # Outgoing connectors\n ##########\n\n # extract the post connectivity and create cones\n if len(self.index_allpost) == 0:\n if DEBUG:\n print \"no postsynaptic connection\"\n self.post_actor = None\n else:\n self.vertices_post = self.vertices[ connectivity[self.index_allpost].ravel() ]\n self.post_p1 = self.vertices_post[::2, :]\n self.post_p2 = self.vertices_post[1::2, :]\n postn = len(self.index_allpost)\n r1 = np.zeros( postn, dtype = np.float32 )\n r2 = np.ones( postn, dtype = np.float32 ) * connector_size\n if isinstance(connectivity_colormap, dict) and connectivity_colormap.has_key( self.con_post ):\n postval = np.ones( postn, dtype = np.dtype(type(self.con_post)) ) * self.con_post\n else:\n postval = None\n self.post_actor = VectorScatter( \"PostConnector\", self.post_p1, self.post_p2, r1, r2, values = postval,\n resolution = 8, colormap = connectivity_colormap )\n\n ##########\n # Skeletons\n ##########\n self.skeleton = Skeleton( name = \"Polygon Lines\",\n vertices = self.vertices_skeleton,\n connectivity = self.connectivity_skeleton,\n connectivity_colors = self.connectivity_skeleton_colors,\n connectivity_ID = self.connectivity_ids_skeleton,\n linewidth = skeleton_linewidth,\n global_deselect_alpha = global_deselect_alpha,\n global_select_alpha = global_select_alpha )\n\n self.connectivity_skeletononly_ids = None\n self.connectivity_preonly_ids = None\n self.connectivity_postonly_ids = None\n\n self.global_deselect_alpha = global_deselect_alpha\n self.global_select_alpha = global_select_alpha",
"def connect_memory(self):\n for i in range(0,len(self.memory_selectors)):\n bodyA = self.memory_selectors[i]\n bodyB = self.memory_sensors[i+5]\n self.world.CreateDistanceJoint(bodyA=bodyA,\n\t bodyB=bodyB,\n\t anchorA=bodyA.worldCenter,\n\t anchorB=bodyB.worldCenter,\n\t collideConnected=False)\n\n for i in range(0,len(self.rom_selectors)):\n bodyA = self.rom_selectors[i]\n bodyB = self.memory_sensors[i]\n self.world.CreateDistanceJoint(bodyA=bodyA,\n\t bodyB=bodyB,\n\t anchorA=bodyA.worldCenter,\n\t anchorB=bodyB.worldCenter,\n\t collideConnected=False)",
"def route_en(self):\n # adds the en contact to connect the gates to the en rail on metal1\n offset = self.lower_pmos_inst.get_pin(\"G\").ul() + vector(0,0.5*self.poly_space)\n self.add_contact_center(layers=(\"poly\", \"contact\", \"metal1\"),\n offset=offset,\n rotate=90)\n\n # adds the en rail on metal1\n self.add_layout_pin_segment_center(text=\"en\",\n layer=\"metal1\",\n start=offset.scale(0,1),\n end=offset.scale(0,1)+vector(self.width,0))",
"def wg_heater_connector(\n heater_ports: List[Port],\n metal_width: float = 10.0,\n tlm_layers: List[Tuple[int, int]] = [\n LAYER.VIA1,\n LAYER.M1,\n LAYER.VIA2,\n LAYER.M2,\n LAYER.VIA3,\n LAYER.M3,\n ],\n) -> Component:\n\n cmp = Component()\n assert len(heater_ports) == 2\n assert (\n heater_ports[0].orientation == heater_ports[1].orientation\n ), \"both ports should be facing in the same direction\"\n angle = heater_ports[0].orientation\n angle = angle % 360\n assert angle in [0, 180], \"angle should be 0 or 180, got {}\".format(angle)\n\n dx = 0.0\n dy = 0.0\n\n angle_to_dps = {0: [(-dx, -dy), (-dx, dy)], 180: [(dx, -dy), (dx, dy)]}\n ports = heater_ports\n hw = heater_ports[0].width\n\n if angle in [0, 180]:\n ports.sort(key=lambda p: p.y)\n else:\n ports.sort(key=lambda p: p.x)\n\n _heater_to_metal = tlm(width=0.5, height=0.5, layers=tlm_layers, vias=[])\n\n tlm_positions = []\n for port, dp in zip(ports, angle_to_dps[angle]):\n # Extend heater\n p = port.midpoint\n\n # Add via/metal transitions\n tlm_pos = p + dp\n hm = _heater_to_metal.ref(position=tlm_pos)\n tlm_positions += [tlm_pos]\n cmp.add(hm)\n\n ss = 1 if angle == 0 else -1\n\n # Connect both sides with top metal\n edge_metal_piece_width = 7.0\n x = ss * edge_metal_piece_width / 2\n top_metal_layer = tlm_layers[-1]\n cmp.add_polygon(\n line(\n tlm_positions[0] + (x, -hw / 2),\n tlm_positions[1] + (x, hw / 2),\n edge_metal_piece_width,\n ),\n layer=top_metal_layer,\n )\n\n # Add metal port\n cmp.add_port(\n name=\"0\",\n midpoint=0.5 * sum(tlm_positions) + (ss * edge_metal_piece_width / 2, 0),\n orientation=angle,\n width=metal_width,\n layer=top_metal_layer,\n port_type=\"dc\",\n )\n\n return cmp",
"def make_m3_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m3_left_list = TSUGITE_list[2]\n m3_left_upper = m3_left_list[0]\n m3_left_middle = m3_left_list[1]\n m3_left_lower = m3_left_list[2]\n\n # SHIGUCHI\n m3_KUMIKI_points1 = SHIGUCHI_list[6]\n m3_KUMIKI_points2 = SHIGUCHI_list[7]\n\n # m3_KUMIKI_points1.reverse()\n\n m3_left_upper.extend(m3_KUMIKI_points1)\n m3_left_upper.append(m3_left_upper[0])\n m3_left_upper_crv = rs.AddPolyline(m3_left_upper)\n\n m3_left_middle.extend(m3_KUMIKI_points1)\n m3_left_middle.append(m3_left_middle[0])\n m3_left_middle_crv = rs.AddPolyline(m3_left_middle)\n\n m3_left_lower.extend(m3_KUMIKI_points1)\n m3_left_lower.append(m3_left_lower[0])\n m3_left_lower_crv = rs.AddPolyline(m3_left_lower)\n\n m3_left_crvs = [m3_left_upper_crv, m3_left_middle_crv, m3_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n # material3\n m3_right_list = TSUGITE_list[3]\n m3_right_upper = m3_right_list[0]\n m3_right_middle = m3_right_list[1]\n m3_right_lower = m3_right_list[2]\n\n # SHIGUCHI\n m3_KUMIKI_points1 = SHIGUCHI_list[2]\n m3_KUMIKI_points2 = SHIGUCHI_list[3]\n\n # Extend\n # material3\n m3_right_upper.extend(m3_KUMIKI_points1)\n m3_right_upper.append(m3_right_upper[0])\n m3_right_upper_crv = rs.AddPolyline(m3_right_upper)\n\n m3_right_middle.extend(m3_KUMIKI_points1)\n m3_right_middle.append(m3_right_middle[0])\n m3_right_middle_crv = rs.AddPolyline(m3_right_middle)\n\n m3_right_lower.extend(m3_KUMIKI_points1)\n m3_right_lower.append(m3_right_lower[0])\n m3_right_lower_crv = rs.AddPolyline(m3_right_lower)\n\n m3_right_crvs = [m3_right_upper_crv, m3_right_middle_crv, m3_right_lower_crv]\n\n return m3_left_crvs, m3_right_crvs",
"def get_contact_atoms(self,cutoff=8.5,chain1='A',chain2='B',\n extend_to_residue=False,only_backbone_atoms=False,\n excludeH=False,return_only_backbone_atoms=False,return_contact_pairs=False):\n\n # xyz of the chains\n xyz1 = np.array(self.get('x,y,z',chainID=chain1))\n xyz2 = np.array(self.get('x,y,z',chainID=chain2))\n\n # index of b\n index2 = self.get('rowID',chainID=chain2)\n\n # resName of the chains\n resName1 = np.array(self.get('resName',chainID=chain1))\n #resName2 = np.array(self.get('resName',chainID=chain2))\n\n # atomnames of the chains\n atName1 = np.array(self.get('name',chainID=chain1))\n atName2 = np.array(self.get('name',chainID=chain2))\n\n\n # loop through the first chain\n # TO DO : loop through the smallest chain instead ...\n index_contact_1,index_contact_2 = [],[]\n index_contact_pairs = {}\n\n for i,x0 in enumerate(xyz1):\n\n # compute the contact atoms\n contacts = np.where(np.sqrt(np.sum((xyz2-x0)**2,1)) <= cutoff )[0]\n\n # exclude the H if required\n if excludeH and atName1[i][0] == 'H':\n continue\n\n if len(contacts)>0 and any([not only_backbone_atoms, atName1[i] in self.backbone_type]):\n\n # the contact atoms\n index_contact_1 += [i]\n index_contact_2 += [index2[k] for k in contacts if ( any( [atName2[k] in self.backbone_type, not only_backbone_atoms]) and not (excludeH and atName2[k][0]=='H') ) ]\n\n # the pairs\n pairs = [index2[k] for k in contacts if any( [atName2[k] in self.backbone_type, not only_backbone_atoms] ) and not (excludeH and atName2[k][0]=='H') ]\n if len(pairs) > 0:\n index_contact_pairs[i] = pairs\n\n # get uniques\n index_contact_1 = sorted(set(index_contact_1))\n index_contact_2 = sorted(set(index_contact_2))\n\n # if no atoms were found\n if len(index_contact_1)==0:\n print('Warning : No contact atoms detected in pdb2sql')\n\n # extend the list to entire residue\n if extend_to_residue:\n index_contact_1,index_contact_2 = self._extend_contact_to_residue(index_contact_1,index_contact_2,only_backbone_atoms)\n\n\n # filter only the backbone atoms\n if return_only_backbone_atoms and not only_backbone_atoms:\n\n # get all the names\n # there are better ways to do that !\n atNames = np.array(self.get('name'))\n\n # change the index_contacts\n index_contact_1 = [ ind for ind in index_contact_1 if atNames[ind] in self.backbone_type ]\n index_contact_2 = [ ind for ind in index_contact_2 if atNames[ind] in self.backbone_type ]\n\n # change the contact pairs\n tmp_dict = {}\n for ind1,ind2_list in index_contact_pairs.items():\n\n if atNames[ind1] in self.backbone_type:\n tmp_dict[ind1] = [ind2 for ind2 in ind2_list if atNames[ind2] in self.backbone_type]\n\n index_contact_pairs = tmp_dict\n\n # not sure that's the best way of dealing with that\n if return_contact_pairs:\n return index_contact_pairs\n else:\n return index_contact_1,index_contact_2",
"def simultaneousSwitch(house1, house2):\n # 1. Remove house from list of connected houses in battery object\n house1.connection.connectedHouses.remove(house1)\n house2.connection.connectedHouses.remove(house2)\n\n # 2. \"Reset\" capacity of batteries\n house1.connection.capacity += house1.output\n house2.connection.capacity += house2.output\n # 3. Change connections\n house1.connection, house2.connection = house2.connection, house1.connection\n\n # 4. Append house to battery list of connected houses\n house1.connection.connectedHouses.append(house1)\n house2.connection.connectedHouses.append(house2)\n\n # 5. Recalculate capacity of batteries\n house1.connection.capacity -= house1.output\n house2.connection.capacity -= house2.output\n\n # 6. Recalculate distance\n house1.distance = manhattan(house1, house1.connection)\n house2.distance = manhattan(house1, house2.connection)",
"def __write_lc1_info_config(self, link_node):\n if \"lc1_node_id\" not in link_node:\n return\n if link_node[\"lc1_node_id\"] == \"0\":\n ip_str = u'0.0.168.192'.format(app[\"app_id\"])\n print('ERROR: Found invalid link node ID (lcls1_id of 0)')\n else:\n ip_str = u'{}.0.168.192'.format(link_node[\"lc1_node_id\"])\n\n ip_address = int(ipaddress.ip_address(ip_str))\n\n slot = 2\n if \"analog_slot\" in link_node: \n slot = link_node[\"analog_slot\"]\n path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, link_node[\"cpu_name\"], link_node[\"crate_id\"], slot)\n\n mask = 0\n remap_dig = 0\n if link_node[\"type\"] == \"Digital\" or link_node[\"type\"] == \"Mixed\":\n if \"dig_app_id\" not in link_node:\n remap_dig = 0\n else: \n mask = 1\n remap_dig = link_node[\"dig_app_id\"]\n\n bpm_index = 0\n blm_index = 0\n remap_bpm = [0, 0, 0, 0, 0]\n remap_blm = [0, 0, 0, 0, 0]\n for slot_number, slot_info in link_node[\"slots\"].items():\n if slot_info[\"type\"] == \"BPM Card\":\n if bpm_index < 5:\n remap_bpm[bpm_index] = slot_info[\"app_id\"]\n mask |= 1 << (bpm_index + 1) # Skip first bit, which is for digital app\n bpm_index += 1\n else:\n print('ERROR: Cannot remap BPM app id {}, all remap slots are used already'.\\\n format(slot_info[\"app_id\"]))\n \n elif slot_info[\"type\"] == \"Generic ADC\":\n if blm_index < 5:\n remap_blm[blm_index] = slot_info[\"app_id\"]\n mask |= 1 << (blm_index + 1 + 5) # Skip first bit and 5 BPM bits\n blm_index += 1\n else:\n print('ERROR: Cannot remap BLM app id {}, all remap slots are used already'.\\\n format(slot_info[\"app_id\"]))\n\n macros={\"ID\":str(link_node[\"lc1_node_id\"]),\n \"IP_ADDR\":str(ip_address),\n \"REMAP_DIG\":str(remap_dig),\n \"REMAP_BPM1\":str(remap_bpm[0]),\n \"REMAP_BPM2\":str(remap_bpm[1]),\n \"REMAP_BPM3\":str(remap_bpm[2]),\n \"REMAP_BPM4\":str(remap_bpm[3]),\n \"REMAP_BPM5\":str(remap_bpm[4]),\n \"REMAP_BLM1\":str(remap_blm[0]),\n \"REMAP_BLM2\":str(remap_blm[1]),\n \"REMAP_BLM3\":str(remap_blm[2]),\n \"REMAP_BLM4\":str(remap_blm[3]),\n \"REMAP_BLM5\":str(remap_blm[4]),\n \"REMAP_MASK\":str(mask),\n }\n self.__write_fw_config(path=path, template_name=\"lc1_info.template\", macros=macros)",
"def radical_c2(self, atoms):\n\n c1, c2 = atoms.keys()\n c1_ndx, c2_ndx = atoms.values()\n\n chain1, chain2 = self.determine_chains([c1, c2])\n\n # to get indexing right\n c1_ndx -= self.monomer.indices[chain1]['C1']\n c2_ndx -= self.monomer.indices[chain2]['C2']\n\n # types after reaction\n types = {'chain1': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'ha'}, # chain1 contains c1\n 'chain2': {'C1': 'c3', 'C2': 'c3', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'hc',\n 'H4': 'ha', 'H5': 'ha'}} # chain2 contains c2 radical\n\n # update types\n reacted_types = {'chain1': {c1_ndx + self.monomer.indices[chain1][a]: types['chain1'][a]\n for a in types['chain1'].keys()},\n 'chain2': {c2_ndx + self.monomer.indices[chain2][a]: types['chain2'][a]\n for a in types['chain2'].keys()}}\n\n # new bonds\n bonds = [[c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2'], 'carbon']]\n\n # no dummy bonds to add\n\n # define indices of left-over radicals\n radicals = [c1_ndx + self.monomer.indices[chain1]['C2']]\n\n chain1_impropers = ['C1'] # [1]\n chain2_impropers = ['C2'] # [2]\n rm_improper = []\n for c in chain1_impropers:\n rm_improper.append([c1_ndx + self.monomer.indices[chain1][x] for x in self.monomer.impropers[chain1][c]])\n for c in chain2_impropers:\n rm_improper.append([c2_ndx + self.monomer.indices[chain2][x] for x in self.monomer.impropers[chain2][c]])\n\n # define terminated atoms\n terminated = [c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C2']]\n\n return reacted_types, bonds, radicals, rm_improper, terminated",
"def route(self, mn, direction=None, via_tag=None):\n mn = np.asarray(mn)\n _mn = list()\n for i in range(1, mn.shape[0]): \n # when more than two points are given,\n # create a multi-point wire compose of sub-routing wires\n # connecting the points given by mn in sequence.\n _mn.append([mn[i - 1, :], mn[i, :]])\n route = list()\n # via at the starting point\n if via_tag is not None:\n if via_tag[0] is True:\n route.append(self.via(mn=_mn[0][0], params=None))\n # routing wires\n for i, __mn in enumerate(_mn):\n xy0 = self.abs2phy[__mn[0]]\n xy1 = self.abs2phy[__mn[1]]\n _xy = np.array([[xy0[0], xy0[1]], [xy1[0], xy1[1]]])\n if np.all(xy0 == xy1): # if two points are identical, generate a metal stub on the bottom layer.\n if (direction == 'vertical') or ((direction is None) and (self.primary_grid == 'vertical')):\n width = self.vwidth[__mn[0][0]]\n hextension = int(width/2)\n vextension = self.vextension0[__mn[0][0]]\n layer = self.vlayer[__mn[0][0]]\n else:\n width = self.hwidth[__mn[0][1]]\n hextension = self.hextension0[__mn[0][1]]\n vextension = int(width/2)\n layer = self.hlayer[__mn[0][1]]\n else:\n if (xy0[0] == xy1[0]) or (direction == 'vertical'): # vertical routing\n width = self.vwidth[__mn[0][0]]\n hextension = int(width/2)\n vextension = self.vextension[__mn[0][0]]\n layer = self.vlayer[__mn[0][0]]\n color = self.xcolor[__mn[0][0]%self.xcolor.shape[0]]\n\n else: # horizontal routing\n width = self.hwidth[__mn[0][1]]\n hextension = self.hextension[__mn[0][1]]\n vextension = int(width/2)\n layer = self.hlayer[__mn[0][1]]\n color = self.ycolor[__mn[0][1]%self.ycolor.shape[0]] # ycolor is determined by its grid layer.\n p = laygo2.object.physical.Rect(xy=_xy, layer=layer, hextension=hextension, vextension=vextension, color=color)\n route.append(p)\n # via placement\n if via_tag is None:\n if (i > 0) and (i < mn.shape[0] - 1):\n route.append(self.via(mn=__mn[0], params=None))\n else:\n if via_tag[i + 1] == True:\n route.append(self.via(mn=__mn[1], params=None))\n if len(route) == 1: # not isinstance(mn[0][0], list):\n return route[0]\n else:\n return route",
"def alm2cl(self, m1, m2=None, lmin=2, lmax=None, symmetric=True):\n import healpy as hp\n\n if lmax is None:\n lmax = self.lmax\n cls = np.asarray(hp.alm2cl(m1, alms2=m2, lmax=lmax))\n if symmetric:\n cls_T = np.asarray(hp.alm2cl(m2, alms2=m1, lmax=lmax))\n cls = (cls + cls_T) / 2.0\n if lmin:\n cls[..., :lmin] = 0\n return np.atleast_2d(cls)",
"def __init__(self,\n m1_model: KBModelM1,\n functionalities: Dict[int, float],\n inverse_functionalities: Dict[int, float],\n relation_id_to_density: Dict[int, float],\n relation_id_to_distinct_subjects: Dict[int, int],\n relation_id_to_distinct_objects: Dict[int, int],\n relation_id_to_reflexiveness: Dict[int, bool]):\n assert isinstance(m1_model, KBModelM1), f\"Model is of type {type(m1_model)} but needs to be of type KBModelM1\"\n # initialize the M1 model data with the values of the passed M1 model\n super(KBModelM2, self).__init__(\n entity_type_hierarchy=m1_model.entity_type_hierarchy,\n object_property_hierarchy=m1_model.object_property_hierarchy,\n domains=m1_model.domains,\n ranges=m1_model.ranges,\n entity_count=m1_model.entity_count,\n relation_count=m1_model.relation_count,\n edge_count=m1_model.edge_count,\n entity_type_count=m1_model.entity_type_count,\n entity_type_distribution=m1_model.entity_type_distribution,\n relation_distribution=m1_model.relation_distribution,\n relation_domain_distribution=m1_model.relation_domain_distribution,\n relation_range_distribution=m1_model.relation_range_distribution,\n relation_to_id=m1_model.relation_to_id,\n entity_type_to_id=m1_model.entity_type_to_id\n )\n\n self.functionalities = functionalities\n self.inverse_functionalities = inverse_functionalities\n self.relation_id_to_density = relation_id_to_density\n self.relation_id_to_distinct_subjects = relation_id_to_distinct_subjects\n self.relation_id_to_distinct_objects = relation_id_to_distinct_objects\n self.relation_id_to_reflexiveness = relation_id_to_reflexiveness\n\n self.name = \"M2 (OWL)\"\n\n # initialized in other methods\n #\n # the number of facts that violated the functionality of 1 of a relation type\n self.num_facts_violating_functionality = 0\n # the number of facts that violated the inverse functionality of 1 of a relation type\n self.num_facts_violating_inverse_functionality = 0\n # the number of facts that violated the non reflexiveness by being reflexive\n self.num_facts_violating_non_reflexiveness = 0",
"def connect_layers(self, material_dict=None):\n if material_dict is None:\n material_dict = {k: DEFAULT_BEAM for k in range(self.layers)}\n\n for layer in range(self.layers):\n material = material_dict[layer]\n\n for h in range(self.height):\n for c in range(self.ring_n):\n if layer == 0:\n n0 = f'N.{h}.c'\n else:\n n0 = f'R.{layer}.{h}.{c}'\n n1 = f'R.{layer+1}.{h}.{c}'\n name = f'M.{layer}.{h}.{c}'\n\n self.fem.AddMember(name, n0, n1,\n material[0],\n material[1],\n material[2],\n material[3],\n material[4],\n material[5]\n )"
]
| [
"0.693796",
"0.67024845",
"0.61300176",
"0.5741309",
"0.5715331",
"0.5709275",
"0.56884384",
"0.55448365",
"0.5517545",
"0.54770654",
"0.54025584",
"0.5344132",
"0.5279721",
"0.52553105",
"0.5248629",
"0.5226081",
"0.5191582",
"0.51813334",
"0.5166011",
"0.5162329",
"0.516208",
"0.5114183",
"0.5090897",
"0.5077762",
"0.5052227",
"0.5042769",
"0.503597",
"0.50255686",
"0.49943456",
"0.49939495"
]
| 0.73841476 | 0 |
Connect pmos pin to bitline pin | def connect_pmos(self, pmos_pin, bit_pin):
ll_pos = vector(min(pmos_pin.lx(),bit_pin.lx()), pmos_pin.by())
ur_pos = vector(max(pmos_pin.rx(),bit_pin.rx()), pmos_pin.uy())
width = ur_pos.x-ll_pos.x
height = ur_pos.y-ll_pos.y
self.add_rect(layer="metal2",
offset=ll_pos,
width=width,
height=height) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_pins(self):\n\n for bit in range(self.addr_size):\n self.add_pin(\"addr_{0}\".format(bit),\"INPUT\")\n \n self.add_pin(\"wl_en\", \"INPUT\")\n\n for bit in range(self.num_rows):\n self.add_pin(\"wl_{0}\".format(bit),\"OUTPUT\")\n \n self.add_pin(\"vdd\",\"POWER\")\n self.add_pin(\"gnd\",\"GROUND\")",
"def route_bitlines(self):\n # adds the BL on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_bl).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"bl\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)\n\n # adds the BR on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_br).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"br\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)",
"def add_bitline_contacts(self):\n\n stack=(\"metal1\", \"via1\", \"metal2\")\n pos = self.lower_pmos_inst.get_pin(\"S\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.lower_pmos_inst.get_pin(\"D\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.upper_pmos1_inst.get_pin(\"S\").center()\n self.add_contact_center(layers=stack,\n offset=pos)\n pos = self.upper_pmos2_inst.get_pin(\"D\").center()\n self.add_contact_center(layers=stack,\n offset=pos)",
"def connect(self):\n if self.pin_1.type == self.pin_2.type:\n self.pin_1.connected = True\n self.pin_2.connected = True\n else:\n raise InvalidPowerCombination(\"Not the same types\")",
"def __init__(self, pin_obj: Pin, invert: bool = False):",
"def __init__(self, pin: int | Pin, /):",
"def __init__(self, pin):\n ...",
"def pin(self) -> int:",
"def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError",
"def __init__(self, pin, pinishigh=True):\n self.pin = pin\n self.pinishigh = pinishigh",
"def draw_pin(self, pin, xform):\n # TODO special pin characteristics (inverted, clock)?\n line = [xform.chain(p) for p in (pin.p1, pin.p2)]\n self.canvas.line([(p.x, p.y) for p in line],\n fill=self.options.style['part'])",
"def add_pin(x, y):\n\n pass",
"def draw_pins():\n\n pass",
"def __init__(self, input_pin, output_pin):\n self.input_pin = input_pin\n self.output_pin = output_pin\n self.power_value = 0\n self.shortage_value = 0\n self.json_message = self.get_json()",
"def cmd(self,bits,char_mode=False):\n\n if( rpi_device ):\n sleep(0.0012)\n bits = bin(bits)[2:].zfill(8)\n GPIO.output(self.pin_rs,char_mode)\n\n for pin in self.pins_db:\n GPIO.output(pin,False)\n\n for i in range(4):\n if bits[i] == \"1\":\n GPIO.output(self.pins_db[::-1][i],True)\n \n GPIO.output(self.pin_e,True)\n GPIO.output(self.pin_e,False)\n\n for pin in self.pins_db:\n GPIO.output(pin,False)\n \n for i in range(4,8):\n if bits[i] == \"1\":\n GPIO.output(self.pins_db[::-1][i-4],True)\n\n GPIO.output(self.pin_e,True)\n GPIO.output(self.pin_e,False)",
"def do_show_pinout(self, arg):\n try:\n if arg:\n showboard = int(arg)\n else:\n showboard = self.phil.read_reg('sys.status.board')['data']\n if showboard == 1:\n print(\"\"\"\nPHILIP-B -> BLUEPILL\n\n ____\n ___|__|___\n DUT_RST = B12 - | | - GND\n DUT_CTS = B13 - | | - GND\n DUT_RTS = B14 - | | - 3V3\nUSER_BTN = B15 - | | - NRST\n DUT_IC = A8 - | | - B11 = DUT_RX\n IF_TX = A9 - | | - B10 = DUT_TX\n IF_RX = A10 - | | - B1 = PM_V_ADC\n USB_DM = A11 - | | - B0 = PM_HI_ADC\n USB_DP = A12 - | | - A7 = PM_LO_ADC\n DUT_NSS = A15 - | | - A6 = DUT_ADC\n DUT_SCK = B3 - | | - A5 = TEST_FAIL\n DUT_MISO = B4 - | | - A4 = TEST_WARN\n DUT_MOSI = B5 - | | - A3 = TEST_PASS\n DUT_SCL = B6 - | | - A2 = DEBUG2\n DUT_SDA = B7 - | | - A1 = DEBUG1\n DUT_PWM = B8 - | | - A0 = DEBUG0\n DUT_DAC = B9 - | | - C15\n 5V - | | - C14\n GND - | | - C13 = LED0\n 3V3 - | | - VBAT\n __________\n ||||\n\"\"\")\n else:\n print(\"\"\"\nPHILIP-N -> NUCLEO-F103RB\nCN6\n\n DUT_SCL = PB8 = SCL/D15 -\n DUT_SDA = PB9 = SDA/D14 -\n AVDD -\n GND -\n- LED0 = PA5 = SCK/D13 -\n- IOREF MISO/D12 -\n- NRST PWM/MOSI/D11 -\n- 3V3 PWM/CS/D10 -\n- 5V PWM/D9 -\n- GND DUT_TX = PA9 = D8 -\n- GND |CN5|\n- VIN DUT_IC = PA8 = D7 -\n|CN6| PWM/D6 -\n- A0 = PA0 = TEST_WARN DUT_MISO = PB4 = PWM/D5 -\n- A1 = PA1 = TEST_FAIL DUT_MOSI = PB5 = D4 -\n- A2 = PA4 = TEST_PASS DUT_SCK = PB3 = PWM/D3 -\n- A3 = PB0 = DUT_ADC DUT_RX = PA10 = D2 -\n- A4 = PC1 = PM_HI_ADC IF_TX = PA2 = TX/D1 -\n- A5 = PC0 = PM_V_ADC IF_RX = PA3 = RX/D0 -\n|CN8| |CN9|\n\n -1 - DUT_DAC -1 - DUT_PWM\n -2 - DUT_SCL -2 -\n -3 - DUT_SDA -3 -\n -4 - -4 -\n -5 - -5 -\n -6 - LED0 -6 - DUT_RTS\n -7 - -7 - DUT_CTS\n -8 - -8 -\n DUT_NSS -9 - -9 -\n -10- -10-\n -11- DUT_TX -11- DUT_RST\n USER_BTN -12- DUT_IC -12-\n -13- -13- DEBUG2\n -14- TEST_WARN DUT_MISO -14- DEBUG1\n -15- TEST_FAIL DUT_MOSI -15- DEBUG0\n -16- TEST_PASS DUT_SCK -16-\n -17- DUT_ADC DUT_RX -17-\nPM_LO_ADC -18- PM_HI_ADC IF_TX -18-\n -19- PM_V_ADC IF_RX -19-\n |CN7| |CN10|\n\"\"\")\n except (ValueError) as exc:\n print(exc)",
"def pin(self, name, mn, direction=None, netname=None, params=None):\n #pin0 = Pin(xy=[[0, 0], [100, 100]], layer=['M1', 'drawing'], netname='net0', master=rect0,\n # params={'direction': 'input'})\n xy0 = self.abs2phy[mn[0]]\n xy1 = self.abs2phy[mn[1]]\n #_xy = np.array([[xy0[0], xy0[1]], [xy1[0], xy1[1]]])\n if np.all(xy0 == xy1): # if two points are identical, generate a metal stub on the bottom layer.\n if (direction == 'vertical') or ((direction is None) and (self.primary_grid == 'vertical')):\n width = self.vwidth[mn[0][0]]\n hextension = int(width / 2)\n vextension = 0\n layer = self.pin_vlayer[mn[0][0]]\n else:\n width = self.hwidth[mn[0][1]]\n hextension = 0\n vextension = int(width / 2)\n layer = self.pin_hlayer[mn[0][1]]\n else:\n if (xy0[0] == xy1[0]) or (direction == 'vertical'): # vertical routing\n width = self.vwidth[mn[0][0]]\n hextension = int(width / 2)\n vextension = 0\n layer = self.pin_vlayer[mn[0][0]]\n else: # horizontal routing\n width = self.hwidth[mn[0][1]]\n hextension = 0\n vextension = int(width / 2)\n layer = self.pin_hlayer[mn[0][1]]\n # TODO: pin.xy differ from tech.py.\n _xy = np.array([[xy0[0]-hextension, xy0[1]-vextension], [xy1[0]+hextension, xy1[1]+vextension]]) ## need to check\n p = laygo2.object.physical.Pin(name=name, xy=_xy, layer=layer, netname=netname, params=params)\n return p",
"def pinModule(self, state):\n\n pass",
"def set_com_pins_config(set_sequential, enable_remap):\n if set_sequential:\n a4bit = 0x00\n else:\n a4bit = 0x10\n\n if enable_remap:\n a5bit = 0x20\n else:\n a5bit = 0x00\n\n send_command(0xDA)\n send_command(0x02 | a4bit | a5bit)",
"def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)",
"def _gpio_pin(self, pin_obj, gpio_type):\n self.hw_interfaces[\"gpio\"][pin_obj.name] = \\\n GPIO(name=pin_obj.name, pin=pin_obj, type=self.GPIO_MAPPER[gpio_type])",
"def __init__(self, pin: Union[int, Pin], /):",
"def setup(motion):\n GPIO.setmode(GPIO.BCM)\n # Sets pin numbering system\n GPIO.setup(motion, GPIO.IN)\n # Configures given pin for input usage.",
"def cmd(self, bits, char_mode=False):\n sleep(0.001)\n bits=bin(bits)[2:].zfill(8)\n\n GPIO.output(self.pin_rs, char_mode)\n\n for pin in self.pins_db:\n GPIO.output(pin, False)\n\n for i in range(4):\n if bits[i] == \"1\":\n GPIO.output(self.pins_db[::-1][i], True)\n\n GPIO.output(self.pin_e, True)\n GPIO.output(self.pin_e, False)\n\n for pin in self.pins_db:\n GPIO.output(pin, False)\n\n for i in range(4,8):\n if bits[i] == \"1\":\n GPIO.output(self.pins_db[::-1][i-4], True)\n\n GPIO.output(self.pin_e, True)\n GPIO.output(self.pin_e, False)",
"def __init__(self, pin: str) -> None:\n ...",
"def set_bit(self, port, bit):\n hw = self.device.peripherals[port]\n hw.BSRR.wr(1 << (bit & 15))",
"def __init__(self, pads):\n mfioCommon.__init__(self, pads)\n\n mfio_width = len(pads)\n #\n mfio_o = Signal(mfio_width)\n mfio_oe = Signal(mfio_width)\n mfio_i = Signal(mfio_width)\n # create single pin tristate buffers\n for b in range(mfio_width):\n self.submodules += mfioSinglePin(pads, b, mfio_i[b], mfio_o[b], mfio_oe[b]) \t\n\n # Wishbone \n self.bus = bus = wishbone.Interface()\n\n #\n sel = Signal(mfio_width)\n inbit = Signal(1)\n\n # todo: dynamic address width calc to optimize the decode logic\n addr_width = 12\n # 1024 IO max\n seladr = Signal(10)\n\n # 10 bits of address = 1024 pins max\n self.comb += seladr.eq(self.bus.adr[:10]) \n \n # address decoder\n for b in range(mfio_width):\n self.comb += sel[b].eq(seladr == b)\n\n self.comb += inbit.eq( (mfio_i & sel) != 0 )\n\n # Read bit\n rdbus = Signal(32)\n self.comb += [\n rdbus[0].eq(inbit),\n bus.dat_r.eq(rdbus)\n ]\t\n\n # process output \n outbit = Signal(1)\n oebit = Signal(1)\n wren = Signal(1)\n\n # PINAPI 1.0 compatible: 0 = drive 0, 1 drive 1, 3 = HiZ\n self.comb += outbit.eq( bus.dat_w[0] )\n self.comb += oebit.eq ( ~bus.dat_w[1] )\n\n # write enable\n self.comb += wren.eq(self.bus.stb & self.bus.cyc & self.bus.we) \n\n for b in range(mfio_width):\n self.sync += If(wren & sel[b], mfio_o[b].eq(outbit), mfio_oe[b].eq(oebit) )\n\n seq = [\n (1, [bus.ack.eq(1)]), #\n (1, [bus.ack.eq(0)]), #\n (0, []),\n ]\n \n t, tseq = 0, []\n for dt, a in seq:\n tseq.append((t, a))\n t += dt\n\n self.sync += timeline(bus.cyc & bus.stb, tseq)",
"def pin_on(self, pin):\n port_num = self._convert_pin_port(pin)\n if port_num:\n gpio.setcfg(port_num, gpio.OUTPUT)\n gpio.output(port_num, gpio.HIGH)",
"def add_layout_pins(self):\n en_offset = self.dc_inst.get_pin(\"in\").ll()\n self.add_layout_pin(text=\"en\",\n layer=\"metal1\",\n offset=en_offset.scale(1,0),\n width=self.m1_width,\n height=en_offset.y)\n\n out_offset = self.rbl_inv_inst.get_pin(\"Z\").ll()\n self.add_layout_pin(text=\"out\",\n layer=\"metal1\",\n offset=out_offset.scale(1,0),\n width=self.m1_width,\n height=out_offset.y)",
"def __init__(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.OUT)"
]
| [
"0.6256124",
"0.6165595",
"0.6141988",
"0.61245584",
"0.612056",
"0.61114496",
"0.60696626",
"0.6058473",
"0.60407984",
"0.60359067",
"0.5940562",
"0.5879642",
"0.5822711",
"0.58177835",
"0.5814207",
"0.57660544",
"0.5757099",
"0.5755097",
"0.5746251",
"0.5702823",
"0.56797504",
"0.5641358",
"0.5636109",
"0.5630704",
"0.56258655",
"0.5597348",
"0.55955166",
"0.5554799",
"0.5546619",
"0.5538786"
]
| 0.74909616 | 0 |
Change first convolution layer input channels. | def patch_first_conv(model, in_channels):
# get first conv
for module in model.modules():
if isinstance(module, nn.Conv2d):
break
# change input channels for first conv
module.in_channels = in_channels
weight = module.weight.detach()
reset = False
if in_channels == 1:
weight = weight.sum(1, keepdim=True)
elif in_channels == 2:
weight = weight[:, :2] * (3.0 / 2.0)
else:
reset = True
weight = torch.Tensor(
module.out_channels,
module.in_channels // module.groups,
*module.kernel_size
)
module.weight = nn.parameter.Parameter(weight)
if reset:
module.reset_parameters() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def patch_first_conv(model, in_channels: int = 4) -> None:\n\n # get first conv\n for module in model.modules():\n if isinstance(module, torch.nn.Conv2d):\n break\n\n # change input channels for first conv\n module.in_channels = in_channels\n weight = module.weight.detach()\n # reset = False\n\n if in_channels == 1:\n weight = weight.sum(1, keepdim=True)\n elif in_channels == 2:\n weight = weight[:, :2] * (3.0 / 2.0)\n elif in_channels == 4:\n weight = torch.nn.Parameter(torch.cat([weight, weight[:, -1:, :, :]], dim=1))\n elif in_channels % 3 == 0:\n weight = torch.nn.Parameter(torch.cat([weight] * (in_channels // 3), dim=1))\n\n module.weight = weight",
"def conv1x1(in_channels, out_channels, groups=1):\n return nn.Conv2d(\n in_channels, \n out_channels, \n kernel_size=1, \n groups=groups,\n stride=1)",
"def __init__(self, in_channels, out_channels):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=5, padding=1)",
"def change_input_channels(self, input_channels: int, mode=\"auto\"):\n raise NotImplementedError",
"def _first_conv(x: tf.Tensor) -> tf.Tensor:\n with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):\n x = ResNet._conv2d_same(x, 64, 7, stride=2, scope='conv1')\n return slim.max_pool2d(x, [3, 3], stride=2, scope='pool1')",
"def apply(self, input):\n\n # input.unsqueeze(1) changes dim from (minibatch_size, sequence_length) to\n # (minibatch_size, num_channels=1, sequence_length)\n # the final squeeze(1) removes the num_channels=1 axis\n return torch.nn.functional.conv1d(input.unsqueeze(1), self.filt.type_as(input),\n padding=self.padding).squeeze(1)",
"def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))",
"def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)",
"def conv2d_one_by_one(self, output_channel):\n return self.add_layer(conv2d_one_by_one, output_channel)",
"def clConvolution(self, size, mask):",
"def conv_layer(n_in_filters, n_filters, ker_size, stride=1, \n depthwise=False, zero_bn=False, act=True) :\n bn = nn.BatchNorm2d(n_filters)\n nn.init.constant_(bn.weight, 0. if zero_bn else 1.)\n conv = nn.Conv2d(n_in_filters, n_filters, ker_size, stride=stride,padding=ker_size//2, \n bias=False,groups = n_in_filters if depthwise else 1)\n layer = [conv, bn]\n if act: layer += [Swish()]\n return nn.Sequential(*layer)",
"def Linear1d(\n in_channels: int,\n out_channels: int,\n stride: int = 1,\n bias: bool = True,\n) -> torch.nn.Module:\n return nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)",
"def _convk(\n in_channels, out_channels, kernel_size=3, stride=1, groups=1, dilation=1, bias=False\n):\n padding = dilation * (kernel_size - 1) // 2\n return Conv1d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=bias,\n dilation=dilation,\n )",
"def to_channel_first(image, target):\n # put channel first\n image = image.transpose((2, 0, 1))\n return image, target",
"def sn_conv1x1(x, output_dim, training=True, name='sn_conv1x1'):\n with tf.variable_scope(name, custom_getter=sn_gettr(training=training)):\n w = tf.get_variable(\n 'weights', [1, 1, x.get_shape()[-1], output_dim],\n initializer=tf.keras.initializers.VarianceScaling(\n scale=1.0, mode='fan_avg', distribution='uniform'))\n conv = tf.nn.conv2d(\n input=x, filters=w, strides=[1, 1, 1, 1], padding='SAME')\n return conv",
"def pre_filter_channels(self, channels=None): # pragma: no cover\n pass",
"def convs(self, x):\n\n for layer, drop in zip(self.convolutionals, self.cnn_drop):\n x = F.max_pool2d(F.relu(drop(layer(x))), (1, 2))\n\n if self._to_linear is None:\n print(x.shape)\n self._to_linear = x[0].shape[0]*x[0].shape[1]*x[0].shape[2]\n\n return x",
"def changeConvolutionalDepth(self,depth):\n self.conv_depth = depth",
"def test_conv1d():\n filters = 3\n kernel_size = 2\n strides = 1\n batch_size = 2\n in_channels = 3\n input_size = 5\n input_shape = (batch_size, input_size, in_channels)\n\n keras_layer = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, use_bias=True, bias_initializer=\"ones\")\n input_layer = keras.Input(batch_shape=input_shape)\n keras_model = keras.models.Model(input=input_layer, outputs=keras_layer(input_layer))\n\n new_weights = np.arange(18).reshape(2, 3, 3)\n keras_layer.set_weights([new_weights, keras_layer.get_weights()[1]])\n\n kinput = np.arange(batch_size * input_size * in_channels).reshape(input_shape)\n kout = keras_model.predict(kinput)\n\n torch_model, _ = translate.translate_layer(keras_layer)\n tinput = torch.Tensor(kinput).permute(0, 2, 1)\n tout = torch_model(tinput).permute(0, 2, 1)\n assert np.isclose(kout, tout.cpu().data.numpy()).all()",
"def apply_conv2d_1x1(input_layer, num_classes, kernel_size=1):\n # tf.layers.conv2d(inputs, filters, kernel_size, strides=(1, 1), padding='valid', ...,\n # kernel_initializer=None, ... , kernel_regularizer=None)\n return tf.layers.conv2d(input_layer, num_classes, kernel_size, padding='same',\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))",
"def conv1x1(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)",
"def forward(self, x):\n conv_output = self.conv1(x)\n\n # The window size of max pooling layer of CNN depends on the dimension of conv1d output.\n # Since padding size is 1 and kernal size is 5, so the output of conv1d is with dimension\n # length_of_input_sequence - 2 + 5 - 1 = length_of_input_sequence - 2\n x_conv = F.max_pool1d(F.relu(conv_output), x.size()[-1] - 2)\n return x_conv",
"def PrimaryCap(inputs, dim_vector, n_channels, kernel_size, strides, padding):\n output = layers.Conv1D(filters=dim_vector * n_channels, kernel_size=kernel_size, strides=strides, padding=padding)(\n inputs)\n \n #output = Activation(activation=\"relu\")(output)\n outputs = layers.Reshape(target_shape=[-1, dim_vector])(output)\n return layers.Lambda(squash)(outputs)",
"def __init__(self, channels, momentum):\n super(PointNetConv2Layer, self).__init__()\n self.channels = channels\n self.momentum = momentum",
"def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)",
"def conv1x1(in_planes, out_planes, stride=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\r\n padding=0, bias=False)",
"def conv2D(null,channels,X,stride,kernel_shape,padding = False,initialize_weights = True,*args):\n # filters = dimensionality of output space\n # If padding is enabled, we pad the input with zeros such that the input size\n # remains the same if weights with stride 1 are applied to the input\n if initialize_weights:\n kernel = np.random.normal(size = (kernel_shape[0],kernel_shape[1],kernel_shape[2]))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n kernel = torch.FloatTensor(kernel)\n kernel.requires_grad = False\n else:\n kernel = args[0] # weights and bias must be given if initialise weights is disabled\n bias = args[1]\n kernel_shape = kernel.shape\n \n X = X.detach().numpy()\n if padding: # Can only pad during initialization -> weights and input shapes cannot change during feedforward and backpropagation\n if kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 != 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 != 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n else:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n \n X = torch.FloatTensor(X)\n \n img_shape = X.shape\n \n output_size1 = math.floor((img_shape[1] - kernel_shape[1])/(stride)) + 1\n output_size2 = math.floor((img_shape[2] - kernel_shape[2])/(stride)) + 1\n output_shape = [channels,output_size1,output_size2]\n \n X_im2col,im = im2col(X,kernel,stride)\n \n \n if initialize_weights:\n weight = torch.reshape(kernel,(kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))\n # weight consists of only one weight vector. But the dimensionality of output space has to be\n # num_filters. So we need to stack weight vectors horizontally and create num_filters number of\n # feature maps\n for i in range(channels-1):\n weight2 = np.random.normal(size = (kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n weight2 = torch.FloatTensor(weight2)\n weight2.requires_grad = False\n weight = torch.cat((weight2, weight),1) # do this num_filters - 1 number of times\n conv_output = torch.t(X_im2col).mm(weight)\n bias = torch.Tensor(np.random.normal(size = conv_output.shape))\n conv_output += bias\n conv_output = torch.reshape(conv_output,(output_shape))\n return torch.nn.Parameter(conv_output), torch.nn.Parameter(weight),X_im2col,im, output_shape,bias\n else:\n # Since weights are already initialised, the relevant channels are already dictated in the architecture.\n # Therefore, conv output is just a matmul\n conv_output = torch.t(X_im2col).mm(kernel) + bias\n return torch.nn.Parameter(conv_output),X_im2col",
"def add_conv_type2(model, depth):\n model.add(Convolution2D(depth, 3, 3, subsample=(1, 1)))",
"def _conv_block( inputs, filters, kernel, strides, nl):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)",
"def conv_init(conv, act='linear'):\r\n n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels\r\n conv.weight.data.normal_(0, math.sqrt(2. / n))"
]
| [
"0.75123245",
"0.6696509",
"0.66346097",
"0.64516467",
"0.64232844",
"0.63495713",
"0.6230231",
"0.62070876",
"0.6168099",
"0.61499965",
"0.60493684",
"0.6044107",
"0.60128045",
"0.59726113",
"0.59684473",
"0.5937715",
"0.5925386",
"0.5920553",
"0.59197867",
"0.5918833",
"0.587419",
"0.58694875",
"0.5820181",
"0.5806012",
"0.5804756",
"0.57972944",
"0.5795942",
"0.5782056",
"0.57772684",
"0.5775164"
]
| 0.7558337 | 0 |
Returns True if user's answer matches with answer from database. | def is_correct_answer(answer):
db_answer = Answer.objects.get(id=int(list(answer.keys())[0]))
return db_answer.is_correct == bool(list(answer.values())[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check(self, answer):\n return self.answer == answer",
"def check_if_correct(self, q, ans):\n answer = OnlyAnswer.objects.get(question=q)\n cleaned_guess = (ans.strip()).lower()\n if answer.content == cleaned_guess:\n return True\n else:\n return False",
"def check_if_correct(self, guess):\n answer = MCQAnswer.objects.get(id=guess)\n\n if answer.correct is True:\n return True\n else:\n return False",
"def is_user_answer_correct(user_answer, comp):\n user_correct = False\n if user_answer == comp.correct_answer:\n user_correct = True\n return user_correct",
"def answer_check(self, game, position, answer):\n q = session.query(Question).filter(\n Question.game == game, Question.position == position).first()\n if q == None:\n return None\n ans = session.query(Answer).filter(\n Answer.question == q, Answer.text.ilike(answer), Answer.accepted == True).first()\n return ans",
"def check_answer(answer, solution):\n if answer == solution:\n return True\n else:\n return False",
"def is_response_correct(self, response):\n for answer in self.my_osid_object.get_answers():\n if self._is_match(response, answer):\n return True\n return False",
"def matches(self, answer):\n return self.group_id == answer.group_id and \\\n self.block_id == answer.block_id and \\\n self.answer_id == answer.answer_id and \\\n self.group_instance == answer.group_instance and \\\n self.answer_instance == answer.answer_instance",
"def check_answer(self, choice):\n return choice == self.answer",
"def check_answer(answer, answer_dict, question):\n print(f\"\\nThe answer is: {question['correct']}\")\n\n if answer_dict[answer] == question['correct']:\n print(\"You got it correct! :)\")\n return True\n else:\n print(\"You got it incorrect! :(\")\n return False",
"def answer(self) -> bool:",
"def exists(self, answer):\n return self.find(answer) is not None",
"def is_correct_answer(game_module, question, answer):\n correct_answer = game_module.get_correct_answer(question)\n return correct_answer == answer",
"def check_security_answers(db, username, answers, minimum_correct=0):\n user = get_username_profile(db, username)\n if user is None:\n return False\n\n security = db['security'].find_one({'_id': str(user['_id'])})\n if security is None:\n return False\n correct_answers = security['security_answers']\n\n # Count the number of correct answers\n correct_count = 0\n for answer, correct in zip(answers, correct_answers):\n if answer == correct:\n correct_count += 1\n elif check_password_hash(correct, answer):\n correct_count += 1\n\n # Check if satisfy minimum correct count\n if minimum_correct > 0:\n return correct_count >= minimum_correct\n else: # Otherwise, pass if all correct\n return correct_count == len(correct_answers)",
"def compare_answer(self, ans1, ans2):\r\n internal_result = self.check_formula(ans1, ans2, self.samples)\r\n return internal_result == \"correct\"",
"def check_answer(self, user_answer, correct_answer):\n if user_answer.lower() == correct_answer.lower():\n self.score += 1\n print(\"You got it right! \", end=\"\")\n else:\n print(\"That's wrong. \", end=\"\")\n print(f\"The correct answer was {correct_answer}\")\n print(f\"Your current score is: {self.score}/{self.question_number}.\\n\")",
"def is_correct_answer(user_answer, correct_answer):\n\n string_correct_answer = str(correct_answer).lower()\n string_user_answer = str(user_answer).lower()\n return string_user_answer == string_correct_answer\n\n\n # If you accidentally used the same variable name\n # twice in the comparison, for example\n\n # return string_user_answer == string_user_answer\n\n # the tests will catch the error, you'll see test fails\n # breaking code that works is called a regression",
"def __check_answer_id(self, answer_id: str) -> bool:\n return answer_id == self.session_facade.correct_id",
"def default_checker(self, raw_answers):\r\n formatted_answers = [x.strip().lower() for x in raw_answers]\r\n result = [user == correct for user, correct\r\n in zip(formatted_answers, self.correct_answers)]\r\n len_diff = len(self.correct_answers) - len(formatted_answers)\r\n if len_diff > 0:\r\n for _ in range(len_diff):\r\n result.append(False)\r\n return result",
"def correct_or_not(user_ans,ans_list):\r\n\tfor ans_word in ans_list:\r\n\t\tif user_ans == ans_word:\r\n\t\t\treturn True\r\n\treturn False",
"def is_correct(self, ans):\n \n seq = self.player_seq.copy()\n seq.append(ans)\n return seq[-1] == self.sequence[len(seq) - 1]",
"def vote_exists(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"SELECT user_id, vote_id FROM votes WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.answer_id, self.user_id))\n queryset_list = cur.fetchall()\n con.close()\n if len(queryset_list) < 1:\n return False\n return True\n except Exception as e:\n print(e)\n con.close()\n return False",
"def check_answer(self, chat_id, answer):\n\n language = self.user.get_lang()\n if answer == language:\n self.user.increase_score()\n\n message_text = \"Correct!\"\n self.bot.sendMessage(chat_id, message_text)\n\n self.user.set_lang_path(\"\") # resets current_lang_path in database\n self.send_track(chat_id)\n else:\n message_text = \"You answer is incorrect. Try again.\"\n self.bot.sendMessage(chat_id, message_text)",
"def answer_available(self):\r\n if self.showanswer == '':\r\n return False\r\n elif self.showanswer == \"never\":\r\n return False\r\n elif self.runtime.user_is_staff:\r\n # This is after the 'never' check because admins can see the answer\r\n # unless the problem explicitly prevents it\r\n return True\r\n elif self.showanswer == 'attempted':\r\n return self.attempts > 0\r\n elif self.showanswer == 'answered':\r\n # NOTE: this is slightly different from 'attempted' -- resetting the problems\r\n # makes lcp.done False, but leaves attempts unchanged.\r\n return self.lcp.done\r\n elif self.showanswer == 'closed':\r\n return self.closed()\r\n elif self.showanswer == 'finished':\r\n return self.closed() or self.is_correct()\r\n\r\n elif self.showanswer == 'past_due':\r\n return self.is_past_due()\r\n elif self.showanswer == 'always':\r\n return True\r\n\r\n return False",
"def check_guess(self, user_guess):\n return user_guess in self.active_phrase",
"def ask_and_evaluate(self):\n\n print self.question\n return self.correct_answer == raw_input('>> ').lower()",
"def vote_result(self) -> bool:\n token_score = self.create_interface_score(self._token_score.get(), TokenInterface)\n yes = 0\n no = 0\n for address in self._voted:\n vote = self._vote[str(address)]\n if vote == 'yes':\n yes += token_score.balanceOf(address)\n else:\n no += token_score.balanceOf(address)\n self._yes_votes.set(yes)\n self._no_votes.set(no)\n if self._yes_votes.get() > (token_score.totalSupply() - token_score.balanceOf(self._rewards_score.get())) // 2:\n return True\n else:\n return False",
"def test_compare_answer(self):\r\n problem = self.build_problem(answer=\"42\")\r\n responder = problem.responders.values()[0]\r\n self.assertTrue(responder.compare_answer('48', '8*6'))\r\n self.assertFalse(responder.compare_answer('48', '9*5'))",
"def compare_answer(ans1, ans2):\r\n return ans1 == ans2",
"def compare_results(self):\n return self.guess_number == self.secret_number"
]
| [
"0.7286002",
"0.72846013",
"0.7091092",
"0.6901954",
"0.67433286",
"0.6692698",
"0.6672469",
"0.655168",
"0.6540481",
"0.65291345",
"0.6469405",
"0.64011556",
"0.6371484",
"0.6338362",
"0.6301711",
"0.62966347",
"0.6283468",
"0.62660295",
"0.6132463",
"0.6112869",
"0.60622096",
"0.60437584",
"0.603966",
"0.6022944",
"0.5985156",
"0.5983162",
"0.59729785",
"0.59672266",
"0.59593594",
"0.5923041"
]
| 0.7438986 | 0 |
If the emission_rest_wavelengths parameter is present, return a nebular emission line spectrum. Currently uses several approximations for the velocity broadening. Currently does not affect photometry. Only provides samples of the nebular spectrum at outwave, so will not be correct for total power unless outwave densley samples the emission dispersion. | def nebular(self, params, outwave):
if 'emission_rest_wavelengths' not in params:
return 0.
mu = vac2air(params['emission_rest_wavelengths'])
# try to get a nebular redshift, otherwise use stellar redshift,
# otherwise use no redshift
a1 = params.get('zred_emission', self.params.get('zred', 0.0)) + 1.0
A = params.get('emission_luminosity', 0.)
sigma = params.get('emission_disp', 10.)
if params.get('smooth_velocity', False):
# This is an approximation to get the dispersion in terms of
# wavelength at the central line wavelength, but should work much
# of the time
sigma = mu * sigma / 2.998e5
return gauss(outwave, mu * a1, A, sigma * a1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_spectrum(self, outwave=None, filters=None, nebular=True, **params):\n spec, neb, phot, ex = self.get_components(outwave, filters, **params)\n total_spec = (spec * self.params['mass'][:, None]).sum(axis=0)\n if nebular:\n total_spec += neb\n total_phot = (phot * self.params['mass'][:, None]).sum(axis=0)\n extra = (ex * self.params['mass']).sum()\n\n return total_spec, total_phot, extra",
"def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):\n self.params.update(**params)\n # Pass the model parameters through to the sps object\n ncomp = len(self.params['mass'])\n for ic in range(ncomp):\n s, p, x = self.one_sed(component_index=ic, filterlist=filters)\n try:\n spec += s\n maggies += p\n extra += [x]\n except(NameError):\n spec, maggies, extra = s, p, [x]\n # `spec` is now in Lsun/Hz, with the wavelength array being the\n # observed frame wavelengths. Flux array (and maggies) have not been\n # increased by (1+z) due to cosmological redshift\n\n if outwave is not None:\n w = self.csp.wavelengths\n spec = np.interp(outwave, w, spec)\n # Distance dimming and unit conversion\n if (self.params['zred'] == 0) or ('lumdist' in self.params):\n # Use 10pc for the luminosity distance (or a number provided in the\n # lumdist key in units of Mpc). Do not apply cosmological (1+z)\n # factor to the flux.\n dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2\n a = 1.0\n else:\n # Use the comsological luminosity distance implied by this\n # redshift. Incorporate cosmological (1+z) factor on the flux.\n lumdist = cosmo.luminosity_distance(self.params['zred']).value\n dfactor = (lumdist * 1e5)**2\n a = (1 + self.params['zred'])\n if peraa:\n # spectrum will be in erg/s/cm^2/AA\n spec *= to_cgs * a / dfactor * lightspeed / outwave**2\n else:\n # Spectrum will be in maggies\n spec *= to_cgs * a / dfactor / 1e3 / (3631*jansky_mks)\n\n # Convert from absolute maggies to apparent maggies\n maggies *= a / dfactor\n \n return spec, maggies, extra",
"def spectre_etrange(f):\n end = False\n while not end:\n try:\n line = f.readline().split()\n wavnew = [float(w) for w in line]\n wav = np.append(wav,wavnew)\n prevwav = wavnew[-1]\n except:\n end = True\n aflux = f.readlines()\n for line in aflux:\n line = re.sub('-10\\d', 'e-100', line)\n flux = np.append(flux, line.rstrip().split())\n \n wav, flux = np.array(wav), np.array(flux)\n return wav,flux",
"def spectrum(self,photon_energy):\n\n Eph = _validate_ene(photon_energy)\n\n spec = self.n0 * (self.weight_ee * self._emiss_ee(Eph)\n + self.weight_ep * self._emiss_ep(Eph))\n\n return spec",
"def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n #for decimated data,sampleRate should be reflected\r\n #set wlen to 0.25 sec, high pass is 250\r\n wlen = 0.5*sampleRate\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n Emat = None\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n \r\n t_ = (traceList[0].stats.endtime-traceList[0].stats.starttime)\r\n dx_ = traceList[1].stats.distance - traceList[0].stats.distance\r\n extent = [0,len(traceList)*dx_/1e3,0,t_/100.0]\r\n\r\n for itr in range(0,nTraces):\r\n #F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n # window='hann', nfft=nfft, mode='magnitude')\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n #energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n energy = np.sum(SXX[1:,:],axis=0)\r\n #energy = np.log10(np.abs(energy/np.max(energy)))*10.0\r\n energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n if DEBUG:\r\n plt.figure()\r\n im = plt.imshow(Emat,extent=extent)\r\n plt.colorbar(im)\r\n plt.savefig('spectralenergy{0}_ch{1}_{2}.png'.format(outfile,channelStart,channelEnd))\r\n plt.close()",
"def spectrum(self, photon_energy):\n\n outspecene = _validate_ene(photon_energy)\n\n from scipy.special import cbrt\n\n def Gtilde(x):\n \"\"\"\n AKP10 Eq. D7\n\n Factor ~2 performance gain in using cbrt(x)**n vs x**(n/3.)\n \"\"\"\n gt1 = 1.808 * cbrt(x) / np.sqrt(1 + 3.4 * cbrt(x) ** 2.)\n gt2 = 1 + 2.210 * cbrt(x) ** 2. + 0.347 * cbrt(x) ** 4.\n gt3 = 1 + 1.353 * cbrt(x) ** 2. + 0.217 * cbrt(x) ** 4.\n return gt1 * (gt2 / gt3) * np.exp(-x)\n\n log.debug('calc_sy: Starting synchrotron computation with AKB2010...')\n\n # strip units, ensuring correct conversion\n # astropy units do not convert correctly for gyroradius calculation when using\n # cgs (SI is fine, see https://github.com/astropy/astropy/issues/1687)\n CS1_0 = np.sqrt(3) * e.value ** 3 * self.B.to('G').value\n CS1_1 = (2 * np.pi * m_e.cgs.value * c.cgs.value ** 2 *\n hbar.cgs.value * outspecene.to('erg').value)\n CS1 = CS1_0/CS1_1\n\n # Critical energy, erg\n Ec = 3 * e.value * hbar.cgs.value * self.B.to('G').value * self._gam ** 2\n Ec /= 2 * (m_e * c).cgs.value\n\n EgEc = outspecene.to('erg').value / np.vstack(Ec)\n dNdE = CS1 * Gtilde(EgEc)\n # return units\n spec = trapz_loglog(np.vstack(self._nelec) * dNdE, self._gam, axis=0) / u.s / u.erg\n spec = spec.to('1/(s eV)')\n\n return spec",
"def getIntensitySpectum(self,wavelengths,intensities = 1.0):\n angles,peaks,widths = self.getSpectrum(wavelengths,intensities)\n\n # Get arnge of angles to calulate over\n minField = np.min(angles) - 10*np.max(widths)\n maxField = np.max(angles) + 10*np.max(widths)\n # Sample finely enough to make peak widths visible, but least 400points\n npoints = max(int((maxField - minField)/np.min(widths)),400)\n\n # Make the two two array for the output data\n fieldAngle = np.linspace(minField,maxField,npoints)\n spectralOutput = np.zeros(fieldAngle.size)\n\n # Add each peak in turn\n for a,p,w in zip(angles,peaks,widths):\n\n for i,af in enumerate(fieldAngle):\n s = self.lineShape(a,w,af) # Add the spectrometer lineshape\n spectralOutput[i] += p*s\n\n # Return the two numpy arrays as a list\n return fieldAngle,spectralOutput",
"def getSpectrum(self,wavelengths,intensities = 1.0):\n\n if isinstance(intensities,(float,int)): # If single value, make np array\n intensities = np.full(wavelengths.size,intensities)\n\n pt = self.getInputPoint() # Input point on prism\n ang = Unit3d().parseAngle(self.minDeviation()/2) # Direction of imput beam\n pencil = RayPencil().addRays(pt,ang,wavelengths,intensities) # Make pencil of rays\n pencil *= self # Put through prism\n\n # Extarct the information about the rays into numpy arrays.\n angles = np.zeros(len(pencil))\n peaks = np.zeros(len(pencil))\n widths = np.zeros(len(pencil))\n\n for i,ray in enumerate(pencil):\n a = ray.getAngle() # Get the angle in theta,psi format\n angles[i] = a.theta*math.cos(a.psi) # Extract -ve angle\n peaks[i] = ray.getIntensity()\n # Note width is lambda/(pi*diameter) but 1,000 to convert nn -> um\n widths[i] = ray.getWavelength()/(2000*math.pi*self.beam)\n\n return angles,peaks,widths",
"def Create_Constant_WavelengthArray(spec_cube,final_wave_start,final_wave_end):\n\tdwave = np.zeros(len(spec_cube))\n\tfor n in xrange(len(spec_cube)):\n\t\ttemp_final_wave = spec_cube[n][0] # Take one of the spectrum use its resolution\n\t\tdwave[n] = np.median(temp_final_wave[1:] - temp_final_wave[:-1])\n\tdwave = np.max(dwave)\n\tfinal_wave = np.arange(final_wave_start,final_wave_end,dwave)\n\tprint 'Since input dv = 0 -> median resolution (constant) dwave = %f angstrom is used.' % dwave\n\treturn final_wave",
"def magnitude_2_waveform(magnitude, n_iter=16, frame_length=512,\n frame_step=128, log_magnitude=True,\n n_mel_bins=None, mel_lower_hertz_edge=0.0,\n mel_upper_hertz_edge=8000.0):\n\n if len(magnitude.shape) == 2:\n magnitude = tf.expand_dims(magnitude, 0)\n\n if log_magnitude:\n magnitude = np.exp(magnitude) - _EPSILON\n\n if n_mel_bins:\n magnitude = _mel_to_linear_scale(\n magnitude, frame_length//2, mel_lower_hertz_edge, mel_upper_hertz_edge\n )\n\n # Set the nyquist frequency to zero (the band we earlier removed).\n # This is also commonly done in these other papers.\n magnitude = np.pad(magnitude, [[0, 0], [0, 0], [0, 1]])\n\n to_waveform = lambda m: griffinlim(\n np.transpose(m), n_iter=n_iter, win_length=frame_length,\n hop_length=frame_step, pad_mode='constant', center=False\n )\n\n return np.array(list(map(to_waveform, magnitude)))",
"def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new",
"def spectrum(self,photon_energy):\n\n outspecene = _validate_ene(photon_energy)\n\n if not hasattr(self, 'Etrans'):\n # Energy at which we change from delta functional to accurate\n # calculation\n self.Etrans = 0.1 * u.TeV\n else:\n validate_scalar('Etrans', self.Etrans,\n domain='positive', physical_type='energy')\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.nhat = 1. # initial value, works for index~2.1\n if np.any(outspecene < self.Etrans) and np.any(outspecene >= self.Etrans):\n # compute value of nhat so that delta functional matches accurate\n # calculation at 0.1TeV\n full = self._calc_specpp_hiE(self.Etrans)\n delta = self._calc_specpp_loE(self.Etrans)\n self.nhat *= (full / delta).decompose().value\n\n self.specpp = np.zeros(len(outspecene)) * u.Unit('1/(s TeV)')\n\n for i, Egamma in enumerate(outspecene):\n if Egamma >= self.Etrans:\n self.specpp[i] = self._calc_specpp_hiE(Egamma)\n else:\n self.specpp[i] = self._calc_specpp_loE(Egamma)\n\n density_factor = (self.nh / (1 * u.Unit('1/cm3'))).decompose().value\n\n return density_factor * self.specpp.to('1/(s eV)')",
"def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)",
"def fluxes(wavelength, s, line, lowlow= 14, lowhigh=6, highlow=6, highhigh = 14, lmin=0, lmax=0, fmin=0, fmax=0, \n broad=2.355, plot=True, verbose=True, plot_sus = False, fcal = True, fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1,\n # s must be an array, no a list\n try: \n index_maximo_del_rango = s.tolist().index(np.nanmax(s))\n #print \" is AN ARRAY\"\n except Exception:\n #print \" s is A LIST -> must be converted into an ARRAY\" \n s = np.array(s)\n \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n if np.isnan(np.nanmedian(f_spec)): \n # The data are NAN!! Nothing to do\n if verbose or warnings: print(\" There is no valid data in the wavelength range [{},{}] !!\".format(lmin,lmax))\n \n resultado = [0, line, 0, 0, 0, 0, 0, 0, 0, 0, 0, s ] \n\n return resultado\n \n else: \n \n ## 20 Sep 2020\n f_spec_m=signal.medfilt(f_spec,median_kernel) # median_kernel = 35 default\n \n \n # Remove nans\n median_value = np.nanmedian(f_spec)\n f_spec = [median_value if np.isnan(x) else x for x in f_spec] \n \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # We have to find some \"guess numbers\" for the Gaussian. Now guess_centre is line\n guess_centre = line\n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n \n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n #print line #f_cont\n # if line == 8465.0:\n # print w_cont\n # print f_cont_filtered\n # plt.plot(w_cont,f_cont_filtered)\n # plt.show()\n # plt.close()\n # warnings=True\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value b = \",bb,\": cont = 0 * w_spec + \", bb)\n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n \n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < 
guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = a + b*np.array(w_cont) \n \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n \n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line)\n mini = np.nanmin(min_w)\n # guess_peak = f_spec[min_w.tolist().index(mini)] # WE HAVE TO SUSTRACT CONTINUUM!!!\n guess_peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n \n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a \n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n low_limit = ws[sorted_by_flux[0]]\n except Exception:\n plot=True\n low_limit = 0\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n high_limit = ws[sorted_by_flux[0]] \n except Exception:\n plot=True\n high_limit = 0 \n \n # Guess centre will be the highest value in the range defined by [low_limit,high_limit]\n \n try: \n rango = np.where((high_limit >= wavelength ) & (low_limit <= wavelength)) \n index_maximo_del_rango = s.tolist().index(np.nanmax(s[rango]))\n guess_centre = wavelength[index_maximo_del_rango]\n except Exception:\n guess_centre = line #### It was 0 before\n \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre, guess_peak, broad/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(gauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n \n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n \n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n # if low_limit < fit[0] < high_limit:\n if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:\n # if verbose: print \" Fitted center wavelength\", fit[0],\"is NOT in the range [\",low_limit,\",\",high_limit,\"]\"\n if verbose: print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n # print \"Re-do fitting fixing center wavelength\"\n # p01 = [guess_peak, broad]\n # fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...\n # fit_error1 = np.sqrt(np.diag(pcov1))\n # fit[0]=guess_centre\n # fit_error[0] = 0.\n # fit[1] = fit1[0]\n # fit_error[1] = fit_error1[0]\n # fit[2] = fit1[1]\n # fit_error[2] = fit_error1[1] \n \n fit[0]=guess_centre\n fit_error[0] = 0.000001\n fit[1]=guess_peak\n fit_error[1] = 0.000001\n fit[2] = broad/2.355\n fit_error[2] = 0.000001 \n else:\n if verbose: print(\" Fitted center wavelength\", fit[0],\"IS in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n \n if verbose: print(\" Fit parameters = \", fit[0], fit[1], fit[2])\n if fit[2] == broad and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.\") \n gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])\n \n \n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations\n gaussian_flux = gauss_flux(fit[1],fit[2])\n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n index=0\n s_s=np.zeros_like(s)\n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n \n # Plotting \n ptitle = 'Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit)\n if plot :\n plt.figure(figsize=(10, 4))\n # Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.8)\n # Plot median input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec_m), \"orange\", lw=3, alpha = 0.5) # 2021: era \"g\"\n # Plot spectrum - gauss subtracted\n plt.plot(wavelength,s_s,\"g\",lw=3, alpha = 0.6)\n \n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$ ]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.3)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n plt.plot(w_spec, residuals, 'k')\n plt.title(ptitle)\n plt.show()\n \n # Printing results\n if verbose :\n print(\"\\n - Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( 
%.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # Plot independent figure with substraction if requested \n if plot_sus: plot_plot(wavelength,[s,s_s], xmin=lmin, xmax=lmax, ymin=fmin, ymax=fmax, fcal=fcal, frameon=True, ptitle=ptitle)\n \n # 0 1 2 3 4 5 6 7 8 9 10 11\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s ]\n return resultado \n except Exception:\n if verbose: \n print(\" - Gaussian fit failed!\")\n print(\" However, we can compute the integrated flux and the equivalent width:\")\n \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n \n if verbose:\n print(\" Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n \n resultado = [0, guess_centre, 0, 0, 0, 0, 0, flux, flux_error, ew, ew_error, s ] # guess_centre was identified at maximum value in the [low_limit,high_limit] range but Gaussian fit failed\n \n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n # plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n # plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n # plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n \n \n return resultado",
"def getSpectralEnergyFrame(datatype, traceList, outfile, channelStart, channelEnd, winlen=1000):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = traceList[0].stats.sampling_rate\r\n\r\n wlen = 256\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n nperlen = len(traceList[0].data)\r\n if winlen>=nperlen:\r\n nFrames=1\r\n else:\r\n nFrames = int(nperlen/winlen)\r\n\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n\r\n for iframe in range(nFrames): \r\n Emat = None\r\n for itr in range(0,nTraces):\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data[iframe*winlen:(iframe+1)*winlen]), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n #energy = np.abs(np.log10(np.abs(energy/np.max(energy)))*10.0)\r\n #energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n \r\n #datafile = 'spectralenergy_{0}_ch{1}_{2}.npy'.format(outfile,channelStart,channelEnd)\r\n #np.save(datafile,Emat)\r\n #scale to 0 255\r\n print (Emat.max())\r\n Emat = (255.0 / Emat.max() * (Emat - Emat.min())).astype(np.uint8)\r\n im = Image.fromarray(Emat, 'L')\r\n imgfile = 'spectralenergy_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n im.save(imgfile)\r\n histogram = im.histogram()\r\n imgfile = 'spectralhist_{0}_ch{1}_{2}_{3}.png'.format(outfile,channelStart,channelEnd,iframe) \r\n plt.figure()\r\n plt.plot(histogram)\r\n plt.savefig(imgfile)",
"def measure_wavelength_avg(self, from_time, to_time, print_n=False):\n from_idx = self._find_index_for_time(from_time)\n to_idx = self._find_index_for_time(to_time)\n \n # print number of measurements\n if print_n:\n n_measurements = to_idx - from_idx + 1\n print 'n measurements:', n_measurements\n \n # calculate overlap\n overlap = self.overlap()[from_idx:to_idx+1,:]\n \n # intitialize wavelength storage\n wavelengths = np.zeros(overlap.shape[0])\n \n for i in range(overlap.shape[0]):\n this_overlap = overlap[i,:]\n \n # set all overlap entries below 0.1% of the maximum overlap to zero\n this_overlap[this_overlap<0.001*np.max(this_overlap)] = 0.0\n \n nonzero = np.nonzero(this_overlap)\n \n # find connected section of chain with nonzero overlap\n consecutives = np.split(nonzero, np.where(np.diff(nonzero) != 1)[0]+1)\n \n if len(consecutives) != 1:\n warnings.warn('Wavelength could not be determined unambiguously.')\n return np.nan\n else:\n # add 1 since overlap involves two beads each\n wavelengths[i] = float( len(consecutives[0][0]) ) + 1\n \n return np.mean( wavelengths )",
"def spectrum(self,photon_energy):\n outspecene = _validate_ene(photon_energy)\n\n self.specic = np.zeros(len(outspecene)) * u.Unit('1/(s eV)')\n\n for seed in self.seed_photon_fields:\n # Call actual computation, detached to allow changes in subclasses\n self.specic += self._calc_specic(seed,outspecene).to('1/(s eV)')\n\n self.specic = self.specic.to('1/(s eV)')\n\n return self.specic",
"def mask_emissionlines(self, element_emission_lines):\n\t\t#Dictionary of corrosponding elements to their emission lines\n\t\temission_dict = {'He-II' : (3202.15, 4685.74),\n\t\t\t\t\t\t 'Ne-V' : (3345.81, 3425.81),\n\t\t\t\t\t\t 'O-II' : (3726.03, 3728.73),\n\t\t\t\t\t\t 'Ne-III': (3868.69, 3967.40),\n\t\t\t\t\t\t 'H-ζ' : 3889.05,\n\t\t\t\t\t\t 'H-ε' : 3970.07,\n\t\t\t\t\t\t 'H-δ' : 4101.73,\n\t\t\t\t\t\t 'H-γ' : 4340.46,\n\t\t\t\t\t\t 'O-III' : (4363.15, 4958.83, 5006.77),\n\t\t\t\t\t\t 'Ar-IV' : (4711.30, 4740.10),\n\t\t\t\t\t\t 'H-β' : 4861.32,\n\t\t\t\t\t\t 'N-I' : (5197.90, 5200.39),\n\t\t\t\t\t\t 'He-I' : 5875.60,\n\t\t\t\t\t\t 'O-I' : (6300.20, 6363.67),\n\t\t\t\t\t\t 'N-II' : (6547.96, 6583.34),\n\t\t\t\t\t\t 'H-α' : 6562.80,\n\t\t\t\t\t\t 'S-II' : (6716.31, 6730.68),\n\t\t\t\t\t\t 'Ar-III': 7135.67}\n\n\t\t#Create an array full of booleans equal to False, same size as the restframe_wavelength\n\t\tself.lines_mask = np.zeros_like(self.restframe_wavelength,dtype=bool)\n\n\t\t#Loop through the input of the emission lines list\n\t\tfor i in range(len(element_emission_lines)):\n\n\t\t\t#Check if the value is in the dictionary\n\t\t\tif element_emission_lines[i] in emission_dict:\n\n\t\t\t\tele_line = element_emission_lines[i]\n\t\t\t\tline = emission_dict[ele_line]\n\n\t\t\t\t#Check if it contains a tuple (some elements have more then one emission line)\n\t\t\t\tif type(line) == tuple:\n\n\t\t\t\t\t#Find the number of emission lines for this value\n\t\t\t\t\tn_lines = len(line)\n\n\t\t\t\t\t#Loop through and mask them\n\t\t\t\t\tfor n in range(n_lines):\n\n\t\t\t\t\t\tn_line = line[n]\n\n\t\t\t\t\t\t#Creates the boolean array\n\t\t\t\t\t\ttemp_lines_mask = ((self.restframe_wavelength > n_line - self.N_angstrom_masked) & (self.restframe_wavelength < n_line + self.N_angstrom_masked))\n\t\t\t\t\t\t#Adds the boolean array to the exisiting one to save it\n\t\t\t\t\t\tself.lines_mask = (temp_lines_mask | self.lines_mask)\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\ttemp_lines_mask = ((self.restframe_wavelength > line - self.N_angstrom_masked) & (self.restframe_wavelength < line + self.N_angstrom_masked))\n\t\t\t\t\tself.lines_mask = (temp_lines_mask | self.lines_mask)\n\n\t\t\telse:\n\t\t\t\tprint(element_emission_lines[i])\n\t\t\t\traise KeyError",
"def getWavelength(self, inAngle, outAngle, wavelengths = [0.25,1.0]):\n\n # Get prism point and angle of input at Unit3d\n #\n pt = self.getInputPoint()\n u = Unit3d(Angle(inAngle))\n\n # Guess at initial wavelngth\n wave = (wavelengths[1] - wavelengths[0])/2\n # Make input ray at guess wavelength\n ray = IntensityRay(pt,u,wave)\n\n # Parameters for seaerch\n delta = 0.1\n forward = True\n na = float(\"inf\") # New angle\n\n while abs(na - outAngle) > 1.0e-9/abs(outAngle) :\n nray = ray*self # New Ray through prism\n na = nray.getAngle()\n na = na.theta*math.cos(na.psi) # In radians\n if na < outAngle: # Less that target\n wave += delta\n forward = True\n else:\n if forward: # Half step\n delta *= 0.5\n forward = False\n wave -= delta\n if wave < wavelengths[0] or wave > wavelengths[1]:\n print(\"Out of wavelength range :\")\n return float(\"nan\")\n\n ray.wavelength = wave # Update the wavelength of ray\n\n return ray.getWavelength() # End of loop, so success, return value",
"def get_surfaceflux_from_wavelength_and_laser_power(wavelength, rover_specs, laser_powers, receiver_areas,\n power_reqs, pointing_error=[1e-7, 1e-7]):\n assert len(power_reqs) == len(receiver_areas)\n assert len(power_reqs) == len(rover_specs)\n\n # Set the parameter space\n trans_radius = np.logspace(-3, 1, 1000)\n altitudes = np.logspace(4, 7, 1001)\n R, Z = np.meshgrid(trans_radius, altitudes, indexing=\"ij\")\n\n fig, ax = plt.subplots(len(power_reqs), len(laser_powers), sharey=True, sharex=True, figsize=(12, 7))\n for i, laser_power in enumerate(laser_powers):\n for j in range(len(power_reqs)):\n rover_spec = rover_specs[j]\n receiver_area = receiver_areas[j]\n power_req = power_reqs[j]\n\n # Get the beam radius\n beam_radius = R * np.sqrt(1.0 + (Z * wavelength / (np.pi * R ** 2)) ** 2)\n receiver_radius = np.sqrt(receiver_area / np.pi)\n radius_constraint_one = pointing_error[j] * Z + receiver_radius\n radius_constraint_two = pointing_error[j] * Z + beam_radius\n mask_one = beam_radius < radius_constraint_one\n mask_two = receiver_radius > radius_constraint_two\n final_mask = np.logical_and(mask_one, np.logical_not(mask_two))\n beam_radius[final_mask] = np.nan\n\n # Calculate the resulting surface flux\n receiver_power = laser_power/ (np.pi * beam_radius ** 2) * receiver_area\n receiver_power[np.pi * beam_radius ** 2 < receiver_area] = laser_power\n receiver_power[receiver_power < power_req] = np.nan\n\n # Normalise result by input power to get total efficiency\n receiver_power /= laser_power\n receiver_power[receiver_power < 0.001] = np.nan\n\n log_power = np.log10(receiver_power * 100)\n ax[j, i].contourf(np.log10(R), Z / 1e3, log_power, 100)\n m = cm.ScalarMappable()\n m.set_array(log_power)\n m.set_clim(-1.0, 2.0)\n fig.colorbar(m, ax=ax[j, i])\n ax[j, 0].set_ylabel('{} \\n Transmission distance [km]'.format(rover_spec))\n ax[0, i].set_title('Laser Power: {}kW'.format(laser_power / 1e3))\n ax[1, i].set_xlabel('Logarithm of Transmitter Radius [m]')\n plt.tight_layout()\n plt.show()\n\n return beam_radius, receiver_power",
"def get_spectral_phase_expansion(self, orders=4, prefix=1e12):\n if self.Et is not None:\n w = self.w\n ph = self.get_trace_spectral_phase()\n ph_ind = np.isfinite(ph)\n ph_good = ph[ph_ind]\n w_good = w[ph_ind] / prefix\n ph_poly = np.polyfit(w_good, ph_good, orders)\n else:\n ph_poly = None\n return ph_poly",
"def spectra(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 1 # Spare\n + 1 # Comp Schema spectra S\n + 3 # Comp Schema spectra k\n + 3 # Comp Schema spectra M\n + 1 # Spare\n + 1 # Comp Schema trigger S\n + 3 # Comp Schema trigger S\n + 3 # Comp Schema trigger S\n + 4 # Spare\n + 12 # Pixel mask\n + 2*8 # Number of data samples\n )\n\n variable = (\n num_samples * (\n 1*8 # Detector index\n + 32*8 # Spectrum x 32\n + 1*8 # Trigger\n + 1*8 # Number of integrations\n )\n )\n\n return fixed_header, variable",
"def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)",
"def mu_law_decode(output, quantization_channels=Channels):\n mu = float(quantization_channels - 1)\n\n expanded = (output / quantization_channels) * 2. - 1\n waveform = np.sign(expanded) * (\n np.exp(np.abs(expanded) * np.log(mu + 1)) - 1\n ) / mu\n\n return waveform",
"def constant_power_fade_envelopes(duration:float,\n sampleRate:int,\n t0:float,\n t1:float,) -> np.ndarray:\n y = np.ones(int(duration*sampleRate))\n y[:int(t0*sampleRate)] = 1/t0*np.linspace(0, t0, int(t0*sampleRate))\n y[-int(t1*sampleRate):] = 1 - 1/t1*np.linspace(0, t1, int(t1*sampleRate))\n\n even = 0.5*(np.sqrt(0.5+0.5*y) + np.sqrt(0.5-0.5*y))\n odd = 0.5*(np.sqrt(0.5+0.5*y) +- np.sqrt(0.5-0.5*y))\n\n return even + odd, even-odd",
"def calc_emission(self, emission, fingers):\n return math.sqrt(sum(item*item for item in emission[:-1])) / emission[-1]",
"def make_artificial_spectra(line_table, wavelength_range = None, dw = 0.1,\n sigma=1e-2, wavelength_unit=u.angstrom, flux_unit=u.electron):\n if wavelength_range is None:\n wavelength_range=[line_table['wavelength'].min(), line_table['wavelength'].max()]\n\n warr = np.arange(wavelength_range[0], wavelength_range[1], dw)\n\n flux = np.zeros(len(warr), dtype=float)\n for w, f in line_table:\n i = abs(warr - w).argmin()\n flux[i] = f\n\n if wavelength_unit is None:\n wavelength\n\n spec = Spectrum1D(spectral_axis=warr*wavelength_unit, flux = flux * flux_unit)\n spec = gaussian_smooth(spec, stddev=sigma)\n\n return spec",
"def spectral_model(self):\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux']\n pars['emin'], pars['emax'] = self.energy_range\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n errs['amplitude'] = self.data['Unc_Flux']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n model = PowerLaw2(**pars)\n model.parameters.set_parameter_errors(errs)\n return model",
"def spectral_model(self):\n pars, errs = {}, {}\n pars['amplitude'] = self.data['Flux50']\n pars['emin'], pars['emax'] = self.energy_range\n pars['index'] = self.data['Spectral_Index'] * u.dimensionless_unscaled\n\n errs['amplitude'] = self.data['Unc_Flux50']\n errs['index'] = self.data['Unc_Spectral_Index'] * u.dimensionless_unscaled\n\n model = PowerLaw2(**pars)\n model.parameters.set_parameter_errors(errs)\n return model",
"def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)"
]
| [
"0.57497835",
"0.5429985",
"0.53425705",
"0.52842164",
"0.52446645",
"0.5162241",
"0.50572604",
"0.5036854",
"0.5012343",
"0.49678618",
"0.4942516",
"0.49243805",
"0.49133074",
"0.49097195",
"0.4870502",
"0.48516658",
"0.48274308",
"0.48018676",
"0.47904533",
"0.47681382",
"0.47626024",
"0.47570962",
"0.4750904",
"0.4749918",
"0.47402754",
"0.4737996",
"0.47278953",
"0.4715592",
"0.47007656",
"0.4678892"
]
| 0.6854461 | 0 |
Update the parameters, recording whether it was new for the ssp or basis parameters. If either of those changed, regenerate the relevant spectral grid(s). | def update(self, newparams):
for k, v in list(newparams.items()):
if k in self.basis_params:
# Make sure parameter is in dict, and check if it changed
if k not in self.params:
self.basis_dirty = True
self.params[k] = v
if np.any(v != self.params.get(k)):
self.basis_dirty = True
else:
try:
# here the sps.params.dirtiness should increase to 2 if
# there was a change
self.ssp.params[k] = v[0]
except KeyError:
pass
# now update params
self.params[k] = np.copy(np.atleast_1d(v))
# if we changed only csp_params but are relying on COMPSP, make
# sure we remake the basis
if self.safe and (self.ssp.params.dirtiness == 1):
self.basis_dirty = True
# if we changed only csp_params propagate them through but don't
# force basis remake (unless basis_dirty)
if self.ssp.params.dirtiness == 1:
self.ssp._update_params()
if self.basis_dirty | (self.ssp.params.dirtiness == 2):
self.build_basis() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()",
"def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)",
"def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()",
"def update(self, parameters):\n self.set_frequencies(parameters) # f_i\n self.set_coupling_weights(parameters) # w_ij\n self.set_phase_bias(parameters) # theta_i\n self.set_amplitudes_rate(parameters) # a_i\n self.set_nominal_amplitudes(parameters) # R_i",
"def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here",
"def _update_params(self, new_params):\n # update all the parameters\n for old_param, new_param in zip(self.model.parameters(), new_params):\n old_param.data += torch.from_numpy(new_param).to(old_param.device)",
"def update_parameters(parameters, grads, learning_rate):\n pass",
"def updateParameters(self, parameters):",
"def update_params(self):\n pass",
"def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)",
"def updateParameters(self):\n\n return",
"def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def _update_params(self):\n pass",
"def _update_parameters(self, delta):\n if delta is not None:\n self.SGD.update_with_L1_regularization(self.variables, delta, self.L1)",
"def updateParameters(self, parameters):\r\n #return\r\n parameters[2].enabled = 0\r\n parameters[3].enabled = 0\r\n parameters[4].enabled = 0",
"def updateParameters(self, parameters):\r\n #return\r\n parameters[2].enabled = 0\r\n parameters[3].enabled = 0\r\n parameters[4].enabled = 0",
"def updateParameters(self, parameters):\n\t\treturn",
"def updateParameters(self, parameters):\r\n if parameters[0].altered or parameters[1].altered:\r\n in_layer_value = parameters[0].valueAsText\r\n in_spacing_value = parameters[1].valueAsText\r\n if in_layer_value is not None and in_spacing_value is not None:\r\n parameters[5].value = in_layer_value + \"_densified_\" + str(int(in_spacing_value)) + \"m\"\r\n \r\n if parameters[2].altered:\r\n with arcpy.da.SearchCursor(parameters[0].valueAsText, parameters[2].valueAsText) as g_rows:\r\n parameters[3].filter.list = sorted(list(set([row[0] for row in g_rows])))\r\n with arcpy.da.SearchCursor(parameters[0].valueAsText, parameters[2].valueAsText) as l_rows:\r\n parameters[4].filter.list = sorted(list(set([row[0] for row in l_rows])))\r\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return",
"def updateParameters(self, parameters):\n return"
]
| [
"0.73953754",
"0.71710765",
"0.7023726",
"0.69720674",
"0.68460697",
"0.66629374",
"0.6656813",
"0.6526564",
"0.647887",
"0.6461412",
"0.6455409",
"0.638619",
"0.6373815",
"0.6373815",
"0.6373815",
"0.6373815",
"0.6373815",
"0.6373815",
"0.6373815",
"0.6373815",
"0.63724744",
"0.6355871",
"0.6348777",
"0.6348777",
"0.6288974",
"0.6251173",
"0.62120134",
"0.62120134",
"0.62120134",
"0.62120134"
]
| 0.74350566 | 0 |
Rebuild the component spectra from the SSPs. The component spectra include dust attenuation, redshifting, and spectral regridding. This is basically a proxy for COMPSP from FSPS, with a few small differences. In particular, there is interpolation in metallicity and the redshift and the output wavelength grid are taken into account. The dust treatment is less sophisticated. The assumption is that the basis is a N_z by N_age (by N_wave) array where the z values and age values are given by vectors located in params['tage'] and params['zmet'] This method is only called by self.update if necessary. | def build_basis(self):
if self.debug:
print('sps_basis: rebuilding basis')
# Setup the internal component basis arrays
inwave = self.ssp.wavelengths
nbasis = len(np.atleast_1d(self.params['mass']))
self.nbasis = nbasis
# nbasis = ( len(np.atleast_1d(self.params['zmet'])) *
# len(np.atleast_1d(self.params['tage'])) )
self.basis_spec = np.zeros([nbasis, len(inwave)])
self.basis_mass = np.zeros(nbasis)
i = 0
tesc = self.params['dust_tesc']
dust1, dust2 = self.params['dust1'], self.params['dust2']
for j, zmet in enumerate(self.params['zmet']):
for k, tage in enumerate(self.params['tage']):
# get the intrinsic spectrum at this metallicity and age
if self.safe:
# do it using compsp
if self.ssp._zcontinuous > 0:
self.ssp.params['logzsol'] = zmet
else:
self.ssp.params['zmet'] = zmet
w, spec = self.ssp.get_spectrum(tage=tage, peraa=True)
mass = self.ssp.stellar_mass
else:
# do it by hand. Faster but dangerous
spec, mass, lbol = self.ssp.ztinterp(zmet, tage, peraa=True)
self.basis_spec[i, :] = spec
self.basis_mass[i] = mass
i += 1
self.basis_dirty = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_component(self, i, outwave, filters):\n cspec = self.basis_spec[i, :].copy()\n cphot = 0\n inwave = self.ssp.wavelengths\n\n if self.safe:\n cspec = np.interp(self.params['outwave'], vac2air(inwave), cspec/a)\n cphot = 10**(-0.4 * getSED(inwave, cspec/a, filters))\n return cspec, cphot\n\n # Dust attenuation\n tage = self.params['tage'][i]\n tesc = self.params.get('dust_tesc', 0.01)\n dust1 = self.params.get('dust1', 0.0)\n dust2 = self.params['dust2']\n a = (1 + self.params.get('zred', 0.0))\n dust = (tage < tesc) * dust1 + dust2\n att = self.params['dust_curve'][0](inwave, **self.params)\n cspec *= np.exp(-att*dust)\n\n if filters is not None:\n cphot = 10**(-0.4 * getSED(inwave*a, cspec / a, filters))\n\n # Wavelength scale. Broadening and redshifting and placing on output\n # wavelength grid\n if self.params.get('lsf', [None])[0] is not None:\n cspec = smoothspec(vac2air(inwave) * a, cspec / a,\n self.params['sigma_smooth'], **self.params)\n else:\n sigma = self.params.get('sigma_smooth', 0.0)\n cspec = self.ssp.smoothspec(inwave, cspec, sigma)\n cspec = np.interp(self.params['outwave'], vac2air(inwave * a), cspec/a)\n\n return cspec, cphot",
"def _update_raw_data(params):\n from scipy.signal import filtfilt\n start = params['t_start']\n stop = params['raw'].time_as_index(start + params['duration'])[0]\n start = params['raw'].time_as_index(start)[0]\n data_picks = _pick_data_channels(params['raw'].info)\n data, times = params['raw'][:, start:stop]\n if params['projector'] is not None:\n data = np.dot(params['projector'], data)\n # remove DC\n if params['remove_dc'] is True:\n data -= np.mean(data, axis=1)[:, np.newaxis]\n if params['ba'] is not None:\n data[data_picks] = filtfilt(params['ba'][0], params['ba'][1],\n data[data_picks], axis=1, padlen=0)\n # scale\n for di in range(data.shape[0]):\n data[di] /= params['scalings'][params['types'][di]]\n # stim channels should be hard limited\n if params['types'][di] == 'stim':\n norm = float(max(data[di]))\n data[di] /= norm if norm > 0 else 1.\n # clip\n if params['clipping'] == 'transparent':\n data[np.logical_or(data > 1, data < -1)] = np.nan\n elif params['clipping'] == 'clamp':\n data = np.clip(data, -1, 1, data)\n params['data'] = data\n params['times'] = times",
"def get_spectrum(self, outwave=None, filters=None, peraa=False, **params):\n self.params.update(**params)\n # Pass the model parameters through to the sps object\n ncomp = len(self.params['mass'])\n for ic in range(ncomp):\n s, p, x = self.one_sed(component_index=ic, filterlist=filters)\n try:\n spec += s\n maggies += p\n extra += [x]\n except(NameError):\n spec, maggies, extra = s, p, [x]\n # `spec` is now in Lsun/Hz, with the wavelength array being the\n # observed frame wavelengths. Flux array (and maggies) have not been\n # increased by (1+z) due to cosmological redshift\n\n if outwave is not None:\n w = self.csp.wavelengths\n spec = np.interp(outwave, w, spec)\n # Distance dimming and unit conversion\n if (self.params['zred'] == 0) or ('lumdist' in self.params):\n # Use 10pc for the luminosity distance (or a number provided in the\n # lumdist key in units of Mpc). Do not apply cosmological (1+z)\n # factor to the flux.\n dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2\n a = 1.0\n else:\n # Use the comsological luminosity distance implied by this\n # redshift. Incorporate cosmological (1+z) factor on the flux.\n lumdist = cosmo.luminosity_distance(self.params['zred']).value\n dfactor = (lumdist * 1e5)**2\n a = (1 + self.params['zred'])\n if peraa:\n # spectrum will be in erg/s/cm^2/AA\n spec *= to_cgs * a / dfactor * lightspeed / outwave**2\n else:\n # Spectrum will be in maggies\n spec *= to_cgs * a / dfactor / 1e3 / (3631*jansky_mks)\n\n # Convert from absolute maggies to apparent maggies\n maggies *= a / dfactor\n \n return spec, maggies, extra",
"def spMultiIndex(self):\n # reset column levels\n self.spfiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n self.spRMS.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMS'), len(self.channels))],names=['Channel','datatype'])\n self.spRMSmavg.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMSmavg'), len(self.channels))],names=['Channel','datatype'])\n\n # list df vars for index specs\n dfs =[self.spfiltEEG, self.spRMS, self.spRMSmavg] # for > speed, don't store spinfilt_RMS as an attribute\n calcs = ['Filtered', 'RMS', 'RMSmavg']\n lvl0 = np.repeat(self.channels, len(calcs))\n lvl1 = calcs*len(self.channels) \n \n # combine & custom sort\n self.spindle_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])",
"def joss_ssr(ppg, accel_x, accel_y, accel_z, eng):\n\n freq = ppg.frequency\n\n N = 60 * freq # Resolution is 1 BPM\n\n Y = np.array((ppg.values, accel_x.values, accel_y.values, \n accel_z.values))\n\n Y = np.transpose(Y)\n\n L = Y.shape[1]\n\n spectra = ssr(Y, freq, N, eng)\n \n for i in range(0, L):\n spectra_i = spectra[:,i]\n\n spectra_i[:20] = 0\n spectra_i[220:] = 0\n\n spectra_i = spectra_i / np.max(spectra_i)\n spectra[:,i] = spectra_i\n\n\n # BPM axis\n bpm = 60 * freq / N * np.arange(N)\n\n aggression = 0.99\n accel_max = np.zeros((N))\n signal_ssr = spectra[:,0]\n\n # Modify the SSR signal by subtracting the maximum acceleration in each bin\n for i in range(0, bpm.size):\n # Max of acceleration at this frequency\n accel_max[i] = np.max([spectra[i,1], spectra[i,2], spectra[i,3]])\n\n signal_ssr[i] = signal_ssr[i] - aggression * accel_max[i]\n\n if DEBUG:\n plt.subplot(221)\n plt.title(\"x\")\n plt.plot(spectra[:,1])\n plt.subplot(222)\n plt.title(\"y\")\n plt.plot(spectra[:,2])\n plt.subplot(223)\n plt.title(\"z\")\n plt.plot(spectra[:,3])\n\n\n # Set all SSR bins lower than the maximum divided by 5 to 0\n max_bin = np.max(signal_ssr)\n signal_ssr[signal_ssr < max_bin / 4] = 0\n\n return signal_ssr, accel_max",
"def reduc(self,zarange=[20,50]):\n \n # First, take out a secular gain drift for each constant elevation\n # stare. Fit P(t) to each channel in a contiguous elevation stare,\n # normalize fit to mean=1, and normalize each chan to this.\n #deg=10\n #self.removedrift(deg)\n\n # Convert P-> T RJ\n #self.P2T()\n\n # Now fit a line to P(am) in each scan and store the results.\n self.fitam(zarange)",
"def soMultiIndex(self):\n # reset column levels\n self.sofiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n self.spsofiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])\n\n # list df vars for index specs\n # dfs =[self.sofiltEEG] # for > speed, don't store spinfilt_RMS as an attribute\n # calcs = ['Filtered']\n # lvl0 = np.repeat(self.channels, len(calcs))\n # lvl1 = calcs*len(self.channels) \n \n # # combine & custom sort --> what was this for??\n # self.so_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])",
"def one_sed(self, component_index=0, filterlist=[]):\n # Pass the model parameters through to the sps object, and keep track\n # of the mass of this component\n mass = 1.0\n for k, vs in list(self.params.items()):\n try:\n v = vs[component_index]\n except(IndexError, TypeError):\n v = vs\n if k in self.csp.params.all_params:\n if k == 'zmet':\n vv = np.abs(v - (np.arange(len(self.csp.zlegend)) + 1)).argmin() + 1\n else:\n vv = v.copy()\n self.csp.params[k] = vv\n if k == 'mass':\n mass = v\n # Now get the magnitudes and spectrum. The spectrum is in units of\n # Lsun/Hz/per solar mass *formed*\n w, spec = self.csp.get_spectrum(tage=self.csp.params['tage'], peraa=False)\n mags = getSED(w, lightspeed/w**2 * spec * to_cgs, filterlist)\n mfrac = self.csp.stellar_mass\n if np.all(self.params.get('mass_units', 'mstar') == 'mstar'):\n # Convert normalization units from per stellar masss to per mass formed\n mass /= mfrac\n # Output correct units\n return mass * spec, mass * 10**(-0.4*(mags)), mfrac",
"def combine(self,data):\n channels = []\n #if type(data.data) == pandas.core.frame.DataFrame:\n # spike_train_all_trial_ = np.array(data.data.stack())\n # spike_train_all_trial = np.reshape(spike_train_all_trial_,(spike_train_all_trial_.shape[0],1))\n #else:\n # spike_train_all_trial_ = np.array(data.data)\n # spike_train_all_trial = np.reshape(spike_train_all_trial_,(spike_train_all_trial_.shape[0],1))\n for c in range(data.nr_cells):\n channels.append(data.cell(c).getFlattend())\n\n d = DesignMatrix(channels[0].shape[0], self.component_width)\n d.setMask(self.mask)\n for c in self.components:\n d.add(c.getSplines(channels),c.header)\n #if self.mask.shape[0] == d.matrix.shape[1] + 1:\n # d.matrix = d.matrix[:,~self.mask[:-1]]\n self._header = d.header\n #print \"--\"\n #print d.matrix.shapeload\n return d.matrix",
"def get_components(self, outwave, filters, **params):\n if outwave is not None:\n params['outwave'] = outwave\n # This will rebuild the basis if relevant parameters changed\n self.update(params)\n\n # distance dimming and conversion from Lsun/AA to cgs\n dist10 = self.params.get('lumdist', 1e-5)/1e-5 # distance in units of 10s of pcs\n dfactor = to_cgs / dist10**2\n\n nebspec = self.nebular(params, self.params['outwave']) * dfactor\n cspec = np.empty([self.nbasis, len(outwave)])\n cphot = np.empty([self.nbasis, np.size(filters)])\n for i in range(self.nbasis):\n cspec[i,:], cphot[i,:] = self.process_component(i, outwave, filters)\n\n return cspec * dfactor, nebspec, cphot * dfactor, self.basis_mass",
"def combine_components(self):\n\n # ensure is array with correct shape\n if self.separated_components_ft.shape[1] != 3:\n raise ValueError(\"Expected shifted_components_ft to have shape (nangles, 3, ny, nx), where components are\"\n \"O(f)*otf(f), O(f-fo)*otf(f), O(f+fo)*otf(f). But size of second dimension was not 3.\")\n\n # upsample image before shifting\n f_upsample = 2\n otf_us = tools.expand_fourier_sp(self.otf, mx=f_upsample, my=f_upsample, centered=True)\n\n # upsampled frequency data\n fx_us = tools.get_fft_frqs(f_upsample * self.nx, self.dx / f_upsample)\n dfx_us = fx_us[1] - fx_us[0]\n fy_us = tools.get_fft_frqs(f_upsample * self.ny, self.dx / f_upsample)\n dfy_us = fy_us[1] - fy_us[0]\n\n def ff_shift(f): return np.sqrt((self.fx[None, :] - f[0]) ** 2 + (self.fy[:, None] - f[1]) ** 2)\n def ff_shift_upsample(f): return np.sqrt((fx_us[None, :] - f[0]) ** 2 + (fy_us[:, None] - f[1]) ** 2)\n\n # wiener filter deconvolution to divide by H(k)\n # components_deconvolved_ft = np.zeros((self.nangles, 3, self.ny, self.nx), dtype=np.complex)\n components_deconvolved_ft = np.zeros((self.nangles, 3, self.ny * f_upsample, self.nx * f_upsample), dtype=np.complex)\n snr = np.zeros((self.nangles, 3, self.ny, self.nx))\n # shift to correct place in frq space\n components_shifted_ft = np.zeros((self.nangles, 3, self.ny * f_upsample, self.nx * f_upsample), dtype=np.complex)\n snr_shifted = np.zeros(components_shifted_ft.shape)\n # weight and average\n components_weighted = np.zeros(components_shifted_ft.shape, dtype=np.complex)\n weights = np.zeros(components_weighted.shape)\n\n # shift and filter components\n for ii in range(self.nangles):\n # loop over components, O(f)H(f), m*O(f - f_o)H(f), m*O(f + f_o)H(f)\n for jj, eps in enumerate([0, 1, -1]):\n params = list(self.power_spectrum_params[ii, jj, :-1]) + [0]\n snr[ii, jj] = power_spectrum_fn(params, ff_shift(self.frqs[ii] * eps), 1) / self.noise_power[ii, jj]\n\n # get shifted SNR\n otf_shifted, _, _ = tools.translate_pix(otf_us, eps*self.frqs[ii], dx=dfx_us, dy=dfy_us, mode='no-wrap')\n snr_shifted[ii, jj] = power_spectrum_fn(params, ff_shift_upsample((0, 0)), 1) / self.noise_power[ii, jj]\n\n # weights\n weights[ii, jj] = get_snr_weight(otf_shifted, snr_shifted[ii, jj])\n\n if True:\n # deconvolve, then shift\n\n # deconvolve\n deconv_temp, _ = \\\n wiener_deconvolution(self.separated_components_ft[ii, jj] / self.mod_depths[ii, jj], self.otf,\n snr[ii, jj])\n\n components_deconvolved_ft[ii, jj] = tools.expand_fourier_sp(deconv_temp, mx=f_upsample, my=f_upsample, centered=True)\n\n # shift and expand\n components_shifted_ft[ii, jj] = tools.translate_ft(components_deconvolved_ft[ii, jj], eps * self.frqs[ii], self.dx / f_upsample)\n else:\n # shift then deconvolve\n\n # shift and expand\n components_shifted_ft[ii, jj] = tools.translate_ft(\n tools.expand_fourier_sp(self.separated_components_ft[ii, jj], mx=f_upsample, my=f_upsample, centered=True),\n eps * self.frqs[ii], self.dc / f_upsample)\n\n # deconvolved\n components_shifted_ft[ii, jj], _ = \\\n wiener_deconvolution(components_shifted_ft[ii, jj] / self.mod_depths[ii, jj], otf_shifted,\n snr_shifted[ii, jj])\n\n # optionally remove frequency data around modulation frequency, to avoid artifacts\n if self.size_near_fo_to_remove != 0:\n to_remove = np.abs(ff_shift_upsample(-self.frqs[ii])) < self.size_near_fo_to_remove * np.linalg.norm(self.frqs[ii])\n components_shifted_ft[ii, jj][to_remove] = 0\n\n to_remove = np.abs(ff_shift_upsample(self.frqs[ii])) < self.size_near_fo_to_remove * 
np.linalg.norm(self.frqs[ii])\n components_shifted_ft[ii, jj][to_remove] = 0\n\n # correct for wrong global phases (on shifted components before weighting,\n # but then apply to weighted components)\n if self.global_phase_correction:\n self.phase_corrections = global_phase_correction(components_shifted_ft)\n else:\n self.phase_corrections = np.zeros(self.nangles)\n\n # combine components\n components_weighted = components_shifted_ft * weights\n for ii in range(self.nangles):\n components_weighted[ii, 1] = np.exp(1j * self.phase_corrections[ii]) * components_weighted[ii, 1]\n components_weighted[ii, 2] = np.exp(-1j * self.phase_corrections[ii]) * components_weighted[ii, 2]\n\n # final averaging\n weight_norm = np.sum(weights, axis=(0, 1)) + self.wiener_parameter\n sim_sr_ft = np.nansum(components_weighted, axis=(0, 1)) / weight_norm\n\n # Fourier transform back to get real-space reconstructed image\n apod = scipy.signal.windows.tukey(sim_sr_ft.shape[1], alpha=0.1)[None, :] * \\\n scipy.signal.windows.tukey(sim_sr_ft.shape[0], alpha=0.1)[:, None]\n\n sim_sr = fft.fftshift(fft.ifft2(fft.ifftshift(sim_sr_ft * apod))).real\n\n return sim_sr, sim_sr_ft, components_deconvolved_ft, components_shifted_ft,\\\n weights, weight_norm, snr, snr_shifted",
"def repackage_coeff(boss_pca_fil=None, sdss_pca_fil=None,\n outfil='qso_templates_v2.0.fits'):\n # PCA values\n if boss_pca_fil is None:\n boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'\n hdu = fits.open(boss_pca_fil)\n boss_pca_coeff = hdu[1].data\n\n if sdss_pca_fil is None:\n sdss_pca_fil = 'SDSS_DR7Lya_PCA_values_nocut.fits.gz'\n hdu2 = fits.open(sdss_pca_fil)\n sdss_pca_coeff = hdu2[1].data\n\n # Redshifts\n boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'\n bcat_hdu = fits.open(boss_cat_fil)\n t_boss = bcat_hdu[1].data\n boss_zQSO = np.array(t_boss['z_pipe'])\n\n # Open the SDSS catalog file\n sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'\n scat_hdu = fits.open(sdss_cat_fil)\n t_sdss = scat_hdu[1].data\n sdss_zQSO = t_sdss['z']\n if len(sdss_pca_coeff) != len(sdss_zQSO):\n print('Need to finish running the SDSS models!')\n sdss_zQSO = sdss_zQSO[0:len(sdss_pca_coeff)]\n\n # Eigen vectors\n eigen, eigen_wave = fbq.read_qso_eigen()\n\n # Write\n phdu = fits.PrimaryHDU()\n bp_hdu = fits.BinTableHDU(boss_pca_coeff)\n bp_hdu.name = 'BOSS_PCA'\n bz_hdu = fits.ImageHDU(boss_zQSO)\n bz_hdu.name = 'BOSS_z'\n sp_hdu = fits.BinTableHDU(sdss_pca_coeff)\n sp_hdu.name = 'SDSS_PCA'\n sz_hdu = fits.ImageHDU(sdss_zQSO)\n sz_hdu.name = 'SDSS_z'\n e_hdu = fits.ImageHDU(eigen)\n e_hdu.name = 'SDSS_EIGEN'\n ew_hdu = fits.ImageHDU(eigen_wave)\n ew_hdu.name = 'SDSS_EIGEN_WAVE'\n\n hdulist = fits.HDUList([phdu, bp_hdu, bz_hdu, sp_hdu, sz_hdu,\n e_hdu, ew_hdu])\n hdulist.writeto(outfil, overwrite=True)\n print('Wrote {:s}'.format(outfil))",
"def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = (self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)",
"def _ssc(pars, nu):\n\t(log10_gamma_max, redshift, delta, log10_R, log10_B, log10_Norm, index, log10_gamma_c) = pars\n\n\t# define from the input parameters the dictionary to be feeded to the model\n\t# we neglect the time-dependent part for now\n\ttime_grid = dict(time_min=0, time_max=3, time_bins=50, time_inj=2)\n\t# gamma grid\n\tgamma_grid = dict(log10_gamma_min=2, log10_gamma_max=log10_gamma_max, gamma_bins=50)\n\t# emission region, again time dependent part is ignored\n\temission_region = dict(log10_R=log10_R, R_unit='cm', delta=delta,\n\t\t\t\t\t\t log10_B=log10_B, B_unit='G', t_esc=1.5, z=redshift)\n\t# injected spectrum\n\tinjected_spectrum = dict(type='ExponentialCutoffPowerLaw',\n\t\t\t\t\t\t\t log10_Norm=log10_Norm,\n\t\t\t\t\t\t\t Norm_unit='cm-3',\n\t\t\t\t\t\t\t index=index,\n\t\t\t\t\t\t\t log10_gamma_c=log10_gamma_c)\n\n\t# dump into a tmp yaml file\n\twith open('tmp_config.yaml', 'w') as yaml_file:\n\t\tyaml.dump({'time_grid': time_grid,\n\t\t\t\t 'gamma_grid': gamma_grid,\n\t\t\t\t 'emission_region': emission_region,\n\t\t\t\t 'injected_spectrum': injected_spectrum},\n\t\t\t\t yaml_file, default_flow_style=False)\n\n\n\t# initialize the ssc model\n\tmodel = BaseModel('tmp_config.yaml')\n\n\t# define the base electron population for now just as the injected one\n\tgamma = model.gamma\n\tN_e = model.N_e_inj(gamma)\n\n\t# test synchrotron\n\tsyn = Synchrotron(model)\n\tic = InverseCompton(model)\n\n\tobs_nu = nu * u.Hz\n\t# de - boosting, for intrinsic values\n\tnu = obs_nu / model.blob.delta\n\n\t# transform to energy\n\tE = const.h * obs_nu\n\n\tsyn_flux = syn.flux(nu, N_e, self_absorption=True)\n\tic_flux = ic.flux(nu, N_e, ebl_absorption=True)\n\n\tsed = (E**2*(syn_flux + ic_flux)).to('erg cm-2 s-1')\n\n\treturn sed.value",
"def constructAllWRFstreamsRegridded(first=1):\n pattern.a.load()\n WRFstreams = pickle.load(open(summary_folder+'WRF/dbzstreamAll.pydump', 'r'))\n for M in range(first, len(WRFstreams)+1):\n ds = WRFstreams[M-1] # the models are labelled 1-20\n print \"\\n.......\\n\", M, ds.name \n #ds.outputFolder = summary_folder+ 'WRF[regridded]'\n #debug\n #print ds[0].outputFolder\n #print ds[0].dataPath\n print 'loading...'\n ds.load()\n # debug\n #print ds[0].matrix.shape\n #ds[0].show()\n #ds[1].show()\n #ds[4].show()\n # END debug\n ds.setOutputFolder(summary_folder+'WRF[regridded]/')\n ds.setImageFolder(summary_folder+'WRF[regridded]/')\n #print 'outputFolder:', ds.outputFolder\n print ds[0].outputFolder\n print 'imageFolder:', ds.imageFolder\n print 'regridding...'\n ds.regrid(pattern.a)\n for D in ds:\n print D.name,\n if 'regridded' not in D.name:\n D.name += '[regridded]'\n # debug\n #print D.matrix.shape\n #D.show()\n # END debug\n print 'saving...'\n pickle.dump(ds, open(summary_folder + 'WRF[regridded]/dbzstream' + ('0'+str(M))[-2:] + '.pydump', 'w'))\n return WRFstreams",
"def calc_spindle_psd_concat(self, psd_bandwidth):\n \n print('Calculating power spectra (this may take a few minutes)...')\n self.metadata['spindle_analysis']['psd_dtype'] = 'raw_concat'\n self.metadata['spindle_analysis']['psd_method'] = 'multitaper'\n self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth\n sf = self.metadata['analysis_info']['s_freq']\n \n spindle_psd = {}\n spindle_multitaper_calcs = pd.DataFrame(index=['data_len', 'N', 'W', 'NW', 'K'])\n for chan in self.spindles:\n #print(f'Calculating spectra for {chan}...')\n if len(self.spindles[chan]) > 0:\n # concatenate spindles\n spindles = [self.spindles[chan][x].Raw.values for x in self.spindles[chan]]\n data = np.concatenate(spindles)\n \n # record PS params [K = 2NW-1]\n N = len(data)/sf\n W = psd_bandwidth\n K = int((2*N*W)-1)\n spindle_multitaper_calcs[chan] = [len(data), N, W, N*W, K] \n \n # calculate power spectrum\n pwr, freqs = psd_array_multitaper(data, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25, \n normalization='full', verbose=0)\n # convert to series & add to dict\n psd = pd.Series(pwr, index=freqs)\n spindle_psd[chan] = psd\n \n self.spindle_multitaper_calcs = spindle_multitaper_calcs\n self.spindle_psd_concat = spindle_psd\n print('Done. Spectra stored in obj.spindle_psd_concat. Calculations stored in obj.spindle_multitaper_calcs.\\n')",
"def reconstruct(self):\n self.print_tee(\"starting reconstruction\", self.log_file)\n\n # #############################################\n # estimate SIM parameters\n # #############################################\n # estimate frequencies\n tstart = time.time() # since done using joblib, process_time() is not useful...\n\n if self.use_fixed_frq:\n self.frqs = self.frqs_guess\n self.print_tee(\"using fixed frequencies\", self.log_file)\n else:\n if self.find_frq_first:\n self.frqs = self.estimate_sim_frqs(self.imgs_ft, self.imgs_ft, self.frqs_guess)\n else:\n self.print_tee(\"doing phase demixing prior to frequency finding\", self.log_file)\n self.separated_components_ft = separate_components(self.imgs_ft, self.phases_guess, np.ones((self.nangles, self.nphases)))\n imgs1 = np.expand_dims(self.separated_components_ft[:, 0], axis=1)\n imgs2 = np.expand_dims(self.separated_components_ft[:, 1], axis=1)\n self.frqs = self.estimate_sim_frqs(imgs1, imgs2, self.frqs_guess)\n\n tend = time.time()\n self.print_tee(\"fitting frequencies took %0.2fs\" % (tend - tstart), self.log_file)\n\n # estimate phases\n tstart = time.process_time()\n if self.use_fixed_phase:\n self.phases = self.phases_guess\n self.amps = np.ones((self.nangles, self.nphases))\n self.print_tee(\"Using fixed phases\", self.log_file)\n else:\n self.phases, self.amps = self.estimate_sim_phases(self.frqs, self.phases_guess)\n\n tend = time.process_time()\n self.print_tee(\"estimated %d phases in %0.2fs\" % (self.nangles * self.nphases, tend - tstart), self.log_file)\n\n # separate components\n self.separated_components_ft = separate_components(self.imgs_ft, self.phases, self.amps)\n\n # estimate modulation depths and power spectrum fit parameters\n tstart = time.process_time()\n # for the moment, need to do this feet even if have fixed mod depth, because still need the\n # power spectrum parameters\n self.mod_depths, self.power_spectrum_params, self.pspec_masks = self.estimate_mod_depths()\n\n if self.use_fixed_mod_depths:\n self.mod_depths = np.zeros((self.nangles, self.nphases))\n self.mod_depths[:, 0] = 1\n for jj in range(1, self.nphases):\n self.mod_depths[:, jj] = self.mod_depths_guess\n\n # also correct power spectrum params\n self.power_spectrum_params[:, jj, 2] = self.mod_depths[:, jj]\n\n tend = time.process_time()\n self.print_tee('estimated %d modulation depths in %0.2fs' % (self.nangles, tend - tstart), self.log_file)\n\n # #############################################\n # estimate modulation contrast to noise ratio for raw images\n # #############################################\n mcnr = np.zeros((self.nangles, self.nphases))\n for ii in range(self.nangles):\n for jj in range(self.nphases):\n mcnr[ii, jj] = get_mcnr(self.imgs_ft[ii, jj], self.frqs[ii], self.fx, self.fy, self.fmax)\n\n # if mcnr is too low (typically < 1), use guess values instead\n if self.default_to_guess_on_low_mcnr and np.min(mcnr[ii]) < self.min_mcnr and self.frqs_guess is not None:\n self.frqs[ii] = self.frqs_guess[ii]\n self.print_tee(\"Angle %d/%d, minimum mcnr = %0.2f is less than the minimum value, %0.2f,\"\n \" so fit frequency will be replaced with guess\"\n % (ii + 1, self.nangles, np.min(mcnr[ii]), self.min_mcnr), self.log_file)\n\n for jj in range(self.nphases):\n mcnr[ii, jj] = get_mcnr(self.imgs_ft[ii, jj], self.frqs[ii], self.fx, self.fy, self.fmax)\n\n self.mcnr = mcnr\n # for convenience, also save periods and angles\n self.periods = 1 / np.sqrt(self.frqs[:, 0] ** 2 + self.frqs[:, 1] ** 2)\n self.angles = np.angle(self.frqs[:, 0] + 1j 
* self.frqs[:, 1])\n\n # #############################################\n # estimate noise in component images\n # #############################################\n self.noise_power = self.power_spectrum_params[:, :, -1]\n\n # #############################################\n # SIM reconstruction\n # #############################################\n tstart = time.process_time()\n self.img_sr, self.img_sr_ft, self.components_deconvolved_ft, self.components_shifted_ft, \\\n self.weights, self.weight_norm, self.snr, self.snr_shifted = self.combine_components()\n # self.img_sr, self.img_sr_ft, self.components_deconvolved_ft, self.components_shifted_ft, \\\n # self.weights, self.weight_norm, self.snr, self.snr_shifted = self.combine_components_fairSIM()\n tend = time.process_time()\n\n self.print_tee(\"combining components took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # widefield deconvolution\n # #############################################\n # get signal to noise ratio\n wf_noise = get_noise_power(self.widefield_ft, self.fx, self.fy, self.fmax)\n fit_result, self.mask_wf = fit_power_spectrum(self.widefield_ft, self.otf, self.fx, self.fy, self.fmax, self.fbounds,\n init_params=[None, self.power_spectrum_params[0, 0, 1], 1, wf_noise],\n fixed_params=[False, True, True, True])\n\n self.pspec_params_wf = fit_result['fit_params']\n ff = np.sqrt(self.fx[None, :]**2 + self.fy[:, None]**2)\n sig = power_spectrum_fn([self.pspec_params_wf[0], self.pspec_params_wf[1], self.pspec_params_wf[2], 0], ff, 1)\n wf_snr = sig / wf_noise\n # deconvolution\n wf_decon_ft, wfilter = wiener_deconvolution(self.widefield_ft, self.otf, wf_snr, snr_includes_otf=False)\n\n # upsample to make fully comparable to reconstructed image\n self.widefield_deconvolution_ft = tools.expand_fourier_sp(wf_decon_ft, 2, 2, centered=True)\n self.widefield_deconvolution = fft.fftshift(fft.ifft2(fft.ifftshift(self.widefield_deconvolution_ft))).real\n\n # #############################################\n # print parameters\n # #############################################\n self.print_parameters()\n\n try:\n self.log_file.close()\n except AttributeError:\n pass",
"def prepare_templates(params, outfile, redo=False):\n if os.path.exists(outfile) and not redo:\n return\n emiles = EMILES()\n wmin = params[\"wmin\"] * u.micrometer\n wmax = params[\"wmax\"] * u.micrometer\n # Modify wmin to compensate for the recession velocity of the system\n zmax = (params[\"vsyst\"] + 3000) / const.c.to(\"km/s\").value\n wrest = wmin / (1 + zmax)\n grid = np.array(np.meshgrid(params[\"ages\"], params[\"metals\"],\n params[\"bis\"])).T.reshape(-1, 3)\n ssppars = Table(grid, names=[\"T\", \"Z\", \"imf\"])\n filenames = []\n for args in grid:\n filenames.append(os.path.join(emiles.data_dir,\n emiles.filename(*args)))\n wave, spec = misc.read_spec(filenames[0])\n wave = wave * u.angstrom\n idx = np.where((wave > wrest) & (wave <= wmax))\n wave = wave[idx]\n spec = spec[idx]\n wrange = [wave[0].to(\"angstrom\").value, wave[-1].to(\"angstrom\").value]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps = np.zeros((len(filenames), len(newflux)))\n print(\"Processing SSP files\")\n for i, fname in tqdm(enumerate(filenames)):\n spec = fits.getdata(fname)[idx]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps[i] = newflux\n norm = np.median(ssps)\n ssps /= norm\n hdu1 = fits.PrimaryHDU(ssps)\n hdu1.header[\"EXTNAME\"] = \"SSPS\"\n hdu1.header[\"BSCALE\"] = (norm, \"Scale to convert from ADU to flux.\")\n hdu2 = fits.BinTableHDU(ssppars)\n hdu2.header[\"EXTNAME\"] = \"PARAMS\"\n hdu1.header[\"CRVAL1\"] = logLam[0]\n hdu1.header[\"CD1_1\"] = logLam[1] - logLam[0]\n hdu1.header[\"CRPIX1\"] = 1.\n # Making wavelength array\n hdu3 = fits.BinTableHDU(Table([logLam], names=[\"loglam\"]))\n hdu3.header[\"EXTNAME\"] = \"LOGLAM\"\n hdulist = fits.HDUList([hdu1, hdu2, hdu3])\n hdulist.writeto(outfile, overwrite=True)\n return",
"def __update(self):\n\n # Make sure loads have been assigned to group\n if type(self.appliedLoad) == Load:\n self.appliedLoad = LoadSet(self.appliedLoad)\n elif type(self.appliedLoad) != LoadSet:\n raise TypeError(\"Applied load must be a Load or LoadSet\")\n\n # Begin Calculations\n _cg = self.cg # calculate the cg once to save computation time\n _appLoad = self.appliedLoad.totalForce\n _appMoment = self.appliedLoad.totalMoment\n\n coef_mat = np.zeros((len(self) * 3, len(self) * 3)) # coeff matrix\n soln_mat = np.zeros(len(self) * 3) # solution matrix\n\n cSet = [[i, i+1, i+2] for i in range(0, 3 * len(self), 3)]\n rSet = [[i+6, i+7, i+8] for i in range(0, 3 * (len(self) - 2), 3)]\n\n for i, j in enumerate(cSet):\n # i = column fastener ID\n # j = column fastener set\n # Mx = yFz - zFy\n # My = zFx - xFz\n # Mz = xFy - yFx\n\n Fx = j[0]\n Fy = j[1]\n Fz = j[2]\n\n # fill in first three rows\n coef_mat[0][Fx] = 1 # sum of Fx\n coef_mat[1][Fy] = 1 # sum of Fy\n coef_mat[2][Fz] = 1 # sum of Fz\n\n # fill in fourth row (sum of Mx at CG)\n coef_mat[3][Fy] = -(F[i].xyz[2] - _cg[2]) # -zFy\n coef_mat[3][Fz] = +(F[i].xyz[1] - _cg[1]) # +yFz\n\n # fill in fifth row (sum of My at CG)\n coef_mat[4][Fx] = +(F[i].xyz[2] - _cg[2]) # +zFx\n coef_mat[4][Fz] = -(F[i].xyz[0] - _cg[0]) # -xFz\n\n # fill in sixth row (sum of Mz at CG)\n coef_mat[5][Fx] = -(F[i].xyz[1] - _cg[1]) # -yFx\n coef_mat[5][Fy] = +(F[i].xyz[0] - _cg[0]) # +xFy\n\n for u, w in enumerate(rSet):\n # u = row fastener ID\n # w = row fastener set\n\n rX = w[0]\n rY = w[1]\n rZ = w[2]\n\n coef_mat[rX][Fy] = -(F[i].xyz[2] - F[u].xyz[2]) # -zFy\n coef_mat[rX][Fz] = +(F[i].xyz[1] - F[u].xyz[1]) # +yFz\n\n coef_mat[rY][Fx] = +(F[i].xyz[2] - F[u].xyz[2]) # +zFx\n coef_mat[rY][Fz] = -(F[i].xyz[0] - F[u].xyz[0]) # -xFz\n\n coef_mat[rZ][Fx] = -(F[i].xyz[1] - F[u].xyz[1]) # -yFx\n coef_mat[rZ][Fy] = +(F[i].xyz[0] - F[u].xyz[0]) # +xFy\n\n # fill in the solution matrix (soln_mat)\n for i in range(3):\n soln_mat[i] = -_netLoad.force[i]\n soln_mat[i+3] = -_netLoad.moment[i]\n\n # fill in the remaining rows\n for i, j in enumerate(rSet):\n # i = fastener\n # j = row\n\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n # Mx = (y_cg - y_i)F_znet - (z_cg - z_i)F_ynet + M_xnet\n soln_mat[rX] = - ((_cg[1] - F[i].xyz[1]) * _netLoad.force[2]\n - (_cg[2] - F[i].xyz[2]) * _netLoad.force[1]\n + _netLoad.moment[0])\n\n # My = (z_cg - z_i)F_xnet - (x_cg - x_i)F_znet + M_ynet\n soln_mat[rY] = -((_cg[2] - F[i].xyz[2]) * _netLoad.force[0]\n - (_cg[0] - F[i].xyz[0]) * _netLoad.force[2]\n + _netLoad.moment[1])\n\n # Mz = (x_cg - x_i)F_ynet - (y_cg - y_i)F_xnet + M_znet\n soln_mat[rZ] = -((_cg[0] - F[i].xyz[0]) * _netLoad.force[1]\n - (_cg[1] - F[i].xyz[1]) * _netLoad.force[0]\n + _netLoad.moment[2])\n\n # Solve system of equations\n matSol = np.linalg.lstsq(coef_mat, soln_mat)[0]\n\n # Add resulting fastener loads to fastener objects\n for i, j in enumerate(cSet):\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n F[i].force[0] = matSol[rX]\n F[i].force[1] = matSol[rY]\n F[i].force[2] = matSol[rZ]",
"def reconstruct_pu(self, receivers, compute_uxy = True):\n # Initialize\n self.p_recon = np.zeros((receivers.coord.shape[0], len(self.controls.k0)), dtype=complex)\n self.uz_recon = np.zeros((receivers.coord.shape[0], len(self.controls.k0)), dtype=complex)\n if compute_uxy:\n self.ux_recon = np.zeros((receivers.coord.shape[0], len(self.controls.k0)), dtype=complex)\n self.uy_recon = np.zeros((receivers.coord.shape[0], len(self.controls.k0)), dtype=complex)\n # Loop over frequency\n bar = tqdm(total = len(self.controls.k0), desc = 'Reconstructing sound field...')\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_p = k0 * self.dir\n # Form the new sensing matrix\n h_mtx = np.exp(1j*receivers.coord @ k_p.T)\n # compute P and U\n self.p_recon[:,jf] = h_mtx @ self.pk[:,jf]\n self.uz_recon[:,jf] = -((np.divide(k_p[:,2], k0)) * h_mtx) @ self.pk[:,jf]\n if compute_uxy:\n self.ux_recon[:,jf] = -((np.divide(k_p[:,0], k0)) * h_mtx) @ self.pk[:,jf]\n self.uy_recon[:,jf] = -((np.divide(k_p[:,1], k0)) * h_mtx) @ self.pk[:,jf]\n bar.update(1)\n bar.close()",
"def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop",
"def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc",
"def __InitializeNorms(self, norm_SF, norm_plane):\n # Initializes values \n nc = self.__nc_RSoft_I\n n_SF = self.__n_SF_rad+self.__n_SF_ang\n mean_SF = np.zeros(n_SF)\n std_SF = np.zeros(n_SF)\n cov_SF = np.zeros((n_SF,n_SF))\n n_parts = 0\n\n for idx_type, type_ in enumerate(self.__types_unique):\n n_parts = 0\n idx_type_SF = np.where(self.__types==type_)[0]\n for f in range(self._n_f):\n # Finds particle typtes for each particle. \n particle_types = self.__NcIO_dyn.GetDataCol(f,'type')\n type_ids = np.where(particle_types==type_)[0]\n\n # Obtains radial and angular SFs for f\n if self.__containsRadial:\n rSF = nc.variables['radial_structures'][f][type_ids]\n else:\n rSF = np.zeros((len(type_ids),0))\n if self.__containsAngular:\n aSF = nc.variables['angular_structures'][f][type_ids]\n else:\n aSF = np.zeros((len(type_ids),0))\n SF = np.hstack((rSF,aSF))\n SF = SF[~np.isnan(np.sum(SF,axis=1))] # SHOULD REMOVE NaNs\n\n # Counts number of SFs in frame and sums particles to find\n # mean. We do not use mean function in case number of \n # particles changes between frames\n n_parts += len(SF)\n mean_SF[idx_type_SF] += np.sum(SF[:,idx_type_SF],axis=0)\n cov_SF[idx_type_SF[:,None],idx_type_SF[None,:]] += \\\n np.dot(SF[:,idx_type_SF].T,SF[:,idx_type_SF])\n\n # Calculates mean and covariance\n mean_SF[idx_type_SF] /= float(n_parts)\n cov_SF[idx_type_SF[:,None],idx_type_SF[None,:]] /= \\\n float(n_parts)\n cov_SF[idx_type_SF[:,None],idx_type_SF[None,:]] -= \\\n np.outer(mean_SF[idx_type_SF],mean_SF[idx_type_SF])\n std_SF = np.sqrt(np.diagonal(cov_SF))\n\n # Checks if std_SF == 0 for any structure functions\n if np.any(std_SF==0):\n print('WARNING: stdev of following structure functions is 0')\n idx_0s = np.where(std_SF==0)[0]\n for idx_0 in idx_0s:\n std_SF[idx_0] = 1\n if idx_0 < self.__n_SF_rad:\n mu = self.mus[idx_0]\n L = self.Ls[idx_0]\n X = self.radial_Xs[idx_0]\n Y = self.radial_Ys[idx_0]\n print(' radial structure function: mu = '+str(mu)+\\\n ', L = '+str(L)+', X = '+str(X)+', Y = '+str(Y))\n else:\n idx_0 -= self.__n_SF_rad\n xi = self.xis[idx_0]\n l = self.lambdas[idx_0] \n z = self.zetas[idx_0]\n X = self.angular_Xs[idx_0]\n Y = self.angular_Ys[idx_0]\n Z = self.angular_Za[idx_0]\n print(' angular structure function: xi = '+str(xi)+\\\n ', lambda = '+str(l)+', zeta = '+str(z)+\\\n ', X = '+str(X)+', Y = '+str(Y)+', Z = '+str(Z))\n\n self._mean_SF = mean_SF\n self._cov_SF = cov_SF\n self._std_SF = std_SF",
"def hessian_full(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n fe_params = params.get_fe_params()\n cov_re = params.get_cov_re()\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n # Blocks for the fixed and random effects parameters.\n hess_fe = 0.\n hess_re = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n hess_fere = np.zeros((self.k_re2, self.k_fe),\n dtype=np.float64)\n\n fac = self.n_totobs\n if self.reml:\n fac -= self.exog.shape[1]\n\n rvir = 0.\n xtvix = 0.\n xtax = [0.,] * self.k_re2\n B = np.zeros(self.k_re2, dtype=np.float64)\n D = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)\n F = [[0.,]*self.k_re2 for k in range(self.k_re2)]\n for k in range(self.n_groups):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n\n # The residuals\n expval = np.dot(exog, fe_params)\n resid = self.endog_li[k] - expval\n\n viexog = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n exog)\n xtvix += np.dot(exog.T, viexog)\n vir = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n resid)\n rvir += np.dot(resid, vir)\n\n for jj1,mat1 in self._gen_dV_dPsi(ex_r):\n\n hess_fere[jj1,:] += np.dot(viexog.T,\n np.dot(mat1, vir))\n if self.reml:\n xtax[jj1] += np.dot(viexog.T, np.dot(mat1, viexog))\n\n B[jj1] += np.dot(vir, np.dot(mat1, vir))\n E = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,\n mat1)\n\n for jj2,mat2 in self._gen_dV_dPsi(ex_r, jj1):\n Q = np.dot(mat2, E)\n Q1 = Q + Q.T\n vt = np.dot(vir, np.dot(Q1, vir))\n D[jj1, jj2] += vt\n if jj1 != jj2:\n D[jj2, jj1] += vt\n R = _smw_solve(1., ex_r, ex2_r, cov_re,\n cov_re_inv, Q)\n rt = np.trace(R) / 2\n hess_re[jj1, jj2] += rt\n if jj1 != jj2:\n hess_re[jj2, jj1] += rt\n if self.reml:\n F[jj1][jj2] += np.dot(viexog.T,\n np.dot(Q, viexog))\n\n hess_fe -= fac * xtvix / rvir\n\n hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)\n\n hess_fere = -fac * hess_fere / rvir\n\n if self.reml:\n for j1 in range(self.k_re2):\n Q1 = np.linalg.solve(xtvix, xtax[j1])\n for j2 in range(j1 + 1):\n Q2 = np.linalg.solve(xtvix, xtax[j2])\n a = np.trace(np.dot(Q1, Q2))\n a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))\n a *= 0.5\n hess_re[j1, j2] += a\n if j1 > j2:\n hess_re[j2, j1] += a\n\n # Put the blocks together to get the Hessian.\n m = self.k_fe + self.k_re2\n hess = np.zeros((m, m), dtype=np.float64)\n hess[0:self.k_fe, 0:self.k_fe] = hess_fe\n hess[0:self.k_fe, self.k_fe:] = hess_fere.T\n hess[self.k_fe:, 0:self.k_fe] = hess_fere\n hess[self.k_fe:, self.k_fe:] = hess_re\n\n return hess",
"def dstrf_snapshots(rec, model_list, D=11, out_channel=0, time_step=85, snr_threshold=5):\n t_indexes = np.arange(time_step, rec['stim'].shape[1], time_step)\n dlc = rec['dlc'].as_continuous().T\n log.info(f\"Computing dSTRF at {len(t_indexes)} timepoints, {dlc.shape[1]} DLC channels, t_step={time_step}\")\n if rec.meta['batch'] in [346, 347]:\n dicount=didx.shape[0]\n else:\n dicount=4\n\n dstrf = {}\n mdstrf = np.zeros((len(model_list), dicount, rec['stim'].shape[0], D))\n pc1 = np.zeros((len(model_list), dicount, rec['stim'].shape[0], D))\n pc2 = np.zeros((len(model_list), dicount, rec['stim'].shape[0], D))\n pc_count=3\n pc_mag_all = np.zeros((len(model_list), dicount, pc_count))\n for di in range(dicount):\n dlc1 = dlc.copy()\n dcount=dlc1.shape[1]\n didx_ = adjust_didx(dlc, didx)\n\n for t in t_indexes:\n dlc1[(t-didx_.shape[1]+1):(t+1), :] = didx_[di,:,:dcount]\n log.info(f\"DLC values: {np.round(didx[di,-1,:dcount],3)}\")\n #log.info(f'di={di} Applying HRTF for frozen DLC coordinates')\n #rec2 = rec.copy()\n #rec2['dlc'] = rec2['dlc']._modified_copy(data=dlc1.T)\n #rec2 = free_tools.stim_filt_hrtf(rec2, hrtf_format='az', smooth_win=2,\n # f_min=200, f_max=20000, channels=18)['rec']\n\n for mi, m in enumerate(model_list):\n stim = {'stim': rec['stim'].as_continuous().T, 'dlc': dlc1}\n dstrf[di] = m.dstrf(stim, D=D, out_channels=[out_channel], t_indexes=t_indexes)\n\n d = dstrf[di]['stim'][0, :, :, :]\n\n if snr_threshold is not None:\n d = np.reshape(d, (d.shape[0], d.shape[1] * d.shape[2]))\n md = d.mean(axis=0, keepdims=True)\n e = np.std(d - md, axis=1) / np.std(md)\n if (e > snr_threshold).sum() > 0:\n log.info(f\"Removed {(e > snr_threshold).sum()}/{len(d)} noisy dSTRFs for PCA calculation\")\n\n d = dstrf[di]['stim'][0, (e <= snr_threshold), :, :]\n mdstrf[mi, di, :, :] = d.mean(axis=0)\n pc, pc_mag = dtools.compute_dpcs(d[np.newaxis, :, :, :], pc_count=pc_count)\n pc1[mi, di, :, :] = pc[0, 0, :, :] * pc_mag[0, 0]\n pc2[mi, di, :, :] = pc[0, 1, :, :] * pc_mag[1, 0]\n pc_mag_all[mi, di, :] = pc_mag[:, 0]\n return mdstrf, pc1, pc2, pc_mag_all",
"def coadd(self, sp, method='pixel'):\n\t\tif method == 'pixel':\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tif self.apply_sigma_mask:\n\t\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\telse:\n\t\t\t\tself.mask = []\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\telif method == 'wavelength':\n\t\t\tself_supers = copy.deepcopy(self)\n\t\t\tg = interpolate.interp1d(self.wave, self.flux)\n\t\t\tsp_supers = copy.deepcopy(sp)\n\t\t\tf = interpolate.interp1d(sp.wave, sp.flux)\n\t\t\t## 10x supersample the average difference of \n\t\t\t## the wavelength\n\t\t\t#step0 = np.mean(np.diff(self.wave))/10\n\t\t\t#self_supers.wave = np.arange(self.wave[0],\n\t\t\t#\tself.wave[-1],step0)\n\t\t\tself_supers.flux = g(self_supers.wave)\n\t\t\tself_supers.oriWave = np.arange(self.oriWave[0],\n\t\t\t\tself.oriWave[-1],(self.oriWave[-1]-self.oriWave[0])/10240)\n\t\t\tg1 = interpolate.interp1d(self.oriWave, self.oriFlux)\n\t\t\tself_supers.oriFlux = g1(self_supers.oriWave)\n\n\t\t\t#step = np.mean(np.diff(sp.wave))/10\n\t\t\t#sp_supers.wave = np.arange(sp.wave[0],sp.wave[-1],step)\n\t\t\t#sp_supers.flux = f(sp_supers.wave)\n\t\t\tsp_supers.oriWave = np.arange(sp.oriWave[0],\n\t\t\t\tsp.oriWave[-1],(sp.oriWave[-1]-sp.oriWave[0])/10240)\n\t\t\tf1 = interpolate.interp1d(sp.oriWave, sp.oriFlux)\n\t\t\tsp_supers.oriFlux = f1(sp_supers.oriWave)\n\n\t\t\t## calculate the max cross correlation value\n\t\t\tdef xcorr(a0,b0,shift):\n\t\t\t\t\"\"\"\n\t\t\t\tShift is the index number after supersampling \n\t\t\t\tboth of the spectra.\n\t\t\t\t\"\"\"\n\t\t\t\ta = copy.deepcopy(a0)\n\t\t\t\tb = copy.deepcopy(b0)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\tlength = b.oriFlux.shape[0]\n\t\t\t\tif shift >= 0:\n\t\t\t\t\tmask_a = np.arange(0,shift,1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(length-1,length-shift-1,-1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\telif shift < 0:\n\t\t\t\t\tmask_a = np.arange(length-1,length+shift-1,-1)\n\t\t\t\t\ta.oriFlux = np.delete(a.oriFlux,mask_a)\n\t\t\t\t\tmask_b = np.arange(0,-shift,1)\n\t\t\t\t\tb.oriFlux = np.delete(b.oriFlux,mask_b)\n\n\t\t\t\t## shift the wavelength of b\n\t\t\t\t#b.wave += shift * step\n\t\t\t\t## discard the points where the wavelength values\n\t\t\t\t## are larger\n\t\t\t\t#condition = (a.wave > b.wave[0]) & (a.wave < b.wave[-1])\n\t\t\t\t\n\t\t\t\t#a.flux = a.flux[np.where(condition)]\n\t\t\t\t#a.wave = a.wave[np.where(condition)]\n\t\t\t\t## resampling the telluric model\n\t\t\t\t#b.flux = np.array(smart.integralResample(xh=b.wave, \n\t\t\t\t#\tyh=b.flux, xl=a.wave))\n\t\t\t\t\n\t\t\t\treturn np.inner(a.oriFlux, b.oriFlux)/\\\n\t\t\t\t(np.average(a.oriFlux)*np.average(b.oriFlux))/a.oriFlux.shape[0]\n\n\t\t\txcorr_list = []\n\t\t\t## mask the ending pixels\n\t\t\tself_supers2 = copy.deepcopy(self_supers)\n\t\t\tsp_supers2 = copy.deepcopy(sp_supers)\n\t\t\tself_supers2.wave = self_supers2.wave[1000:-1000]\n\t\t\tself_supers2.flux = 
self_supers2.flux[1000:-1000]\n\t\t\tsp_supers2.wave = sp_supers2.wave[1000:-1000]\n\t\t\tsp_supers2.flux = sp_supers2.flux[1000:-1000]\n\t\t\tfor shift in np.arange(-10,10,1):\n\t\t\t\txcorr_list.append(xcorr(self_supers2,sp_supers2,shift))\n\n\t\t\t## dignostic plot for cc result\n\t\t\tfig, ax = plt.subplots()\n\t\t\tax.plot(np.arange(-10,10,1),np.array(xcorr_list),'k-')\n\t\t\tplt.show()\n\t\t\tplt.close()\n\n\t\t\tstep = np.absolute(np.mean(np.diff(sp_supers.wave)))\n\t\t\tbestshift = np.arange(-10*step,10*step,step)[np.argmax(xcorr_list)]\n\t\t\tsp_supers.oriWave += bestshift\n\t\t\t## discard the points where the wavelength values\n\t\t\t## are larger\n\t\t\tcondition = (self.oriWave > sp_supers.oriWave[0])\\\n\t\t\t& (self.oriWave < sp_supers.oriWave[-1])\n\n\t\t\tself.oriFlux = self.oriFlux[np.where(condition)]\n\t\t\tself.oriWave = self.oriWave[np.where(condition)]\n\t\t\tself.oriNoise = self.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriNoise = sp_supers.oriNoise[np.where(condition)]\n\t\t\tsp_supers.oriFlux = np.array(smart.integralResample(xh=sp_supers.oriWave, \n\t\t\t\tyh=sp_supers.oriFlux, xl=self.oriWave))\n\n\t\t\tw1 = 1/self.oriNoise**2\n\t\t\tw2 = 1/sp_supers.oriNoise**2\n\t\t\tself.oriFlux = (self.oriFlux*w1 + sp_supers.oriFlux*w2)/(w1 + w2)\n\t\t\tself.oriNoise = np.sqrt(1/(w1 + w2))\n\t\t\t## set up masking criteria\n\t\t\tself.avgFlux = np.mean(self.oriFlux)\n\t\t\tself.stdFlux = np.std(self.oriFlux)\n\t\t\tself.smoothFlux = self.oriFlux\n\t\t\t## set the outliers as the flux below \n\t\t\tself.smoothFlux[self.smoothFlux <= self.avgFlux-2*self.stdFlux] = 0\n\t\t\tself.mask = np.where(self.smoothFlux <= 0)\n\t\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn self",
"def update(self, newparams):\n for k, v in list(newparams.items()):\n if k in self.basis_params:\n # Make sure parameter is in dict, and check if it changed\n if k not in self.params:\n self.basis_dirty = True\n self.params[k] = v\n if np.any(v != self.params.get(k)):\n self.basis_dirty = True\n else:\n try:\n # here the sps.params.dirtiness should increase to 2 if\n # there was a change\n self.ssp.params[k] = v[0]\n except KeyError:\n pass\n # now update params\n self.params[k] = np.copy(np.atleast_1d(v))\n # if we changed only csp_params but are relying on COMPSP, make\n # sure we remake the basis\n if self.safe and (self.ssp.params.dirtiness == 1):\n self.basis_dirty = True\n # if we changed only csp_params propagate them through but don't\n # force basis remake (unless basis_dirty)\n if self.ssp.params.dirtiness == 1:\n self.ssp._update_params()\n\n if self.basis_dirty | (self.ssp.params.dirtiness == 2):\n self.build_basis()",
"def loadData(fname='Unstra.out2.00008.athdf'):\n #data=ath.athdf(fname,quantities=['B1','B2','B3'])\n time,data=ath.athdf(fname,quantities=['Bcc1'])\n bx = data['Bcc1']\n time,data=ath.athdf(fname,quantities=['Bcc2'])\n by = data['Bcc2']\n time,data=ath.athdf(fname,quantities=['Bcc3'])\n bz = data['Bcc3']\n x = data['x1f']\n y = data['x2f']\n z = data['x3f']\n # refinement\n rfac = 1.0\n ##if bx.shape[0] < 512:\n ## nz,ny,nx = bx.shape\n ## rfac = int(512/bx.shape[0])\n ## bx = np.repeat(bx,rfac,axis=0)\n ## bx = np.repeat(bx,rfac,axis=1)\n ## bx = np.repeat(bx,rfac,axis=2)\n ## by = np.repeat(by,rfac,axis=0)\n ## by = np.repeat(by,rfac,axis=1)\n ## by = np.repeat(by,rfac,axis=2)\n ## bz = np.repeat(bz,rfac,axis=0)\n ## bz = np.repeat(bz,rfac,axis=1)\n ## bz = np.repeat(bz,rfac,axis=2)\n # ---\n def curl(vx,vy,vz,dx,dy,dz):\n [dzvx,dyvx,dxvx] = np.gradient(vx)\n [dzvy,dyvy,dxvy] = np.gradient(vy)\n [dzvz,dyvz,dxvz] = np.gradient(vz)\n cx = dyvz/dy-dzvy/dz\n cy = dzvx/dz-dxvz/dx\n cz = dxvy/dx-dyvx/dy\n # No need to del the reference by one manually\n # allow python to perform its own garbage collection\n # after the function return cxyz\n #del dzvx\n #del dzvy\n #del dzvz\n return cx,cy,cz\n # ---\n dx = dz = (x[1]-x[0])/rfac\n dy = (y[1]-y[0])/rfac\n jx,jy,jz = curl(bx,by,bz,dx,dy,dz)\n j2 = jx**2+jy**2+jz**2\n return j2",
"def reconstruct(self, my_data, my_suff_stat, model_params):\n\n my_x = my_data[\"x\"]\n my_x_infr = my_data[\"x_infr\"]\n lpj = my_suff_stat[\"lpj\"]\n ss = my_suff_stat[\"ss\"] # is (my_N x S x H)\n S_perm = my_suff_stat[\"S_perm\"]\n\n my_N, D = my_x.shape\n B = np.minimum(self.B_max - lpj.max(axis=1), self.B_max_shft) # is: (my_N,)\n pjc = np.exp(lpj + B[:, None]) # is: (my_N, S+H+1)\n\n modelmean = self.modelmean\n\n this_suff_stat = {}\n if \"storage\" in my_suff_stat.keys():\n this_suff_stat[\"storage\"] = my_suff_stat[\"storage\"]\n\n my_data[\"y_reconstructed\"] = my_data[\"y\"].copy()\n my_y = my_data[\"y_reconstructed\"]\n\n for n in range(my_N):\n this_x_infr = my_x_infr[n, :] # is (D,)\n if np.logical_not(this_x_infr).all():\n continue\n\n this_y = my_y[n, :] # is (D,)\n this_x = my_x[n, :] # is (D,)\n this_pjc = pjc[n, :] # is (S,)\n this_ss = ss[n, :, :] # is (S, H)\n\n this_data = {\"y\": this_y, \"x\": this_x, \"x_infr\": this_x_infr}\n this_suff_stat[\"ss\"] = this_ss\n this_mu = modelmean(model_params, this_data, this_suff_stat) # is (D_miss, S)\n\n this_pjc_sum = this_pjc.sum()\n\n this_estimate = (this_mu * this_pjc[None, S_perm:]).sum(\n axis=1\n ) / this_pjc_sum # is (D_miss,)\n this_y[np.logical_not(this_x)] = this_estimate",
"def undulations(**kwargs):\n\n\t#---parameters\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\tcalc = kwargs['calc']\n\tupname = 'lipid_abstractor'\n\tgrid_spacing = calc['specs']['grid_spacing']\n\tvecs = datmerge(kwargs,upname,'vecs')\n\tnframes = int(np.sum(datmerge(kwargs,upname,'nframes')))\n\ttrajectory = datmerge(kwargs,upname,'points')\n\tattrs,result = {},{}\n\t#---! hacking through error with monolayer separation\n\ttry: monolayer_indices = kwargs['upstream'][upname+'0']['monolayer_indices']\n\texcept: monolayer_indices = kwargs['upstream'][upname]['monolayer_indices']\n\t#---choose grid dimensions\n\tgrid = np.array([round(i) for i in np.mean(vecs,axis=0)/grid_spacing])[:2]\n\t#---! removed timeseries from result for new version of omnicalc\n\t#---parallel\n\tmesh = [[],[]]\n\tfor mn in range(2):\n\t\tstart = time.time()\n\t\tmesh[mn] = Parallel(n_jobs=work.nprocs,verbose=0,require='sharedmem')(\n\t\t\tdelayed(makemesh_regular)(\n\t\t\t\ttrajectory[fr][np.where(monolayer_indices==mn)],vecs[fr],grid)\n\t\t\tfor fr in framelooper(nframes,start=start,text='monolayer %d, frame'%mn))\n\tchecktime()\n\n\t#---pack\n\tresult['mesh'] = np.array(mesh)\n\tresult['grid'] = np.array(grid)\n\tresult['nframes'] = np.array(nframes)\n\tresult['vecs'] = vecs\n\tattrs['grid_spacing'] = grid_spacing\n\treturn result,attrs"
]
| [
"0.5937564",
"0.5780565",
"0.5741519",
"0.56655246",
"0.563537",
"0.5506949",
"0.5402162",
"0.5372675",
"0.5352444",
"0.5346785",
"0.532719",
"0.531849",
"0.5266165",
"0.5228784",
"0.5225048",
"0.51991665",
"0.51623684",
"0.5162299",
"0.51584435",
"0.5140857",
"0.5140395",
"0.5132746",
"0.5084031",
"0.5079144",
"0.50787956",
"0.507082",
"0.50343704",
"0.5027339",
"0.5013533",
"0.50035834"
]
| 0.60962576 | 0 |
r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP) Transform and align a face in an image. | def align(imgDim, rgbImg,
landmarks, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
skipMulti=True):
assert imgDim is not None
assert rgbImg is not None
assert landmarks is not None
#if bb is None:
# bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
# if bb is None:
# return
#if landmarks is None:
# landmarks = self.findLandmarks(rgbImg, bb)
npLandmarks = np.float32(landmarks)
npLandmarkIndices = np.array(landmarkIndices)
H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
imgDim * MINMAX_TEMPLATE[npLandmarkIndices])
thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
return thumbnail | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def align(self, image, landmark_indices, anchor_points, size=96):\n # Detect face in image and find landmarks\n box = self.detect(image)\n landmarks = self.find_landmarks(image, box)\n\n # Select three points in the landmarks(Eyes and nose)\n points_in_image = landmarks[landmark_indices]\n points_in_image = points_in_image.astype('float32')\n # Generate the normalized output size\n output_size = anchor_points * size\n\n # Calculates the 2 \\times 3 matrix of an affine transform\n affine_transf = cv2.getAffineTransform(points_in_image, output_size)\n\n # Transforms the source image using the specified matrix\n transformed_img = cv2.warpAffine(image, affine_transf, (size, size))\n\n return transformed_img",
"def align_face(np_img, mask=True):\n # Prepare extraction\n current_dir = os.getcwd()\n os.chdir(OPEN_FACE_BINARY_PATH)\n timestamp = datetime.now().timestamp()\n mask_param = '-nomask' if mask is False else ''\n\n # Align and mask face using OpenFace\n np_img *= 255\n np_img = np_img.astype(np.uint8)\n imsave('{}.jpg'.format(timestamp), np_img)\n exit_code = os.system(\n './FaceLandmarkImg -f {}.jpg -wild -simalign -simsize 192 {} -format_aligned jpg >/dev/null'.format(timestamp,\n mask_param))\n aligned_img = imread('processed/{}_aligned/face_det_000000.jpg'.format(timestamp))\n aligned_img = aligned_img / 255.\n\n # Delete temporary files created during face alignment\n exit_code = os.system('rm -r {}.jpg >/dev/null'.format(timestamp))\n exit_code = os.system('rm -r processed/{}* >/dev/null'.format(timestamp))\n\n os.chdir(current_dir)\n\n return aligned_img",
"def align(self, image, landmark_indices, anchor_points, size=96):\n\n detected = self.detect(image)\n coords = self.find_landmarks(image, detected)\n in_points = coords[landmark_indices]\n in_points = in_points.astype('float32')\n out_points = anchor_points * size\n warp_mat = cv2.getAffineTransform(in_points, out_points)\n warp_dst = cv2.warpAffine(image, warp_mat, (size, size))\n\n return warp_dst",
"def imalign(src_file, dst_file, face_landmarks, output_size=1024, transform_size=1024, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):\n lm = np.array(face_landmarks)\n lm_chin = lm[0 : 17] # left-right\n lm_eyebrow_left = lm[17 : 22] # left-right\n lm_eyebrow_right = lm[22 : 27] # left-right\n lm_nose = lm[27 : 31] # top-down\n lm_nostrils = lm[31 : 36] # top-down\n lm_eye_left = lm[36 : 42] # left-clockwise\n lm_eye_right = lm[42 : 48] # left-clockwise\n lm_mouth_outer = lm[48 : 60] # left-clockwise\n lm_mouth_inner = lm[60 : 68] # left-clockwise\n\n # Calculate auxiliary vectors.\n eye_left = np.mean(lm_eye_left, axis=0)\n eye_right = np.mean(lm_eye_right, axis=0)\n eye_avg = (eye_left + eye_right) * 0.5\n eye_to_eye = eye_right - eye_left\n mouth_left = lm_mouth_outer[0]\n mouth_right = lm_mouth_outer[6]\n mouth_avg = (mouth_left + mouth_right) * 0.5\n eye_to_mouth = mouth_avg - eye_avg\n\n # Choose oriented crop rectangle.\n x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]\n x /= np.hypot(*x)\n x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)\n x *= x_scale\n y = np.flipud(x) * [-y_scale, y_scale]\n c = eye_avg + eye_to_mouth * em_scale\n quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])\n qsize = np.hypot(*x) * 2\n\n # Load in-the-wild image.\n if not os.path.isfile(src_file):\n print('\\nCannot find source image. Please run \"--wilds\" before \"--align\".')\n return\n img = Image.open(src_file)\n\n # Shrink.\n shrink = int(np.floor(qsize / output_size * 0.5))\n if shrink > 1:\n rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))\n img = img.resize(rsize, Image.ANTIALIAS)\n quad /= shrink\n qsize /= shrink\n\n # Crop.\n border = max(int(np.rint(qsize * 0.1)), 3)\n crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))\n if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:\n img = img.crop(crop)\n quad -= crop[0:2]\n\n # Pad.\n pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))\n if enable_padding and max(pad) > border - 4:\n pad = np.maximum(pad, int(np.rint(qsize * 0.3)))\n img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')\n h, w, _ = img.shape\n y, x, _ = np.ogrid[:h, :w, :1]\n mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))\n blur = qsize * 0.02\n img += (ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)\n img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)\n img = np.uint8(np.clip(np.rint(img), 0, 255))\n if alpha:\n mask = 1-np.clip(3.0 * mask, 0.0, 1.0)\n mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))\n img = np.concatenate((img, mask), axis=2)\n img = Image.fromarray(img, 'RGBA')\n else:\n img = Image.fromarray(img, 'RGB')\n quad += pad[:2]\n\n # Transform.\n img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)\n print(transform_size)\n if output_size < transform_size:\n img = 
img.resize((output_size, output_size), Image.ANTIALIAS)\n\n # Save aligned image.\n img.save(dst_file, 'PNG')",
"def align_preprocessed(self, img):\n aligner = FaceAligner(self.args.wing_path, self.args.lm_path, self.args.img_size)\n return aligner.align(img)",
"def preprocess(img, bbox=None, landmark=None, **kwargs):\n M = None\n image_size = []\n str_image_size = kwargs.get('image_size', '')\n # Assert input image shape\n if len(str_image_size)>0:\n image_size = [int(x) for x in str_image_size.split(',')]\n if len(image_size)==1:\n image_size = [image_size[0], image_size[0]]\n assert len(image_size)==2\n assert image_size[0]==112\n assert image_size[0]==112 or image_size[1]==96\n # Do alignment using landmnark points\n if landmark is not None:\n assert len(image_size)==2\n src = np.array([\n [30.2946, 51.6963],\n [65.5318, 51.5014],\n [48.0252, 71.7366],\n [33.5493, 92.3655],\n [62.7299, 92.2041] ], dtype=np.float32 )\n if image_size[1]==112:\n src[:,0] += 8.0\n dst = landmark.astype(np.float32)\n\n tform = trans.SimilarityTransform()\n tform.estimate(dst, src)\n M = tform.params[0:2,:]\n\n # If no landmark points available, do alignment using bounding box. If no bounding box available use center crop\n if M is None:\n if bbox is None: #use center crop\n det = np.zeros(4, dtype=np.int32)\n det[0] = int(img.shape[1]*0.0625)\n det[1] = int(img.shape[0]*0.0625)\n det[2] = img.shape[1] - det[0]\n det[3] = img.shape[0] - det[1]\n else:\n det = bbox\n margin = kwargs.get('margin', 44)\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-margin/2, 0)\n bb[1] = np.maximum(det[1]-margin/2, 0)\n bb[2] = np.minimum(det[2]+margin/2, img.shape[1])\n bb[3] = np.minimum(det[3]+margin/2, img.shape[0])\n ret = img[bb[1]:bb[3],bb[0]:bb[2],:]\n if len(image_size)>0:\n ret = cv2.resize(ret, (image_size[1], image_size[0]))\n return ret\n else: #do align using landmark\n assert len(image_size)==2\n\n # warped = cv2.warpAffine(img,M,(image_size[1],image_size[0]), borderValue = 0.0)\n warped = cv2.warpAffine(img,M,(image_size[1],image_size[0]), borderValue = 0.0)\n\n return warped",
"def align_crop(img, landmarks, standard_landmarks, crop_size=572, face_factor=0.45, align_type='similarity', order=3,\r\n mode='edge'):\r\n interpolation = {0: cv2.INTER_NEAREST,\r\n 1: cv2.INTER_LINEAR,\r\n 2: cv2.INTER_AREA,\r\n 3: cv2.INTER_CUBIC,\r\n 4: cv2.INTER_LANCZOS4,\r\n 5: cv2.INTER_LANCZOS4}\r\n border = {'constant': cv2.BORDER_CONSTANT,\r\n 'edge': cv2.BORDER_REPLICATE,\r\n 'symmetric': cv2.BORDER_REFLECT,\r\n 'reflect': cv2.BORDER_REFLECT101,\r\n 'wrap': cv2.BORDER_WRAP}\r\n\r\n # check\r\n assert align_type in ['affine', 'similarity'], \\\r\n \" [!] Invalid 'align_type'! The {} is not included in ['affine' and 'similarity']!\".format(align_type)\r\n assert order in [0, 1, 2, 3, 4, 5], \\\r\n \" [!] Invalid 'order'! The {} is not included in [0, 1, 2, 3, 4, 5]!\".format(order)\r\n assert mode in ['constant', 'edge', 'symmetric', 'reflect', 'wrap'], \\\r\n \" [!] Invalid 'mode'! the {} is not included in ['constant', 'edge', 'symmetric', 'reflect', and 'wrap']\".format(mode)\r\n\r\n # crop size\r\n if isinstance(crop_size, (list, tuple)) and len(crop_size) == 2:\r\n crop_size_h = crop_size[0]\r\n crop_size_w = crop_size[1]\r\n elif isinstance(crop_size, int):\r\n crop_size_h = crop_size_w = crop_size\r\n else:\r\n raise Exception(\" [!] Invalid 'crop_size'! The 'crop_size' should be (1) one integer for (crop_size, crop_size) ar (2) (int, int) for (crop_size_h, crop_size_w)!\")\r\n\r\n # estimate transform matrix\r\n target_landmarks = standard_landmarks * max(crop_size_h, crop_size_w) * face_factor + np.array([crop_size_w // 2, crop_size_h // 2])\r\n if align_type == 'affine': # 6 degree of freedom\r\n transform_matrix, _ = cv2.estimateAffine2D(target_landmarks, landmarks, ransacReprojThreshold=np.Inf)\r\n else: # 4 degree of freedom: using the combinations of translation, rotation, and uniform scaling\r\n transform_matrix, _ = cv2.estimateAffinePartial2D(target_landmarks, landmarks, ransacReprojThreshold=np.Inf)\r\n\r\n # warp image by given transform\r\n img_crop = cv2.warpAffine(img, transform_matrix, dsize=(crop_size_w, crop_size_h),\r\n flags=cv2.WARP_INVERSE_MAP + interpolation[order], borderMode=border[mode])\r\n\r\n # get transformed landmarks\r\n transformed_landmarks = cv2.transform(np.expand_dims(landmarks, axis=0), m=cv2.invertAffineTransform(transform_matrix))\r\n\r\n return img_crop, transformed_landmarks",
"def align(img, landmarks, d_size=(400, 400), normalized=False, show=False):\n assert len(landmarks) == 10\n assert isinstance(img, np.ndarray)\n landmarks = np.array(landmarks).reshape(5, 2)\n dw, dh = d_size\n\n keypoints = landmarks.copy().astype(np.float64)\n if normalized:\n keypoints[:, 0] *= img.shape[1]\n keypoints[:, 1] *= img.shape[0]\n\n keypoints_ref = np.zeros((5, 2), dtype=np.float64)\n keypoints_ref[:, 0] = FivePointsAligner.ref_landmarks[:, 0] * dw\n keypoints_ref[:, 1] = FivePointsAligner.ref_landmarks[:, 1] * dh\n\n transform_matrix = transformation_from_points(keypoints_ref, keypoints)\n output_im = cv.warpAffine(img, transform_matrix, d_size, flags=cv.WARP_INVERSE_MAP)\n\n if show:\n tmp_output = output_im.copy()\n for point in keypoints_ref:\n cv.circle(tmp_output, (int(point[0]), int(point[1])), 5, (255, 0, 0), -1)\n for point in keypoints:\n cv.circle(img, (int(point[0]), int(point[1])), 5, (255, 0, 0), -1)\n img = cv.resize(img, d_size)\n cv.imshow('source/warped', np.hstack((img, tmp_output)))\n cv.waitKey()\n\n return output_im",
"def align_and_extract_face(self, image, landmarks, output_size=(150,150), desired_left_eye_relative_position=(0.55, 0.55)):\n\n # get the key landmark features\n lp, eye_center, facebox = landmarks['left-eye-center-pos'], landmarks['eye-center-pos'], landmarks['face-box']\n\n # calculating the center box for face box, this will also be the center for the output_box\n face_box_center = calc_box_center(facebox)\n\n # calculate the output box using the box center\n output_box = (face_box_center[0] - output_size[0] / 2, face_box_center[1] - output_size[1]/2, output_size[0], output_size[1])\n\n # left eye's absolute position relative to facebox\n current_left_eye_absolute_x_position = lp[0] - facebox[0]\n\n # desired left eye's absolute position relative to output box\n desired_left_eye_absolute_x_position = output_box[2] * desired_left_eye_relative_position[0]\n\n # scale needed would be the ratio between desired and current\n scale = desired_left_eye_absolute_x_position / current_left_eye_absolute_x_position\n\n # calculate alignment angle\n angle = self.determine_rotation_angle(landmarks)\n\n # rotation origin will be the eye_center\n rotation_origin = eye_center\n\n # calculate the rotation matrix\n rotation_matrix = self.determine_rotation_matrix(rotation_origin, angle, scale)\n\n # calculate the new bounding box needed to bound/contain the rotated image\n aligned_box = self.determine_bounding_box_of_rotated_box(calc_img_box(image), rotation_matrix)\n (aligned_x, aligned_y, aligned_w, aligned_h) = aligned_box\n # update the translation to fit the rotated image to the bounding box\n rotation_matrix[0, 2] -= aligned_x\n rotation_matrix[1, 2] -= aligned_y\n\n # perform the transformation\n warped_image = cv2.warpAffine(image, rotation_matrix, (aligned_w, aligned_h), flags=cv2.INTER_CUBIC)\n\n # calculate new box positions\n new_face_box_center = calc_rotate_point_with_rotation_matrix(face_box_center, rotation_matrix)\n new_output_box = [\n int(new_face_box_center[0] - output_size[0] / 2),\n int(new_face_box_center[1] - output_size[1]/2),\n output_size[0], output_size[1]\n ]\n\n # calculate new left eye position\n new_lp = calc_rotate_point_with_rotation_matrix(lp, rotation_matrix)\n\n # calculate how far we are from the ideal y position for the left eye\n desired_left_eye_absolute_y_position = new_output_box[1] + int(new_output_box[3] * desired_left_eye_relative_position[1])\n distance = new_lp[1] - desired_left_eye_absolute_y_position\n\n # shift the y position such that the output box is in the desired y position\n new_output_box[1] += distance\n\n # it's possible output boxes's position will extend beyond current image space, pad the current image space such that they'll be enough space for the output box\n\n # figure out the padding\n x_left_pad = abs(min(0, new_output_box[0]))\n x_right_pad = abs(min(0, warped_image.shape[1] - (new_output_box[0] + new_output_box[2])))\n y_top_pad = abs(min(0, new_output_box[1]))\n y_bottom_pad = abs(min(0, warped_image.shape[0] - (new_output_box[1] + new_output_box[3])))\n\n # some amount of padding exists, create an image with enough padding space\n if x_left_pad + x_right_pad + y_top_pad + y_bottom_pad != 0:\n # create a new image with the padded distance\n new_shape = (warped_image.shape[0] + y_top_pad + y_bottom_pad, warped_image.shape[1] + x_left_pad + x_right_pad, 3)\n padded_image = np.zeros(new_shape)\n\n # copy contents of warped image into padded image\n padded_image[y_top_pad:y_top_pad + warped_image.shape[0], x_left_pad:x_left_pad + 
warped_image.shape[1]] = warped_image\n\n # set the warped image to the padded image\n warped_image = padded_image\n\n # now \"tare\" the coordinates of the output box so that it fits in side the padded image\n new_output_box[0] += x_left_pad\n new_output_box[1] += y_top_pad\n\n # crop the output box out of the warped image\n cropped_image = warped_image[new_output_box[1]:new_output_box[1]+new_output_box[3], new_output_box[0]:new_output_box[0]+new_output_box[2]]\n\n # return the cropped image\n return cropped_image",
"def rotate(img, bbox, landmark, alpha):\r\n center = (bbox.x+bbox.w/2, bbox.y+bbox.h/2)\r\n rot_mat = cv2.getRotationMatrix2D(center, alpha, 1)\r\n img_rotated_by_alpha = cv2.warpAffine(img, rot_mat, img.shape)\r\n landmark_ = np.asarray([(rot_mat[0][0]*x+rot_mat[0][1]*y+rot_mat[0][2],\r\n rot_mat[1][0]*x+rot_mat[1][1]*y+rot_mat[1][2]) for (x, y) in landmark])\r\n face = img_rotated_by_alpha[bbox.y:bbox.y+bbox.h,bbox.x:bbox.x+bbox.w]\r\n return (face, landmark_)",
"def align_crop_opencv(img,\n src_landmarks,\n standard_landmarks,\n celeba_standard_landmark,\n src_celeba_landmark,\n crop_size=512,\n face_factor=0.8,\n align_type='similarity',\n order=3,\n mode='edge'):\n # set OpenCV\n\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n inter = {0: cv2.INTER_NEAREST, 1: cv2.INTER_LINEAR, 2: cv2.INTER_AREA,\n 3: cv2.INTER_CUBIC, 4: cv2.INTER_LANCZOS4, 5: cv2.INTER_LANCZOS4}\n border = {'constant': cv2.BORDER_CONSTANT, 'edge': cv2.BORDER_REPLICATE,\n 'symmetric': cv2.BORDER_REFLECT, 'reflect': cv2.BORDER_REFLECT101,\n 'wrap': cv2.BORDER_WRAP}\n\n # check\n assert align_type in ['affine', 'similarity'], 'Invalid `align_type`! Allowed: %s!' % ['affine', 'similarity']\n assert order in [0, 1, 2, 3, 4, 5], 'Invalid `order`! Allowed: %s!' % [0, 1, 2, 3, 4, 5]\n assert mode in ['constant', 'edge', 'symmetric', 'reflect', 'wrap'], 'Invalid `mode`! Allowed: %s!' % ['constant',\n 'edge',\n 'symmetric',\n 'reflect',\n 'wrap']\n # crop size\n if isinstance(crop_size, (list, tuple)) and len(crop_size) == 2:\n crop_size_h = crop_size[0]\n crop_size_w = crop_size[1]\n elif isinstance(crop_size, int):\n crop_size_h = crop_size_w = crop_size\n else:\n raise Exception(\n 'Invalid `crop_size`! `crop_size` should be 1. int for (crop_size, crop_size) or 2. (int, int) for (crop_size_h, crop_size_w)!')\n\n # estimate transform matrix\n trg_landmarks = standard_landmarks * max(crop_size_h, crop_size_w) * face_factor + np.array(\n [crop_size_w // 2, crop_size_h // 2])\n\n if align_type == 'affine':\n tform = cv2.estimateAffine2D(trg_landmarks, src_landmarks, ransacReprojThreshold=np.Inf)[0]\n else:\n tform = cv2.estimateAffinePartial2D(trg_landmarks, src_landmarks, ransacReprojThreshold=np.Inf)[0] #tform{2,3}\n\n # calcaute the scale of tform\n m1 = np.mat('0;0;1')\n m2 = np.mat('1;0;1')\n p1 = tform.dot(m1)\n p2 = tform.dot(m2)\n scale = LA.norm(p2 - p1) # defualt is Frobenius norm\n # change the translations part of the transformation matrix for downwarding vertically\n tform[1][2] = tform[1][2] + 20 * scale\n output_shape = (crop_size_h, crop_size_w)\n img_crop = cv2.warpAffine(img, tform, output_shape[::-1], flags=cv2.WARP_INVERSE_MAP + inter[order],\n borderMode=border[mode])\n\n tformed_landmarks = cv2.transform(np.expand_dims(src_landmarks, axis=0), cv2.invertAffineTransform(tform))[0]\n tformed_celeba_landmarks = cv2.transform(np.expand_dims(src_celeba_landmark, axis=0), cv2.invertAffineTransform(tform))[0]\n return img_crop, tformed_landmarks, tformed_celeba_landmarks",
"def augment_image(im):\n # First crop out the face to save reduce computation load\n bb = im.landmarks['bb'].lms\n bb_vec = bb.as_vector()\n bb_ul = (np.array([bb_vec[0], bb_vec[1]]) - bb.centre()) * 2\n bb_lr = (np.array([bb_vec[4], bb_vec[5]]) - bb.centre()) * 2\n ul = bb_ul + bb.centre()\n lr = bb_lr + bb.centre()\n im = im.crop(ul, lr, constrain_to_boundary=True)\n if im.pixels.shape[0] == 1:\n pix = np.zeros((3, im.pixels.shape[1], im.pixels.shape[2]))\n pix[:,] = im.pixels\n im.pixels = pix\n\n beta = 0.3\n cx = np.random.uniform(-beta, beta)\n cy = np.random.uniform(-beta, beta)\n fx = 1.0\n fy = np.random.uniform(0.6, 1.4)\n max_rotation = 30\n theta = np.random.uniform(-max_rotation, max_rotation)\n\n rotation = menpo.transform.Rotation.init_from_2d_ccw_angle(theta)\n shear = menpo.transform.Affine(np.array([[1, cx, 0],[cy, 1, 0], [0,0,1]]))\n scale = menpo.transform.Affine(np.array([[fx, 0, 0],[0, fy, 0], [0,0,1]]))\n T = scale.compose_after(shear).compose_after(rotation)\n\n t_im = im.transform_about_centre(T)\n\n t_im = add_color_jetting(t_im)\n t_im = add_occlusion(t_im)\n\n\n new_bb = t_im.landmarks['PTS'].lms.bounding_box()\n\n #new_bb contains the gt bounding box\n augmented_bb = add_bb_noise(new_bb)\n augmented_bb = augmented_bb.reshape((4,2))\n augmented_bb = menpo.shape.PointCloud(augmented_bb)\n t_im.landmarks['bb'] = menpo.landmark.LandmarkGroup.init_with_all_label(augmented_bb)\n\n return t_im",
"def prepocessImg(self, method, size, img, bb,offset=0.3,gray=True,\n boundry=False, outputDebug=False,outputprefix=None):\n if method == 'crop':\n crop_img = crop_only(img,bb.left(),bb.top(),bb.width(),bb.height(),offset,size)\n elif method == 'affine':\n img = Image.fromarray(img)\n if self.predictor == None:\n raise Exception(\"Error: method affine should initial with an facepredictor.\")\n alignPoints = self.align(img, bb)\n (xs, ys) = zip(*alignPoints)\n (l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))\n w,h = img.size\n if boundry and (l < 0 or r > w or t < 0 or b > h):\n raise AliError('face out of boundry')\n \n left_eye_l = alignPoints[36]\n left_eye_r = alignPoints[39]\n left_eye = (np.array(left_eye_l)+np.array(left_eye_r))/2\n right_eye_l = alignPoints[42]\n right_eye_r = alignPoints[45]\n right_eye = (np.array(right_eye_l)+np.array(right_eye_r))/2\n crop_img = crop_simi(img,left_eye,right_eye,(offset,offset),(size,size))\n im_buffer = cStringIO.StringIO()\n crop_img.save(im_buffer, format=\"JPEG\")\n im_str = base64.b64encode(im_buffer.getvalue())\n else:\n raise Exception(\"undefined crop method\")\n if gray:\n crop_img = crop_img.convert('L')\n if outputDebug:\n dirname = './aligndebug'\n if not os.path.exists(os.path.abspath(dirname)):\n os.mkdir(dirname)\n drawbox(img,(bb.left(),bb.right(),bb.top(),bb.bottom()))\n if method == 'affine':\n drawpoint(img,left_eye)\n drawpoint(img,right_eye)\n img.save('{}/{}_annotated.jpg'.format(dirname,outputprefix))\n crop_img.save('{}/{}_crop.jpg'.format(dirname,outputprefix))\n crop_img = np.array(crop_img,dtype=np.float32) #look carefully on data format\n if crop_img.ndim == 3: #data shape for caffe\n return crop_img,score\n elif crop_img.ndim == 2:\n bbox = [bb.left(),bb.top(),bb.right(),bb.bottom()]\n return crop_img[:,:,np.newaxis], bbox\n else:\n raise Exception(\"wrong dimension\")",
"def alignment(src_img, src_pts, output_size=(96, 112)):\n ref_pts = [\n [30.2946, 51.6963],\n [65.5318, 51.5014],\n [48.0252, 71.7366],\n [33.5493, 92.3655],\n [62.7299, 92.2041],\n ]\n src_pts = np.array(src_pts).reshape(5, 2)\n s = np.array(src_pts).astype(np.float32)\n r = np.array(ref_pts).astype(np.float32)\n tfm = get_similarity_transform_for_cv2(s, r)\n face_img = cv2.warpAffine(src_img, tfm, output_size)\n return face_img",
"def align(img, left_eye, right_eye):\n left_eye_x, left_eye_y = left_eye\n right_eye_x, right_eye_y = right_eye\n point_3rd, direction = (left_eye, -1) if left_eye_y > right_eye_y else (right_eye, 1)\n\n # np.linalg.norm is being used for euclidean distance\n a = np.linalg.norm(np.array(left_eye) - np.array(point_3rd))\n b = np.linalg.norm(np.array(right_eye) - np.array(point_3rd))\n c = np.linalg.norm(np.array(right_eye) - np.array(left_eye))\n\n if b != 0 and c != 0:\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n angle = (angle * 180) / math.pi\n if direction == -1:\n angle = 90 - angle\n img = Image.fromarray(img)\n img = np.array(img.rotate(direction * angle))\n\n return img",
"def draw_annotations(self, image, results):\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n for face_landmarks in results.multi_face_landmarks:\n # draw face landmark net\n mp_drawing.draw_landmarks(\n image=image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACE_CONNECTIONS,\n landmark_drawing_spec=self.drawing_spec,\n connection_drawing_spec=self.drawing_spec)\n cv2.imshow('MediaPipe FaceMesh', image)",
"def _calibrate_landmarks(self, bboxes, landmarks, align=False):\n\n x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]\n w = x2 - x1 + 1.0\n h = y2 - y1 + 1.0\n w = torch.unsqueeze(w, 1)\n h = torch.unsqueeze(h, 1)\n\n translation = torch.cat([w]*5 + [h]* 5, 1).float() * landmarks\n if align:\n landmarks = torch.ceil(translation).int()\n else:\n landmarks = torch.stack([bboxes[:, 0]] * 5 + [bboxes[:, 1]] * 5, 1) + torch.round(translation).int()\n return landmarks",
"def imagesAlign(I,Iref,fillval=np.nan,trfm_type='similarity',\n vCells=1,hCells=1,rszFac=1,verbose=False,\n minArea = np.power(2, 11), applyWarp=True):\n if len(I.shape)==3:\n I1=sh.rgb2gray(I)\n else:\n I1=I\n \n if len(Iref.shape)==3:\n Iref1=sh.rgb2gray(Iref)\n else:\n Iref1=Iref\n\n WARN_USER, ORIG_DTYPE = False, None\n if I1.dtype != 'float32':\n WARN_USER, ORIG_DTYPE = True, I1.dtype\n I1 = I1.astype('float32')\n if Iref1.dtype != 'float32':\n WARN_USER, ORIG_DTYPE = True, Iref1.dtype\n Iref1 = Iref1.astype('float32')\n if WARN_USER:\n print \"(Info) imagesAlign was called with input image dtype={0}. \\\nimagesAlign expects image dtype='float32' (Also, intensity vals in range \\\n[0.0,1.0]. The image dtype conversion was \\\nautomatically done, but this slows down the computation a little. Consider \\\ntrying to work in 'float32' in the first place if convenient for a little \\\nspeed boost.\".format(ORIG_DTYPE)\n\n t1 = time.clock()\n # check if more than one vertical and horizontal cell\n if (vCells>1) and (hCells>1):\n I2=imagesAlign(I1,Iref1,trfm_type=trfm_type, minArea=minArea)[1];\n Iout=np.copy(Iref1);\n pFac=.25;\n vStep=math.ceil(I1.shape[0]/vCells); vPad=pFac*vStep;\n hStep=math.ceil(I1.shape[1]/hCells); hPad=pFac*vStep;\n for i in range(vCells):\n for j in range(hCells):\n # 2. chop + pad each cell then align\n # 3. stitch back together\n i1=i*vStep; i1=max(i1,0);\n i2=(i+1)*vStep; i2=min(i2,I1.shape[0]-1);\n j1=j*hStep; j1=max(j1,0);\n j2=(j+1)*hStep; j2=min(j2,I1.shape[1]-1);\n\n i1p=i1-vPad; i1p=max(i1p,0);\n i2p=i2+vPad; i2p=min(i2p,I1.shape[0]-1);\n j1p=j1-hPad; j1p=max(j1p,0);\n j2p=j2+hPad; j2p=min(j2p,I1.shape[1]-1);\n \n Ic=I2[i1p:i2p,j1p:j2p]\n Irefc=Iref1[i1p:i2p,j1p:j2p]\n (H,err)=imagesAlign1(Ic,Irefc,trfm_type=trfm_type,verbose=verbose, minArea=minArea)\n IcT=sh.imtransform(Ic, H)\n Iout[i1:i2,j1:j2]=IcT[i1-i1p:(i1-i1p)+(i2-i1),j1-j1p:(j1-j1p)+(j2-j1)]\n\n return (np.eye(3),Iout,-1)\n\n if rszFac==1:\n t0 = time.clock()\n (H,err)=imagesAlign1(I1,Iref1,trfm_type=trfm_type,verbose=verbose, minArea=minArea)\n if verbose:\n print 'alignment time:',time.clock()-t0,'(s)'\n\n #print 'alignment time:',time.clock()-t0,'(s)' \n else:\n I1=sh.fastResize(I1,rszFac)\n Iref1=sh.fastResize(Iref1,rszFac)\n S=np.eye(3, dtype=np.float32);\n S[0,0]=1/rszFac; S[1,1]=1/rszFac;\n H0=np.eye(3, dtype=np.float32)\n H0=np.dot(np.dot(np.linalg.inv(S),H0),S)\n t0 = time.clock()\n (H,err)=imagesAlign1(I1,Iref1,H0=H0,trfm_type=trfm_type,verbose=verbose, minArea=minArea)\n if verbose:\n print 'alignment time:',time.clock()-t0,'(s)'\n\n #print 'alignment time:',time.clock()-t0,'(s)'\n H=np.dot(S,np.dot(H,np.linalg.inv(S)))\n\n #print \"overall time: \", time.clock() - t1\n if applyWarp:\n return (H,sh.imtransform(I,H,fillval=fillval),err)\n else:\n return (H,err)",
"def align_and_crop_face(self, img, rect_list, desired_width, desired_height):\n \n for j, det in enumerate(rect_list):\n shape = self.align_predictor(img, det)\n left_eye = extract_left_eye_center(shape)\n right_eye = extract_right_eye_center(shape)\n M = get_rotation_matrix(left_eye, right_eye)\n\n rotated_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_CUBIC)\n cropped = crop_image(rotated_img, det)\n try:\n \n cropped_res = cv2.resize(cropped, (desired_width, desired_height))\n except:\n print(det)\n print(img.shape)\n cropped_res = cv2.resize(rotated_img,(desired_width, desired_height))\n cropped_img = cropped_res[:, :, ::-1]\n\n return cropped_img, left_eye, right_eye",
"def load_aligned(self, image, size=256, padding=48, align_eyes=False):\n self.aligned[\"size\"] = size\n self.aligned[\"padding\"] = padding\n self.aligned[\"align_eyes\"] = align_eyes\n self.aligned[\"matrix\"] = get_align_mat(self, size, align_eyes)\n self.aligned[\"face\"] = AlignerExtract().transform(\n image,\n self.aligned[\"matrix\"],\n size,\n padding)",
"def image_bbox(img, bbox, label=None, score=None, ax=None):\n\n label_names = list(labels) + ['bg']\n ax = image_vis(img, ax=ax)\n\n\n # If there is no bounding box to display, visualize the image and exit.\n if len(bbox) == 0:\n return ax\n\n for i, bb in enumerate(bbox):\n xy = (bb[1], bb[0])\n height = bb[2] - bb[0]\n width = bb[3] - bb[1]\n ax.add_patch(plt.Rectangle(\n xy, width, height, fill=False, edgecolor='red', linewidth=2))\n\n caption = list()\n\n if label is not None and label_names is not None:\n lb = label[i]\n caption.append(label_names[lb])\n if score is not None:\n sc = score[i]\n caption.append('{:.2f}'.format(sc))\n\n if len(caption) > 0:\n ax.text(bb[1], bb[0],\n ': '.join(caption),\n style='italic',\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 0})\n return ax",
"def normalize_face(image: Image, face):\n y, x, h, w = face['box']\n cropped_face = image[x:x + w, y:y + h]\n\n left_eye = face['keypoints']['left_eye']\n right_eye = face['keypoints']['right_eye']\n aligned_face = align(cropped_face, left_eye, right_eye)\n aligned_face = cv2.resize(aligned_face, (160, 160))\n\n return np.expand_dims(aligned_face / 255, axis=0)",
"def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst",
"def align_single_image(filename, crop_list, bead_channel='488',\n all_channels=_allowed_colors, \n single_im_size=_image_size,\n num_buffer_frames=_num_buffer_frames,\n num_empty_frames=_num_empty_frames,\n illumination_corr=True, \n correction_folder=_correction_folder,\n ref_filename=None, ref_all_channels=None,\n ref_centers=None, ref_ims=None,\n th_seed=100, th_seed_per=98, use_percentile=False,\n max_num_seeds=None, min_num_seeds=50, \n fitting_kwargs={}, \n use_fft=True, fft_filt_size=0, \n match_distance_th=2., \n check_paired_cts=True,\n outlier_sigma=1.5,\n good_drift_th=1.,\n return_target_ims=False,\n return_paired_cts=False,\n verbose=False, \n ):\n from scipy.spatial.distance import cdist, pdist, squareform\n from ..io_tools.load import correct_fov_image\n from ..alignment_tools import fft3d_from2d\n from ..spot_tools.fitting import get_centers\n ## check inputs\n # check crop_list:\n if len(crop_list) < 2:\n raise IndexError(f\"crop_list should at least have 2 elements\")\n elif len(crop_list[0]) != len(single_im_size):\n raise IndexError(\"dimension of crop_list should match single_im_size\")\n # check channels:\n _all_channels = [str(_ch) for _ch in all_channels]\n # check bead_channel\n _bead_channel = str(bead_channel)\n if _bead_channel not in all_channels:\n raise ValueError(f\"bead channel {_bead_channel} not exist in all channels given:{_all_channels}\")\n # check ref_all_channels\n if ref_all_channels is None:\n _ref_all_channels = _all_channels\n else:\n _ref_all_channels = [str(_ch) for _ch in ref_all_channels]\n\n # check filename file type \n if isinstance(filename, np.ndarray):\n if verbose:\n print(f\"-- start aligning given image to\", end=' ')\n _bead_im = filename\n if np.shape(_bead_im) != tuple(single_im_size):\n raise IndexError(f\"shape of target image:{np.shape(_bead_im)} and single_im_size:{single_im_size} doesn't match!\")\n elif isinstance(filename, str):\n if verbose:\n print(f\"-- start aligning file {filename} to\", end=' ')\n if not os.path.isfile(filename) or filename.split('.')[-1] != 'dax':\n raise IOError(f\"input filename: {filename} should be a .dax file!\")\n _bead_im = correct_fov_image(filename, [_bead_channel], \n single_im_size=single_im_size, \n all_channels=all_channels,\n num_buffer_frames=num_buffer_frames, \n num_empty_frames=num_empty_frames, \n calculate_drift=False, \n correction_folder=correction_folder,\n illumination_corr=illumination_corr,\n bleed_corr=False, chromatic_corr=False,\n z_shift_corr=False, hot_pixel_corr=True,\n normalization=False, return_drift=False,\n verbose=False,\n )[0]\n else:\n raise IOError(f\"Wrong input file type, {filename} should be .dax file or np.ndarray\")\n # crop target image:\n _tar_ims = [_bead_im[tuple([slice(_s[0], _s[-1]) for _s in _c])] for _c in crop_list]\n # get centers\n _tar_ct_list = [get_centers(_im, th_seed=th_seed,\n th_seed_per=th_seed_per, \n use_percentile=use_percentile,\n max_num_seeds=max_num_seeds, \n min_num_seeds=min_num_seeds,\n **fitting_kwargs,\n ) for _im in _tar_ims]\n\n ## acquire references\n # case 1: ref_centers and ref_ims are given:\n if ref_centers is not None and ref_ims is not None:\n if verbose:\n print(f\"given ref_centers and images, n={len(ref_centers)}\")\n if len(ref_centers) != len(ref_ims):\n raise IndexError(f\"length of ref_centers:{len(ref_centers)} should match length of ref_ims:{len(ref_ims)}\")\n elif len(crop_list) != len(ref_centers):\n raise IndexError(f\"length of crop_list:{len(crop_list)} should match length of 
ref_centers:{len(ref_centers)}\")\n _ref_ims = ref_ims\n _ref_ct_list = ref_centers\n # case 2: ref_filename is given:\n elif ref_filename is not None:\n if isinstance(ref_filename, np.ndarray):\n if verbose:\n print(f\"ref image directly given\")\n _ref_bead_im = ref_filename\n elif isinstance(ref_filename, str):\n if verbose:\n print(f\"ref_file: {ref_filename}\")\n _ref_bead_im = old_correct_fov_image(ref_filename, [_bead_channel], \n single_im_size=single_im_size, \n all_channels=all_channels,\n num_buffer_frames=num_buffer_frames,\n num_empty_frames=num_empty_frames, \n calculate_drift=False, \n correction_folder=correction_folder,\n illumination_corr=illumination_corr,\n warp_image=False,\n bleed_corr=False, \n chromatic_corr=False,\n z_shift_corr=False, \n hot_pixel_corr=True,\n normalization=False, \n return_drift=False,\n verbose=False,\n )[0][0]\n _ref_ims = []\n for _c in crop_list:\n _crop = tuple([slice(int(_s[0]), int(_s[-1])) for _s in _c])\n _ref_ims.append(_ref_bead_im[_crop])\n # collect ref_ct_list\n from ..spot_tools.fitting import select_sparse_centers\n _ref_ct_list = []\n for _im in _ref_ims:\n _cand_cts = get_centers(_im, th_seed=th_seed,\n th_seed_per=th_seed_per, \n use_percentile=use_percentile,\n max_num_seeds=max_num_seeds, \n min_num_seeds=min_num_seeds,\n **fitting_kwargs,\n )\n _ref_ct_list.append(select_sparse_centers(_cand_cts, \n distance_th=match_distance_th))\n else:\n raise ValueError(f\"ref_filename or ref_centers+ref_ims should be given!\")\n \n # Do alignment\n _drift_list = []\n _paired_tar_ct_list = []\n _paired_ref_ct_list = []\n # iterate until find good drifts or calculated all cropped images\n while len(_drift_list) < len(crop_list):\n # get image\n _cid = len(_drift_list)\n # calculate drift\n _drift, _paired_tar_cts, _paired_ref_cts = align_beads(\n _tar_ct_list[_cid], _ref_ct_list[_cid], \n _tar_ims[_cid], _ref_ims[_cid],\n use_fft=use_fft, \n fft_filt_size=fft_filt_size, \n match_distance_th=match_distance_th, \n check_paired_cts=check_paired_cts,\n outlier_sigma=outlier_sigma,\n return_paired_cts=True,\n verbose=verbose,\n )\n # judge whether this matching is successful\n if len(_paired_tar_cts) == 0:\n _drift = np.inf * np.ones(len(single_im_size))\n # append\n _drift_list.append(_drift)\n _paired_tar_ct_list.append(_paired_tar_cts)\n _paired_ref_ct_list.append(_paired_ref_cts)\n\n # check if matched well: \n if len(_drift_list) >=2:\n if (cdist(_drift[np.newaxis,:], _drift_list[:-1])[0] < good_drift_th).any():\n break\n ## select drifts\n _dists = squareform(pdist(_drift_list))\n _dists[np.arange(len(_dists)), np.arange(len(_dists))] = np.inf\n _inds = np.unravel_index(np.argmin(_dists, axis=None), _dists.shape)\n # get the two that are closest\n if _dists[_inds] > good_drift_th:\n _success_flag = False\n print(f\"-- Suspicious Failure: selcted drifts: {_drift_list[_inds[0]]}, {_drift_list[_inds[1]]} are not close enough.\")\n else:\n _success_flag = True\n # extract _final_drift and return\n _final_drift = np.nanmean([_drift_list[_inds[0]], _drift_list[_inds[1]]], axis=0)\n\n # return\n _return_args = [_final_drift, _success_flag]\n if return_target_ims:\n _return_args.append(_tar_ims)\n if return_paired_cts:\n _return_args.append(_paired_tar_ct_list)\n _return_args.append(_paired_ref_ct_list)\n \n return tuple(_return_args)",
"def _perform_alignment(self, formulatrix_image, beamline_image, formulatrix_points):\n aligner = ImageAligner(formulatrix_image, beamline_image, self._config_align, self._config_detector)\n aligned_images = aligner.align()\n scaled_formulatrix_points = aligner.scale_points(formulatrix_points)\n self._log_alignment_status(aligned_images)\n\n return aligned_images, scaled_formulatrix_points",
"def show_bboxes(img, bounding_boxes=None, facial_landmarks=[]):\n\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n# for b in bounding_boxes:\n# draw.rectangle([\n# (b[0], b[1]), (b[2], b[3])\n# ], outline='white')\n\n for p in facial_landmarks:\n for i in range(106):\n draw.ellipse([\n (p[i*2] - 1.0, p[2*i + 1] - 1.0),\n (p[i*2] + 1.0, p[2*i+1] + 1.0)\n ], outline='blue')\n font = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text([p[2*i], p[2*i+1]], str(i), font=font)\n\n return img_copy",
"def highlight_faces(image, faces, output_filename, terminal_print=True):\n im = Image.open(image)\n draw = ImageDraw.Draw(im)\n\n for (face_ind, face) in enumerate(faces):\n\n # compute emotions\n list_emotion_scores = [face.sorrow_likelihood,\n face.joy_likelihood,\n face.anger_likelihood,\n face.surprise_likelihood]\n\n list_emotions = [\"SORROW\",\n \"JOY\",\n \"ANGER\",\n \"SURPRISE\"]\n\n string_label = generate_string_label(list_emotions, list_emotion_scores)\n\n if terminal_print:\n # print emotions on terminal\n print(\"\\n\")\n print(\"-----------------------\")\n print(\"Face {}\".format(face_ind))\n\n for (crrt_emotion, crrt_score) in zip(list_emotions, list_emotion_scores):\n print(\"{}: {}\".format(crrt_emotion, crrt_score))\n\n print(string_label)\n\n print(\"-----------------------\")\n\n # draw box around face\n box = [(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices]\n draw.line(box + [box[0]], width=5, fill='#00ff00')\n\n # add legend in the face box\n fontsize = 35\n font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n\n offset = 5\n heigth_text = 40\n length_text = box[1][0] - box[0][0] - 2 * offset\n draw.rectangle(((box[0][0] + offset, box[0][1] + offset), (box[0][0] + length_text + offset, box[0][1] + heigth_text + offset)), fill=\"black\")\n draw.text((box[0][0] + offset, box[0][1] + offset), string_label, font=font, fill=(255, 255, 255, 255))\n\n # highlight significant points\n point_nbr = 0\n half_width_sqare = 2\n\n list_point_coords = []\n\n for point in face.landmarks:\n x = point.position.x\n y = point.position.y\n\n list_point_coords.append((x, y))\n\n draw.rectangle(((x - half_width_sqare, y - half_width_sqare), (x + half_width_sqare, y + half_width_sqare)), fill=\"red\")\n\n # fontsize = 15\n # font = ImageFont.truetype(\"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", fontsize)\n # draw.text((x, y), str(point_nbr), font=font, fill=(255, 255, 0, 0))\n\n point_nbr += 1\n\n all_lists_points = [\n [10, 11, 9],\n [10, 12, 11],\n [14, 7, 13, 15],\n [7, 6],\n [14, 6, 13, 7, 14],\n [16, 17, 18, 19],\n [21, 22, 23, 24],\n [30, 6],\n ]\n\n for crrt_list_points in all_lists_points:\n draw_line_list_points(draw, crrt_list_points, list_point_coords)\n\n draw_line_list_points(draw, [2, 26, 3], list_point_coords, close=False)\n draw_line_list_points(draw, [4, 27, 5], list_point_coords, close=False)\n draw_line_list_points(draw, [10, 8, 11], list_point_coords, close=False)\n\n im.save(output_filename)",
"def correctMisalign(img, marker, center, compus, scope=100):\n\n markerCenter = np.asarray(marker.shape)//2\n guide = np.asarray([center, compus])\n landmark = np.zeros(guide.shape)\n \n #To run template matching to finder markers\n result = cv2.matchTemplate(img, marker, 0)\n result = (1-result/np.max(result))*255\n M = np.float32([\n [1, 0, markerCenter[1]] ,\n [0, 1, markerCenter[0]] ])\n resultPadded = cv2.warpAffine(result, M, (width, height))\n \n mask = np.zeros(resultPadded.shape)\n\n for i in range(0, len(guide)):\n mask[:] = 0\n mask_xfr = max(0, guide[i,1]-(scope+markerCenter[0]))\n mask_xto = min(width, guide[i,1]+(scope+markerCenter[0]))\n mask_yfr = max(0, guide[i,0]-(scope+markerCenter[1]))\n mask_yto = min(width, guide[i,0]+(scope+markerCenter[1]))\n mask[mask_xfr:mask_xto, mask_yfr:mask_yto] = 255\n min_val, max_val, min_loc, landmark[i,:] = \\\n cv2.minMaxLoc(np.multiply(resultPadded, mask))\n \n #To shift image\n shift = guide[0] - landmark[0] \n M = np.float32([\n [1, 0, shift[0]] ,\n [0, 1, shift[1]] ])\n imgShifted = cv2.warpAffine(img, M, (width, height))\n \n #To rescale & rotate image\n radius = np.linalg.norm(landmark[1,:] - landmark[0,:])\n scale = np.linalg.norm(guide[1,:] - guide[0,:])/radius\n cos = (landmark[1,0]-landmark[0,0])/radius\n theta = np.arccos(cos) / (2 * np.pi) * 360\n M = cv2.getRotationMatrix2D((guide[0,0],guide[0,1]),-theta,scale)\n imgModified = cv2.warpAffine(imgShifted,M,(width,height))\n return imgModified\n\n #}}}",
"def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image",
"def align_faces(frames):\n align = get_entry('recognition')['face alignment']\n provider = align['provider']\n\n logger = logging.getLogger('alignment')\n logger.setLevel(logging.INFO)\n\n logger.info('alignment provider: ' + provider)\n if provider == 'opencv':\n from faceR.alignment import opencv as aligner\n elif provider == 'dlib':\n import dlib_al as aligner\n elif provider == 'mtcnn':\n from faceR.alignment import mtcnn as aligner\n elif provider == 'face-boxes':\n from faceR.alignment import face_boxes as aligner\n elif provider == 'adas':\n from faceR.alignment import adas as aligner\n\n return aligner.align_faces(frames, align[provider])"
]
| [
"0.6571622",
"0.65087074",
"0.6424263",
"0.63643926",
"0.62261367",
"0.62224346",
"0.6209903",
"0.60258204",
"0.6018538",
"0.58878434",
"0.5757735",
"0.56468785",
"0.552677",
"0.5469069",
"0.53907555",
"0.5254516",
"0.5184894",
"0.5080917",
"0.50786936",
"0.50399625",
"0.5037215",
"0.50154394",
"0.49946553",
"0.49753574",
"0.4925867",
"0.49086583",
"0.4892576",
"0.48759267",
"0.48668587",
"0.48511785"
]
| 0.7130995 | 0 |
Return a bs4 object containing all the tags in the document at the URL | def _grab_tags(self, url):
a = self._api_request(url)
return bs4.BeautifulSoup(a,features="html.parser") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_soup(url):\n return BeautifulSoup(requests.get(url).content, 'lxml')",
"def getSoup(url):\n return BeautifulSoup(getHtml(url), 'lxml')",
"def find_tag_urls(r):\n parser = MyHTMLParser()\n parser.feed(r)\n return parser.url_list",
"def request(self, url):\r\n\r\n req = self.get(url)\r\n soup = BeautifulSoup(req.content, \"lxml\")\r\n return soup",
"def get_soup(url):\n\tr = requests.get(url)\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"lxml\")\n\treturn soup",
"def get_soup(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup",
"def request(url):\n response=requests.get(url)\n soup=BeautifulSoup(response.content,\"lxml\")\n return soup",
"def get_soup(url: str):\n response = requests.get(url)\n\n return BeautifulSoup(response.content, \"html.parser\")",
"def get_soup(url):\r\n page=requests.get(url)\r\n soup = BeautifulSoup(page.text.encode(\"utf-8\"), 'html.parser')\r\n return soup",
"def get_tags(self):\r\n self.tags = []\r\n for tag in self.soup.find_all('dl'):\r\n name = tag.dt.contents[0]\r\n\r\n # getting info about tag\r\n info = ''\r\n for p in tag.dd.find_all('p'):\r\n info += p.getText() + ' '\r\n\r\n # getting reference link and code snippet\r\n a_tags = tag.dd.find_all('a')\r\n example_id = a_tags[1]['href'].replace('#', '') # code snippet\r\n example = self.soup.find('div', {'id': example_id}).getText()\r\n\r\n # url reference (from HTML5Doctor if exists)\r\n reference = ''\r\n try:\r\n reference = tag.dt.span.a['href'] # url for HTML5Doctor\r\n except:\r\n reference = a_tags[0]['href'] # url for W3C\r\n\r\n reference = 'http://html5doctor.com/element-index/#' + name\r\n new_tag = Tag(name, info, reference, example)\r\n self.tags.append(new_tag)\r\n logger.info('Tag parsed: %s' % new_tag.name)",
"def make_soup(url):\r\n htmlFile = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(htmlFile)\r\n return soup",
"def _get_soup_object(url: str) -> bs4.BeautifulSoup:\n request_result=requests.get(url)\n soup = bs4.BeautifulSoup(request_result.text, \"html.parser\")\n return soup",
"def get_document(url):\n req = requests.get(url)\n doc = BeautifulSoup(req.content, \"html.parser\")\n return doc",
"def get_soup(url: str) -> BeautifulSoup:\n html = get_html(url)\n soup = BeautifulSoup(html, 'lxml')\n return soup",
"def get_soup(url):\n\tresponse = urlopen(url)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup",
"def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html",
"def getLinks(self, url, tag = \"a\", attr = \"href\"): \n try: \n response = open(self.filename(url)).read() #read from the file\n except IOError:\n raise IOError\n parsed_url = urlparse(url)\n domain = parsed_url[0] + '://' + parsed_url[1]\n \n try:\n soup = BeautifulSoup.BeautifulSoup(response)\n l = soup.findAll(tag, href = True)\n except Exception:\n raise Exception\n links = []\n \n for tag in l:\n link = str(tag[attr]) #convert the link to a string\n purl = urlparse(link)\n if purl[1] == '': #if the link is relative make it absolute\n link = domain+link\n #check if the extension is that of a document \n if splitext(link)[1] in self._invalidExt: \n self.docs_list.append(link)\n \n #append only the html link\n links.append(link)\n \n \n \n return list(set(links)) #returns only distinct links",
"def get_soup(url):\n opener = urllib2.build_opener()\n request = urllib2.Request(url);\n request.add_header('User-Agent','Mozilla/6.0 (Windows NT 6.2; WOW64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1');\n data = opener.open(request).read(); \n return BeautifulSoup(data);",
"def get_tags(html_soup):\n \n tags = html_soup.findAll('a', attrs = {\"class\" : \"tag\"})\n all_tags = []\n for i in tags:\n all_tags.append(i.get_text())\n \n return all_tags",
"def get_html_parser(url):\n response = requests.get(url)\n return BeautifulSoup(response.content, 'html.parser')",
"def _get_tags(self, res):\n doc = pq(res.content)\n return (doc('[property=\"og:title\"]').attr('content'),\n doc('[property=\"og:image\"]').attr('content'),\n doc('[name=\"description\"]').attr('content'))",
"def make_file_soup(self):\n soup = BeautifulSoup(self.html, 'html.parser')\n return soup",
"def page_soup(url):\n html = requests.get(url).text\n return bs(html, 'html.parser')",
"def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup",
"def load_data(url: str):\n\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup",
"def get_article(url):\n \n r = requests.get(url) \n html_soup = BeautifulSoup(r.content, 'lxml')\n return html_soup",
"def _get_soup(self, url):\n\n # generate a random header \n headers = {'User-Agent': self._random_user_agent()}\n # send a request and get the soup\n response = requests.get(url, headers=headers)\n results = response.content\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n return soup",
"def soup(url):\n handle = ''\n max_tries = 10\n for i in range(max_tries):\n try:\n handle = urlopen(url)\n handle = handle.read()\n break\n except:\n logging.exception('urlopen failed (attempt %d)', i + 1)\n if i == max_tries - 1:\n logging.error('the maximum urlopen attempts have been reached')\n raise\n time.sleep(1)\n\n s = BeautifulSoup(handle)\n return s",
"def make_soup(self):\n req = urllib.request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n f = urllib.request.urlopen(self.html)\n soupdata = BeautifulSoup(f, \"html.parser\")\n return soupdata",
"def get_url_soup(url):\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Paper request failed '%s'\" % url)\n return get_soup(r.content)"
]
| [
"0.7076313",
"0.70216066",
"0.69696605",
"0.6889851",
"0.6826193",
"0.68196535",
"0.6767906",
"0.67462474",
"0.67079365",
"0.67003834",
"0.66789085",
"0.66782737",
"0.6670707",
"0.66667145",
"0.66216266",
"0.6597798",
"0.65232784",
"0.64535815",
"0.64357543",
"0.6435498",
"0.6426641",
"0.64046687",
"0.63789827",
"0.6362027",
"0.6353759",
"0.63476354",
"0.6329162",
"0.6290426",
"0.6284854",
"0.628321"
]
| 0.7892951 | 0 |
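As an illustrative aside to the `_grab_tags` record above — a minimal, self-contained sketch (an assumption, not part of the dataset) that fetches the page with `requests` in place of the class's `_api_request` helper and returns the same kind of BeautifulSoup object; the function name and the example URL are hypothetical:

import requests
import bs4

def grab_tags(url, auth=None, verify=True):
    # Fetch the raw HTML for the URL and parse it with the html.parser backend,
    # mirroring bs4.BeautifulSoup(..., features="html.parser") in the record above.
    response = requests.get(url, auth=auth, verify=verify)
    response.raise_for_status()
    return bs4.BeautifulSoup(response.text, features="html.parser")

# Example usage: collect every anchor tag's href on a page.
# soup = grab_tags("https://jenkins.example.com/job/demo/1/")
# hrefs = [a["href"] for a in soup.find_all("a", href=True)]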
Gets the workspace zip for the specific build URL by parsing HTML. The API has no way of retrieving the workspace zip, AFAIK | def get_workspace_zip(self):
workspace_api = '/ws/'
# print("Checking Workspaces For: {}".format(self.url))
workspace_elements = self._grab_tags(self.url + workspace_api)
workspace_links = []
root_domain = urllib.parse.urlparse(self.url).scheme + '://' + urllib.parse.urlparse(self.url).netloc
for link in workspace_elements.find_all(name='a', href=True):
if '/execution/node/' in link['href']:
workspace_links.append(link['href'])
if len(workspace_links) > 0:
for workspace_link in workspace_links:
single_workspace_elements = self._grab_tags(root_domain + workspace_link)
for link in single_workspace_elements.find_all(name='a', href=True):
if '/*zip*/' in link['href']:
# URL returned as relative link, must reconstruct
print("FOUND ZIP: {}".format(root_domain + workspace_link + link['href']))
return root_domain + workspace_link + link['href'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_project(path: str):\n zip_path = os.path.join(path, 'Simulation.Machine.V1.zip')\n\n # Download zip file with project\n requested_file = requests.get(URL)\n with open(zip_path, 'wb') as f:\n f.write(requested_file.content)\n\n # Extract contents\n with ZipFile(zip_path, 'r') as zip_obj:\n zip_obj.extractall(path)\n\n # Remove file\n os.remove(zip_path)",
"def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename",
"def read_zip_file():\n with open(os.path.join(DIST_DIR, \"build.zip\"), \"rb\") as zip_file:\n return zip_file.read()",
"def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)",
"def fetch_executable_from_jenkins():\n\n base_job_url = os.environ.get('JENKINS_JOB_URL')\n if not base_job_url:\n error('Jenkins job URL for the builder is not specified.')\n\n build_json = json.loads(requests.get('%s/api/json'\n % base_job_url).text)\n last_build = build_json['lastCompletedBuild']['number']\n print 'Last build ID: %d' % last_build\n\n job_url = '%s/%d' % (base_job_url, last_build)\n last_build_json = json.loads(requests.get('%s/api/json'\n % job_url).text)\n if not last_build_json['artifacts']:\n error('No artifacts found!')\n\n artifacts_deb = [artifact for artifact in\n last_build_json['artifacts'] if '.dmg'\n in artifact['fileName']]\n artifact_url = '%s/artifact/%s' % (job_url,\n artifacts_deb[0]['relativePath'])\n file_name = artifacts_deb[0]['fileName']\n print 'Tribler installer url: %s' % artifact_url\n\n # Download the file\n file_path = os.path.join(os.environ.get('WORKSPACE'), file_name)\n download_response = requests.get(artifact_url, stream=True)\n download_response.raise_for_status()\n\n with open(file_path, 'wb') as handle:\n for block in download_response.iter_content(1024):\n handle.write(block)\n\n return file_path",
"def fetch(self) -> None:\n archive_path = os.path.join(self._output_dir, self._archive_name)\n self._download_file(self._parsed_url.original_url, archive_path)\n try:\n with zipfile.ZipFile(archive_path, \"r\") as zip_file:\n zip_file.extractall(path=self._output_dir)\n except zipfile.BadZipfile:\n raise REANAFetcherError(\"The provided zip file is not valid\")\n\n os.remove(archive_path)\n\n if not self._discover_workflow_specs():\n top_level_entries = [\n os.path.join(self._output_dir, entry)\n for entry in os.listdir(self._output_dir)\n ]\n # Some zip archives contain a single directory with all the files.\n if len(top_level_entries) == 1 and os.path.isdir(top_level_entries[0]):\n top_level_dir = top_level_entries[0]\n # Move all entries inside the top level directory\n # to the output directory.\n for entry in os.listdir(top_level_dir):\n shutil.move(os.path.join(top_level_dir, entry), self._output_dir)\n os.rmdir(top_level_dir)",
"def extract_web_archive(url, apath, ffilter=[]):\n\n download(url, apath)\n output_files = extract(apath, ffilter=ffilter)\n\n return output_files",
"def fetch_exe_from_jenkins():\n base_job_url = os.environ.get(\"JENKINS_JOB_URL\")\n if not base_job_url:\n print \"Jenkins job URL for the builder is not specified.\"\n sys.exit(-1)\n\n build_json = json.loads(requests.get(\"%s/api/json\" % base_job_url).text)\n last_build = build_json['lastCompletedBuild']['number']\n print \"Last build ID: %d\" % last_build\n\n job_url = '%s/%d' % (base_job_url, last_build)\n last_build_json = json.loads(requests.get(\"%s/api/json\" % job_url).text)\n if len(last_build_json['artifacts']) == 0:\n error(\"No artifacts found!\")\n\n artifact_url = \"%s/artifact/%s\" % (job_url, last_build_json['artifacts'][0]['relativePath'])\n file_name = last_build_json['artifacts'][0]['fileName']\n print \"Tribler installer url: %s\" % artifact_url\n\n # Download the file\n file_path = os.path.join(os.environ.get('WORKSPACE'), file_name)\n download_response = requests.get(artifact_url, stream=True)\n download_response.raise_for_status()\n\n with open(file_path, 'wb') as handle:\n for block in download_response.iter_content(1024):\n handle.write(block)\n\n return file_path",
"def _download_project(name, apikey):\n payload = {'apikey': apikey, 'project': name, 'version': 'portia'}\n r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload)\n return r.content",
"def get_url_and_parse():\n \n # TODO: dynamic site_code\n #global site_code\n #\n #if not site_code:\n # site_code = get_site_code(city_name)\n\n urllib.request.urlretrieve(\n \"https://dd.weather.gc.ca/citypage_weather/xml/AB/s0000661_e.xml\", \"s0000661_e.xml\")\n tree = ET.parse(\"s0000661_e.xml\")\n return tree.getroot()",
"def _query_website(q):\n\n url = _cfg[\"request_url\"]\n print('Interrogating {0}...'.format(url))\n\n print('Request...', end='')\n if py3k:\n req = request.Request(url, q.encode('utf8'))\n print('done.')\n print(\"Reading content...\", end='')\n c = urlopen(req).read().decode('utf8')\n else:\n c = urlopen(url, q).read()\n print('done.')\n\n try:\n fname = re.compile('<a href=\".*\">').findall(c)[0][9:-2]\n except Exception as e:\n print(e)\n raise RuntimeError(\"Something went wrong\")\n\n furl = _cfg['download_url'] + fname\n\n print('Downloading data...{0}...'.format(furl), end='')\n if py3k:\n req = request.Request(furl)\n bf = urlopen(req)\n else:\n bf = urlopen(furl)\n r = bf.read()\n print(\"done.\")\n\n\n typ = file_type(r, stream=True)\n # force format\n if (typ is None) & ('zip' in fname):\n typ = 'zip'\n if typ is not None:\n print(r[:100], type(r), bytes(r[:10]))\n print(\"decompressing archive (type={0})...\".format(typ), end='')\n if 'zip' in typ:\n r = _extract_zip(bytes(r))\n else:\n r = zlib.decompress(bytes(r), 15 + 32)\n print(\"done.\")\n\n return r",
"def download(self, url):\n try:\n logging.info(self.log_format((\"downloading \" + url)))\n webFile = urllib.urlopen(url)\n localFile = open(self.paths['workspace'] + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n logging.error(self.log_format((\"could not get url \" + url)))",
"def _downloadWGS(WGSurl) :\n gzipContent = urllib2.urlopen(WGSurl).read()\n gzipFile = StringIO.StringIO(gzipContent)\n o = gzip.GzipFile(fileobj = gzipFile)\n output = None\n try :\n output = o.read()\n except IOError as e:\n print(e)\n o.close()\n return output",
"def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()",
"def extract_src(session, file_name, submission_num):\n # Gets the HTML page for the submission page\n response = session.get(\"https://dmoj.ca/src/\" + submission_num + \"/raw\")\n with open(file_name, \"w\") as f:\n f.write(response.text)",
"def __download_web(self):\n page = requests.get(self.url)\n\n if page.status_code == 200:\n return BeautifulSoup(page.content, \"html.parser\")",
"def pull(self, build_id, file_path):\n url = f\"{self.base_url}/pull\"\n payload = {\"build_id\": build_id}\n response = requests.get(url, json=payload, headers=self.headers)\n if response.headers[\"Content-Type\"] == \"text/html\":\n return response.text\n else:\n with open(file_path, 'wb') as f:\n f.write(response.content)\n\n return \"Success\"",
"def getzip(url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if file_exists(done_file):\n print('{} already downloaded and extracted; skipping. To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n print('Downloading {} as {}.'.format(url, zipfile))\n urlretrieve(url, zipfile)\n print('Extracting {} into {}.'.format(zipfile, unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass",
"def wget_content(url):\n\n try:\n\n for i in range(len(url)):\n url[i].replace(' ', \"%20\") if i > url.find('?') else url[i]\n\n with TemporaryDirectory() as dirname:\n retval = ''\n retcode = subprocess.Popen([\"wget\", \"--tries=5\", '--timeout=10', url, \"-O\", os.path.join(dirname, \"1.txt\")])\n retcode.wait()\n file_name = os.path.join(dirname, \"1.txt\")\n handle = open(file_name)\n if handle:\n retval = handle.read()\n\n\n except Exception as ex:\n if url.startswith(\"https://\") and \"handshake failure\" in retval:\n return wget_content(url.replace(\"https://\", \"http://\"))\n else:\n wxpush(\"Crawler module failure\", traceback.extract_stack(), True)\n\n return retval or \"\"",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def extract_web_archive(cls, url, apath, ffilter=[]):\n\n if apath not in cls._archives.keys():\n download(url, apath)\n\n _files = extract(apath, ffilter=ffilter)\n\n return _files",
"def download_and_expand(self):\n top_dir_name = None\n if self.git_branch:\n # Download a source by git clone.\n top_dir_name = self._download_and_expand_by_git()\n else:\n # Download a source from the arcihve URL.\n # Downloading the compressed archive is better than \"git clone\",\n # because it is faster.\n # If download failed due to URL not found, try \"git clone\".\n try:\n top_dir_name = self._download_and_expand_from_archive_url()\n except RemoteFileNotFoundError:\n Log.info('Try to download by git clone.')\n top_dir_name = self._download_and_expand_by_git()\n return top_dir_name",
"def download_extract_zip(url):\n response = requests.get(url)\n with ZipFile(BytesIO(response.content)) as thezip:\n for zipinfo in thezip.infolist():\n with thezip.open(zipinfo) as thefile:\n df = pd.read_csv(thefile)\n return (df)",
"def __extractChemDrawRemote(self, urlOrData, outputDir, name):\n baseUrl = self.__remoteServiceUrl\n postData = [('pathext', '.cml'), ('mode', 'extract')]\n if self.__isUrl(urlOrData):\n postData.append(('url', urlOrData))\n else:\n postData.append(('file', (name, urlOrData)))\n zipData = self.iceContext.Http().post(baseUrl, postData)\n if outputDir is not None:\n tmpFs = self.iceContext.fs.createTempDirectory()\n tmpFs.writeFile(\"media.zip\", zipData)\n tmpFs.unzipToDirectory(\"media.zip\", outputDir)\n tmpFs.delete()\n return zipData",
"def download_dependency_url(name, url, temp_path, build_path, config, zip=True):\n parsed = urlparse(url)\n fn = os.path.basename(parsed.path)\n target_name = os.path.join(temp_path, fn)\n logger.info(f\"Downloading {url} to {target_name}\")\n\n download_file(url, target_name)\n\n if zip:\n with zipfile.ZipFile(target_name, \"r\") as z:\n z.extractall(build_path)\n else:\n shutil.copy(target_name, os.path.join(build_path, \"GameData\"))",
"def get_svn_info(here, there, mcmc_tag=None):\n\n #print(there)\n os.chdir(there)\n #print(os.system(\"svn info\"))\n #print(os.getcwd())\n if mcmc_tag is not None:\n os.system(\"svn info > tmp_svn_%s\" % (mcmc_tag))\n fname = 'tmp_svn_%s' % (mcmc_tag)\n else:\n os.system(\"svn info > tmp_svn\")\n fname = 'tmp_svn'\n\n fp = open(fname, \"r\")\n svn = fp.readlines()\n fp.close()\n os.remove(fname)\n\n url = [i.split(\":\", 1)[1].strip() \\\n for i in svn if i.startswith('URL')]\n rev = [i.split(\":\", 1)[1].strip() \\\n for i in svn if i.startswith('Revision')]\n os.chdir(here)\n\n return url, rev",
"def download(repo_url, sha, working_dir):\n print 'Downloading %s ...' % (sha)\n sf_zip = os.path.join(working_dir, 'sf.gz')\n with open(sf_zip, 'wb+') as f:\n f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)\n zip_file = ZipFile(sf_zip)\n zip_file.extractall(working_dir)\n zip_file.close()\n\n for name in zip_file.namelist():\n if name.endswith('/src/'):\n src_dir = name\n break\n\n return os.path.join(working_dir, src_dir)",
"def archive_url(url: str) -> Tuple[str, bool]:\n internet_archive = InternetArchive()\n return internet_archive.archive_page(url)",
"def url_HITRAN12():\n url=u\"https://www.cfa.harvard.edu/HITRAN/HITRAN2012/HITRAN2012/By-Molecule/Uncompressed-files/\"\n return url",
"def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)"
]
| [
"0.556513",
"0.5522339",
"0.5424023",
"0.5417535",
"0.53908026",
"0.5365669",
"0.5358795",
"0.5332443",
"0.52523816",
"0.5220812",
"0.5178104",
"0.5170915",
"0.5148962",
"0.51238537",
"0.51191694",
"0.5066204",
"0.5062707",
"0.5060167",
"0.5052632",
"0.50380456",
"0.5033808",
"0.5027104",
"0.5015001",
"0.49825746",
"0.49419397",
"0.49217093",
"0.4903021",
"0.4860602",
"0.48565802",
"0.48460996"
]
| 0.7396624 | 0 |
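As an illustrative aside to the `get_workspace_zip` record above — a hedged sketch of the same two-step scrape written as a free function (the function name, `auth`/`verify` parameters, and page layout assumptions are taken from the record itself, which notes that the JSON API offers no direct way to fetch the workspace zip):

import urllib.parse
import requests
import bs4

def find_workspace_zip(build_url, auth=None, verify=True):
    # Root of the Jenkins instance, used to rebuild the relative links found in the pages.
    parsed = urllib.parse.urlparse(build_url)
    root = f"{parsed.scheme}://{parsed.netloc}"

    def soup_of(url):
        response = requests.get(url, auth=auth, verify=verify)
        return bs4.BeautifulSoup(response.text, features="html.parser")

    # Step 1: the build's /ws/ page links each agent workspace via /execution/node/ paths.
    for link in soup_of(build_url.rstrip("/") + "/ws/").find_all("a", href=True):
        if "/execution/node/" not in link["href"]:
            continue
        node_url = root + link["href"]
        # Step 2: inside a workspace page, the "all files in zip" link contains /*zip*/.
        for inner in soup_of(node_url).find_all("a", href=True):
            if "/*zip*/" in inner["href"]:
                return node_url + inner["href"]  # the zip link is relative, so rebuild it
    return None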
Recursively search through all jobs and projects to pull out build URLs | def get_all_build_links(url, auth=None, netloc_force=False):
all_build_links = []
if 'api/json' not in url:
        # if the api endpoint isn't appended, then append it:
url += '/api/json/'
def recurse_to_build(url):
orig_url = urllib.parse.urlparse(url)
try:
json_reply = json.loads(requests.get(url, verify=False, auth=auth).text)
        except json.JSONDecodeError:
return
if 'builds' in json_reply:
if len(json_reply['builds']) > 0:
url_link = json_reply['builds'][0]['url']
if netloc_force:
url_link = urllib.parse.urlparse(url_link)
url_link = url_link._replace(netloc=orig_url.netloc)
url_link = url_link.geturl()
print("{}".format(url_link))
all_build_links.append(url_link)
if 'jobs' in json_reply:
for job in json_reply['jobs']:
url_link = job['url'] + 'api/json/'
if netloc_force:
url_link = urllib.parse.urlparse(url_link)
url_link = url_link._replace(netloc=orig_url.netloc)
url_link = url_link.geturl()
recurse_to_build(url_link)
if 'endpoint' in json_reply:
url_link = json_reply['endpoint'] + 'api/json/'
if netloc_force:
url_link = urllib.parse.urlparse(url_link)
url_link = url_link._replace(netloc=orig_url.netloc)
url_link = url_link.geturl()
recurse_to_build(url_link)
recurse_to_build(url)
return all_build_links | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_builds(self):\n for p in self.product_config:\n p_str = p[0]\n\n for l in self.link_list:\n if l.startswith(p[1]):\n if not self.builds_list.has_key(p_str):\n self.builds_list[p_str] = []\n b_str = l.replace('/', '')\n self.builds_list[p_str].append(b_str)",
"def getBuilds():",
"def grab_project_links(soup):\n project_urls = []\n valid_project_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+/[a-zA-Z]+/[a-zA-Z]+.html\"\n try:\n # Grab links to all the projects\n all_link = soup.find_all(\"a\")\n for link in all_link:\n if re.match(valid_project_url, link.get(\"href\")):\n project_urls.append(join(melange, link.get(\"href\")[1:]))\n except TypeError:\n print(link)\n\n return project_urls",
"def getBuildRequests():",
"def get_projects_url_paths(*args):\n projects_infos = get_projects()\n projects_url_paths = []\n\n for project in projects_infos:\n projects_url_paths.append(BASE_URL + \"/projects/{id}/\".format(id=project[0]))\n return projects_url_paths",
"def import_builds_for_job(job_pk):\n job = Job.objects.get(pk=job_pk)\n\n logging.info(\"Located job %s\\n\" % job)\n\n client = job.server.get_client()\n\n logging.info(\"Using server at %s\\n\" % job.server.url)\n\n jenkins_job = client.get_job(job.name)\n\n good_build_numbers = list(jenkins_job.get_build_ids())\n logging.info(\"%s\\n\" % good_build_numbers)\n\n for build_number in good_build_numbers:\n import_build_for_job(job.pk, build_number)",
"def get_job_builds(self, job_id, started=None, finished=None,\n success=None, skipped=None, order='asc', limit=100):\n pass",
"def get_finished_builds(job_dir, last_processed_log):\n return [\n dir.name\n for dir in os.scandir(job_dir)\n if dir.name.isdigit()\n and int(dir.name) > int(last_processed_log)\n and os.path.isfile(\n functools.reduce(os.path.join, [job_dir, dir.name, \"build.xml\"])\n )\n and grep(\n functools.reduce(os.path.join, [job_dir, dir.name, \"build.xml\"]),\n \"<result>\",\n )\n ]",
"def Builds():\n return builds",
"def get_new_running_builds(job_dir, last_processed_log):\n return [\n dir.name\n for dir in os.scandir(job_dir)\n if dir.name.isdigit()\n and int(dir.name) > int(last_processed_log)\n and os.path.isfile(\n functools.reduce(os.path.join, [job_dir, dir.name, \"build.xml\"])\n )\n and not grep(\n functools.reduce(os.path.join, [job_dir, dir.name, \"build.xml\"]), \"<result>\"\n )\n ]",
"def getPendingBuilds():",
"def getPendingBuilds():",
"def getBuild():",
"def get_jobs():\n jobs = [os.path.join(JOBS_DIR, job)\n for job in os.listdir(JOBS_DIR)\n if job != '.gitignore']\n return jobs",
"def do_all_projects(args):\n man = load_manifest()\n\n if args[0] == '-p':\n parallel = True\n del args[0]\n else:\n parallel = False\n\n towait = []\n\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n print >>sys.stderr, \"In project: \", name, \" running \", \" \".join(args)\n p = repo.command_process(args)\n if not parallel:\n p.Wait()\n print >>sys.stderr\n else:\n towait.append(p)\n\n for p in towait:\n p.Wait()",
"def __setup_recursion(folder_found, links_titles):\n urls = []\n if folder_found:\n for element_x, _, element_z in links_titles:\n if element_z == 'folder':\n # fill urls with sub-links to recursively call crawl function on them\n urls.append(element_x)\n return urls",
"def get_pr_jobs():\n res = requests.get(\n uri + \"/view/Pull%20Requests/api/json\",\n headers={\"accept\": \"application/json\"},\n auth=requests.auth.HTTPBasicAuth(user, password),\n verify=verify,\n )\n if res.status_code != 200:\n raise RuntimeError(\"Received non 200 status code from jenkins\")\n data = res.json()\n for job in data[\"jobs\"]:\n yield job",
"def pipeline_results(self):\n repo = self.main_gitlab_repo()\n ret = []\n for branch in [\"master\", \"main\", \"devel\"]:\n build = repo.cibuild_set.filter(branch__name__endswith=branch).first()\n if build is not None:\n link = repo.url + f\"/-/pipelines/{build.build_id}\"\n sym = \"✓\" if build.passed else \"✘\"\n ret.append(f'<a href=\"{link}\">{branch}: {sym}</a>')\n\n return mark_safe(\"<br>\".join(ret))",
"def getURLs():",
"def recursive_urls(urls):\n if len(urls) == 0:\n return\n rs = [grequests.get(url, hooks=dict(args=print_url)) for url in urls]\n responses = grequests.map(rs)\n url_lists = [get_urls_from_response(response) for response in responses]\n urls = sum(url_lists, []) # flatten list of lists into a list\n recursive_urls(urls)",
"def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]",
"def download_builds(config, builds, force=False):\n if not config.get('datadir'):\n raise ValueError(\"No output dir (--datadir) specified\")\n\n jenkins_client = jenkins.get_client(config)\n download_args = []\n for build in builds:\n if ':' in build:\n (job, build_id) = build.split(':')\n download_args.append(\n (job, build_id, config['datadir'],\n jenkins_client, config.get('groupingParameter'), force)\n )\n else:\n job = build\n for build_id in jenkins_client.fetch_all_build_ids(job):\n download_args.append(\n (job, build_id, config['datadir'],\n jenkins_client, config.get('groupingParameter'), force)\n )\n\n num_threads = config.get('downloadThreads', 7) # arbitrary number\n if num_threads <= 1:\n for args_tuple in download_args:\n _download_one_build(args_tuple)\n else:\n import multiprocessing.pool # only import if we need it!\n pool = multiprocessing.pool.ThreadPool(num_threads)\n pool.map(_download_one_build, download_args)",
"def get(self, project_slug):\n project = Project.query.filter_by(slug=project_slug).first_or_404()\n\n if not (project.public or current_user.is_authenticated()):\n flask_restful.abort(404)\n\n return [\n dict(name=job.git_branch)\n for job\n in (\n project.jobs.distinct(Job.git_branch)\n .order_by(sqlalchemy.asc(Job.git_branch))\n )\n if job.git_branch is not None\n ]",
"def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)",
"def build_folder_map(base_url, proxies):\n try:\n LOGGING.info('Requesting: {0}'.format(base_url))\n\n user_agent = {'User-agent': BASECONFIG.user_agent}\n\n request = requests.get(base_url, headers=user_agent, proxies=proxies, timeout=(20, 20))\n\n if request.status_code == 200:\n LOGGING.info('Request OK. Parsing result...')\n\n children = []\n\n content = BeautifulSoup(request.text, 'html.parser')\n links = content.find_all('a', href=True)\n\n for link in links:\n if 'Parent Directory' in link:\n continue\n\n href = link.get('href')\n\n if len(href) > 1 and not any(s in href for s in BAD_CHARS):\n children.append(urljoin(base_url, href))\n\n return children\n\n else:\n LOGGING.warning(\n 'Problem connecting to {0}. Status code: {1}. Aborting task.'.format(\n base_url, request.status_code))\n\n except requests.exceptions.ConnectionError as e:\n LOGGING.warning(\n 'Problem connecting to {0}. Error: {1}'.format(\n base_url, e))\n\n except Exception as e:\n LOGGING.warning(\n 'Problem connecting to {0}. Aborting task.'.format(base_url))\n LOGGING.exception(sys.exc_info())\n LOGGING.exception(type(e))\n LOGGING.exception(e.args)\n LOGGING.exception(e)\n\n return []",
"def getAllSolutionUrls(urls: list) -> list:\n allSolutionUrls = []\n\n for index, url in enumerate(urls):\n print(\"current puzzle: \" + str(index))\n solutionUrls = getSolutionUrls(url)\n\n if solutionUrls != None:\n allSolutionUrls += solutionUrls\n\n return allSolutionUrls",
"def getBuildbotURL():",
"def scan_buildfiles(root_dir, base_path=None):\r\n\r\n buildfiles = []\r\n for root, dirs, files in os.walk(base_path if base_path else root_dir):\r\n for filename in files:\r\n if BuildFile._is_buildfile_name(filename):\r\n buildfile_relpath = os.path.relpath(os.path.join(root, filename), root_dir)\r\n buildfiles.append(BuildFile(root_dir, buildfile_relpath))\r\n return OrderedSet(sorted(buildfiles, key=lambda buildfile: buildfile.full_path))",
"def _fetch_base_urls(repository_url):\n repo_config = _url_as_ini_file(repository_url)\n config = configparser.ConfigParser()\n config.read_file(repo_config)\n\n base_urls = list()\n for repo in config.sections():\n base_urls.append((config.get(repo, 'name'),\n config.get(repo, 'baseurl')))\n\n return base_urls",
"def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True"
]
| [
"0.6178584",
"0.612089",
"0.60826075",
"0.59894234",
"0.5935299",
"0.5781459",
"0.5699846",
"0.56969726",
"0.5667781",
"0.560925",
"0.5579634",
"0.5579634",
"0.55440015",
"0.5480648",
"0.5456297",
"0.5444982",
"0.5421424",
"0.54137886",
"0.5393616",
"0.5388915",
"0.5386272",
"0.53702515",
"0.53451747",
"0.5343377",
"0.5311376",
"0.53103757",
"0.52936214",
"0.52924657",
"0.5279329",
"0.52708733"
]
| 0.6822186 | 0 |
To load the descriptor settings from the config file; only HOG is supported | def load_descriptor(settings):
return {
'hog': descriptors.HogDescriptor.from_config_file(settings['hog']),
}.get(settings['train']['descriptor'], 'hog') # Default to HOG for invalid input | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_config(self):\n pass",
"def config():",
"def config():",
"def read_config(self, config_filename):",
"def loadConfig(self):\r\n self.config.read(self.CONFIG_FILE)\r\n try:\r\n assert \"Settings\" in self.config\r\n except AssertionError:\r\n print(\"Settings do not exist, creating new config file...\")\r\n self.saveConfig()\r\n settings = self.config[\"Settings\"]\r\n self.dataPath = settings.get(\"datapath\",fallback=\"\")\r\n self.videoPath = settings.get(\"videopath\",fallback=\"\")\r\n self.dataOffset = settings.getfloat(\"dataoffset\",fallback=0)\r\n self.colBlindMode = settings.getboolean(\"colblindmode\",False)\r\n if self.videoPath != \"\":\r\n self.loadVideo(self.videoPath,loadAudio=False)\r\n if self.dataPath != \"\":\r\n self.loadData(self.dataPath)",
"def config(self):\n pass",
"def config(self):\n pass",
"def widget_load_config(self, plugman):\r\n pass",
"def initialize_from_config(self):",
"def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()",
"def configuration():",
"async def cmd_galloadsettings(self, ctx):\n config = Config()\n\n # ===== UPDATE THE SETTINGS IN THE LOCAL COG\n self.cogset['guild_id'] = config.target_guild_id\n self.cogset['enable']= config.galEnable\n self.cogset['channel_ids'] = config.gallerys[\"chls\"]\n self.cogset['text_expirein']= config.gallerys['expire_in']\n self.cogset['rem_low']= config.gallerys['rem_low']\n self.cogset['user_wl']= config.gallerys[\"user_wl\"]\n self.cogset['allow_links']= config.gallerys[\"links\"]\n self.cogset['link_wl']= config.gallerys['link_wl']\n\n # ===== SAVE COG SETTING\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n \n # ===== RETURN\n await ctx.channel.send(content=\"Gallery information has been updated from the setup.ini file\", delete_after=15)\n return",
"def load_from_conf(self):\r\n raise NotImplementedError",
"def load_ini_file(ini_file_path):\n config = configparser.ConfigParser()\n config.read(ini_file_path)\n cfg = {}\n\n # Load hyperparameters\n cfg[\"hyperparameters\"] = {}\n cfg[\"hyperparameters\"][\"gpu_id\"] = config.getint(\"hyperparameters\", \"gpu_id\")\n cfg[\"hyperparameters\"][\"seed\"] = config.getint(\"hyperparameters\", \"seed\")\n cfg[\"hyperparameters\"][\"optimizer\"] = config.get(\"hyperparameters\", \"optimizer\")\n cfg[\"hyperparameters\"][\"lr\"] = config.getfloat(\"hyperparameters\", \"lr\")\n cfg[\"hyperparameters\"][\"momentum\"] = config.getfloat(\"hyperparameters\", \"momentum\")\n cfg[\"hyperparameters\"][\"clip\"] = config.getfloat(\"hyperparameters\", \"clip\")\n cfg[\"hyperparameters\"][\"dropout\"] = config.getfloat(\"hyperparameters\", \"dropout\")\n cfg[\"hyperparameters\"][\"batch_size\"] = config.getint(\"hyperparameters\", \"batch_size\")\n cfg[\"hyperparameters\"][\"embedding_dim\"] = config.getint(\"hyperparameters\", \"embedding_dim\")\n cfg[\"hyperparameters\"][\"commun_embed_size\"] = config.getint(\"hyperparameters\", \"commun_embed_size\")\n cfg[\"hyperparameters\"][\"num_epochs\"] = config.getint(\"hyperparameters\", \"num_epochs\")\n cfg[\"hyperparameters\"][\"use_one_hot\"] = config.getboolean(\"hyperparameters\", \"use_one_hot\")\n cfg[\"hyperparameters\"][\"max_input_length\"] = config.getint(\"hyperparameters\", \"max_input_length\")\n cfg[\"hyperparameters\"][\"max_num_answers\"] = config.getint(\"hyperparameters\", \"max_num_answers\")\n cfg[\"hyperparameters\"][\"use_dnc_c\"] = config.getboolean(\"hyperparameters\", \"use_dnc_c\") \n cfg[\"hyperparameters\"][\"use_dnc_q\"] = config.getboolean(\"hyperparameters\", \"use_dnc_q\")\n cfg[\"hyperparameters\"][\"share_memory\"] = config.getboolean(\"hyperparameters\", \"share_memory\")\n cfg[\"hyperparameters\"][\"weight_decay\"] = config.getfloat(\"hyperparameters\", \"weight_decay\")\n cfg[\"hyperparameters\"][\"use_clip_grad\"] = config.getboolean(\"hyperparameters\", \"use_clip_grad\")\n cfg[\"hyperparameters\"][\"clip_value\"] = config.getfloat(\"hyperparameters\", \"clip_value\")\n cfg[\"hyperparameters\"][\"lr_reduce_after\"] = config.getint(\"hyperparameters\", \"lr_reduce_after\")\n cfg[\"hyperparameters\"][\"lr_decay_rate\"] = config.getfloat(\"hyperparameters\", \"lr_decay_rate\")\n cfg[\"hyperparameters\"][\"grad_flow_interval\"] = config.getfloat(\"hyperparameters\", \"grad_flow_interval\")\n cfg[\"hyperparameters\"][\"add_noise\"] = config.getboolean(\"hyperparameters\", \"add_noise\")\n cfg[\"hyperparameters\"][\"finetune\"] = config.getboolean(\"hyperparameters\", \"finetune\")\n cfg[\"hyperparameters\"][\"fc_flag\"] = config.getboolean(\"hyperparameters\", \"fc_flag\")\n\n # Load lstm parameters\n cfg[\"lstm\"] = {}\n cfg[\"lstm\"][\"hidden_dim\"] = config.getint(\"lstm\", \"hidden_dim\")\n cfg[\"lstm\"][\"num_layers\"] = config.getint(\"lstm\", \"num_layers\")\n cfg[\"lstm\"][\"dropout\"] = config.getfloat(\"lstm\", \"dropout\")\n\n # Load dnc_q parameters\n cfg[\"dnc_q\"] = {}\n cfg[\"dnc_q\"][\"input_size\"] = config.getint(\"dnc_q\", \"input_size\")\n cfg[\"dnc_q\"][\"output_size\"] = config.getint(\"dnc_q\", \"output_size\")\n cfg[\"dnc_q\"][\"rnn_type\"] = config.get(\"dnc_q\", \"rnn_type\")\n cfg[\"dnc_q\"][\"hidden_dim\"] = config.getint(\"dnc_q\", \"hidden_dim\")\n cfg[\"dnc_q\"][\"memory_type\"] = config.get(\"dnc_q\", \"memory_type\")\n cfg[\"dnc_q\"][\"num_layers\"] = config.getint(\"dnc_q\", \"num_layers\")\n cfg[\"dnc_q\"][\"num_layers_hidden\"] = 
config.getint(\"dnc_q\", \"num_layers_hidden\")\n cfg[\"dnc_q\"][\"n\"] = config.getint(\"dnc_q\", \"n\")\n cfg[\"dnc_q\"][\"w\"] = config.getint(\"dnc_q\", \"w\")\n cfg[\"dnc_q\"][\"r\"] = config.getint(\"dnc_q\", \"r\")\n cfg[\"dnc_q\"][\"s_r\"] = config.getint(\"dnc_q\", \"t_r\")\n cfg[\"dnc_q\"][\"t_r\"] = config.getint(\"dnc_q\", \"s_r\")\n cfg[\"dnc_q\"][\"pass_through_mem\"] = config.getboolean(\"dnc_q\", \"pass_through_mem\")\n cfg[\"dnc_q\"][\"reset_experience\"] = config.getboolean(\"dnc_q\", \"reset_experience\")\n cfg[\"dnc_q\"][\"debug\"] = config.getboolean(\"dnc_q\", \"debug\")\n cfg[\"dnc_q\"][\"lr\"] = config.getfloat(\"dnc_q\", \"lr\")\n cfg[\"dnc_q\"][\"dropout\"] = config.getfloat(\"dnc_q\", \"dropout\")\n\n # Load dnc_c parameters\n cfg[\"dnc_c\"] = {}\n cfg[\"dnc_c\"][\"output_size\"] = config.getint(\"dnc_c\", \"output_size\")\n cfg[\"dnc_c\"][\"rnn_type\"] = config.get(\"dnc_c\", \"rnn_type\")\n cfg[\"dnc_c\"][\"hidden_dim\"] = config.getint(\"dnc_c\", \"hidden_dim\")\n cfg[\"dnc_c\"][\"memory_type\"] = config.get(\"dnc_c\", \"memory_type\")\n cfg[\"dnc_c\"][\"num_layers\"] = config.getint(\"dnc_c\", \"num_layers\")\n cfg[\"dnc_c\"][\"num_layers_hidden\"] = config.getint(\"dnc_c\", \"num_layers_hidden\")\n cfg[\"dnc_c\"][\"n\"] = config.getint(\"dnc_c\", \"n\")\n cfg[\"dnc_c\"][\"w\"] = config.getint(\"dnc_c\", \"w\")\n cfg[\"dnc_c\"][\"r\"] = config.getint(\"dnc_c\", \"r\")\n cfg[\"dnc_c\"][\"s_r\"] = config.getint(\"dnc_c\", \"t_r\")\n cfg[\"dnc_c\"][\"t_r\"] = config.getint(\"dnc_c\", \"s_r\")\n cfg[\"dnc_c\"][\"pass_through_mem\"] = config.getboolean(\"dnc_c\", \"pass_through_mem\")\n cfg[\"dnc_c\"][\"reset_experience\"] = config.getboolean(\"dnc_c\", \"reset_experience\")\n cfg[\"dnc_c\"][\"debug\"] = config.getboolean(\"dnc_c\", \"debug\")\n cfg[\"dnc_c\"][\"lr\"] = config.getfloat(\"dnc_c\", \"lr\")\n cfg[\"dnc_c\"][\"dropout\"] = config.getfloat(\"dnc_c\", \"dropout\")\n cfg[\"dnc_c\"][\"type\"] = config.get(\"dnc_c\", \"type\")\n cfg[\"dnc_c\"][\"nonlinearity\"] = config.get(\"dnc_c\", \"nonlinearity\")\n cfg[\"dnc_c\"][\"concat_out_rv\"] = config.getboolean(\"dnc_c\", \"concat_out_rv\")\n cfg[\"dnc_c\"][\"bidirectional\"] = config.getboolean(\"dnc_c\", \"bidirectional\")\n\n # Load logging paths\n cfg[\"logging\"] = {}\n cfg[\"logging\"][\"tensorboard_dir\"] = config.get(\"logging\", \"tensorboard_dir\")\n cfg[\"logging\"][\"checkpoints_dir\"] = config.get(\"logging\", \"checkpoints_dir\")\n cfg[\"logging\"][\"results_dir\"] = config.get(\"logging\", \"results_dir\")\n cfg[\"logging\"][\"grad_flow_dir\"] = config.get(\"logging\", \"grad_flow_dir\")\n\n # Load paths\n cfg[\"paths\"] = {}\n cfg[\"paths\"][\"input\"] = config.get(\"paths\", \"input\")\n cfg[\"paths\"][\"json_q_path_tr\"] = config.get(\"paths\", \"json_q_path_tr\")\n cfg[\"paths\"][\"json_q_path_val\"] = config.get(\"paths\", \"json_q_path_val\")\n cfg[\"paths\"][\"json_a_path_tr\"] = config.get(\"paths\", \"json_a_path_tr\")\n cfg[\"paths\"][\"json_a_path_val\"] = config.get(\"paths\", \"json_a_path_val\")\n cfg[\"paths\"][\"json_q_path_test\"] = config.get(\"paths\", \"json_q_path_test\")\n cfg[\"paths\"][\"dnc_q\"] = config.get(\"paths\", \"dnc_q\")\n cfg[\"paths\"][\"dnc_c\"] = config.get(\"paths\", \"dnc_c\")\n return cfg",
"def load(self, config_instance):\r\n pass",
"def settings_init(self):\n config_console = configparser.ConfigParser()\n config_console.read(CONFIG_FILE_NAME)\n self.logmode = config_console[\"LOG\"][\"log_mode\"]",
"def read_config():\n global batch_size, num_classes, num_filters, dropout_dim, dense_neurons\n global b_eval_advanced, pool_size, kernel_size, IMG_SIZE, epochs, img_cols, img_rows\n\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n batch_size = int(config['MODEL']['batch_size'])\n num_filters = int(config['MODEL']['num_filters'])\n dropout_dim = float(config['MODEL']['dropout_dim'])\n dense_neurons = int(config['MODEL']['dense_neurons'])\n _pool_size = config['MODEL']['pool_size']\n _kernel_size = config['MODEL']['kernel_size']\n IMG_SIZE = int(config['DATA']['image_size'])\n num_classes = int(config['CUSTOM']['num_classes'])\n epochs = int(config['MODEL']['epochs'])\n b_eval_advanced = (config['MODEL']['complex_analysis'] == 'true' or config['MODEL']['complex_analysis'] == 'True')\n\n pool_size = tuple(map(int, _pool_size.split(',')))\n kernel_size = tuple(map(int, _kernel_size.split(',')))\n\n img_rows, img_cols = IMG_SIZE, IMG_SIZE",
"def config(self) -> Dict[str, Any]:",
"def init_config(self):\n pass",
"def loadParameters (self, filePath):\r\n # productive #onButton\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n config = ConfigParser.RawConfigParser()\r\n config.read(filePath)\r\n\r\n autoCorrectTip = config.getboolean('BooleanSection', 'autoCorrectTip')\r\n invertedContrast = config.getboolean('BooleanSection', 'invertedContrast')\r\n gradient = config.getboolean('BooleanSection', 'gradient')\r\n filterControlPoints = config.getboolean('BooleanSection', 'filterControlPoints')\r\n drawFiducialPoints = config.getboolean('BooleanSection', 'drawFiducialPoints')\r\n autoStopTip = config.getboolean('BooleanSection', 'autoStopTip')\r\n extendNeedle = config.getboolean('BooleanSection', 'extendNeedle')\r\n maxLength = config.getboolean('BooleanSection', 'maxLength')\r\n gaussianAttenuationButton = config.getboolean('BooleanSection', 'gaussianAttenuationButton')\r\n\r\n realNeedleLength = config.getint('IntegerSection', 'realNeedleLength')\r\n sigmaValue = config.getint('IntegerSection', 'sigmaValue')\r\n gradientPonderation = config.getint('IntegerSection', 'gradientPonderation')\r\n exponent = config.getint('IntegerSection', 'exponent')\r\n try:\r\n radiusMax = config.getint('IntegerSection', 'distanceMax') # try deprecated parameter name (old parameter files)\r\n except:\r\n radiusMax = config.getint('IntegerSection', 'radiusMax')\r\n nbRotatingIterations = config.getint('IntegerSection', 'nbRotatingIterations')\r\n numberOfPointsPerNeedle = config.getint('IntegerSection', 'numberOfPointsPerNeedle')\r\n lenghtNeedleParameter = config.getint('IntegerSection', 'lenghtNeedleParameter')\r\n radiusNeedleParameter = config.getint('IntegerSection', 'radiusNeedleParameter')\r\n algoVersParameter = config.getint('IntegerSection', 'algoVersParameter')\r\n\r\n widget.autoCorrectTip.checked = autoCorrectTip\r\n widget.invertedContrast.checked = invertedContrast\r\n widget.gradient.checked = gradient\r\n widget.filterControlPoints.checked = filterControlPoints\r\n widget.drawFiducialPoints.checked = drawFiducialPoints\r\n widget.autoStopTip.checked = autoStopTip\r\n widget.extendNeedle.checked = extendNeedle\r\n widget.maxLength.checked = maxLength\r\n widget.gaussianAttenuationButton.checked = gaussianAttenuationButton\r\n\r\n widget.realNeedleLength.value = realNeedleLength\r\n widget.sigmaValue.value = sigmaValue\r\n widget.gradientPonderation.value = gradientPonderation\r\n widget.exponent.value = exponent\r\n widget.radiusMax.value = radiusMax\r\n widget.nbRotatingIterations.value = nbRotatingIterations\r\n widget.numberOfPointsPerNeedle.value = numberOfPointsPerNeedle\r\n widget.lenghtNeedleParameter.value = lenghtNeedleParameter\r\n widget.radiusNeedleParameter.value = radiusNeedleParameter\r\n widget.algoVersParameter.value = algoVersParameter\r\n print \"#############\"\r\n print \"algoVers: \", algoVersParameter\r\n print \"Parameters successfully loaded!\"",
"def __init__(self, yaml_file = 'options_modeling.yaml'):\n\n self.reproj_th = 2.5\n self.min_matched_views = 3\n self.descriptors = {'SIFT': 'sift'} # Descriptor name and module name\n self.mask_suffix = '*_mask.png'\n \n # If there is an options file, it will overwrite the defaults \n if yaml_file is not None:\n self.load(yaml_file)",
"def init_cfg(self):\n # read the config dict\n self.cfg = config_json.cfg_open(self.work_dir)\n # default three sections\n self.cfg.setdefault('param', {})\n self.cfg.setdefault('info', {})\n self.cfg.setdefault('meta', {})",
"def load_from_conf(self):\n raise NotImplementedError",
"def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))",
"def setup_d2d(self):\n\n self.config[\"d2d\"] = dict()\n\n self.config[\"d2d\"][LC.WHITE] = dict()\n self.config[\"d2d\"][LC.GROWTH] = dict()\n\n self.config[\"d2d\"][LC.WHITE][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.WHITE][\"digital-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"digital-gain\"] = 1.0\n\n self.config[\"d2d\"][\"timestamp\"] = time.time()\n\n self.save_config_to_file()",
"def config():\n file_path = None # path to the input file\n db_path = None # path to the output db\n atomic_properties = (\n \"Properties=species:S:1:pos:R:3\"\n ) # atomic properties of the input file\n molecular_properties = [\"energy\"] # molecular properties of the input file\n overwrite = False",
"def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue",
"def comando_config(self):\r\n if args.tag:\r\n cfile = args.file\r\n\t if args.opcao == 'daruma' and not cfile:\r\n cfile = '/usr/local/lib/daruma.ini'\r\n\t if args.value:\r\n dictags = self.set_param_section_config_ini(cfile, args.loja, args.tag, args.value)\r\n return dictags\r\n # modificar\r\n\t else:\r\n dictag = self.get_param_section_config_ini(cfile, args.loja, args.tag)\r\n return dictag #retorna dicicionario\r",
"def config(self):\n raise NotImplementedError",
"def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)"
]
| [
"0.64163566",
"0.60806596",
"0.60806596",
"0.5885641",
"0.58768445",
"0.58304346",
"0.58304346",
"0.582294",
"0.57977813",
"0.5787138",
"0.5759142",
"0.57564086",
"0.57534146",
"0.5717577",
"0.5700591",
"0.56643593",
"0.56641626",
"0.563442",
"0.56303775",
"0.5625049",
"0.56210136",
"0.5615423",
"0.56137353",
"0.55887234",
"0.5578363",
"0.55585945",
"0.5555756",
"0.5554492",
"0.5552223",
"0.55498254"
]
| 0.7915412 | 0 |
Generator which yields all files in the given directories with any of the EXTENSIONS. | def get_files(dirs):
for dir in dirs:
for root, _, files in os.walk(dir):
for file in files:
path = Path(os.path.join(root, file))
if path.suffix in EXTENSIONS:
yield path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_files_in_dir(dir, ext):\n import os\n\n for root, dirs, files in os.walk(dir):\n for file in files:\n if file.split('.')[1].lower() == ext.lower() or not ext:\n file_full_path = os.path.join(root, file)\n yield file_full_path",
"def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name",
"def _glob_files(directories, extensions):\n pwd = Path(__file__).resolve().parent\n open3d_root_dir = pwd.parent\n\n file_paths = []\n for directory in directories:\n directory = open3d_root_dir / directory\n for extension in extensions:\n extension_regex = \"*.\" + extension\n file_paths.extend(directory.rglob(extension_regex))\n file_paths = [str(file_path) for file_path in file_paths]\n file_paths = sorted(list(set(file_paths)))\n return file_paths",
"def find_files(extensions):\n\n return [fname for fname in os.listdir('.') if fname.endswith(extensions)]",
"def find_files(directory, extensions):\n res = set()\n for filename in os.listdir(directory):\n if filename.endswith(extensions):\n res.add(\"{}/{}\".format(directory, filename))\n return list(res)",
"def get_files(self, include=[], exclude=[]):\r\n for (basepath, dpaths, fpaths) in os.walk(self.path, topdown=True):\r\n for subpath in dpaths + fpaths:\r\n path = os.path.join(self.chroot_path(basepath), subpath)\r\n if filter_path(path, include, exclude):\r\n yield path",
"def glob_ext_files(dirname, ext=\"fa\") -> list:\n fnames = glob(os.path.join(dirname, f\"*.{ext}*\"))\n return [f for f in fnames if f.endswith((ext, f\"{ext}.gz\"))]",
"def _iter_plugin_files(dirs):\n for plugin_dir in dirs:\n plugin_dir = Path(plugin_dir).expanduser()\n if not plugin_dir.exists(): # pragma: no cover\n continue\n for subdir, dirs, files in os.walk(plugin_dir, followlinks=True):\n subdir = Path(subdir)\n # Skip test folders.\n base = subdir.name\n if 'test' in base or '__' in base or '.git' in str(subdir): # pragma: no cover\n continue\n logger.debug(\"Scanning `%s`.\", subdir)\n for filename in files:\n if (filename.startswith('__') or not filename.endswith('.py')):\n continue # pragma: no cover\n logger.debug(\"Found plugin module `%s`.\", filename)\n yield subdir / filename",
"def list_image_files(dir, filter=None):\n for entry in os.listdir(dir):\n path = os.path.join(dir, entry)\n if os.path.isdir(path):\n for p in list_image_files(path, filter):\n yield p\n elif any((entry.lower().endswith(ext) for ext in image_exts)):\n if filter and not filter(path):\n continue\n yield path",
"def searchfiles(directory, filenames, ext=None):\n if ext:\n filenames = [f'{file}{ext}' for file in filenames]\n return [\n file for file in Path(directory).glob('*')\n if file.name in filenames\n ]",
"def _collect_files(folders, extention='Default'):\r\n if isinstance(extention, str):\r\n if extention.lower() == 'default':\r\n extention = ['.*']\r\n else:\r\n extention = [extention]\r\n files = []\r\n for f in folders:\r\n for e in extention:\r\n files += glob(os.path.join(f, f'*{e}'))\r\n return files",
"def get_files(self, sub_dir, suffixes):\n path = os.path.join(self.src_dir, sub_dir)\n for f in os.listdir(path):\n if os.path.isfile(os.path.join(path, f)) and not f.startswith('.') and any(list(map(f.endswith, suffixes))):\n yield f",
"def find_files(directory, patterns):\n for root, dirs, files in os.walk(directory):\n for basename in files:\n if \".pyc\" not in basename and \"__pycache__\" not in basename:\n for pattern in patterns:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename",
"def get_files(imagedir, ext='jpg|jpeg|bmp|png'):\n rex = re.compile(r'^.*\\.({})$'.format(ext), re.I)\n return [os.path.join(imagedir,base) for base in os.listdir(imagedir)\n if rex.match(base)]",
"def _list_files(basePath, validExts=(\".jpg\", \".jpeg\", \".png\", \".bmp\", \".tif\", \".tiff\"), contains=None):\n for (rootDir, dirNames, filenames) in os.walk(basePath):\n # loop over the filenames in the current directory\n for filename in filenames:\n # if the contains string is not none and the filename does not contain\n # the supplied string, then ignore the file\n if contains is not None and filename.find(contains) == -1:\n continue\n\n # determine the file extension of the current file\n ext = filename[filename.rfind(\".\"):].lower()\n\n # check to see if the file is an image and should be processed\n if ext.endswith(validExts):\n # construct the path to the image and yield it\n imagePath = os.path.join(rootDir, filename).replace(\" \", \"\\\\ \")\n yield imagePath",
"def generateFileList(args):\n\textensions = (\"jpg\", \"gif\", \"png\", \"bmp\", \"xpm\", \"ico\")\n\n\tfor arg in args:\n\t\tif not os.access(arg, os.R_OK):\n\t\t\tprint >> sys.stderr, \"File not found: %s\" % arg \n\t\t\tcontinue\n\t\tif os.path.isdir(arg):\n\t\t\tfor (root, dirs, files) in os.walk(arg):\n\t\t\t\tfor file in files:\n\t\t\t\t\tif os.path.splitext(file)[1][1:].lower() in extensions:\n\t\t\t\t\t\tyield os.path.join(root, file)\n\t\telse:\n\t\t\tyield arg",
"def open_dir(input_path, patterns):\r\n for ext in patterns:\r\n for file in Path(input_path).glob('**/*.' + ext):\r\n yield file",
"def _iter_files_in_dir(directory):\n for filename in os.listdir(directory):\n filepath = os.path.join(directory, filename)\n if os.path.isfile(filepath):\n yield filepath",
"def __get_files(self, directory, file_extension):\n path_spec = \"{}**/*.{}\" if self.__config.recursive() else \"{}*.{}\"\n return glob.glob(path_spec.format(directory, file_extension), recursive=self.__config.recursive())",
"def iterfiles(self, include_dirs: bool = False) -> Iterator[P]:\n dirs = deque([self.filetree])\n while dirs:\n for p in dirs.popleft().iterdir():\n if p.is_dir():\n dirs.append(p)\n if include_dirs:\n yield p\n else:\n yield p",
"def files_from_dir(dirname, exts=[\"obj\", \"h5\"]):\n filenames = list()\n for ext in exts:\n ext_expr = os.path.join(dirname, f\"*.{ext}\")\n filenames.extend(glob.glob(ext_expr))\n\n return sorted(filenames, key=os.path.basename)",
"def get_all_files(directory, extension):\n return (f for f in os.listdir(directory) if f.endswith(extension) and os.path.isfile(os.path.join(directory, f)))",
"def find_with_ext(abs_root: str, compile_root: str, ext: str) -> iter([str]):\n for dirpath, dirnames, filenames in walk(abs_root):\n rpath = relpath(dirpath, start=compile_root)\n dirnames[:] = [x for x in dirnames if x not in {'.git'}]\n for fn in filenames:\n if splitext(fn)[1] == ext:\n yield join(rpath, fn)",
"def list_files(directory, extension):\n file_list = listdir(directory)\n included_list = []\n for f in file_list:\n for ext in extension:\n if f.endswith('.' + ext):\n included_list.append(f)\n break\n return included_list",
"def parse_filenames(dirname, pattern = \"*conll\"):\n for path, subdirs, files in os.walk(dirname):\n for name in files:\n if fnmatch(name, pattern):\n yield os.path.join(path, name)",
"def getFiles(searchDir = './', extension = 'source'):\n from glob import glob \n\n return glob(searchDir+'/*.'+extension)",
"def find_files_by_extensions(cls, search_path, allowed_ext):\n file_list = []\n for root, dirnames, filenames in os.walk(search_path):\n for filename in filenames:\n name, extension = os.path.splitext(filename)\n if extension in allowed_ext:\n file_list.append(os.path.join(root, filename))\n\n return file_list",
"def generate_files(path='', ext='', level=None, dirs=False, files=True, verbosity=0):\n path = expand_path(path or '.')\n # None interpreted as '', False is interpreted as '.' (no ext will be accepted)\n ext = '.' if ext is False else ext\n # multiple extensions can be specified in a list or tuple\n ext = ext if ext and isinstance(ext, (list, tuple)) else [ext]\n # case-insensitive extensions, '.' ext means only no-extensions are accepted\n ext = set(x.lower() if x else '.' if x is False else '' for x in ext)\n\n if os.path.isfile(path):\n fn = os.path.basename(path)\n # only yield the stat dict if the extension is among those that match or files without any ext are desired\n if not ext or any(path.lower().endswith(x) or (x == '.' and '.' not in fn) for x in ext):\n yield path_status(os.path.dirname(path), os.path.basename(path), verbosity=verbosity)\n else:\n for dir_path, dir_names, filenames in walk_level(path, level=level):\n if verbosity > 0:\n print('Checking path \"{}\"'.format(dir_path))\n if files:\n for fn in filenames: # itertools.chain(filenames, dir_names)\n if ext and not any((fn.lower().endswith(x) or (x == '.' and x not in fn) for x in ext)):\n continue\n stat = path_status(dir_path, fn, verbosity=verbosity)\n if stat and stat['name'] and stat['path']:\n yield stat\n if dirs:\n for fn in dir_names:\n if ext and not any((fn.lower().endswith(x) or (x == '.' and x not in fn) for x in ext)):\n continue\n yield path_status(dir_path, fn, verbosity=verbosity)",
"def _find_files(directory: str, pattern: str) -> Iterator[str]:\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if _is_file_valid(d)]\n for basename in sorted(files):\n if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename",
"def list_files(self, extensions=None):\n if self.port_type.lower() != 'directory':\n raise ValueError(\"Port type is not == directory\")\n\n filesystem_location = self.path\n\n for root, dirs, files in os.walk(filesystem_location):\n if extensions is None:\n return [os.path.join(root, f) for f in files]\n elif not isinstance(extensions, list):\n extensions = [extensions]\n\n subset_files = []\n\n for f in files:\n for extension in extensions:\n if f.lower().endswith(extension.lower()):\n subset_files.append(os.path.join(root, f))\n break\n return subset_files"
]
| [
"0.7361726",
"0.7168468",
"0.70106226",
"0.6978779",
"0.69671875",
"0.6956483",
"0.69562167",
"0.69542754",
"0.6939163",
"0.68995976",
"0.68970984",
"0.683653",
"0.68256193",
"0.6820684",
"0.67836255",
"0.67363906",
"0.67210203",
"0.6719789",
"0.6704543",
"0.67027146",
"0.66982514",
"0.6661657",
"0.66367143",
"0.6616822",
"0.6601249",
"0.6576262",
"0.657433",
"0.65723175",
"0.65586394",
"0.65382695"
]
| 0.8138541 | 0 |
Set the hash preprocessors of the state and the action, in order to make them hashable. | def _initialize_hash(self):
# action
if isinstance(self.env.action_space, gym.spaces.Discrete):
self._hash_action = lambda x: x
elif isinstance(self.env.action_space, gym.spaces.Box):
if self.__class__.__name__ == "MCTS":
raise Exception("Cannot run vanilla MCTS on continuous actions")
else:
self._hash_action = lambda x: tuple(x)
else:
mex = "Action space has to be Discrete or Box, instead is {}".format(type(self.env.action_space))
raise TypeError(mex)
# observation
if isinstance(self.env.observation_space, gym.spaces.Discrete):
self._hash_space = lambda x: x
elif isinstance(self.env.observation_space, gym.spaces.Box):
self._hash_space = lambda x: tuple(x)
else:
mex = "Action space has to be Discrete or Box, instead is {}".format(type(self.env.observation_space))
raise TypeError(mex) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_hash_state(self) -> None:\n self.hash_states = [hashlib.sha1()]",
"def __setstate__(self, state):\n self.__dict__ = dict(state)\n self._init_compiled()",
"def hash_functions(self):\n pass",
"def _state_actions(self) -> dict:\n return {}",
"def state_encod_arch2(self, state, action):",
"def __init__(self, n_states: int, n_actions: int):\n self._p = {s: {a: [] for a in range(n_actions)} for s in range(n_states)}",
"def build_preprocessors(md_instance, **kwargs):\r\n preprocessors = odict.OrderedDict()\r\n if md_instance.safeMode != 'escape':\r\n preprocessors[\"html_block\"] = HtmlBlockPreprocessor(md_instance)\r\n preprocessors[\"reference\"] = ReferencePreprocessor(md_instance)\r\n return preprocessors",
"def load_state_dict(self, state_dict):\n own_state = self.state_dict()\n new_state = OrderedDict()\n for name, param in state_dict.items():\n if name in own_state:\n new_state[name] = param\n\n super(EncoderImagePrecomp, self).load_state_dict(new_state)",
"def __setstate__(self, state):\n\n for key, value in state.items():\n if key in self.__slots__:\n setattr(self, key, value)",
"def pre_processor(self):",
"def update_hash(self, h):\n # Generate a sequence of fragments that add up to the canonical\n # version of the expression.\n fragments = []\n self.collect_str_fragments(fragments)\n # Update the hash. Wrapping with 'node<...>' prevents the hash\n # from being extended in a way that would clash with something we can\n # generate. (Probably not an important concern but it doesn't hurt.)\n h.update(\"node<\")\n for f in fragments:\n h.update(f)\n h.update(\">\")",
"def addPreInitAction ( action ) :\n global __Bender_PreInit_Actions\n if action : __Bender_PreInit_Actions.append ( action )\n return tuple(__Bender_PreInit_Actions)",
"def __setstate__(self, state: dict) -> None: # pragma: no cover\n self.__dict__.update(state)\n self.rFp = {}\n self.wFp = {}\n self.Fp = ChainMap(self.rFp, self.wFp)\n self.open(mode=self.mode)",
"def _convert_token_to_hash(self):\n self.hash_map = {}\n self.hash_count = {}\n self.convert_col_list = []\n if self.config[\"convert_token_to_onehot\"]:\n for feat_name in [\"inter_feat\", \"user_feat\", \"item_feat\"]:\n feat = getattr(self, feat_name)\n if feat is not None:\n feat = self._judge_token_and_convert(feat)\n setattr(self, feat_name, feat)",
"def prehash(key):\n\n return hash(key)",
"def preprocessing(self, preprocessing):\n\n self._preprocessing = preprocessing",
"def get_state_actions_mapping(self):\n return None",
"def pre_transform_hash(self):\n if self.custom_hash is not None:\n return self.custom_hash\n if self.pre_transform is None:\n return 'no_pre_transform'\n return hashlib.md5(_repr(self.pre_transform).encode()).hexdigest()",
"def regist_hash(cobj, hash, handler, dir):\n pass",
"def setHash(self):\n chash_string = str(self.code) + str(\"CAMPAIGN\") + str(self.created_at)\n chash = hashlib.sha1()\n chash.update(chash_string)\n \n self.chash = chash.hexdigest()\n self.save()",
"def _pre_hook(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n):\n k = prefix + \"pe\"\n if k in state_dict:\n state_dict.pop(k)",
"def do_post_action_processing(self, i_state, low_level_actions):\n pass",
"def __call__(self, state, action):\n pass",
"def __setstate__(self, _state : dict):\n self.__init__(**_state)",
"def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')",
"def hash_state(self):\n return hash(self.board.tostring())",
"def __setstate__(self, state):\n self.__dict__.update(state)",
"def __init__(self,ParamFunctionStateTuples):\n self.mDict = dict()\n for stateInit,param,func,stateFinal in ParamFunctionStateTuples:\n assert param not in stateInit\n self.mDict[param] = StateDict.EmitObj(stateInit,func,stateFinal)",
"def __hash__(self):\n hash_content = []\n hash_content.extend(self.analyzer_options)\n hash_content.append(str(self.analyzer_type))\n hash_content.append(self.target[self.lang])\n hash_content.append(self.source)\n return hash(''.join(hash_content))",
"def action_space(self, state) -> set:\n return {0, 1} # Actions independent of state"
]
| [
"0.57571864",
"0.5435478",
"0.54035085",
"0.52372074",
"0.518619",
"0.5141052",
"0.5130122",
"0.50220853",
"0.5004999",
"0.49558762",
"0.4926324",
"0.49014458",
"0.48990655",
"0.48768932",
"0.48673987",
"0.48605478",
"0.48585635",
"0.4834343",
"0.48277575",
"0.48239538",
"0.4818922",
"0.48155108",
"0.48105577",
"0.48056647",
"0.47581592",
"0.47569403",
"0.47543123",
"0.4749913",
"0.47157097",
"0.47001123"
]
| 0.5770757 | 0 |
Explores the current tree with the UCB principle until we reach an unvisited node where the reward is obtained with random rollouts. | def grow_tree(self):
decision_node = self.root
internal_env = copy.copy(self.env)
while (not decision_node.is_final) and decision_node.visits > 1:
a = self.select(decision_node)
new_random_node = decision_node.next_random_node(a, self._hash_action)
(new_decision_node, r) = self.select_outcome(internal_env, new_random_node)
new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)
new_decision_node.reward = r
new_random_node.reward = r
decision_node = new_decision_node
decision_node.visits += 1
cumulative_reward = self.evaluate(internal_env)
while not decision_node.is_root:
random_node = decision_node.father
cumulative_reward += random_node.reward
random_node.cumulative_reward += cumulative_reward
random_node.visits += 1
decision_node = random_node.father
decision_node.visits += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def UCT(rootstate, itermax, verbose=False):\n\n rootnode = Node(state=rootstate)\n\n for i in range(itermax):\n node = rootnode\n state = rootstate.Clone()\n\n # Select\n while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal\n node = node.UCTSelectChild()\n state.DoMove(node.move)\n\n # Expand\n expand = True\n while expand and node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n # print(\"[Expand] Untried move %s, %s, %s\" % (m[0], m[1], m[2]))\n expand = not state.DoMove(m)\n node = node.AddChild(m, state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n while state.GetMoves() != []: # while state is non-terminal\n state.DoMove(random.choice(state.GetMoves()))\n\n # Backpropagate\n while node != None: # backpropagate from the expanded node and work back to the root node\n node.Update(state.GetResult()) # state is terminal. Update node with result from POV of node.playerJustMoved\n node = node.parentNode\n\n # Output some information about the tree - can be omitted\n if (verbose):\n print(rootnode.TreeToString(0))\n else:\n print(rootnode.ChildrenToString())\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move # return the move that was most visited",
"def UCT(rootstate, itermax, verbose = False):\n\n rootnode = Node(state=rootstate)\n\n for i in range(itermax):\n node = rootnode\n state = rootstate.Clone()\n\n # Select\n while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal\n\n node = node.UCTSelectChild()\n state.DoMove(node.move)\n\n\n # Expand\n if node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n state.DoMove(m)\n node = node.AddChild(m, state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n while state.GetMoves() != []: # while state is non-terminal\n state.DoMove(random.choice(state.GetMoves()))\n\n # Backpropagate\n while node != None: # backpropagate from the expanded node and work back to the root node\n node.Update(state.GetResult(node.whosemove))\n node = node.parentNode\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move # return the move that was most visited",
"def UCT(player, rootstate, iter, prev_board, verbose=False):\n rootnode = Node(player=player, state=rootstate, preState=prev_board)\n\n for i in range(iter):\n node = rootnode\n state = deepcopy(rootstate)\n pre_state = deepcopy(state)\n currentPlayer = -player # turn of the player x\n\n # Select\n while (\n node.untriedMoves == [] and node.childNodes != []\n ): # node is fully expanded and non-terminal, loop until have untried Child or it is leaf node\n # choose the next child\n node = node.UCTChooseChild()\n # update the state of the board ((x,y),(x,y))\n pre_state = deepcopy(state)\n state = board_after_move_and_capturing(node.move[0], node.move[1], state)\n currentPlayer = -currentPlayer\n\n # Expand\n if (\n node.untriedMoves != []\n ): # if we can expand (i.e. state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n # update the state of the board\n pre_state = deepcopy(state)\n state = board_after_move_and_capturing(m[0], m[1], state)\n currentPlayer = -currentPlayer\n\n node = node.addChild(m, state, pre_state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n height = 0\n while get_winner(state) == 0: # while state is non-terminal\n if height > 30:\n break\n choice = random.choice(get_all_legal_moves(pre_state, state, currentPlayer))\n # update the state of the board\n pre_state = deepcopy(state)\n state = board_after_move_and_capturing(choice[0], choice[1], state)\n # print_board(state)\n currentPlayer = -currentPlayer\n height += 1\n\n # cout board\n # print(f\"The board becomes {board_to_string(state)}\")\n\n # Backpropagate\n while (\n node != None\n ): # backpropagate from the expanded node and work back to the root node\n if height > 30:\n winner = player\n else:\n winner = get_winner(state)\n\n if winner == -player:\n node.updateNode(1)\n else:\n node.updateNode(0)\n\n # state is terminal. Update node with result from POV of node.playerJustMoved\n node = node.parentNode\n\n # cout root node\n # for i in rootnode.childNodes:\n # print(i.move)\n # print(i.wins)\n # print(i.visits)\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[\n -1\n ].move # return the move that was most visited",
"def backup(from_edge, reward):\n cur_edge = from_edge\n\n while cur_edge is not None and cur_edge.parent is not None:\n cur_edge.total_reward += reward\n cur_edge.visit_count += 1\n cur_edge.avg_reward = cur_edge.total_reward / cur_edge.visit_count\n cur_edge = cur_edge.parent.parent_edge",
"def backpropagation(self, simulation_reward):\n current = self\n while current.parent is not None:\n current.visits += 1\n current.reward += simulation_reward\n current = current.parent",
"def decide_reward(prev_node, cur_node):\n # Constant decay\n reward = constants.REWARD_DECAY\n\n # First check if any players died\n prev_agent_ids = prev_node.get_alive_agents()\n cur_agent_ids = cur_node.get_alive_agents()\n missing_agent_ids = [agent for agent in prev_agent_ids if agent not in cur_agent_ids]\n this_agent = cur_node.get_agent_obj()\n\n # Agent died\n if this_agent.agent_id in missing_agent_ids:\n reward += constants.REWARD_DIED\n\n # Check if any enemies died (regardless of dying from whom)\n for enemy in this_agent.enemies:\n if enemy.name == 'AgentDummy':\n continue\n enemy_agent_id = int(enemy.name[-1])\n if enemy_agent_id in missing_agent_ids:\n reward += constants.REWARD_ENEMY_DIED\n\n\n # Then, check if picked up powerups\n cur_pos = this_agent.position\n item_enum = constants.Item\n powerups = [item_enum.Kick, item_enum.IncrRange, item_enum.ExtraBomb]\n\n if prev_node.game_env._board[cur_pos] in [p.value for p in powerups]:\n reward += constants.REWARD_POWERUP\n\n\n # Lastly, check if agent destroyed any wood boxes\n prev_laid_bombs = prev_node.get_agent_laid_bombs()\n\n def reward_bomb_for_direction(bomb, prev_board, cur_board, direction):\n for off in range(1, bomb.blast_strength):\n pos = (bomb.position[0] + off * direction[0], bomb.position[1] + off * direction[1])\n exit = False\n # If the explosion is blocked, exit early\n if not utility.position_on_board(prev_board, pos):\n break\n if utility.position_is_wall(prev_board, pos):\n exit = True\n # If a wood box is to be flamed in previous state and\n # is actually flamed in current state, then give reward\n if utility.position_is_wood(prev_board, pos) and utility.position_is_flames(cur_board, pos):\n return constants.REWARD_WOOD\n if exit:\n break\n return 0\n\n\n for bomb in prev_laid_bombs:\n # About to explode\n if bomb.life == 1:\n # NOTE:\n # For now, as long as any wood boxes are destroyed within the bomb range\n # of this exploding bomb it's counted towards this agent.\n # If bomb life > 1 but it's triggered by other bombs then we don't care\n prev_board, cur_board = prev_node.game_env._board, cur_node.game_env._board\n reward += reward_bomb_for_direction(bomb, prev_board, cur_board, direction=(1, 0))\n reward += reward_bomb_for_direction(bomb, prev_board, cur_board, direction=(0, 1))\n reward += reward_bomb_for_direction(bomb, prev_board, cur_board, direction=(-1, 0))\n reward += reward_bomb_for_direction(bomb, prev_board, cur_board, direction=(0, -1))\n\n return reward",
"def _apply_tree_policy(self, root, state):\n visit_path = [root]\n working_state = state.clone()\n current_node = root\n while not working_state.is_terminal() and current_node.explore_count > 0:\n if not current_node.children:\n # For a new node, initialize its state, then choose a child as normal.\n legal_actions = working_state.legal_actions()\n # Reduce bias from move generation order.\n self._random_state.shuffle(legal_actions)\n player_sign = -1 if working_state.current_player() != self.player else 1\n current_node.children = [SearchNode(action, player_sign)\n for action in legal_actions]\n\n if working_state.is_chance_node():\n # For chance nodes, rollout according to chance node's probability\n # distribution\n outcomes = working_state.chance_outcomes()\n action_list, prob_list = zip(*outcomes)\n action = self._random_state.choice(action_list, p=prob_list)\n chosen_child = next(c for c in current_node.children\n if c.action == action)\n else:\n # Otherwise choose node with largest UCT value\n chosen_child = max(\n current_node.children,\n key=lambda c: c.uct_value(current_node.explore_count, self.uct_c, # pylint: disable=g-long-lambda\n self.child_default_value))\n\n working_state.apply_action(chosen_child.action)\n current_node = chosen_child\n visit_path.append(current_node)\n\n return visit_path, working_state",
"def uct(root_state, iter_max, verbose=False):\n\n root_node = Node(state=root_state)\n\n for i in range(iter_max):\n node = root_node\n state = root_state.clone()\n\n # Select\n # node is fully expanded and non-terminal\n while not node.untried_moves and node.child_nodes:\n node = node.uct_select_child()\n state.do_move(node.move)\n\n # Expand\n # if we can expand (i.e. state/node is non-terminal)\n if node.untried_moves:\n m = random.choice(node.untried_moves)\n state.do_move(m)\n # add child and descend tree\n node = node.add_child(m, state)\n\n # Rollout - this can often be made orders of magnitude quicker\n # using a state.get_random_move() function\n # while state is non-terminal\n while state.get_moves():\n state.do_move(random.choice(state.get_moves()))\n\n # Backpropagate\n # backpropagate from the expanded node and work back to the root node\n while node is not None:\n # state is terminal. Update node with result\n # from POV of node.playerJustMoved\n node.update(state.get_result(node.player_just_moved))\n node = node.parent_node\n\n # Output some information about the tree - can be omitted\n if verbose:\n print(root_node.tree_to_string(0))\n else:\n print(root_node.children_to_string())\n\n # return the move that was most visited\n return sorted(root_node.child_nodes,\n key=lambda c: c.Q)[-1].move",
"def mcts(env, x_e, x_p, goal, k_budget, default_policy, T_max=100):\n a = np.array(action_space)\n new_x_p = x_p\n new_x_e = x_e\n tree = MyTree([Node(my_id=0, parent_id=-2, state=x_e, p_state=x_p,\n sum_inv_distance=1. / np.linalg.norm(np.array(x_e) - np.array(x_p)))]) # our tree\n for _ in range(k_budget):\n sum_inv_distance = 0. # aggregated sum of distances, before termination happened\n # First, we select the new candidate:\n best_node = tree.return_best() # here we have the best state\n x_e_best = best_node.state\n\n # Second, we perform action, according to our default policy\n u_e = a[default_policy[x_e_best]]\n new_x_e, _ = transition_function(env=env, x=x_e_best, u=u_e)\n # And perform corresponding step for pursuer\n new_x_p = pursuer_transition(env=env, x_e=new_x_e, x_p=new_x_p)\n\n # And we add the node to the tree\n tree.add_node(parent_id=best_node.id, state=new_x_e, action_applied=u_e)\n\n # Third, we launch the simulation\n last_t = T_max\n for t in range(T_max):\n # Step, according to the default policy\n u_e = a[default_policy[new_x_e]]\n new_x_e, _ = transition_function(env=env, x=new_x_e, u=u_e)\n # Step, according to pursuer policy\n new_x_p = pursuer_transition(env=env, x_e=new_x_e, x_p=new_x_p)\n # Accumulate the inversed distance. We need it for reward computation\n if new_x_e != new_x_p:\n sum_inv_distance += 1. / np.linalg.norm(np.array(new_x_e) - np.array(new_x_p))\n # If we reached the goal or was eaten, then stop\n if (new_x_e == new_x_p) or (new_x_e == goal):\n last_t = t\n break\n\n rew = reward(x_e=new_x_e, x_p=new_x_p, sum_inv_distance=sum_inv_distance, goal=goal, t=last_t, T_max=T_max)\n\n # Forth, we update all parent's nodes\n tree.update_tree(node_id=-1, outcome=rew)\n\n u = max([(i, tree.nodes[i].value) for i in tree.nodes[0].children_ids])\n u = tree.nodes[u[0]].action_applied\n return u",
"def backup(self, uct, reward, winner):\n while uct is not None:\n uct.visit_time += 1\n board = uct.my_board\n node_color = board.side_color\n if winner == node_color:\n uct.total_reward -= reward\n else:\n uct.total_reward += reward #正奖励应该更新赢家节点之后的动作\n # uct.total_reward -= reward\n uct = uct.my_parent",
"def backup_negamax(node, reward):\n temp_node = node\n temp_node.N += 1\n temp_node.Q += reward\n for parent, _ in temp_node.parents:\n backup_negamax(parent, -reward)",
"def run_UCT(self, game, actions_taken):\n \n # If current tree is null, create one using game\n if self.root == None:\n self.root = Node(game.clone())\n # If it's not, check if there are actions from actions_taken to update\n # the tree from this player.\n # If the list is not empty, update the root accordingly.\n # If the list is empty, that means this player hasn't passed the turn,\n # therefore the root is already updated.\n elif len(actions_taken) != 0:\n for i in range(len(actions_taken)):\n # Check if action from history is in current root.children.\n if actions_taken[i][0] in self.root.children:\n # Check if the actions are made from the same player\n child_node = self.root.children[actions_taken[i][0]]\n if child_node.state.player_turn == actions_taken[i][1] \\\n and set(self.root.state.available_moves()) \\\n == set(game.available_moves()):\n self.root = child_node\n else:\n self.root = None\n self.root = Node(actions_taken[i][2].clone())\n else:\n self.root = None\n self.root = Node(actions_taken[i][2].clone())\n # This means the player is still playing (i.e.: didn't choose 'n').\n else:\n # Therefore, check if current root has the same children as \"game\"\n # offers. If not, reset the tree.\n if set(self.root.children) != set(game.available_moves()):\n self.root = None\n self.root = Node(game.clone())\n\n #Expand the children of the root if it is not expanded already\n if not self.root.is_expanded():\n self.expand_children(self.root)\n\n root_state = self.root.state.clone()\n\n for _ in range(self.n_simulations):\n node = self.root\n node.state = root_state.clone()\n search_path = [node]\n while node.is_expanded():\n action, new_node = self.select_child(node)\n node.action_taken = action\n search_path.append(new_node)\n node = new_node\n # At this point, a leaf was reached.\n # If it was not visited yet, then perform the rollout and\n # backpropagates the reward returned from the simulation.\n # If it has been visited, then expand its children, choose the one\n # with the highest ucb score and do a rollout from there.\n if node.n_visits == 0:\n rollout_value = self.rollout(node)\n self.backpropagate(search_path, rollout_value)\n else:\n _, terminal_state = node.state.is_finished()\n # Special case: if \"node\" is actually a leaf of the game (not \n # a leaf from the current tree), then only rollout should be \n # applied since it does not make sense to expand the children\n # of a leaf.\n if terminal_state:\n rollout_value = self.rollout(node)\n self.backpropagate(search_path, rollout_value)\n else:\n self.expand_children(node)\n action, new_node = self.select_child(node)\n node.action_taken = action\n search_path.append(new_node)\n node = new_node\n rollout_value = self.rollout(node)\n self.backpropagate(search_path, rollout_value)\n \n dist_probability = self.distribution_probability(game)\n action = self.select_action(game, self.root, dist_probability)\n q_a_root = self.root.q_a\n self.root = self.root.children[action]\n # Remove the statistics of the chosen action from the chosen child\n if self.root.n_a:\n self.root.n_a.pop(action)\n if self.root.q_a:\n self.root.q_a.pop(action)\n return action, dist_probability, q_a_root",
"def lmbd(self, lamb):\n\t n = self.nodes\n\n\t \t# The top_k_nodes is a list of all nodes in descending\n\t # order of influence\n\t top_k_nodes = self.top_k(self.nodes)\n\t for i in range(n):\n\t\t\tself.deactivate_all()\n\t\t\tinitially_active = top_k_nodes[:i]\n\n\t\t\ttotal_contrib = i + 1\n\t\t\tfor node in initially_active:\n\t\t\t\ttotal_contrib += self.v(node)\n\n\t\t\tcoverage = total_contrib*1.00/n\n\t\t\tif coverage >= lamb:\n\t\t\t\treturn top_k_nodes[:i]",
"def learn_after_trial(self, cumulative_reward):\r\n return",
"def execute_policy_and_get_cost(curr_node, reward_machines, policy_bank, tester, new_task_rm, new_task_u1,\n bound=np.inf):\n game = copy.deepcopy(curr_node.parent_state)\n num_features = len(game.get_features())\n s1, s1_features = game.get_state_and_features()\n curr_policy = curr_node.policy\n curr_policy_rm = reward_machines[curr_policy[0]]\n\n bonus = []\n for t in range(tester.testing_params.num_steps):\n a = policy_bank.get_best_action(curr_policy[0], curr_policy[1],\n s1_features.reshape((1, num_features)), add_noise=False)\n game.execute_action(a)\n # game.render()\n s2, s2_features = game.get_state_and_features()\n curr_policy_u2 = curr_policy_rm.get_next_state(curr_policy[1], game.get_true_propositions())\n new_task_u2 = new_task_rm.get_next_state(new_task_u1, game.get_true_propositions())\n\n desired_next_state = curr_policy_rm.get_next_state(curr_policy[1], curr_policy[2])\n\n r = new_task_rm.get_reward(new_task_u1, new_task_u2, s1, a, s2)\n if curr_policy_u2 == desired_next_state:\n logger.info(\"EXECUTED ACTION {}, CAN GO TO NEXT LEVEL\".format(curr_policy[2]))\n return t + 1, game, new_task_u2, r, bonus\n elif curr_policy_u2 == curr_policy[1]:\n logger.info(\"STILL FOLLOWING CURRENT POLICY {}, CONTINUE\".format(curr_policy[2]))\n if new_task_u2 != new_task_u1:\n logger.info(\n \"ENCOUNTERED EVENT {} WHILE FOLLOWING {}\".format(game.get_true_propositions(), curr_policy[2]))\n bonus.append(game.get_true_propositions())\n # else:\n # curr_policy_u2 = curr_policy[1]\n # print(game.get_true_propositions())\n # print(\"OOPS, WRONG WAY\")\n # return np.inf, game, new_task_u1, r, bonus\n\n if game.is_env_game_over() or t + 1 >= bound:\n return np.inf, game, new_task_u2, r, bonus\n\n s1, s1_features = s2, s2_features\n new_task_u1 = new_task_u2\n\n return np.inf, game, new_task_u1, 0, bonus",
"def _greedy(self, state):\n\n node = self.mcts_head\n if self.verbose > 1:\n logger.debug(f\"Starting greedy algorithm.\")\n\n while not node.terminal:\n # Parse current state\n this_state, total_reward, terminal = self._parse_path(state, node.path)\n node.set_terminal(terminal)\n if self.verbose > 1:\n logger.debug(f\" Analyzing node {node.path}\")\n\n # Expand\n if not node.terminal and not node.children:\n actions = self._find_legal_actions(this_state)\n step_rewards = [self._parse_action(action, from_which_env=\"sim\") for action in actions]\n if self.verbose > 1:\n logger.debug(f\" Expanding: {len(actions)} legal actions\")\n node.expand(actions, step_rewards=step_rewards)\n\n # If terminal, backup reward\n if node.terminal:\n if self.verbose > 1:\n logger.debug(f\" Node is terminal\")\n if self.verbose > 1:\n logger.debug(f\" Backing up total reward {total_reward}\")\n node.give_reward(self.episode_reward + total_reward, backup=True)\n\n # Debugging -- this should not happen\n if not node.terminal and not node.children:\n logger.warning(\n f\"Unexpected lack of children! Path: {node.path}, children: {node.children.keys()}, legal actions: {self._find_legal_actions(this_state)}, terminal: {node.terminal}\"\n )\n node.set_terminal(True)\n\n # Greedily select next action\n if not node.terminal:\n action = node.select_greedy()\n node = node.children[action]\n\n if self.verbose > 0:\n choice = self.mcts_head.select_best(mode=\"max\")\n self._report_decision(choice, state, \"Greedy\")",
"def immediate_reward(state_node):\n #DANIEL: edited the reward function to get rid of the parent argument and will just be a function of the current state\n return state_node.state.reward()",
"def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state != root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False",
"def step_tree(self):\n if random.random() < self.world.f or self.any_neighbor_burning():\n self.set_state(\"orange\")",
"def getNextNodeUsingTotalStepsToTravel(kGoalState):\n \n global fringe\n global solutions\n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getTotalStepsToReachGoalState(pnode,kGoalState)\n # print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value =getTotalStepsToReachGoalState(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getTotalStepsToReachGoalState(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode",
"def expand(node):\n if not node.is_leaf():\n return\n\n # build children\n is_done = []\n for action in constants.Action:\n child_node = node.copy()\n agents_obs = node.game_env.get_observations()\n\n # Combine current observation with the agent's memory of the game\n agents_obs[node.agent_id] = utility.combine_agent_obs_and_memory(node.agent_memory, agents_obs[node.agent_id])\n\n # Replace the current agent's action with the action we are searching\n agents_actions = node.game_env.act(agents_obs)\n agents_actions[node.agent_id] = action.value\n\n # Apply actions to environment, while checking if reaching a terminal state\n new_agents_obs, _, done, _ = child_node.game_env.step(agents_actions)\n is_done.append(done)\n\n # Update agent's memory after stepping\n child_node.agent_memory = utility.update_agent_memory(child_node.agent_memory,new_agents_obs[child_node.agent_id])\n\n # Build Tree\n new_edge = MCTEdge(node, child_node)\n child_node.parent_edge = new_edge\n node.child_edges.append(new_edge)\n\n # rollout for each children, and then send the average reward back to root via the current leaf\n leaf_avg_reward = 0\n\n for done, edge in zip(is_done, node.child_edges):\n child_node = edge.child\n if done:\n leaf_avg_reward += decide_reward(prev_node=node, cur_node=child_node)\n else:\n leaf_avg_reward += rollout(child_node, constants.ROLLOUT_DEPTH)\n\n leaf_avg_reward /= len(node.child_edges)\n\n # Backup the value to root\n backup(node.parent_edge, leaf_avg_reward)",
"def _expand_node(expand_n, base_cost, randomizer):\n\n for next_n, props in nb[expand_n].items():\n randomizer -= 1\n total_cost = props['weight'] + base_cost\n e_cost = (total_cost, props['weight'], randomizer)\n\n # Check for tree membership as this signifies a loop back to the tree\n if next_n not in scanned or e_cost < scanned[next_n] and not tree.has_node(next_n):\n heappush(queue, (e_cost[0], e_cost[1], e_cost[2], next_n))\n scanned[next_n] = e_cost\n p[next_n] = expand_n",
"def act(self):\n\n self.state = self.next_state\n self.choose_random = np.random.uniform(0., 1.) < self.epsilon\n # If exploring\n if self.choose_random:\n # Select a random action using softmax\n idx = np.random.choice(4)\n self.action = self.idx2act[idx]\n else:\n # Select the greedy action\n self.action = self.idx2act[self.argmaxQsa(self.state)]\n\n self.reward = self.move(self.action)\n self.total_reward += self.reward",
"def _step(self):\n if self.best_unexplored_lower_bound < self.best_upper_bound:\n\n # Select a Node\n self._active_node = self._pop_node_with_best_lower_bound()\n\n # Reporting\n if self._reporting:\n print(self.report)\n\n # Select a Vertex\n unassigned_vertex_chosen = self._choose_unassigned_vertex_highest_degree()\n\n # Branch\n self._active_node.construct_children_nodes(\n unassigned_vertex_chosen,\n self._terminals_by_vertex[unassigned_vertex_chosen],\n )\n\n # NB: we do not need to worry about duplicate nodes\n # the nodes are constructed by forcing an assignment of\n # vertices to terminals. Thus, the resulting partitions\n # can never be identical\n self._unexplored_nodes += self._active_node.children\n self._all_nodes += self._active_node.children\n\n else:\n # if there are no unassigned vertices, we are at a leaf node\n self._done = True",
"def explorer(global_rb, queue, trained_steps, n_transition,\n is_training_done, lock, env_fn, policy_fn,\n buffer_size=1024, max_transition=None,\n episode_max_steps=1000):\n env = env_fn()\n policy = policy_fn(env, \"Explorer\", global_rb.get_buffer_size())\n local_rb = ReplayBuffer(obs_shape=env.observation_space.shape,\n act_dim=env.action_space.low.size,\n size=buffer_size)\n\n s = env.reset()\n episode_steps = 0\n total_reward = 0.\n total_rewards = []\n start = time.time()\n sample_at_start = 0\n\n while not is_training_done.is_set():\n # Periodically copy weights of explorer\n if not queue.empty():\n actor_weights, critic_weights, critic_target_weights = queue.get()\n update_target_variables(policy.actor.weights, actor_weights, tau=1.)\n update_target_variables(policy.critic.weights, critic_weights, tau=1.)\n update_target_variables(policy.critic_target.weights, critic_target_weights, tau=1.)\n\n n_transition.value += 1\n episode_steps += 1\n a = policy.get_action(s)\n s_, r, done, _ = env.step(a)\n done_flag = done\n if episode_steps == env._max_episode_steps:\n done_flag = False\n total_reward += r\n local_rb.add(s, a, r, s_, done_flag)\n\n s = s_\n if done or episode_steps == episode_max_steps:\n s = env.reset()\n total_rewards.append(total_reward)\n total_reward = 0\n episode_steps = 0\n\n # Add collected experiences to global replay buffer\n if local_rb.get_stored_size() == buffer_size - 1:\n temp_n_transition = n_transition.value\n samples = local_rb.sample(local_rb.get_stored_size())\n states, next_states, actions, rewards, done = samples[\"obs\"], samples[\"next_obs\"], samples[\"act\"], samples[\"rew\"], samples[\"done\"]\n done = np.array(done, dtype=np.float64)\n td_errors = policy.compute_td_error(\n states, actions, next_states, rewards, done)\n print(\"Grad: {0: 6d}\\tSamples: {1: 7d}\\tTDErr: {2:.5f}\\tAveEpiRew: {3:.3f}\\tFPS: {4:.2f}\".format(\n trained_steps.value, n_transition.value, np.average(np.abs(td_errors).flatten()),\n sum(total_rewards) / len(total_rewards), (temp_n_transition - sample_at_start) / (time.time() - start)))\n total_rewards = []\n lock.acquire()\n global_rb.add(\n states, actions, rewards, next_states, done,\n priorities=np.abs(td_errors)+1e-6)\n lock.release()\n local_rb.clear()\n start = time.time()\n sample_at_start = n_transition.value\n\n if max_transition is not None and n_transition.value >= max_transition:\n is_training_done.set()",
"def mcts(self, node):\n\n starttime = time.time()\n sim_count = 0\n board_in = self.env.board.fen()\n\n # First make a prediction for each child state\n for move in self.env.board.generate_legal_moves():\n if move not in node.children.keys():\n node.children[move] = Node(self.env.board, parent=node)\n\n episode_end, reward = self.env.step(move)\n\n if episode_end:\n successor_state_value = 0\n else:\n successor_state_value = np.squeeze(\n self.agent.model.predict(np.expand_dims(self.env.layer_board, axis=0))\n )\n\n child_value = reward + self.gamma * successor_state_value\n\n node.update_child(move, child_value)\n self.env.board.pop()\n self.env.init_layer_board()\n if not node.values:\n node.values = [0]\n\n while starttime + self.search_time > time.time() or sim_count < self.min_sim_count:\n depth = 0\n color = 1\n node_rewards = []\n\n # Select the best node from where to start MCTS\n while node.children:\n node, move = node.select(color=color)\n if not move:\n # No move means that the node selects itself, not a child node.\n break\n else:\n depth += 1\n color = color * -1 # switch color\n episode_end, reward = self.env.step(move) # Update the environment to reflect the node\n node_rewards.append(reward)\n # Check best node is terminal\n\n if self.env.board.result() == \"1-0\" and depth == 1: # -> Direct win for white, no need for mcts.\n self.env.board.pop()\n self.env.init_layer_board()\n node.update(1)\n node = node.parent\n return node\n elif episode_end: # -> if the explored tree leads to a terminal state, simulate from root.\n while node.parent:\n self.env.board.pop()\n self.env.init_layer_board()\n node = node.parent\n break\n else:\n continue\n\n # Expand the game tree with a simulation\n Returns, move = node.simulate(self.agent.fixed_model,\n self.env,\n temperature=self.temperature,\n depth=0)\n self.env.init_layer_board()\n\n if move not in node.children.keys():\n node.children[move] = Node(self.env.board, parent=node)\n\n node.update_child(move, Returns)\n\n # Return to root node and backpropagate Returns\n while node.parent:\n latest_reward = node_rewards.pop(-1)\n Returns = latest_reward + self.gamma * Returns\n node.update(Returns)\n node = node.parent\n\n self.env.board.pop()\n self.env.init_layer_board()\n sim_count += 1\n\n board_out = self.env.board.fen()\n assert board_in == board_out\n\n return node",
"def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass",
"def learn(self, state, action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0",
"def mcts(node, expanding=False):\n global boards\n boards += 1\n\n node.games += 1\n if game.check_win(node.board) != ' ':\n if game.check_win(node.board) == 'X' or game.check_win(node.board) == 'O':\n node.wins += 1\n return\n\n # Selection\n move = -1\n next_player = 'O' if node.player_id == 'X' else 'X'\n next_board = deepcopy(node.board)\n if len(node.children) == 25:\n max_uct = -inf\n for child in node.children:\n uct = child.wins/child.games + sqrt(log(node.games) / child.games)\n if uct > max_uct:\n max_uct = uct\n move = child.move\n\n # Expansion and Simulation\n elif not expanding:\n for move_expansion in range(25):\n if node.board[move_expansion] != ' ':\n continue\n next_board = deepcopy(node.board)\n next_board[move_expansion] = node.player_id\n next_node = Node(next_board, next_player, move_expansion)\n is_child = False\n for child in node.children:\n if child.board == next_board:\n next_node = child\n is_child = True\n if not is_child:\n node.children.append(next_node)\n mcts(next_node, True)\n else:\n move = randint(0, 24)\n while node.board[move] != ' ':\n move = randint(0, 24)\n next_board[move] = node.player_id\n next_node = Node(next_board, next_player, move)\n is_child = False\n for child in node.children:\n if child.board == next_board:\n next_node = child\n is_child = True\n if not is_child:\n node.children.append(next_node)\n mcts(next_node, expanding)\n\n # Back-Propagation\n node.wins = 0\n node.games = 0\n if node.children:\n for child in node.children:\n node.wins += child.games - child.wins\n node.games += child.games",
"def linucb(self):\n while True:\n context = yield\n context = np.matrix(context)\n matrix_ainv_tmp = np.array(\n [self._modelstorage.get_model()['matrix_ainv'][action] for action in self._actions])\n theta_tmp = np.array([self._modelstorage.get_model()['theta'][action] for action in self._actions])\n\n # The recommended action should maximize the Linear UCB.\n score = {}\n for action_idx in range(len(self._actions)):\n score[action_idx] = np.dot(context[action_idx], theta_tmp[action_idx]) + self.alpha * np.sqrt(\n np.dot(np.dot(context[action_idx], matrix_ainv_tmp[action_idx]), context[action_idx].T))\n action_max = self._actions[np.argmax(score.values())]\n yield action_max\n\n raise StopIteration"
]
| [
"0.6164046",
"0.61358875",
"0.6106919",
"0.6031748",
"0.60115874",
"0.595056",
"0.59471905",
"0.59269196",
"0.58961433",
"0.58696514",
"0.58533907",
"0.58485657",
"0.5842488",
"0.5821475",
"0.5783845",
"0.5782573",
"0.57748175",
"0.5721878",
"0.57213783",
"0.5706393",
"0.5656547",
"0.56451815",
"0.56177765",
"0.561712",
"0.5612555",
"0.5595582",
"0.5582546",
"0.55804706",
"0.5574465",
"0.5555482"
]
| 0.6590161 | 0 |