query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 distinct values) |
---|---|---|---|---|---|---|
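Each row pairs a doctest-style `query` with one positive `document` and 30 mined `negatives` (with their `negative_scores`), plus the positive's own `document_score` and `document_rank`. The sketch below is an assumed illustration of that row layout in Python; the field names come from the header above, but the types and the helper are not part of the dataset's tooling.

```python
from typing import Any, Dict, List, TypedDict

# Assumed row layout mirroring the column header above (not an official schema).
class Row(TypedDict):
    query: str                   # doctest / docstring describing the wanted code
    document: str                # the positive (relevant) code snippet
    metadata: Dict[str, Any]     # e.g. {"objective": {"triplet": [["query", "document", "negatives"]]}}
    negatives: List[str]         # 30 hard-negative code snippets
    negative_scores: List[str]   # one retrieval score per negative, stored as strings
    document_score: str          # score of the positive document
    document_rank: str           # rank of the positive ("0" or "1" in the rows shown here)

def sanity_check(row: Row) -> None:
    # Both sequence columns are declared with length 30 in the header.
    assert len(row["negatives"]) == len(row["negative_scores"]) == 30
```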
>>> tfm = PSTransform()
>>> LineSegment(Point(0, 0), Point(1, 0)).postscript(tfm)
'306.0 396.0 lineto 378.0 396.0 moveto' | def postscript(self, tfm):
return (tfm.format('{0} {1} lineto ', self.p1) +
tfm.format('{0} {1} moveto', self.p2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def postscript(self, tfm):\n k = 180. / PI\n if self.angle_range.clockwise():\n a, b = self.angle_range.start, self.angle_range.finish\n else:\n a, b = self.angle_range.finish, self.angle_range.start\n return (tfm.format('{0} {1} ', self.center) +\n '{0} '.format(tfm.scale(abs(self.radius))) +\n '{0} {1} '.format(k * a, k * b) + 'arc')",
"def ps2svg_string(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n path_style = \"fill:none;stroke:#000000;stroke-width:16;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1\"\n point_style = \"font-variant:normal;font-weight:normal;font-size:13.39669991px;font-family:Times;-inkscape-font-specification:Times-Roman;writing-mode:lr-tb;fill:#0000FF;fill-opacity:1;fill-rule:nonzero;stroke:none\"\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntro)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n point_info = []\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 10)\n # Convert into path line\n sPathLine = '<path id=\"path{}\" style=\"{}\" d=\"M {},{} {},{}\" />'.format(\n idx, path_style, nums[0], nums[1], nums[2], nums[3])\n idx += 2\n lst_out.append(sPathLine)\n else:\n # We have exited the lines section\n section = \"point\"\n lst_out.append('<g transform=\"scale(10)\" id=\"g{}\">'.format(idx))\n idx += 2\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n\n # Is this the first point?\n if bFirstPoint:\n lst_out.append('<text id=\"text{}\" style=\"{}\" transform=\"matrix(1,0,0,-1,{},{})\">'.format(\n idx, point_style, nums[0], nums[1]))\n idx += 2\n oorsprong['x'] = float(nums[0])\n oorsprong['y'] = float(nums[1])\n bFirstPoint = False\n\n # In all situations: position w.r.t. oorsprong\n pos_x = \"{:.6f}\".format(float(nums[0]) - oorsprong['x']) \n pos_y = \"{:.6f}\".format(oorsprong['y'] - float(nums[1]) )\n point_info.append(pos_y)\n point_info.append(pos_x)\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n point_info.append(sLabel)\n\n # Output this label\n sLabel = '<tspan id=\"tspan{}\" y=\"{}\" x=\"{}\">{}</tspan>'.format(\n idx, pos_y, pos_x, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n point_info = []\n\n # Finish up the svg nicely\n lst_out.append(\" </text>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack",
"def ps2svg_simple(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n line_style = 'stroke:black;stroke-width:1'\n point_style = \"fill:blue;font-family:Times\"\n offset_y = 18 # Adding 18px to compensate for double mirroring\n min_y = width_simple\n min_x = height_simple\n max_y = 0\n max_x = 0\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntroSimple)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 1)\n # Convert into <line> element\n sLine = '<g id=line{}><line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\" style=\"{}\" stroke-linecap=\"round\" /></g>'.format(\n idx, nums[0], nums[1], nums[2], nums[3], line_style)\n idx += 2\n lst_out.append(sLine)\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]), float(nums[2]))\n min_y = min(min_y, float(nums[1]), float(nums[3]))\n max_x = max(max_x, float(nums[0]), float(nums[2]))\n max_y = max(max_y, float(nums[1]), float(nums[3]))\n else:\n # We have exited the lines section\n section = \"point\"\n\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n pos_x = \"{:.6f}\".format(float(nums[0])) \n pos_y = \"{:.6f}\".format(float(nums[1]) + offset_y )\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]))\n min_y = min(min_y, float(nums[1]))\n max_x = max(max_x, float(nums[0]))\n max_y = max(max_y, float(nums[1]))\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n\n # Output this label\n sLabel = '<g id=\"text{}\"><text y=\"{}\" x=\"{}\" style=\"{}\">{}</text></g>'.format(\n idx, pos_y, pos_x, point_style, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n\n # Finish up the svg nicely\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n\n # Adapt w.r.t. min_x and min_y, max_x, max_y\n fHeight = height_simple - 2 * min_y + offset_y\n sViewbox = 'viewBox=\"{} {} {} {}\" width=\"{}\" height=\"{}\"'.format(\n 0, min_y, width_simple, fHeight, width_simple, fHeight\n )\n sBack = sBack.replace('@viewbox', sViewbox)\n\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack",
"def postscript(self):\n self.g.postscript_output(fileName='tmp2.ps',decorations='no')",
"def line(points):\n return LineString(points)",
"def transform_point(self, pt):\r\n\r\n x, y = pt\r\n return (x - self.xoffset, (y - self.yoffset) * self.yscale)",
"def convert(self, pt):\n # default transformation is to do nothing\n return pt",
"def dotext(rpoint, text, angle, bi):\r\n## w(\"/Arial findfont\")\r\n if bi:\r\n w(\"/%s findfont\" % gv[\"bifont\"])\r\n else:\r\n w(\"/%s findfont\" % gv[\"font\"])\r\n if gv[\"fontfixed\"] is False:\r\n localfontsize = int(gv[\"fontsize\"]*gv[\"globalscale\"])\r\n else:\r\n localfontsize = int(gv[\"fontsize\"])\r\n w(\"%d scalefont\" % localfontsize)\r\n w(\"setfont\")\r\n w(\"newpath\")\r\n p = apoint(rpoint)\r\n if angle != 0:\r\n w(\"gsave\")\r\n w(\"%d %d translate\" % (p[0], p[1]))\r\n w(\"%d rotate\" % angle)\r\n w(\"0 0 moveto\")\r\n w(\"(\" + text + \") show\")\r\n w(\"grestore\")\r\n else:\r\n w(\"%d %d moveto\" % (p[0],p[1]))\r\n w(\"(\" + text + \") show\")",
"def postepy(self,przedmiot:str)->float:\n pass",
"def convert_point_to_units(self, p):\n pt = vector3d(p)\n pt = pt.scale(self.track_widths[0],self.track_widths[1],1)\n return pt",
"def translate( t, P ): \n assert t.shape[0]==P.n # Dimension match \n if P.type=='AH_polytope':\n return pp.AH_polytope(t=t+P.t,T=P.T,P=P.P)\n elif P.type=='zonotope':\n return pp.zonotope(x=t+P.x,G=P.G)\n elif P.type==\"H_polytope\":\n return pp.H_polytope(H=P.H,h=P.h+np.dot(P.H,t))\n else:\n return ValueError('Polytope type: ',P.type,\" Not recognized\")",
"def predicted(self, p):\n verts = self.p2vertices(p)\n poly = Polygon(verts, self.props)\n return talwani.gz(self.x, self.z, [poly])",
"def m2pt(x):\n return x / pt_size",
"def sign_line(pt, P1, P2):\n x1, y1 = P1\n x2, y2 = P2\n x, y = pt\n\n return np.sign((x - x1)*(y2 - y1) - (y-y1)*(x2-x1))",
"def predicted(self, p):\n polygon = Polygon(self.verts + [p], {'density': self.density})\n return talwani.gz(self.x, self.z, [polygon])",
"def lineTo(self, pt: Tuple[float, float]) -> None:\n raise NotImplementedError",
"def isPostscript(fmt):\n if fmt == 'POST' or fmt == 'PSCL' or fmt == 'PDF':\n return 1\n return 0",
"def project_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n\n return add_vectors(a, c)",
"def mm_to_pt(x):\n return x * 2.8346278",
"def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)",
"def __repr__(self):\n return 'LineSegment({0}, {1})'.format(self.p1, self.p2)",
"def postepy(self,przedmiot:str)-> float:\n return self.przedmioty[przedmiot].srednia()",
"def text_preprocessing_pdf(self,p):\n #remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p",
"def RearangePoinAnnotation(point,PaddZAxis=0,PaddYAxis=0,PaddXAxis=0):\n point[:,0] = point[:,0] ^ point[:,2]\n point[:,2] = point[:,2] ^ point[:,0]\n point[:,0] = point[:,0] ^ point[:,2]\n if PaddZAxis!=0:\n point[:,0]+=PaddZAxis\n if PaddYAxis!=0:\n point[:,1]+=PaddYAxis\n if PaddXAxis!=0:\n point[:,2]+=PaddXAxis\n return(point)",
"def transform(p, xform, axes=None, vector=False):\n\n p = _fillPoints(p, axes)\n t = np.dot(xform[:3, :3], p.T).T\n\n if not vector:\n t = t + xform[:3, 3]\n\n if axes is not None:\n t = t[:, axes]\n\n if t.size == 1: return t[0]\n else: return t",
"def draw_p_to_eps(p):\n return ppf((p + 1.0) / 2)",
"def normal_at(self, p):\n pass",
"def psprint(self, filename):\n\n # The portrait A4 page is, in mm, WxH=210x297. Let's have a safety\n # margin of 7mm all around it, and the usable area becomes 196x283.\n W = 196.0\n H = 283.0\n x1, y1, x2, y2 = self._c.bbox(\"all\")\n options = {\n \"pageanchor\": \"sw\",\n \"x\": \"%fp\" % x1,\n \"y\": \"%fp\" % y1,\n \"height\": \"%fp\" % (y2-y1),\n \"width\": \"%fp\" % (x2-x1),\n \"pagex\": \"0\",\n \"pagey\": \"0\",\n \"file\": filename,\n \"colormode\": \"mono\",\n }\n # ??? I think I'm doing all this viewport math sensibly, BUT I\n # still get a weird asymmetric margin around the thing, and I\n # haven't got a clue how to get rid of it.\n yscale = (y2-y1) / H\n xscale = (x2-x1) / W\n # The direction with the greater scaling factor is the limiting one\n if xscale > yscale:\n options[\"pagewidth\"] = \"%fm\" % W\n else:\n options[\"pageheight\"] =\"%fm\" % H\n self._c.update()\n apply(self._c.postscript, (), options)",
"def pdf(x):\n return lambda point: self.information_manifold.point_to_pdf(point)(x)",
"def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())"
] | [
"0.58846533",
"0.5812993",
"0.5740526",
"0.5379793",
"0.5304976",
"0.5210837",
"0.5098128",
"0.5069906",
"0.5061781",
"0.5048277",
"0.50333875",
"0.50174236",
"0.50158346",
"0.49841657",
"0.49388242",
"0.49265838",
"0.48961708",
"0.48790273",
"0.48632494",
"0.4860366",
"0.48510084",
"0.48329973",
"0.48106608",
"0.47637415",
"0.4737925",
"0.47267494",
"0.4726714",
"0.46845967",
"0.4673648",
"0.4666519"
] | 0.7476756 | 0 |
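The doctest in the row above implies a `PSTransform` that maps model coordinates to PostScript points with the page centre at (306, 396) and 72 points per unit. The classes below are a minimal sketch written only to reproduce that doctest; they are an assumption about the surrounding module, not its actual source, and the real `PSTransform` interface may differ.

```python
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

class PSTransform:
    # Assumed: centre of a US-letter page (306, 396) in points, 72 pt per model unit.
    def __init__(self, cx=306.0, cy=396.0, unit=72.0):
        self.cx, self.cy, self.unit = cx, cy, unit

    def scale(self, r):
        # Scale a model-space length into points.
        return self.unit * r

    def format(self, template, pt):
        # Fill a template with the page coordinates of a model-space point.
        return template.format(self.cx + self.unit * pt.x,
                               self.cy + self.unit * pt.y)

class LineSegment:
    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2

    def postscript(self, tfm):
        return (tfm.format('{0} {1} lineto ', self.p1) +
                tfm.format('{0} {1} moveto', self.p2))

assert (LineSegment(Point(0, 0), Point(1, 0)).postscript(PSTransform())
        == '306.0 396.0 lineto 378.0 396.0 moveto')
```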
>>> LineSegment(Point(0, 0), Point(1, 0))
LineSegment((0,0), (1,0)) | def __repr__(self):
return 'LineSegment({0}, {1})'.format(self.p1, self.p2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createLineSegment(self):\n return _libsbml.Curve_createLineSegment(self)",
"def createLineSegment(self):\n return _libsbml.Layout_createLineSegment(self)",
"def line(points):\n return LineString(points)",
"def createLineSegment(self):\n return _libsbml.GeneralGlyph_createLineSegment(self)",
"def createLineSegment(self):\n return _libsbml.ReferenceGlyph_createLineSegment(self)",
"def __init__(self, *points, width=1, color=colors.WHITE, conversion=True):\n if len(points) > 0: # Extracting the points arguments under the same list format\n if type(points[0]) == list:\n points = points[0]\n if len(points) == 1: points = points[0]\n if len(points) != 2: raise Exception(\"A segment must have 2 points.\")\n self.points = list(points)\n self.width = width\n self.color = color\n self.conversion = conversion",
"def createLineSegment(self):\n return _libsbml.ReactionGlyph_createLineSegment(self)",
"def line(x0: float, y0: float, x1: float, y1: float) -> LineCollection:\n return LineCollection([(complex(x0, y0), complex(x1, y1))])",
"def __init__(self, c, p1=Point(), p2 = Point()):\n Line.__init__(self, p1, p2)\n self.cnv = c",
"def createFromLine(cls, line, **kwargs):\n angle = line.angle\n x, y = cls.cartesian([1, angle])\n return cls(x, y, **kwargs)",
"def createFromSegment(cls, segment, **kwargs):\n return cls.createFromTwoPoints(segment.p1, segment.p2, **kwargs)",
"def discretize_line(p0, p1, segments):\n p0, p1 = Point(p0), Point(p1)\n dx, dy = p1.x - p0.x, p1.y - p0.y\n vtx = [Point(p0).as_tuple()]\n if isinstance(segments, list):\n for ds in segments:\n x0 = p0.x + ds * dx\n y0 = p0.y + ds * dy\n vtx.append((x0, y0))\n return vtx\n for i in range(segments):\n ds = (i + 1) / segments\n x0 = p0.x + ds * dx\n y0 = p0.y + ds * dy\n vtx.append((x0, y0))\n return vtx",
"def line_to(self, point: Onion[Tuple[float, float], Point2D, Point3D, Point]):\n start_point = self.last_point\n end_point = _point_2d(point)\n self._segments.append(Line3D.create(start_point, end_point))",
"def __init__(self, line):\n self.start = LineAndColumn(line, 1)\n self.end = LineAndColumn(line, 0)",
"def createFromLine(line):\n return HalfLine(line.point, line.angle)",
"def createLineSegment(self):\n return _libsbml.SpeciesReferenceGlyph_createLineSegment(self)",
"def test_contains_point() -> None:\n point_1 = Point(1, 2)\n point_2 = Point(-2, -4)\n point_3 = Point(3, 3)\n point_4 = Point(0, 0)\n\n line_segment = LineSegment(first=point_1, second=point_2)\n\n assert line_segment.does_contain(point_1)\n assert line_segment.does_contain(point_2)\n assert not line_segment.does_contain(point_3)\n assert line_segment.does_contain(point_4)",
"def __init__(self, vertices, **kwargs):\n super(Line, self).__init__(vertices, **kwargs)\n self._geotype = \"Line\"\n return",
"def __init__(self, start, end, oldLine = None):\n self.__start = start\n self.__end = end\n if(self.__start == self.__end):\n \"\"\"\n If a zero length line is created that most likely means there is a\n logic problem somewhere in the program. This does not throw and error\n so that the output can still be examined to help diagnose the problem.\n \"\"\"\n# raise Exception('Zero length line')\n logger.warning('A line was created with no length at: ' + \n str(self.start))\n \"\"\" The Point which is the upper left corner of the line's bounding box \"\"\"\n self.__upperLeft = None\n \"\"\" The Point of the lower right corner of the bounding box. \"\"\"\n self.__lowerRight = None\n self.__extrusionRate = 0\n self.freezeExRate = False\n if not(oldLine is None):\n self.__extrusionRate = oldLine.extrusionRate\n self.freezeExRate = oldLine.freezeExRate\n self.vector = np.array([self.end.x-self.start.x,\n self.end.y-self.start.y])",
"def line(self, x, y):\n self.call('line', x, y)",
"def __init__(self, vertices=None, vector=None, color=None):\n\n if vector and isinstance(vector, Vector3):\n vertices = [Point3(0, 0, 0), Point3(vector)]\n\n if vertices and len(vertices) > 2:\n vertices = vertices[:2]\n LineString.__init__(self, vertices=vertices, color=color)",
"def line(canvas, points, line_width, line_color):\n \n # duplicate first point in case only one point was given\n points = points[0], points\n canvas.create_line(points, width = int(line_width), fill = line_color)",
"def draw_line_segment(\n x1: float, y1: float, x2: float, y2: float, color: C3F\n ) -> None:\n pyglet.graphics.draw(\n 2,\n pyglet.gl.GL_LINE_STRIP,\n (GeoDrawer._VERTEX_MODE, [x1, y1, x2, y2]),\n (GeoDrawer._COLOR_MODE, color * 2),\n )",
"def segment(x,u1,u2):\n if not (isgoodnum(u1) and isgoodnum(u2)) or close(u1,u2) or u1<0 or u2 < 0 or u1 > 1 or u2 > 1:\n raise ValueError('bad parameter arguments passed to segment: '+str(u1)+', '+str(u2))\n if ispoint(x):\n return deepcopy(x)\n elif isline(x):\n return segmentline(x,u1,u2)\n elif isarc(x):\n return segmentarc(x,u1,u2)\n elif ispoly(x):\n return segmentpoly(x,u1,u2)\n elif isgeomlist(x):\n return segmentgeomlist(x,u1,u2)\n else:\n raise ValueError(\"inappropriate figure type for segment(): \"+str(x))",
"def line(value):\r\n return '({}, {}), ({}, {})'.format(value.x1(), value.y1(), value.x2(), value.y2())",
"def __drawSegment(self, p1, p2, color):\n pygame.draw.aaline(self.screen, color, p1, p2)",
"def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)",
"def __init__(self, points):\n self.points = points\n self.lines = []\n\n orientation = 1\n for i, point in enumerate(self.points):\n try:\n if points[i+1].x > point.x:\n orientation = orientation\n else:\n orientation = - 1\n point.orientation = orientation\n self.points[i+1].orientation = orientation\n self.lines.append(Line(point, self.points[i+1]))\n except IndexError:\n point.orientation = orientation\n self.lines.append(Line(point, self.points[0]))",
"def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))",
"def __init__(self, point1, point2):\n self.point1 = point1\n self.point2 = point2\n self.vertical = False\n self.fixed_x = None\n self.k = None\n self.b = None\n\n # cached angle props\n self.angle = None\n self.angle_cos = None\n self.angle_sin = None\n\n self.set_line_props(point1, point2)"
] | [
"0.72106606",
"0.70376474",
"0.6866702",
"0.6816661",
"0.67462677",
"0.6648129",
"0.6617927",
"0.65004915",
"0.6392701",
"0.63844943",
"0.6346983",
"0.6311228",
"0.6241615",
"0.62122416",
"0.6210177",
"0.6207314",
"0.6160868",
"0.6140412",
"0.60893387",
"0.60420334",
"0.6031996",
"0.60075486",
"0.5994436",
"0.59798735",
"0.59787446",
"0.59647614",
"0.59406006",
"0.5940276",
"0.5898827",
"0.58833617"
] | 0.7115233 | 1 |
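Every row's `metadata.objective` declares a single triplet recipe, `["query", "document", "negatives"]`. Below is a hedged sketch of how that recipe could be expanded into (anchor, positive, negative) training triplets; the helper is illustrative only and not shipped with the dataset.

```python
from typing import Any, Dict, Iterator, Tuple

def expand_triplets(row: Dict[str, Any]) -> Iterator[Tuple[str, str, str]]:
    """Yield (anchor, positive, negative) tuples per row['metadata']['objective']['triplet']."""
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor, positive = row[anchor_col], row[positive_col]   # "query", "document"
        for negative in row[negatives_col]:                     # the 30 "negatives"
            yield anchor, positive, negative

# With the rows in this table, each call yields 30 triplets suitable for a
# contrastive / triplet-style training objective.
```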
>>> p1, p2 = Point(0, 0), Point(1, 0)
>>> seg = LineSegment(p1, p2)
>>> seg.param_to_point(0) == p1
True
>>> seg.param_to_point(1) == p2
True
>>> seg.param_to_point(0.5) == Point(0.5, 0)
True | def param_to_point(self, param):
return self.p1 + param * (self.p2 - self.p1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def point_to_param(self, pt):\n r = self.p2 - self.p1\n return (pt - self.p1).dot(r) / r.square()",
"def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())",
"def line_param(point_a, point_b, t):\n new_point = point_a - point_b\n return point_b + t*new_point",
"def closest_point_on_segment(point, segment):\n a, b = segment\n p = closest_point_on_line(point, segment)\n d = distance_point_point_sqrd(a, b)\n d1 = distance_point_point_sqrd(a, p)\n d2 = distance_point_point_sqrd(b, p)\n if d1 > d or d2 > d:\n if d1 < d2:\n return a\n return b\n return p",
"def get_point_in_segment(p1, p2, alpha):\n return ((1-alpha)*p1[0]+alpha*p2[0], (1-alpha)*p1[1]+alpha*p2[1])",
"def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])",
"def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))",
"def dist_to_line2d_seg(line, point):\n\tx1,y1 = line[0]\n\tx2,y2 = line[1]\n\tx3,y3 = point\n\t\n\t# where on line the perpendicular is\n\tu = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))\n\t\t\t/ (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )\n\t\n\t# closet to mid section or an end point?\n\tif 0.0 <= u <= 1.0:\t\t\n\t\tx = x1 + u*(x2-x1)\n\t\ty = y1 + u*(y2-y1)\n\t\t\n\telif u < 0:\t\t\n\t\tx,y = x1,y1\n\t\t\n\telse:\n\t\tx,y = x2,y2\n\t\t\n\tdist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))\n\t\n\treturn dist",
"def find_segment(p, line, start_vertex=0):\n EPS = 1e-9\n for seg in range(start_vertex, len(line)-1):\n if is_near(p, line[seg]):\n return seg, 0\n if line[seg][0] == line[seg+1][0]:\n if not (p[0]-EPS <= line[seg][0] <= p[0]+EPS):\n continue\n px = None\n else:\n px = (p[0] - line[seg][0]) / (line[seg+1][0] - line[seg][0])\n if px is None or (0 <= px <= 1):\n if line[seg][1] == line[seg+1][1]:\n if not (p[1]-EPS <= line[seg][1] <= p[1]+EPS):\n continue\n py = None\n else:\n py = (p[1] - line[seg][1]) / (line[seg+1][1] - line[seg][1])\n if py is None or (0 <= py <= 1):\n if py is None or px is None or (px-EPS <= py <= px+EPS):\n return seg, px or py\n return None, None",
"def get_dependent_param_points(self, param):\n if param == SHAPE_STRING:\n return self.shape_at, self.shape_value\n elif param == LOCATION_STRING:\n return self.loc_at, self.loc_value\n elif param == SCALE_STRING:\n return self.scale_at, self.scale_value\n else:\n err_msg = \"Parameter '{}' is unknown.\".format(param)\n raise ValueError(err_msg)",
"def is_point_on_segment(point, segment, tol=0.0):\n a, b = segment\n if not is_point_on_line(point, segment, tol=tol):\n return False\n d_ab = distance_point_point(a, b)\n if d_ab == 0:\n return False\n d_pa = distance_point_point(a, point)\n d_pb = distance_point_point(b, point)\n if d_pa + d_pb <= d_ab + tol:\n return True\n return False",
"def pointInSegment(point, segmentPoint1, segmentPoint2):\n\t\tx = point[0]\n\t\ty = point[1]\n\n\t\tif x < segmentPoint1[0] and x < segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif x > segmentPoint1[0] and x > segmentPoint2[0]:\n\t\t\treturn False\n\t\t\n\t\tif y < segmentPoint1[1] and y < segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\tif y > segmentPoint1[1] and y > segmentPoint2[1]:\n\t\t\treturn False\n\t\t\n\t\treturn True",
"def line_param(v1,v2):\n if (v1[0]-v2[0] != 0.):\n m = (v1[1] - v2[1])/(v1[0] - v2[0])\n b = -m*v1[0] + v1[1]\n if num.fabs(m)>1.0e6:\n m = None\n b = v1[0]\n else: \n m = None\n b = v1[0]\n return (m,b)",
"def point_sur_segment(self, pt):\n dp = pt - self.c\n d = dp.length - self.r\n a = atan2(dp.y, dp.x)\n t = (a - self.a0) / self.da\n return t > 0 and t < 1, d, t",
"def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)",
"def segment(x,u1,u2):\n if not (isgoodnum(u1) and isgoodnum(u2)) or close(u1,u2) or u1<0 or u2 < 0 or u1 > 1 or u2 > 1:\n raise ValueError('bad parameter arguments passed to segment: '+str(u1)+', '+str(u2))\n if ispoint(x):\n return deepcopy(x)\n elif isline(x):\n return segmentline(x,u1,u2)\n elif isarc(x):\n return segmentarc(x,u1,u2)\n elif ispoly(x):\n return segmentpoly(x,u1,u2)\n elif isgeomlist(x):\n return segmentgeomlist(x,u1,u2)\n else:\n raise ValueError(\"inappropriate figure type for segment(): \"+str(x))",
"def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)",
"def tValueForPoint(self, point):\n if self.segmentType == \"curve\":\n on1 = self.previousOnCurve\n off1 = self.points[0].coordinates\n off2 = self.points[1].coordinates\n on2 = self.points[2].coordinates\n return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))\n elif self.segmentType == \"line\":\n return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))\n elif self.segmentType == \"qcurve\":\n raise NotImplementedError\n else:\n raise NotImplementedError",
"def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b",
"def on_segment(point_p, point_q, point_r):\n if (point_q.x <= max(point_p.x, point_r.x)\n and point_q.x >= min(point_p.x, point_r.x)\n and point_q.y <= max(point_p.y, point_r.y)\n and point_q.y >= min(point_p.y, point_r.y)):\n return True\n return False",
"def point_on_line(point:tuple, line:tuple, d_y:float, d_x:float, b:float)->bool:\n if not near_segment(point, line):\n # Fast fail to handle cases where the point isn't in the bounding rectangle of the line segment.\n return False\n if b == None and point[0] == line[0][0]:\n return True\n return d_y * point[0] == (point[1] - b) * d_x",
"def distanceToPoint(self, point):\n\n length = self.length\n if not length:\n raise ValueError('Cannot calculate point distance. Invalid line segment.')\n\n s = self.start\n e = self.end\n deltaX = e.x - s.x\n deltaY = e.y - s.y\n\n distance = abs(deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y)/length.raw\n\n B = deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y\n AbsB = abs(B)\n D = math.sqrt(deltaX*deltaX + deltaY*deltaY)\n DPrime = 1.0/math.pow(deltaX*deltaX + deltaY*deltaY, 3.0/2.0)\n bBD = B/(AbsB*D)\n\n pointXErr = point.xUnc*abs(deltaY*B/(AbsB*D))\n pointYErr = point.yUnc*abs(deltaX*B/(AbsB*D))\n startXErr = s.xUnc*abs(AbsB*DPrime + bBD*(point.y - e.y))\n startYErr = s.yUnc*abs(AbsB*DPrime + bBD*(e.x - point.x))\n endXErr = e.xUnc*abs(bBD*(s.y - point.y) - AbsB*DPrime)\n endYErr = e.yUnc*abs(bBD*(point.x - s.x) - AbsB*DPrime)\n error = pointXErr + pointYErr + startXErr + startYErr + endXErr + endYErr\n\n return NumericUtils.toValueUncertainty(distance, error)",
"def point_in_segment(point: _Point, segment: _Segment,\n *,\n context: _Optional[_Context] = None) -> _Location:\n return _segment.locate_point(\n segment, point, _get_context() if context is None else context\n )",
"def point_to_parameter(self, pt):\n uv = ShapeAnalysis_Surface(self.surface()).ValueOfUV(\n gp_Pnt(pt[0], pt[1], pt[2]), 1e-9\n )\n return np.array(uv.Coord())",
"def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b",
"def point(x, y):\n return test(Point(x,y))",
"def shortest_line_to_point(point_a, point_b, point_c): # where a and b are on spin axis, c is the point spinning round\n axis_vect = np.subtract(point_a, point_b)\n axis_mag = magnitude(point_a, point_b)\n unit_axis = np.divide(axis_vect, axis_mag) # unit of pp\n # pp' constants - p\n\n # pp dot u\n t = np.sum(np.dot(unit_axis, unit_axis))\n c = np.sum(np.dot(np.subtract(point_b, point_c), unit_axis))\n p = -c / t\n project_point_on_axis_add = (np.multiply(unit_axis, p))\n project_point_on_axis = project_point_on_axis_add + point_b\n distance = magnitude(point_c, project_point_on_axis)\n return distance, project_point_on_axis",
"def find_closest_pt_on_segment_2d(pt1, pt2, pt, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmFindClosestPtOnSegment(pt1, pt2, pt, tol)",
"def _as_parameter_(self):\n return POINT(self.x, self.y)",
"def lineTo(self, pt: Tuple[float, float]) -> None:\n raise NotImplementedError"
] | [
"0.6876588",
"0.66193455",
"0.6241705",
"0.6213389",
"0.6163717",
"0.6000711",
"0.5991964",
"0.59617895",
"0.5959313",
"0.5918417",
"0.59099036",
"0.58839566",
"0.5865199",
"0.58155835",
"0.5796937",
"0.57824886",
"0.5776108",
"0.57577264",
"0.5738541",
"0.57312596",
"0.5723538",
"0.57135415",
"0.56951684",
"0.5678038",
"0.5666678",
"0.56605697",
"0.5652423",
"0.5651553",
"0.5651326",
"0.56458485"
] | 0.74114805 | 0 |
>>> seg = LineSegment(Point(0, 0), Point(1, 0))
>>> seg.point_to_param(Point(0, 1))
0
>>> seg.point_to_param(Point(1, 1))
1 | def point_to_param(self, pt):
r = self.p2 - self.p1
return (pt - self.p1).dot(r) / r.square() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def param_to_point(self, param):\n return self.p1 + param * (self.p2 - self.p1)",
"def line_param(point_a, point_b, t):\n new_point = point_a - point_b\n return point_b + t*new_point",
"def point_to_parameter(self, pt):\n uv = ShapeAnalysis_Surface(self.surface()).ValueOfUV(\n gp_Pnt(pt[0], pt[1], pt[2]), 1e-9\n )\n return np.array(uv.Coord())",
"def get_dependent_param_points(self, param):\n if param == SHAPE_STRING:\n return self.shape_at, self.shape_value\n elif param == LOCATION_STRING:\n return self.loc_at, self.loc_value\n elif param == SCALE_STRING:\n return self.scale_at, self.scale_value\n else:\n err_msg = \"Parameter '{}' is unknown.\".format(param)\n raise ValueError(err_msg)",
"def line_param(v1,v2):\n if (v1[0]-v2[0] != 0.):\n m = (v1[1] - v2[1])/(v1[0] - v2[0])\n b = -m*v1[0] + v1[1]\n if num.fabs(m)>1.0e6:\n m = None\n b = v1[0]\n else: \n m = None\n b = v1[0]\n return (m,b)",
"def _as_parameter_(self):\n return POINT(self.x, self.y)",
"def get_segm_para(*args):\n return _ida_segment.get_segm_para(*args)",
"def param(self, *args, **kwargs):\n return self.options.param(*args,**kwargs)",
"def get_param(self, param):\n return self.params.get(param, None)",
"def get_point_in_segment(p1, p2, alpha):\n return ((1-alpha)*p1[0]+alpha*p2[0], (1-alpha)*p1[1]+alpha*p2[1])",
"def getParam(self,param):\n if param in self.params.keys():\n return self.params[param]\n else:\n return None",
"def find_segment(p, line, start_vertex=0):\n EPS = 1e-9\n for seg in range(start_vertex, len(line)-1):\n if is_near(p, line[seg]):\n return seg, 0\n if line[seg][0] == line[seg+1][0]:\n if not (p[0]-EPS <= line[seg][0] <= p[0]+EPS):\n continue\n px = None\n else:\n px = (p[0] - line[seg][0]) / (line[seg+1][0] - line[seg][0])\n if px is None or (0 <= px <= 1):\n if line[seg][1] == line[seg+1][1]:\n if not (p[1]-EPS <= line[seg][1] <= p[1]+EPS):\n continue\n py = None\n else:\n py = (p[1] - line[seg][1]) / (line[seg+1][1] - line[seg][1])\n if py is None or (0 <= py <= 1):\n if py is None or px is None or (px-EPS <= py <= px+EPS):\n return seg, px or py\n return None, None",
"def get_parameter(request, param):\n if param == \"Params\":\n return request.split(\"\\r\\n\\r\\n\")[1]\n if isinstance(param, type([])):\n return [request.split(\"\\r\\n\\r\\n\")[1] if x == \"Param\" else request.split(x + \": \")[1].split(\"\\r\\n\")[0] for x in param]\n if isinstance(param, type(\"\")):\n return request.split(param + \": \")[1].split(\"\\r\\n\")[0]",
"def __init__(self, *points, width=1, color=colors.WHITE, conversion=True):\n if len(points) > 0: # Extracting the points arguments under the same list format\n if type(points[0]) == list:\n points = points[0]\n if len(points) == 1: points = points[0]\n if len(points) != 2: raise Exception(\"A segment must have 2 points.\")\n self.points = list(points)\n self.width = width\n self.color = color\n self.conversion = conversion",
"def _param(self) ->nn.Parameter:\n return next(self.parameters())",
"def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())",
"def createFromSegment(cls, segment, **kwargs):\n return cls.createFromTwoPoints(segment.p1, segment.p2, **kwargs)",
"def segment(x,u1,u2):\n if not (isgoodnum(u1) and isgoodnum(u2)) or close(u1,u2) or u1<0 or u2 < 0 or u1 > 1 or u2 > 1:\n raise ValueError('bad parameter arguments passed to segment: '+str(u1)+', '+str(u2))\n if ispoint(x):\n return deepcopy(x)\n elif isline(x):\n return segmentline(x,u1,u2)\n elif isarc(x):\n return segmentarc(x,u1,u2)\n elif ispoly(x):\n return segmentpoly(x,u1,u2)\n elif isgeomlist(x):\n return segmentgeomlist(x,u1,u2)\n else:\n raise ValueError(\"inappropriate figure type for segment(): \"+str(x))",
"def segmentize(line, seg_len, mid_point_lats, mid_point_lons):\n segs = []\n n = line.GetPointCount()\n #print \"n: \", n\n seg = []\n # Iterate over the number of points in the polyline\n for i in range(n):\n distances = []\n total_distance = 0\n pt1 = line.GetPoint_2D(i)\n seg.append(pt1)\n d = seg_length(seg, distances)\n # Check to see if the total length of the points so far is greater than the specified segment length\n if d >= seg_len: # 1.6 km for 1 mile segments\n print \"Total distance of segment in kilometers, in miles: %f, %f\" % (d, d*0.6214)\n # If the desired segment length (or greater) has been reached, append the point to the list of segments\n total_distance = d\n #print \"total_distance: \", total_distance\n segs.append(seg)\n #print \"seg, total_distance: \", seg, total_distance\n set_mid_point(seg, total_distance, distances, mid_point_lats, mid_point_lons)\n seg = [pt1]\n return segs",
"def setParam(self,param,value):\n if param in self.params.keys():\n self.params[param] = value",
"def param(self):\n return self._param",
"def get_params(self, params):\n mapping = OrderedDict(\n (key, params[x]) if isinstance(x, str) else (key, float(x))\n for key, x in self.transformations.items()\n )\n return Params(**mapping)",
"def inters_segment(self, s):\r\n if (self.m == s.m) and (self.n == s.n):\r\n # The segment s is over this segment. Return the middle point\r\n x = (self.start[0] + self.end[0]) / 2\r\n y = (self.start[1] + self.end[1]) / 2\r\n elif self.m == s.m:\r\n # The segments are parallels\r\n return None\r\n elif self.m == None:\r\n x = self.start[0]\r\n y = int(s.m * x + s.n)\r\n elif s.m == None:\r\n x = s.start[0]\r\n y = self.m * x + self.n\r\n else:\r\n x = (s.n - self.n) / (self.m - s.m)\r\n y = self.m * x + self.n \r\n \r\n if self.contains_point(x, y) and s.contains_point(x, y):\r\n return int(x), int(y)\r\n else:\r\n return None",
"def param(self, param):\n if param is not None and len(param) < 1:\n raise ValueError(\"Invalid value for `param`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._param = param",
"def get_params(self):",
"def closest_point_on_segment(point, segment):\n a, b = segment\n p = closest_point_on_line(point, segment)\n d = distance_point_point_sqrd(a, b)\n d1 = distance_point_point_sqrd(a, p)\n d2 = distance_point_point_sqrd(b, p)\n if d1 > d or d2 > d:\n if d1 < d2:\n return a\n return b\n return p",
"def discretize_line(p0, p1, segments):\n p0, p1 = Point(p0), Point(p1)\n dx, dy = p1.x - p0.x, p1.y - p0.y\n vtx = [Point(p0).as_tuple()]\n if isinstance(segments, list):\n for ds in segments:\n x0 = p0.x + ds * dx\n y0 = p0.y + ds * dy\n vtx.append((x0, y0))\n return vtx\n for i in range(segments):\n ds = (i + 1) / segments\n x0 = p0.x + ds * dx\n y0 = p0.y + ds * dy\n vtx.append((x0, y0))\n return vtx",
"def __repr__(self):\n return 'LineSegment({0}, {1})'.format(self.p1, self.p2)",
"def getVzoneOneParam(self, param):\n\n # return only the param of the first p_zone of the v_zone\n \t# if we want to know th CH parameter, we use the channels dict of\n \t# the v_amp object\n\tif param == \"CH\":\n\t return(param, self.v_amp_obj.channels[self.v_amp_obj._pzones[self.v_params[\"childs\"][0]][param]])\n\telse:\n value = self.v_amp_obj._pzones[self.v_params[\"childs\"][0]][param]\n return(param, value)",
"def prior_param(self, param_dict={}): \n self.param_obj = Params(param_dict) # parameter object \n self.param_names = param_dict.keys() \n self.n_params = len(param_dict.keys()) # number of parameters in theta "
] | [
"0.67098206",
"0.58437604",
"0.5808367",
"0.5781741",
"0.57216614",
"0.5697626",
"0.56160396",
"0.55942684",
"0.5469736",
"0.54624003",
"0.5426168",
"0.5413337",
"0.53801966",
"0.53713965",
"0.5358662",
"0.5308702",
"0.52752256",
"0.52395874",
"0.52279246",
"0.521795",
"0.51927656",
"0.5177488",
"0.517641",
"0.5173953",
"0.5163848",
"0.51521844",
"0.51379263",
"0.51375693",
"0.51221156",
"0.51215523"
] | 0.70094305 | 0 |
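Rows three and four above are inverse operations on the same `LineSegment`: `param_to_point` interpolates p1 + t * (p2 - p1), while `point_to_param` projects a point back onto the supporting line, t = (pt - p1) . r / |r|^2 with r = p2 - p1, which is why off-line points such as Point(0, 1) still map to the parameter of their foot. The `Point` class below is an assumed minimal implementation added only so both documents run as in their doctests.

```python
class Point:
    # Assumed 2-D point supporting the operations used by the two methods above.
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)
    def __sub__(self, other):
        return Point(self.x - other.x, self.y - other.y)
    def __rmul__(self, k):          # scalar * Point
        return Point(k * self.x, k * self.y)
    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)
    def dot(self, other):
        return self.x * other.x + self.y * other.y
    def square(self):
        return self.dot(self)

class LineSegment:
    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2
    def param_to_point(self, param):
        return self.p1 + param * (self.p2 - self.p1)
    def point_to_param(self, pt):
        r = self.p2 - self.p1
        return (pt - self.p1).dot(r) / r.square()

seg = LineSegment(Point(0, 0), Point(1, 0))
assert seg.param_to_point(0.5) == Point(0.5, 0)
assert seg.point_to_param(Point(1, 1)) == 1   # projection of (1, 1) onto the x-axis
```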
>>> AngleRange(0, 1).intersection(AngleRange(2, 3)) | def intersection(self, other):
a, b = min(self.start, self.finish), max(self.start, self.finish)
c, d = min(other.start, other.finish), max(other.start, other.finish)
a1 = normalize(a, 0, TWO_PI)
a, b = a1, b + a1 - a
c1 = normalize(c, 0, TWO_PI)
c, d = c1, d + c1 - c
e, f = max(a, c), min(b, d)
if f >= e:
return AngleRange(e, f)
else:
return None # no overlap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interval_intersect(b, a, d, c):\n return (d <= a) and (b <= c) # int2 lower <= int1 upper AND int1 lower <= int2 upper",
"def interval_intersect(a, b, c, d):\r\n return (c <= b) and (a <= d)",
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2",
"def intersect(start1, stop1, start2, stop2):\n\tassert isinstance(start1, int)\n\tassert isinstance(stop2, int)\n\tassert isinstance(start2, int)\n\tassert isinstance(stop2, int)\n\tassert start1 <= stop1\n\tassert start2 <= stop2\n\t\n\t# if interval 1 is completely to the left of interval 2\n\tif stop1 < start2:\n\t\treturn False\n\t\n\t# if interval 1 is completely to the right of interval2\n\tif stop2 < start1:\n\t\treturn False\n\t\t\n\treturn True",
"def interval_intersect(a, b, c, d):\n if (c <= b) and (a <= d):\n return True\n else:\n return False",
"def _intersect(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper > interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower < interval.upper:\n last += 1\n return first, last",
"def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)",
"def _range_overapped(self, x, y):\n xs = set( range(x[0], x[1]))\n ys = set( range(y[0], y[1]))\n return xs.intersection(ys)",
"def intersection(x, y, f, p):",
"def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False",
"def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)",
"def _intersection(x, y):\n a, b = x\n c, d = y\n return (d > a) and (c < b)",
"def intersect(self, *args, **kwargs): # real signature unknown\n pass",
"def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))",
"def test(a, b, c, d):\r\n print \"Intervals [\" + str(a) + \", \" + str(b) + \"] and [\" + str(c) + \", \" + str(d) + \"]\",\r\n if interval_intersect(a, b, c, d):\r\n print \"intersect.\"\r\n else:\r\n print \"do not intersect.\"",
"def intersection(a: int, b: int, c: int, d: int):\n assert a <= b and c <= d\n return max(0, min(b, d) - max(a, c))",
"def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail",
"def _intersect_continuous(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper >= interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower <= interval.upper:\n last += 1\n return first, last",
"def test_is_on_intersection():\n center = Coordinates(1, 1)\n radius = 10\n\n i = Intersection(center, radius, 20)\n\n in_circle = Coordinates(2, 2)\n not_in_circle = Coordinates(100, 150)\n before_circumference = Coordinates(1, 10.9)\n on_circumference = Coordinates(1, 11)\n after_circumference = Coordinates(1, 11.1)\n\n assert is_on_intersection(i, in_circle)\n assert is_on_intersection(i, on_circumference)\n assert is_on_intersection(i, before_circumference)\n assert not is_on_intersection(i, not_in_circle)\n assert not is_on_intersection(i, after_circumference)",
"def intersection(seq, *seqs):\n yield from intersectionby(None, seq, *seqs)",
"def an_intersection(v1, b1):\n try:\n return intersection(v1, b1, np.array([1,1]), 0)\n except np.linalg.linalg.LinAlgError:\n print v1\n return intersection(v1, b1, np.array([-1,1]), 0)",
"def roi_intersect(a, b):\n def slice_intersect(a, b):\n if a.stop < b.start:\n return slice(a.stop, a.stop)\n elif a.start > b.stop:\n return slice(a.start, a.start)\n\n _in = max(a.start, b.start)\n _out = min(a.stop, b.stop)\n\n return slice(_in, _out)\n\n if isinstance(a, slice):\n if not isinstance(b, slice):\n b = b[0]\n return slice_intersect(a, b)\n\n b = (b,) if isinstance(b, slice) else b\n\n return tuple(slice_intersect(sa, sb) for sa, sb in zip(a, b))",
"def _overlap(c1, c2, index='dice'):\n set1 = set(c1)\n set2 = set(c2)\n intersection_num = float(len(set1 & set2))\n try:\n if index == 'dice':\n total_num = len(set1 | set2) + intersection_num\n overlap = 2.0 * intersection_num / total_num\n elif index == 'percent':\n overlap = 1.0 * intersection_num / len(set1)\n else:\n raise Exception(\"Only support 'dice' and 'percent' as overlap indices at present.\")\n except ZeroDivisionError as e:\n print(e)\n overlap = np.nan\n return overlap",
"def from_inclusive(a, b):\n c = int(b > a)*2-1\n return range(a, b+c, c)",
"def range_inclusive(start, stop):\n return range(start, stop + 1)",
"def intersection(a, b):\n x = max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w",
"def intersection(*seqs):\n return (item for item in seqs[0]\n if all(item in seq for seq in seqs[1:]))",
"def overlaps(*objs):\n return set.intersection(*(set(range(*extent(obj))) for obj in objs))",
"def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])",
"def _intersect_interval(self, other):\n interval = Intersection(self.interval, other.interval)\n return interval.inf, interval.sup"
] | [
"0.6961142",
"0.68901646",
"0.66428983",
"0.65814406",
"0.6556519",
"0.64353484",
"0.6413068",
"0.63258123",
"0.6324006",
"0.6293021",
"0.62256324",
"0.618209",
"0.617776",
"0.6167389",
"0.61051816",
"0.6082974",
"0.6061148",
"0.6060571",
"0.60537374",
"0.6049184",
"0.6048017",
"0.6047029",
"0.6022951",
"0.6020832",
"0.60151196",
"0.6008488",
"0.59882635",
"0.59828067",
"0.59705675",
"0.5969502"
] | 0.7063069 | 0 |
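The `intersection` document in the row above depends on a `normalize` helper and a `TWO_PI` constant that the row does not show. The sketch below fills those in with plausible definitions (wrap an angle into [lo, hi)) purely so the call in the query can be exercised; it is an assumption about the surrounding module, not its actual source.

```python
import math

TWO_PI = 2 * math.pi

def normalize(angle, lo, hi):
    # Assumed helper: wrap `angle` into the half-open interval [lo, hi).
    return (angle - lo) % (hi - lo) + lo

class AngleRange:
    def __init__(self, start, finish):
        self.start, self.finish = start, finish

    def intersection(self, other):
        # Same logic as the document above: sort each range, shift both to a
        # normalized origin, then keep the overlap if it is non-empty.
        a, b = min(self.start, self.finish), max(self.start, self.finish)
        c, d = min(other.start, other.finish), max(other.start, other.finish)
        a1 = normalize(a, 0, TWO_PI)
        a, b = a1, b + a1 - a
        c1 = normalize(c, 0, TWO_PI)
        c, d = c1, d + c1 - c
        e, f = max(a, c), min(b, d)
        return AngleRange(e, f) if f >= e else None   # None -> no overlap

assert AngleRange(0, 1).intersection(AngleRange(2, 3)) is None   # as in the query above
overlap = AngleRange(0, 2).intersection(AngleRange(1, 3))
assert (overlap.start, overlap.finish) == (1, 2)
```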
>>> tfm, center = PSTransform(), Point(1, 0)
upper half circle
>>> Arc(center, 1, PI, 0).postscript(tfm)
'378.0 396.0 72 0.0 180.0 arc'
>>> Arc(center, 1, 0, PI).postscript(tfm)
'378.0 396.0 72 0.0 180.0 arc'
lower half circle
>>> Arc(center, 1, PI, TWO_PI).postscript(tfm)
'378.0 396.0 72 180.0 360.0 arc'
>>> Arc(center, 1, TWO_PI, PI).postscript(tfm)
'378.0 396.0 72 180.0 360.0 arc'
%!PS-Adobe-2.0
378.0 396.0 72 180.0 0.0 arcn
stroke
showpage | def postscript(self, tfm):
k = 180. / PI
if self.angle_range.clockwise():
a, b = self.angle_range.start, self.angle_range.finish
else:
a, b = self.angle_range.finish, self.angle_range.start
return (tfm.format('{0} {1} ', self.center) +
'{0} '.format(tfm.scale(abs(self.radius))) +
'{0} {1} '.format(k * a, k * b) + 'arc') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def postscript(self, tfm):\n return (tfm.format('{0} {1} lineto ', self.p1) +\n tfm.format('{0} {1} moveto', self.p2))",
"def output(self):\n xpos, ypos = self.arcpoints[2]\n startxy = np.array([xpos, ypos]) # start point\n xpos, ypos = self.arcpoints[1]\n pointxy = np.array([xpos, ypos]) # a point on the curve\n xpos, ypos = self.arcpoints[0]\n endxy = np.array([xpos, ypos]) # end point\n\n a_norm = np.linalg.norm(endxy - pointxy)\n b_norm = np.linalg.norm(endxy - startxy)\n c_norm = np.linalg.norm(pointxy - startxy)\n \"\"\"\n s_factor = (a_norm + b_norm + c_norm) / 2\n radius = a_norm * b_norm * c_norm / 4\n / np.sqrt(s_factor * (s_factor - a_norm)\n * (s_factor - b_norm)\n * (s_factor - c_norm))\n \"\"\"\n b_factor1 = a_norm * a_norm * (b_norm * b_norm\n + c_norm * c_norm\n - a_norm * a_norm)\n b_factor2 = b_norm * b_norm * (a_norm * a_norm\n + c_norm * c_norm\n - b_norm * b_norm)\n b_factor3 = c_norm * c_norm * (a_norm * a_norm\n + b_norm * b_norm\n - c_norm * c_norm)\n centerxy = np.column_stack((startxy,\n pointxy,\n endxy)).dot(np.hstack((b_factor1,\n b_factor2,\n b_factor3)))\n centerxy /= b_factor1 + b_factor2 + b_factor3 # arc center\n\n self.def_field['XY_center'] = (centerxy)\n self.def_field['XY_arcpoints'].append(startxy) # start point\n self.def_field['XY_arcpoints'].append(endxy) # end point\n\n to_write = 'A '\n xpos, ypos = self.def_field['XY_center']\n\n to_write += str(int(xpos)) + ' ' + str(int(ypos)) + ' '\n to_write += str(self.def_field['radius']) + ' '\n to_write += str(self.def_field['angle1']) + ' '\n to_write += str(self.def_field['angle2']) + ' '\n to_write += str(self.def_field['unit']) + ' '\n to_write += str(self.def_field['convert']) + ' '\n to_write += str(self.def_field['width']) + ' '\n to_write += str(self.def_field['fill']) + ' '\n for xpos, ypos in self.def_field['XY_arcpoints']:\n to_write += str(self.offset[0] + xpos) + ' ' \\\n + str(self.offset[1] + ypos) + ' '\n to_write += '\\n'\n return to_write",
"def dotext(rpoint, text, angle, bi):\r\n## w(\"/Arial findfont\")\r\n if bi:\r\n w(\"/%s findfont\" % gv[\"bifont\"])\r\n else:\r\n w(\"/%s findfont\" % gv[\"font\"])\r\n if gv[\"fontfixed\"] is False:\r\n localfontsize = int(gv[\"fontsize\"]*gv[\"globalscale\"])\r\n else:\r\n localfontsize = int(gv[\"fontsize\"])\r\n w(\"%d scalefont\" % localfontsize)\r\n w(\"setfont\")\r\n w(\"newpath\")\r\n p = apoint(rpoint)\r\n if angle != 0:\r\n w(\"gsave\")\r\n w(\"%d %d translate\" % (p[0], p[1]))\r\n w(\"%d rotate\" % angle)\r\n w(\"0 0 moveto\")\r\n w(\"(\" + text + \") show\")\r\n w(\"grestore\")\r\n else:\r\n w(\"%d %d moveto\" % (p[0],p[1]))\r\n w(\"(\" + text + \") show\")",
"def DrawArcPoint(*args, **kwargs):\n return _gdi_.PseudoDC_DrawArcPoint(*args, **kwargs)",
"def make_pyts_logo(cmap='jet', background_color='darkslategray',\n output_file=None, dpi=400): # noqa: E501\n # Properties\n figsize = (5, 3)\n radius = 0.2\n linewidth = 20\n line_params = {'lw': linewidth, 'color': background_color,\n 'solid_capstyle': 'round'}\n arc_params = {'facecolor': 'none', 'lw': linewidth,\n 'edgecolor': background_color, 'capstyle': 'round'}\n\n # Create the figure\n fig, ax = plt.subplots(figsize=figsize, frameon=False)\n ax.axis('off')\n\n # CREATE THE \"PYTS\" BLACK BACKGROUND\n\n # Create the \"p\"\n arc_p = Arc((0, 0), 2 * radius, 2 * radius, **arc_params)\n line_p = Line2D([-0.2, -0.2], [-0.6, 0.2], **line_params)\n ax.add_patch(arc_p)\n ax.add_line(line_p)\n\n # Create the \"y\"\n line_left_y = Line2D([0.2, 0.2], [0, 0.15], **line_params)\n line_right_y = Line2D([0.6, 0.6], [0.15, -0.4], **line_params)\n arc_lower_y = Arc((0.4, -0.4), 2 * radius, 2 * radius,\n theta1=-180, theta2=0, **arc_params)\n ax.add_line(line_left_y)\n ax.add_line(line_right_y)\n ax.add_patch(arc_lower_y)\n\n # Create the \"t\"\n arc_t = Arc((0.7, -0.3), radius, radius, theta1=-165,\n theta2=-90, **arc_params)\n line_middle_t = Line2D([0.5, 0.6], [0, 0], **line_params)\n line_lower_t = Line2D([0.7, 0.9], [-0.4, -0.4], **line_params)\n ax.add_patch(arc_t)\n ax.add_line(line_middle_t)\n ax.add_line(line_lower_t)\n\n # Create the \"s\"\n arc_lower_s = Arc((0.9, -0.275), 1.25 * radius, 1.25 * radius,\n theta1=-90, theta2=91, **arc_params)\n arc_upper_s = Arc((0.9, -0.025), 1.25 * radius, 1.25 * radius,\n theta1=181, theta2=-90, capstyle='butt',\n facecolor='none', lw=linewidth,\n edgecolor=background_color, zorder=2)\n ax.add_patch(arc_lower_s)\n ax.add_patch(arc_upper_s)\n\n # PLOT THE TIME SERIES\n x = np.linspace(-0.2, 1.2, 800)\n y = pyts_time_series(x)\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n norm = plt.Normalize(-0.2, 1.1)\n lc = LineCollection(segments, cmap=cmap, norm=norm,\n capstyle='round', zorder=3)\n lc.set_array(x)\n lc.set_linewidth(linewidth)\n ax.add_collection(lc)\n\n # Fix bounding issue\n arc_upper_s = Arc((0.854, -0.05), 0.5 * radius, 0.3 * radius,\n theta1=180, theta2=-150, capstyle='butt',\n facecolor='none', lw=1.1 * linewidth,\n edgecolor=background_color, zorder=4)\n rectangle_patch = Rectangle((0.8312, -0.05), width=0.1, height=0.05,\n fill=True, color='white', zorder=5)\n ax.add_patch(arc_upper_s)\n ax.add_patch(rectangle_patch)\n\n # Set the good limits for the axes\n plt.xlim((-0.25, 1.25))\n plt.ylim((-0.65, 0.25))\n\n # Save the figure\n plt.tight_layout()\n if output_file is not None:\n plt.savefig(output_file, dpi=dpi)\n\n plt.show()",
"def DrawArc(*args, **kwargs):\n return _gdi_.PseudoDC_DrawArc(*args, **kwargs)",
"def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,",
"def draw_arc(ax, center, height, width, theta1=0, theta2=numpy.pi, color='k', direction='down',\n num_points=DEFAULT_ARC_POINTS):\n LEFT_END_THETA = numpy.pi / 2\n RIGHT_END_THETA = numpy.pi * 1.5\n MIDPOINT_THETA = numpy.pi\n\n vertical_baseline = center[1]\n\n assert LEFT_END_THETA <= theta1 <= theta2 <= RIGHT_END_THETA\n\n b = height\n a = width / 2\n\n # determine how to allocate points\n left_angle_span = min(max(MIDPOINT_THETA - theta1, 0), theta2 - theta1)\n right_angle_span = min(max(theta2 - MIDPOINT_THETA, 0), theta2 - theta1)\n total_angle_span = left_angle_span + right_angle_span\n left_points = int(num_points * left_angle_span / total_angle_span)\n right_points = num_points - left_points\n\n x_coords = numpy.empty(num_points)\n y_coords = numpy.empty(num_points)\n\n if left_points:\n # plot upper left quadrant\n left_theta2 = theta1 + left_angle_span\n x, y = compute_half_arc_points(center=(center[0], 0),\n a=a, b=b,\n theta1=theta1, theta2=left_theta2,\n num_points=left_points)\n x_coords[:left_points] = x[:]\n y_coords[:left_points] = y[:]\n if right_points:\n # plot upper right quadrant\n right_theta1 = theta2 - right_angle_span\n x, y = compute_half_arc_points(center=(center[0], 0),\n a=a, b=b,\n theta1=right_theta1, theta2=theta2,\n num_points=right_points)\n x_coords[left_points:] = x[:]\n y_coords[left_points:] = y[:]\n\n if direction == 'down':\n y_coords = - y_coords\n\n y_coords += vertical_baseline\n\n ax.plot(x_coords, y_coords, color=color)",
"def create_arc(arc_data: Dict[str, Any], arc_tag: str) -> FpArc:\n fp_arc = arc_data[arc_tag]\n # end in kicad is start point\n start: Coords = [get_dict_by_key(fp_arc, 'end')['end'][0], get_dict_by_key(fp_arc, 'end')['end'][1]]\n start[1] = str(-1 * float(start[1]))\n end: Coords = [get_dict_by_key(fp_arc, 'start')['start'][0],\n get_dict_by_key(fp_arc, 'start')['start'][1]] # start in kicad is center point\n end[1] = str(-1 * float(end[1]))\n angle: float = -1 * float(get_dict_by_key(fp_arc, 'angle')['angle'])\n layer: Layer = convert_to_layers(get_dict_by_key(fp_arc, 'layer')['layer'])[0]\n width: float = get_dict_by_key(fp_arc, 'width')['width']\n new_arc = FpArc(start=start, end=end, angle=angle, layer=layer, width=width)\n new_arc.end = get_end_point(new_arc)\n return new_arc",
"def draw(self, ctx, centerpoint, basepoint=(0, 0),\n angle=0, scale_x=1.0, scale_y=1.0, \n opacity=1,\n axes=True):\n ctx.set_line_width(3)\n ctx.set_line_join(cairo.LINE_JOIN_ROUND)\n \n ctx.translate(centerpoint[0], centerpoint[1])\n ctx.rotate(angle)\n ctx.scale(scale_x, scale_y)\n\n ctx.translate(basepoint[0], basepoint[1])\n\n # sign panels\n ctx.set_source_rgba(*color_hex_unpack(\"#3165A5\", opacity))\n for c, p in zip([(50, 100), (-50, 100), (-50, -100), (50, -100)], xrange(4)):\n ctx.arc(c[0], c[1], 5, math.radians(p * 90), math.radians((p + 1) * 90)) \n ctx.close_path()\n ctx.fill()\n\n ctx.set_source_rgba(*color_hex_unpack(\"#EFEFEF\", opacity))\n for c, p in zip([(35, 30), (-35, 30), (-35, -70), (35, -70)], xrange(4)):\n ctx.arc(c[0], c[1], 10, math.radians(p * 90), math.radians((p + 1) * 90)) \n ctx.close_path()\n ctx.fill()\n \n # text label\n ctx.set_source_rgba(*color_hex_unpack(\"#293531\", opacity))\n ctx.set_font_size(18)\n ctx.move_to(-ctx.text_extents('Такси')[4] / 2, -50)\n ctx.show_text('Такси')\n\n # car shape\n ctx.move_to(0, -40)\n ctx.curve_to(20, -40, 10, -10, 30, -10)\n ctx.curve_to(40, -10, 40, 15, 30, 15)\n\n # wheels\n ctx.curve_to(15, 15, 30, 30, 15, 30)\n ctx.curve_to(0, 30, 15, 15, 0, 15)\n\n ctx.curve_to(-15, 15, 0, 30, -15, 30)\n ctx.curve_to(-30, 30, -15, 15, -30, 15)\n\n ctx.curve_to(-40, 15, -40, -10, -30, -10)\n ctx.curve_to(-10, -10, -20, -40, 0, -40)\n ctx.close_path()\n ctx.fill()\n\n # windscreen\n ctx.set_source_rgba(*color_hex_unpack(\"#EFEFEF\", opacity))\n ctx.move_to(0, -30)\n for point in [(5, -30), (10, -10), (-10, -10), (-5, -30), (0, -30)]:\n ctx.line_to(point[0], point[1])\n ctx.close_path()\n ctx.fill()\n\n # lights\n for c in 17, -17:\n ctx.move_to(c, -3)\n for point in [(c + 5, -3), (c + 5, 5), (c - 5, 5), (c - 5, -3)]:\n ctx.line_to(point[0], point[1])\n ctx.close_path()\n ctx.stroke()\n\n ctx.translate(-basepoint[0], -basepoint[1])\n\n ctx.scale(1/scale_x, 1/scale_y)\n ctx.rotate(-angle)\n ctx.translate(-centerpoint[0], -centerpoint[1])",
"def DrawArcPoint(*args, **kwargs):\n return _gdi_.DC_DrawArcPoint(*args, **kwargs)",
"def _draw_root_arc(file, x, y, height_in_units, deprel, css_class):\n height = height_in_units * _ARC_HEIGHT_UNIT\n\n # Start.\n file.write(u' <g class=\"%s\">\\n' % css_class)\n\n # Path.\n path = 'M %i %i L %i %i' % (x, y, x, y - height)\n file.write(u' <path d=\"%s\" class=\"arc\" />\\n' % path)\n file.write(u' <path d=\"%s\" class=\"arc hid\" />\\n' % path)\n\n # Arrow.\n _draw_arrow(file, x, y, math.pi / 2)\n\n # Role.\n deprel = cgi.escape(deprel)\n file.write(u' <text x=\"%i\" y=\"%i\" class=\"role\">%s</text>\\n' %\n (x, y - height - 0.2 * _SMALL_FONT, deprel))\n\n # End.\n file.write(u' </g>\\n')",
"def scat_angle(self, t_0, t_ex, p_0, p_ex, a):\n return (\n a[0] * sp.cos(t_0) * sp.cos(t_ex)\n + a[1] * sp.sin(t_0) * sp.sin(t_ex) * sp.cos(p_0) * sp.cos(p_ex)\n + a[2] * sp.sin(t_0) * sp.sin(t_ex) * sp.sin(p_0) * sp.sin(p_ex)\n )",
"def ps2svg_simple(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n line_style = 'stroke:black;stroke-width:1'\n point_style = \"fill:blue;font-family:Times\"\n offset_y = 18 # Adding 18px to compensate for double mirroring\n min_y = width_simple\n min_x = height_simple\n max_y = 0\n max_x = 0\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntroSimple)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 1)\n # Convert into <line> element\n sLine = '<g id=line{}><line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\" style=\"{}\" stroke-linecap=\"round\" /></g>'.format(\n idx, nums[0], nums[1], nums[2], nums[3], line_style)\n idx += 2\n lst_out.append(sLine)\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]), float(nums[2]))\n min_y = min(min_y, float(nums[1]), float(nums[3]))\n max_x = max(max_x, float(nums[0]), float(nums[2]))\n max_y = max(max_y, float(nums[1]), float(nums[3]))\n else:\n # We have exited the lines section\n section = \"point\"\n\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n pos_x = \"{:.6f}\".format(float(nums[0])) \n pos_y = \"{:.6f}\".format(float(nums[1]) + offset_y )\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]))\n min_y = min(min_y, float(nums[1]))\n max_x = max(max_x, float(nums[0]))\n max_y = max(max_y, float(nums[1]))\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n\n # Output this label\n sLabel = '<g id=\"text{}\"><text y=\"{}\" x=\"{}\" style=\"{}\">{}</text></g>'.format(\n idx, pos_y, pos_x, point_style, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n\n # Finish up the svg nicely\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n\n # Adapt w.r.t. min_x and min_y, max_x, max_y\n fHeight = height_simple - 2 * min_y + offset_y\n sViewbox = 'viewBox=\"{} {} {} {}\" width=\"{}\" height=\"{}\"'.format(\n 0, min_y, width_simple, fHeight, width_simple, fHeight\n )\n sBack = sBack.replace('@viewbox', sViewbox)\n\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack",
"def _constructArc(self, a, b, c):\n arc = pcbnew.PCB_SHAPE()\n arc.SetShape(STROKE_T.S_ARC)\n arc.SetLayer(Layer.Edge_Cuts)\n arc.SetArcGeometry(toKiCADPoint(a), toKiCADPoint(b), toKiCADPoint(c))\n return arc",
"def DrawArc(*args, **kwargs):\n return _gdi_.DC_DrawArc(*args, **kwargs)",
"def arc(radius = 10, angle = 90, num_pts = 720):\n t = np.linspace(0, angle*np.pi/180, abs(int(num_pts*angle/360))-2)\n x = radius*np.cos(t)\n y = radius*np.sin(t)\n points = np.array((x,y)).T\n start_angle = 90*np.sign(angle)\n end_angle = start_angle + angle\n return points, start_angle, end_angle",
"def draw_shape_arc(self, arc, xform, colour):\n x, y, r = arc.x, arc.y, arc.radius\n # if the arc segment were extended to draw a full circle, box would\n # enclose that circle\n minpt, maxpt = [xform.chain(Point(px, py)) for (px, py)\n in [(x - r, y - r), (x + r, y + r)]]\n xs, ys = [minpt.x, maxpt.x], [minpt.y, maxpt.y]\n box = (min(xs), min(ys), max(xs), max(ys))\n\n center = xform.chain(Point(x, y))\n\n def pt_to_deg(pt):\n # given a point, give the angle w.r.t. to the xform'd center of the\n # arc (ie. where it will be when drawn)\n # 3 o'clock is angle of 0, angles increase clockwise\n opp, adj = pt.y - center.y, pt.x - center.x\n if adj == 0:\n if opp > 0:\n return 90\n return 270\n angle = 180 * atan(opp / float(adj)) / pi\n if pt.x < center.x:\n angle += 180\n return int(angle % 360)\n\n # create a point in the middle of the arc (used to detect that the xform\n # has flipped the arc around. In that case, drawing from start_angle to\n # end_angle will go in the wrong direction, and draw out exactly the\n # wrong part of the circle)\n mid_ang = (arc.start_angle + arc.end_angle) / 2\n if arc.start_angle > arc.end_angle:\n mid_ang = (mid_ang - 1) % 2\n mid_pt = xform.chain(Point(cos((2 - mid_ang) * pi) * arc.radius + x,\n sin((2 - mid_ang) * pi) * arc.radius + y))\n\n start, end = [xform.chain(pt) for pt in arc.ends()]\n if pt_to_deg(start) < pt_to_deg(end):\n if not (pt_to_deg(start) < pt_to_deg(mid_pt) < pt_to_deg(end)):\n # swap start and end so that the arc traces through the\n # transformed midpoint\n start, end = end, start\n elif (pt_to_deg(end) < pt_to_deg(mid_pt) < pt_to_deg(start)):\n # swap start and end so that the arc traces through the\n # transformed midpoint\n start, end = end, start\n\n # by using the arc.ends() points, any rotation in xform gets handled\n # properly.\n self.canvas.arc(box, pt_to_deg(start), pt_to_deg(end), fill=colour)",
"def meander_taper(x_taper, w_taper, meander_length = 1000, spacing_factor = 3,\n min_spacing = 0.5, layer = 0):\n\n def taper_width(x):\n return np.interp(x, x_taper, w_taper)\n\n def taper_section(x_start, x_end, num_pts = 30, layer = 0):\n D = Device('tapersec')\n length = x_end - x_start\n x = np.linspace(0, length, num_pts)\n widths = np.linspace(taper_width(x_start), taper_width(x_end), num_pts)\n xpts = np.concatenate([x, x[::-1]])\n ypts = np.concatenate([widths/2, -widths[::-1]/2])\n D.add_polygon((xpts,ypts), layer = layer)\n D.add_port(name = 1, midpoint = (0,0), width = widths[0],\n orientation = 180)\n D.add_port(name = 2, midpoint = (length, 0), width = widths[-1],\n orientation = 0)\n return D\n\n def arc_tapered(radius = 10, width1 = 1, width2 = 2, theta = 45,\n angle_resolution = 2.5, layer = 0):\n D = Device('arctaper')\n path1 = gdspy.Path(width = width1, initial_point = (0, 0))\n path1.turn(radius = radius, angle = theta * pi/180,\n number_of_points = int(abs(2*theta/angle_resolution)),\n final_width = width2)\n [D.add_polygon(p, layer = layer) for p in path1.polygons]\n D.add_port(name = 1, midpoint = (0, 0), width = width1,\n orientation = 180)\n D.add_port(name = 2, midpoint = (path1.x, path1.y), width = width2,\n orientation = path1.direction * 180/pi)\n return D\n\n D = Device('meander-taper')\n xpos1 = min(x_taper)\n xpos2 = min(x_taper) + meander_length\n t = D.add_ref( taper_section(x_start = xpos1, x_end = xpos2,\n num_pts = 50, layer = layer) )\n D.add_port(t.ports[1])\n dir_toggle = -1\n while xpos2 < max(x_taper):\n arc_width1 = taper_width(xpos2)\n arc_radius = max(spacing_factor*arc_width1, min_spacing)\n arc_length = pi*arc_radius\n arc_width2 = taper_width(xpos2 + arc_length)\n A = arc_tapered(radius = arc_radius, width1 = arc_width1,\n width2 = arc_width2, theta = 180*dir_toggle,\n layer = layer)\n a = D.add_ref(A)\n a.connect(port = 1, destination = t.ports[2])\n dir_toggle = -dir_toggle\n xpos1 = xpos2 + arc_length\n xpos2 = xpos1 + meander_length\n t = D.add_ref(taper_section(x_start = xpos1, x_end = xpos2,\n num_pts = 30, layer = layer) )\n t.connect(port = 1, destination = a.ports[2])\n D.add_port(t.ports[2])\n\n return D",
"def arc(c,rp=False,sn=False,e=False,n=False,samplereverse=False):\n if isarc(c):\n return deepcopy(c)\n elif ispoint(c):\n cen = point(c)\n w=-1\n if samplereverse:\n w=-2\n if isvect(rp):\n psu = deepcopy(rp)\n r=psu[0]\n if r < 0:\n raise ValueError('negative radius not allowed for arc')\n psu[3]=w\n if not sn:\n return [ cen, psu ]\n else:\n if not ispoint(sn) or abs(mag(sn)-1.0) > epsilon:\n raise ValueError('bad (non-unitary) plane vector for arc')\n return [ cen, psu, point(sn) ]\n elif isgoodnum(rp):\n r = rp\n if r < 0:\n raise ValueError('negative radius not allowed for arc')\n start = 0\n end = 360\n if isgoodnum(sn) and isgoodnum(e):\n start = sn\n end = e\n psu = vect(r,start,end,w)\n if not n:\n return [ cen, psu ]\n else:\n if not ispoint(n) or abs(mag(n)-1.0) > epsilon:\n raise ValueError('bad (non-unitary) plane vector for arc')\n return [ cen, psu, point(n) ]\n\n raise ValueError('bad arguments passed to arc()')",
"def draw(p, layout=\"rd\"):\n import matplotlib.pyplot as plt\n from matplotlib.patches import Wedge\n from matplotlib.font_manager import FontManager\n\n if not isinstance(p, Pharmacophore):\n raise TypeError(\"Expected Pharmacophore, got %s instead\" %\n type(p).__name__)\n\n if not isinstance(layout, str):\n raise TypeError(\"Invalid layout! Expected str, got %s instead.\" %\n type(layout).__name__)\n\n if p.numnodes == 0:\n raise ValueError(\"Pharmacophore is empty!\")\n\n if layout == \"rd\":\n try:\n from decaf.toolkits.rd import layout\n pos = layout(p)\n except Exception as e:\n raise ImportError(\"Cannot use 'rd' layout! Use 'ob' or 'spring'\"\n \"instead\", e)\n\n elif layout == \"ob\":\n try:\n from decaf.toolkits.ob import layout\n pos = layout(p)\n except Exception as e:\n raise ImportError(\"Cannot use 'ob' layout! Use 'rd' or 'spring'\"\n \"instead\", e)\n\n elif layout == \"spring\":\n try:\n pos = spring_layout(p)\n except Exception as e:\n raise ImportError(\"Cannot use spring layout!\", e)\n else:\n raise ValueError(\"Wrong layout specified! Use 'rd', 'ob' or 'spring'\"\n \"instead.\")\n\n ax_coeff = 1.\n\n def fontsize(idx, default=FontManager.get_default_size()):\n coeff = p.nodes[idx][\"freq\"] / p.molecules\n size = default * coeff * ax_coeff\n return size\n\n fig, ax = plt.subplots()\n plt.axis(\"equal\")\n plt.axis(\"off\")\n\n axis = (np.min(pos[:, 0])-1,\n np.max(pos[:, 0])+1,\n np.min(pos[:, 1])-1,\n np.max(pos[:, 1])+1)\n plt.axis(axis)\n\n # calculate scaling ratio for font\n ax_coeff = 12. / max((axis[1]-axis[0]), (axis[3]-axis[2]))\n\n for i in range(p.numnodes):\n for j in range(i):\n if p.edges[i, j] > 0:\n tmp = np.array([pos[i], pos[j]])\n ax.plot(tmp[:, 0], tmp[:, 1], color=\"#000000\", zorder=1)\n\n r = p.nodes[i][\"freq\"] / p.molecules * 0.3\n fsize = fontsize(i)\n nfreq = sum(p.nodes[i][\"type\"].values())\n theta1 = 0.0\n for t in p.nodes[i][\"type\"]:\n delta = 360 * p.nodes[i][\"type\"][t] / nfreq\n theta2 = theta1+delta\n w = Wedge(pos[i], r, theta1, theta2, ec=\"none\", fc=COLORS[t])\n ax.add_artist(w)\n ax.text(pos[i][0], pos[i][1], str(p.nodes[i][\"label\"]),\n color=\"#000000\", ha=\"center\", va=\"center\", size=fsize)\n theta1 = theta2\n\n plt.show()\n return fig, ax",
"def AddArcToPoint(*args, **kwargs):\n return _gdi_.GraphicsPath_AddArcToPoint(*args, **kwargs)",
"def arc(t, r, angle):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n arc_in = int(((angle / 360) * circumference) / 2)\n for i in range(arc_in):\n t.fd(length)\n t.lt(360/n)",
"def create_arc(self, x1, y1, x2, y2, radius, angle, style=None, parent=None):\n sweep_flag = 0 if angle < 0 else 1\n attrs = { 'd': 'M %5f %5f A %5f %5f 0 0 %d %5f %5f' % \\\n (x1, y1, radius, radius,\n sweep_flag, x2, y2),}\n return self.create_path(attrs, style, parent)",
"def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):\n writer = self.writer\n\n writer.comment(s)\n\n glyph_map=self._glyph_map\n\n text2path = self._text2path\n color = rgb2hex(gc.get_rgb())\n fontsize = prop.get_size_in_points()\n\n style = {}\n if color != '#000000':\n style['fill'] = color\n if gc.get_alpha() != 1.0:\n style['opacity'] = short_float_fmt(gc.get_alpha())\n\n if not ismath:\n font = text2path._get_font(prop)\n _glyphs = text2path.get_glyphs_with_font(\n font, s, glyph_map=glyph_map, return_new_glyphs_only=True)\n glyph_info, glyph_map_new, rects = _glyphs\n\n if glyph_map_new:\n writer.start('defs')\n for char_id, glyph_path in six.iteritems(glyph_map_new):\n path = Path(*glyph_path)\n path_data = self._convert_path(path, simplify=False)\n writer.element('path', id=char_id, d=path_data)\n writer.end('defs')\n\n glyph_map.update(glyph_map_new)\n\n attrib = {}\n attrib['style'] = generate_css(style)\n font_scale = fontsize / text2path.FONT_SCALE\n attrib['transform'] = generate_transform([\n ('translate', (x, y)),\n ('rotate', (-angle,)),\n ('scale', (font_scale, -font_scale))])\n\n writer.start('g', attrib=attrib)\n for glyph_id, xposition, yposition, scale in glyph_info:\n attrib={'xlink:href': '#%s' % glyph_id}\n if xposition != 0.0:\n attrib['x'] = short_float_fmt(xposition)\n if yposition != 0.0:\n attrib['y'] = short_float_fmt(yposition)\n writer.element(\n 'use',\n attrib=attrib)\n\n writer.end('g')\n else:\n if ismath == \"TeX\":\n _glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,\n return_new_glyphs_only=True)\n else:\n _glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,\n return_new_glyphs_only=True)\n\n glyph_info, glyph_map_new, rects = _glyphs\n\n # we store the character glyphs w/o flipping. Instead, the\n # coordinate will be flipped when this characters are\n # used.\n if glyph_map_new:\n writer.start('defs')\n for char_id, glyph_path in six.iteritems(glyph_map_new):\n char_id = self._adjust_char_id(char_id)\n # Some characters are blank\n if not len(glyph_path[0]):\n path_data = \"\"\n else:\n path = Path(*glyph_path)\n path_data = self._convert_path(path, simplify=False)\n writer.element('path', id=char_id, d=path_data)\n writer.end('defs')\n\n glyph_map.update(glyph_map_new)\n\n attrib = {}\n font_scale = fontsize / text2path.FONT_SCALE\n attrib['style'] = generate_css(style)\n attrib['transform'] = generate_transform([\n ('translate', (x, y)),\n ('rotate', (-angle,)),\n ('scale', (font_scale, -font_scale))])\n\n writer.start('g', attrib=attrib)\n for char_id, xposition, yposition, scale in glyph_info:\n char_id = self._adjust_char_id(char_id)\n\n writer.element(\n 'use',\n transform=generate_transform([\n ('translate', (xposition, yposition)),\n ('scale', (scale,)),\n ]),\n attrib={'xlink:href': '#%s' % char_id})\n\n for verts, codes in rects:\n path = Path(verts, codes)\n path_data = self._convert_path(path, simplify=False)\n writer.element('path', d=path_data)\n\n writer.end('g')",
"def arccosh(a):",
"def AddArc(*args):\n return _gdi_.GraphicsPath_AddArc(*args)",
"def _draw_arc(file, start_x, end_x, y, height_in_units, deprel, css_class):\n height = height_in_units * _ARC_HEIGHT_UNIT\n radius = _arc_radius(height_in_units)\n length = _arc_min_length(height_in_units)\n\n # Start.\n file.write(u' <g class=\"%s\">\\n' % css_class)\n\n # Path.\n path = (\n 'M %.2f %.2f'\n 'A %.2f %.2f 0 0 1 %.2f %.2f'\n 'L %.2f %.2f'\n 'A %.2f %.2f 0 0 1 %.2f %.2f'\n ) % (\n min(start_x, end_x), y,\n radius, radius, min(start_x, end_x) + length / 2, y - height,\n max(start_x, end_x) - length / 2, y - height,\n radius, radius, max(start_x, end_x), y\n )\n file.write(u' <path d=\"%s\" class=\"arc\" />\\n' % path)\n file.write(u' <path d=\"%s\" class=\"arc hid\" />\\n' % path)\n\n # Arrow.\n arrow_angle = _ANGLE if start_x > end_x else math.pi - _ANGLE\n _draw_arrow(file, end_x, y, arrow_angle)\n\n # Role.\n deprel = cgi.escape(deprel)\n file.write(u' <text x=\"%i\" y=\"%i\" class=\"role\">%s</text>\\n' %\n ((start_x + end_x) / 2, y - height - 0.2 * _SMALL_FONT, deprel))\n\n # End.\n file.write(u' </g>\\n')",
"def arc(r, mv_direction):\n \n vert_amount = 80\n arc_vert_amount = int(vert_amount / 2);\n edge = 2 * r * math.sin(math.radians(360 / (2 * vert_amount)))\n polygon_angle = (vert_amount - 2) / vert_amount * 180\n angle = 180 - polygon_angle\n \n for i in range(arc_vert_amount):\n if i == 0: \n rotate_turtle(polygon_angle / 2, not mv_direction)\n else:\n rotate_turtle(angle, mv_direction)\n turtle.forward(edge)",
"def GetArc(arc):\r\n pass"
] | [
"0.58153826",
"0.5513635",
"0.534185",
"0.5338048",
"0.53348184",
"0.53150916",
"0.5297564",
"0.52517813",
"0.5235955",
"0.5168656",
"0.5148232",
"0.51290214",
"0.51076967",
"0.50502115",
"0.5047317",
"0.50382435",
"0.503397",
"0.501881",
"0.5012607",
"0.49682286",
"0.49567246",
"0.49473685",
"0.49438912",
"0.49167687",
"0.4905151",
"0.48968813",
"0.4890049",
"0.48770347",
"0.4876835",
"0.48677626"
] | 0.7833728 | 0 |
>>> p1, p2 = Point(0, 0), Point(2, 0) >>> Arc.from_endpoints(p1, p2, 1) Arc((1.0,0.0),1,180.0,0.0) >>> Arc.from_endpoints(p1, p2, 1) Arc((1.0,0.0),1,180.0,360.0) >>> Arc.from_endpoints(p2, p1, 1) Arc((1.0,0.0),1,360.0,180.0) >>> Arc.from_endpoints(p2, p1, 1) Arc((1.0,0.0),1,0.0,180.0) >>> p1, p2, r = Point(1, 1), Point(1, 1), 2.5 >>> Arc.from_endpoints(p1, p2, r) Arc((2.22044604925e16,0.0),1.41421356237,45.0,45.0) >>> Arc.from_endpoints(p2, p1, r) Arc((2.22044604925e16,0.0),1.41421356237,45.0,45.0) >>> p1, p2, r = Point(1, 1), Point(1, 1), 2.5 >>> Arc.from_endpoints(p1, p2, r) Arc((2.22044604925e16,0.0),1.41421356237,135.0,225.0) >>> Arc.from_endpoints(p2, p1, r) Arc((2.22044604925e16,0.0),1.41421356237,225.0,135.0) >>> p1, p2 = Point(1, 0), Point(0, 1) >>> Arc.from_endpoints(p1, p2, 1).center (1.0,1.0) >>> Arc.from_endpoints(p2, p1, 1).center (1.0,1.0) >>> Arc.from_endpoints(p1, p2, 1).center (0.0,0.0) >>> Arc.from_endpoints(p2, p1, 1).center (0.0,0.0) >>> Arc.from_endpoints(p1, p2, 0.1) | def from_endpoints(cls, p1, p2, radius):
# radius > 0 means we go clockwise from p1 to p2, radius < 0 means we go counter-clockwise
x = p2 - p1
if radius**2 < 0.25 * x.dot(x):
raise Exception("radius is too small for this arc, make it bigger")
w = (radius**2 - 0.25 * x.dot(x)) ** .5
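        # Geometry note: both endpoints lie on the circle, so the center, the chord
        # midpoint and p1 form a right triangle with hypotenuse |radius| and legs
        # w and |x|/2, giving radius**2 == w**2 + 0.25 * x.dot(x). The guard above
        # keeps the square root real.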
wn = Vector(x.y, -x.x).normalize()
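        # wn is a unit normal to the chord; it is flipped below when needed so that
        # radius * wn.cross(x) >= 0, which selects the side of the chord the center
        # lies on and keeps the orientation consistent with the sign of radius.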
if radius * wn.cross(x) < 0:
wn = -wn
center = p1 + 0.5 * x + w * wn
def get_angle(p):
return math.atan2(p.y, p.x)
a, b = get_angle(p1-center), get_angle(p2-center)
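        # a and b are the polar angles of p1 and p2 as seen from the center; the
        # loops below add multiples of 2*pi to one of them so that sweeping from
        # a to b runs in the direction implied by the sign of radius.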
if radius < 0:
while b < a:
b += TWO_PI
else:
while a < b:
a += TWO_PI
return cls(center, abs(radius), a, b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Arc( x, y0, y1, r):\n return 0.5 * r*r * ( np.arctan( (y1).astype(float)/(x).astype(float) ) - np.arctan( (y0).astype(float)/(x).astype(float) ) )",
"def createArc(self, start, finish):\n return Arc(start, finish)",
"def Arc(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,\n start_angle=0., end_angle=np.pi/2., element_type=\"tri\",\n refinement=False, refinement_level=2, algorithm=\"standard\"):\n\n # CHECK FOR ANGLE\n PI = u\"\\u03C0\".encode('utf-8').strip()\n EPS = np.finfo(np.float64).eps\n if np.abs(start_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The starting angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n if np.abs(end_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The end angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n\n a1 = np.sign(start_angle) if np.sign(start_angle)!=0. else np.sign(end_angle)\n a2 = np.sign(end_angle) if np.sign(end_angle)!=0. else np.sign(start_angle)\n if a1 == a2:\n total_angle = np.abs(end_angle - start_angle)\n if np.isclose(total_angle,0.) or np.isclose(total_angle,2.*np.pi) or total_angle > 2.*np.pi:\n self.Circle(center=center, radius=radius, nrad=nrad, ncirc=ncirc, element_type=element_type)\n return\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the arc should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if algorithm == \"midpoint_subdivision\":\n from Florence.MeshGeneration.CustomMesher import SubdivisionArc\n mesh = SubdivisionArc(center=center, radius=radius, nrad=nrad, ncirc=ncirc,\n start_angle=start_angle, end_angle=end_angle,\n element_type=element_type, refinement=refinement, refinement_level=refinement_level)\n self.__update__(mesh)\n return\n\n\n if refinement:\n ndivider = refinement_level\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider)\n\n if ncirc % 2 != 0 or ncirc < 2:\n ncirc = (ncirc // 2)*2 + 2\n\n radii = radius\n\n radius = np.linspace(0,radii,nrad+1)[1:]\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[0]*np.cos(t)[::-1]\n y = radius[0]*np.sin(t)[::-1]\n\n points = np.zeros((ncirc+2,2),dtype=np.float64)\n points[0,:] = [0.,0.]\n points[1:,:] = np.array([x,y]).T\n\n self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)\n aranger = np.arange(ncirc // 2)\n self.elements[:,1] = 2*aranger + 1\n self.elements[:,2] = 2*aranger + 2\n self.elements[:,3] = 2*aranger + 3\n\n for i in range(1,nrad):\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[i]*np.cos(t)[::-1]\n y = radius[i]*np.sin(t)[::-1]\n points = np.vstack((points,np.array([x,y]).T))\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(1,nrad):\n aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)\n elements[:,0] = aranger + i - 1\n elements[:,1] = aranger + i + ncirc\n elements[:,2] = aranger + i + ncirc + 1\n elements[:,3] = aranger + i\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n\n\n makezero(points)\n self.points = points\n self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__",
"def _constructArc(self, a, b, c):\n arc = pcbnew.PCB_SHAPE()\n arc.SetShape(STROKE_T.S_ARC)\n arc.SetLayer(Layer.Edge_Cuts)\n arc.SetArcGeometry(toKiCADPoint(a), toKiCADPoint(b), toKiCADPoint(c))\n return arc",
"def arc_to_cubic(\n start_point: Tuple[float, float],\n rx: float,\n ry: float,\n rotation: float,\n large: int,\n sweep: int,\n end_point: Tuple[float, float],\n) -> Iterator[Tuple[Optional[Point], Optional[Point], Point]]:\n if not isinstance(start_point, Point):\n start_point = Point(*start_point)\n if not isinstance(end_point, Point):\n end_point = Point(*end_point)\n\n arc = EllipticalArc(start_point, rx, ry, rotation, large, sweep, end_point)\n if arc.is_zero_length():\n return\n elif arc.is_straight_line():\n yield None, None, arc.end_point\n else:\n yield from _arc_to_cubic(arc)",
"def segmentarc(c,u1,u2):\n\n pol1=samplearc(c,u1,polar=True)\n pol2=samplearc(c,u2,polar=True)\n sr= (c[1][3] == -2)\n if sr:\n return arc(pol1[0],pol1[1],pol2[2],pol1[2],samplereverse=True)\n else:\n return arc(pol1[0],pol1[1],pol1[2],pol2[2])",
"def get_points_on_arc(self, num_points):\n points1 = np.zeros((num_points, 3)) # points placeholder for segment 1\n points2 = np.zeros((num_points, 3)) # points placeholder for segment 2\n s1 = np.linspace(0.0, self.seg_len1, num_points) # variable arc length 1\n s2 = np.linspace(0.0, self.seg_len2, num_points) # variable arc length 2\n for i in range(num_points):\n points1[i] = np.matmul(self.transformation_matrix_bishop(self.kappa1, self.phi1, s1[i]), self.base)[0:3]\n for i in range(num_points):\n T02_s = np.matmul(self.T01_bishop, self.transformation_matrix_bishop(self.kappa2, self.phi2, s2[i]))\n points2[i] = np.matmul(T02_s, self.base)[0:3]\n return points1, points2",
"def _arcArcIntersectXY(c1,c2,inside=True,params=False):\n x1=c1[0]\n x2=c2[0]\n r1=c1[1][0]\n r2=c2[1][0]\n\n # check for sample reverse condition\n sr1 = c1[1][3]==-2\n sr2 = c2[1][3]==-2\n\n ## first check for non-intersection due to distance between the\n ## centers of the arcs, treating both arcs as circles for the moment\n\n d=dist(x1,x2) #calculate the distance d between circle centers\n\n if d > r1+r2:\n return False # too far, no possiblity of intersection\n\n if ( r1> r2 and d < r1-r2) or (r2 >= r1 and d < r2-r1):\n return False # too close, little arc is fully inside bigger arc\n\n if d < epsilon:\n return False # circle centers too close for stable calculation\n\n ## OK, we are in the goldilocks zone of intersection. this means\n ## that if boh arcs are cicles or if inside=False we are\n ## guaranteed one or two intersections. Calculate those\n ## intersections and then test to see if they fall between start\n ## and end of the respective arcs\n\n ## we start by calculating the distance id of the intersection plane\n ## from the center of arc 1, knowing that by definition id <= r1\n\n ## Math: consider the triangle with side lengths r1, r2, and d,\n ## where d is the previously calculated distance between arc\n ## centers. Consider the two right triangles with side lengths\n ## r1, id, h, and r2, h, (d-id). We know that:\n ## id^2 + h^2 = r1^2, (d-id)^2 + h^2 = r2^2\n ## solving both for h2 and then substituting, this means:\n ## r1^2 - id^2 = r2^2 - (d-id)^2\n ## collecting terms and solving for id produces:\n ## id = (r1^2-r2^2 + d^2)/2d\n\n id = (r1*r1 - r2*r2 + d*d)/(2 * d)\n\n ## compute the point on the line connecting the two arc centers\n ## that is id away from the first arc\n\n v1 = scale3(sub(x2,x1),1.0/d) # unitary direction vector pointing\n # from x1 to x2\n v2 = scale3(v1,id) # point on line between two circles in\n # coordinate space centered at x1\n\n ## compute direction vector o orthgonal to v1 -- the line that\n ## intersects point v2 and v2+o will pass through our intersection\n ## points\n\n o = orthoXY(v1)\n \n ## now, transform back into world coordinates and calculate the\n ## intersection of this line with either of our arcs, treating\n ## them as circles for now\n\n l = [add(v2,x1),add(add(v2,o),x1)]\n\n s = _lineArcIntersectXY(l,c1,False)\n\n ## as a sanity check, do the same with the other arc. Results\n ## should be within epsilon\n #ss = _lineArcIntersectXY(l,c2,False)\n #foo = list(map(lambda x, y: dist(x,y) < epsilon,s,ss))\n #print(\"sanity check: \" , foo)\n\n if not s or len(s) == 0:\n raise ValueError('no computed intersections, something is wrong')\n\n if not inside and not params:\n return s\n \n ## jump back to arc1 and arc2 space and check angles\n\n s1 = list(map(lambda x: sub(x,x1),s))\n s2 = list(map(lambda x: sub(x,x2),s))\n\n ## compute start and end angles for arcs\n start1=c1[1][1]\n end1=c1[1][2]\n if not (start1 == 0 and end1 == 360):\n start1 = start1 % 360.0\n end1 = end1 % 360.0\n if end1 < start1:\n end1 = end1 + 360.0\n \n start2=c2[1][1]\n end2=c2[1][2]\n \n if not (start2 == 0 and end2 == 360):\n start2 = start2 % 360.0\n end2 = end2 % 360.0\n if end2 < start2:\n end2 = end2 + 360.0\n \n\n ## check each intersection against angles for each arc. 
\n ss = []\n uparam1 = []\n uparam2 = []\n for i in range(len(s)):\n p1 =s1[i]\n p2 =s2[i]\n ang1 = (atan2(p1[1],p1[0]) % pi2)*360.0/pi2\n ang2 = (atan2(p2[1],p2[0]) % pi2)*360.0/pi2\n\n if params:\n u1 = 0\n u2 = 0\n if end1 <= 360.0 or ang1 >= start1 or \\\n ( end1 > 360.0 and ang1 > end1-360.0):\n u1 = (ang1-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n elif end1 > 360.0:\n u1 = (ang1+360.0-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n uparam1 = uparam1 + [ u1 ]\n \n if end2 <= 360.0 or ang2 >= start2 or \\\n ( end2 > 360.0 and ang2 > end1-360.0):\n u2 = (ang2-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n elif end2 > 360.0:\n u2 = (ang2+360.0-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n uparam2 = uparam2 + [ u2]\n \n else:\n good = False\n ## check angle against first arc\n if end1 <= 360.0 and ang1 >= start1 and ang1 <= end1:\n good = True\n elif end1 > 360.0 and (ang1 >= start1 or ang1<= end1-360.0):\n good = True\n\n ## check angle against second arc\n if end2 <= 360.0 and ang2 >= start2 and ang2 <= end2:\n good = good and True\n elif end2 > 360.0 and (ang2 >= start2 or ang2<= end2-360.0):\n good = good and True\n else:\n good = False\n\n ## only add instersection to the list if both checks were passed\n if good:\n ss = ss + [ s[i] ]\n \n if not params and len(ss) == 0:\n return False\n else:\n if params:\n return [uparam1,uparam2]\n else:\n return ss",
"def create_arc(self, x1, y1, x2, y2, radius, angle, style=None, parent=None):\n sweep_flag = 0 if angle < 0 else 1\n attrs = { 'd': 'M %5f %5f A %5f %5f 0 0 %d %5f %5f' % \\\n (x1, y1, radius, radius,\n sweep_flag, x2, y2),}\n return self.create_path(attrs, style, parent)",
"def __init__(self):\n raise NotImplementedError('cannot create independent arc')",
"def addRevArcSeg(self, x1, y1, x2, y2, cx, cy, arcDir):\n a = x1 - cx\n b = y1 - cy\n r = sqrt(a*a + b*b)\n arc = Arc.fromVectors(QVector2D(a, b),\n QVector2D(x2 - cx, y2 - cy),\n r,\n arcDir == 'cclw')\n # TODO: By halving the mesh segs ( * 0.5), fewer triangles are\n # created. Shading is ok but arc edges look blocky.\n # angstep = 360.0 / (self._mesh.segs * 0.5)\n angstep = 360.0 / self._mesh.segs\n # minimum 2 segments in the arc\n segs = max(int(abs(arc.span()) / angstep), 3)\n step = arc.span() / segs\n sa = arc.startAngle()\n a1 = radians(sa)\n sa1 = sin(a1)\n ca1 = cos(a1)\n for i in range(1, segs):\n a2 = radians(sa + step * i)\n sa2 = sin(a2)\n ca2 = cos(a2)\n x1 = cx + r * ca1\n y1 = cy + r * sa1\n x2 = cx + r * ca2\n y2 = cy + r * sa2\n self.addRevLineSeg(x1, y1, x2, y2)\n a1 = a2\n sa1 = sa2\n ca1 = ca2\n if i == 1:\n # only blend the first strip\n self._mesh.blendTangent(False)\n # last strip\n else:\n a2 = radians(arc.endAngle())\n x1 = cx + r * ca1\n y1 = cy + r * sa1\n x2 = cx + r * cos(a2)\n y2 = cy + r * sin(a2)\n self.addRevLineSeg(x1, y1, x2, y2)",
"def arc_pts(cx, cy, w, h, start_angle, end_angle, res=5):\n sweep_angle = end_angle - start_angle\n \n if abs(sweep_angle) < 0.0001:\n vx = cx + cos(start_angle) * w / 2.0\n vy = cy + sin(start_angle) * h / 2.0\n return [(vx, vy)]\n num_points = abs(sweep_angle * w / 2) / res\n pts_list = []\n step_angle = float(sweep_angle) / num_points \n va = start_angle\n side = 1 if sweep_angle > 0 else -1\n while va * side < end_angle * side or abs(va - end_angle) < 0.0001:\n vx = cx + cos(va) * w / 2.0\n vy = cy + sin(va) * h / 2.0\n pts_list.append((vx, vy))\n va += step_angle\n return pts_list",
"def intersect(self, other):\n if isinstance(other, Arc):\n if self.center == other.center:\n if nearly_zero(self.radius - other.radius):\n v = Arc(self.center, self.radius, 0, 0)\n v.angle_range = self.angle_range.intersection(other.angle_range)\n return v\n else:\n return None\n else:\n # find the two points where the circles intersect\n # filter them by the angle ranges of both arcs, must be in both to survive\n # return list of surviving points, or None\n k = 1. / abs(self.center - other.center)\n theta = math.atan2(other.center.y - self.center.y, other.center.x - self.center.x)\n r1 = k * self.radius\n r2 = k * other.radius\n intersections = []\n # u and v are in a coordinate system that has been scaled, rotated, and translated\n # to move the two centers to (0, 0) and (1, 0) to simplify some of the math.\n u = (r1**2 + 1 - r2**2) / 2\n if abs(r1) >= abs(u):\n v = (r1**2 - u**2) ** .5\n # Transform u and v back into the original coordinate system.\n x1 = self.center.x + (u * math.cos(theta) - v * math.sin(theta)) / k\n y1 = self.center.y + (v * math.cos(theta) + u * math.sin(theta)) / k\n p = Point(x1, y1)\n if self.included_angle(p) and other.included_angle(p):\n intersections.append(Point(x1, y1))\n if not nearly_zero(r1 - u):\n x2 = self.center.x + (u * math.cos(theta) + v * math.sin(theta)) / k\n y2 = self.center.y + (-v * math.cos(theta) + u * math.sin(theta)) / k\n p2 = Point(x2, y2)\n if self.included_angle(p2) and other.included_angle(p2):\n intersections.append(p2)\n return intersections or None\n elif isinstance(other, LineSegment):\n c = (self.center - other.p2).square() - self.radius**2\n b = 2 * (other.p1 - other.p2).dot(other.p2 - self.center)\n a = (other.p1 - other.p2).square()\n det = b**2 - 4 * a * c\n if det < 0:\n return None\n elif nearly_zero(det):\n pts = [-b / (2. * a)]\n else:\n pts = [(-b + det**0.5) / (2 * a), (-b - det**0.5) / (2 * a)]\n pts = map(other.param_to_point,\n filter(lambda root: 0 <= root <= 1, pts))\n pts = filter(self.included_angle, pts)\n if len(pts) == 0:\n return None\n elif len(pts) == 1:\n return pts[0]\n else:\n return pts\n raise TypeError(other)",
"def CircularArc(pointa, pointb, center, resolution=100, negative=False):\n check_valid_vector(pointa, 'pointa')\n check_valid_vector(pointb, 'pointb')\n check_valid_vector(center, 'center')\n if not np.isclose(\n np.linalg.norm(np.array(pointa) - np.array(center)),\n np.linalg.norm(np.array(pointb) - np.array(center)),\n ):\n raise ValueError(\"pointa and pointb are not equidistant from center\")\n\n # fix half-arc bug: if a half arc travels directly through the\n # center point, it becomes a line\n pointb = list(pointb)\n pointb[0] -= 1e-10\n pointb[1] -= 1e-10\n\n arc = _vtk.vtkArcSource()\n arc.SetPoint1(*pointa)\n arc.SetPoint2(*pointb)\n arc.SetCenter(*center)\n arc.SetResolution(resolution)\n arc.SetNegative(negative)\n\n arc.Update()\n angle = np.deg2rad(arc.GetAngle())\n arc = wrap(arc.GetOutput())\n # Compute distance of every point along circular arc\n center = np.array(center).ravel()\n radius = np.sqrt(np.sum((arc.points[0] - center) ** 2, axis=0))\n angles = np.arange(0.0, 1.0 + 1.0 / resolution, 1.0 / resolution) * angle\n arc['Distance'] = radius * angles\n return arc",
"def archimedean(\n radius_start,\n radius_end,\n step,\n center=None,\n close=False,\n point_start=None,\n angle_start=None,\n arc_res=None):\n\n if radius_start > radius_end:\n sign = 1\n else:\n sign = -1\n\n # the spiral constant\n # evaluated from: step = K * 2 * pi\n K = step / (np.pi * 2)\n\n # use our constant to find angular start and end\n theta_start = radius_start / K\n theta_end = radius_end / K\n\n # if not passed set angular resolution\n if arc_res is None:\n arc_res = constants.default_arc\n\n arc_count = int(np.ceil((\n abs(theta_end - theta_start)) / arc_res))\n\n # given that arcs will share points how many\n # points on the helix do we need\n arc_index, point_count = arc_indexes(arc_count)\n\n assert arc_index.max() == point_count - 1\n\n # create an array of angles\n theta = np.linspace(theta_start, theta_end, point_count)\n\n # use the spiral equation to generate radii\n radii = theta * K\n\n # make sure they match\n assert np.isclose(radii[0], radius_start)\n assert np.isclose(radii[-1], radius_end)\n\n # do offset AFTER radius calculation\n if angle_start is not None:\n theta += (angle_start - theta_start)\n\n # convert polar coordinates to 2D cartesian\n points = np.column_stack(\n (np.cos(theta), np.sin(theta))) * radii.reshape((-1, 1))\n\n if close:\n\n # get indexes of arcs required to close\n close_idx, close_ct = arc_indexes(\n int(np.ceil((np.pi * 2) / arc_res)))\n\n # the additional angles needed to close\n # we are cutting off the first point as its a duplicate\n t_close = np.linspace(theta[-1],\n theta[-1] + np.pi * 2 * sign,\n close_ct)[1:]\n\n # additional points to close the arc\n closer = np.column_stack((\n np.cos(t_close), np.sin(t_close))) * radii[-1]\n assert len(closer) == close_ct - 1\n assert len(points) == point_count\n\n # stack points with closing arc\n points = np.vstack((points, closer))\n # add the additional points to the count\n point_count += close_ct - 1\n # add the additional arc indexes\n\n arc_index = np.vstack((\n arc_index, arc_index[-1][-1] + close_idx))\n\n assert len(points) == point_count\n # max index of arcs should correspond to points\n assert arc_index[-1][-1] == point_count - 1\n\n if center is not None:\n points += center\n\n # convert sequential points into three point arcs\n arcs = points[arc_index]\n\n if constants.strict:\n # check all arcs to make sure the correspond\n for a, b in zip(arcs[:-1], arcs[1:]):\n assert np.allclose(a[2], b[0])\n\n if point_start is not None:\n a, b = np.clip(\n (point_start[:2] - center[:2]) / radius_start,\n -1.0, 1.0)\n assert np.isclose(a, np.cos(angle_start), atol=1e-3)\n assert np.isclose(b, np.sin(angle_start), atol=1e-3)\n\n return arcs",
"def arc(radius = 10, width = 0.5, theta = 45, start_angle = 0,\n angle_resolution = 2.5, layer = 0):\n inner_radius = radius - width/2\n outer_radius = radius + width/2\n angle1 = (start_angle) * pi/180\n angle2 = (start_angle + theta) * pi/180\n t = np.linspace(angle1, angle2, int(np.ceil(abs(theta)/angle_resolution)))\n inner_points_x = (inner_radius*cos(t)).tolist()\n inner_points_y = (inner_radius*sin(t)).tolist()\n outer_points_x = (outer_radius*cos(t)).tolist()\n outer_points_y = (outer_radius*sin(t)).tolist()\n xpts = inner_points_x + outer_points_x[::-1]\n ypts = inner_points_y + outer_points_y[::-1]\n\n D = Device('arc')\n D.add_polygon(points = (xpts,ypts), layer = layer)\n D.add_port(name = 1,\n midpoint = (radius*cos(angle1), radius*sin(angle1)),\n width = width,\n orientation = start_angle - 90 + 180*(theta<0))\n D.add_port(name = 2,\n midpoint = (radius*cos(angle2), radius*sin(angle2)),\n width = width,\n orientation = start_angle + theta + 90 - 180*(theta<0))\n D.info['length'] = (abs(theta) * pi/180) * radius\n return D",
"def draw_arc(ax, center, height, width, theta1=0, theta2=numpy.pi, color='k', direction='down',\n num_points=DEFAULT_ARC_POINTS):\n LEFT_END_THETA = numpy.pi / 2\n RIGHT_END_THETA = numpy.pi * 1.5\n MIDPOINT_THETA = numpy.pi\n\n vertical_baseline = center[1]\n\n assert LEFT_END_THETA <= theta1 <= theta2 <= RIGHT_END_THETA\n\n b = height\n a = width / 2\n\n # determine how to allocate points\n left_angle_span = min(max(MIDPOINT_THETA - theta1, 0), theta2 - theta1)\n right_angle_span = min(max(theta2 - MIDPOINT_THETA, 0), theta2 - theta1)\n total_angle_span = left_angle_span + right_angle_span\n left_points = int(num_points * left_angle_span / total_angle_span)\n right_points = num_points - left_points\n\n x_coords = numpy.empty(num_points)\n y_coords = numpy.empty(num_points)\n\n if left_points:\n # plot upper left quadrant\n left_theta2 = theta1 + left_angle_span\n x, y = compute_half_arc_points(center=(center[0], 0),\n a=a, b=b,\n theta1=theta1, theta2=left_theta2,\n num_points=left_points)\n x_coords[:left_points] = x[:]\n y_coords[:left_points] = y[:]\n if right_points:\n # plot upper right quadrant\n right_theta1 = theta2 - right_angle_span\n x, y = compute_half_arc_points(center=(center[0], 0),\n a=a, b=b,\n theta1=right_theta1, theta2=theta2,\n num_points=right_points)\n x_coords[left_points:] = x[:]\n y_coords[left_points:] = y[:]\n\n if direction == 'down':\n y_coords = - y_coords\n\n y_coords += vertical_baseline\n\n ax.plot(x_coords, y_coords, color=color)",
"def _arc(i, j, width=1, linestyle='-', color='black'):\n\treturn Arc(((i+j)/2., 0), abs(i-j), abs(i-j), 0, 0, 180, linewidth=width, \n\t\tedgecolor=color, fill=False, linestyle=linestyle)",
"def arc(c,rp=False,sn=False,e=False,n=False,samplereverse=False):\n if isarc(c):\n return deepcopy(c)\n elif ispoint(c):\n cen = point(c)\n w=-1\n if samplereverse:\n w=-2\n if isvect(rp):\n psu = deepcopy(rp)\n r=psu[0]\n if r < 0:\n raise ValueError('negative radius not allowed for arc')\n psu[3]=w\n if not sn:\n return [ cen, psu ]\n else:\n if not ispoint(sn) or abs(mag(sn)-1.0) > epsilon:\n raise ValueError('bad (non-unitary) plane vector for arc')\n return [ cen, psu, point(sn) ]\n elif isgoodnum(rp):\n r = rp\n if r < 0:\n raise ValueError('negative radius not allowed for arc')\n start = 0\n end = 360\n if isgoodnum(sn) and isgoodnum(e):\n start = sn\n end = e\n psu = vect(r,start,end,w)\n if not n:\n return [ cen, psu ]\n else:\n if not ispoint(n) or abs(mag(n)-1.0) > epsilon:\n raise ValueError('bad (non-unitary) plane vector for arc')\n return [ cen, psu, point(n) ]\n\n raise ValueError('bad arguments passed to arc()')",
"def elliptic_arc(numbers, p_current, relative = False):\n if len(numbers) != 7:\n return None\n\n if any(numbers[:2]) == 0:\n return straight_line(numbers, p_current, relative)\n else:\n p_next = Point(numbers[5], numbers[6])\n if relative:\n p_next += p_curent\n\n return Ellipse(p_current, p_next, \n numbers[0], #radius x\n numbers[1], #radius y\n numbers[2], #angle\n numbers[3], #large flag\n numbers[4]) #sweep flag",
"def get_arc_3D(v1, v2, points_per_radian=100, radius=1):\n\n # v1 and w become the x, y axes of the great circle\n v1_3D = ang_to_vec_coords(v1, radius=radius)\n v2_3D = ang_to_vec_coords(v2, radius=radius)\n w_axis_3D = np.cross(np.cross(v1_3D, v2_3D), v1_3D)\n # make w a vector of proper radius\n w_len = np.sqrt(square_distance([0,0,0], w_axis_3D))\n w_3D = w_axis_3D * (radius / w_len) \n arc_len = np.arccos(np.dot(v1_3D, v2_3D))\n num_points = arc_len * points_per_radian\n t = np.linspace(0, arc_len, num_points)\n u, cos_t = np.meshgrid(v1_3D, np.cos(t))\n w, sin_t = np.meshgrid(w_3D, np.sin(t))\n arc_points = u*cos_t + w*sin_t\n return arc_points",
"def Oneside( x, y0, y1, r):\n\n true = 1\n size_x = np.shape( x )\n if not size_x: size_x = [0]\n\n if size_x[ 0 ] == 0:\n if x == 0: return x\n elif abs( x ) >= r: return Arc( x, y0, y1, r )\n yh = sqrt( r*r - x*x )\n if ( y0 <= -yh ):\n if ( y1 <= -yh ) : return Arc( x, y0, y1, r )\n elif ( y1 <= yh ) : return Arc( x, y0, -yh, r ) \\\n + Chord( x, -yh, y1 )\n else : return Arc( x, y0, -yh, r ) \\\n + Chord( x, -yh, yh ) + Arc( x, yh, y1, r )\n \n elif ( y0 < yh ):\n if ( y1 <= -yh ) : return Chord( x, y0, -yh ) \\\n + Arc( x, -yh, y1, r )\n elif ( y1 <= yh ) : return Chord( x, y0, y1 )\n else : return Chord( x, y0, yh ) + Arc( x, yh, y1, r )\n\n else :\n if ( y1 <= -yh ) : return Arc( x, y0, yh, r ) \\\n + Chord( x, yh, -yh ) + Arc( x, -yh, y1, r )\n elif ( y1 <= yh ) : return Arc( x, y0, yh, r ) + Chord( x, yh, y1 )\n else : return Arc( x, y0, y1, r )\n\n else :\n ans2 = x\n t0 = where( x == 0)[0]\n count = len(t0)\n if count == len( x ): return ans2\n\n ans = x * 0\n yh = x * 0\n to = where( abs( x ) >= r)[0]\n tocount = len(to)\n ti = where( abs( x ) < r)[0]\n ticount = len(ti)\n if tocount != 0: ans[ to ] = Arc( x[to], y0[to], y1[to], r )\n if ticount == 0: return ans\n \n yh[ ti ] = sqrt( r*r - x[ti]*x[ti] )\n \n t1 = where( np.less_equal(y0[ti],-yh[ti]) )[0]\n count = len(t1)\n if count != 0:\n i = ti[ t1 ]\n\n t2 = where( np.less_equal(y1[i],-yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], y1[j], r )\n\n t2 = where( ( greater(y1[i],-yh[i]) ) &\n ( less_equal(y1[i],yh[i]) ))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], -yh[j], r ) \\\n + Chord( x[j], -yh[j], y1[j] )\n\n t2 = where( greater(y1[i], yh[i]) )[0]\n count = len(t2)\n\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], -yh[j], r ) \\\n + Chord( x[j], -yh[j], yh[j] ) \\\n + Arc( x[j], yh[j], y1[j], r )\n \n t1 = where( ( greater(y0[ti],-yh[ti]) ) & \n ( less(y0[ti],yh[ti]) ))[0] \n count = len(t1)\n if count != 0:\n i = ti[ t1 ]\n\n t2 = where( np.less_equal(y1[i],-yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Chord( x[j], y0[j], -yh[j] ) \\\n + Arc( x[j], -yh[j], y1[j], r )\n \n\n t2 = where( ( greater(y1[i], -yh[i]) ) & \n ( less_equal(y1[i], yh[i]) ))[0]\n count = len(t2)\n\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Chord( x[j], y0[j], y1[j] )\n\n t2 = where( greater(y1[i], yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Chord( x[j], y0[j], yh[j] ) \\\n + Arc( x[j], yh[j], y1[j], r )\n\n t1 = where( greater_equal(y0[ti], yh[ti]))[0] \n count = len(t1)\n if count != 0:\n i = ti[ t1 ]\n\n t2 = where ( np.less_equal(y1[i], -yh[i]))[0] \n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], yh[j], r ) \\\n + Chord( x[j], yh[j], -yh[j] ) \\\n + Arc( x[j], -yh[j], y1[j], r )\n\n t2 = where( ( greater(y1[i], -yh[i]) ) & \n ( less_equal(y1[i], yh[i]) ))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], yh[j], r ) \\\n + Chord( x[j], yh[j], y1[j] )\n\n t2 = where( greater(y1[i], yh[i]))[0]\n count = len(t2)\n if count != 0:\n j = ti[ t1[ t2 ] ]\n ans[j] = Arc( x[j], y0[j], y1[j], r )\n\n return ans",
"def arc_points_between_vectors(x, y, z, v1, v2, angle, nb_points):\n arc_origin = np.array([x, y, z])\n arc_points = []\n for t in np.linspace(0, 1, nb_points):\n # slerp formula (https://en.wikipedia.org/wiki/Slerp) between v1 vector and v2 vector\n arc_points.append(\n sin((1 - t) * angle) / sin(angle) * v1 + sin(t * angle) / sin(angle) * v2 + arc_origin)\n\n return np.array(arc_points)",
"def arccenter(c):\n start=c[1][1] \n end=c[1][2]\n if start == 0 and end == 360:\n return c[0]\n else:\n return samplearc(c,0.5)",
"def create_arc(arc_data: Dict[str, Any], arc_tag: str) -> FpArc:\n fp_arc = arc_data[arc_tag]\n # end in kicad is start point\n start: Coords = [get_dict_by_key(fp_arc, 'end')['end'][0], get_dict_by_key(fp_arc, 'end')['end'][1]]\n start[1] = str(-1 * float(start[1]))\n end: Coords = [get_dict_by_key(fp_arc, 'start')['start'][0],\n get_dict_by_key(fp_arc, 'start')['start'][1]] # start in kicad is center point\n end[1] = str(-1 * float(end[1]))\n angle: float = -1 * float(get_dict_by_key(fp_arc, 'angle')['angle'])\n layer: Layer = convert_to_layers(get_dict_by_key(fp_arc, 'layer')['layer'])[0]\n width: float = get_dict_by_key(fp_arc, 'width')['width']\n new_arc = FpArc(start=start, end=end, angle=angle, layer=layer, width=width)\n new_arc.end = get_end_point(new_arc)\n return new_arc",
"def compute_half_arc_points(center, a, b, theta1, theta2, num_points=DEFAULT_ARC_POINTS):\n # ToDo: Add input validation to make sure we stay within a single quadrant.\n x_coords = numpy.empty(num_points)\n y_coords = numpy.empty(num_points)\n\n for i in range(0, num_points):\n theta = (theta2 - theta1) * (i / max(num_points - 1, 1)) + theta1\n fi = numpy.pi / 2 - numpy.arctan(numpy.tan(theta))\n x = center[0] + a * numpy.cos(fi)\n y = center[1] + b * numpy.sin(fi)\n x_coords[i] = x\n y_coords[i] = y\n\n return x_coords, y_coords",
"def arc(radius = 10, angle = 90, num_pts = 720):\n t = np.linspace(0, angle*np.pi/180, abs(int(num_pts*angle/360))-2)\n x = radius*np.cos(t)\n y = radius*np.sin(t)\n points = np.array((x,y)).T\n start_angle = 90*np.sign(angle)\n end_angle = start_angle + angle\n return points, start_angle, end_angle",
"def approximateArc(arc, endWith):\n SEGMENTS_PER_FULL= 4 * 32 # To Be consistent with default shapely settings\n\n startAngle = EDA_ANGLE(0, pcbnew.DEGREES_T)\n endAngle = EDA_ANGLE(0, pcbnew.DEGREES_T)\n arc.CalcArcAngles(startAngle, endAngle)\n if arc.GetShape() == STROKE_T.S_CIRCLE:\n endAngle = startAngle + 360 * deg\n segments = SEGMENTS_PER_FULL\n else:\n segments = abs(int((endAngle.AsDegrees() - startAngle.AsDegrees()) * SEGMENTS_PER_FULL // 360))\n # Ensure a minimal number of segments for small angle section of arcs\n segments = max(segments, 12)\n theta = np.linspace(startAngle.AsRadians(), endAngle.AsRadians(), segments)\n x = arc.GetCenter()[0] + arc.GetRadius() * np.cos(theta)\n y = arc.GetCenter()[1] + arc.GetRadius() * np.sin(theta)\n outline = list(np.column_stack([x, y]))\n\n end = np.array(endWith)\n first = np.array([outline[0][0], outline[0][1]])\n last = np.array([outline[-1][0], outline[-1][1]])\n if (np.linalg.norm(end - first) < np.linalg.norm(end - last)):\n outline.reverse()\n return outline",
"def get_arc_center(self):\n # First two anchors and handles\n a1, h1, h2, a2 = self.points[:4]\n # Tangent vectors\n t1 = h1 - a1\n t2 = h2 - a2\n # Normals\n n1 = rotate_vector(t1, TAU / 4)\n n2 = rotate_vector(t2, TAU / 4)\n try:\n return line_intersection(\n line1=(a1, a1 + n1),\n line2=(a2, a2 + n2),\n )\n except Exception:\n warnings.warn(\"Can't find Arc center, using ORIGIN instead\")\n return np.array(ORIGIN)",
"def HollowArc(self, center=(0.,0.), inner_radius=1., outer_radius=2., nrad=16, ncirc=40,\n start_angle=0., end_angle=np.pi/2., element_type=\"tri\", refinement=False, refinement_level=2):\n\n # CHECK FOR ANGLE\n PI = u\"\\u03C0\".encode('utf-8').strip()\n EPS = np.finfo(np.float64).eps\n if np.abs(start_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The starting angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n if np.abs(end_angle) + EPS > 2.*np.pi:\n raise ValueError(\"The end angle should be either in range [-2{},0] or [0,2{}]\".format(PI,PI))\n\n\n if np.sign(start_angle) == np.sign(end_angle):\n total_angle = np.abs(end_angle - start_angle)\n if np.isclose(total_angle,0.) or total_angle > 2.*np.pi:\n self.Circle(center=center, radius=radius, nrad=nrad, ncirc=ncirc, element_type=element_type)\n return\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center of the arc should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if refinement:\n ndivider = refinement_level\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider) + 2\n\n if ncirc % 2 != 0 or ncirc < 2:\n ncirc = (ncirc // 2)*2 + 2\n\n # radius = np.linspace(inner_radius,outer_radius,nrad)\n # points = np.zeros((1,2),dtype=np.float64)\n # for i in range(nrad):\n # t = np.linspace(start_angle,end_angle,ncirc+1)\n # x = radius[i]*np.cos(t)[::-1]\n # y = radius[i]*np.sin(t)[::-1]\n # points = np.vstack((points,np.array([x,y]).T))\n # points = points[ncirc+2:,:]\n\n radius = np.linspace(inner_radius,outer_radius,nrad-1)\n points = np.zeros((1,2),dtype=np.float64)\n for i in range(nrad-1):\n t = np.linspace(start_angle,end_angle,ncirc+1)\n x = radius[i]*np.cos(t)[::-1]\n y = radius[i]*np.sin(t)[::-1]\n points = np.vstack((points,np.array([x,y]).T))\n points = points[1:,:]\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n makezero(points)\n self.points = points\n\n self.elements = np.zeros((1,4),dtype=np.int64)\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(nrad-2):\n aranger = np.arange(ncirc*i,ncirc*(i+1))\n elements[:,0] = aranger + i\n elements[:,1] = aranger + i + ncirc + 1\n elements[:,2] = aranger + i + ncirc + 2\n elements[:,3] = aranger + i + 1\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n self.elements = self.elements[1:,:]\n\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__\n self.CheckNodeNumbering(change_order_to=\"anti-clockwise\", verbose=False)\n\n self.points = np.ascontiguousarray(self.points)"
] | [
"0.6723727",
"0.6515953",
"0.6461364",
"0.63957953",
"0.6276647",
"0.62342805",
"0.6219087",
"0.6149375",
"0.61468315",
"0.6137554",
"0.60967463",
"0.6093382",
"0.60369945",
"0.60079134",
"0.59980977",
"0.5988356",
"0.59547377",
"0.5905458",
"0.58958495",
"0.58899057",
"0.58607465",
"0.58410335",
"0.58325696",
"0.580403",
"0.5789816",
"0.57779425",
"0.57720214",
"0.57520497",
"0.5728033",
"0.5703442"
] | 0.7708817 | 0 |
>>> me = Arc.from_endpoints(Point(0,0), Point(2,0), 1) >>> me.intersect('foo') | def intersect(self, other):
if isinstance(other, Arc):
if self.center == other.center:
if nearly_zero(self.radius - other.radius):
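                    # Concentric arcs with (numerically) equal radii overlap along an
                    # arc: keep the shared center and radius and intersect the two
                    # angular ranges.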
v = Arc(self.center, self.radius, 0, 0)
v.angle_range = self.angle_range.intersection(other.angle_range)
return v
else:
return None
else:
# find the two points where the circles intersect
# filter them by the angle ranges of both arcs, must be in both to survive
# return list of surviving points, or None
k = 1. / abs(self.center - other.center)
theta = math.atan2(other.center.y - self.center.y, other.center.x - self.center.x)
r1 = k * self.radius
r2 = k * other.radius
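                # k rescales lengths so the distance between the two centers becomes 1,
                # and theta is the direction from self.center to other.center; r1 and r2
                # are the radii measured in that normalized frame.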
intersections = []
# u and v are in a coordinate system that has been scaled, rotated, and translated
# to move the two centers to (0, 0) and (1, 0) to simplify some of the math.
u = (r1**2 + 1 - r2**2) / 2
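                # In the normalized frame the intersection points are (u, +/-v) with
                # u**2 + v**2 == r1**2 and (u - 1)**2 + v**2 == r2**2; subtracting the
                # two equations gives u = (r1**2 + 1 - r2**2) / 2, and v = sqrt(r1**2 - u**2)
                # is real only when abs(u) <= r1, which is what the guard below checks.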
if abs(r1) >= abs(u):
v = (r1**2 - u**2) ** .5
# Transform u and v back into the original coordinate system.
x1 = self.center.x + (u * math.cos(theta) - v * math.sin(theta)) / k
y1 = self.center.y + (v * math.cos(theta) + u * math.sin(theta)) / k
p = Point(x1, y1)
if self.included_angle(p) and other.included_angle(p):
intersections.append(Point(x1, y1))
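                    # The second candidate is the mirror image (u, -v) of the first; it is
                    # skipped when the circles are tangent (v == 0, i.e. r1 == u), since it
                    # would duplicate the point just added.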
if not nearly_zero(r1 - u):
x2 = self.center.x + (u * math.cos(theta) + v * math.sin(theta)) / k
y2 = self.center.y + (-v * math.cos(theta) + u * math.sin(theta)) / k
p2 = Point(x2, y2)
if self.included_angle(p2) and other.included_angle(p2):
intersections.append(p2)
return intersections or None
elif isinstance(other, LineSegment):
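            # Points on the segment are P(t) = p2 + t * (p1 - p2) with t in [0, 1];
            # requiring |P(t) - center|**2 == radius**2 yields the quadratic
            # a*t**2 + b*t + c == 0 built below. The discriminant separates no crossing,
            # a tangent touch and two crossings; roots inside [0, 1] lie on the segment
            # and are then kept only if they fall inside this arc's angular range.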
c = (self.center - other.p2).square() - self.radius**2
b = 2 * (other.p1 - other.p2).dot(other.p2 - self.center)
a = (other.p1 - other.p2).square()
det = b**2 - 4 * a * c
if det < 0:
return None
elif nearly_zero(det):
pts = [-b / (2. * a)]
else:
pts = [(-b + det**0.5) / (2 * a), (-b - det**0.5) / (2 * a)]
pts = map(other.param_to_point,
filter(lambda root: 0 <= root <= 1, pts))
pts = filter(self.included_angle, pts)
if len(pts) == 0:
return None
elif len(pts) == 1:
return pts[0]
else:
return pts
raise TypeError(other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _arcArcIntersectXY(c1,c2,inside=True,params=False):\n x1=c1[0]\n x2=c2[0]\n r1=c1[1][0]\n r2=c2[1][0]\n\n # check for sample reverse condition\n sr1 = c1[1][3]==-2\n sr2 = c2[1][3]==-2\n\n ## first check for non-intersection due to distance between the\n ## centers of the arcs, treating both arcs as circles for the moment\n\n d=dist(x1,x2) #calculate the distance d between circle centers\n\n if d > r1+r2:\n return False # too far, no possiblity of intersection\n\n if ( r1> r2 and d < r1-r2) or (r2 >= r1 and d < r2-r1):\n return False # too close, little arc is fully inside bigger arc\n\n if d < epsilon:\n return False # circle centers too close for stable calculation\n\n ## OK, we are in the goldilocks zone of intersection. this means\n ## that if boh arcs are cicles or if inside=False we are\n ## guaranteed one or two intersections. Calculate those\n ## intersections and then test to see if they fall between start\n ## and end of the respective arcs\n\n ## we start by calculating the distance id of the intersection plane\n ## from the center of arc 1, knowing that by definition id <= r1\n\n ## Math: consider the triangle with side lengths r1, r2, and d,\n ## where d is the previously calculated distance between arc\n ## centers. Consider the two right triangles with side lengths\n ## r1, id, h, and r2, h, (d-id). We know that:\n ## id^2 + h^2 = r1^2, (d-id)^2 + h^2 = r2^2\n ## solving both for h2 and then substituting, this means:\n ## r1^2 - id^2 = r2^2 - (d-id)^2\n ## collecting terms and solving for id produces:\n ## id = (r1^2-r2^2 + d^2)/2d\n\n id = (r1*r1 - r2*r2 + d*d)/(2 * d)\n\n ## compute the point on the line connecting the two arc centers\n ## that is id away from the first arc\n\n v1 = scale3(sub(x2,x1),1.0/d) # unitary direction vector pointing\n # from x1 to x2\n v2 = scale3(v1,id) # point on line between two circles in\n # coordinate space centered at x1\n\n ## compute direction vector o orthgonal to v1 -- the line that\n ## intersects point v2 and v2+o will pass through our intersection\n ## points\n\n o = orthoXY(v1)\n \n ## now, transform back into world coordinates and calculate the\n ## intersection of this line with either of our arcs, treating\n ## them as circles for now\n\n l = [add(v2,x1),add(add(v2,o),x1)]\n\n s = _lineArcIntersectXY(l,c1,False)\n\n ## as a sanity check, do the same with the other arc. Results\n ## should be within epsilon\n #ss = _lineArcIntersectXY(l,c2,False)\n #foo = list(map(lambda x, y: dist(x,y) < epsilon,s,ss))\n #print(\"sanity check: \" , foo)\n\n if not s or len(s) == 0:\n raise ValueError('no computed intersections, something is wrong')\n\n if not inside and not params:\n return s\n \n ## jump back to arc1 and arc2 space and check angles\n\n s1 = list(map(lambda x: sub(x,x1),s))\n s2 = list(map(lambda x: sub(x,x2),s))\n\n ## compute start and end angles for arcs\n start1=c1[1][1]\n end1=c1[1][2]\n if not (start1 == 0 and end1 == 360):\n start1 = start1 % 360.0\n end1 = end1 % 360.0\n if end1 < start1:\n end1 = end1 + 360.0\n \n start2=c2[1][1]\n end2=c2[1][2]\n \n if not (start2 == 0 and end2 == 360):\n start2 = start2 % 360.0\n end2 = end2 % 360.0\n if end2 < start2:\n end2 = end2 + 360.0\n \n\n ## check each intersection against angles for each arc. 
\n ss = []\n uparam1 = []\n uparam2 = []\n for i in range(len(s)):\n p1 =s1[i]\n p2 =s2[i]\n ang1 = (atan2(p1[1],p1[0]) % pi2)*360.0/pi2\n ang2 = (atan2(p2[1],p2[0]) % pi2)*360.0/pi2\n\n if params:\n u1 = 0\n u2 = 0\n if end1 <= 360.0 or ang1 >= start1 or \\\n ( end1 > 360.0 and ang1 > end1-360.0):\n u1 = (ang1-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n elif end1 > 360.0:\n u1 = (ang1+360.0-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n uparam1 = uparam1 + [ u1 ]\n \n if end2 <= 360.0 or ang2 >= start2 or \\\n ( end2 > 360.0 and ang2 > end1-360.0):\n u2 = (ang2-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n elif end2 > 360.0:\n u2 = (ang2+360.0-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n uparam2 = uparam2 + [ u2]\n \n else:\n good = False\n ## check angle against first arc\n if end1 <= 360.0 and ang1 >= start1 and ang1 <= end1:\n good = True\n elif end1 > 360.0 and (ang1 >= start1 or ang1<= end1-360.0):\n good = True\n\n ## check angle against second arc\n if end2 <= 360.0 and ang2 >= start2 and ang2 <= end2:\n good = good and True\n elif end2 > 360.0 and (ang2 >= start2 or ang2<= end2-360.0):\n good = good and True\n else:\n good = False\n\n ## only add instersection to the list if both checks were passed\n if good:\n ss = ss + [ s[i] ]\n \n if not params and len(ss) == 0:\n return False\n else:\n if params:\n return [uparam1,uparam2]\n else:\n return ss",
"def arcArcIntersectXY(c1,c2,inside=True,params=False):\n \n for c in [c1,c2]:\n if len(c) == 3:\n norm = c[2]\n if dist(norm,vect(0,0,1)) > epsilon:\n raise ValueError('arc passed to lineArcIntersectXY does not lie in x-y plane')\n if not isXYPlanar([c1[0],c2[0]]):\n raise ValueError('arcs passed to arcArcIntersectXY do not lie in same x-y plane')\n return _arcArcIntersectXY(c1,c2,inside,params)",
"def intersect(self, *args, **kwargs): # real signature unknown\n pass",
"def intersection(self, other): # -> BaseGeometry:\n ...",
"def intersection(x, y, f, p):",
"def test_intersect_line_in_one_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 1",
"def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P",
"def test_is_on_intersection():\n center = Coordinates(1, 1)\n radius = 10\n\n i = Intersection(center, radius, 20)\n\n in_circle = Coordinates(2, 2)\n not_in_circle = Coordinates(100, 150)\n before_circumference = Coordinates(1, 10.9)\n on_circumference = Coordinates(1, 11)\n after_circumference = Coordinates(1, 11.1)\n\n assert is_on_intersection(i, in_circle)\n assert is_on_intersection(i, on_circumference)\n assert is_on_intersection(i, before_circumference)\n assert not is_on_intersection(i, not_in_circle)\n assert not is_on_intersection(i, after_circumference)",
"def intersection(self, other):\n \n self_corners = self.corners\n\n other_corners = get_2d_false_corners(other)\n\n #shell()\n\n return planar_intersection_polygon(self_corners,other_corners)",
"def intersection(self, other):\n a, b = min(self.start, self.finish), max(self.start, self.finish)\n c, d = min(other.start, other.finish), max(other.start, other.finish)\n a1 = normalize(a, 0, TWO_PI)\n a, b = a1, b + a1 - a\n c1 = normalize(c, 0, TWO_PI)\n c, d = c1, d + c1 - c\n e, f = max(a, c), min(b, d)\n if f >= e:\n return AngleRange(e, f)\n else:\n return None # no overlap",
"def _lineArcIntersectXY(l,c,inside=True,params=False):\n x=c[0]\n r=c[1][0]\n mpr=mpm.mpf(r)\n \n # is the arc a full circle?\n circle = False\n if c[1][1] == 0 and c[1][2] == 360:\n circle = True\n \n start=c[1][1] % 360.0\n end=c[1][2] %360.0\n\n ## what is the shortest distance between the line and the center\n ## of the arc? If that is greater than r, then there is no\n ## intersection\n dst = linePointXYDist(l,x,inside and not params)\n if dst > r+epsilon:\n return False\n\n ## start by treating the arc as a circle. At this point we know\n ## we have one or two intersections within the line segment,\n ## though perhaps none within the arc segment, which we will test\n ## for later\n \n ## transform points so arc is located at the origin\n p0=sub(l[0],x)\n p1=sub(l[1],x)\n \n ## solve for b in: | b*p0 + (1-b)*p1 | = r\n ## let V= p0-p1, P=p1\n ## | b*V + P |^2 = r^2\n ## b^2(Vx^2 + Vy^2) + 2b(VxPx+VyPy) + Px^2 + Py^2 - r^2 = 0\n ## let a = Vx^2 + Vy^2,\n ## b = 2*(VxPx + VyPy)\n ## cc = Px^2 + Py^2 - r^2\n ## b0 = ( -b + sqrt(b^2 - 4ac) )/ 2a\n ## b1 = ( -b - sqrt(b^2 - 4ac) )/ 2a\n \n V = sub(p0,p1)\n P = p1\n #a = V[0]*V[0]+V[1]*V[1]\n mpV0 = mpm.mpf(V[0])\n mpV1 = mpm.mpf(V[1])\n mpP0 = mpm.mpf(P[0])\n mpP1 = mpm.mpf(P[1])\n a = mpV0*mpV0+mpV1*mpV1\n mpepsilon = mpm.mpf(epsilon)\n if mpm.fabs(a) < mpepsilon*mpepsilon:\n print('degenerate line in lineArcIntersectXY')\n raise ValueError('bad!')\n return False\n # b = 2*(V[0]*P[0]+V[1]*P[1])\n b = 2*(mpV0*mpP0+mpV1*mpP1)\n #cc = P[0]*P[0]+P[1]*P[1]-r*r\n cc = mpP0*mpP0+mpP1*mpP1-mpr*mpr\n d = b*b-4*a*cc\n ## Check to see if we are within epsilon, scaled by the length of the line\n if mpm.fabs(d) < mpm.sqrt(a)*2*mpepsilon: # one point of intersection\n b0 = -b/(2*a)\n b1 = False\n elif d < 0:\n print(\"value of d: \",d,\" value of sqrt(a)*epsilon\",sqrt(a)*epsilon)\n raise ValueError(\"imaginary solution to circle line intersection -- shouldn't happen here\")\n else: # two points of intersection\n b0 = (-b + mpm.sqrt(d))/(2*a)\n b1 = (-b - mpm.sqrt(d))/(2*a)\n\n # use computed parameters to calculate solutions, still in\n # circle-at-origin coordinates\n s = [ add(scale3(V,float(b0)),p1) ]\n if b1:\n s = s + [ add(scale3(V,float(b1)),p1) ]\n\n if not inside or circle or params: # transform back into world\n # coordinates\n pp = list(map(lambda q: add(q,x),s))\n if params:\n uu1 = []\n uu2 = []\n for i in range(len(pp)):\n uu1 = uu1 + [ unsampleline(l,pp[i]) ]\n uu2 = uu2 + [ unsamplearc(c,pp[i]) ]\n return [uu1, uu2]\n else:\n return pp\n\n ## see if any of the intersections we've found lie between\n ## start and end of the arc\n \n ss = []\n for i in s:\n ang = (atan2(i[1],i[0]) % pi2)*360.0/pi2\n\n if end > start and ang >= start and ang <= end:\n ss = ss + [ add(x,i) ]\n elif end < start and (ang >= start or ang<= end):\n ss = ss + [ add(x,i) ]\n\n if len(ss) == 0:\n return False\n return ss",
"def intersects(*args):\r\n if len(args) == 2:\r\n p0, p1, p2, p3 = *args[0], *args[1]\r\n elif len(args) == 4:\r\n p0, p1, p2, p3 = args\r\n else:\r\n raise AttributeError(\"Pass 2, 2-pnt lines or 4 points to the function\")\r\n #\r\n # ---- First check ---- np.cross(p1-p0, p3-p2 )\r\n p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y = *p0, *p1, *p2, *p3\r\n s10_x = p1_x - p0_x\r\n s10_y = p1_y - p0_y\r\n s32_x = p3_x - p2_x\r\n s32_y = p3_y - p2_y\r\n denom = s10_x * s32_y - s32_x * s10_y\r\n if denom == 0.0:\r\n return False\r\n #\r\n # ---- Second check ---- np.cross(p1-p0, p0-p2 )\r\n den_gt0 = denom > 0\r\n s02_x = p0_x - p2_x\r\n s02_y = p0_y - p2_y\r\n s_numer = s10_x * s02_y - s10_y * s02_x\r\n if (s_numer < 0) == den_gt0:\r\n return False\r\n #\r\n # ---- Third check ---- np.cross(p3-p2, p0-p2)\r\n t_numer = s32_x * s02_y - s32_y * s02_x\r\n if (t_numer < 0) == den_gt0:\r\n return False\r\n #\r\n if ((s_numer > denom) == den_gt0) or ((t_numer > denom) == den_gt0):\r\n return False\r\n #\r\n # ---- check to see if the intersection point is one of the input points\r\n t = t_numer / denom\r\n # substitute p0 in the equation\r\n x = p0_x + (t * s10_x)\r\n y = p0_y + (t * s10_y)\r\n # be careful that you are comparing tuples to tuples, lists to lists\r\n if sum([(x, y) == tuple(i) for i in [p0, p1, p2, p3]]) > 0:\r\n return False\r\n return True",
"def intersect(self, rays):\n raise NotImplementedError",
"def lineArcIntersectXY(l,c,inside=True,params=False):\n \n if len(c) == 3:\n norm = c[2]\n if dist(norm,vect(0,0,1)) > epsilon:\n raise ValueError('arc passed to lineArcIntersectXY does not lie in x-y plane')\n points = l + [ c[0] ]\n if not isXYPlanar(points):\n raise ValueError('line and circle passed to lineArcIntersectXY do not all lie in same x-y plane')\n return _lineArcIntersectXY(l,c,inside,params)",
"def intersection(self, other):\n ### Original\n from pyresample.spherical_geometry import intersection_polygon\n # MLS This was failing if all the corners of the \n # area_definition fell inside the data box definition.\n # I think __contains__ in spherical_geometry is\n # not working properly? This seems to work, should\n # watch for false positives ?\n # This DOES NOT WORK for over the pole...\n allselfcornersin = False\n allothercornersin = False\n retcorners = intersection_polygon(self.corners, other.corners)\n if not retcorners:\n # Only try these if intersection_polygon didn't return anything.\n for i in self.corners:\n if planar_point_inside(i,other.corners):\n allselfcornersin = True\n else:\n allselfcornersin = False\n for i in other.corners:\n if planar_point_inside(i,self.corners):\n allothercornersin = True\n else:\n allothercornersin = False\n\n if allselfcornersin:\n return self.corners\n if allothercornersin: \n return other.corners\n return retcorners\n \n ### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in intersection')\n #shell()\n #sphpoly = SphPolygon(self.corners)\n #return sphpoly.intersection(SphPolygon(other.corners))",
"def intersection(self, other):\n log.info('self: '+str(self)+' other: '+str(other))\n if self == other:\n # Used to be return True, that is definitely not right (expects Coordinate)\n # Do we want start or end ? Does it matter? Lines are the same, everything is\n # an intersection.\n return self.start\n # If any of the start/end points match, return that point.\n if self.end==other.start or self.end == other.end:\n return self.end \n if self.start==other.start or self.start == other.end: \n return self.start\n\n # Line equation: y = mx + b\n # m = (y2-y1)/(x2-x1)\n # B_self = y - M_self*x\n # Pick any x/y on the line - try end point\n # B_self = self.end.lat - M_self*self.end.lon\n # B_other = other.end.lat - M_self*self.end.lon\n from pyresample.spherical_geometry import Coordinate\n\n selfendlon = self.end.lon\n selfstartlon = self.start.lon\n otherendlon = other.end.lon\n otherstartlon = other.start.lon\n # Not sure if this is necessary, or good...\n# if self.end.lon < 0:\n# selfendlon = self.end.lon + 2*math.pi\n# if self.start.lon < 0:\n# selfstartlon = self.start.lon + 2*math.pi\n# if other.end.lon < 0:\n# otherendlon = other.end.lon + 2*math.pi\n# if other.start.lon < 0:\n# otherstartlon = other.start.lon + 2*math.pi\n\n log.info(' self lons: '+str(math.degrees(selfstartlon))+' '+str(math.degrees(selfendlon))+' other lons: '+str(math.degrees(otherstartlon))+' '+str(math.degrees(otherendlon)))\n\n # If both vertical, will be no intersection\n if abs(selfendlon - selfstartlon) < EPSILON and abs(otherendlon - otherstartlon) < EPSILON:\n log.info(' Both vertical, no intersection')\n return None\n # If self is vertical, but not parallel, intersection will be selfstartlon and lat = Mother*lon+B_other\n if abs(selfendlon - selfstartlon) < EPSILON:\n lon = selfstartlon\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n B_other = other.end.lat - M_other*otherendlon\n lat = M_other*lon+B_other\n log.info(' self is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and\n lon < max([otherendlon,otherstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n # same for other\n if abs(otherendlon - otherstartlon) < EPSILON:\n lon = otherstartlon\n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n B_self = self.end.lat - M_self*selfendlon\n lat = M_self*lon+B_self\n log.info(' other is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and \n lon > min([selfendlon,selfstartlon]) and\n lon < max([selfendlon,selfstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS Use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n\n \n\n # Get slopes of the lines \n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n \n # If they are parallel, no intersection\n if (M_self-M_other) < EPSILON:\n log.info(' self and other are parallel, no intersection')\n return None\n\n # Get the y-intercepts of the lines \n B_self = self.end.lat - M_self*selfendlon\n B_other = other.end.lat - M_other*otherendlon\n\n # Solve the equation\n # y=m1x+b1 and y=m2x+b2, equate y's so m1x+b1=m2x+b2, x = (b1-b2)/(m2-m1)\n # equate x's so x=(y-b1)/m1=(y-b2)/m2, y = (b1m2-b2m1)/(m2-m1)\n lon = (B_self - B_other)/(M_other - M_self)\n lat = (B_self*M_other - B_other*M_self)/(M_other-M_self)\n\n # Make sure lat/lon intersects within the line segment, and not outside.\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and \n lon < max([otherendlon,otherstartlon]) and\n lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([selfendlon,selfstartlon]) and \n lon < max([selfendlon,selfstartlon])):\n log.info(' self and other intersect within segment')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n log.info(' self and other intersect, but not within segment')\n return None",
"def test_does_intersect() -> None:\n line_1 = Line(k=1, n=0)\n line_2 = Line(k=2.5, n=1)\n line_3 = Line(k=2.5, n=3)\n\n assert line_1.does_intersect(line_1) == True\n assert line_1.does_intersect(line_2) == True\n assert line_2.does_intersect(line_3) == False",
"def intersection(s1, s2):\n \"*** YOUR CODE HERE ***\"\n return s1.intersection(s2) # ...",
"def circ_intersect(v0, v1, r0, r1):\n dist = pt_dist(v0, v1) #calculate distance between\n if dist > (r0 + r1): return [] #out of range\n if dist < abs(r0 - r1): return [] #circle contained\n if dist == 0: return [] #same origin\n \n a = (r0**2 - r1**2 + dist**2) / (2*dist)\n b = dist - a\n h = math.sqrt(r0**2 - a**2)\n \n v2x = v0[0] + a*(v1[0] - v0[0])/dist\n v2y = v0[1] + a*(v1[1] - v0[1])/dist\n \n x3p = v2x + h*(v1[1] - v0[1])/dist\n y3p = v2y - h*(v1[0] - v0[0])/dist\n x3n = v2x - h*(v1[1] - v0[1])/dist\n y3n = v2y + h*(v1[0] - v0[0])/dist\n \n return np.array([[x3p, y3p,0.], [x3n, y3n,0.]])",
"def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None",
"def intersects(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False",
"def test_intersect_line_in_no_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 0",
"def segmentsIntersect(self, other, allowProjInt = False):\n \n \"\"\"\n If we are not allowing projected intersection and the bounding boxes\n do not intersect then return -3, None.\n \"\"\"\n if(not(allowProjInt) and not(self.doBoundingBoxesIntersect(other))): return -3, None #return if bounding boxes do not intersect\n \"\"\" A special case for colinear lines. \"\"\" \n if(self.areColinear(other)):\n \"\"\"\n First place all four endpoint into a set. This will elliminate shared\n end points. Next, convert the set back into a list so it can\n finally be sorted.\n \"\"\"\n pointList = sorted(list(set([self.start, self.end, other.start, other.end])), key=self.calcT) \n if len(pointList) == 3:\n \"\"\"\n if there are only three points in the list then return 2, the\n middle point in the list since it is the shared point of the\n two lines.\n \"\"\"\n return 2, pointList[1] #if they are colinear and two ends have the same point return that point\n elif len(pointList) == 2:\n \"\"\" If the two lines have the same endpoints. \"\"\"\n return 2.5, self.getMidPoint()\n else:\n \"\"\"\n If the length was not three then we know it is length 4 in which case\n we turn the two middle points into a line and return 3, the line's\n midpoint.\n \"\"\"\n tempLine = Line(pointList[1], pointList[2])\n return 3, tempLine.getMidPoint() #If they are colinear return half way inbetween middle two points\n \"\"\"\n To calculate the intersection of two points we put the lines into the\n form P+tr and Q+us where P and Q are the starting points of the lines\n r and s are vectors form the starting point to the end point, and\n t and u are scalars. Set the two equations equal to each other and \n then solve for t and u. If t and u are in the range [0-1] then the\n intersection point lines on the lines, else it is a projected point.\n \"\"\"\n r = np.subtract(self.end.get2DPoint(), self.start.get2DPoint())\n s = np.subtract(other.end.get2DPoint(), other.start.get2DPoint())\n Q_Less_P = np.subtract(other.start.get2DPoint(), self.start.get2DPoint())\n denom = np.cross(r, s)*1.0\n t = np.cross(Q_Less_P, s)/denom\n u = np.cross(Q_Less_P, r)/denom \n point = p.Point(self.start.x + r[c.X]*t, self.start.y+r[c.Y]*t) \n #If t or u are not in the range 0-1 then the intersection is projected\n if(t > 1 or u > 1 or t < 0 or u < 0):\n \"\"\"\n Due to floating point problems sometimes if t or u is outside the 0-1\n range we end up inside this if statement but are actually at the end\n of one of the lines. I can't figure out how to properly add in a tolerance\n so we are taking the four end points putting them into a list,\n then comparing them to the calculated point. The Point module is\n properly handling tolerances so if the point == any of the end\n points then we should not return a projected point.\n \"\"\"\n if not any(point == lineEnd for lineEnd in (self.start, self.end,\n other.start, other.end)):\n return -1, point #return for projected intersection of non-colinear lines\n return 1, point #lines intersect at given point",
"def intersection(self, axis2):",
"def intersect(*args, caching: bool=True, firstSurface: bool=True, nodeState: Union[int, bool]=0,\n tolerance: Union[float, bool]=0.01, constructionHistory: bool=True,\n curveOnSurface: bool=True, name: AnyStr=\"\", object: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def intersect(self, other):\n if isinstance(other, Arc):\n return other.intersect(self)\n elif not isinstance(other, LineSegment):\n raise TypeError(other)\n S = (self.p2 - self.p1).scale(1.)\n T = (other.p2 - other.p1).scale(1.)\n denom = S.y * T.x - S.x * T.y\n if nearly_zero(denom):\n if nearly_zero(S.cross(other.p1 - self.p1)):\n q1 = (other.p1 - self.p1) * S / (S * S)\n q2 = (other.p2 - self.p1) * S / (S * S)\n if q2 < q1:\n q1, q2 = q2, q1\n left, right = max(0, q1), min(1, q2)\n if left < right:\n return LineSegment(self.p1 + left * S, self.p1 + right * S)\n return None\n a = (T.x * (other.p1.y - self.p1.y) - T.y * (other.p1.x - self.p1.x)) / denom\n b = (S.x * (other.p1.y - self.p1.y) - S.y * (other.p1.x - self.p1.x)) / denom\n if 0 <= a <= 1 and 0 <= b <= 1:\n return self.p1 + a * S\n # else return None because we don't intersect",
"def intersection(self, other):\n return _binary_geo(arctern.ST_Intersection, self, other)",
"def intersects(a0, a1, b0, b1):\n # First line is vertical\n if a0[0] == a1[0]:\n # Both lines are vertical\n if b0[0] == b1[0]:\n return (a0[0] == b0[0]) and (in_range(b0[1], a0[1], a1[1]) or in_range(b1[1], a0[1], a1[1]))\n eqn = get_eqn(b0, b1)\n y = apply_eqn(eqn, a0[0])\n return in_range(y, a0[1], a1[1])\n # Only second line is vertical\n if b0[0] == b1[0]:\n eqn = get_eqn(a0, a1)\n y = apply_eqn(eqn, b0[0])\n return in_range(y, b0[1], b1[1])\n # Parallel lines\n eqn0 = get_eqn(a0, a1)\n eqn1 = get_eqn(b0, b1)\n if eqn0[0] == eqn1[0]:\n if eqn0[1] != eqn1[1]:\n return False\n return in_range(a0[0], b0[0], b1[0]) or in_range(a1[0], b0[0], b1[0])\n # Get intersection\n i = intersection(eqn0, eqn1)\n # Check if intersection is between end points\n return in_range(i[0], a0[0], a1[0]) and in_range(i[0], b0[0], b1[0]) and in_range(i[1], a0[1], a1[1]) and in_range(i[1], b0[1], b1[1])",
"def from_endpoints(cls, p1, p2, radius):\n # radius > 0 means we go clockwise from p1 to p2, radius < 0 means we go counter-clockwise\n x = p2 - p1\n if radius**2 < 0.25 * x.dot(x):\n raise Exception(\"radius is too small for this arc, make it bigger\")\n w = (radius**2 - 0.25 * x.dot(x)) ** .5\n wn = Vector(x.y, -x.x).normalize()\n if radius * wn.cross(x) < 0:\n wn = -wn\n center = p1 + 0.5 * x + w * wn\n\n def get_angle(p):\n return math.atan2(p.y, p.x)\n a, b = get_angle(p1-center), get_angle(p2-center)\n if radius < 0:\n while b < a:\n b += TWO_PI\n else:\n while a < b:\n a += TWO_PI\n return cls(center, abs(radius), a, b)",
"def _intersect(edge1, edge2):\n # consecutive edges connexions are not intersections\n if edge1.end == edge2.start or edge2.end == edge1.start:\n return False\n\n # test for existence of an intersect point\n lsign = rsign = 0.0\n lsign = _isLeft(edge1.start, edge1.end, edge2.start) # edge2 start point sign\n rsign = _isLeft(edge1.start, edge1.end, edge2.end) # edge2 end point sign\n if (lsign * rsign > 0): # edge2 endpoints have same sign relative to edge1\n return False # => on same side => no intersect is possible\n lsign = _isLeft(edge2.start, edge2.end, edge1.start) # edge1 start point sign\n rsign = _isLeft(edge2.start, edge2.end, edge1.end) # edge1 end point sign\n if (lsign * rsign > 0): # edge1 endpoints have same sign relative to edge2\n return False # => on same side => no intersect is possible\n # the segments edge1 and edge2 straddle each other\n return True # => an intersect exists"
] | [
"0.6679789",
"0.65497285",
"0.65160197",
"0.64776295",
"0.64027035",
"0.63585263",
"0.6221283",
"0.6192833",
"0.6165679",
"0.6152619",
"0.60648733",
"0.6053784",
"0.60018986",
"0.60005635",
"0.59932536",
"0.5963659",
"0.59488946",
"0.5945772",
"0.59424376",
"0.5927644",
"0.5926168",
"0.59261286",
"0.59252757",
"0.5919272",
"0.590484",
"0.5863473",
"0.5851771",
"0.5833756",
"0.58203226",
"0.5792958"
] | 0.6790867 | 0 |
Create a user context based on a cluster's stored Keystone trust. | def make_cluster_context(cluster, show_deleted=False):
context = RequestContext(user_name=cluster.trustee_username,
password=cluster.trustee_password,
trust_id=cluster.trust_id,
show_deleted=show_deleted,
user_domain_id=CONF.trust.trustee_domain_id,
user_domain_name=CONF.trust.trustee_domain_name)
return context | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_context():\n return {'app': app, 'db': db, 'User': User}",
"def _make_context():\n return {'app': app, 'db': db, 'User': User}",
"def _make_context():\n return {'app': app,\n 'db': db,\n 'User': User\n }",
"def _make_context():\n\n return {\n 'app': app,\n 'db': db,\n 'User': User\n }",
"def make_shell_context():\n return {'User': User}",
"async def _create_context(self) -> ssl.SSLContext:\n context = utils.server_context_modern()\n\n await self.cloud.run_executor(\n context.load_cert_chain,\n self._acme.path_fullchain,\n self._acme.path_private_key,\n )\n\n return context",
"def create_user(self):\n # TODO-ROB: This is used ONLY when the user registers in flask\n # TODO-ROB: Create the cookiecutter.json file\n # extra_context overrides user and default configs\n cookiecutter(self.user_cookie, no_input=True, extra_context={\"user_name\": self.user}, output_dir=self.users)",
"def new_user(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n new_user = User(username=\"test\", password=pwd_context.hash(\"test\"))\n dbsession.add(new_user)",
"def create_keystone_v3_user(self, **kwargs):\n LOG_OBJ.debug(\"Creating the user.\")\n print self.project_info\n\n _url = \"http://\" + self.host_ip + \":35357/v3/users\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _project_info = {\"user\": {}}\n for argument in [\"name\", \"description\", \"domain_id\",\n \"default_project_id\", \"password\",\n \"enable\", \"disable\"]:\n try:\n _project_info['user'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_project_info)\n response = self.request(\"POST\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating user\")\n print (\"No response from Server while creating user\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating user Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating user Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"User details : %s \" % output)\n print (\"User details : %s \" % output)\n return output['user']['id']",
"def setupBaseSecurityContext(self):\n\n resp = gss.initSecContext(self.service_name,\n flags=self.flags,\n mech_type=self.mech_type,\n ttl=self.ttl)\n\n (self.ctx, _, _, self.token, self.last_ttl, _) = resp\n return self.token",
"def test_store_saves_creds_trust(self):\n cfg.CONF.set_override('deferred_auth_method', 'trusts')\n\n self.m.StubOutWithMock(keystone.KeystoneClientPlugin, '_create')\n keystone.KeystoneClientPlugin._create().AndReturn(\n fakes.FakeKeystoneClient(user_id='auser123'))\n self.m.ReplayAll()\n\n self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)\n self.stack.store()\n\n # The store should've created a user_creds row and set user_creds_id\n db_stack = stack_object.Stack.get_by_id(self.ctx, self.stack.id)\n user_creds_id = db_stack.user_creds_id\n self.assertIsNotNone(user_creds_id)\n\n # should've stored the trust_id and trustor_user_id returned from\n # FakeKeystoneClient.create_trust_context, username/password should\n # not have been stored\n user_creds = ucreds_object.UserCreds.get_by_id(user_creds_id)\n self.assertIsNone(user_creds.get('username'))\n self.assertIsNone(user_creds.get('password'))\n self.assertEqual('atrust', user_creds.get('trust_id'))\n self.assertEqual('auser123', user_creds.get('trustor_user_id'))\n\n # Check the stored_context is as expected\n expected_context = context.RequestContext(\n trust_id='atrust', trustor_user_id='auser123',\n request_id=self.ctx.request_id, is_admin=False).to_dict()\n stored_context = self.stack.stored_context().to_dict()\n self.assertEqual(expected_context, stored_context)\n\n # Store again, ID should not change\n self.stack.store()\n self.assertEqual(user_creds_id, db_stack.user_creds_id)",
"def init_context(self) -> None:\n self.context = AKSManagedClusterContext(\n self.cmd, AKSManagedClusterParamDict(self.__raw_parameters), self.models, DecoratorMode.CREATE\n )",
"def create_user(user_name, password, tenant_name, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenants = keystone.tenants.list()\n my_tenant = [x for x in tenants if x.name==tenant_name][0]\n my_user = keystone.users.create(name=user_name, password=password, tenant_id=my_tenant.id)\n print my_user\n return my_user.to_dict()",
"def create_user_credentials(storage_type, storage_id, space_name, client_ip,\n user_details):\n user_id = user_details[\"id\"]\n if user_id == \"0\":\n return PosixCredentials(0, 0)\n\n uid = gid = gen_storage_id(user_id)\n return PosixCredentials(uid, gid)",
"def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}",
"def _create_user(userid, **kw):\n\n new_user = User(userid, **kw)\n USERS[new_user.token] = new_user\n return USERS[new_user.token]",
"def user(db):\n user = UserFactory(password='myprecious')\n db.session.commit()\n return user",
"def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user",
"def get_user_cred(self):\n if Config.eap_outer == 'PEAP' or Config.eap_outer == 'TTLS':\n self.__get_credentials_from_config()",
"def user(db):\n user = UserFactory(password='myPrecious')\n db.session.commit()\n return user",
"def _make_context():\n return {\n 'api': application.mounts['/api'],\n 'db': db,\n 'User': User,\n 'admin': application.mounts['/admin']\n }",
"def create(self):\n data = {\"language\": self.language, \"clusterId\": self.cluster_id}\n response = self.post(self.url, \"1.2\", \"contexts/create\", data=data, token=self.token)\n self.id = response.get(\"id\", None)\n if self.id is None:\n raise DatabricksApiException(403, 4, \"Context ID missing\")",
"def make_shell_context():\n\n return dict(app=app, db=db, User=User)",
"def make_shell_context():\n\n return dict(app=app, db=db, User=User)",
"def get_synthetic_context(self, args):\n #create_data = CreateData(args.num_of_users, args.num_of_arms, args.dim)\n mean = [0.2, 0.9, 0.5, 3, 1.1, 0.9, 2, 2.5, 1.6, 1.8] * int(self.dims / 10)\n var = [3, 2, 4, 3, 3.5, 5.5, 5, 3.5, 5, 3.5] * int(self.dims / 10)\n context_gen = self.data(mean, var)\n # normalize\n ctx_norm = np.max(np.sqrt(np.sum(context_gen * context_gen, 2)), 1)\n for idx in range(self.users):\n context_gen[idx] = context_gen[idx] / ctx_norm[idx]\n self.contexts = context_gen",
"def get_context(self, extra_ctx=None, **kwargs):\n ctx = {\n 'user': self.user,\n }\n if extra_ctx:\n ctx.update(extra_ctx)\n ctx.update(kwargs)\n return ctx",
"def setup(self,context,result):\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-add\", self.user_name,\n \"-pw\", self.user_password],[])\n except:\n result.note_exception(cause=\"Resource setup: Can't add user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result,Result.ERROR,'GSEC','Add new user',\n stdout,stderr)\n return\n else:\n self.do_cleanup = True",
"def with_user(data_builder, randstr, as_public):\n api_key = randstr()\n user = data_builder.create_user(api_key=api_key, root=False)\n session = copy.deepcopy(as_public)\n session.headers.update({'Authorization': 'scitran-user ' + api_key})\n return attrdict.AttrDict(user=user, api_key=api_key, session=session)",
"def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx",
"def template_context(**kwrds):\n usr = User.get_user()\n\n default = {\n 'usr': usr\n }\n default.update(kwrds)\n return default"
] | [
"0.5965407",
"0.59498507",
"0.58217144",
"0.5815646",
"0.5801412",
"0.5688743",
"0.5554952",
"0.5467238",
"0.54271215",
"0.5412256",
"0.5404341",
"0.53961015",
"0.5374897",
"0.53746927",
"0.5358552",
"0.53158754",
"0.5300448",
"0.5287158",
"0.52825874",
"0.52765644",
"0.5264227",
"0.5255281",
"0.5236425",
"0.5236425",
"0.52119374",
"0.5208027",
"0.5193905",
"0.5179475",
"0.5172075",
"0.5171007"
] | 0.63946825 | 0 |
Download the POJO for the leader model in AutoML to the directory specified by path. If path is an empty string, then dump the output to screen. | def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
return h2o.download_pojo(self.leader, path, get_jar=get_genmodel_jar, jar_name=genmodel_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_mojo(self, path=\".\", get_genmodel_jar=False, genmodel_name=\"\"):\n return ModelBase.download_mojo(self.leader, path, get_genmodel_jar, genmodel_name)",
"def download_model(key, output_path):\n print(\"Looking fro model in {}\".format(output_path))\n if os.path.exists(output_path):\n print('File at: {} already exists'.format(output_path))\n else:\n download_file_from_s3(key, output_path)",
"def dump(self, path):\n torch.save(self,path)",
"def download_model_from_url(\n url: str = typer.Argument(..., help='The link to a model'),\n path: Path = typer.Argument(..., file_okay=True, help='The saved path and file name.')\n):\n\n from modelci.hub.registrar import download_model_from_url\n\n download_model_from_url(url, path)\n typer.echo(f'{path} model downloaded successfully.')",
"def save(self, path):\n print('Saving model... %s' % path)\n torch.save(self, path)",
"def save(self, path):\n print('Saving model... %s' % path)\n torch.save(self, path)",
"def save(self, path):\n print('Saving model... %s' % path)\n torch.save(self, path)",
"def save(self, path):\n print('Saving model... %s' % path)\n torch.save(self, path)",
"def save(self, path):\n print('Saving model... %s' % path)\n torch.save(self, path)",
"def download(self):\n if not os.path.exists(self.pkg_dir):\n os.makedirs(self.pkg_dir)\n\n url = self.metadata_pkg[\"url\"]\n\n # Download modelpkg only if not already downloaded.\n if os.path.exists(self.file_path):\n self.is_downloaded = True\n else:\n print(f\"Fetching {os.path.basename(self.file_path)} model package from {url} to {self.file_path}\", flush=True)\n r = requests.get(url, stream=True)\n with open(self.file_path, \"wb\") as file_out:\n for chunk in r.iter_content(chunk_size=2048):\n file_out.write(chunk)\n r.close()\n self.is_downloaded = True",
"def download_model():\n logging.info(\"[genreml] Downloading model...\")\n with urllib.request.urlopen(config.FMAModelConfig.FMA_MODEL_URL) as f:\n data = f.read()\n open(config.FMAModelConfig.FMA_MODEL_PATH, 'wb').write(data)\n logging.info(\"[genreml] Model download complete\")",
"def download_trainer(self):\n # Connect to bucket on Cloud Storage\n trainer_bucket = self.storage_client.get_bucket(\"trainer\")\n blob = trainer_bucket.blob(\"trainer.yml\")\n blob.download_to_filename(\"trainer.yml\")\n print(\"Trainer.yml downloaded\")",
"def save(self, output_path):\n with open(output_path, \"wb\") as file:\n dill.dump(self, file)",
"def save(self, path):\n torch.save(self, path)",
"def save(self, path):\n torch.save(self, path)",
"def save(self, path: str):\n torch.save(self, path)",
"def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)",
"def save(self, path):\n with tempfile.TemporaryDirectory() as td:\n U.save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n dill.dump((model_data, self._act_params), f)",
"def download_model(\n self, model_name: str, save_path: Union[str, Path] = \"./zoo/model.pth\"\n ):\n # check if the model name is valide\n assert model_name in list(\n self.meta.name\n ), f\"requested model {model_name} does not exist\"\n # TODO: fix, using pathlib\n # check if save_path already has the model\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n else:\n assert not os.path.exists(\n save_path\n ), f\"the save_path {save_path} is already used\"\n\n # fetch the model file\n model_id = self.meta[self.meta[\"name\"] == model_name][\"models\"].iloc[0]\n model_path = model_id.split(\"/\")\n self.pkg[model_path[0]][model_path[1]].fetch(save_path)",
"def set_model_output(self, path):\n\n file = f'model_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.model_output_file = path_inc(path, file)",
"def download(self, download_path):\n return",
"def run(output, path):\n\n # Derive path to dbfile\n dbfile = os.path.join(path, \"articles.sqlite\")\n\n # Stream text from database to file\n Export.stream(dbfile, output)",
"def export(output, model_path, run_id, mlflow_home):\n mlflow.azureml.export(output=output, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)",
"def load_model(self, path):\n pass",
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def load(self, path):\n load_model(path, self)",
"def save(self, output_path):\r\n self.graph.cleanup().toposort()\r\n model = gs.export_onnx(self.graph)\r\n output_path = os.path.realpath(output_path)\r\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\r\n onnx.save(model, output_path)\r\n log.info(\"Saved ONNX model to {}\".format(output_path))",
"def download(path):\n\treturn send_from_directory(\"results\", path, as_attachment=True)",
"def download(self, session):\n target_path = self.get_target_full_dir()\n os.chdir(target_path)\n schema_get = session.get(self.get_full_url(), verify=False)\n target_name = self.get_target_name()\n logger.debug('Starting download of file {} to {}.'.format(target_name.upper(), target_path))\n with open(os.path.join(target_path, target_name), \"wb\") as code:\n code.write(schema_get.content)\n logger.info('{} file has been downloaded successfully.'.format(target_name.upper()))",
"def download_model(name: str) -> str:\n model_name, model_type, model_url = ModelInfo.get_model_info(name)\n model_path = _create_dirs(model_name)\n if model_type == \"single\":\n model_path = _download_file(model_url, model_path)\n elif model_type == \"zip\":\n model_path = _download_zip_model(model_url, model_path)\n else:\n print(f\"model type {model_type} not yet implemented\")\n model_path = \"\"\n return model_path"
] | [
"0.6805856",
"0.5677127",
"0.5564327",
"0.5526138",
"0.54357004",
"0.54357004",
"0.54357004",
"0.54357004",
"0.54357004",
"0.54064924",
"0.5355542",
"0.53408563",
"0.52581507",
"0.52444184",
"0.52444184",
"0.52443415",
"0.52355677",
"0.5219559",
"0.5150729",
"0.514913",
"0.5146409",
"0.5140911",
"0.51289773",
"0.51134306",
"0.51033664",
"0.50876725",
"0.50850654",
"0.5075989",
"0.5052592",
"0.50245285"
] | 0.6896179 | 0 |
Retrieve a string indicating the project_name of the automl instance to retrieve. | def project_name(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")",
"def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")",
"def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")",
"def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")",
"def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")",
"def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")",
"def project(self) -> str:\n return pulumi.get(self, \"project\")",
"def project(self) -> str:\n return pulumi.get(self, \"project\")",
"def project(self) -> str:\n return pulumi.get(self, \"project\")",
"def project(self) -> str:\n return pulumi.get(self, \"project\")",
"def project(self) -> str:\n return pulumi.get(self, \"project\")",
"def project(self) -> str:\n return pulumi.get(self, \"project\")",
"def get_project_name(self):\n return self.line_edit.text()",
"def getProjectName():",
"def _project_name(self):\n name = getattr(self._req.req, 'project_name', '')\n if name:\n return name\n raise ValueError('Requirement has no project_name.')",
"def full_name(self):\n if not self.project_id:\n raise ValueError('Missing project ID.')\n return 'projects/%s' % (self.project_id)",
"def getProjectName(self, projectId: int) -> str:\n query = f\"SELECT name FROM projects WHERE id = {projectId}\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result[0][0]",
"def __str__(self):\n return self.project_name",
"def __str__(self):\n return self.project_name",
"def get_project_name(self, project_id):\n return self.project_names.get(project_id)",
"def _get_project_name(self, context, project_id):\n return project_id",
"def log_project(self) -> str:\n return pulumi.get(self, \"log_project\")",
"def get_project_name(self):\n remote = self.get_gitlab_remote()\n return self.get_project_name_from_url(remote.url)",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")",
"def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")"
] | [
"0.75766367",
"0.75766367",
"0.75766367",
"0.75766367",
"0.75766367",
"0.75766367",
"0.75074005",
"0.75074005",
"0.75074005",
"0.75074005",
"0.75074005",
"0.75074005",
"0.74999297",
"0.74356097",
"0.74130285",
"0.73136413",
"0.7302438",
"0.72723585",
"0.72723585",
"0.72267807",
"0.71930367",
"0.71386063",
"0.7107365",
"0.7097773",
"0.7097773",
"0.7097773",
"0.7097773",
"0.7097773",
"0.7097773",
"0.7097773"
] | 0.7629592 | 0 |
Retrieve the leaderboard from an H2OAutoML object | def leaderboard(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLeaderboard(self, **kwargs):\n board = []\n scores = sorted(self._players, key=lambda score: score.dct_net['total'])\n pos = 1\n prev_total = None\n for sc in scores:\n score_dct = {\n 'player': sc.doc,\n 'total' : sc.dct_net['total'],\n }\n if prev_total != None and score_dct['total'] > prev_total:\n pos += 1\n prev_total = score_dct['total']\n score_dct['pos'] = pos\n for n,net in enumerate(sc.dct_net['holes']):\n if net == None:\n break\n else:\n n += 1\n score_dct['thru'] = n\n score_dct['line'] = '{:<3} {:<6} {:>5} {:>4}'.format(\n score_dct['pos'], score_dct['player'].nick_name, score_dct['total'], score_dct['thru'])\n board.append(score_dct)\n self.dctLeaderboard['leaderboard'] = board\n return self.dctLeaderboard",
"def leaderboard(self):\n if not self._lb:\n self._update_lb()\n return self._lb",
"def leaderboard(self):\n\n url = API_PATH[\"leaderboard\"].format(region_url=self.region_url)\n\n response = requests.get(url, headers=self.headers)\n\n return response.json()",
"def leaderboard():\n # Get leaderboard and user information\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n # Get top gainer leaderboards\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('leaderboard.html',\n leaderboard=leaderboard,\n current_user_info=current_user_info,\n weektopgainers=weektopgainers,\n monthtopgainers=monthtopgainers,\n userbalance=current_user.balance)",
"def get_leaderboard(n, user_id):\r\n sql = text('''\r\n WITH global_rank AS (\r\n WITH scores AS (\r\n SELECT user_id, COUNT(*) AS score FROM task_run\r\n WHERE user_id IS NOT NULL GROUP BY user_id)\r\n SELECT user_id, score, rank() OVER (ORDER BY score desc)\r\n FROM scores)\r\n SELECT rank, id, name, fullname, email_addr, info, score FROM global_rank\r\n JOIN public.\"user\" on (user_id=public.\"user\".id) ORDER BY rank\r\n LIMIT :limit;\r\n ''')\r\n\r\n results = db.engine.execute(sql, limit=n)\r\n\r\n top_users = []\r\n user_in_top = False\r\n for row in results:\r\n if (row.id == user_id):\r\n user_in_top = True\r\n user=dict(\r\n rank=row.rank,\r\n id=row.id,\r\n name=row.name,\r\n fullname=row.fullname,\r\n email_addr=row.email_addr,\r\n info=dict(json.loads(row.info)),\r\n score=row.score)\r\n top_users.append(user)\r\n if (user_id != 'anonymous'):\r\n if not user_in_top:\r\n sql = text('''\r\n WITH global_rank AS (\r\n WITH scores AS (\r\n SELECT user_id, COUNT(*) AS score FROM task_run\r\n WHERE user_id IS NOT NULL GROUP BY user_id)\r\n SELECT user_id, score, rank() OVER (ORDER BY score desc)\r\n FROM scores)\r\n SELECT rank, id, name, fullname, email_addr, info, score FROM global_rank\r\n JOIN public.\"user\" on (user_id=public.\"user\".id)\r\n WHERE user_id=:user_id ORDER BY rank;\r\n ''')\r\n user_rank = db.engine.execute(sql, user_id=user_id)\r\n u = User.query.get(user_id)\r\n # Load by default user data with no rank\r\n user=dict(\r\n rank=-1,\r\n id=u.id,\r\n name=u.name,\r\n fullname=u.fullname,\r\n email_addr=u.email_addr,\r\n info=u.info,\r\n score=-1)\r\n for row in user_rank: # pragma: no cover\r\n user=dict(\r\n rank=row.rank,\r\n id=row.id,\r\n name=row.name,\r\n fullname=row.fullname,\r\n email_addr=row.email_addr,\r\n info=dict(json.loads(row.info)),\r\n score=row.score)\r\n top_users.append(user)\r\n\r\n return top_users",
"def get_leaderboard(request):\n\n includedUsers = User.objects.filter(hide_leaderboard=False, is_staff=False)\n\n # ordered list of points, index denoting leaderboard position (rank)\n # distinct values means that everyone with the same points has the same rank\n rankings = []\n for item in includedUsers.values(\"points\").distinct().order_by(\"-points\"):\n rankings.append(item[\"points\"])\n\n includedUsers = includedUsers.order_by(\"-points\")\n\n paginationData = []\n for user in includedUsers:\n # rank is the index of the users points +1 (converting from 0-indexing)\n data = {\"user\": user, \"rank\": rankings.index(user.points) + 1}\n paginationData.append(data)\n\n return JsonResponse(\n json_paginator(request, paginationData, lb_serializer),\n status=200,\n )",
"async def from_url(cls) -> \"AocGlobalLeaderboard\":\n aoc_url = f\"https://adventofcode.com/{AocConfig.year}/leaderboard\"\n\n async with aiohttp.ClientSession(headers=AOC_REQUEST_HEADER) as session:\n async with session.get(aoc_url) as resp:\n if resp.status == 200:\n raw_html = await resp.text()\n else:\n log.warning(f\"Bad response received from AoC ({resp.status}), check session cookie\")\n resp.raise_for_status()\n\n soup = BeautifulSoup(raw_html, \"html.parser\")\n ele = soup.find_all(\"div\", class_=\"leaderboard-entry\")\n\n exp = r\"(?:[ ]{,2}(\\d+)\\))?[ ]+(\\d+)\\s+([\\w\\(\\)\\#\\@\\-\\d ]+)\"\n\n lb_list = []\n for entry in ele:\n # Strip off the AoC++ decorator\n raw_str = entry.text.replace(\"(AoC++)\", \"\").rstrip()\n\n # Use a regex to extract the info from the string to unify formatting\n # Group 1: Rank\n # Group 2: Global Score\n # Group 3: Member string\n r = re.match(exp, raw_str)\n\n rank = int(r.group(1)) if r.group(1) else None\n global_score = int(r.group(2))\n\n member = r.group(3)\n if member.lower().startswith(\"(anonymous\"):\n # Normalize anonymous user string by stripping () and title casing\n member = re.sub(r\"[\\(\\)]\", \"\", member).title()\n\n lb_list.append((rank, global_score, member))\n\n return cls(lb_list)",
"async def leaderboard(self, ctx, arg1: T = None, arg2: T = None):\n\n (channel, member) = self.resolve_arguments(arg1, arg2, types=get_args(T))\n\n await ctx.trigger_typing()\n\n member = member if member else ctx.author\n channel_id = channel.id if channel else None\n bot_ids = [bot.id for bot in filter(lambda user: user.bot, ctx.guild.members)]\n\n await self.bot.db.leaderboard.preselect(ctx.guild.id, bot_ids, channel_id)\n top10 = await self.bot.db.leaderboard.get_top10()\n around = await self.bot.db.leaderboard.get_around(member.id)\n\n embed = await self.display_leaderboard(ctx, top10, around, member)\n await ctx.send(embed=embed)",
"async def leaderboard(self, ctx: commands.Context):\r\n async with ctx.typing():\r\n user_info_unsorted = {}\r\n user_info_sorted = {}\r\n async with self.bot.database() as db:\r\n user_info_rows = await db(\"\"\"SELECT * FROM user_balance\"\"\")\r\n for user_info in user_info_rows:\r\n user_info_unsorted[user_info['balance']] = user_info['user_id']\r\n user_info_unsorted_items = user_info_unsorted.items()\r\n user_id_sorted = sorted(user_info_unsorted_items, reverse=True)\r\n page = 0\r\n place = 0\r\n set = 0\r\n fields = []\r\n field_info = []\r\n for user_id_sorted_single in user_id_sorted:\r\n user = await self.bot.fetch_user(user_id_sorted_single[1])\r\n place += 1\r\n field_info.append(f\"#{place:,}. {user.name} ({user_id_sorted_single[0]:,})\")\r\n set += 1\r\n if set == 10:\r\n page += 1\r\n fields.append((f\"Page {page:,}\", \"\\n\".join(field_info)))\r\n field_info = []\r\n set = 0\r\n if set != 0:\r\n page += 1\r\n fields.append((f\"Page {page:,}\", \"\\n\".join(field_info)))\r\n field_info = []\r\n set = 0\r\n return await utils.paginate(ctx, fields, ctx.author.id, \"Global Balance Leaderboard\")",
"async def message_leaderboard(self, ctx, boardType):\n\n\t\tglobal embeds\n\t\tguild = ctx.message.guild\n\n\t\tif boardType == \"quotes\":\n\t\t\tleaderboardType = \"quoteLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"quoteLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telif boardType == \"reactions\":\n\t\t\tleaderboardType = \"reactionLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"reactionLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telif boardType == \"emojis\":\n\t\t\tleaderboardType = \"emojiLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"emojiLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\t\telse:\n\t\t\tleaderboardType = \"messageLeaderboard\"\n\t\t\tleaderboard = self.leaderboards[str(ctx.message.guild.id)][\"messageLeaderboard\"]\n\t\t\tleaderboardEmbed = embeds[leaderboardType]\n\n\t\tleaderboardEmbed.clear_fields()\n\n\t\tleaderboard = {k: v for k, v in sorted(leaderboard.items(), key=lambda a: a[1], reverse=True)}\n\n\t\tpastScore = 0\n\t\toffset = 0\n\t\tposition = 0\n\t\tuserValues = \"\"\n\n\t\tfor participant in leaderboard:\n\t\t\tscore = leaderboard[participant]\n\n\t\t\tif score == pastScore:\n\t\t\t\toffset += 1\n\t\t\telse:\n\t\t\t\tposition += offset + 1\n\t\t\t\toffset = 0\n\t\t\t\tpastScore = score\n\n\t\t\tif leaderboardType == \"reactionLeaderboard\":\n\t\t\t\tname = str(participant)\n\t\t\telif leaderboardType == \"emojiLeaderboard\":\n\t\t\t\tfor emoji in guild.emojis:\n\t\t\t\t\tif int(participant) == emoji.id:\n\t\t\t\t\t\tname = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif int(participant) == 456226577798135808:\n\t\t\t\t\t# Skip deleted users\n\t\t\t\t\tTrue\n\t\t\t\telif guild.get_member(int(participant)) is None:\n\t\t\t\t\tname = str(await self.bot.fetch_user(int(participant)))\n\t\t\t\telse:\n\t\t\t\t\tname = str(guild.get_member(int(participant)).display_name)\n\n\t\t\tuserValues += \"**\" + str(position) + \". \" + name + \"** - \" + str(score) + \"\\n\\n\\t\"\n\n\t\tif userValues == \"\":\n\t\t\tuserValues = \"None\"\n\n\t\tleaderboardEmbed.add_field(name=\"User\", value=\"\".join(userValues.split(\"\\t\")[0:10]), inline=True)\n\n\t\tmessage = await ctx.send(embed=leaderboardEmbed)\n\t\tself.cachedMessages[message.id] = {\"type\": leaderboardType, \"page\": 1}\n\t\tawait message.add_reaction(\"⬅️\")\n\t\tawait message.add_reaction(\"➡️\")",
"def leader(self):\n pass",
"def leader(self):\n pass",
"def get_overall_leaderboard(cursor, lang='all-langs') -> List[LeaderboardEntry]:\n # sqlite doesn't support PostgreSQL's DISTINCT ON.\n query = '''\n WITH augmented_solutions AS (\n SELECT hole,\n user,\n strokes,\n submitted,\n ROW_NUMBER() OVER (PARTITION BY hole, user\n ORDER BY strokes, submitted) hole_user_ordinal\n FROM solutions\n WHERE ? IN ('all-langs', lang)\n ), leaderboard AS (\n SELECT hole,\n user,\n strokes,\n submitted\n FROM augmented_solutions\n WHERE hole_user_ordinal = 1\n ), scored_leaderboard AS (\n SELECT hole,\n ROUND(\n (COUNT(*) OVER (PARTITION BY hole) -\n RANK() OVER (PARTITION BY hole ORDER BY strokes) + 1)\n * (1000.0 / COUNT(*) OVER (PARTITION BY hole))\n ) points,\n strokes,\n submitted,\n user\n FROM leaderboard\n ), summed_leaderboard AS (\n SELECT user,\n COUNT(*) holes,\n SUM(points) points,\n SUM(strokes) strokes,\n MAX(submitted) submitted\n FROM scored_leaderboard\n GROUP BY user\n ) SELECT user,\n '' lang,\n points,\n RANK() OVER (ORDER BY points DESC, strokes),\n holes,\n strokes,\n submitted\n FROM summed_leaderboard\n ORDER BY points DESC, strokes, submitted'''\n return [LeaderboardEntry(*item) for item in cursor.execute(query, [lang])]",
"def leaderboard():\n \n global score_dictinary\n data = []\n fields = []\n scores = []\n names = []\n users = []\n i=0\n \n #Reads the winners from a mongo database \n read_mongo(scores, names)\n \n #Sorts the list in descending order\n quicksort(scores, names, 0, len(scores) - 1)\n \n #Joins the names and scores arrays together\n while i < len(scores):\n users.append(names[i] + \" \" + scores[i])\n i += 1\n \n users = (reversed(users))\n \n return render_template(\"leaderboard.html\", users=users)",
"def get_leaderboard(cursor, hole='all-holes', lang='all-langs') -> List[LeaderboardEntry]:\n if hole == 'all-holes':\n return get_overall_leaderboard(cursor, lang)\n query = '''\n WITH leaderboard AS (\n SELECT hole,\n submitted,\n strokes,\n user,\n lang\n FROM solutions\n WHERE hole = ?\n AND ? IN ('all-langs', lang)\n ), scored_leaderboard AS (\n SELECT hole,\n ROUND(\n (COUNT(*) OVER (PARTITION BY hole) -\n RANK() OVER (PARTITION BY hole ORDER BY strokes) + 1)\n * (1000.0 / COUNT(*) OVER (PARTITION BY hole))\n ) points,\n strokes,\n submitted,\n user,\n lang\n FROM leaderboard\n ) SELECT user,\n lang,\n points,\n RANK() OVER (ORDER BY points DESC, strokes),\n 1 holes,\n strokes,\n submitted\n FROM scored_leaderboard\n ORDER BY points DESC, strokes, submitted'''\n return [LeaderboardEntry(*item) for item in cursor.execute(query, [hole, lang])]",
"def leaderboard_timeline_API():\n keyName = \"leaderboard-key\"\n\n leadership_data= redis_obj.zrevrange('add_like',0,-1,withscores=True)\n list_of_users=[]\n if redis_obj.get(keyName):\n print(\"** Messages from leaderboard Redis Cache **\")\n key = pickle.loads(redis_obj.get(keyName))\n user_profile = jsonify(Message=\"Success! leaderboard details.\",\n leaderboard=key, Status_code=status.HTTP_200_OK)\n return user_profile\n else:\n for like in leadership_data:\n message_id=like[0]\n user_record=mongo.db.message.find_one({\"_id\":ObjectId(message_id)})\n tmp =user_record['email']\n list_of_users.append({'username':user_record['username'],'email':user_record['email'],'pub_date':user_record['pub_date'],'text':user_record['text'],'score':int(like[1])})\n\n\n print(\"** Messages from leaderboard DB hit **\")\n redis_obj.set(keyName, cPickle.dumps(list_of_users))\n redis_obj.expire(keyName, 60)\n\n\n user_profile = jsonify(Message=\"Success! leaderboard details.\",\n leaderboard=list_of_users, Status_code=status.HTTP_200_OK)\n\n return user_profile",
"def retrieve(self, request, pk=None):\n team_leader = self.get_team_leader_object(pk)\n serializer = data_serializers.TeamLeaderPresenterSerializer(team_leader)\n return Response(serializer.data, status=status.HTTP_201_CREATED)",
"def print_leaderboard(self):\n \n leaderboard = pandas.DataFrame(self.history_score.items(), columns=[\"Name\", \"Score\"])\n leaderboard.index += 1\n \n print(leaderboard)",
"def take_leader(self):",
"def leaderboard(request, when):\n limit = _clean_int(request.GET.get('limit'), 300, 1, 1000)\n data = leaderboard_impl(when, limit)\n if data is None:\n return HttpResponseNotFound()\n tops = []\n shame = []\n for i in data:\n if i.score == models.AccountStatsBase.NULL_SCORE:\n shame.append(i)\n else:\n tops.append(i)\n return respond(\n request, 'leaderboard.html', {'tops': tops, 'shame': shame, 'when': when})",
"async def get_one_leaderboard(self, variant: 'VariantTypes', limit: int = 10) -> 'Response':\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/vnd.lichess.v3+json'\n }\n response = await self._client.request(method=RequestMethods.GET,\n url=USERS_PLAYER_TOP_URL.format(nb=limit, perfType=variant),\n headers=headers)\n return response",
"def get_leader(model, application_name):\n command = ['juju', 'run', '--format=yaml',\n '--model', model,\n '--application', application_name,\n 'is-leader']\n results = yaml.load(subprocess.check_output(command))\n for unit in results:\n if 'True' in unit['Stdout'].strip():\n return unit['UnitID']",
"def leaderboard(request):\r\n\tMEDIA_URL = '/media/'\r\n\tgames = Game.objects.all()\r\n\tuser_high_scores = []\r\n\tgame_high_scores = []\r\n\tnew = {}\r\n\t# Get global scores\r\n\tfor game in games:\r\n\t\tgame_intermediate_high = Score.objects.filter(game = game.id).order_by('-current_score').values('game__name', 'player__user__username', 'current_score')[:1]\r\n\t\tif (game_intermediate_high.count() > 0):\r\n\t\t\tgame_high_scores.append(game_intermediate_high)\r\n\t# Check if user is authenticated and get user's scores\r\n\tif (request.user.is_authenticated):\r\n\t\tfor game in games:\r\n\t\t\t\tuser_intermediate_high = Score.objects.filter(game=game.id, player = request.user.profile).order_by('-current_score').values('player__user__username','game__name', 'current_score').distinct()[:1]\r\n\t\t\t\tif (user_intermediate_high.count() > 0):\r\n\t\t\t\t\tuser_high_scores.append(user_intermediate_high)\r\n\r\n\treturn render(request, 'leaderboard.html',{'MEDIA_URL' : MEDIA_URL,'games': games, 'user_high_scores': user_high_scores, 'game_high_scores': game_high_scores})",
"async def leaderboard(self, ctx):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n xp = config.load_xp()\n scores = {}\n if str(guild) in xp['guilds']:\n for user in xp['guilds'][str(guild)]:\n scores.update({ctx.guild.get_member(int(user)).display_name: xp['guilds'][str(guild)][user]['xp']})\n sorted_scores = collections.OrderedDict(sorted(scores.items(), key=lambda x: x[1], reverse=True))\n message = discord.Embed(title='Leaderboard', description=ctx.guild.name + \"'s most active users\")\n current_field = 1\n field_limit = 25\n for index, (key, value) in enumerate(sorted_scores.items()):\n if current_field <= field_limit:\n message.add_field(name=str(index+1) + \": \" + key,\n value=\"with: \" + str(value) + \" xp\",\n inline=False)\n current_field += 1\n else:\n break\n await ctx.send('', embed=message)\n else:\n await ctx.send(\"leveling is currently disabled on this server!\")",
"async def leaderboard_handler(\n self, ctx: Context, title: str, thumb_url: str,\n padding: int, pb=False, brawler_name=None\n ):\n\n all_users = await self.config.all_users()\n users = []\n for user_id in all_users:\n try:\n user = self.bot.get_user(user_id)\n if not user:\n continue\n trophies = await self.get_trophies(\n user, pb=pb, brawler_name=brawler_name)\n users.append((user, trophies))\n except Exception:\n pass\n\n # remove duplicates\n users = list(set(users))\n users = sorted(users, key=lambda k: k[1], reverse=True)\n\n embed_desc = (\n \"Check out who is at the top of the Brawlcord leaderboard!\\n\\u200b\"\n )\n add_user = True\n # return first 10 (or fewer) members\n for i in range(10):\n try:\n trophies = users[i][1]\n user = users[i][0]\n if brawler_name:\n emoji = await self.get_rank_emoji(user, brawler_name)\n else:\n _, emoji = await self.get_league_data(trophies)\n if user.id == ctx.author.id:\n embed_desc += (\n f\"**\\n`{(i+1):02d}.` {user} {emoji}\"\n f\"{trophies:>{padding},}**\"\n )\n add_user = False\n else:\n embed_desc += (\n f\"\\n`{(i+1):02d}.` {user} {emoji}\"\n f\"{trophies:>{padding},}\"\n )\n except Exception:\n pass\n\n embed = discord.Embed(color=EMBED_COLOR, description=embed_desc)\n embed.set_author(name=title, icon_url=ctx.me.avatar_url)\n embed.set_thumbnail(url=thumb_url)\n\n # add rank of user\n if add_user:\n for idx, user in enumerate(users):\n if ctx.author == user[0]:\n val_str = \"\"\n try:\n trophies = users[idx][1]\n user = users[idx][0]\n if brawler_name:\n emoji = await self.get_rank_emoji(\n user, brawler_name)\n else:\n _, emoji = await self.get_league_data(trophies)\n val_str += (\n f\"\\n**`{(idx+1):02d}.` {user} {emoji}\"\n f\"{trophies:>{padding},}**\"\n )\n except Exception:\n pass\n try:\n embed.add_field(name=\"Your position\", value=val_str)\n except UnboundLocalError:\n # happens only in case of brawlers\n embed.add_field(name=f\"\\u200bNo one owns {brawler_name}!\",\n value=\"Open boxes to unlock new Brawlers.\")\n except Exception:\n pass\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n \"I do not have the permission to embed a link.\"\n \" Please give/ask someone to give me that permission.\"\n )",
"def read_leaderboard(self):\n \n with open(\"Leaderboard.csv\", 'r') as in_file:\n reader = csv.reader(in_file)\n history_score = {rows[0]:float(rows[1]) for rows in reader}\n \n return history_score",
"def get_leaderboards():\n leaderboards = select(\n (workout.user_id, sum(workout.repetitions), sum(workout.weight)) for workout in UserExerciseData)\n\n return [{'username': _get_username(l[0]), 'reps': l[1], 'weights': l[2]} for l in leaderboards]",
"async def from_url(cls) -> \"AocPrivateLeaderboard\":\n api_json = await cls.json_from_url()\n return cls.from_json(api_json)",
"async def post_leaderboard(\n self,\n ctx: commands.Context,\n leaderboard_type: Literal[\n \"season\",\n \"weekly\",\n \"worst\",\n \"playoffs\",\n \"playoffs_weekly\",\n \"pre-season\",\n \"pre-season_weekly\",\n ],\n ) -> None:\n leaderboard_type_str = leaderboard_type.replace(\"_\", \" \").title()\n leaderboard = await self.pickems_config.guild(ctx.guild).leaderboard()\n if leaderboard == {} or leaderboard is None:\n await ctx.send(_(\"There is no current leaderboard for this server!\"))\n return\n if leaderboard_type != \"worst\":\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][leaderboard_type], reverse=True\n )\n else:\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][\"total\"] - i[1][\"season\"], reverse=True\n )\n msg_list = []\n count = 1\n user_position = None\n total_str = {\n \"season\": \"total\",\n \"playoffs\": \"playoffs_total\",\n \"pre-season\": \"pre-season_total\",\n }.get(leaderboard_type, \"total\")\n\n for member_id in leaderboard:\n if str(member_id[0]) == str(ctx.author.id):\n user_position = leaderboard.index(member_id)\n member = ctx.guild.get_member(int(member_id[0]))\n if member is None:\n member_mention = _(\"User has left the server \") + member_id[0]\n else:\n member_mention = member.mention\n if leaderboard_type in [\"weekly\", \"playoffs_weekly\", \"pre-season_weekly\"]:\n points = member_id[1].get(leaderboard_type, 0)\n msg_list.append(\"#{}. {}: {}\\n\".format(count, member_mention, points))\n elif leaderboard_type in [\"season\", \"playoffs\", \"pre-season\"]:\n total = member_id[1].get(total_str, 0)\n wins = member_id[1].get(leaderboard_type, 0)\n try:\n percent = (wins / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. {member_mention}: {wins}/{total} correct ({percent:.4}%)\\n\"\n )\n else:\n total = member_id[1].get(total_str, 0)\n losses = member_id[1].get(total_str, 0) - member_id[1].get(leaderboard_type)\n try:\n percent = (losses / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. {member_mention}: {losses}/{total} incorrect ({percent:.4}%)\\n\"\n )\n count += 1\n leaderboard_list = [msg_list[i : i + 10] for i in range(0, len(msg_list), 10)]\n if user_position is not None:\n user = leaderboard[user_position][1]\n wins = user[\"season\"]\n total = user[total_str]\n losses = user[total_str] - user[\"season\"]\n position = _(\n \"{member}, you're #{number} on the {leaderboard_type} leaderboard!\\n\"\n ).format(\n member=ctx.author.display_name,\n number=user_position + 1,\n leaderboard_type=leaderboard_type_str,\n )\n if leaderboard_type == \"season\":\n percent = (wins / total) * 100\n position += _(\"You have {wins}/{total} correct ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n elif leaderboard_type == \"worst\":\n percent = (losses / total) * 100\n position += _(\"You have {wins}/{total} incorrect ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n await ctx.send(position)\n await BaseMenu(\n source=LeaderboardPages(pages=leaderboard_list, style=leaderboard_type_str),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)",
"def get_leaderboard(self, context_type, context_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)\n else:\n (data) = self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)\n return data"
] | [
"0.71753174",
"0.6814637",
"0.67305756",
"0.6534892",
"0.6010836",
"0.6006939",
"0.60041165",
"0.5990674",
"0.59727794",
"0.58822256",
"0.58502156",
"0.58502156",
"0.58473134",
"0.5838606",
"0.5764738",
"0.5730108",
"0.57219785",
"0.57013613",
"0.5653109",
"0.5608319",
"0.5597274",
"0.5577709",
"0.5509108",
"0.5496134",
"0.5495779",
"0.5452805",
"0.5434065",
"0.5415931",
"0.53783303",
"0.53665334"
] | 0.68511003 | 1 |
Retrieve the backend event log from an H2OAutoML object | def event_log(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLogs():",
"def getLogs():",
"def log(self):\r\n return self._log",
"def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]",
"def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp",
"def GetLogs(self):\n raise NotImplementedError()",
"def getLog(self):\n \n return self.resp[\"log\"]",
"def get_one(self, id):\n rpc_ilog = objects.event_log.get_by_uuid(\n pecan.request.context, id)\n\n return EventLog.convert_with_links(rpc_ilog)",
"def getLog(self):\n return self.session.request('diag/log/')",
"def log(self):\n return self._log",
"def log(self):\n return self._log",
"def log (self):\n return self._log",
"def get_log()->dict:\n return execute_command(\"SELECT log FROM log\").fetchall()[0][0]",
"def async_describe_logbook_event(event): # type: ignore\n data = event.data\n message = \"has been triggered\"\n if ATTR_SOURCE in data:\n message = f\"{message} by {data[ATTR_SOURCE]}\"\n return {\n \"name\": data.get(ATTR_NAME),\n \"message\": message,\n \"source\": data.get(ATTR_SOURCE),\n \"entity_id\": data.get(ATTR_ENTITY_ID),\n }",
"def get_full_log(self):\n return self._get_log('full')",
"def fetchLogRecords(self):\n return self.handler.buffer",
"def getLog(self):\n return self.log",
"def getLog(self):\n return self.log",
"def get_api_event(self):\n pass",
"def get_addons_changelogs(hass):\n return hass.data.get(DATA_ADDONS_CHANGELOGS)",
"def fetchLogs(self):\n return [record.msg for record in self.handler.buffer]",
"def getLog(self):\n pass",
"def get_eventhub_info(self):\n self._create_connection()\n eh_name = self.address.path.lstrip('/')\n target = \"amqps://{}/{}\".format(self.address.hostname, eh_name)\n mgmt_client = uamqp.AMQPClient(target, auth=self.auth, debug=self.debug)\n mgmt_client.open(self.connection)\n try:\n mgmt_msg = Message(application_properties={'name': eh_name})\n response = mgmt_client.mgmt_request(\n mgmt_msg,\n constants.READ_OPERATION,\n op_type=b'com.microsoft:eventhub',\n status_code_field=b'status-code',\n description_fields=b'status-description')\n eh_info = response.get_data()\n output = {}\n if eh_info:\n output['name'] = eh_info[b'name'].decode('utf-8')\n output['type'] = eh_info[b'type'].decode('utf-8')\n output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000)\n output['partition_count'] = eh_info[b'partition_count']\n output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']]\n return output\n except:\n raise\n finally:\n mgmt_client.close()",
"def event(self):\n return self.events[0]",
"def endpoint_log(self, endpoint_name=None, since=None):\n if endpoint_name is None:\n url = '/v1.1/endpoint/log'\n else:\n url = '/v1.1/endpoints/%s/log' % endpoint_name\n if since is not None:\n url += '?since=%f' % float(since)\n _, body = self.request(url, 'GET')\n return body",
"def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances",
"def test_get_event_log(event_log_api_setup):\n api_response = event_log_api_setup.get_event_log(\n event_log_id=1,\n )\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")",
"def get():\n\n if not in_agent_mode():\n return responses.bad_request_resp(\n \"Episode data only recorded when in Agent mode\"\n )\n\n current_ep_file = bb_logging.EP_FILE\n if not current_ep_file:\n return responses.bad_request_resp(\"No episode being recorded\")\n\n cwd = os.getcwd()\n full_log_dir = os.path.join(cwd, bb_logging.INST_LOG_DIR)\n full_ep_file = os.path.join(cwd, current_ep_file)\n\n data = {\n \"inst_id\": bb_logging.INSTANCE_ID,\n \"cur_ep_id\": bb_logging.EP_ID,\n \"cur_ep_file\": full_ep_file,\n \"log_dir\": full_log_dir,\n }\n\n return responses.ok_resp(data)",
"def logfile(self):\n return self._get('logfile')",
"def log(self):\n if self._log is None:\n self._log = Log(client=self)\n return self._log"
] | [
"0.5895145",
"0.5895145",
"0.5818652",
"0.5775522",
"0.57516444",
"0.5684986",
"0.56431377",
"0.5545011",
"0.5513884",
"0.5493785",
"0.5493785",
"0.54921085",
"0.54801255",
"0.5439759",
"0.5415686",
"0.5415399",
"0.5406173",
"0.5406173",
"0.54029393",
"0.5401882",
"0.53920996",
"0.53872776",
"0.53264856",
"0.5324127",
"0.52861506",
"0.527889",
"0.5250783",
"0.52506757",
"0.5211256",
"0.5210067"
] | 0.63432443 | 0 |
Get best model of a given family/algorithm for a given criterion from an AutoML object. | def get_best_model(self, algorithm=None, criterion=None):
from h2o.exceptions import H2OValueError
def _get_models(leaderboard):
return [m[0] for m in
leaderboard["model_id"].as_data_frame(use_pandas=False, header=False)]
higher_is_better = ["auc", "aucpr"]
assert_is_type(algorithm, None, str)
assert_is_type(criterion, None, str)
if criterion is not None:
criterion = criterion.lower()
if "deviance" == criterion:
criterion = "mean_residual_deviance"
if algorithm is not None:
if algorithm.lower() not in ("basemodel", "deeplearning", "drf", "gbm",
"glm", "stackedensemble", "xgboost"):
raise H2OValueError("Algorithm \"{}\" is not supported!".format(algorithm))
algorithm = algorithm.lower()
extra_cols = ["algo"]
if criterion in ("training_time_ms", "predict_time_per_row_ms"):
extra_cols.append(criterion)
leaderboard = h2o.automl.get_leaderboard(self, extra_columns=extra_cols)
leaderboard = leaderboard if algorithm is None else (
leaderboard[leaderboard["algo"].tolower() == algorithm, :] if algorithm != "basemodel"
else leaderboard[leaderboard["algo"].tolower() != "stackedensemble", :])
if leaderboard.nrow == 0:
return None
if criterion is None:
return h2o.get_model(leaderboard[0, "model_id"])
if criterion not in leaderboard.columns:
raise H2OValueError("Criterion \"{}\" is not present in the leaderboard!".format(criterion))
models_in_default_order = _get_models(leaderboard)
sorted_lb = leaderboard.sort(by=criterion, ascending=criterion not in higher_is_better)
selected_models = _get_models(sorted_lb[sorted_lb[criterion] == sorted_lb[0, criterion]])
picked_model = [model for model in models_in_default_order if model in selected_models][0]
return h2o.get_model(picked_model) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_best_model(self):\n return self.best_model",
"def get_best_model(res, model_type):\n if model_type == 'classification':\n # the best classification model according to f1 metric\n best = sorted([(k, v['f1']) for k, v in res.items()], key=lambda x: -x[1])[0]\n # the best regression model according to mse metric\n elif model_type == 'regression':\n best = sorted([(k, v['mse']) for k, v in res.items()], key=lambda x: x[1])[0]\n\n return best",
"def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)",
"def test_find_best_model(self):\n parameters = dict(\n model=('spherical', 'gaussian', 'exponential', 'matern')\n )\n gs = GridSearchCV(\n VariogramEstimator(n_lags=15, normalize=False),\n parameters,\n cv=3\n )\n\n gs = gs.fit(self.c, self.v)\n\n # Python 3.6 yields 'exponential', \n # while 3.7, 3.8 yield 'gaussian' - this is so stupid\n self.assertTrue(gs.best_params_['model'] in ['gaussian', 'exponential'])",
"def get_best_model(self, d_model_info, metric='F1', delta_auc_th=0.03, verbose=False):\n # select valid models (abs(auc_train - auc_test)<0.03)\n valid_model = {}\n for key, param in d_model_info.items():\n if param['metrics']['delta_auc'] <= delta_auc_th:\n valid_model[key] = param\n\n # Best model according to selected metric\n if len(valid_model.keys()) > 0:\n best_model_idx = max(valid_model, key=lambda x: valid_model[x].get('metrics').get(metric))\n if verbose:\n print(' >', len(valid_model.keys()), ' valid models |auc(train)-auc(test)|<=' + str(delta_auc_th))\n print(' > best model : ' + str(best_model_idx))\n else:\n best_model_idx = None\n print('0 valid model')\n\n return best_model_idx, list(valid_model.keys())",
"def select_best_model(self, df):\n params = {\n # check whether unigrams give good results or bigrams.\n \"vectorizer__vectorizer\": [self.feature_name_to_class[self.feature]],\n \"vectorizer__ngram_range\": [(1,1), (1,2), (2,2)],\n # check pca parameters\n \"pca__n_components\": [30, 40, 50],\n # stemmer to use for preprocessing\n \"preprocessor__stemmer\": [self.stemmer_name_to_method[self.stemmer_method]],\n 'extractor__punctuations': [True, False]\n\n }\n # select the tunable parameters according to the model\n if self.model == MODELS_SVM:\n params.update({\n 'model__kernel': ['linear'],\n 'model__gamma': [1e-3, 1e-4],\n 'model__C': [0.5, 1, 10]\n })\n elif self.model == MODELS_RANDOM_FOREST:\n params.update({\n 'model__n_estimators': [5, 10, 15]\n })\n elif self.model == MODELS_LOGISTIC_REGRESSION:\n params.update({\n 'model__C': [1.0, 10],\n 'model__tol': [0.001, 0.01, 0.1]\n })\n clf = GridSearchCV(self.get_pipeline(), params, cv=5,\n scoring='%s_macro' % self.training_param)\n X = df.drop([\"Value\"], axis=1)\n Y = df[\"Value\"].values\n clf.fit(X, Y)\n print clf.best_params_\n # print clf.best_estimator_\n print clf.best_score_",
"def get_best_known_model(cls, model_dir) -> Tuple[Optional[Path], int]:\n return cls._get_first_model(model_dir, sort='total_score', desc=False)",
"def set_best_model(self):\n if (self.metric == 'bic'):\n self.best_gmm = self.best_gmm_bic\n elif(self.metric == 'aic'):\n self.best_gmm = self.best_gmm_aic",
"def bestModel(self, channel_type):",
"def getBestFittedModel( models, features ):\r\n\r\n\tvalidModels = []\r\n\tclusteringScores = []\r\n\tfor model in models:\r\n\t\t#Skip mono cluster models\r\n\t\tif st.getNbClusters( model ) < 2: continue\r\n\t\tvalidModels.append( model )\r\n\t\tlabels = model.labels_\r\n\t\tclusteringScore = evaluateClusters(features, labels)\r\n\t\tclusteringScores.append( clusteringScore)\r\n\tif len(clusteringScores) == 0: return False, -1\r\n\tbestScoreIndex = np.argmax(clusteringScores)\r\n\treturn validModels[bestScoreIndex], clusteringScores[bestScoreIndex]",
"def get_best_model(x_train, y_train):\n # calculate class weights\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train),\n y_train)\n # convert to dict\n class_weights = dict(enumerate(class_weights))\n # encode label data\n y_train = to_categorical(y_train)\n\n return get_model(x_train, y_train, 256, 3, 'adamax', 'normal', class_weights)",
"def _single_model_BayesianSearchCV(self, \n model_ID,\n model_dict, \n X_train, y_train, \n X_test, y_test,\n path_model_dir,\n refit=True,\n **kwargs):\n if self.verbose>=1:\n print('Fitting',self.cv,'folds for each of',self.max_evals,'candidates, totalling',self.cv*self.max_evals,'fits')\n \n model_dict = model_dict.copy()\n model = model_dict['model']\n type_model = str(type(model))\n model_type = str(type(model_dict['model']))\n param_grid = model_dict['param_grid'].copy()\n objective = _functools.partial(self._objective, \n model_ID = model_ID,\n model_dict = model_dict, \n X = X_train, y=y_train, \n **kwargs)\n \n space = self._build_space(param_grid)\n \n if self.verbose>=4:\n self._plot_space(space)\n \n best_params_bad_keys = _hyperopt.fmin(fn = objective, \n space = space, \n algo = _hyperopt.tpe.suggest, \n max_evals = self.max_evals, \n trials = _hyperopt.Trials(),\n verbose = self.verbose)\n # hyperopt doesn't return the best params dict with keys matching the 'space' keys.\n # This breaks handling of 'log10.' transformed parameters. Fix is implemented below\n best_params_ = {}\n for key in space.keys():\n best_params_[key] = best_params_bad_keys[key.replace('log10.','')]\n if self.verbose>=3:\n print('hyperopt_input_best_params_:',best_params_)\n \n best_score_ = self._objective(best_params_, \n model_ID,\n model_dict = model_dict, \n X = X_train, y=y_train)['loss']\n \n #transform params back to original model values\n best_params_, best_model_ = self._update_model_params(best_params_, model_ID, model, param_grid)\n \n if self.verbose>=3:\n print('model_input_best_params_:',best_params_)\n \n \n if refit:\n if 'sklearn' in type_model or 'xgboost' in type_model:\n if y_train.shape[1]==1:\n y_train = _np.array(y_train).reshape(-1,)\n best_model_.fit(X_train, y_train)\n else: #using neural net function\n import tensorflow as _tf\n \n if 'dataframe' in str(type(X_train)).lower():\n X_train = _np.array(X_train)\n X_test = _np.array(X_test)\n if 'dataframe' in str(type(y_train)).lower():\n y_train = _np.array(y_train)\n y_test = _np.array(y_test)\n \n #check for kwargs\n epochs = 100\n batch_size = 32\n callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]\n for item in kwargs.items():\n if 'epochs' in item[0]: \n epochs = item[1]\n elif 'batch_size' in item[0]: \n batch_size = item[1]\n elif 'callbacks' in item[0]: \n callbacks = item[1]\n \n history = best_model_.fit(x= X_train, \n y= y_train, \n validation_data=(X_test, y_test),\n batch_size=batch_size, \n epochs = epochs, \n verbose= max(0,self.verbose-2), \n callbacks = callbacks)\n \n model_dict['best_params'] = best_params_\n model_dict['best_model'] = best_model_\n model_dict['best_cv_score'] = best_score_ \n \n if 'sklearn' in model_type or 'xgboost' in model_type:\n self.save(model_dict, 'model_dict', 'dill', path_model_dir)\n else:\n if _os.path.isdir(path_model_dir)==False:\n _os.makedirs(path_model_dir)\n best_model_.save(_os.path.join(path_model_dir, 'best_model.h5')) \n self.save(model_dict['best_params'], 'best_params', 'dill', path_model_dir)\n \n return model_dict",
"def _get_best(self, populations, func):\n best = None\n for population in populations:\n for item in population:\n if not best:\n best = item\n elif func.fit(*item) > func.fit(*best):\n best = item\n return best",
"def getBestAgglomerativeModel( features, maxNbClusters=10 ):\r\n\r\n\tprint(\"Agglomerative model\")\r\n\tmodels = []\r\n\tlinkageMetrics = [\"ward\", \"complete\", \"average\", \"single\"]\r\n\tfor nbClusters in range(2, maxNbClusters+1):\r\n\t\tfor metric in linkageMetrics: models.append( st.getFittedAgglomerativeModel( features, nbClusters, metric))\r\n\tbestModel, bestScore = st.getBestFittedModel( models, features)\r\n\tif not bestModel:\r\n\t\tprint(\"Regected all models\")\r\n\t\treturn False, -1\r\n\tprint(\"Score:\", bestScore)\r\n\tprint(\"Number of clusters:\", st.getNbClusters(bestModel))\r\n\tprint(\"Linkage:\", bestModel.get_params()[\"linkage\"])\r\n\treturn bestModel, bestScore",
"def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. {}\".format(all_models))\n return\n\n return all_models[0]",
"def load_best_model():\r\n best_model = LSTM_load_best_model()\r\n return best_model",
"def load_best_model(self) -> Optional[nn.Module]:\n if self.save_best_model:\n return torch.load(self.best_model_path)\n return None",
"def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n best_hmm_model = None\n best_hmm_model_score = float(\"inf\")\n\n for states in range(self.min_n_components, self.max_n_components + 1):\n try:\n hmm_model = self.base_model(states) \n model_score = hmm_model.score(self.X, self.lengths)\n \n parameters = (states * states) + (2 * states * self.num_features)\n score = -2 * model_score + parameters * np.log(self.num_datapoints)\n \n if score < best_hmm_model_score: \n best_hmm_model = hmm_model\n best_hmm_model_score = score\n \n except:\n if self.verbose:\n print(\"Upps! word {} states {}\".format(self.this_word, states))\n \n return best_hmm_model",
"def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n bic_models = map(lambda x: self.run_bic_model(x),\n range(self.min_n_components, self.max_n_components + 1))\n valid_models = [x for x in bic_models if x is not None]\n\n if len(valid_models) > 0:\n best_model = sorted(valid_models, key=lambda x: x[1])[0]\n return best_model[0]\n else:\n return None",
"def get_model(parameters):\n if MODEL == 6:\n return get_model_6(parameters)\n elif MODEL == 5:\n return get_model_5(parameters)\n elif MODEL == 4:\n return get_model_4(parameters)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return get_cv_model_3(parameters)\n else:\n return get_model_3(parameters)\n elif MODEL == 2:\n return get_model_2(parameters)\n else:\n return get_model_1(parameters)",
"def _select_classifier_from_sk_search(estimator, X, A):\n estimator.fit(X, A)\n best_estimator = clone(estimator.best_estimator_)\n return best_estimator",
"def get_algorithm_functionality(\n model: MLTypes.ModelType = None,\n y: MLTypes.DatasetType = None,\n objective: str = None,\n ) -> AlgorithmFunctionality:\n # Check if LightGBM is being used with SciKit-Learn API:\n if objective is None:\n return super().get_algorithm_functionality(model=model, y=y)\n\n # Declare the conversion map according to the LightGBM docs:\n objective_to_algorithm_functionality_map = {\n # regression application:\n \"regression\": AlgorithmFunctionality.REGRESSION,\n \"regression_l2\": AlgorithmFunctionality.REGRESSION,\n \"l2\": AlgorithmFunctionality.REGRESSION,\n \"mean_squared_error\": AlgorithmFunctionality.REGRESSION,\n \"mse\": AlgorithmFunctionality.REGRESSION,\n \"l2_root\": AlgorithmFunctionality.REGRESSION,\n \"root_mean_squared_error\": AlgorithmFunctionality.REGRESSION,\n \"rmse\": AlgorithmFunctionality.REGRESSION,\n \"regression_l1\": AlgorithmFunctionality.REGRESSION,\n \"l1\": AlgorithmFunctionality.REGRESSION,\n \"mean_absolute_error\": AlgorithmFunctionality.REGRESSION,\n \"mae\": AlgorithmFunctionality.REGRESSION,\n \"huber\": AlgorithmFunctionality.REGRESSION,\n \"fair\": AlgorithmFunctionality.REGRESSION,\n \"poisson\": AlgorithmFunctionality.REGRESSION,\n \"quantile\": AlgorithmFunctionality.REGRESSION,\n \"mape\": AlgorithmFunctionality.REGRESSION,\n \"mean_absolute_percentage_error\": AlgorithmFunctionality.REGRESSION,\n \"gamma\": AlgorithmFunctionality.REGRESSION,\n \"tweedie\": AlgorithmFunctionality.REGRESSION,\n # binary classification application:\n \"binary\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n # multi-class classification application:\n \"multiclass\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"softmax\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"multiclassova\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"multiclass_ova\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"ova\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"ovr\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n # cross-entropy application\n \"cross_entropy\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n \"xentropy\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n \"cross_entropy_lambda\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n \"xentlambda\": AlgorithmFunctionality.BINARY_CLASSIFICATION,\n # ranking application\n \"lambdarank\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"rank_xendcg\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xendcg\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xe_ndcg\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xe_ndcg_mart\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n \"xendcg_mart\": AlgorithmFunctionality.MULTICLASS_CLASSIFICATION,\n }\n\n # Return unknown if the objective is not in the map and otherwise return its functionality:\n if objective not in objective_to_algorithm_functionality_map:\n raise AlgorithmFunctionality.UNKNOWN\n return objective_to_algorithm_functionality_map[objective]",
"def get_classifier(name, model, param, rand_iter=-1):\r\n assert isinstance(name, str)\r\n if param: # Do grid search only if parameter list is not empty\r\n N_p = np.prod([len(l) for l in param.values()])\r\n if (N_p <= rand_iter) or rand_iter<=0:\r\n logging.info(\"Using grid search for %s\" % name)\r\n model = GridSearchCV(model, param, cv=5, scoring=\"accuracy\",\r\n n_jobs=PROCESSORS)\r\n else:\r\n logging.info(\"Using random search for %s\" % name)\r\n model = RandomizedSearchCV(model, param, cv=5, scoring=\"accuracy\",\r\n n_jobs=PROCESSORS, n_iter=rand_iter)\r\n else:\r\n logging.info(\"Not using grid search for %s\" % name)\r\n return model",
"def obtain_best_model(optimal_weights):\n gnn = NeuralNetwork(optimal_weights)\n gnn.compile_train(5)\n\n gnn.save_accuracy_chart()\n\n gnn.model.save('spy_classifier')",
"def bestalg(self, dimfun):\n if self._bestalg is None:\n self._bestalg = bb.bestalg.generate(self.algds)\n return self._bestalg[dimfun] if dimfun is not None else self._bestalg",
"def generate_best_model(model_name: str,\n X: np.ndarray,\n y: np.ndarray) -> BaseEstimator:\n model_list = {\n \"svm\": BestSVM(),\n \"naive_bayes\": BestNaiveBayes(),\n 'lr': BestLogisticRegression()\n }\n\n model = model_list[model_name]\n return model.fit_best_model(X, y)",
"def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n bestBicScore = float(\"+inf\")\n bestModel = None\n log_n_samples = np.log(sum(self.lengths))\n \n for n_components in range(self.min_n_components,self.max_n_components+1): \n logL = float(\"-inf\")\n bicScore = float(\"+inf\")\n hmm_model = None\n logging.info('BIC: Training word =%s with number of components=%d', self.this_word, n_components)\n \n try :\n hmm_model = GaussianHMM(n_components=n_components, covariance_type=\"diag\", \n n_iter=1000, random_state=self.random_state,verbose=False).fit(self.X, self.lengths)\n logL = hmm_model.score(self.X, self.lengths)\n # Bayesian information criteria: BIC = -2 * logL + p * logN\n # p is number of Free Parameters in the Model\n parameters = n_components * n_components + 2 * len(self.X[0]) * n_components - 1\n bicScore = -2 * logL + parameters * log_n_samples\n if bicScore < bestBicScore:\n logging.debug('BIC: found lower bic score=%f for word =%s with components=%d', bicScore, self.this_word, n_components)\n bestBicScore = bicScore\n bestModel = hmm_model\n \n except RuntimeWarning as rw:\n logging.warning('BIC: RuntimeWarning : %s', rw)\n except ValueError as ve:\n logging.warning('BIC: ValueError : %s', ve) \n \n if bestModel == None:\n return None\n \n logging.info('BIC: returning : best model with BIC score=%f for word=%s with number of components=%d', bestBicScore, self.this_word, bestModel.n_components) \n return bestModel",
"def get_model(opt, device):\n model = CifarConvNet(hidden_dim=512).to(device)\n if opt.mode == \"SVI\":\n return SVIModel(model)\n return model",
"def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)",
"def get_sklearn_model(x):\n if is_sklearn_model(x):\n return x # already a valid model\n elif type(x) is dict:\n if hasattr(x, 'model'):\n return get_sklearn_model(x['model'])\n else:\n return None\n elif type(x) is str:\n # noinspection PyBroadException\n try:\n return get_sklearn_model(eval(x))\n except:\n pass\n return None"
] | [
"0.65354514",
"0.6436559",
"0.62489223",
"0.6159949",
"0.60301775",
"0.60104805",
"0.58534336",
"0.5849753",
"0.56639946",
"0.56572855",
"0.56554496",
"0.56159276",
"0.5615587",
"0.5601147",
"0.5595409",
"0.5568075",
"0.5552408",
"0.5526239",
"0.55151755",
"0.54995584",
"0.5483262",
"0.5481087",
"0.5475688",
"0.5466655",
"0.546424",
"0.54627126",
"0.5457559",
"0.54565954",
"0.54411834",
"0.5422267"
] | 0.7380481 | 0 |
Get a cell's bounding box coordinates | def bounding_box(self, index_or_id):
cell_index = self.grid.insure_index(index_or_id)
left = self.cell_size[0] * cell_index[1] + self.origin[0]
top = self.cell_size[1] * cell_index[0] + self.origin[1]
right = left + self.cell_size[0]
bottom = top + self.cell_size[1]
return (left, top, right, bottom) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))",
"def bounding_box(self):\n latlon00 = self.ij_to_latlon(-1,-1)\n latlon01 = self.ij_to_latlon(-1,self.domain_size[1]+1)\n latlon11 = self.ij_to_latlon(self.domain_size[0]+1,self.domain_size[1]+1)\n latlon10 = self.ij_to_latlon(self.domain_size[0]+1,-1)\n return (latlon00,latlon01,latlon11,latlon10)",
"def get_boundingbox(self):\n tile_iterator = iter(self)\n (coordinate,tile) = next(tile_iterator)\n assert(tile is not None)\n min_x = coordinate[0]\n max_x = min_x + 1\n min_y = coordinate[1]\n max_y = min_y + 1\n\n for (coordinate,tile) in tile_iterator:\n\n if coordinate[0] < min_x:\n min_x = coordinate[0]\n if coordinate[0]+1> max_x:\n max_x = coordinate[0] +1\n if coordinate[1] < min_y:\n min_y = coordinate[1]\n if coordinate[1]+1> max_y:\n max_y = coordinate[1] +1\n\n return ((min_x, min_y), (max_x, max_y))",
"def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds",
"def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds",
"def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)",
"def get_box_coordinates(self):\n return self.box_coordinates",
"def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly",
"def get_box_coordinates(self):\n return QRect(self.box_begin,self.box_end)",
"def getBoundingBox(self):\n lX, lY = self.lX(), self.lY()\n return min(lX), min(lY), max(lX), max(lY)",
"def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection, self.columns, self.rows, self.spacing[0],\n self.spacing[1])\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))",
"def boundingBox(self):\n minx, miny, maxx, maxy = self.substrates.bounds\n return pcbnew.BOX2I(\n pcbnew.VECTOR2I(int(minx), int(miny)),\n pcbnew.VECTOR2I(int(maxx - minx), int(maxy - miny)))",
"def bounding_box(self, grid=1):\n supp = self.support\n grid = [np.linspace(s[0], s[1], grid+1) for s in supp]\n X = self.grid_eval(grid)\n X.shape = (-1, self.dim)\n return tuple((X[:, d].min(), X[:, d].max()) for d in range(self.dim))",
"def bbox(self, idx):\n row = self.table.iloc[idx]\n bbox = row['bbox']\n return bbox",
"def bbox(self):\n return self.get_bounding_box()",
"def bbox(self):\n return np.array(\n [[self.position[0], self.position[1]], [self.position[0], self.position[1]]]\n )",
"def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)",
"def getbbox(self):\r\n img_ = (self._instance > 0)\r\n rows = np.any(img_, axis=1)\r\n cols = np.any(img_, axis=0)\r\n rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))\r\n cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))\r\n return (rmin, rmax, cmin, cmax)",
"def getLocation(bounding_box):\n ymin, xmin, ymax, xmax = bounding_box\n w=1280\n h=720\n left, right, top, bottom = (xmin * w, xmax * w,\n ymin * h, ymax * h)\n ###############################################\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(h, np.floor(bottom + 0.5).astype('int32'))\n right = min(w, np.floor(right + 0.5).astype('int32'))\n # print(label, (left, top), (right, bottom))\n \n return int((left + right) / 2.0), int((top + bottom) / 2.0)\n \n # xlt, ylt, xrb, yrb = bounding_box\n # return int((xlt + xrb) / 2.0), int((ylt + yrb) / 2.0)",
"def bbox(self):\n return self.canvas.bbox(self.boxitem)",
"def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)",
"def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))",
"def get_cell(self, business):\n x = self.longitudes.searchsorted(business.longitude) - 1\n y = self.latitudes.searchsorted(business.latitude) - 1\n return x, y",
"def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])",
"def get_bbox(self) -> Tuple[Vec, Vec]:\n if self.is_brush():\n bbox_min, bbox_max = self.solids[0].get_bbox()\n for s in self.solids[1:]:\n side_min, side_max = s.get_bbox()\n bbox_max.max(side_max)\n bbox_min.min(side_min)\n return bbox_min, bbox_max\n else:\n origin = self.get_origin()\n # the bounding box is 0x0 large for a point ent basically\n return origin, origin.copy()",
"def getbbox(self):\n pass",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if (self.rotation is None and self.magnification is None and\n self.x_reflection is None):\n key = self\n else:\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection)\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))",
"def bbox(self):\n return [self._x0, self._y0, self._x1, self._y1]",
"def boundingRect(self):\n return self.rect().adjusted(-2, -2, 2, 2)"
] | [
"0.7524594",
"0.73899823",
"0.7318858",
"0.72919244",
"0.72919244",
"0.7288416",
"0.72778505",
"0.7215684",
"0.7214053",
"0.7175839",
"0.7171557",
"0.7171143",
"0.7126568",
"0.71247655",
"0.70768714",
"0.70758456",
"0.70705795",
"0.7025306",
"0.7002235",
"0.69995075",
"0.6991971",
"0.6985715",
"0.6983919",
"0.6979015",
"0.69774914",
"0.69630086",
"0.6941163",
"0.69106805",
"0.6908507",
"0.68493706"
] | 0.77145386 | 0 |
Add some status fields, specific to this microservice, to give a clue about selffitnes. This method is called after gathering the base actor status. So changing existing status fields will overwrite the response. | def status(self, status: dict):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_status(self):\n if self.headers['Accept'] != CONTENT_TYPE_STATUS:\n raise NotAcceptable()\n\n body = self.server.status()\n self._write_response(\n 200, body,\n content_type='application/se.novafaen.smrt.status.v1+json'\n )\n self.server.successful_response()",
"def set_status(self, status: HTTPProxyStatus) -> None:\n self._status = status\n self.update_actor_details(status=self._status)",
"def UpdateFromResponse(self, response):\n for key in self.status:\n self.status[key] = response[key]",
"def _do_status(self) -> Dict[str, Any]:\n return {}",
"def _do_status(self) -> Dict[str, Any]:\n return {}",
"def rest_status():\n my_dict = set_status(None)\n return my_dict",
"def extract_status(self, status_headers):\n self.status = status_headers.get_statuscode()\n if not self.status:\n self.status = '-'",
"def status(self, status):\n self._status = status",
"def status(self, status):\n self._status = status",
"def status(self, status):\n self._status = status",
"def status(self, status):\n self._status = status",
"def status(self, status):\n self._status = status",
"def status(self, status):\n self._status = status",
"def status(self, status):\n self._status = status",
"def status(self, **options):\n pass",
"def status(self, additional=[]): # pylint: disable=dangerous-default-value\n self.manager.refresh_client()\n fields = [\"batteryLevel\", \"deviceDisplayName\", \"deviceStatus\", \"name\"]\n fields += additional\n properties = {}\n for field in fields:\n properties[field] = self.content.get(field)\n return properties",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status"
] | [
"0.6502432",
"0.6343856",
"0.61346763",
"0.60640275",
"0.60640275",
"0.6054969",
"0.60468",
"0.6018417",
"0.6018417",
"0.6018417",
"0.6018417",
"0.6018417",
"0.6018417",
"0.6018417",
"0.60125935",
"0.5956768",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556",
"0.58687556"
] | 0.66898763 | 0 |
return infos from a .desktop file | def get_info_desktop(desktopfile):
name, cmd, icon, generic= "", "", "", ""
nameloc = False
geneloc = False
lang = locale.setlocale(locale.LC_ALL, "")[0:2]
with open(desktopfile,'r') as d:
df = d.readlines()
for l in df:
if generic == "" or geneloc == False:
if l.startswith('GenericName[{0}]='.format(lang)):
generic = l.replace('GenericName[{0}]='.format(lang),'').strip()
geneloc = True
elif l.startswith('GenericName='.format(lang)):
generic = l.replace('GenericName='.format(lang),'').strip()
if name == "" or nameloc == False:
if l.startswith('Name[{0}]='.format(lang)):
name = l.replace('Name[{0}]='.format(lang),'').strip()
nameloc = True
elif l.startswith('Name='):
name = l.replace('Name=', '').strip()
if cmd == "":
if l.startswith('Exec='):
cmd = l.replace('Exec=', '').strip()
cmd = cmd.split('%')[0].strip()
if icon == "":
if l.startswith('Icon='):
icon = os.path.splitext(l.replace('Icon=', '').strip())[0]
return(name, cmd, icon, generic) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_info():\r\n app = application.Application()\r\n\r\n app.start(r\"C:\\\\AL50022\\\\Circ\\\\bin\\\\Circ.exe\")\r\n\r\n app.Circ.menu_select(\"View\")",
"def parse(self, file):\n IniFile.parse(self, file, [\"Desktop Entry\", \"KDE Desktop Entry\"])",
"def _parseMediaInfo(self):\n\t\t# the program path to MediaInfo should be set otherwise\n\t\tenv = {'path': env_mediainfo_dir}\n\t\t# the command for MediaInfo is a fixed command\n\t\tcom = [com_mediainfo, '-f', self.name]\n\t\t# invoke the external program\n\t\tproc = externalProcess(com, env)\n\t\t# read the programs output line by line and parse the output to a dictionary, obtaining all information\n\t\tinfo = {}\n\t\tstate = 'start'\n\t\tstream = 0\n\t\tfor line in proc.execute():\n\t\t\tlist = line.split(\":\")\n\t\t\t# recognize the sections ('General','Video','Audio','Text')\n\t\t\tif len(list) == 1 and list[0] != '':\n\t\t\t\tstate = str(list[0].lstrip().rstrip())\n\t\t\t\t# print \"New state: \", state\n\t\t\telif len(list) >= 2 and list[0] != '' and list[1] != '':\n\t\t\t\t# recognize several stream identifier\n\t\t\t\tif str(list[0].lstrip().rstrip()) == 'Stream identifier':\n\t\t\t\t\tstream = int(str(list[1].lstrip().rstrip()))\n\t\t\t\t\tcontinue\n\t\t\t\t# save the information to the dictionary\n\t\t\t\tkey = state + \"_\" + str(stream) + \"_\" + str(list[0].lstrip().rstrip())\n\t\t\t\twhile key in info.keys():\n\t\t\t\t\tkey += \"_\"\n\t\t\t\tinfo[key] = str(list[1].lstrip().rstrip())\n\t\treturn info",
"def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data",
"def getInfo():",
"def lsinfo(name):",
"def process_desktop_entries(menu, dirname, filenames):\n for filename in filenames:\n path = os.path.join(dirname, filename)\n if os.path.isdir(path) or not path.endswith(\"desktop\"):\n continue\n else:\n menu.Feed(path)",
"def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\TvInfo\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data",
"def get_info():\n\n global DISKINFO\n DISKINFO = {}\n\n #Run diskutil list to get disk names.\n runcmd = subprocess.Popen(\"diskutil list -plist\", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n\n #Get the output.\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n global PLIST\n\n PLIST = plistlib.loads(stdout)\n\n #Find the disks.\n for disk in PLIST[\"AllDisks\"]:\n #Run diskutil info to get disk info.\n runcmd = subprocess.Popen(\"diskutil info -plist \"+disk, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n PLIST = plistlib.loads(stdout)\n\n #Check if the disk is a partition.\n disk_is_partition = is_partition(disk)\n\n if not disk_is_partition:\n #These are devices.\n get_device_info(disk)\n\n else:\n #These are Partitions. Fix for disks w/ more than 9 partitions.\n host_disk = \"/dev/\"+disk.split(\"s\")[0]+\"s\"+disk.split(\"s\")[1]\n get_partition_info(disk, host_disk)\n\n #Check we found some disks.\n if not DISKINFO:\n raise RuntimeError(\"No Disks found!\")",
"def show(filepath):\n if os.name == 'mac': subprocess.call(('open', filepath))\n elif os.name == 'nt': os.startfile(filepath)",
"def _get_app_info(self):\n info_plist = None\n\n for data in self.filelist:\n if re.match(self.info_plist_regex, data.filename):\n info_plist = data\n\n if not info_plist:\n self._raise_ipa_error()\n\n info_plist = self.read(info_plist)\n self.app_info = readPlistFromString(info_plist)\n\n return self.app_info",
"def view(args):\n print(\"List of all available phonebooks:\")\n for file in glob.glob(\"*.ph\"):\n print(file)",
"def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in 
('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()",
"def open_h5meta(filepath):\n data = dict()\n h5meta_content = read_h5meta(filepath)\n for file in h5meta_content[\"filelist\"]:\n data[file] = read_detector_data(file)\n\n return data",
"def lsinfo(path):",
"def songInfo():\n \n global songFile, currentRadio\n \n lines = songFile.readlines()\n if len(lines) > 0:\n\n songFile.seek(0)\n title = formatSong(lines[0]).strip()\n \n with canvas(device) as draw:\n invert(draw, 0, 0, names[currentRadio][0], True)\n if len(title)<19:\n draw.text((72-4*(len(title)), 20), title , fill=\"white\")\n else:\n lineNum = len(title)\n if lineNum > 72:\n lineNum = 72\n thelist = [title[i:i+19] for i in range(0, lineNum, 19)]\n for i in range(len(thelist)): \n draw.text((81-4*(len(thelist[i].strip())), 19+10*i), thelist[i] , fill=\"white\")",
"def list_programs():\n return list(INFO)",
"def parseFileInfo(self, file):\n # FileMode, FilesNumber, User, Group, Size, Date, Filename\n item = [f for f in file.split(' ') if f != '']\n \n ftype, size, date, filename = (item[0], item[4], ' '.join(item[5:8]), ' '.join(item[8:]))\n # print(ftype, size, date, filename)\n return (ftype, size, date, filename)",
"def load_info():\n\n infofile = os.path.join(ROOTDIR, 'weirdos.info')\n info = Table().read(infofile, format='ascii')\n\n return info",
"def ask_file(window_title):\n root = tk.Tk()\n root.withdraw()\n media_info_path = os.path.join(os.path.realpath(__file__))\n if ASK_DLL_LOCATION:\n return filedialog.askopenfile(title=window_title).name\n else:\n return os.path.join(os.path.dirname(__file__), 'modules', 'pymediainfo', 'pymediainfo', 'MediaInfo.dll')",
"def hxlinfo():\n run_script(hxlinfo_main)",
"def screeninfo(self):\n\t\tDevice().capture_screenshot()\n\t\tresolution = (self.width, self.height)\n\t\tdroid = AQMdroid('image.png', resolution, self.filename)\n\t\t\n\t\ttry:\n\t\t\tdroid.getorigin()\n\t\texcept Exception as e:\n\t\t\tScriptGen(self.filename).log_checker(self.log_handler)\n\t\t\tScriptGen(self.filename).log_checker(self.generate_log_file)\n\t\t\tprint \"\\nExit Point Triggered.\"\n\t\t\tsys.exit()",
"def info(self, *path):\n target = self.localpath(*path)\n return _open_file_info(target + '.info')",
"def arrange_desktop_files(documentspath,desktoppath):\n\n\tprint \"\\n\\n------------------Arranging Your Desktop-------------------\\n\\n\"\n\n\tfor ext in ext_dict :\n\t\tif ext != 'desktop':\n\t\t\tmake_directory(documentspath,ext[1:].upper())\n\t\t\tfor file in ext_dict[ext]:\n\t\t\t\tprint file , \"----------------------->\" , documentspath+'/'+ext[1:] , '\\n'\n\t\t\t\tmovefile(documentspath+'/'+ext[1:].upper(),file,desktoppath)",
"def get_data():\n info_html = urlopen(\"http://marci1368.getmarci.com\").read()\n div = Soup(info_html, \"html.parser\").find('div', {'id': 'letterbox1'})\n moreinfo = get_itunes_info(div[\"data-artist\"], div[\"data-title\"])\n if not moreinfo:\n return {\"artistName\": div[\"data-artist\"],\n \"trackName\": div[\"data-title\"],\n \"collectionName\": div[\"data-album\"]}\n return moreinfo",
"def fileInfoWin(filePath, *args):\n fileInfo.fileInfo(filePath)",
"def info(self, *path):\n self._download_server_info()\n if self._info:\n return self._info.get(path, {})\n path = list(path)\n path[-1] += \".info\"\n t = self._open(*path)\n if t.status_code == 200:\n return json.loads(t.text)\n else:\n return {}",
"def get_desktop():\n l=get_pids(('kwin','ksmserver',))\n if l: kde=l[0]\n else: kde=None\n l=get_pids(('gnome-session',))\n if l: gnome=l[0]\n else: gnome=None\n if kde:\n if not gnome or kde<gnome: return 1\n else: return 0\n if gnome: return 0\n else: return -1",
"def list_windows():\n proc = subprocess.Popen(['wmctrl', '-lp'], # -l for list, -p include PID\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n windows = []\n for line in out.splitlines():\n info = str(line, encoding='utf8').split()\n # Format expected: ID num PID host title with spaces\n window_id = info[0]\n desktop_num = info[1]\n pid = info[2]\n host = info[3]\n title = ' '.join(info[4:])\n windows.append({\n 'id': window_id,\n 'desktop': desktop_num,\n 'pid': pid,\n 'host': host,\n 'title': title\n })\n\n return windows",
"def organize_desktop():\n logging.info('organize_desktop')\n monitor = get_monitor_layout()\n # pretty.pprint([str(m) for m in monitor])\n avoid_right_monitor = len(monitor) <= 2\n # Lay out windows for my three monitors with centre as the work machine.\n # Roughly in order of left-to-right appearance.\n left_slack_width = monitor[0].width * 0.5\n move_and_restore(exe_match(\"slack.exe\"), monitor[0].topleft(left_slack_width, monitor[0].height))\n move_and_restore(window_class_match(\"Vim\"), monitor[1].topleft(monitor[1].width//2, monitor[1].height))\n # Game and log go here (but they position themselves).\n if avoid_right_monitor:\n move_and_restore(exe_match(\"chrome.exe\"), monitor[0].topright(monitor[0].width - left_slack_width, monitor[0].height))\n # Using Chrome size on terminal doesn't produce the same size window?\n # move_and_restore(exe_match(\"ubuntu.exe\"), monitor[0].x + left_slack_width, monitor[0].y, monitor[0].width - left_slack_width, monitor[0].height - 200)\n move_and_restore(exe_match(\"ubuntu.exe\"), monitor[0].topright(1419, monitor[0].height-50))\n else:\n move_and_restore(exe_match(\"chrome.exe\"), monitor[2].topleft(974, 1080))\n move_and_restore(exe_match(\"ubuntu.exe\"), monitor[2].topright(974, 1087))\n\n\n # Tortoise has lots of windows and they all have the same ahk_exe\n # (TortoiseProc.exe) and ahk_class (#32770). We could do try to match on\n # text inside the window, but the title should be pretty consistent so use\n # that instead.\n if avoid_right_monitor:\n move_and_restore(title_contains(\"Working Copy - TortoiseSVN\"), monitor[0].botright(1395,722))\n else:\n move_and_restore(title_contains(\"Working Copy - TortoiseSVN\", monitor[2].botright(974, 605)))"
] | [
"0.6534352",
"0.60698557",
"0.56144017",
"0.5599252",
"0.5479898",
"0.5462303",
"0.54253316",
"0.5415696",
"0.54109067",
"0.54053926",
"0.5390978",
"0.5381943",
"0.5322215",
"0.5285531",
"0.5283228",
"0.5276574",
"0.5265289",
"0.52510995",
"0.5244691",
"0.523948",
"0.52115786",
"0.52088124",
"0.5207079",
"0.51990455",
"0.5198248",
"0.51958025",
"0.51720625",
"0.5157817",
"0.51568615",
"0.5154825"
] | 0.79934424 | 0 |
Return active company based on user's profile | def get_active_company(request):
from project.models import get_user_profile_ex
profile = get_user_profile_ex(request.user)
try:
company = profile.active_company
except:
company = None
if company is None:
raise Exception('Please select active company in user\'s profile')
return company | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_company_pk(request):\n active_company = get_active_company(request)\n return active_company and active_company.pk or None",
"def get_company_users(self, company_referece, active=True):\n url = 'companies/{0}/users'.format(company_referece)\n if active:\n data = {'status_in_company': 'active'}\n else:\n data = {'status_in_company': 'inactive'}\n result = self.get(url, data)\n return result.get('users', result)",
"def get_company(self, name):\n return self.website.company.id",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_queryset(self):\n return self.request.user.setting_set.get().companies",
"def get_company(self, company_referece):\n url = 'companies/{0}'.format(company_referece)\n result = self.get(url)\n return result.get('company', result)",
"def get_company(self, name):\n return self.store.company.id",
"def get_company(self, name):\n return self.instance.company.id",
"def displayProfile(companyID):\n a = compDF.iloc[companyID].comp_ID\n selectPersonDF = personDF[personDF.comp_ID == a]\n return render_template('profile.html',\n selectcompanyDF=compDF.iloc[companyID],\n selectContactDF=contactDF.iloc[companyID],\n selectPersonDF=selectPersonDF,\n selectID=companyID)",
"def get_companies(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in userCompanies]",
"def company(self):\n return self._company",
"def company(self):\n return self._company",
"def get_company_founded(self):\n return self.company_founded",
"def default_company():\n return Transaction().context.get('company')",
"def get(request, company_id):\n try:\n company = Company.objects.get(pk=company_id)\n\n if not (request.user.company_id == int(company_id) or request.user.is_admin == True):\n raise Exception(\"Fobiden: requesting user doesn't have permission to specified Company.\")\n\n return format_ajax_response(True, \"Company profile retrieved successfully.\", {'company': company.dump_to_dict(True)})\n except Exception as ex:\n logger.error(\"Failed to get: %s\" % ex)\n return format_ajax_response(False, \"There was a problem retrieving the company profile.\")",
"def get_available_companies_and_people(team):",
"def get_companies_and_people(team):",
"def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs",
"def _go_company_site(self, linkedin_company_profile_url):\n self.driver.get(linkedin_company_profile_url)",
"def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active",
"def rol_capooti():\n username = 'roland.capooti'\n if Profile.objects.filter(username=username).count() == 1:\n return Profile.objects.get(username=username)\n else:\n return G(Profile, first_name='Roland', last_name='Capooti',\n username=username, password=make_password('test'),\n email='[email protected]')",
"def users_organizations(user):\n if not user or not user.is_authenticated():\n return None\n else:\n return get_users_organizations(user)",
"def get_available_companies(team):",
"def get_company(self, cmp):\n if cmp in self.cnames:\n return self.cnames[cmp]\n else:\n return None",
"def get_company_affiliation(order):\n redemption = CouponRedemption.objects.filter(order=order).last()\n if redemption:\n return redemption.coupon_version.payment_version.company\n return None",
"def current_profile(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect(reverse('login'))\n return profile(request, request.user.username)",
"def current_profile(request):\n if not request.user.is_authenticated():\n return HttpResponseRedirect(reverse('login'))\n return profile(request, request.user.username)",
"def current_user(email):\n for user in Data.users:\n if email == user['email']:\n return user"
] | [
"0.6089149",
"0.6082637",
"0.6071943",
"0.5895015",
"0.5895015",
"0.5895015",
"0.5895015",
"0.5884712",
"0.58581364",
"0.5846359",
"0.57815146",
"0.57746136",
"0.5733336",
"0.5733336",
"0.56848305",
"0.5656476",
"0.5638435",
"0.55426466",
"0.5528021",
"0.5509426",
"0.54802394",
"0.5428486",
"0.54282594",
"0.54246783",
"0.5414318",
"0.54131997",
"0.53888196",
"0.53838086",
"0.53838086",
"0.5363875"
] | 0.82013434 | 0 |
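A minimal usage sketch for the get_active_company helper in the row above, assuming it is called from Django view code; the wrapper name below is invented for illustration and simply turns the helper's raised Exception into a None result:

def active_company_or_none(request):
    # get_active_company (row above) raises a plain Exception when the
    # profile has no active company selected; treat that as "not chosen yet".
    try:
        return get_active_company(request)
    except Exception:
        return None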
Return active company pk based on user's profile | def get_active_company_pk(request):
active_company = get_active_company(request)
return active_company and active_company.pk or None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_company(request):\n from project.models import get_user_profile_ex\n profile = get_user_profile_ex(request.user)\n try:\n company = profile.active_company\n except:\n company = None\n if company is None:\n raise Exception('Please select active company in user\\'s profile')\n return company",
"def parish(object):\n parish_id = object.env.user.company_id.id\n return parish_id",
"def get_primary_for(userid):",
"def get_primary_for(userid):",
"def customer_profile_oid(self):\n return self._customer_profile_oid",
"def get_company(self, name):\n return self.instance.company.id",
"def get_company(self, name):\n return self.website.company.id",
"def get_company(self, name):\n return self.store.company.id",
"def displayProfile(companyID):\n a = compDF.iloc[companyID].comp_ID\n selectPersonDF = personDF[personDF.comp_ID == a]\n return render_template('profile.html',\n selectcompanyDF=compDF.iloc[companyID],\n selectContactDF=contactDF.iloc[companyID],\n selectPersonDF=selectPersonDF,\n selectID=companyID)",
"def get_subscription_owner(request, profile):\n return profile.km_user.user",
"def get_user_primary_key(self, request):\r\n try:\r\n return request.user.pk\r\n except AttributeError:\r\n return ''",
"def get_company_id_value(self):\n return self.company_id_value",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(\n km_user__profile__pk=self.kwargs.get(\"pk\")\n )",
"def get_userid_profile(db, user_id):\n return db['user'].find_one({'_id': user_id})",
"def get_profile_id(self, profile):\n return profile['id']",
"def phone_primary(self, instance):\r\n return instance.user.profile.phone_primary",
"def get_current_user_id():\n user = get_current_user()\n return user.pk if user and user.is_authenticated else None",
"def get_companies(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in userCompanies]",
"def get_company_affiliation(order):\n redemption = CouponRedemption.objects.filter(order=order).last()\n if redemption:\n return redemption.coupon_version.payment_version.company\n return None",
"def get_profile(request):\n p_obj = Profile.objects.filter(hashid=request.session.get('profile', '-'))\n if len(p_obj):\n return p_obj[0]\n else:\n return None",
"def get_company_id_label(self):\n return self.company_id_label",
"def get_client_company_id(self):\n output = False\n value_out = False\n\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_com_link_jobs_TBL ' \\\n u'WHERE job_ID_year = %s ' \\\n u'AND job_ID_number = %s;'\n\n data = (self.job_number_sql[0], self.job_number_sql[1])\n\n c, conn = connection(self.company_schema)\n try:\n c.execute(sql, data)\n values = c.fetchone()\n\n if values is not None:\n value_out = values[0]\n output = True\n finally:\n conn_close(c, conn)\n\n return output, value_out",
"def get_company_id_parameter(self):\n company_id_field = self.view.company_id_field\n if company_id_field:\n return Parameter(\n name=company_id_field,\n in_='query',\n type='integer',\n description='The ID of the company to manage',\n required=True,\n )\n\n return None",
"def default_company():\n return Transaction().context.get('company')",
"def get_p_id(self, _df):\n p_ids = _df[self.PROFILE_ID_COL].unique().tolist()\n assert len(p_ids) == 1, 'More than one profile given in get_p_id()!'\n return p_ids[0]",
"def active_id(self):\n return self._active_id",
"def organization_current_get(request):\n if request.organization:\n return request.organization.slug\n else:\n return None",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs.get(\"pk\"))",
"def get_subscription_owner(request, profile_item):\n return profile_item.topic.profile.km_user.user",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs[\"pk\"])"
] | [
"0.7610169",
"0.6473127",
"0.6097654",
"0.6097654",
"0.6062279",
"0.58767855",
"0.5866891",
"0.5841073",
"0.57657206",
"0.575413",
"0.57436687",
"0.56769633",
"0.5620573",
"0.5589516",
"0.55817324",
"0.5542347",
"0.5498362",
"0.54776585",
"0.54737616",
"0.54366016",
"0.54047483",
"0.5376392",
"0.5372856",
"0.5364144",
"0.53429127",
"0.5322507",
"0.53173447",
"0.53091073",
"0.52751225",
"0.5260196"
] | 0.7791624 | 0 |
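A quick plain-Python note on the `active_company and active_company.pk or None` idiom in the row above: it behaves like a conditional expression except when pk itself is falsy. The tiny stand-in class below is invented purely to make the comparison runnable:

class _Company:
    pk = 7

active = _Company()
pk_idiom = active and active.pk or None         # 7; would become None if pk were 0
pk_conditional = active.pk if active else None  # 7; would still be 0 if pk were 0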
Generate a suitable invoice number for given object; | def generate_next_invoice_number(obj):
queryset = obj.__class__.objects.filter(year=obj.year, company=obj.company)
max = queryset.aggregate(Max('number')).values()[0]
if max is None:
max = 0
return (max + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_invNo(self, obj):\n return str(obj.invDate.year) + str(obj.id)",
"def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)",
"def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv",
"def next_invoice_number(cls, user):\n cur_max = cls.query.filter_by(user_id=user.id).count()\n cur_max += 1\n\n return str(cur_max)",
"def invoice(self, reference_no=None, with_vat=True):\n\n return self.invoice_class(apiobj=self, reference_no=reference_no)",
"def onchange_invoice_id(self):\n # self.invoice_id = False\n # self.base_amount = 0.0\n # self.wh_src_rate = 5.0\n if self._context is None:\n context = {}\n res = {}\n inv_obj = self.env['account.invoice']\n if not self.invoice_id:\n return {'value': {\n 'invoice_id': False,\n 'base_amount': 0.0,\n 'wh_src_rate': 0.0,\n 'wh_amount': 0.0, }\n }\n\n inv_brw = inv_obj.browse(self.invoice_id.id)\n base_amount = self.base_amount or inv_brw.amount_untaxed\n wh_src_rate = self.wh_src_rate or inv_brw.wh_src_rate or 5.0\n wh_amount = base_amount * wh_src_rate / 100.0\n res = {'value': {\n 'base_amount': base_amount,\n 'wh_src_rate': wh_src_rate,\n 'wh_amount': wh_amount,\n }\n }\n return res",
"def computed_identifier(o):\n\n pfx = vmc_model_prefixes[type(o)]\n dig = digest(o)\n accession = \"{pfx}_{dig}\".format(pfx=pfx, dig=dig)\n ir = models.Identifier(namespace=namespace, accession=accession)\n return ir",
"def invoice(self, start, end):\n\n if self.invoice_type is None:\n invoice_type = self.conn.config[\"main\"][\"invoice:object\"]\n if \":\" not in invoice_type:\n raise AttributeError(\"Invoice configuration incorrect! %s\" % invoice_type)\n module, call = invoice_type.split(\":\")\n _package = __import__(module, globals(), locals(), [ call ])\n\n funct = getattr(_package, call)\n self.invoice_type = funct\n config = self.conn.config[\"invoice_object\"]\n invoice = self.invoice_type(self, config)\n return invoice",
"def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())",
"def get_document_number(self, txt_line, inv_type):\n number = 0\n if txt_line.invoice_id.type in ['in_invoice', 'in_refund']:\n if not txt_line.invoice_id.supplier_invoice_number:\n raise exceptions.except_orm(\n _('Invalid action !'),\n _(\"Unable to make txt file, because the bill has no\"\n \" reference number free!\"))\n else:\n number = self.get_number(\n txt_line.invoice_id.supplier_invoice_number.strip(),\n inv_type, 20)\n elif txt_line.invoice_id.number:\n number = self.get_number(\n txt_line.invoice_id.number.strip(), inv_type, 20)\n return number",
"def build_invoice(payment_object):\n # Fill html template with the domain orders and user profile info\n html_template = get_template('billing/billing_invoice.html')\n rendered_html = html_template.render({\n 'payment': payment_object,\n 'user_profile': payment_object.owner.profile,\n })\n # Create pdf file from a html file\n pdfkit.from_string(rendered_html, '/tmp/out.pdf')\n with open(\"/tmp/out.pdf\", \"rb\") as pdf_file:\n pdf_raw = pdf_file.read()\n os.remove(\"/tmp/out.pdf\")\n return {\n 'body': pdf_raw,\n 'filename': 'invoice_{}.pdf'.format(payment_object.transaction_id),\n }",
"def object_for(objectid):",
"def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice",
"def _generate_invoice_report(self, request, queryset):\n logger.info('Generating invoice report for model {}'.format(\n queryset.model\n ))\n data = self._get_report_data(request, queryset)\n content = self._get_pdf_content(data)\n file_name = '{}-{}.pdf'.format(\n self._invoice_report_name, data['id'],\n )\n return generate_pdf_response(content, file_name)",
"def _generate_order_number(self):\n return uuid.uuid4().hex.upper()",
"def _generate_order_number(self):\n return uuid.uuid4().hex.upper()",
"def _generate_order_number(self):\n return uuid.uuid4().hex.upper()",
"def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice",
"def invoice(self, id):\r\n return Invoice(self, id)",
"def create_bill_pdf(obj):\n data = {\n 'today': datetime.date.today(),\n 'amount': obj.price,\n 'customer_name': obj.company.company_name,\n 'order_id': obj.pk,\n }\n pdf = render_to_pdf('pdf/invoice.html', data)\n filename = obj.company.company_name + '_' + obj.promotion.campaign_name + '_' + \\\n datetime.datetime.now().strftime(\"%Y-%m-%d\") + '.pdf'\n obj.bill.save(filename, File(io.BytesIO(pdf.content)))",
"def genAccountNo():\n from random import randint\n\n accountNumber = randint(0000000000, 9999999999)\n return accountNumber",
"def test_get_invoice(self):\n invoice = Invoice(self.client, 123, {})\n\n self.assertEqual(invoice.date, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(invoice.id, 123)\n self.assertEqual(invoice.label, \"Invoice\")\n self.assertEqual(invoice.subtotal, 120.25)\n self.assertEqual(invoice.tax, 12.25)\n self.assertEqual(invoice.total, 132.5)\n self.assertIsNotNone(invoice.tax_summary)",
"def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res",
"def generate_nik(self, vals):\n\n seq_obj = self.env['ir.sequence']\n res = ''\n\n # Internship and outsource has no Employee Identification Number\n if vals.get('internship') or vals.get('outsource'):\n return\n\n if vals.get('contract_type') == '1':\n # PKWTT Monthly/Daily\n res = seq_obj.with_context(ir_sequence_code_1='1').next_by_code('hr_indonesia.nik')\n elif vals.get('contract_type') == '2' and vals.get('contract_period') == '1':\n # Contract / PKWT Montly\n res = seq_obj.with_context(ir_sequence_code_1='2').next_by_code('hr_indonesia.nik_pkwt_monthly')\n else:\n return\n return res",
"def print_invoice(request, invoice_number):\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n s_gst_val = float(sub_total) * (float(data.s_gst) / 100)\n c_gst_val = float(sub_total) * (float(data.c_gst) / 100)\n\n data.addressed_to = data.addressed_to.replace(\"\\n\", \"<br>\")\n\n return render(request,\n \"invoice/invoice_print.html\",\n {\n \"data\": data,\n \"sub_total\": sub_total,\n \"s_gst_value\": s_gst_val,\n \"c_gst_value\": c_gst_val\n })",
"def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_id:\n raise UserError(_('Please define an accounting purchase journal for this company.'))\n invoice_vals = {\n 'name': self.partner_ref or '',\n 'origin': self.name,\n 'type': 'in_invoice',\n 'account_id': self.partner_id.property_account_payable_id.id,\n 'partner_id': self.partner_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.currency_id.id,\n 'comment': self.notes,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'purchase_id': self.id,\n 'date_invoice':pytz.utc.localize(datetime.datetime.now()).astimezone(local).strftime('%Y-%m-%d'),\n }\n return invoice_vals",
"def gen_call_number(self, serializer):\n data = serializer.validated_data\n original_data = self.request.data\n subject = data['subject']\n author_id = original_data['authors'][0]\n publication_date = data['publication_date'].year\n code = Constant.BOOK_SUBJECT_CODE[subject]\n author = Author.objects.get(id=author_id)\n ret = '{}{}{}.{}{} {}'.format(code,\n str(random.randint(1, 9999)).ljust(4, '0'),\n chr(random.randint(65, 90)),\n author.name[0].upper(),\n str(random.randint(1, 999)).ljust(3, '0'),\n publication_date)\n return {'call_number': ret}",
"def get_invoice(self):\n\n # Check if unclosed invoice for the client exists\n old_inv = connection.Kinko.find_one({'cl': self.cl, 'tid': None,\n 'typ': TYPE_MAP[self.tab_type]})\n\n inv_num = None\n # If it does, update its values and update packages\n if old_inv:\n old_inv.dt = datetime.datetime.today()\n old_inv.range.lt = self.q_dict[\"cs.sd\"].get(\"$lt\", None)\n old_inv.save()\n\n inv_num = old_inv.num\n\n else:\n #kinko dict to be updated in Kinko Collection.\n kdict = {\n \"amt\": 0.0,\n \"cl\": unicode(self.cl),\n \"dt\": datetime.datetime.today(),\n \"typ\": TYPE_MAP[self.tab_type],\n \"range\": {\"lt\": self.q_dict[\"cs.sd\"].get(\"$lt\", None),\n \"gt\": self.q_dict[\"cs.sd\"].get(\"$gte\", None),\n }\n }\n\n k = Kinko(kdict)\n\n k_count = 1\n\n #the get num method of Kinko model generates the unique no for new kinko\n k[\"num\"] = self.get_knum(1)\n while connection.Kinko.collection.find({\"num\": k.num}).count() > 0:\n k[\"num\"] = self.get_knum(k_count+1)\n k_count += k_count\n\n connection.Kinko(k).save()\n\n inv_num = k['num']\n\n if inv_num:\n #after creating a new document in Kinko all packages are updated.\n connection.Package.collection.update(self.q_dict, {'$set': {'inv.num': inv_num}}, safe=True, multi=True)\n \n #Aggrigation of remitted amount for requested client\n non_invoiced = kinko_map_reduce(inv_num, TYPE_MAP[self.tab_type])\n\n if len(non_invoiced) == 0:\n return False\n else:\n inv = connection.Kinko.find_one({'num': inv_num})\n if inv:\n inv.amt = non_invoiced[0]['value']['amt']\n inv.save()\n return inv\n else:\n return False\n else:\n return False",
"def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,",
"def createCustomerID(self):\n\n customerID = self._df_invoice_original.CustomerID.max()\n customerID += 1\n return int(customerID)"
] | [
"0.7300732",
"0.6198336",
"0.6160349",
"0.6044071",
"0.60314023",
"0.60062176",
"0.5938249",
"0.5898966",
"0.5887895",
"0.5809442",
"0.5789716",
"0.57646215",
"0.573949",
"0.5655809",
"0.5655787",
"0.5655787",
"0.5655787",
"0.55716646",
"0.5546775",
"0.5536652",
"0.5502589",
"0.5495982",
"0.5476719",
"0.54725367",
"0.54489404",
"0.54429775",
"0.5437506",
"0.543438",
"0.54091245",
"0.54036385"
] | 0.72478 | 1 |
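The row above reads the aggregate with `.values()[0]`, which only works where dict values support indexing (Python 2). Below is a hedged Django sketch of the same max-plus-one numbering that reads the aggregate by its default key instead; the Invoice-like model with year/company/number fields is an assumption, and concurrent saves can still race without a unique constraint or row locking:

from django.db.models import Max

def next_invoice_number(invoice):
    # Same per-(year, company) sequence idea as the row above; 'number__max'
    # is the default alias Django gives to Max('number').
    qs = invoice.__class__.objects.filter(year=invoice.year, company=invoice.company)
    current_max = qs.aggregate(Max('number'))['number__max'] or 0
    return current_max + 1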
Reconstructs a list of contour coordinates from the fourier descriptors. Takes the length of the original contours to know how much to pad. | def reconstruct(descriptors, length):
padded = pad_descriptors(descriptors, length) # Pad descriptors
inversed = np.fft.ifft(padded) # Inverse Fourier transform
reconstructed = np.rint(np.column_stack((inversed.real, inversed.imag))).astype('int') # Convert to coordinates
return reconstructed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pad_descriptors(descriptors, length):\n \n padded = np.zeros(length, dtype='complex')\n degree = len(descriptors)\n descriptors = np.fft.fftshift(descriptors)\n \n center_index = length / 2\n left_index = center_index - degree / 2 # Left index always round down\n right_index = int(round(center_index + degree / 2.0)) # Right index rounded up \n \n padded[left_index:right_index] = descriptors\n padded = np.fft.ifftshift(padded)\n return padded",
"def get_descriptors(image,filtered_coords,wid=5):\n desc = []\n for coords in filtered_coords:\n patch = image[coords[0]-wid:coords[0]+wid+1,\n coords[1]-wid:coords[1]+wid+1].flatten()\n desc.append(patch)\n return desc",
"def find_descriptors(contours, remove_dc=False):\n contour_complex = np.empty(contours.shape[0], dtype=complex)\n contour_complex.real = contours[:, 0]\n contour_complex.imag = contours[:, 1]\n\n #We can optionally remove the DC component (perhaps not the right place here)\n if remove_dc:\n contour_complex -= np.mean(contour_complex) \n\n # Do we need to re-sample here so we can compare shapes\n # with different sized contours?\n\n descriptors = np.fft.fft(contour_complex)\n return descriptors",
"def convert_coor_im2decart(pts, w, h):\n return [[x, h-1-y] for x, y in pts]",
"def GetSubContoursByFrame(watershed, allValsByFrame):\n scListByFrame = []\n for frame in range(len(watershed)):\n scList = []\n for v in allValsByFrame[frame]:\n boundingRect = ImageContour.GetBoundingRect(watershed[frame], v)\n # No longer needed: #contour,turns,vals = ImageContour.GetContour(watershed[0],v,boundingRect=boundingRect,byNeighbor=True)\n (\n perimeterVals,\n perimeterList,\n scPoints,\n ) = ImageContour.GetPerimeterByNeighborVal(\n watershed[frame], v, boundingRect=boundingRect, getSubContours=True\n )\n scPointsAdj = [\n (np.array(scp) + [boundingRect[0][0], boundingRect[1][0]]).tolist()\n for scp in scPoints\n ] # Will need to - 0.5 to line up on an overlay\n if len(perimeterList) > 0:\n scList += [\n SubContour(\n points=scPointsAdj[i],\n numPoints=len(scPointsAdj[i]),\n adjusted_length=perimeterList[i],\n values=tuple(sorted([v, perimeterVals[i]])),\n startPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][0]\n ),\n endPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][-1]\n ),\n )\n for i in range(len(perimeterVals))\n ]\n scList.sort(key=lambda x: x.values)\n for i in range(len(scList) - 1, 0, -1):\n # if 2 subcoutours are the same, keep only the one with the minimum length computation\n if scList[i - 1].values == scList[i].values:\n scList[i - 1].adjusted_length = min(\n scList[i - 1].adjusted_length, scList[i].adjusted_length\n )\n del scList[i]\n scListByFrame.append(scList)\n return scListByFrame",
"def extract(self, files):\n for i in range(len(files)):\n print(files[i])\n img = cv2.imread('{}/{}'.format('{}/{}/{}'.format(DIR_2DST_Mask, self.patient, self.plan), files[i]), 0)\n\n \"\"\"\n Find the indices of array elements that are non-zero, i.e,\n find the pixels' positions that represents the respiratory\n functions (pixels in the respiratory function are brighter).\n \"\"\"\n color_pts = np.argwhere(img > 70)\n\n \"\"\"\n Sorts the pixels according to their x coordenate.\n Obs: np.argwhere inverts x and y, it's like (y, x), because of it,\n the parameter of itemgetter is 1 (to get x coordinate)\n \"\"\"\n lcolor_pts = sorted(color_pts.tolist(), key=itemgetter(1))\n\n \"\"\"\n If there is no pixel representing the respiratory function\n (i.e., lighter pixel) it creates an empty image (without any\n respiratory function)\n \"\"\"\n if len(lcolor_pts) == 0:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open(\n # '{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], []))\n # file.close()\n\n continue\n\n # Reverse the coordinates and store the result in lordered_pts list\n lordered_pts = []\n for j in range(len(lcolor_pts)):\n lordered_pts.append(lcolor_pts[j][::-1])\n\n \"\"\"\n Convert pixels coordinates into a tuples and check which column\n has pixels that corresponding to diaphragmatic level\n Obs. There are some columns that doesnt have any pixel that\n correpond to diaphragmatic level.\n \"\"\"\n # Columns that have a pixel corresponding diaphragmatic level\n lcolumn_available = []\n for j in range(len(lordered_pts)):\n lordered_pts[j] = tuple(lordered_pts[j])\n lcolumn_available.append(lordered_pts[j][0])\n lcolumn_available = list(set(lcolumn_available))\n # print(\"Ordered points: \", lordered_pts)\n # print(\"Columns available: \", lcolumn_available)\n\n \"\"\"\n If there is not enough columns to build a respiratory pattern,\n create a blank image\n \"\"\"\n if len(lcolumn_available) < 20:\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n continue\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n first column, assign to it the value of the second y coordinate\n \"\"\"\n if lcolumn_available[0] is not 0:\n y = max(\n [x for x in lordered_pts if x[0] == lcolumn_available[0]],\n key=itemgetter(1))[1]\n lordered_pts.insert(0, (0, y))\n lcolumn_available.insert(0, 0)\n\n \"\"\"\n If there are no pixel that corresponding diaphragmatic level in the\n last column, assign to it the value of the penultimate y coordinate\n available\n \"\"\"\n if lcolumn_available[-1] is not 49:\n lordered_pts.append(\n (49, lordered_pts[len(lcolumn_available)][1]))\n lcolumn_available.append(49)\n\n \"\"\"\n Get the biggest y value in each column that represents the\n diaphragmatic level\n \"\"\"\n column = 0\n lcolumn = []\n ldiaphragm_pts = []\n for j in range(50):\n # Get the column's points\n lcolumn = [x for x in lordered_pts if x[0] == column]\n # print('{}: {}'.format(j, lcolumn))\n\n if len(lcolumn) > 0:\n ldiaphragm_pts.append(\n max(lcolumn, key=itemgetter(1))) # Get the biggest y\n else:\n # Get the y value from the previous column\n lcolumn_available.insert(column, column)\n ldiaphragm_pts.append((column, ldiaphragm_pts[-1][1]))\n column += 1\n lcolumn 
= []\n\n # Draw diaphragmatic level\n diaphragmatic_lvl = np.zeros((256, 50, 3), np.uint8)\n j = 0\n while(j < len(lcolumn_available) - 1):\n cv2.line(\n diaphragmatic_lvl,\n ldiaphragm_pts[j], ldiaphragm_pts[j + 1],\n (0, 0, 255), 1)\n j = j + 1\n\n lcolumn_available = []\n\n print(\"Diaphragmatic's points: \", ldiaphragm_pts)\n cv2.imshow('Diaphragmatic level', diaphragmatic_lvl)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('{}/{}/{}/{}'.format(\n DIR_2DST_Diaphragm, patient, plan, files[i]), diaphragmatic_lvl)\n\n # file = open('{}/{}/{}/points.txt'.format(DIR_2DST_Diaphragm, self.patient, self.plan), 'a')\n # file.write(\"{}:{}\\n\".format(files[i], ldiaphragm_pts))\n # file.close()\n\n # return ldiaphragm_pts",
"def truncate_descriptor(descriptors, degree):\n \n descriptors = np.fft.fftshift(descriptors)\n center_index = len(descriptors) / 2\n descriptors = descriptors[\n center_index - degree / 2:center_index + degree / 2]\n descriptors = np.fft.ifftshift(descriptors)\n return descriptors",
"def footprint_corner_indices():",
"def get_integral_descriptors(image,filtered_coords,wid=5):\n desc = []\n for coords in filtered_coords:\n patch = image[coords[0]-wid:coords[0]+wid+1,\n coords[1]-wid:coords[1]+wid+1]\n desc.append(patch)\n return desc",
"def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4",
"def _pad(input_data):\n h = input_data['img'].shape[0]\n w = input_data['img'].shape[1]\n max_size = max([int(np.sqrt(np.power(h, 2) + np.power(w, 2))),\n int(w + h * np.cos(11 / 36)),\n int(max(h, w) * 2)\n ]) + 5\n\n up = (max_size - h) // 2\n down = max_size - up - h\n left = (max_size - w) // 2\n right = max_size - left - w\n\n input_data['img'] = np.pad(input_data['img'], ((up, down), (left, right), (0, 0)), mode='constant')\n\n if DataAugmentor._is_synthtext(input_data):\n input_data['contour'][0] = list(\n map(lambda x: np.stack([x[:, :, 0] + up, x[:, :, 1] + left], axis=-1), # x: np.array(n,1,2)\n input_data['contour'][0]))\n input_data['contour'][1] = list(\n map(lambda x: np.stack([x[:, :, 0] + up, x[:, :, 1] + left], axis=-1), # x: np.array(n,1,2)\n input_data['contour'][1]))\n else:\n input_data['contour'] = list(\n map(lambda x: np.stack([x[:, :, 0] + up, x[:, :, 1] + left], axis=-1), # x: np.array(n,1,2)\n input_data['contour']))\n\n input_data['center_point'] = list(\n map(lambda x: (x[0] + up, x[1] + left),\n input_data['center_point']))\n return input_data",
"def getContours(img,iteration):\n nP, IDrange = upDate(iteration)\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n retvalth, imgthreshold = cv2.threshold(imgray, 50, 255, cv2.THRESH_BINARY)\n kernel = np.ones((nP, nP), np.uint8)\n imgdilation = cv2.dilate(imgthreshold, kernel, iterations=2)\n contours = []\n # two vertion of cv2 for findcontours-> (old vertion): imgcontours, contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n #contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n if iteration == 2 :\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n elif iteration == 3:\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n ##imgcontours, contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n return contours",
"def extractFeatures(bwimage):\n \n \n # circularity\n img = bwimage.copy()\n img1, contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n \n if len(contours)==0:\n return []\n B = contours[0]\n C = B[:,0,0]\n l = C.size\n \n \n if abs(B[0,0,0] - B[l-1,0,0]) + abs(B[0,0,1] - B[l-1,0,1]) == 2:\n P8 = math.sqrt(2)\n else:\n P8 = 1 \n for j in range(0,l-1): \n if abs((B[j+1,0,0] - B[j,0,0])) + abs(B[j+1,0,1] - B[j,0,1]) == 2:\n P8 = P8 + math.sqrt(2)\n else:\n P8 = P8 + 1\n \n n = np.count_nonzero(bwimage)\n \n circularity = P8*P8/n\n \n \n # elongation\n idx = np.nonzero(bwimage);\n c = idx[1]\n r = idx[0]\n meanx = np.mean(c)\n meany = np.mean(r)\n \n \n pows = 2*np.ones(n)\n \n sigxx = np.sum(np.power((c-meanx),pows))/n\n sigyy = np.sum(np.power((r-meany),pows))/n\n sigxy = np.sum(np.multiply((r-meany),(c-meanx)))/n\n \n covMat = np.array([[sigxx, sigxy], [sigxy, sigyy]])\n val, vects = np.linalg.eig(covMat);\n \n maxEigenValue = np.amax(val) \n minEigenValue = np.amin(val.ravel()[np.flatnonzero(val)])\n \n \n elongation = math.sqrt(maxEigenValue/minEigenValue);\n \n \n # principal axis\n maxidx = np.argmax(val)\n principalAxisVector = vects[maxidx]\n \n \n return [circularity, elongation, principalAxisVector]",
"def GetContourValuesLengthsAndSubContoursByFrame(watershed, allValsByFrame):\n return [\n [sc.cVLS() for sc in scList]\n for scList in GetSubContoursByFrame(watershed, allValsByFrame)\n ]",
"def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)",
"def contour2points(contours):\n return contours.reshape(-1, 2)",
"def sort(contours):\n\treturn sorted(contours, reverse=True, key=len)",
"def splitContours(contours):\n split_contours = []\n for contour in contours:\n c = contour.reshape(-1, 2)\n line_segments = splitLine(c)\n for seg in line_segments:\n # Turn it back to its original shape, so we can add it back to contours\n new_contour = seg.reshape(-1,1,2)\n # Dont add empty contours to our new list of contours\n if new_contour.size != 0:\n split_contours.append(new_contour)\n\n return split_contours",
"def get_corners_from_contours(contours, corner_amount=4):\n\tcoefficient = .05\n\tepsilon = coefficient * cv2.arcLength(contours, True)\n\n\twhile True:\n\t\t# print(contours)\n\t\tprint(\"epsilon:\", epsilon)\n\n\t\tpoly_approx = cv2.approxPolyDP(contours, epsilon, True)\n\t\t\n\t\t#Выпуклая оболочка, описывающая точки poly_approx\n\t\thull = cv2.convexHull(poly_approx)\n\t\tif len(hull) == corner_amount:\n\t\t\treturn hull\n\t\telse:\n\t\t\tif len(hull) > corner_amount:\n\t\t\t\tcoefficient += .01\n\t\t\telse:\n\t\t\t\tcoefficient -= .01\n\t\tepsilon = coefficient * cv2.arcLength(contours, True)\n\t\tif epsilon < 0: return hull",
"def get_position_patterns(contours, hierarchy):\n found = []\n for i in range(len(contours)):\n k = i\n c = 0\n while hierarchy[k][2] != -1:\n k = hierarchy[k][2]\n c += 1\n if c >= 5:\n found.append(i)\n return found",
"def Get_Label_Features(mask_in, feature_dict, convert_length = 0.2204315, eps_factor = 0.025, area_thresh = 2): \n\n nfeatures = len(feature_dict)\n Image_Features = pd.DataFrame({'Type':[],'Feature_Area':[], 'x':[], 'y':[]})\n Contours_List = []\n\n # Expand mask into one-hot mask if input is flat\n if len(mask_in.shape)==2:\n mask_in = Expand_Mask(mask_in, feature_dict = feature_dict)\n \n # Loop through mask layers (i.e., feature types) and calculate contours \n for i in range(nfeatures):\n Contours_List.append(list()) \n for ii in feature_dict.keys():\n nii = int(ii)\n mask = mask_in[:,:,nii] \n mask = 255*mask.round().astype('uint8')\n mask = np.stack((mask,mask, mask),-1)\n mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY);\n ret, thresh = cv2.threshold(mask, 127.5, 255, cv2.THRESH_BINARY)\n contours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n # arcLength args: Contours, flag of whether curve is closed or not\n epsilon = eps_factor*cv2.arcLength(cnt,True)\n # approxPolyDP args: Contours, epsilon for wiggliness, closed shape or not\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n # Extract area and convert to square meters \n area = convert_length**2 * cv2.contourArea(approx)\n if area > area_thresh: ## Filter small features / noise\n ## Compute centroid from moments\n M = cv2.moments(cnt)\n cx = int(M['m10']/(1e-5 + M['m00']))*convert_length\n cy = int(M['m01']/(1e-5 + M['m00']))*convert_length\n Image_Features = Image_Features.append({'Type':feature_dict[ii], 'Feature_Area':area, \n 'x':cx, 'y':cy}, ignore_index = True)\n Contours_List[nii].append(cnt)\n return Contours_List, Image_Features.copy()",
"def crop_binaries(list_of_binary_images):\n lst_cropped_binary = []\n replacement_columns = np.zeros((723,270),dtype=int)\n for i in list_of_binary_images:\n if sum(i[:,0]) != 0: #if spiral starts left side remove some and add empty space\n new_binary = i[:,270:]\n new_binary = np.append(replacement_columns,new_binary,axis=1)\n lst_cropped_binary.append(new_binary.astype(\"uint8\"))\n if sum(i[:,0]) == 0:\n new_binary = i[:,:(1129-270)]\n new_binary = np.append(new_binary,replacement_columns,axis=1)\n lst_cropped_binary.append(new_binary.astype(\"uint8\"))\n return lst_cropped_binary",
"def detect_contours(self):\r\n (contours, _) = cv2.findContours(self.image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n return [DbugContour(cv_contour=contour) for contour in contours]",
"def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. 
\r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic",
"def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]",
"def extract_dl(driving_log_path):\r\n entries = []\r\n with open(driving_log_path) as csv_file:\r\n reader = csv.reader(csv_file)\r\n for entry in reader:\r\n entries.append(entry)\r\n empty_lists = [[] for i in range(7)]\r\n center_images, left_images, right_images, steerings, throttles, brakes, speeds = empty_lists\r\n for entry in entries:\r\n center_image_path, left_image_path, right_image_path = (entry[0], entry[1], entry[2])\r\n steering = float(entry[3])\r\n throttle = float(entry[4])\r\n brake = float(entry[5])\r\n speed = float(entry[6])\r\n center_image = cv2.imread(center_image_path)\r\n left_image = cv2.imread(left_image_path)\r\n right_image = cv2.imread(right_image_path)\r\n center_images.append(center_image)\r\n left_images.append(left_image)\r\n right_images.append(right_image)\r\n steerings.append(steering)\r\n throttles.append(throttle)\r\n brakes.append(brake)\r\n speeds.append(speed)\r\n return center_images, left_images, right_images, steerings, throttles, brakes, speeds",
"def fast_fourier_transform(coeffs: list) -> list:\n result = [complex(0.0, 0.0) for _ in range(len(coeffs))]\n\n if len(coeffs) == 1:\n result[0] = coeffs[0]\n return result\n\n coeffs_0 = fast_fourier_transform([coeffs[i * 2] for i in range(len(coeffs) // 2)])\n coeffs_1 = fast_fourier_transform([coeffs[i * 2 + 1] for i in range(len(coeffs) // 2)])\n\n w = complex(1.0, 0.0)\n wn = complex(math.cos((2.0 * math.pi) / len(coeffs)), math.sin((2.0 * math.pi) / len(coeffs)))\n\n for i in range(len(coeffs) // 2):\n result[i] = coeffs_0[i] + w * coeffs_1[i]\n result[i + (len(coeffs) // 2)] = coeffs_0[i] - w * coeffs_1[i]\n w *= wn\n\n return result",
"def make_pattern(pixels, origin, pattern_size, ndots):\n\tw,h = pattern_size\n\tow,oh = origin\n\tcoordinates = itertools.product(range(h), range(w))\n\twith_offset = [(c+ow, r+oh) for r,c in coordinates]\n\t# take only n dots\n\twith_offset = with_offset[:ndots]\n\tfor c,r in with_offset:\n\t\tpixels[c, r] = 0",
"def reconstruct_image(patch_list, patch_nb=2):\n line_list = []\n for i in range(0, patch_nb ** 2 - 1, patch_nb):\n line_list.append(cv2.hconcat(patch_list[i : i + patch_nb]))\n final_img = cv2.vconcat(line_list)\n return final_img",
"def contour_sampling(contour, num,delta = 5):\n \n samples = []\n step = len(contour)/num\n\n lefte = 0\n \n for i in range(num):\n #for i in range(5):\n samples.append(contour[i*step - i*delta])\n samples.append((contour[i*step - i*delta][0],lefte))\n\n #samples.append(contour[len(contour) - 1])\n\n return samples"
] | [
"0.63926274",
"0.52539974",
"0.51878995",
"0.5187165",
"0.5159146",
"0.5142736",
"0.5059244",
"0.5041294",
"0.50263",
"0.50123715",
"0.5000883",
"0.49830064",
"0.49202907",
"0.49104097",
"0.4898262",
"0.48702744",
"0.4861574",
"0.4837805",
"0.4827184",
"0.48218665",
"0.4811737",
"0.4792082",
"0.47847807",
"0.4775728",
"0.47733095",
"0.47620195",
"0.4745059",
"0.47336426",
"0.4732952",
"0.4723084"
] | 0.66475296 | 0 |
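A self-contained numpy sketch of the descriptor round trip described by the row above, using a toy diamond contour; the function below is illustrative and is not the dataset's own reconstruct/pad helpers:

import numpy as np

def reconstruct_from(descriptors, keep, length):
    # Keep only `keep` low-frequency descriptors (centred after fftshift),
    # zero the rest, and invert back to x/y coordinates.
    shifted = np.fft.fftshift(descriptors)
    center = length // 2
    mask = np.zeros(length, dtype=bool)
    mask[center - keep // 2: center + (keep + 1) // 2] = True
    inv = np.fft.ifft(np.fft.ifftshift(np.where(mask, shifted, 0)))
    return np.column_stack((inv.real, inv.imag))

pts = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
z = pts[:, 0] + 1j * pts[:, 1]
descriptors = np.fft.fft(z)
recovered = reconstruct_from(descriptors, keep=len(z), length=len(z))
# Keeping every descriptor reproduces the original points up to float error.
assert np.allclose(recovered, pts)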
Latent heat of vapourisation is approximated by a linear func of air temp (J kg-1) | def latent_heat_vapourisation(self, tair):
return (2.501 - 0.00237 * tair) * 1E06 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta",
"def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)",
"def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f",
"def general_solver(heat_flux, temp_initial, temp_air, k, alpha, x_grid,t_grid, upsilon, \n bc_surface, sigma):\n\n # temperatures are reported as a data frame, where each column is a step in time\n temperatures = pd.DataFrame(columns = [n for n in t_grid])\n\n # extract the necessary parameters to determine the surface heat losses\n if bc_surface[0] == \"linear\":\n h = bc_surface[1] + bc_surface[2]\n hc = 0\n emissivity = 0\n elif bc_surface[0] == \"non-linear\":\n h = 0\n hc = bc_surface[1]\n emissivity = bc_surface[2]\n\n # initialize temperature arrays for present and future temperatures\n T = np.zeros_like(x_grid) + temp_initial\n Tn = np.zeros_like(x_grid)\n\n # iterate over each time step\n temperatures.iloc[:,0] = T\n for j, t in enumerate(t_grid[:-1]):\n \n # create tri-diagonal matrix A\n A = tridiag_matrix(bc_surface_type = bc_surface[0], upsilon = upsilon, \n space_divisions = len(x_grid), dx = x_grid[1] - x_grid[0], \n k = k, T = T, h = h, hc = hc, emissivity = emissivity, sigma = sigma)\n \n # create vector b\n b = vector_b(bc_surface_type = bc_surface[0], upsilon = upsilon, \n space_divisions = len(x_grid), dx = x_grid[1] - x_grid[0], \n k = k, T = T, T_air = temp_air, heat_flux = heat_flux, h = h, hc = hc, \n emissivity = emissivity, sigma = sigma, j = j)\n \n # calculate value of future temperature\n Tn = np.linalg.solve(A,b)\n \n # update present temperature\n T = Tn.copy()\n \n # store temperature profile at this time in the data frame\n temperatures.iloc[:, j+1] = Tn\n \n return temperatures",
"def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))",
"def vaporPressure(temp: float) -> float:\n exponent = (17.27*temp)/(temp + 237.3)\n vp = 611*np.exp(exponent)\n\n return vp",
"def Jv(t,y,v):\n return A@v",
"def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323",
"def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)",
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def temperature() -> float:",
"def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()",
"def fglidingHST_exact(xy, v, NL, KL, BM, Mm, params):\n I1 = params['I1']\n I3 = params['I3']\n l = params['l']\n g = params['g']\n k = params['k']\n\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle'''\n NP = 1\n NN = 0\n\n # print 'xy = ', xy\n # print 'v = ', v\n\n x = xy[:, 0].ravel() # .reshape(NP,1);\n y = xy[:, 1].ravel() # .reshape(NP,1);\n theta = xy[:, 2].ravel() # .reshape(NP,1);\n phi = xy[:, 3].ravel() # .reshape(NP,1);\n psi = xy[:, 4].ravel() # .reshape(NP,1);\n vx = v[:, 0].ravel() # .reshape(NP,1);\n vy = v[:, 1].ravel() # .reshape(NP,1);\n vtheta = v[:, 2].ravel() # .reshape(NP,1);\n vphi = v[:, 3].ravel() # .reshape(NP,1);\n vpsi = v[:, 4].ravel() # .reshape(NP,1)\n\n # if theta is very nearly pi, push it back\n close_pi = 3.1415\n # xout[xy[:,2] > close_pi,2] = close_pi\n theta[theta > close_pi] = close_pi\n\n # w3 = vpsi + vphi*np.cos(theta)\n w3 = params['w3']\n # if not isinstance(w3,np.ndarray):\n # print 'w3 --> ndarray'\n # w3 = np.array(w3)\n\n # SPRING FORCE\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(NN)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(NN)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n # print(stretch)\n springx = k * np.sum(stretch * vecx / mag, axis=-1)\n springy = k * np.sum(stretch * vecy / mag, axis=-1)\n # print 'stretch = ', stretch\n\n # add them up\n FX = - springx.ravel() # .reshape(NP,1)\n FY = - springy.ravel() # .reshape(NP,1)\n\n # Set force on fixed particles to zero\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n FX[params['BIND']] = 0.\n FY[params['BIND']] = 0.\n\n # Transform into A frame\n Fx = FX * np.cos(phi) + FY * np.sin(phi)\n Fy = -FX * np.sin(phi) + FY * np.cos(phi)\n\n # print '\\n Fx =', Fx\n\n # VERTICAL REACTION FORCE\n # print 'T1 = ', Mm*g*I1\n # print 'T2 =', - Mm*l*(I1*np.cos(theta)*(vtheta**2 + vphi**2*np.sin(theta)**2))\n # print 'T3a = ', I3*w3\n # print 'T3b = ', vphi*np.sin(theta)**2\n # print 'T3 = ', I3*w3*vphi*np.sin(theta)**2\n # print 'T4 = ', - l* np.sin(theta)*np.cos(theta)*Fx\n # print 'denom = ', I1 + Mm*l**2*np.sin(theta)**2\n gn = (Mm * g * I1 - Mm * l * (I1 * np.cos(theta) * (vtheta ** 2 + vphi ** 2 * np.sin(theta) ** 2) - \\\n I3 * w3 * vphi * (np.sin(theta) ** 2) - l * np.sin(theta) * np.cos(theta) * Fx)) / (\n I1 + Mm * l ** 2 * np.sin(theta) ** 2)\n\n # print 'gn_ term 1 = ', Mm*g*I1\n # print 'gn_ denominator = ', (I1 + Mm*l**2*np.sin(theta)**2)\n # print 'gn_ denom term 2 = ', Mm*l**2\n print 'gn_exact = ', gn\n # print 'gn = ', gn\n\n # EULER EQUATIONS\n # print 'denominator = ',I1*np.sin(theta)\n dvphi = (I3 * w3 * vtheta - 2 * I1 * vphi * vtheta * np.cos(theta) - l * Fy) / (I1 * np.sin(theta))\n # print 'dvtheta -- term 1:', l*gn[4]*np.sin(theta[4])\n # print 'dvtheta -- term 2:', -l*Fx[4]*np.cos(theta[4])\n # print 'dvtheta -- term 3:', I1*vphi[4]**2*np.sin(theta[4])*np.cos(theta[4])\n # print 'dvtheta -- term 4:', I3*w3[4]*vphi[4]*np.sin(theta[4])\n dvtheta = (1. 
/ I1) * (l * gn * np.sin(theta) - l * Fx * np.cos(theta) + I1 * vphi ** 2 * np.sin(theta) * np.cos(\n theta) - I3 * w3 * vphi * np.sin(theta))\n dvpsi = - dvphi * np.cos(theta) + vphi * np.sin(theta) * vtheta\n\n # print 'shape(dvphi)=', np.shape(dvphi)\n # print 'shape(Fx)=', np.shape(Fx)\n\n # SPRING EQUATIONS\n # print 'dvtheta =', dvtheta\n wx = l * (dvtheta * np.cos(theta) - vtheta ** 2 * np.sin(theta) - vphi ** 2 * np.sin(theta))\n wy = l * (dvphi * np.sin(theta) + 2 * vphi * vtheta * np.cos(theta))\n wX = wx * np.cos(phi) - wy * np.sin(phi)\n wY = wx * np.sin(phi) + wy * np.cos(phi)\n dvX = (FX / Mm) - wX\n dvY = (FY / Mm) - wY\n\n # print 'shapes = ', np.shape(dvX), np.shape(dvY),np.shape(dvtheta),np.shape(dvphi),np.shape(dvpsi)\n ftx = np.dstack((dvX, dvY, dvtheta, dvphi, dvpsi))[0]\n # print 'Resulting second derivative: ', ftx[1,:]\n\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n ftx[params['BIND'], 0:2] = [0., 0.]\n # ftx[params['BIND']] = [0.,0.,0.,0.,0.]\n\n # print 'ftx = ', ftx[4,:]\n # gn_check = Mm*g - Mm*l*(dvtheta*np.sin(theta) + vtheta**2*np.cos(theta))\n # print 'gn_check = ', gn_check-gn\n # if sum(abs(gn_check -gn)) > 1e-8:\n # print 'gn vertical reaction force does not match up!'\n # print 'gn_check - gn = ', gn_check-gn\n print 'ftx = ', ftx\n\n return ftx",
"def mV_surf(T):\n A = erf(T**.5)/2**.5\n return A",
"def water_vapour(t):\n T_0 = 273.15\n T_rw = 35.86 # over water\n a = 17.269\n # cdo -mulc,610.78 -exp -div -mulc,17.5 -subc,273.15 a\n return 610.78 * np.exp(a * (t - T_0) / (t - T_rw))",
"def vappressure(temp):\n tau = temp/_TTP\n earg = 0.\n for (a,b) in _C_PSUBL:\n earg += a * tau**(b-1)\n pres = _PTPE * numpy.exp(earg)\n return pres",
"def InstTemp(Vel):\n return np.sum(Vel * Vel) / (3. * len(Vel))",
"def Latentc(tempc):\n \n return 1000*(2500.8 - 2.36*tempc + 0.0016*tempc**2 - 0.00006*tempc**3)",
"def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d",
"def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)",
"def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p",
"def mt(P_1,V0_1,meanF_1,rho): \n psi = np.arctan2(V0_1[2],-V0_1[0])\n \n # Find swept ares\n idx_zmax = np.argmax(P_1[:,-1,2])\n idx_ymax = np.argmax(P_1[:,-1,1])\n idx_zmin = np.argmin(P_1[:,-1,2])\n \n Ad = np.linalg.norm(P_1[idx_zmax,-1,2]-P_1[idx_zmin,-1,2])*P_1[idx_ymax,-1,1]\n print P_1[idx_zmax,-1,2]\n V0 = np.linalg.norm(V0_1)\n \n Vi_1new = np.zeros_like(V0_1,dtype=float)\n\n while True:\n Vi_1 = Vi_1new\n \n Vi_1new[0] = meanF_1[0] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n Vi_1new[2] = meanF_1[2] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n \n if np.linalg.norm(Vi_1-Vi_1new) < 0.001:\n break\n\n return -Vi_1",
"def env_temperature(v3: \"float\", v4: \"float\") -> \"float\":",
"def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u",
"def kappa_t(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_pp = liq_g(0,2,temp,pres)\n kappa = -g_pp/g_p\n return kappa",
"def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v",
"def linear_evolve(self,nt=1):\n for l in range(nt):\n y_temp = np.empty(self.y.shape[0])\n for i in range(self.y.shape[0]):\n \n # idx left to the departure point\n j = int(np.floor((self.x[i]-self.u[i]*self.dt)/self.dx))\n # idx right to the departure point\n k = j+1\n print i, j, k\n # linear interpolation\n alpha = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n y_temp[i] = (1-alpha)*self.y[j] + alpha*self.y[k]\n # copy array to current time\n self.y = np.copy(y_temp)\n stop\n #return current varibale\n return self.y",
"def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z",
"def interpolating_model_DA(temp,grav,m_type='da2014'):\n # PARAMETERS # \n dir_models = basedir + '/WDModels_Koester.'+m_type+'_npy/'\n if m_type==\"pier\":\n teff=np.array([1500.,1750.,2000.,2250.,2500.,2750.,3000.,3250.,3500.,\n 3750.,4000.,4250.,4500.,4750.,5000.,5250.,5500.,6000.,\n 6500.,7000.,7500.,8000.,8500.,9000.,9500.,10000.,10500.,\n 11000.,11500.,12000.,12500.,13000.,13500.,14000.,14500.,\n 15000.,15500.,16000.,16500.,17000.,20000.,25000.,30000.,\n 35000.,40000.,45000.,50000.,55000.,60000.,65000.,70000.,\n 75000.,80000.,85000.,90000.])\n logg=np.array([6.50,7.00,7.50,7.75,8.00,8.25,8.50,9.00,9.50])\n elif m_type==\"da2014\":\n teff=np.array([6000.,6250.,6500.,6750.,7000.,7250.,7500.,7750.,8000.,\n 8250.,8500.,8750.,9000.,9250.,9500.,9750.,10000.,10100.,\n 10200.,10250.,10300.,10400.,10500.,10600.,10700.,10750.,\n 10800.,10900.,11000.,11100.,11200.,11250.,11300.,11400.,\n 11500.,11600.,11700.,11750.,11800.,11900.,12000.,12100.,\n 12200.,12250.,12300.,12400.,12500.,12600.,12700.,12750.,\n 12800.,12900.,13000.,13500.,14000.,14250.,14500.,14750.,\n 15000.,15250.,15500.,15750.,16000.,16250.,16500.,16750.,\n 17000.,17250.,17500.,17750.,18000.,18250.,18500.,18750.,\n 19000.,19250.,19500.,19750.,20000.,21000.,22000.,23000.,\n 24000.,25000.,26000.,27000.,28000.,29000.,30000.,35000.,\n 40000.,45000.,50000.,55000.,60000.,65000.,70000.,75000.,\n 80000.,90000.,100000.])\n logg=np.array([4.00,4.25,4.50,4.75,5.00,5.25,5.50,5.75,6.00,6.25,6.50,\n 6.75,7.00,7.25,7.50,7.75,8.00,8.25,8.50,8.75,9.00,9.25,\n 9.50])\n if (m_type=='pier') & (temp<1500. or temp>90000. or grav<6.5 or grav>9.5): \n return [],[]\n elif (m_type=='da2014') & (temp<6000. or temp>100000. or grav<4.0 or grav>9.5): \n return [],[]\n # INTERPOLATION #\n g1,g2 = np.max(logg[logg<=grav]),np.min(logg[logg>=grav])\n if g1!=g2: g = (grav-g1)/(g2-g1)\n else: g=0\n t1,t2 = np.max(teff[teff<=temp]),np.min(teff[teff>=temp])\n if t1!=t2: t = (temp-t1)/(t2-t1) \n else: t=0\t\n if m_type =='da2014': models = ['da%06d_%d_2.7.npy'%(i, j*100) for i in [t1,t2] \n for j in [g1,g2]]\n else: models = ['WD_%.2f_%d.0.npy'%(j, i) for i in [t1,t2] for j in [g1,g2]]\n try:\n m11, m12 = np.load(dir_models+models[0]), np.load(dir_models+models[1])\t\n m21, m22 = np.load(dir_models+models[2]), np.load(dir_models+models[3])\t\n flux_i = (1-t)*(1-g)*m11[:,1]+t*(1-g)*m21[:,1]+t*g*m22[:,1]+(1-t)*g*m12[:,1]\n return np.dstack((m11[:,0], flux_i))[0]\n except: return [],[]",
"def thermal_i(mu,Ti):\n return 9.79*1.e5/np.sqrt(mu/Ti)/1.e2"
] | [
"0.7173018",
"0.69608897",
"0.61423993",
"0.61281765",
"0.6077439",
"0.60561836",
"0.6050125",
"0.6003145",
"0.590397",
"0.58907586",
"0.58311015",
"0.58004564",
"0.57759726",
"0.577098",
"0.577019",
"0.57701373",
"0.5768532",
"0.576528",
"0.57489395",
"0.57373637",
"0.5727742",
"0.5708233",
"0.57063276",
"0.56996626",
"0.56801414",
"0.56380355",
"0.56374687",
"0.56329095",
"0.56326807",
"0.5626574"
] | 0.7419154 | 0 |
Perform several steps of simulation with a constant action | def _simulate(self, action=None):
for k in range(int(self.SIMULATION_FREQUENCY // self.config["policy_frequency"])):
if action is not None and \
self.time % int(self.SIMULATION_FREQUENCY // self.config["policy_frequency"]) == 0:
# Forward action to the spacecraft
self.spacecraft.act(self.ACTIONS[action])
self.space.act()
self.space.step(1 / self.SIMULATION_FREQUENCY)
self.time += 1
# Automatically render intermediate simulation steps if a viewer has been launched
# Ignored if the rendering is done offscreen
self._automatic_rendering()
# Stop at terminal states
if self.done or self._is_terminal():
break
self.enable_auto_render = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, steps):\n self.sim.run(steps)",
"def step_simulation(self, action):\n # target = np.zeros(6)\n # a = np.copy(action)\n # for i in range(6):\n # target[i] = a[i] + ref_pos[i + 3]\n\n target = action * 1.5\n # target = action + ref_pos[3:9]\n\n joint_angle_4, joint_velocity_4 = self.get_joint_angle_and_velocity(4)\n joint_angle_7, joint_velocity_7 = self.get_joint_angle_and_velocity(7)\n self.joint_history.append(np.asarray([joint_angle_4, joint_velocity_4, joint_angle_7, joint_velocity_7]))\n\n joint_angles = self.robot_skeleton.q[3:]\n joint_velocities = self.robot_skeleton.dq[3:]\n\n tau = np.zeros(self.robot_skeleton.ndofs) # torque to apply at each simulation clock\n tau[3:] = self.P * (target - joint_angles) - self.D * joint_velocities\n tau = np.clip(tau, -150 * self.volume_scaling, 150 * self.volume_scaling)\n self.tau_history.append(tau)\n # print(tau)\n self.do_simulation(tau, 1)",
"def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()",
"def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()",
"def step(self, action):",
"def run(self):\n \n t = 0\n while t < 10:\n self.reset()\n self.start_simulation()\n while not self.done:\n raw_input(\"Press Enter to continue...\")\n action = self.action_space.sample()\n print(action)\n state, reward, self.done, _ = self.step(action)\n print('Current state:\\n angles: {}'.format(state))\n print('Reward: {}'.format(reward))\n\n self.stop_simulation()\n t += 1",
"def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()",
"def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()",
"def _simulation_run(model_instance, observations, actions, rewards):\r\n\r\n for observation, action, reward in zip(observations, actions, rewards):\r\n model_instance.observe(observation)\r\n model_instance.overrideActionChoice(action)\r\n model_instance.feedback(reward)\r\n\r\n return model_instance",
"def _simulate(self, action: Optional[Action] = None) -> None:\n frames = int(self.config[\"simulation_frequency\"] // self.config[\"policy_frequency\"])\n for frame in range(frames):\n # Forward action to the vehicle\n if action is not None \\\n and not self.config[\"manual_control\"] \\\n and self.steps % int(self.config[\"simulation_frequency\"] // self.config[\"policy_frequency\"]) == 0:\n self.action_type.act(action)\n\n self.road.act()\n self.road.step(1 / self.config[\"simulation_frequency\"])\n self.steps += 1\n\n # Automatically render intermediate simulation steps if a viewer has been launched\n # Ignored if the rendering is done offscreen\n if frame < frames - 1: # Last frame will be rendered through env.render() as usual\n self._automatic_rendering()\n\n self.enable_auto_render = False",
"def simulate(self):\r\n\r\n for index in tqdm(range(self.steps)):\r\n\r\n S = 0.1 - 0.1 / self.steps * (index + 1)\r\n T = 0.5 / (np.log(2 + 0.2 * index))\r\n\r\n self.move(T, S)\r\n self.t_change.append(T)\r\n self.s_change.append(S)\r\n tot = calculate_total_energy(self.current_config)\r\n self.energies.append(tot)",
"def simulateOneTimeStep(self):\n\n self.susceptibleToInfected()\n self.infectedToRecovered()\n\n # add the new values of healthy/infected/recovered to the arrays keeping track\n SIR_t = np.array([self.getSusceptible(), self.getInfected(), self.getRecovered()])\n #update SIR time series\n self.SIR = np.concatenate([self.SIR, SIR_t[:,np.newaxis]], axis=1)\n\n # add the new snapshot of the simulation\n self.snapshots.append(self.getSpace().copy())",
"def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)",
"def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. HIV transmission\n self.infection_operator.step()",
"def do_step(self) -> None:",
"def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)",
"def step(self, action, update=True):\n\n if self.centralized_planning:\n agent_states = [human.get_full_state() for human in self.humans]\n if self.robot.visible:\n agent_states.append(self.robot.get_full_state())\n human_actions = self.centralized_planner.predict(\n agent_states, self.group_membership, self.obstacles\n )[:-1]\n else:\n human_actions = self.centralized_planner.predict(\n agent_states, self.group_membership, self.obstacles\n )\n else:\n human_actions = []\n for human in self.humans:\n # Choose new target if human has reached goal and in perpetual mode:\n if human.reached_destination() and self.perpetual:\n if self.train_val_sim == \"square_crossing\":\n gx = (\n np.random.random() * self.square_width * 0.5 * np.random.choice([-1, 1])\n )\n gy = (np.random.random() - 0.5) * self.square_width\n human.set(human.px, human.py, gx, gy, 0, 0, 0)\n elif self.train_val_sim == \"circle_crossing\":\n human.set(human.px, human.py, -human.px, -human.py, 0, 0, 0)\n else:\n if np.random.rand(1) > 0.5:\n gx = (\n np.random.random()\n * self.square_width\n * 0.5\n * np.random.choice([-1, 1])\n )\n gy = (np.random.random() - 0.5) * self.square_width\n human.set(human.px, human.py, gx, gy, 0, 0, 0)\n else:\n human.set(human.px, human.py, -human.px, -human.py, 0, 0, 0)\n # observation for humans is always coordinates\n human_ob = [\n other_human.get_observable_state()\n for other_human in self.humans\n if other_human != human\n ]\n if self.robot.visible:\n human_ob += [self.robot.get_observable_state()]\n human_actions.append(human.act(human_ob, self.group_membership))\n # collision detection\n dmin = float(\"inf\")\n collisions = 0\n human_distances = list()\n for i, human in enumerate(self.humans):\n px = human.px - self.robot.px\n py = human.py - self.robot.py\n if self.robot.kinematics == \"holonomic\":\n vx = human.vx - action.vx\n vy = human.vy - action.vy\n else:\n vx = human.vx - action.v * np.cos(action.r + self.robot.theta)\n vy = human.vy - action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n # closest distance between boundaries of two agents\n human_dist = (\n point_to_segment_dist(px, py, ex, ey, 0, 0) - human.radius - self.robot.radius\n )\n if human_dist < 0:\n collisions += 1\n self.episode_info[\"collisions\"] -= self.collision_penalty\n # logging.debug(\"Collision: distance between robot and p{} is {:.2E}\".format(i, human_dist))\n break\n elif human_dist < dmin:\n dmin = human_dist\n human_distances.append(human_dist)\n\n # collision detection between robot and static obstacle\n static_obstacle_dmin = float(\"inf\")\n static_obstacle_collision = 0\n obstacle_distances = list()\n min_dist = self.robot.radius\n px = self.robot.px\n py = self.robot.py\n\n if self.robot.kinematics == \"holonomic\":\n vx = action.vx\n vy = action.vy\n else:\n vx = action.v * np.cos(action.r + self.robot.theta)\n vy = action.v * np.sin(action.r + self.robot.theta)\n ex = px + vx * self.time_step\n ey = py + vy * self.time_step\n for i, obstacle in enumerate(self.obstacles):\n robot_position = ex, ey\n obst_dist = line_distance(obstacle, robot_position)\n if obst_dist < min_dist:\n static_obstacle_collision += 1\n self.episode_info[\n \"static_obstacle_collisions\"\n ] -= self.static_obstacle_collision_penalty\n break\n\n # collision detection between humans\n human_num = len(self.humans)\n for i in range(human_num):\n for j in range(i + 1, human_num):\n dx = self.humans[i].px - self.humans[j].px\n dy = self.humans[i].py - 
self.humans[j].py\n dist = (\n (dx ** 2 + dy ** 2) ** (1 / 2) - self.humans[i].radius - self.humans[j].radius\n )\n if dist < 0:\n # detect collision but don't take humans' collision into account\n logging.debug(\"Collision happens between humans in step()\")\n # check if reaching the goal\n end_position = np.array(self.robot.compute_position(action, self.time_step, self.closed))\n reaching_goal = (\n norm(end_position - np.array(self.robot.get_goal_position()))\n < self.robot.radius + self.goal_radius\n )\n done = False\n info = Nothing()\n reward = -self.time_penalty\n goal_distance = np.linalg.norm(\n [\n (end_position[0] - self.robot.get_goal_position()[0]),\n (end_position[1] - self.robot.get_goal_position()[1]),\n ]\n )\n progress = self.previous_distance - goal_distance\n self.previous_distance = goal_distance\n reward += self.progress_reward * progress\n self.episode_info[\"progress\"] += self.progress_reward * progress\n if self.global_time >= self.time_limit:\n done = True\n info = Timeout()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 1.0\n if collisions > 0:\n reward -= self.collision_penalty * collisions\n if self.end_on_collision:\n done = True\n info = Collision()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 1.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 0.0\n\n if static_obstacle_collision > 0:\n reward -= self.static_obstacle_collision_penalty * static_obstacle_collision\n if self.end_on_collision:\n done = True\n info = Collision()\n self.episode_info[\"did_succeed\"] = 0.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 1.0\n self.episode_info[\"did_timeout\"] = 0.0\n if reaching_goal:\n reward += self.success_reward\n done = True\n info = ReachGoal()\n self.episode_info[\"goal\"] = self.success_reward\n self.episode_info[\"did_succeed\"] = 1.0\n self.episode_info[\"did_collide\"] = 0.0\n self.episode_info[\"did_collide_static_obstacle\"] = 0.0\n self.episode_info[\"did_timeout\"] = 0.0\n for human_dist in human_distances:\n if 0 <= human_dist < self.discomfort_dist * self.discomfort_scale:\n discomfort = (\n (human_dist - self.discomfort_dist * self.discomfort_scale)\n * self.discomfort_penalty_factor\n * self.time_step\n )\n reward += discomfort\n self.episode_info[\"discomfort\"] += discomfort\n\n forces = self.centralized_planner.get_force_vectors(coeff=[1] * 6)\n\n if forces is not None:\n # separate human and robot forces\n robot_forces = forces[-1]\n human_forces = forces[:-1]\n # calculate average of human forces and append them to the log\n for i, force in enumerate(self.force_list):\n self.episode_info.get(\"avg_\" + force).append(\n np.average(np.hypot(*human_forces[:, i, :].transpose()))\n )\n # add robot social force\n self.episode_info.get(\"robot_social_force\").append(np.hypot(*robot_forces[1]))\n\n human_num = len(self.humans)\n for i in range(human_num):\n px = self.humans[i].px\n py = self.humans[i].py\n gx = self.humans[i].gx\n gy = self.humans[i].gy\n\n self.episode_info[\"pedestrian_distance_traversed\"][i].append([px,py])\n self.episode_info[\"pedestrian_goal\"][i].append([gx,gy])\n\n self.episode_info[\"pedestrian_velocity\"][i].append([vx,vy])\n\n\n\n # penalize group intersection\n robot_pos = [self.robot.px, self.robot.py]\n robot_vel = [self.robot.vx, 
self.robot.vy]\n\n self.episode_info[\"robot_distance_traversed\"].append(robot_pos)\n self.episode_info[\"robot_velocity\"].append(robot_vel)\n\n\n\n convex = 1\n\n for idx, group in enumerate(self.group_membership):\n # get the members of the group\n points = []\n for human_id in group:\n ind_points = [\n point_along_circle(\n self.humans[human_id].px,\n self.humans[human_id].py,\n self.humans[human_id].radius,\n )\n for _ in range(10)\n ]\n points.extend(ind_points)\n\n if convex == 1:\n\n # compute the convex hull\n hull = ConvexHull(points)\n\n group_col = point_in_hull(robot_pos, hull)\n\n # min spanning circle\n else:\n circle_def = minimum_enclosing_circle(points)\n\n group_col = is_collision_with_circle(\n circle_def[0][0], circle_def[0][1], circle_def[1], robot_pos[0], robot_pos[1]\n )\n\n if group_col:\n group_discomfort = -self.group_discomfort_penalty\n reward += group_discomfort\n self.episode_info[\"group_discomfort\"] += group_discomfort\n\n # we only want to track number of violations once per group per episode\n self.episode_info[\"group_intersection_violations\"][idx] = 1.0\n\n if (\n len(human_distances) > 0\n and 0 <= min(human_distances) < self.discomfort_dist * self.discomfort_scale\n ):\n info = Danger(min(human_distances))\n if update:\n # update all agents\n self.robot.step(action, self.closed)\n for i, human_action in enumerate(human_actions):\n self.humans[i].step(human_action, self.closed)\n self.global_time += self.time_step\n for i, human in enumerate(self.humans):\n # only record the first time the human reaches the goal\n if self.human_times[i] == 0 and human.reached_destination():\n self.human_times[i] = self.global_time\n # compute the observation\n if self.robot.sensor == \"coordinates\":\n ob = [human.get_observable_state() for human in self.humans]\n\n if self.enable_intent:\n if self.intent_type == \"individual\":\n target_maps = np.array([human.get_target_map() for human in self.humans])\n elif self.intent_type == \"group\":\n target_maps = np.array([human.get_target_map() for human in self.humans])\n\n # average intent map across group members\n for group in self.group_membership:\n # get the members of the group\n avg = np.average([target_maps[human_id] for human_id in group], axis=0)\n for human_id in group:\n target_maps[human_id] = avg\n\n # add target_map to observation\n for i in range(len(ob)):\n ob[i].update_target_map(target_maps[i])\n else:\n print(\n \"unrecognized intent type, only valid options are individual or group, received: \",\n self.intent_type,\n )\n\n elif self.robot.sensor.lower() == \"rgb\" or self.robot.sensor.lower() == \"gray\":\n snapshot = self.get_pixel_obs()\n prior_planes = snapshot.shape[1] * (self.num_frames - 1)\n self.obs_history = np.concatenate(\n (self.obs_history[:, -prior_planes:, :, :], snapshot), axis=1\n )\n ob = self.obs_history\n else:\n raise ValueError(\"Unknown robot sensor type\")\n # store state, action value and attention weights\n self.states.append(\n [\n self.robot.get_full_state(),\n [human.get_full_state() for human in self.humans],\n self.centralized_planner.get_force_vectors(),\n ]\n )\n if hasattr(self.robot.policy, \"action_values\"):\n self.action_values.append(self.robot.policy.action_values)\n if hasattr(self.robot.policy, \"get_attention_weights\"):\n self.attention_weights.append(self.robot.policy.get_attention_weights())\n else:\n if self.robot.sensor == \"coordinates\":\n ob = [\n human.get_next_observable_state(action, self.closed)\n for human, action in zip(self.humans, 
human_actions)\n ]\n elif self.robot.sensor.lower() == \"rgb\" or self.robot.sensor.lower() == \"gray\":\n snapshot = self.get_pixel_obs()\n prior_planes = snapshot.shape[1] * (self.num_frames - 1)\n self.obs_history = np.concatenate(\n (self.obs_history[:, -prior_planes:, :, :], snapshot), axis=1\n )\n ob = self.obs_history\n else:\n raise ValueError(\"Unknown robot sensor type\")\n if done:\n self.episode_info[\"time\"] = -self.global_time * self.time_penalty / self.time_step\n self.episode_info[\"global_time\"] = self.global_time\n info = self.episode_info # Return full episode information at the end\n return ob, reward, done, info",
"def step(self, actions):\n observations = []\n rewards = []\n dones = []\n self.perform_action(self.car1, actions[0])\n self.perform_action(self.car2, actions[1])\n p.stepSimulation()\n observations = self._get_obs()\n for i in range(2):\n rewards.append(self._get_rewards(i, observations[i]))\n self.step_counter+=1\n self.reduce = self.reduce * 0.9993\n for i in range(2):\n done, cause = self._check_done(observations[i])\n dones.append(done)\n if(done == 1):\n if(cause==1):\n rewards[1] += 72000\n rewards[0] -= 72000\n elif(cause==0):\n rewards[0] += 72000\n rewards[1] -= 72000\n elif(cause==2):\n rewards[i] -= 72000\n return observations, rewards, dones, None",
"def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)",
"def step(self, actions=None):\n \n if actions is not None:\n # implement the actions\n# print(actions)\n for asset, valve_position in zip(self.config[\"action_space\"], actions):\n# print(asset, valve_position)\n self._setValvePosition(asset, valve_position)\n\n # take the step !\n time = self.sim._model.swmm_step()\n done = False if time > 0 else True\n return done",
"def step(self):\n self.function()",
"def simulate(agent, steps, initialize=None):\n grid, r, c = random_world()\n if initialize:\n state = initialize()\n result = 0\n for t in range(steps):\n result += score(grid)\n percept = get_percept(grid, r, c)\n if initialize:\n action, *state = agent(percept, *state)\n else:\n action = agent(percept)\n r, c = apply(grid, r, c, action)\n return result",
"def step(self, action):\n # THIS WILL BE CALLED FROM ALL STEP DRIVERS\n self._world = self._action_wrapper.action_to_behavior(world=self._world,\n action=action)\n # 1. move the agent we set the action for\n controlled_agent_id = self._scenario._eval_agent_ids[self._action_wrapper._input_count-1]\n self._world.stepAgent(self._step_time, controlled_agent_id)\n\n # length of agents\n if self._action_wrapper._input_count >= len(self._scenario._eval_agent_ids):\n # CANNOT STEP WORLD IF NOT ALL ACTIONS ARE SET\n self._action_wrapper._input_count = 0\n \n # 2. move all other agent\n self._world.step(self._step_time)\n if self._render:\n self.render()\n\n # TODO needs to know the agents id\n return self.snapshot(\n world=self._world,\n controlled_agents=controlled_agent_id,\n action=action)",
"def run():\n step = 0\n while traci.simulation.getMinExpectedNumber() > 0:\n traci.simulationStep()\n step+=1\n traci.close()\n sys.stdout.flush()",
"def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info",
"def step(self, action):\n if not hasattr(self, \"robot\"):\n raise RuntimeError(\"reset before step!!!\")\n\n control_miniBox(self.robot.robot, instruction=action, target_velocity=self.target_velocity,\n multiply=self.multiply, left_wheel_joint_index=self.left_wheel_joint_index,\n right_wheel_joint_index=self.right_wheel_joint_index, max_force=self.max_force, \n physicsClientId=self._physics_client_id)\n \n p.stepSimulation(physicsClientId=self._physics_client_id) \n self.step_num += 1\n state = self.robot.get_observation(self.target_pos)\n reward = self.__reward_func(state)\n if state[-2] < self.target_radius:\n done = True\n elif self.step_num > self.done_step_num:\n done = True\n else:\n done = False\n info = {\"distance\" : state[-2], \"collision_num\" : self.collision_num}\n\n # under evaluate mode, extra debug items need to be rendered\n if self._evaluate:\n froms, tos, results = rayTest(self.robot.robot, ray_length=self.laser_length, ray_num=self.laser_num)\n for index, result in enumerate(results):\n self.rayDebugLineIds[index] = p.addUserDebugLine(\n lineFromXYZ=froms[index], \n lineToXYZ=tos[index] if result[0] == -1 else result[3], \n lineColorRGB=self.miss_color if result[0] == -1 else self.hit_color, \n lineWidth=self.ray_debug_line_width, \n replaceItemUniqueId=self.rayDebugLineIds[index]\n )\n\n return np.array(state), reward, done, info",
"def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)",
"def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info",
"def step(self):\n self.world.slosh_oceans()\n self.world.transfer_energy_vertically()\n self.world.transfer_energy_horizontally()\n self.world.absorb_energy_from_core()\n self.world.absorb_energy_from_sun(self.sun)",
"def performSimulation(self):\n \n if self.parameters['verbose']:\n print(\"=====================\\nStarting simulation with parameters\\n\",self.parameters)\n print(\"=====================\\nInitial Graph\\n\")\n self.showState()\n print(\"=====================\")\n\n while self.parameters['steps'] > 0:\n if self.parameters['verbose']: print(\"Performing step\")\n self.performStep()\n if self.parameters['verbose']: self.showState()\n\n if self.parameters['verbose']:\n print(\"=====================\\nFinished Simulation\\n\\nResult graph:\")\n self.showState()\n #self.showGraph(self.parameters['file_name'])\n #self.showState()\n #self.showStats()"
] | [
"0.7152286",
"0.71506226",
"0.71068823",
"0.7042311",
"0.6963255",
"0.69249856",
"0.68964875",
"0.68964875",
"0.68496585",
"0.6815086",
"0.680374",
"0.6802927",
"0.67813265",
"0.6766567",
"0.67615855",
"0.6758372",
"0.67467135",
"0.6737128",
"0.6646202",
"0.6641702",
"0.66414595",
"0.6640942",
"0.6633892",
"0.66102225",
"0.6602687",
"0.65867573",
"0.65863824",
"0.6574544",
"0.6563619",
"0.6528745"
] | 0.737022 | 0 |
Render the environment. Create a viewer if none exists, and use it to render an image. | def render(self, mode='human'):
self.rendering_mode = mode
if self.viewer is None:
self.viewer = EnvViewer(self, offscreen=self.offscreen)
self.enable_auto_render = not self.offscreen
# If the frame has already been rendered, do nothing
if self.should_update_rendering:
self.viewer.display()
if mode == 'rgb_array':
image = self.viewer.get_image()
if not self.viewer.offscreen:
self.viewer.handle_events()
self.viewer.handle_events()
return image
elif mode == 'human':
if not self.viewer.offscreen:
self.viewer.handle_events()
self.should_update_rendering = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render(self) -> Optional[np.ndarray]:\n if self.render_mode is None:\n assert self.spec is not None\n gym.logger.warn(\n \"You are calling render method without specifying any render mode. \"\n \"You can specify the render_mode at initialization, \"\n f'e.g. gym.make(\"{self.spec.id}\", render_mode=\"rgb_array\")'\n )\n return\n if self.viewer is None:\n self.viewer = EnvViewer(self)\n\n self.enable_auto_render = True\n\n self.viewer.display()\n\n if not self.viewer.offscreen:\n self.viewer.handle_events()\n if self.render_mode == 'rgb_array':\n image = self.viewer.get_image()\n return image",
"def setupRender():\n prefs = getPreferences()\n\n # Check of the built-in environment maps path can be located.\n # Discontinue if it cannot be found.\n envPath = prefs.path_value\n if not envPath:\n return {'WARNING'}, \"No environment images path defined\"\n\n # Discontinue if there is no output path defined.\n renderPath = outputPath()\n if not renderPath:\n return {'WARNING'}, \"The scene needs to be saved before rendering\"\n\n if prefs.image_value == 'NONE':\n return {'WARNING'}, \"No environment image defined\"\n\n setRenderSettings(os.path.join(renderPath, IMAGE_NAME))\n createCamera()\n createWorld(envPath)\n return renderPath",
"def render(self, mode='human'):\n\n if self.RENDER_ENV_ONLY:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=np.array([120, 120, 120])/255.0)\n bezel = 10\n \n self._env_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n self._agent_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n if (self.RENDER_INDIV_MEMORY == True and self.INDIV_MEMORY == \"fog\") or (self.RENDER_TEAM_MEMORY == True and self.TEAM_MEMORY == \"fog\"):\n SCREEN_W = 1200\n SCREEN_H = 600\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n \n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n\n self._env_render(self._static_map,\n [7, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [7+1.49*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_red_render,\n [7+1.49*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [7, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # ind blue agent memory rendering\n for num_blue, blue_agent in enumerate(self._team_blue):\n if num_blue < 2:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+num_blue*SCREEN_H//4, 7], [SCREEN_H//4-10, SCREEN_H//4-10])\n else:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+(num_blue-2)*SCREEN_H//4, 7+SCREEN_H//4], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n # ind red agent memory rendering\n for num_red, red_agent in enumerate(self._team_red):\n if num_red < 2:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+num_red*SCREEN_H//4, 7+1.49*SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n \n else:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+(num_red-2)*SCREEN_H//4, 7+SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n if self.TEAM_MEMORY == \"fog\" and self.RENDER_TEAM_MEMORY == True:\n # blue team memory rendering\n blue_visited = np.copy(self._static_map)\n blue_visited[self.blue_memory] = UNKNOWN\n self._env_render(blue_visited,\n [7+2.98*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # red team memory rendering \n red_visited = np.copy(self._static_map)\n red_visited[self.red_memory] = UNKNOWN\n self._env_render(red_visited,\n [7+2.98*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n else:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n 
self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n \n self._env_render(self._static_map,\n [5, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10], self._team_blue)\n self._env_render(self.get_obs_red_render,\n [5+SCREEN_W//2, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n\n if self.SILENCE_RENDER:\n return self.viewer.get_array()\n else:\n return self.viewer.render(return_rgb_array = mode=='rgb_array')",
"def render(self, mode=\"human\", height=None, width=None, camera_name=\"agentview\"):\n if mode == \"human\":\n cam_id = self.env.sim.model.camera_name2id(camera_name)\n self.env.viewer.set_camera(cam_id)\n return self.env.render()\n elif mode == \"rgb_array\":\n return self.env.sim.render(height=height, width=width, camera_name=camera_name)[::-1]\n else:\n raise NotImplementedError(\"mode={} is not implemented\".format(mode))",
"def render(self):\n self.rendering = True\n self.env.render()",
"def render(self) -> None:\n if self.native_rendering:\n self._render()\n else:\n self.renderer.render_image(self.get_rendered_image())",
"def render(self):\n self.env.render()",
"def run(self):\n\n info(\"creating camera\")\n self.camera_controller = CameraController()\n self.camera_controller.camera.resolution = self.photo_resolution\n\n self.screen_resolution = ui.get_screen_resolution()\n self.normalized_screen_resolution = ui.normalize_dimension(self.screen_resolution)\n info(\"screen_resolution: %s\", self.screen_resolution)\n info(\"normalized_screen_resolution: %s\", self.normalized_screen_resolution)\n\n info(\"creating buffer image and canvas\")\n self.buffer_image = Image.new('RGB', self.normalized_screen_resolution)\n self.canvas = ImageDraw.Draw(self.buffer_image)\n debug(\"buffer_image resolution: %s\", self.buffer_image.size)\n\n info(\"creating preview renderer\")\n self.preview_renderer = self.camera_controller.start_preview(\n fullscreen=False,\n window=ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0] * 0.75,\n self.normalized_screen_resolution[1]\n )))\n debug(\"preview location: %s\", self.preview_renderer.window)\n\n info(\"creating window renderer\")\n self.window_renderer = self.camera_controller.add_overlay(\n self.buffer_image.tobytes(),\n size=self.buffer_image.size,\n fullscreen=False,\n layer=1,\n window=(\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n debug(\"window location: %s\", self.window_renderer.window)\n\n info(\"setting up UI\")\n self._setup_ui()\n\n info(\"setting up input\")\n self.yes_button = GPIOButton(self.yes_pin)\n self.no_button = GPIOButton(self.no_pin)\n\n info(\"starting app\")\n self._enter_state(STATE_DEFAULT)\n self.render_timer.start()\n ui_context = ui.UIContext(self.canvas, self.window, update_function=self._logic)\n ui_context.main_loop()\n\n info(\"exiting\")",
"def _automatic_rendering(self) -> None:\n if self.viewer is not None and self.enable_auto_render:\n\n if self._record_video_wrapper and self._record_video_wrapper.video_recorder:\n self._record_video_wrapper.video_recorder.capture_frame()\n else:\n self.render()",
"def _create_vtk_pipeline(self):\n if self._viewer is None and not self._view_frame is None:\n \n if True:\n self._viewer = vtk.vtkImageViewer2()\n self._viewer.SetupInteractor(self._view_frame._rwi)\n self._viewer.GetRenderer().SetBackground(0.3,0.3,0.3)\n \n else:\n ren = vtk.vtkRenderer()\n self._view_frame._rwi.GetRenderWindow().AddRenderer(ren)",
"def render(self, mode=\"human\", width=500, height=500):\n if mode == \"human\":\n return self.mujoco_simulation.mujoco_viewer.render()\n elif mode == \"rgb_array\":\n return self.mujoco_simulation.render(width=width, height=height)\n else:\n raise ValueError(\"Unsupported mode %s\" % mode)",
"def render_and_save():\n\n rendering_config = configuration.get_config()\n rendering_config = ml_collections.FrozenConfigDict(rendering_config)\n aspect_ratio = rendering_config.aspect_ratio\n height = rendering_config.height\n width = int(aspect_ratio * height)\n\n scene_camera = build_camera(rendering_config, aspect_ratio)\n world = build_world(rendering_config)\n\n # Render.\n logging.info(\"Tracing rays...\")\n render_image_fn = jax.jit(\n render.generate_image,\n static_argnames=[\"height\", \"width\", \"config\"])\n image = render_image_fn(height, width, scene_camera, world, rendering_config)\n\n image = render.correct_gamma(image, gamma=rendering_config.gamma_correction)\n\n logging.info(\"Saving to file...\")\n output.export_as_ppm(image, rendering_config.output_file)\n\n return image",
"def viewer_setup(self):\n pass",
"def viewer_setup(self):\n pass",
"def camera():\n return render_template('home/cam.html')",
"def render(self) -> Optional[Union[RenderFrame, List[RenderFrame]]]:\n # Set the available rendering modes\n viewer_backend = (self.simulator.viewer or Viewer).backend\n if self.render_mode == 'human' and viewer_backend == \"panda3d-sync\":\n Viewer.close()\n\n # Call base implementation\n return self.simulator.render( # type: ignore[return-value]\n return_rgb_array=self.render_mode == 'rgb_array')",
"def init_renderer(self):\n\n # Initialise render window\n renWin = vtk.vtkRenderWindow()\n if self.FULL_SCREEN:\n renWin.FullScreenOn()\n else:\n renWin.SetSize(\n int(self.WIN_H_SCALE*self.SCREEN_SIZE[0]),\n int(self.WIN_V_SCALE*self.SCREEN_SIZE[1])\n )\n\n class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):\n\n def __init__(self, parent=None):\n return None\n\n # Initialise interactor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetInteractorStyle(MyInteractorStyle())\n # iren.AutoAdjustCameraClippingRangeOn()\n iren.SetRenderWindow(renWin)\n\n return renWin, iren",
"def wrapped_render(self, render_modes, front_and_back=False):\n\n # Render raw images\n render_color = False\n for mode in render_modes:\n if mode != RenderMode.DEPTH and mode != RenderMode.SCALED_DEPTH:\n render_color = True\n break\n\n color_im, depth_im = None, None\n if render_color:\n color_im, depth_im = self.render(render_color, front_and_back=front_and_back)\n else:\n depth_im = self.render(render_color)\n\n # For each specified render mode, add an Image object of the appropriate type\n images = []\n for render_mode in render_modes:\n # Then, convert them to an image wrapper class\n if render_mode == RenderMode.SEGMASK:\n images.append(BinaryImage((depth_im > 0.0).astype(np.uint8), frame=self.camera.intrinsics.frame, threshold=0))\n\n elif render_mode == RenderMode.COLOR:\n images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame))\n\n elif render_mode == RenderMode.GRAYSCALE:\n images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale())\n\n elif render_mode == RenderMode.DEPTH:\n images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame))\n\n elif render_mode == RenderMode.SCALED_DEPTH:\n images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame).to_color())\n\n elif render_mode == RenderMode.RGBD:\n c = ColorImage(color_im, frame=self.camera.intrinsics.frame)\n d = DepthImage(depth_im, frame=self.camera.intrinsics.frame)\n images.append(RgbdImage.from_color_and_depth(c, d))\n\n elif render_mode == RenderMode.GD:\n g = ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale()\n d = DepthImage(depth_im, frame=self.camera.intrinsics.frame)\n images.append(GdImage.from_grayscale_and_depth(g, d))\n else:\n raise ValueError('Render mode {} not supported'.format(render_mode))\n\n return images",
"def make(self):\n\t\tif RENDER_VIEWS > 1:\n\t\t\tself._make()",
"def make_image(self):\n\n if self.type == 'passthrough':\n return\n render_template(\n os.path.dirname(self.main_module),\n os.path.basename(self.main_module_path),\n self.language,\n self.requirements,\n self.whitelist,\n self.type,\n into=self.code_dir)\n self.build()",
"def render_environment(self):\n board_img = make_img_of_board(*self.hist[-1])\n return board_img",
"def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )",
"def vtk_show(renderer, width=400, height=300):\n\t renderWindow = vtk.vtkRenderWindow()\n\t renderWindow.SetOffScreenRendering(1)\n\t renderWindow.AddRenderer(renderer)\n\t renderWindow.SetSize(width, height)\n\t renderWindow.Render()\n\t \n\t windowToImageFilter = vtk.vtkWindowToImageFilter()\n\t windowToImageFilter.SetInputData(renderWindow)\n\t windowToImageFilter.Update()\n\t \n\t writer = vtk.vtkPNGWriter()\n\t writer.SetWriteToMemory(1)\n\t writer.SetInputConnection(windowToImageFilter.GetOutputPort())\n\t writer.Write()\n\t data = bytes(memoryview(writer.GetResult()))\n\t \n\t return Image(data)",
"def createBasicRenderSetup():\n\n pass",
"def render_camera_view():\n return render_template('inspection_screen_new.html', travel_distance=distance, the_inspection_time=elapsed_time, realsense_device_status=realsense_enabled, detector_enabled=enable_detection, detections=detections_results, report_details=inspection_report)",
"def generatePreview(self):\n self.saveParameters()\n image=self.simulation.generatePreview()\n # convert pil image to a tkinter image\n self.photo = ImageTk.PhotoImage(image)\n\n # display image\n self.preview.create_image(0, 0, anchor='nw', image=self.photo)",
"def render(self, window):\n body = pygame.image.load(IMAGE_SNAKE).convert_alpha() # loading image\n for block in self.body:\n window.blit(body, (block[0]*SPRITE_SIZE, block[1]*SPRITE_SIZE)) # painting a beautiful snek\n if self.neural_net: # calls for neural net rendering\n self.neural_net.render(window, self.vision)",
"def render(self, width=None, height=None, *, camera_name=None, depth=False,\n mode='offscreen', device_id=-1):\n if camera_name is None:\n camera_id = None\n else:\n camera_id = self.model.camera_name2id(camera_name)\n\n if mode == 'offscreen':\n with _MjSim_render_lock:\n if self._render_context_offscreen is None:\n render_context = MjRenderContextOffscreen(\n self, device_id=device_id)\n else:\n render_context = self._render_context_offscreen\n\n render_context.render(\n width=width, height=height, camera_id=camera_id)\n return render_context.read_pixels(\n width, height, depth=depth)\n elif mode == 'window':\n if self._render_context_window is None:\n from mujoco_py.mjviewer import MjViewer\n render_context = MjViewer(self)\n else:\n render_context = self._render_context_window\n\n render_context.render()\n\n else:\n raise ValueError(\"Mode must be either 'window' or 'offscreen'.\")",
"def start_render_window(self):\n\n # Initialize interactor\n self.__render_window_interactor.Initialize()\n\n # Start render window with interactor\n self.__render_window.Render()\n self.__render_window_interactor.Start()",
"def render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n os.kill(self.viewer.pid, signal.SIGKILL)\n else:\n if self.viewer is None:\n self._start_viewer()"
] | [
"0.7304193",
"0.7073188",
"0.69786763",
"0.6757472",
"0.66534996",
"0.65286",
"0.6504135",
"0.62273604",
"0.617706",
"0.6072075",
"0.60463256",
"0.5997811",
"0.59711",
"0.59711",
"0.5969911",
"0.596902",
"0.5954911",
"0.5952626",
"0.5898475",
"0.5885439",
"0.58700675",
"0.5857569",
"0.5853964",
"0.5846555",
"0.58459496",
"0.58358717",
"0.5828217",
"0.58075225",
"0.57991415",
"0.57646793"
] | 0.7445947 | 0 |
Return a simplified copy of the environment where distant spacecraft have been removed from the space. This is meant to lower the policy's computational load while preserving the set of optimal actions. | def simplify(self):
state_copy = copy.deepcopy(self)
state_copy.space.spacecrafts = [state_copy.spacecraft] + state_copy.space.close_spacecrafts_to(
state_copy.spacecraft, self.PERCEPTION_DISTANCE)
return state_copy | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simplify(self) -> 'AbstractEnv':\n state_copy = copy.deepcopy(self)\n state_copy.road.vehicles = [state_copy.vehicle] + state_copy.road.close_vehicles_to(\n state_copy.vehicle, self.PERCEPTION_DISTANCE)\n\n return state_copy",
"def without(self, to_unset):\n modified = self.copy()\n assert type(to_unset) == list\n for env_var in to_unset:\n if env_var in modified:\n modified.pop(env_var)\n return modified",
"def potential_new_obs(self) -> Set[GriddedPerm]:\n subobs: Set[GriddedPerm] = set()\n for ob in self._tiling.obstructions:\n subobs.update(ob.all_subperms(proper=True))\n subobs.remove(GriddedPerm.empty_perm())\n return subobs",
"def clone(self):\n return Environment(self.local_variables, self.local_types)",
"def delete_inaccessible_buildings(self):\n def num_adj_buildings(pos):\n \"\"\"Helper function that returns number of immediately adjacent commercial buildings\"\"\"\n neighborhood = self.environment.grid.get_neighborhood(pos, moore=False, include_center=False)\n\n adj_num = 0\n num_cells = 0\n for cell in neighborhood:\n num_cells += 1\n # Check contents of each neighbor\n if not self.environment.grid.is_cell_empty(cell):\n contents = self.environment.grid.get_cell_list_contents(cell)\n for agent in contents:\n if type(agent) is Building or type(agent) is CommercialBuilding:\n # check that cell is not empty and contains a commercial building in it\n adj_num += 1\n break\n return adj_num, num_cells\n\n def is_inacessible(cell):\n \"\"\"Helper function, converts to boolean\"\"\"\n adj, count = num_adj_buildings(cell)\n return adj == count\n\n # Main Function\n for building in self.environment.agents['residences']:\n if is_inacessible(building.pos):\n self.environment.grid.remove_agent(building)\n self.environment.agents['residences'].remove(building)\n\n for building in self.environment.agents['commercial_buildings']:\n if is_inacessible(building.pos):\n self.environment.grid.remove_agent(building)\n self.environment.agents['commercial_buildings'].remove(building)",
"def __copy__(self):\n #new = MCTS(copy=True) # don't run _predict() twice\n new = MCTS(self.env, copy=True) # don't set pi and Q twice\n new.env = self.env.__copy__()\n # can't use __dict__.update() without effecting env __copy__()\n # in theory, you don't need to copy the env. just use one copy for simulating, and restore it to root\n # since _Q() evaluates the env.done() of children, you need self.done = env.done() in __init__()\n # same for env.winner\n new.pi = []\n new. Q = 0\n new.net = self.net\n new.t = self.t\n new.expl = self.expl\n new.children = []\n new.parent = None\n return new",
"def get_gym_space(self):\n return gym.spaces.Discrete(len(self.env.ACTION_MODES))",
"def test_population_movements_without_compilation(self):\n self._pystepx = PySTEPXIsland(nb_islands=4, init_script=init_script)\n self._pystepx._rc[0].execute('elems = gp_engine.get_evolver().select_and_remove_individuals(0.01, False)',\n block=True)\n print self._pystepx._rc[0]['elems']",
"def remove_weights(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.ArcMap(self.fst[0], result.fst, openfst.RmTropicalWeightMapper())\n return result",
"def slice(self, evidence={}):\n return self.condition(evidence)\n \n \n\n# def eliminate(self, elimVars, elimOp):\n # TODO: awkward way to define this; convert to more direct implementation?\n for v in elimVars:\n if len(self.markovBlanket(v)) > 2: raise ValueError(\"Cannot eliminate {} with {} (>2) neighbors\".format(v,len(self.markovBlanket(v))))\n flist = self.factorsWith(v)\n gm_model = GraphModel(flist); print(gm_model); gm_model.eliminate([v],elimOp)\n fnew = gm_model.factors[0]\n self.removeFactors(flist); # doesn't quite work? numerical roundoff issues?\n self.L[v,:] = 0; self.L[:,v] = 0; self.h[v] = 0; # TODO: better to mark as removed? how?\n self.addFactors([fnew])\n # TODO: \"remove\" variable by setting states = 0? \"known value\" = 0?",
"def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . -name \\\"*.pyc\\\" -exec rm '{}' ';'\")",
"def removeDegenerate(self):\n return self[~self.testDegenerate()]",
"def unsetSpatialDimensions(self):\n return _libsbml.Compartment_unsetSpatialDimensions(self)",
"def remove_abs_vars(self):\n self.m.remove(self.bp_abs)\n self.m.remove(self.bn_abs)\n self.m.remove(self.gp_abs)\n self.m.remove(self.gn_abs)\n self.m.remove(self.beta_p)\n self.m.remove(self.beta_n)\n self.m.remove(self.gamma_p)\n self.m.remove(self.gamma_n)",
"def soft_reset(self):\n self._soft_reset()\n rewards = np.zeros((self.n_envs, self.n_players))\n return self.obs, rewards, self.agent_dones, self.info_dict",
"def undo(self) -> CompilerEnv:\n if not self.stack:\n return\n self.env.close()\n self.env = self.stack.pop()\n return self.env",
"def env_reset(self):\n state = self.env.reset()\n return self.feature_extractor.extract_features(state)",
"def _removeOcean(self):\r\n\t\tnodesToClean = [CONST.OCEANDISPSHADER, CONST.OCEANANIMSHADER, CONST.OCEAN_ANIM_PREVIEWPLANENAME]\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass",
"def _drop_projected_dims(dims):\n return [d for d in dims if d != 1]",
"def reduced(self):\n from reduced import ReducedPermutationIET\n\n return ReducedPermutationIET(self.list(), alphabet=self._alphabet, reduced=True)",
"def simplify(self):\n \n added_clumps = []\n staying_tunnels = []\n removed_clumps = set()\n \n for tunnel in self.tunnels:\n tunnel_end_distance = self.get_distance(tunnel.start, tunnel.end)\n if tunnel_end_distance - tunnel.start.distance_from_wall < 0 or \\\n tunnel_end_distance - tunnel.end.distance_from_wall < 0:\n removed_clumps.add(tunnel.start.node)\n removed_clumps.add(tunnel.end.node)\n new_node = tunnel.merge_endpoints()\n added_clumps.append(new_node)\n else:\n staying_tunnels.append(tunnel)\n #print removed_clumps\n \n new_clumps = []\n \n for clump in list(self.clumps) + added_clumps:\n if clump not in removed_clumps:\n new_clumps.append(clump)\n else:\n removed_clumps.remove(clump)\n\n if removed_clumps:\n raise Exception(\"Some removed clumps couldn't be found in the main set and I'm scared\")\n \n self.clumps = new_clumps\n self.tunnels = staying_tunnels",
"def reduced(self):\n from surface_dynamics.interval_exchanges.reduced import FlippedReducedPermutationIET\n\n return FlippedReducedPermutationIET(\n intervals=self.list(flips=False),\n flips=self.flips(),\n alphabet=self.alphabet(),\n reduced=True)",
"def remove_epsilon(self) -> \"CFG\":\n new_productions = []\n nullables = self.get_nullable_symbols()\n for production in self._productions:\n new_productions += remove_nullable_production(production,\n nullables)\n return CFG(self._variables,\n self._terminals,\n self._start_symbol,\n new_productions)",
"def sliced_focused_workspaces(self):\n if self._choppedWorkspaceNameList is None or len(self._choppedWorkspaceNameList) == 0:\n return None\n elif self.is_reduced is False:\n return None\n\n return self._choppedWorkspaceNameList[:]",
"def compact(self, distance=2):\n pass",
"def hard_reset(self):\n self._hard_reset()\n rewards = np.zeros((self.n_envs, self.n_players))\n return self.obs, rewards, self.agent_dones, self.info_dict",
"def unallocated_spaces(self):\n unallocated_offices = 0\n for office in self.offices:\n unallocated_offices += self.offices[\n office]['room'].unallocated_spaces\n unallocated_living = 0\n for living in self.living_spaces:\n unallocated_living += self.living_spaces[\n living]['room'].unallocated_spaces\n\n return [unallocated_offices, unallocated_living]",
"def solution_copy(self):\n to_return = DepAlgoSolution(self.packages_in_solution[:], self.visited_packages[:], set(self.visited_names))\n to_return.is_valid = self.is_valid\n to_return.not_to_delete_deps = set(self.not_to_delete_deps)\n for key, value in self.dict_to_way.items():\n to_return.dict_to_way[key] = value[:]\n for key, value in self.dict_to_deps.items():\n to_return.dict_to_deps[key] = set(value)\n for key, value in self.dict_call_as_needed.items():\n to_return.dict_call_as_needed[key] = value\n to_return.installed_solution_packages = set(self.installed_solution_packages)\n return to_return",
"def reset(self):\r\n self.env.reset()\r\n return self.env.get_obs()",
"def eliminate(self):\n deleteKey = []\n for key,value in self._sets[self._currentSet].items():\n if value < self._minSupport:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._sets[self._currentSet][key]"
] | [
"0.5691152",
"0.54813194",
"0.5258752",
"0.5244194",
"0.52369404",
"0.52155966",
"0.5172308",
"0.5151422",
"0.5110349",
"0.50940883",
"0.508118",
"0.50775474",
"0.5073993",
"0.50731057",
"0.50580597",
"0.5023523",
"0.5012307",
"0.49986777",
"0.49946648",
"0.49837384",
"0.49636728",
"0.49525136",
"0.49395362",
"0.49349198",
"0.4933384",
"0.4890871",
"0.4890614",
"0.48705187",
"0.48635557",
"0.48441577"
] | 0.6686988 | 0 |
Build an object detection model from a ResNet50 architecture pretrained on COCO | def build_detection_model(checkpoint_path='models/research/object_detection/test_data/checkpoint/ckpt-0',
pipeline_config='models/research/object_detection/configs/tf2/ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config',
freeze_batchnorm=True,
num_classes=1):
# Load the configuration file into a dictionary
configs = config_util.get_configs_from_pipeline_file(pipeline_config)
# Read in the object stored at the key 'model' of the configs dictionary
model_config = configs['model']
# Modify the number of classes from its default of 90
model_config.ssd.num_classes = num_classes
# Freeze batch normalization
model_config.ssd.freeze_batchnorm = freeze_batchnorm
# Build model
detection_model = model_builder.build(model_config=model_config, is_training=True)
# Bounding box prediction layers
tmp_box_predictor_checkpoint = tf.train.Checkpoint(
_base_tower_layers_for_heads=detection_model._box_predictor._base_tower_layers_for_heads,
_box_prediction_head=detection_model._box_predictor._box_prediction_head)
# Model checkpoint
model_checkpoint = tf.train.Checkpoint(
_feature_extractor=detection_model._feature_extractor,
_box_predictor=tmp_box_predictor_checkpoint
)
# Define a checkpoint that sets model equal to the model defined above
checkpoint = tf.train.Checkpoint(model=model_checkpoint)
# Restore the checkpoint to the checkpoint path
checkpoint.restore(checkpoint_path)
# use the detection model's `preprocess()` method and pass a dummy image
tmp_image, tmp_shapes = detection_model.preprocess(tf.zeros([1, 640, 640, 3]))
# run a prediction with the preprocessed image and shapes
tmp_prediction_dict = detection_model.predict(tmp_image, tmp_shapes)
# postprocess the predictions into final detections
_ = detection_model.postprocess(tmp_prediction_dict, tmp_shapes)
return detection_model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_model():\n \n # load a model pre-trained pre-trained on COCO\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained = True)\n \n # replace the classifier with a new one, that has num_classes which is user-defined\n num_classes = 2 # 1 class (person) + background\n \n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n \n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n \n return model",
"def get_model(summary=False):\n\timage_input=Input(shape=(220,220,5),name='image_input')\n\tbranch1_conv1=Conv2D(64, kernel_size=(3, 3), border_mode='same', input_shape=(220,220,5), activation='relu')(image_input)\n\tbranch1_conv2=Conv2D(64, kernel_size=(1, 1), border_mode='same', activation='relu')(branch1_conv1)\t\n\tbranch1_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch1_conv1)\n\tbranch2_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch1_pool1)\n\tbranch2_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch2_conv1)\t\n\tbranch2_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch2_conv2)\n\tbranch3_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch2_pool1)\n\tbranch3_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch3_conv1)\t\n\tbranch3_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch3_conv2)\n\tbranch4_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch3_pool1)\n\tbranch4_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch4_conv1)\t\n\tbranch4_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch4_conv2)\n\tbranch5_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch4_pool1)\n\tbranch5_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch5_conv1)\t\n\tbranch5_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch5_conv2)\n\tbranch6_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch5_pool1)\n\tbranch6_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch6_conv1)\t\n\tbranch6_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch6_conv2)\n\tbranch1_flat=Flatten()(branch6_pool1)\n\tdrop=Dropout(.3)(branch1_flat)\n\t# FC layers group\n\tdense1=Dense(512, activation='relu', name='fc1')(drop)\n\tdrop1=Dropout(.3)(dense1)\n\tdense2=Dense(256, activation='relu', name='fc2')(drop1)\n\tdrop3=Dropout(.3)(dense2)\n\tout=Dense(2, activation='softmax', name='fc4')(drop3)\n\tmodel=Model(inputs=image_input,outputs=out)\n\treturn model",
"def ResNet50_model(input_shape, pooling):\n from keras.applications.resnet import ResNet50\n return ResNet50(include_top=False, weights='imagenet', input_shape=input_shape, pooling=pooling)",
"def build(self):\n input_shape_img = (None, None, 3)\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n \n output_region_proposal = self.region_proposal_net(shared_layers, num_anchors)\n output_classifier = self.classifier(shared_layers,\n self.cnn_model.classifier_layers, \n roi_input, self.C.num_roi, \n num_class=len(self.class_count), trainable=True)\n \n self.model_region_proposal = Model(img_input, output_region_proposal[:2])\n self.model_classifier = Model([img_input, roi_input], output_classifier)\n self.model_all = Model([img_input, roi_input], output_region_proposal[:2] + output_classifier)\n\n optimizer = Adam(lr=1e-5)\n self.model_region_proposal.compile(optimizer=optimizer, \n loss=[losses.rpn_loss_cls(num_anchors), \n losses.rpn_loss_regr(num_anchors)])\n self.model_classifier.compile(optimizer=optimizer, \n loss=[losses.class_loss_cls, \n losses.class_loss_regr(len(self.class_count)-1)], \n metrics={'dense_class_{}'.format(len(self.class_count)): 'accuracy'})\n self.model_all.compile(optimizer='sgd', loss='mae')\n\n # print(self.model_all.summary())\n plot_model(self.model_region_proposal, show_shapes=True, to_file='./frcnn/images/region_proposal.png')\n plot_model(self.model_classifier, show_shapes=True, to_file='./frcnn/images/classifier.png')\n plot_model(self.model_all, show_shapes=True, to_file='./frcnn/images/model_all.png')",
"def resnet50(pretrained=False):\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model",
"def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model",
"def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model",
"def load_feature_extractor(model_spec, device):\n\n model_type = model_spec['name']\n model_weights_fp = model_spec['weights']\n\n if model_type == 'imagenet_swav':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_supervised':\n model = models.resnet50(pretrained=True)\n\n elif model_type == 'random':\n model = models.resnet50(pretrained=False)\n\n elif model_type == 'inat2018_supervised':\n model = models.resnet50(pretrained=False)\n # This model was actually trained with 10000 classes for the fc layer\n # but only 8142 (the number in inat2018) were actually updated\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in checkpoint['state_dict'].items()}\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n 
# remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_mini_swav' or model_type == 'inat2021_mini_swav_1k':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict['state_dict'].items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n else:\n raise ValueError(\"Unknown pytorch model: %s\" % model_type)\n\n\n # remove the final fully connected layer so the model only operates with post average pool features\n model = torch.nn.Sequential(*(list(model.children())[:-1]))\n model.to(device)\n model.eval()\n\n feature_extractor = PTResNet50FeatureExtractor(model, device)\n\n return feature_extractor",
"def make_model():\n \n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(37, activation='softmax'))\n \n #model.add(layers.Dense(1, activation='sigmoid'))\n \n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n return model",
"def cnn_model(model_name, img_size):\n\tinput_size = (img_size, img_size, 3)\n\n\tif model_name == \"xception\":\n\t\tbaseModel = Xception(\n\t\t\tweights=\"imagenet\", include_top=False, input_shape=(img_size, img_size, 3)\n\t\t)\n\telif model_name == \"efn0\":\n\t\tbaseModel = efn.EfficientNetB0(weights=\"imagenet\", include_top=False,\n\t\t\tinput_shape=input_size)\n\telif model_name == \"efn_noisy\":\n\t\tbaseModel = efn.EfficientNetB5(weights=\"noisy-student\", include_top=False,\n\t\t\tinput_shape=input_size)\n\n\theadModel = baseModel.output\n\theadModel = GlobalAveragePooling2D()(headModel)\n\theadModel = Dense(1024, activation=\"relu\", kernel_initializer=\"he_uniform\")(\n\t\theadModel\n\t)\n\theadModel = Dropout(0.4)(headModel)\n\tpredictions = Dense(\n\t\t200,\n\t\tactivation=\"softmax\",\n\t\tkernel_initializer=\"he_uniform\")(\n\t\theadModel\n\t)\n\tmodel = Model(inputs=baseModel.input, outputs=predictions)\n\n\tfor layer in baseModel.layers:\n\t\tlayer.trainable = False\n\n\toptimizer = Nadam(\n\t\tlr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004\n\t)\n\tmodel.compile(\n\t\t# loss=\"categorical_crossentropy\",\n\t\tloss=joint_loss,\n\t\toptimizer=optimizer,\n\t\tmetrics=[\"accuracy\"]\n\t)\n\treturn model",
"def resnet50(pretrained=True, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n print(\"=> using pre-trained model '{}'\".format('resnet_50'))\n pretrained_state = model_zoo.load_url(model_urls['resnet50'])\n model_state = model.state_dict()\n pretrained_state = { k:v for k,v in pretrained_state.items() if k in model_state and v.size() == model_state[k].size() }\n model_state.update(pretrained_state)\n model.load_state_dict(model_state)\n return model",
"def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")",
"def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')",
"def model(pretrained=False, **kwargs):\r\n\r\n layers = make_layers(cfg['O'], dilation=dilation['D1'])\r\n cnv = np.cumsum(cnvs['OI']) if kwargs['args'].IN or kwargs['args'].INL else np.cumsum(cnvs['O'])\r\n model = VGG(layers, cnvs=cnv, **kwargs)\r\n if pretrained:\r\n pre2local_keymap = [('features.{}.weight'.format(i), 'conv1_2.{}.weight'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.bias'.format(i), 'conv1_2.{}.bias'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 10), 'conv3.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 10), 'conv3.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 17), 'conv4.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 17), 'conv4.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 24), 'conv5.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 24), 'conv5.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap = dict(pre2local_keymap)\r\n\r\n\r\n model_dict = model.state_dict()\r\n pretrained_file = os.path.join(kwargs['args'].pretrained_model_dir, kwargs['args'].pretrained_model)\r\n if os.path.isfile(pretrained_file):\r\n pretrained_dict = torch.load(pretrained_file)\r\n print('load pretrained model from {}'.format(pretrained_file))\r\n else:\r\n pretrained_dict = model_zoo.load_url(model_urls['vgg16'])\r\n print('load pretrained model from {}'.format(model_urls['vgg16']))\r\n # 0. replace the key\r\n pretrained_dict = {pre2local_keymap[k] if k in pre2local_keymap.keys() else k: v for k, v in\r\n pretrained_dict.items()}\r\n # *. show the loading information\r\n for k in pretrained_dict.keys():\r\n if k not in model_dict:\r\n print('Key {} is removed from vgg16'.format(k))\r\n print(' ')\r\n for k in model_dict.keys():\r\n if k not in pretrained_dict:\r\n print('Key {} is new added for DA Net'.format(k))\r\n # 1. filter out unnecessary keys\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n # 2. overwrite entries in the existing state dict\r\n model_dict.update(pretrained_dict)\r\n # 3. load the new state dict\r\n model.load_state_dict(model_dict)\r\n return model",
"def try_load_model(save_dir, step_ckpt=-1, return_new_model=True, verbose=True, ocnn=False):\n ocnn_model=None\n ocnn_optimizer=None\n\n import tensorflow as tf\n tf.compat.v1.enable_v2_behavior()\n if configs.config_values.model == 'baseline':\n configs.config_values.num_L = 1\n\n splits=False\n if configs.config_values.y_cond:\n splits = dict_splits[configs.config_values.dataset]\n\n # initialize return values\n model_name = configs.config_values.model\n if model_name == 'resnet':\n model = ResNet(filters=configs.config_values.filters, activation=tf.nn.elu)\n elif model_name in ['refinenet', 'baseline']:\n model = RefineNet(filters=configs.config_values.filters, activation=tf.nn.elu,\n y_conditioned=configs.config_values.y_cond, splits=splits)\n elif model_name == 'refinenet_twores':\n model = RefineNetTwoResidual(filters=configs.config_values.filters, activation=tf.nn.elu)\n elif model_name == 'masked_refinenet':\n print(\"Using Masked RefineNet...\")\n # assert configs.config_values.y_cond \n model = MaskedRefineNet(filters=configs.config_values.filters, activation=tf.nn.elu, \n splits=dict_splits[configs.config_values.dataset], y_conditioned=configs.config_values.y_cond)\n\n optimizer = tf.keras.optimizers.Adam(learning_rate=configs.config_values.learning_rate)\n step = 0\n evaluate_print_model_summary(model, verbose)\n \n if ocnn:\n from tensorflow.keras import Model\n from tensorflow.keras.layers import Input, Flatten, Dense, AvgPool2D\n # Building OCNN on top\n print(\"Building OCNN...\")\n Input = [Input(name=\"images\", shape=(28,28,1)),\n Input(name=\"idx_sigmas\", shape=(), dtype=tf.int32)]\n\n score_logits = model(Input)\n x = Flatten()(score_logits)\n x = Dense(128, activation=\"linear\", name=\"embedding\")(x)\n dist = Dense(1, activation=\"linear\", name=\"distance\")(x)\n ocnn_model = Model(inputs=Input, outputs=dist, name=\"OC-NN\")\n ocnn_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-5)\n evaluate_print_model_summary(ocnn_model, verbose=True)\n\n # if resuming training, overwrite model parameters from checkpoint\n if configs.config_values.resume:\n if step_ckpt == -1:\n print(\"Trying to load latest model from \" + save_dir)\n checkpoint = tf.train.latest_checkpoint(str(save_dir))\n else:\n print(\"Trying to load checkpoint with step\", step_ckpt, \" model from \" + save_dir)\n onlyfiles = [f for f in os.listdir(save_dir) if os.path.isfile(os.path.join(save_dir, f))]\n # r = re.compile(\".*step_{}-.*\".format(step_ckpt))\n r = re.compile(\"ckpt-{}\\\\..*\".format(step_ckpt))\n\n name_all_checkpoints = sorted(list(filter(r.match, onlyfiles)))\n print(name_all_checkpoints)\n # Retrieve name of the last checkpoint with that number of steps\n name_ckpt = name_all_checkpoints[-1][:-6]\n # print(name_ckpt)\n checkpoint = save_dir + name_ckpt\n if checkpoint is None:\n print(\"No model found.\")\n if return_new_model:\n print(\"Using a new model\")\n else:\n print(\"Returning None\")\n model = None\n optimizer = None\n step = None\n else:\n step = tf.Variable(0)\n\n if ocnn:\n ckpt = tf.train.Checkpoint(step=step, optimizer=optimizer, model=model,\n ocnn_model=ocnn_model, ocnn_optimizer=ocnn_optimizer)\n else:\n ckpt = tf.train.Checkpoint(step=step, optimizer=optimizer, model=model)\n\n ckpt.restore(checkpoint)\n step = int(step)\n print(\"Loaded model: \" + checkpoint)\n\n return model, optimizer, step, ocnn_model, ocnn_optimizer",
"def resnet50(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet('resnet50', Bottleneck, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet('resnet50', Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model",
"def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model",
"def cnn_model(model_name, img_size, weights):\n input_size = (img_size, img_size, 3)\n if model_name == \"xception\":\n baseModel = Xception(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"iv3\":\n baseModel = InceptionV3(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"irv2\":\n baseModel = InceptionResNetV2(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"resnet\":\n baseModel = ResNet50(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"nasnet\":\n baseModel = NASNetLarge(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"ef0\":\n baseModel = EfficientNetB0(\n input_size,\n weights=\"imagenet\",\n include_top=False\n )\n elif model_name == \"ef5\":\n baseModel = EfficientNetB5(\n input_size,\n weights=\"imagenet\",\n include_top=False\n )\n\n headModel = baseModel.output\n headModel = GlobalAveragePooling2D()(headModel)\n headModel = Dense(\n 512,\n activation=\"relu\",\n kernel_initializer=\"he_uniform\",\n name=\"fc1\")(\n headModel\n )\n headModel = Dropout(0.4)(headModel)\n predictions = Dense(\n 2,\n activation=\"softmax\",\n kernel_initializer=\"he_uniform\")(\n headModel\n )\n model = Model(inputs=baseModel.input, outputs=predictions)\n\n model.load_weights(weights)\n print(\"Weights loaded...\")\n model_lstm = Model(\n inputs=baseModel.input,\n outputs=model.get_layer(\"fc1\").output\n )\n\n for layer in baseModel.layers:\n layer.trainable = True\n\n optimizer = Nadam(\n lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004\n )\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"]\n )\n return model_lstm",
"def make_NN(n_hidden, n_epoch, labelsdict, lr, device, model_name, trainloader, validloader, train_data, pretrain, finetune_whole, custom_model):\n if custom_model == 2:\n # Use custom two-layer convolution model\n print(\"Using Two-Layer CNN\")\n model = TwoLayerConvNet()\n elif custom_model == 5:\n print(\"Using Five-Layer CNN\")\n # Use custom five-layer convolution model\n model = FiveLayerConvNet()\n else:\n # Import NN model (either pretrained or not)\n model = getattr(models, model_name)(pretrained=pretrain)\n \"\"\" ===================================================================================== \"\"\"\"\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER FREEZE THE PARAMETERS OR NOT (WILBERT ARISTO) \"\"\"\n # If we do not need to finetune whole model, freeze parameters that we don't need to re-train\n if not finetune_whole:\n for param in model.parameters():\n param.requires_grad = False\n \"\"\" ===================================================================================== \"\"\"\"\n\n n_out = len(labelsdict)\n\n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Make classifier\n n_in = next(model.fc.modules()).in_features\n model.fc = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.fc.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n else:\n # Make classifier\n n_in = next(model.classifier.modules()).in_features\n model.classifier = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.classifier.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n # Define criterion\n criterion = nn.NLLLoss() \n\n model.to(device)\n start = time.time()\n\n epochs = n_epoch\n steps = 0 \n running_loss = 0\n print_every = 40\n for e in range(epochs):\n model.train()\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Eval mode for predictions\n model.eval()\n\n # Turn off gradients for validation\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion, device)\n\n print(\"Epoch: {}/{} - \".format(e+1, epochs),\n \"Training Loss: {:.3f} - \".format(running_loss/print_every),\n \"Validation Loss: {:.3f} - \".format(test_loss/len(validloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure 
training is back on\n model.train()\n \n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Add model info \n model.fc.n_in = n_in\n model.fc.n_hidden = n_hidden\n model.fc.n_out = n_out\n model.fc.labelsdict = labelsdict\n model.fc.lr = lr\n model.fc.optimizer_state_dict = optimizer.state_dict\n model.fc.model_name = model_name\n model.fc.class_to_idx = train_data.class_to_idx\n else:\n # Add model info \n model.classifier.n_in = n_in\n model.classifier.n_hidden = n_hidden\n model.classifier.n_out = n_out\n model.classifier.labelsdict = labelsdict\n model.classifier.lr = lr\n model.classifier.optimizer_state_dict = optimizer.state_dict\n model.classifier.model_name = model_name\n model.classifier.class_to_idx = train_data.class_to_idx\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n print('model:', model_name, '- hidden layers:', n_hidden, '- epochs:', n_epoch, '- lr:', lr)\n print(f\"Run time: {(time.time() - start)/60:.3f} min\")\n return model\n\n# Define function to save checkpoint\ndef save_checkpoint(model, path):\n checkpoint = {'c_input': model.classifier.n_in,\n 'c_hidden': model.classifier.n_hidden,\n 'c_out': model.classifier.n_out,\n 'labelsdict': model.classifier.labelsdict,\n 'c_lr': model.classifier.lr,\n 'state_dict': model.state_dict(),\n 'c_state_dict': model.classifier.state_dict(),\n 'opti_state_dict': model.classifier.optimizer_state_dict,\n 'model_name': model.classifier.model_name,\n 'class_to_idx': model.classifier.class_to_idx\n }\n torch.save(checkpoint, path)\n \n# Define function to load model\ndef load_model(path):\n cp = torch.load(path)\n \n # Import pre-trained NN model \n model = getattr(models, cp['model_name'])(pretrained=True)\n \n # Freeze parameters that we don't need to re-train \n for param in model.parameters():\n param.requires_grad = False\n \n # Make classifier\n model.classifier = NN_Classifier(input_size=cp['c_input'], output_size=cp['c_out'], \\\n hidden_layers=cp['c_hidden'])\n \n # Add model info \n model.classifier.n_in = cp['c_input']\n model.classifier.n_hidden = cp['c_hidden']\n model.classifier.n_out = cp['c_out']\n model.classifier.labelsdict = cp['labelsdict']\n model.classifier.lr = cp['c_lr']\n model.classifier.optimizer_state_dict = cp['opti_state_dict']\n model.classifier.model_name = cp['model_name']\n model.classifier.class_to_idx = cp['class_to_idx']\n model.load_state_dict(cp['state_dict'])\n \n return model",
"def get_model():\n\n # Create a convolutional neural network\n model = tf.keras.models.Sequential([\n\n tf.keras.layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), # Convolutional layer with 32 filters of a 3 x 3 kernel\n\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), # Max-pooling layer with a 2 x 2 pool size\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"), # Convolutional layer with 64 filters of a 3 x 3 kernel\n \n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), # Max-pooling layer with a 2 x 2 pool size\n\n tf.keras.layers.Flatten(), # Flatten units\n\n tf.keras.layers.Dense(256, activation=\"relu\"), # Hidden layer with 256 neurons\n\n tf.keras.layers.Dropout(0.25), # Dropout layer with a rate of 0.25\n\n tf.keras.layers.Dense(NUM_CATEGORIES, activation=\"softmax\") # Output layer with an output unit for each image category\n ])\n\n # Compile model\n model.compile(\n optimizer=\"adam\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n\n return model",
"def model_fn(model_dir):\n ctx = mx.cpu()\n net = gcv.model_zoo.get_model(\n 'yolo3_darknet53_voc',\n pretrained=False,\n ctx=ctx)\n batchify = gcv.data.batchify._stack_arrs\n net.load_parameters(os.path.join(model_dir, 'yolo3_darknet53_voc.params'), mx.cpu(0))\n net.hybridize()\n def image_transform(im_bytes):\n \"\"\"\n Apply image transformation to raw byte images\n \"\"\"\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]\n\n return net, image_transform, batchify",
"def build_model():\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 3)\n\n model = Sequential()\n model.add(Conv2D(32, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n\n # FC layer\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n return model",
"def get_model(imgsize: ImageSize, classes_count: int) -> keras.Model:\n data_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal\", input_shape=(imgsize.height, imgsize.width, 3)),\n layers.experimental.preprocessing.RandomRotation(0.1),\n layers.experimental.preprocessing.RandomZoom(0.1),\n ]\n )\n\n model = Sequential([\n data_augmentation,\n layers.experimental.preprocessing.Rescaling(1. / 255),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.2),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(classes_count)\n ])\n\n model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n return model",
"def create_vision_model(image_dims=(224, 224, 3), weights='imagenet'):\n base_model = tf.keras.applications.ResNet50V2(\n weights=weights, include_top=False, input_shape=image_dims)\n inp = tf.keras.layers.Input(shape=image_dims)\n x = base_model(inp)\n x = tfkl.GlobalAveragePooling2D()(x)\n len_visual_description = x.shape[-1]\n vision_model = tf.keras.Model(inp, x)\n return vision_model, len_visual_description"
] | [
"0.690004",
"0.66588193",
"0.6648487",
"0.6648473",
"0.66194236",
"0.6607208",
"0.6582919",
"0.6557827",
"0.6557827",
"0.6557827",
"0.6557827",
"0.6549903",
"0.65468943",
"0.6501333",
"0.64943516",
"0.6476868",
"0.6446595",
"0.6439272",
"0.6412864",
"0.63972425",
"0.6386676",
"0.6377481",
"0.6349557",
"0.6346742",
"0.63460004",
"0.6344086",
"0.6335169",
"0.6331056",
"0.63275397",
"0.6322753"
] | 0.6721325 | 1 |
Assert a file contains the given text. | def assert_text(self, path, contents):
assert isinstance(contents, text_type)
data = self.fs.gettext(path)
self.assertEqual(data, contents)
self.assertIsInstance(data, text_type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_in_file(file_path, text):\n with open(file_path, 'r') as f:\n content = f.read()\n return text in content",
"def assertFile(self, root_path, path, expected_content):\n full_path = os.path.join(root_path, path)\n self.assertFilePresent(root_path, path)\n with open(full_path) as f:\n content = f.read()\n self.assertEqual(content, expected_content)",
"def assertText(self,content,expected_text,description=\"\"): \n self.assertTrue(expected_text in content,\n \"expected to find '{0}' but found '{1}' instead.\\\n Attemted action: {2}\".format(expected_text, \n content,\n description))",
"def test_text_file_by_path(self):\n self.command.package = self.input_ovf\n self.command.file_path = \"sample_cfg.txt\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n- <ovf:File ovf:href=\"sample_cfg.txt\" ovf:id=\"textfile\" \\\novf:size=\"{cfg_size}\" />\n </ovf:References>\n\"\"\".format(iso_size=self.FILE_SIZE['input.iso'],\n cfg_size=self.FILE_SIZE['sample_cfg.txt']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"sample_cfg.txt\")),\n \"deleted file should not be exported\")",
"def test_read_file():\n filename = 'sample'\n assert read_file(filename) == 'hello!\\n'",
"def check_contain(file_content, check_text):\n for line in file_content:\n if check_text in line:\n return True\n return False",
"def test_text_file_by_id(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"textfile\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n- <ovf:File ovf:href=\"sample_cfg.txt\" ovf:id=\"textfile\" \\\novf:size=\"{cfg_size}\" />\n </ovf:References>\n\"\"\".format(iso_size=self.FILE_SIZE['input.iso'],\n cfg_size=self.FILE_SIZE['sample_cfg.txt']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"sample_cfg.txt\")),\n \"deleted file should not be exported\")",
"def test_file(self, file: CollectedFile):\n\n return file.filename[-3:].upper() == 'TXT'",
"def assertContains(self, response, text):\n self.assertIn(text, response.data.decode('utf-8'))",
"def assert_has_text(self, xml_root, xpath, text, exact=True):\r\n element_list = xml_root.xpath(xpath)\r\n self.assertTrue(len(element_list) > 0,\r\n \"Could not find element at '%s'\" % str(xpath))\r\n\r\n if exact:\r\n self.assertEqual(text, element_list[0].text)\r\n else:\r\n self.assertIn(text, element_list[0].text)",
"def assert_file_equals(self, actual, fn):\n with open(fn, 'r') as expected:\n self.assertEquals(\n manage.to_unicode(expected.read()).split('\\n'),\n self.filter_log(actual.split('\\n')))",
"def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n self.assertEquals(a,True)",
"def test_add_text_str(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n data_to_write = \"Hello, łorld!\"\n file_path = archive.add_text('testfile', data_to_write)\n self.assertTrue(os.path.isfile(file_path))\n\n valid_output_path = os.path.join(archive.data_dir_path, 'testfile.txt')\n self.assertEqual(file_path, valid_output_path)\n\n with open(file_path, 'r') as fp:\n saved_data = fp.read().strip()\n self.assertEqual(saved_data, data_to_write)",
"def test_files(host, f):\n assert host.file(f).exists",
"def test_file_read():\n expected = [\"scorevideo LOG\\n\", \"File: log.mat\"]\n with open(TEST_RES + \"/file_read.txt\", 'r') as file:\n actual = file.readlines()\n assert expected == actual",
"def assert_text_present(self, text, msg=None):\r\n e = driver.find_element_by_tag_name('body')\r\n assert text in e.text",
"def test_add_text_int(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n data_to_write = 1234\n file_path = archive.add_text('testfile', data_to_write)\n self.assertTrue(os.path.isfile(file_path))\n\n valid_output_path = os.path.join(archive.data_dir_path, 'testfile.txt')\n self.assertEqual(file_path, valid_output_path)\n\n with open(file_path, 'r') as fp:\n saved_data = fp.read().strip()\n self.assertEqual(saved_data, str(data_to_write))",
"def verify_text_present(self, text, msg=None):\r\n try:\r\n self.assert_text_present(text, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)",
"def assertFilePresent(self, root_path, path):\n full_path = os.path.join(root_path, path)\n self.assertTrue(os.path.exists(full_path))",
"def is_text_file(file_path):\n expanded_path = os.path.abspath(os.path.expanduser(file_path.strip()))\n valid_text_file = False\n\n # Check if the file is ASCII\n try:\n with codecs.open(expanded_path, encoding='ascii', errors='strict') as f:\n # Make sure the file has at least one line of text\n # noinspection PyUnusedLocal\n if sum(1 for line in f) > 0:\n valid_text_file = True\n except IOError:\n pass\n except UnicodeDecodeError:\n # The file is not ASCII. Check if it is UTF-8.\n try:\n with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f:\n # Make sure the file has at least one line of text\n # noinspection PyUnusedLocal\n if sum(1 for line in f) > 0:\n valid_text_file = True\n except IOError:\n pass\n except UnicodeDecodeError:\n # Not UTF-8\n pass\n\n return valid_text_file",
"def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')",
"def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')",
"def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')",
"def file_contents_is_equal(file_path, expected_file_data):\r\n if not os.path.exists(file_path):\r\n raise Exception('File does not exist:' + file_path)\r\n\r\n with open(file_path, 'r', encoding='utf-8') as file_handle:\r\n file_data = file_handle.read()\r\n if expected_file_data != file_data:\r\n raise Exception('{} \\n\\n!= \\n\\n{}'.format(expected_file_data, file_data))",
"def test_read_file():\n assert read_file('test_read_file.txt') == \"\"\"ABCDEFGHIJKLMNOPQRSTUVWXYZ?\nabcdefghijklmnopqrstuvwxyz.\n\"\"\"",
"def test_no_such_file(self):\n\t\twith self.assertRaises(IOError):\n\t\t\tanalyse_text('foobar')",
"def search_text_in_log_file(self, text) :\n try:\n with open(self.file_path_name, 'r') as searchfile:\n for line in searchfile:\n if text in line:\n return True \n return False \n except: \n print 'The log : ' + self.file_path_name + 'cannot be opened'",
"def test_file(self):\n browser = self.layer.get_web_browser(smi_settings)\n\n image = test_filename('test.txt')\n browser.login(self.username, self.username)\n self.assertEqual(browser.open('/root/edit'), 200)\n browser.macros.create(\n 'Silva File', id='file', title='Text File', file=image)\n self.assertEqual(\n browser.inspect.folder_listing, ['index', 'file'])\n\n # The user should by the last author on the content and container.\n self.assertEqual(\n self.root.sec_get_last_author_info().userid(),\n self.username)\n self.assertEqual(\n self.root.file.sec_get_last_author_info().userid(),\n self.username)\n\n # Visit the edit page\n self.assertEqual(\n browser.inspect.folder_listing['file'].click(),\n 200)\n self.assertEqual(browser.url, '/root/file/edit/tab_edit')\n self.assertEqual(browser.inspect.breadcrumbs, ['root', 'Text File'])\n browser.inspect.breadcrumbs['root'].click()\n browser.macros.delete('file')",
"def test_read_from_file():\n from scraper import read_from_file\n assert read_from_file(TEST_FILE) == (TEST_CONTENT, 'utf-8')",
"def verifyFileExists(self, fileDir, fileName):\n # check that file exists\n fpath = fileDir.child(fileName)\n self.assertTrue(fpath.exists())\n\n # check that the output files have some content\n fcontents = fpath.getContent()\n self.assertTrue(len(fcontents) > 0)\n\n # check that the html files are at least html-ish\n # this is not a terribly rigorous check\n if fpath.path.endswith(\".html\"):\n self.assertIn(b\"<body\", fcontents)"
] | [
"0.7260009",
"0.6971656",
"0.68093544",
"0.67896724",
"0.66795635",
"0.6662553",
"0.65673757",
"0.64777285",
"0.6463219",
"0.6451605",
"0.6445072",
"0.63984907",
"0.63611096",
"0.6255968",
"0.6254223",
"0.62036115",
"0.62024397",
"0.61698884",
"0.61658216",
"0.61540717",
"0.6141419",
"0.6141419",
"0.60844994",
"0.6078231",
"0.60748017",
"0.60454994",
"0.60364175",
"0.60033",
"0.60005146",
"0.5991685"
] | 0.7273277 | 0 |
Check that an unknown purpose raises a NoURL error. | def test_geturl_purpose(self):
self.fs.create('foo')
with self.assertRaises(errors.NoURL):
self.fs.geturl('foo', '__nosuchpurpose__') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:",
"def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False",
"def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)",
"def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False",
"def check_url(self):\n\n base = 'https://www.reformagkh.ru/myhouse/profile/view/'\n\n if base not in self.url:\n raise UrlError('It is not an www.reformagkh.ru link. '\n 'Please try the correct link.')",
"def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False",
"def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False",
"def test_url(quartus, part, url):\n print(\"\\rChecking %s/%s \" % (quartus, part), end='')\n try:\n response = urllib.request.urlopen(url)\n headers = response.getheaders()\n return True\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n return False",
"def check_url(url=None, parse_url=None):\n \n if not parse_url:\n parse_url = urlparse.urlparse(url)\n \n invalid_paths = ['^\\/?$', '^\\/(stream|explore|groups|upload|you|dashboard|messages|settings|creativecommons|tracks|people)(\\/|$)']\n \n return parse_url.netloc in ['soundcloud.com', 'www.soundcloud.com', 'm.soundcloud.com']\\\n and not any(re.search(invalid_path, parse_url.path) for invalid_path in invalid_paths)",
"def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)",
"def check_url(url):\n return get_svninfo(url) != {}",
"def url_check(url):\n \n url_tuple = urlparse.urlparse(url)\n if url_tuple[0] == 'http' or url_tuple[0] == 'https' and url_tuple[1] != \"\":\n return url\n else:\n raise Exception('bad url')",
"def not_supported(cls, website):\n if website in cls.urls.keys():\n return False\n else:\n return True",
"def shouldSkipUrl(self, url, data):\n return data.xpath('//img[contains(@src, \"content-error-missing\")]')",
"def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)",
"def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)",
"def _verify_url_exists(url, use_head=False):\n # (str, bool) -> bool\n try:\n if use_head:\n resp = requests.head(url)\n else:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n return False\n\n return resp.status_code in [200, 302]",
"def isUrlValid(self, url):\n if url is None:\n return False\n elif url.startswith('//'):\n return False\n elif ':' in url:\n return False\n elif url.startswith('/wiki'):\n return True\n elif 'en.wikipedia.org/wiki/' not in url:\n return False\n return True",
"def test_liking_non_existent_comment(self):\n self.non_existing(self.like_url(3))",
"def test_is_url(self):\n\n url = \"https://shadowrun.needs.management\"\n self.assertTrue(run(verification.is_url(url)))\n\n url = \"https:// www.google.com\"\n self.assertFalse(run(verification.is_url(url)))",
"def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True",
"def test_404_url(self):\r\n url = 'http://lococast.net/archives/001'\r\n read = ReadUrl.parse(url)\r\n\r\n self.assertTrue(\r\n read.status == 404, \"The status is 404: \" + str(read.status))\r\n self.assertTrue(\r\n not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(\r\n read.content is None, \"Content should be none\")",
"def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1",
"def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! ')\n return res.status_code",
"def check_url(url=None, parse_url=None):\n return False",
"def bad_url_check_account(self, google_ads_account_id):\n pass",
"def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True",
"def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")"
] | [
"0.65323013",
"0.64228904",
"0.6372937",
"0.63698536",
"0.63243103",
"0.6290938",
"0.628491",
"0.6269983",
"0.62446475",
"0.62388295",
"0.6238438",
"0.6221981",
"0.62093294",
"0.618238",
"0.61809975",
"0.6106003",
"0.60834146",
"0.60717535",
"0.6054798",
"0.6054236",
"0.60482913",
"0.60454357",
"0.60438085",
"0.6014326",
"0.6009143",
"0.599694",
"0.5993672",
"0.59871364",
"0.598617",
"0.59841096"
] | 0.6870858 | 0 |
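
A minimal, self-contained sketch of the behaviour the query above describes, using stand-in MemoryFS and NoURL classes rather than the real pyfilesystem2 API (both names are assumptions for illustration):

import unittest

class NoURL(Exception):
    """Stand-in for the error a filesystem raises when it cannot produce a URL."""

class MemoryFS:
    """Toy filesystem: only the default 'download' purpose yields a URL."""

    def __init__(self):
        self._files = set()

    def create(self, path):
        self._files.add(path)

    def geturl(self, path, purpose="download"):
        if purpose != "download":
            raise NoURL("no URL for purpose %r" % purpose)
        return "mem://" + path

class GetURLPurposeTest(unittest.TestCase):
    def test_geturl_unknown_purpose(self):
        fs = MemoryFS()
        fs.create("foo")
        with self.assertRaises(NoURL):
            fs.geturl("foo", "__nosuchpurpose__")

if __name__ == "__main__":
    unittest.main()
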
Make sure the version in the TOML file and in the __init__.py file are the same. | def test_version():
with open("pyproject.toml") as f:
tomllines = f.read().splitlines()
tomlversion = set([l for l in tomllines if "version =" in l])
initversion = set([f'version = "{mei2volpiano.__version__}"'])
# set is there to catch any duplicate/additional entries
assert initversion == tomlversion | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def test_version():\n assert(hasattr(tekel, '__version__'))",
"def version(self):",
"def importVersion():\n\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n\n print(new, sys.stderr)\n \n try:\n with open('bdgenomics/workflows/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(dir='bdgenomics/workflows', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'bdgenomics/workflows/version.py')\n\n import bdgenomics.workflows.version\n return bdgenomics.workflows.version",
"def test_module_version_matches_pyproject_version():\n version_from_package_init = __version__\n\n # this is so that the test finds the pyproject.toml file when run from the command line or from within Pycharm\n this_directory = os.path.dirname(os.path.realpath(__file__))\n pyproject_toml_path = os.path.join(this_directory, \"..\", \"pyproject.toml\")\n\n with open(pyproject_toml_path) as pyproject_file:\n pyproject_contents = pyproject_file.read()\n\n pyproject_meta_data = tomlkit.parse(pyproject_contents)[\"tool\"][\"poetry\"]\n version_from_pyproject = pyproject_meta_data[\"version\"]\n\n assert version_from_package_init == version_from_pyproject",
"def run(self):\n\n version_str = (\n get_git_version(here))\n\n version_uniparser_dict = (\n get_uniparser_version())\n\n if (version_str is not None or\n version_uniparser_dict is not None):\n\n with open(\n os.path.join(here, 'lingvodoc', 'version.py'), 'w',\n encoding = 'utf-8') as version_py_file:\n\n version_py_file.write(\n self.version_py_template.format(\n repr(version_str),\n repr(version_uniparser_dict)))\n\n # Continuing with setup.\n\n super().run()",
"def test_version(self):\n pass",
"def create_update_pyproject_toml(self) -> None:\n if (self.toml_path).exists():\n # do not overwrite the version of a pre-existing file\n _pyproject = self.pyproject\n assert _pyproject is not None\n # clear out the packages section\n _pyproject[\"tool\"][\"poetry\"][\"packages\"] = []\n # update the dependencies section by readin that from the template file\n with open(CONFIG.template_path / \"pyproject.toml\", \"rb\") as f:\n tpl = tomllib.load(f)\n\n _pyproject[\"tool\"][\"poetry\"][\"dependencies\"] = tpl[\"tool\"][\"poetry\"][\"dependencies\"]\n\n else:\n # read the template pyproject.toml file from the template folder\n try:\n with open(CONFIG.template_path / \"pyproject.toml\", \"rb\") as f:\n _pyproject = tomllib.load(f)\n _pyproject[\"tool\"][\"poetry\"][\"version\"] = self.mpy_version\n except FileNotFoundError as e:\n log.error(f\"Could not find template pyproject.toml file {e}\")\n raise (e)\n\n # update the name , version and description of the package\n _pyproject[\"tool\"][\"poetry\"][\"name\"] = self.package_name\n _pyproject[\"tool\"][\"poetry\"][\"description\"] = self.description\n # write out the pyproject.toml file\n self.pyproject = _pyproject",
"def _get_version(self):",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def test_semantic_version():\n semantic_version.Version(settings.VERSION)",
"def test_version_exists():\n assert ztm.__version__",
"def test_version_string_consistency_pyproject_toml():\n\n repository_root = os.path.join(os.path.dirname(__file__), '..')\n fixture = os.path.join(repository_root, \"pyproject.toml\")\n\n with open(fixture, \"r\", encoding=\"utf-8\") as f:\n contents = f.read()\n\n match = re.search(r\"^version = (?P<semver>.*)$\", contents, re.MULTILINE)\n actual_version = match[\"semver\"].strip('\"')\n\n assert expected_version == actual_version, \"Expected version string used in pyproject.toml to be consistent with\" \\\n \" that in matchms.__version__\"",
"def version(self):\n pass",
"def version(self):\n pass",
"def version(self):\n pass",
"def Version(self) -> _n_0_t_12:",
"def Version(self) -> _n_0_t_12:",
"def load_toml_versions(toml_file: Path) -> Tuple[_TOMLDocument, _TOMLDocument]:\n\n def load(lines: Sequence[str]) -> _TOMLDocument: # noqa\n return tomlkit.loads(\"\".join(lines))\n\n with toml_file.open() as fp:\n ours, theirs = parser.parse(fp)\n return load(ours), load(theirs)",
"def set_version(self, version):\n\n def update_version(version, filepath):\n with open(filepath, \"r\") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents_version(contents, version)\n assert contents != new_contents\n with open(filepath, \"w\") as stream:\n stream.write(new_contents)\n\n update_version(version, os.path.join(\".\", \"package.json\"))\n update_version(version, os.path.join(\".\", \"src\", \"setup.py\"))\n update_version(\n version, os.path.join(\".\", \"src\", \"robocorp_code\", \"__init__.py\")\n )",
"def test_version():\n assert __version__ == \"0.1.0\"",
"def set_toVersion(self):\n if not self.data.get('toVersion') or LooseVersion(self.data.get('toVersion', '99.99.99')) >= TO_VERSION_5_9_9:\n if self.verbose:\n click.echo('Setting toVersion field')\n self.data['toVersion'] = TO_VERSION_5_9_9",
"def version(self): # -> string\n try:\n return gtts.version.__version__\n except (AttributeError, NameError):\n self.ok = False\n return \"\"",
"def test_patch_namespaced_template(self):\n pass",
"def test_tuple(self):\n temp = self.mktemp()\n os.makedirs(temp)\n os.makedirs(os.path.join(temp, \"mytestproja\"))\n\n with open(os.path.join(temp, \"mytestproja\", \"__init__.py\"), \"w\") as f:\n f.write(\"__version__ = (1, 3, 12)\")\n\n version = get_version(temp, \"mytestproja\")\n self.assertEqual(version, \"1.3.12\")",
"def import_(self, version):\n #nuke.nodePaste(version.absolute_full_path)\n return True",
"def _check_version () -> None:\n py_version_info: typing.Tuple = sys.version_info[:2]\n\n if py_version_info < MIN_PY_VERSION:\n error_msg = \"This version of pytextrank requires Python {} or later ({} detected)\\n\"\n raise RuntimeError(error_msg.format(_versify(MIN_PY_VERSION), _versify(py_version_info)))",
"def addon_tautulli(self):\n print(\"Checking Tautulli version\")\n repo = self.github.get_repo('Tautulli/Tautulli')\n releases = list(repo.get_releases())\n index = 0\n while True:\n remote_version = releases[index].tag_name\n if 'b' in remote_version:\n index = index + 1\n else:\n break\n file = \"{}/Dockerfile\".format(self.name)\n remote_file = self.get_file_obj(file)\n masterfile = self.repoupdater.get_file_content(remote_file)\n file_version = masterfile.split('ENV TAUTULLI_VERSION ')[1]\n file_version = file_version.split('\\n')[0]\n file_version = file_version.replace(\"'\", \"\")\n if self.verbose:\n print(\"Current version\", file_version)\n print(\"Available version\", remote_version)\n if remote_version != file_version:\n msg = COMMIT_MSG.format('Tautulli', remote_version)\n new_content = self.repoupdater.get_file_content(remote_file)\n new_content = new_content.replace(file_version, remote_version)\n self.repoupdater.commit(file, msg, new_content, remote_file.sha)\n else:\n print(\"Tautulli already have the newest version\", file_version)",
"def version():\n\n pass",
"def test_version():\n assert __version__"
] | [
"0.63930917",
"0.62459564",
"0.5921642",
"0.587334",
"0.58482426",
"0.5747833",
"0.5688836",
"0.5572761",
"0.55282295",
"0.5525575",
"0.5509409",
"0.54920894",
"0.54500955",
"0.54486805",
"0.54486805",
"0.54486805",
"0.54453105",
"0.54453105",
"0.5420287",
"0.5393781",
"0.53555614",
"0.5347819",
"0.5323029",
"0.5310032",
"0.5299105",
"0.52624196",
"0.5259135",
"0.5254389",
"0.52223563",
"0.5210343"
] | 0.67833966 | 0 |
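
A minimal sketch of the same version-consistency check written against tomllib (standard library from Python 3.11) and a Poetry-style pyproject.toml; mypackage is a hypothetical package exposing __version__:

import tomllib  # Python 3.11+

import mypackage  # hypothetical package exposing __version__

def test_version_matches_pyproject():
    with open("pyproject.toml", "rb") as f:
        pyproject = tomllib.load(f)
    assert mypackage.__version__ == pyproject["tool"]["poetry"]["version"]
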
Find distances from start vertex to all vertices | def distances_bfs(self, start):
from collections import deque
assert start in self.graph
distance = {vertex: None for vertex in self.vertices()}
distance[start] = 0
queue = deque()
queue.append(start)
while queue:
current_vertex = queue.popleft()  # FIFO: take from the left so the traversal is breadth-first
for neighbour in self.neighbours(current_vertex):
if distance[neighbour] is None:
queue.append(neighbour)
distance[neighbour] = distance[current_vertex] + 1
return distance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distances(self):",
"def one_to_all_bfs(start, num_vertexes, edges, INF=9223372036854775807):\n distances = [INF] * num_vertexes\n distances[start] = 0\n to_visit = [start]\n while to_visit:\n next_visit = []\n for frm in to_visit:\n for to in edges[frm]:\n new_cost = distances[frm] + 1\n if new_cost < distances[to]:\n distances[to] = new_cost\n next_visit.append(to)\n to_visit = next_visit\n return distances",
"def __dikjstra(self, start_node):\n visited = []\n unvisited = [x for x in self.__node]\n shortest_dist_from_start_node = 0\n current_node = start_node\n\n current_node.setShortestDist(shortest_dist_from_start_node)\n\n while current_node:\n #check unvisited neighbor\n for neighbor_node, distance in current_node.getNeighbors().items():\n #print(neighbor_node.getId(), distance) troubleshoot je ni\n if neighbor_node in visited:\n continue\n\n #add up shortest_dist_from_start_node with distance from neighbor distance\n calc_dist = shortest_dist_from_start_node + distance\n\n if calc_dist < neighbor_node.getShortestDist():\n neighbor_node.setShortestDist(calc_dist)\n neighbor_node.setPrevNode(current_node)\n\n # add current node to visited array\n visited.append(current_node)\n unvisited.remove(current_node)\n \n #update next node and next shortest distance\n next_shortest_dist_from_start_node = inf\n next_node = None\n\n for unvisited_node in unvisited:\n if unvisited_node.getShortestDist() < next_shortest_dist_from_start_node:\n next_shortest_dist_from_start_node = unvisited_node.getShortestDist()\n next_node = unvisited_node\n\n # update current node and shortest distance from start vertex\n if next_node:\n current_node = next_node\n shortest_dist_from_start_node = next_shortest_dist_from_start_node\n #if there are left over unvisited node\n else: \n if unvisited:\n current_node = unvisited[0]\n else:\n current_node = None",
"def single_dijkstra(graph, start, edge_weight_name):\r\n distances = []\r\n for x in start:\r\n try:\r\n value_set = nx.single_source_dijkstra_path_length(graph, source=x, weight=edge_weight_name)\r\n except nx.NetworkXNoPath:\r\n pass\r\n for key in value_set:\r\n\r\n distances.append([x,key,value_set[key]])\r\n return distances",
"def flat_distances_to(self, pt):\n A = np.array(self.vertices)\n P = np.tile(np.array(pt.vertex), (A.shape[0], 1))\n d = np.sqrt(np.sum((A-P)**2, 1))\n return d",
"def dijkstra(self, start, end):\n unvisited = self.nodes()\n distance = {}\n previous = {}\n for node in unvisited:\n distance[node] = sys.maxsize\n distance[start] = 0\n while len(unvisited) > 0:\n node = unvisited[0]\n smallest_curr = sys.maxsize\n for d in distance:\n if d in unvisited and distance[d] < smallest_curr:\n node = d\n smallest_curr = distance[d]\n unvisited.remove(node)\n for neighbor in self.neighbors(node).keys():\n alt_path = distance[node] + self.weight(node, neighbor)\n if alt_path < distance[neighbor]:\n distance[neighbor] = alt_path\n previous[neighbor] = node\n result = []\n result.append(end)\n curr = end\n while curr in previous:\n result.append(previous[curr])\n curr = previous[curr]\n return result",
"def shortest_distance(self, begin, end):\n\n begin_index = self._cell_indexes[begin]\n end_index = self._cell_indexes[end]\n\n distance = self._distance_mat[begin_index, end_index]\n # distance *= pq.meter\n\n path = [begin]\n inv_index = {v: k for k, v in self._cell_indexes.items()}\n while True:\n next_index = self._preds[end_index, begin_index]\n if next_index == -9999:\n break\n\n begin_index = next_index\n\n seg = inv_index[next_index]\n path.append(seg)\n\n return distance, path",
"def dijkstra(self, start, end):\n distance = {}\n path_weights = {start: (None, 0)}\n for key in self:\n distance[key] = float('inf')\n distance[start] = 0\n while distance:\n current = min(distance, key=distance.get)\n for neighbor in self[current]:\n temp_dist = distance[current] + self[current][neighbor]\n if neighbor in distance and temp_dist < distance[neighbor]:\n distance[neighbor] = temp_dist\n path_weights[neighbor] = (current, temp_dist)\n del distance[current]\n path = []\n prev = end\n while prev is not None:\n path.append(prev)\n prev = path_weights[prev][0]\n return list(reversed(path))",
"def dijkstra(start, vertex_list, line_list, vertex_labels, polygons):\n # create stack Q with all vertices including the arbitrary starting point\n Q = {**vertex_labels}\n Q[0] = start\n vertex_labels_with_start = {**Q}\n dist = {}\n prev = {}\n for key, val in Q.items():\n dist[key] = 1e10\n prev[key] = None\n # start has zero distance to itself\n dist[0] = 0\n while Q:\n min_ = 1e10\n curr_vertex = None\n # simulates priority queue (min heap) with for loop\n for v in Q.keys():\n if dist[v] < min_:\n curr_vertex = v\n min_ = dist[v]\n # curr_vertex = min(dist, key=dist.get)\n if curr_vertex is None:\n print(\"Target cannot be reached!\")\n break\n Q.pop(curr_vertex)\n invalid_point = False\n for poly in polygons:\n if inside_polygon(vertex_labels_with_start[curr_vertex], poly):\n invalid_point = True\n break\n if invalid_point:\n continue\n if curr_vertex == len(vertex_list):\n break\n _, vis_labels = visibility_graph(vertex_labels_with_start[curr_vertex], vertex_list, line_list)\n # Just implement dijkstra - need a way to mark vertices with labels\n for elem in vis_labels:\n if elem in Q:\n alt = dist[curr_vertex] + np.sqrt(len2((diff_(vertex_labels_with_start[curr_vertex],\n vertex_labels_with_start[elem]))))\n if alt < dist[elem]:\n dist[elem] = alt\n prev[elem] = curr_vertex\n return dist, prev",
"def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))",
"def length(self):\n points = [Point(v, crs=self.crs) for v in self.vertices]\n distances = [a.distance(b) for a, b in zip(points[:-1], points[1:])]\n return sum(distances)",
"def distance_of_x_to_y(start_node, end_node):\n\n # adding the base router's d_vec to n_d_vec\n # will simplify the problem and we can then\n # remove the first conditional statement\n\n # when a router comes back to its parent\n # its distance will ultimately increase\n # and hence that branch will be ignored\n\n global DATA\n\n # neighbor ids will now change for every router\n all_neighbor_ids = [\n neighbor[0]\n for neighbor in DATA[\"n_d_vec\"][start_node]\n ]\n # if start_node is not present at that moment\n # in n_d-vec then an exception (Type Error)\n # will be raised after which we need to\n # return math.inf\n try:\n if end_node in all_neighbor_ids:\n return [\n every_neighbor[1]\n for every_neighbor in DATA[\"n_d_vec\"][start_node]\n if end_node is every_neighbor[0]\n ][0]\n else:\n # we need to handle going back\n # we can pass an initial router\n # from which the algorithm has\n # started and hence we can avoid\n # going back\n\n # we may do some memoization here\n # and hence don't do reevaluation every time\n return min(\n [\n distance_of_x_to_y(start_node, neighbor) +\n distance_of_x_to_y(neighbor, end_node)\n for neighbor in all_neighbor_ids\n ]\n )\n except TypeError as node_err:\n with PRINT_LOCK:\n print(\"the start node is node is not present\\\n at this moment in the n_d_vec \\n{}\"\\\n .format(node_err)\\\n )\n return math.inf",
"def vertex_distance(self, v1, v2):\n return utils.real_distance(self.node_locations[v1], self.node_locations[v2])",
"def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T",
"def get_dist_cost(data, start_node_id, end_node_id):\n p1 = get_coords(data, start_node_id)\n p2 = get_coords(data, end_node_id)\n return great_circle_distance(p1, p2)",
"def getStartVertex(self):",
"def dijkstra(self,start):\n path_weight = {i : float('inf') for i in range(self.n)}\n path_weight[start] = 0\n previous = {i : float('nan') for i in range(self.n)}\n remaining = PriorityQueue()\n for node,priority in path_weight.items():\n remaining.put((priority,node))\n\n while not remaining.empty():\n priority,node = remaining.get()\n for tgt,weight in self.edges[node].items():\n possibleNewWeight = path_weight[node] + weight\n if (possibleNewWeight < path_weight[tgt]):\n path_weight[tgt] = possibleNewWeight\n previous[tgt] = node\n \n return path_weight, previous",
"def shortest_path_tree__bfs(self, start):\r\n from queue import deque\r\n\r\n assert start in self.graph\r\n\r\n distance = {vertex: None for vertex in self.vertices()}\r\n distance[start] = 0\r\n\r\n previous = {vertex: None for vertex in self.vertices()}\r\n\r\n queue = deque()\r\n queue.append(start)\r\n\r\n while queue:\r\n current_vertex = queue.pop()\r\n for neighbour in self.neighbours(current_vertex):\r\n if distance[neighbour] is None:\r\n queue.append(neighbour)\r\n distance[neighbour] = distance[current_vertex] + 1\r\n previous[neighbour] = current_vertex\r\n\r\n return previous",
"def calc_dist(self, neighboring_pos):\n vec = np.array([i[1] - i[0] for i in zip(self.pos, neighboring_pos)])\n dist = np.linalg.norm(vec)\n return vec, dist",
"def shortest_flight(self):\r\n distance = sys.maxsize\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance < distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance",
"def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)",
"def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()",
"def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ",
"def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)",
"def travelling_salesman(points, start=None):\n if start is None:\n start = points[0]\n return min([perm for perm in permutations(points) if perm[0] == start], key=total_distance)",
"def _create_neighbor_distances(self):\n # --------------------------------\n # Create Directions from Point\n # --------------------------------\n diff = [[0 for _ in range(self._dim)]]\n curr = diff[0][:]\n for i in range(self._dim):\n # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0.\n curr[i] = 1\n diff.append(curr[:])\n curr[i] = -1\n diff.append(curr[:])\n curr[i] = 0\n # Remove initial blank unit vector with all values at 0.\n diff.pop(0)\n del curr\n\n # --------------------------------\n # Breadth First Search\n # --------------------------------\n distances = []\n queue = [[0 for _ in range(self._dim)]]\n\n while queue:\n # Get latest distance\n curr = queue.pop()\n\n # The distance from any possible point should be less than or equal to the number of dimensions.\n # This can be shown using basic calculations.\n if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \\\n np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances:\n continue\n\n # Calculate all distances from child and add to queue\n queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))])\n\n # Add current distance to distances\n distances.append(curr)\n\n # Return all possible neighbor distances\n return np.array(distances, dtype=int)",
"def dijkstra(self, start, maxD=1e309):\n # total distance from origin\n tdist = defaultdict(lambda: 1e309)\n tdist[start] = 0\n # neighbour that is nearest to the origin\n preceding_node = {}\n unvisited = self.nodes\n\n while unvisited:\n current = unvisited.intersection(tdist.keys())\n if not current: break\n min_node = min(current, key=tdist.get)\n unvisited.remove(min_node)\n\n for neighbour in self.neighbours[min_node]:\n d = tdist[min_node] + self.dist[min_node, neighbour]\n if tdist[neighbour] > d and maxD >= d:\n tdist[neighbour] = d\n preceding_node[neighbour] = min_node\n\n return tdist, preceding_node",
"def pairwise_distance(self, start, end):\n x1 = start[0]\n y1 = start[1]\n x2 = end[0]\n y2 = end[1]\n pairwise_dist = sqrt(((x2 - x1) * 10.29) ** 2 + ((y2 - y1) * 7.55) ** 2)\n return pairwise_dist",
"def point_distances(self, params=None):\n if params is None:\n params = self.collocation_points()\n with self.fix_evaluator():\n pts = np.array([self(la) for la in params])\n deltas = np.diff(pts, axis=0)\n distances = norm(deltas, axis=1)\n return distances",
"def iter_dist(self):\n self.makeTree()\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield coords[i], dists.compress((dists > sd) & ~np.isinf(dists))"
] | [
"0.67085356",
"0.6332678",
"0.6297563",
"0.62604505",
"0.62509423",
"0.6222758",
"0.61351496",
"0.61228406",
"0.60961115",
"0.60891247",
"0.6052529",
"0.60063326",
"0.5931734",
"0.5913894",
"0.59011626",
"0.58787453",
"0.5878429",
"0.58402365",
"0.58323824",
"0.58128625",
"0.58016866",
"0.58002245",
"0.5790461",
"0.57716",
"0.5761991",
"0.575113",
"0.57484204",
"0.5748396",
"0.5734218",
"0.5718059"
] | 0.66886586 | 1 |
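
A standalone sketch of breadth-first distances over an adjacency-list graph, assuming graph maps each vertex to an iterable of its neighbours; popping from the left keeps the traversal breadth-first, so the first time a vertex is reached is along a shortest path:

from collections import deque

def bfs_distances(graph, start):
    distance = {vertex: None for vertex in graph}
    distance[start] = 0
    queue = deque([start])
    while queue:
        current = queue.popleft()  # FIFO order is what makes this breadth-first
        for neighbour in graph[current]:
            if distance[neighbour] is None:
                distance[neighbour] = distance[current] + 1
                queue.append(neighbour)
    return distance

if __name__ == "__main__":
    g = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
    print(bfs_distances(g, "a"))  # {'a': 0, 'b': 1, 'c': 1, 'd': 2}
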
Return list of new mails | def get_new_mails(self):
if cint(self.settings.use_imap):
self.imap.select("Inbox")
if self.settings.no_remaining == '0' and self.settings.uidnext:
if self.settings.uidnext == self.settings.newuidnext:
return False
else:
#request all messages between last uidnext and new
return True
else:
response, message = self.imap.uid('search', None, "ALL")
email_list = message[0].split()
else:
email_list = self.pop.list()[1]
return email_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def action_create_mail_messages(self):\n self.check_recipients()\n self.check_message()\n messages = self.env['mail.message']\n for recipient in self.recipient_ids:\n messages |= recipient._create_mail_message()\n return messages",
"def fetchmail(self):\n mails = []\n\n if self.security == 'SSL/TLS':\n imap = IMAP4_SSL(self.host, self.port)\n else:\n imap = IMAP4(self.host, self.port)\n if self.security == 'STARTTLS':\n imap.starttls()\n imap.login(self.username, self.passwd)\n imap.select(readonly=True)\n\n status, uids = imap.uid('SEARCH', 'UNSEEN')\n\n for uid in uids[0].split():\n status, data = imap.uid('FETCH', uid, '(BODY[HEADER.FIELDS (DATE SUBJECT FROM)])')\n message = self._message_from_data(data)\n mail = Mail(uid, message['FROM'], message['SUBJECT'], message['DATE'])\n mails.append(mail)\n\n imap.close()\n imap.logout()\n\n return mails",
"def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])",
"def get_messages(self):\n\t\tif not self.check_mails():\n\t\t\treturn # nothing to do\n\n\t\tfrappe.db.commit()\n\n\t\ttry:\n\t\t\t# track if errors arised\n\t\t\tself.errors = False\n\t\t\tself.latest_messages = []\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\tuid_validity = self.get_status()\n\t\t\telse:\n\t\t\t\temail_list = self.get_new_mails()\n\n\n\t\t\t# size limits\n\t\t\tself.total_size = 0\n\t\t\tself.max_email_size = cint(frappe.local.conf.get(\"max_email_size\"))\n\t\t\tself.max_total_size = 5 * self.max_email_size\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\t#try:\n\t\t\t\tif self.check_uid_validity(uid_validity):\n\t\t\t\t\temail_list = self.get_new_mails()\n\t\t\t\t\tif email_list:\n\t\t\t\t\t\tself.get_imap_messages(email_list)\n\t\t\t\t\tself.sync_flags()\n\t\t\t\t\tself.get_seen()\n\t\t\t\t\tself.push_deleted()\n\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\tnum = num_copy = len(email_list)\n\n\t\t\t\t# WARNING: Hard coded max no. of messages to be popped\n\t\t\t\tif num > 20: num = 20 #20\n\n\t\t\t\tfor i, message_meta in enumerate(email_list):\n\t\t\t\t\t# do not pull more than NUM emails\n\t\t\t\t\tif (i+1) > num:\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.retrieve_message(message_meta, i+1)\n\t\t\t\t\texcept (TotalSizeExceededError, EmailTimeoutError, LoginLimitExceeded):\n\t\t\t\t\t\tbreak\n\n\t\t\t\t# WARNING: Mark as read - message number 101 onwards from the pop list\n\t\t\t\t# This is to avoid having too many messages entering the system\n\t\t\t\tnum = num_copy\n\t\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\t\tif num > 100 and not self.errors:\n\t\t\t\t\t\tfor m in xrange(101, num+1):\n\t\t\t\t\t\t\tself.pop.dele(m)\n\n\t\texcept Exception, e:\n\t\t\tif self.has_login_limit_exceeded(e):\n\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\traise\n\n\t\tfinally:\n\t\t\t# no matter the exception, pop should quit if connected\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\tself.imap.logout()\n\t\t\telse:\n\t\t\t\tself.pop.quit()\n\n\t\treturn self.latest_messages",
"def email_list(request):\n if not request.user.is_superuser:\n raise PermissionDenied\n emails = set()\n form = EmailSelectForm()\n subject = None\n message = None\n errors = []\n success = None\n if request.method == \"POST\":\n form = EmailSelectForm(request.POST)\n if form.is_valid():\n if \"send_email\" in request.POST:\n send = True\n else:\n send = False\n form, subject, message, success, errors = _send_emails(request, form, emails, send)\n return render(\n request,\n \"rr/email.html\",\n {\n \"object_list\": sorted(emails),\n \"form\": form,\n \"subject\": subject,\n \"message\": message,\n \"errors\": errors,\n \"success\": success,\n },\n )",
"def fetch_all(self):\n emails = []\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n try:\n res, data = self._mailconn.fetch(msg.decode('utf-8'), '(RFC822)')\n except Exception as error:\n self.close_mail_connection()\n print('No email to read: '+error)\n exit()\n \n msg = email.message_from_string((data[0][1]).decode('utf-8'))\n if not isinstance(msg, str):\n if self.is_sender_in_whitelist(msg['From']):\n emails.append(msg)\n\n return emails",
"def emails(self):\r\n return emails.Emails(self)",
"def to_be_mailed(self):\n return self.filter(\n mails_sent=False, start__lte=timezone.now() + datetime.timedelta(hours=1)\n )",
"def api_all():\n all_mail = mail_dao.get_all()\n return _create_response(all_mail)",
"def emails(self):\r\n url = api_base + 'emails/'\r\n return json.loads(self.load_url(url))",
"def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]",
"def get_new_messages(self):\n inbox = list(self.reddit.inbox.unread(limit=10))\n inbox.reverse()\n return inbox",
"def get_inbox(character):\n messages = get_messages(character)\n return [ Mail(message) for message in messages ]",
"def send(self):\n msg_sent = []\n subs = mongo.db.subscribers\n bill_extractor = ExtractBills()\n \n # Do not need the object ID\n same_interval = subs.find({\"interval\":self.interval}, {'_id':0})\n \n for each in same_interval:\n email = each['email']\n tags = each['search_tags']\n state = each['state']\n chamber = each['chamber']\n print(email, tags)\n\n msg_for_rcpnt = bill_extractor.getBill(state, chamber, tags)\n #all_candidates.append((email, msg_for_rcpnt))\n \n #try:\n # msg_body = \"hello world\"\n # msg_body = render_template('mail_card.html')\n # msg = Message(msg_body,\n # sender=\"[email protected]\",\n # recipients=email)\n # mail.send(msg) \n # msg_sent.append((email, \"Success\"))\n #except Exception as e:\n # msg_sent.append((email, str(e)))\n #return msg_sent\n return msg_for_rcpnt",
"def get_mailing_list():\n\t\tresult = {}\n\t\tconnection = DbHelper.connect()\n\n\t\twith connection.cursor() as cursor:\n\t\t\tsql = \"SELECT email FROM mail_list \\\n\t\t\t\t WHERE is_activated=1;\"\n\t\t\tcursor.execute(sql)\n\t\t\tresult = cursor.fetchall()\n\n\t\treturn [email_data['email'] for email_data in result]",
"def outbox():\n with mail.record_messages() as messages:\n yield messages",
"def email_all():\n\tSubscribtion = session.query(email).all()\n\treturn subscribtion_object",
"def recipients(self) -> ty.List[str]:",
"def get_messages(character):\n mail = character.db.mail\n try:\n messages = [item for item in mail if item[TIMESTAMP] <= item[MESSAGE].date_sent]\n # Let's clean up mail storage for this user while we're at it.\n character.db.mail = messages\n except TypeError:\n messages = []\n return messages",
"def list_messages(self):",
"def get_list(update, context):\n chat = update.message.chat\n user_id = update.message.from_user.id\n if chat.id != user_id:\n msg = 'Управление Вашими напоминаниями доступно в личном диалоге'\n update.message.reply_text(msg)\n return\n\n try:\n handler = db_connector.DataBaseConnector()\n rems = handler.get_user_reminders(user_id)\n rems.sort(key=lambda x: x['datetime'])\n except (ValueError, ConnectionError, KeyError):\n update.message.reply_text(_ERR_MSG)\n _LOGGER.exception('Unable to fetch reminders')\n return\n\n if not rems:\n reps_text = 'У вас отсутствуют предстоящие напоминания!'\n update.message.bot.send_message(chat_id=chat.id, text=reps_text)\n return\n\n for rem in rems:\n try:\n resp_text, markup = _compile_rem(rem, show_dt=True)\n context.message = context.bot.send_message(\n chat_id=rem['user_id'], text=resp_text,\n reply_markup=markup, parse_mode=ParseMode.HTML)\n except (ValueError, ConnectionError, KeyError) as err:\n _LOGGER.exception('Unable to process reminder')",
"def get_messages(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/notifications/\"\n \"messages\".format(self.name))\n result = self._imgur._send_request(url, params=locals(),\n needs_auth=True)\n return [Notification(msg_dict, self._imgur, has_fetched=True) for\n msg_dict in result]",
"def email_ml_list(self):\n return self._request('email/ml/list', inspect_args_func(inspect.currentframe()), method='get')",
"def send_emails():\n\n cmd = \"sendmail -f [email protected]\"\n for msg in EMAIL_MESSAGES:\n for rec in RECIPIENTS:\n call(\"echo '%s' | %s %s\" % (msg, cmd, rec), None, True)",
"def get_and_delete_messages (self):\n return []",
"def get_and_delete_messages (self):\n return []",
"def get_emails(print_list, email_dict):\n\n email_list = []\n again = True\n contact_table = PrettyTable()\n contact_table.field_names = [\"Command\", \"Advisor Name\", \"Email\"]\n\n for row in print_list:\n contact_table.add_row(row)\n\n while again:\n print(contact_table)\n pretty_print(email_list, \":\")\n pretty_print(\"To Add Receiving Emails Enter the corresponding command number\", \"-\")\n pretty_print(\"To Send Mail press any number key:\", \"-\")\n choice = get_int_input()\n if choice in email_dict.keys():\n email_list.append(email_dict[choice])\n\n else:\n if len(email_list) != 0:\n again = False\n\n else:\n again = True\n pretty_print(\"No Email Added\", \"-\")\n\n clear()\n\n return email_list",
"def email_list(self) -> Sequence[str]:\n return pulumi.get(self, \"email_list\")",
"def find_expired_email(mails):\n if not mails:\n return []\n\n # get a copy of mails sort by datetime in desc order first\n mails_date = sorted(mails, key=lambda m:m.datetime, reverse=True)\n last7mails = mails_date[:7]\n bmail = last7mails[-1]\n # year|month|date => mail.mid\n reserv_mails = { m.datetime:m.mid for m in last7mails }\n def reserv_m(key, mail):\n if not reserv_mails.has_key(key):\n reserv_mails[key] = mail.mid\n\n for mail in mails_date[7:]:\n if mail.datetime.year < bmail.datetime.year:\n # mails sent in the last years, keep the newest mail per year\n reserv_m(mail.datetime.year, mail)\n elif mail.datetime.year == bmail.datetime.year and \\\n mail.datetime.month < bmail.datetime.month:\n # mails sent in the last months, keep the newest mail per month\n reserv_m(mail.datetime.month, mail)\n\n reserv_mids = dict.fromkeys(reserv_mails.values())\n return [m for m in mails if m.mid not in reserv_mids]",
"def get_emails(self):\n email_ids = self.get_email_ids()\n Email = get_email_class()\n return [email for email in Email.objects.filter(pk__in=email_ids)]"
] | [
"0.67494476",
"0.6615385",
"0.649707",
"0.63677174",
"0.6357396",
"0.62726194",
"0.62524587",
"0.62359744",
"0.62353027",
"0.6214297",
"0.62103856",
"0.6176566",
"0.6172016",
"0.617097",
"0.6118627",
"0.60628283",
"0.60455704",
"0.60106045",
"0.60102516",
"0.6009126",
"0.60057867",
"0.59578484",
"0.59244865",
"0.59038734",
"0.59034467",
"0.59034467",
"0.58664244",
"0.5863179",
"0.5851",
"0.5829713"
] | 0.772635 | 0 |
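
A minimal sketch of listing unseen message UIDs over IMAP with the standard-library imaplib, in the same spirit as the IMAP branch above; host and credentials are placeholders and error handling is omitted:

import imaplib

def list_unseen_uids(host, user, password):
    imap = imaplib.IMAP4_SSL(host)
    imap.login(user, password)
    imap.select("INBOX", readonly=True)  # read-only so messages are not marked as seen
    status, data = imap.uid("search", None, "UNSEEN")
    imap.logout()
    if status != "OK":
        return []
    return data[0].split()  # UIDs as bytes, e.g. [b'101', b'102']
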
Walk and process multipart email. | def parse(self):
for part in self.mail.walk():
self.process_part(part) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def payload_parse(self, mail):\n\t\tif mail.is_multipart():\n\t\t\tfor payload in mail.get_payload():\n\t\t\t\tif payload.get_content_maintype() == \"multipart\":\n\t\t\t\t\tself.payload_parse(payload)\n\t\t\t\telse:\n\t\t\t\t\tself.payload_handle(payload, mail)\n\t\t\t# Post deletion of payloads:\n\t\t\tself.payload_delete(mail)",
"def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)",
"def body_parts(self):\n return_vals = {'files': []}\n\n for part in self.email.walk():\n maintype, subtype = part.get_content_type().split('/')\n # Multipart/* are containers, so we skip it\n if maintype == 'multipart':\n continue\n # Get Text and HTML\n filename = part.get_filename()\n if filename:\n return_vals['files'].append(filename)\n elif maintype == 'text':\n if subtype in ['plain', 'html']:\n encoder = part.get_content_charset() or 'utf-8'\n return_vals.update(\n {subtype:part.get_payload(decode=True).decode(encoder)})\n return return_vals",
"def attachments(self):\n for part in self.email.walk():\n filename = part.get_filename()\n if filename:\n yield {\n 'type': part.get_content_type(),\n 'name': filename,\n 'content': part.get_payload()\n }",
"def process(self) -> None:\n self.parsed = email.message_from_bytes(self.rawmailcontent, policy=email.policy.EmailPolicy()) # type: email.message.EmailMessage\n\n self.subject = self.parsed[\"subject\"]\n\n if self.parsed[\"X-Jicket-Initial-ReplyID\"] is not None and self.parsed[\"X-Jicket-Initial-ReplyID\"] == self.parsed[\"In-Reply-To\"]:\n self.threadstarter = True\n elif self.config.ticketAddress in self.parsed[\"From\"]: # Take more heuristic approach\n self.threadstarter = True\n\n self.rawmailcontent = None # No need to store after processing\n\n self.get_text_bodies(self.parsed)\n self.textfrombodies()",
"def parse_multipart(request):\n\n # This code will process each non-file field in the form\n fields = {}\n data = request.form.to_dict()\n for field in data:\n fields[field] = data[field]\n print(\"Processed field: %s\" % field)\n\n # This code will process each file uploaded\n files = request.files.to_dict()\n for file_name, file in files.items():\n # Note: GCF may not keep files saved locally between invocations.\n # If you want to preserve the uploaded files, you should save them\n # to another location (such as a Cloud Storage bucket).\n file.save(get_file_path(file_name))\n print(\"Processed file: %s\" % file_name)\n\n # Clear temporary directory\n for file_name in files:\n file_path = get_file_path(file_name)\n os.remove(file_path)\n\n return \"Done!\"",
"def process_part(self, part):\n\t\tcontent_type = part.get_content_type()\n\t\tfilename = part.get_filename()\n\t\tif content_type == 'text/plain' and not filename:\n\t\t\tself.text_content += self.get_payload(part)\n\n\t\telif content_type == 'text/html':\n\t\t\tself.html_content += self.get_payload(part)\n\n\t\telif content_type == 'message/rfc822':\n\t\t\t# sent by outlook when another email is sent as an attachment to this email\n\t\t\tself.show_attached_email_headers_in_content(part)\n\n\t\telif content_type == 'text/calendar':\n\t\t\tself.set_calendar_invite(part)\n\n\t\telif filename or 'image' in content_type:\n\t\t\tself.get_attachment(part)",
"def parse_form_multipart(body):\n # type: (unicode) -> dict[unicode, Any]\n import email\n import json\n\n import xmltodict\n\n def parse_message(msg):\n if msg.is_multipart():\n res = {\n part.get_param(\"name\", failobj=part.get_filename(), header=\"content-disposition\"): parse_message(part)\n for part in msg.get_payload()\n }\n else:\n content_type = msg.get(\"Content-Type\")\n if content_type in (\"application/json\", \"text/json\"):\n res = json.loads(msg.get_payload())\n elif content_type in (\"application/xml\", \"text/xml\"):\n res = xmltodict.parse(msg.get_payload())\n elif content_type in (\"text/plain\", None):\n res = msg.get_payload()\n else:\n res = \"\"\n\n return res\n\n headers = _asm_request_context.get_headers()\n if headers is not None:\n content_type = headers.get(\"Content-Type\")\n msg = email.message_from_string(\"MIME-Version: 1.0\\nContent-Type: %s\\n%s\" % (content_type, body))\n return parse_message(msg)\n return {}",
"def multipart_nested():\n msg = MIMEMultipart(\"mixed\")\n msg[\"From\"] = sender\n msg[\"To\"] = recipient\n msg[\"Subject\"] = \"Nested multipart email\"\n\n part_1 = MIMEMultipart(\"alternative\")\n part_1_text = MIMEText(\"This is the **first** part\\n\", \"plain\")\n part_1_html = MIMEText(\"This is the <strong>first</strong> part\\n\", \"html\")\n part_1.attach(part_1_text)\n part_1.attach(part_1_html)\n\n part_2 = MIMEText(\"This is the second part\\n\", \"plain\")\n\n msg.attach(part_1)\n msg.attach(part_2)\n\n return msg",
"def contentEmail(email_one):\n parse = email.message_from_string(email_one)\n if parse.is_multipart():\n for payload in parse.walk():\n email_three = payload.get_payload()\n try:\n email_three = email_three\n except AttributeError:\n continue\n return email_three \n else:\n email_two = parse.get_payload()\n email_two = email_two\n return email_two",
"def walk(self,suspect):\n\n blockedextensions=self.config.get(self.section,'blockedextensions').split(',')\n\n m=suspect.getMessageRep()\n for i in m.walk():\n if i.is_multipart():\n continue\n contenttype_mime=i.get_content_type()\n att_name = i.get_filename(None)\n\n if not att_name:\n #workaround for mimetypes, it always takes .ksh for text/plain\n if i.get_content_type()=='text/plain':\n ext='.txt'\n else:\n ext = mimetypes.guess_extension(i.get_content_type())\n\n if ext==None:\n ext=''\n att_name = 'unnamed%s' % ext\n\n #we are only interested in zip files\n if not att_name.lower().endswith(\".zip\"):\n continue\n\n pl = StringIO(i.get_payload(decode=True))\n zip=zipfile.ZipFile(pl)\n namelist=zip.namelist()\n for name in namelist:\n for blocked in blockedextensions:\n ext=\".%s\"%blocked.lower().strip()\n if name.lower().strip().endswith(ext):\n return {self.asciionly(att_name):self.asciionly(name)}\n \n return None",
"def process_mailbox(M):\n\n rv, data = M.search(None, config.email['search'])\n if rv != 'OK':\n logger.info(\"No messages found!\")\n return\n\n for num in data[0].split():\n rv, data = M.fetch(num, '(RFC822)')\n if rv != 'OK':\n logger.error(\"ERROR getting message\", num)\n return\n\n msg = email.message_from_string(data[0][1])\n\n content, extras = decode_body(msg)\n extraText = \"\"\n if extras:\n extraText = \"\\n \" + unichr(10133) +\" **\" + str(len(extras)) + \" attachments:**\"\n for (name, cont) in extras:\n extraText += \"\\n- \" + str(name)\n # remove markdown which would confuse the parser\n content = re.sub('[\\*_]', '', content)\n if len(content) > config.email['maxLen']:\n content = content[:config.email['maxLen']] + \"... _trimmed_\"\n subject, encoding = email.Header.decode_header(msg['Subject'])[0]\n emailText = \"*From:* \" + msg['From'] + \"\\n*Subject:* \" + subject + \"\\n==========\\n\" + content + \" \" + extraText\n\n send_message(emailText)",
"def test_fetchBodyStructureMultipart(self, uid=0):\n self.function = self.client.fetchBodyStructure\n self.messages = '3:9,10:*'\n innerMessage = FakeyMessage({\n 'content-type': 'text/plain; name=thing; key=\"value\"',\n 'content-id': 'this-is-the-content-id',\n 'content-description': 'describing-the-content-goes-here!',\n 'content-transfer-encoding': '8BIT',\n 'content-language': 'fr',\n 'content-md5': '123456abcdef',\n 'content-disposition': 'inline',\n 'content-location': 'outer space',\n }, (), b'', b'Body\\nText\\nGoes\\nHere\\n', 919293, None)\n self.msgObjs = [FakeyMessage({\n 'content-type': 'multipart/mixed; boundary=\"xyz\"',\n 'content-language': 'en',\n 'content-location': 'nearby',\n }, (), b'', b'', 919293, [innerMessage])]\n self.expected = {0: {'BODYSTRUCTURE': [\n ['text', 'plain', ['key', 'value', 'name', 'thing'],\n 'this-is-the-content-id', 'describing-the-content-goes-here!',\n '8BIT', '20', '4', '123456abcdef', ['inline', None], 'fr',\n 'outer space'],\n 'mixed', ['boundary', 'xyz'], None, 'en', 'nearby'\n ]}}\n return self._fetchWork(uid)",
"def extract (msgfile, key):\n m = email.message_from_file(msgfile)\n From, To, Subject, Date = caption(m)\n #Text, Html, Files, Parts = pullout(m, key)\n Text = Text.strip(); Html = Html.strip()\n msg = {\"subject\": Subject, \"from\": From, \"to\": To, \"date\": Date,\n \"text\": Text, \"html\": Html, \"parts\": Parts}\n if Files: msg[\"files\"] = Files\n return msg",
"def parse_content(content):\n attachments = []\n body = None\n html = None\n\n for part in content.walk():\n if part.get('Content-Disposition') is not None:\n decoded_data = decode_attachment(part)\n\n attachment = parse_attachment(part)\n if attachment:\n attachments.append(attachment)\n elif part.get_content_type() == \"text/plain\":\n if body is None:\n body = \"\"\n body += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n elif part.get_content_type() == \"text/html\":\n if html is None:\n html = \"\"\n html += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n # return the parsed data\n return {\n 'body': body,\n 'html': html,\n 'filename': decoded_data['filename']\n # 'attachments': attachments\n }",
"def get_and_send_attachments(self, session, mid, message_payload_parts, context, m_chat_id):\r\n\r\n store_dir_1 = os.getcwd()\r\n\r\n for part in message_payload_parts:\r\n if part['filename']:\r\n attachment_id = part['body']['attachmentId']\r\n\r\n response = session.get(f'https://www.googleapis.com/gmail/v1/users/me/'\r\n f'messages/{mid}/attachments/{attachment_id}')\r\n\r\n data = response.content\r\n encoded_data_dict = ast.literal_eval(data.decode('utf-8'))\r\n file_data = base64.urlsafe_b64decode(encoded_data_dict['data'].encode('UTF-8'))\r\n\r\n path = os.path.join(store_dir_1, part['filename'])\r\n\r\n # запись данных в файловую систему, чтение, отправка и удаление\r\n with open(path, 'wb') as file_object:\r\n file_object.write(file_data)\r\n with open(path, 'rb') as f:\r\n context.bot.send_document(m_chat_id, f)\r\n os.remove(path)",
"def process_ingestion_emails():\n processor = CalendarInteractionEmailProcessor()\n\n for message in get_mail_docs_in_bucket():\n source = message['source']\n try:\n documents.delete_document(bucket_id=BUCKET_ID, document_key=message['source'])\n except Exception as e:\n logger.exception('Error deleting message: \"%s\", error: \"%s\"', source, e)\n continue\n\n try:\n email = mailparser.parse_from_bytes(message['content'])\n processed, reason = processor.process_email(message=email)\n if not processed:\n logger.error('Error parsing message: \"%s\", error: \"%s\"', source, reason)\n else:\n logger.info(reason)\n except Exception as e:\n logger.exception('Error processing message: \"%s\", error: \"%s\"', source, e)\n\n logger.info(\n 'Successfully processed message \"%s\" and deleted document from bucket \"%s\"',\n source,\n BUCKET_ID,\n )",
"def get_nested_payload(mime_message):\n return_message = EmailMessage()\n return_message.subject = mime_message.get('Subject')\n return_message.sender = clean_sender(mime_message.get('From'))\n return_message.recipient = clean_recipient(mime_message.get('To'))\n return_message.date = parse(mime_message.get('Date'))\n for sub_message in mime_message.walk():\n content_type = sub_message.get_content_type()\n disposition = sub_message.get('Content-Disposition')\n if content_type == 'text/plain' and disposition is None:\n x = unicode(sub_message.get_payload())\n return_message.append_body(x)\n elif content_type in _ignored_content_types and disposition is None:\n pass # throw away contents we don't want\n else:\n return_message.add_attachment(sub_message.get_payload(), content_type=content_type, filename=disposition)\n return return_message",
"def _process_incoming_mail(raw_message, recipients):\n recipients = [x[1] for x in email.utils.getaddresses([recipients])]\n\n incoming_msg = mail.InboundEmailMessage(raw_message)\n\n if 'X-Google-Appengine-App-Id' in incoming_msg.original:\n raise InvalidIncomingEmailError('Mail sent by App Engine')\n\n # Use the subject to find the issue number.\n # Originally the tag was (issueNNN).\n # Then we changed it to be (issue NNN by WHO).\n # We want to match either of these, and we need to deal with\n # the fact that some mail readers will fold the long subject,\n # turning a single space into \"\\r\\n \".\n # We use \"issue\\s*\" to handle all these forms,\n # and we omit the closing ) to accept both the original and the \"by WHO\" form.\n subject = incoming_msg.subject or ''\n match = re.search(r'\\(issue\\s*(?P<id>\\d+)', subject)\n if match is None:\n raise InvalidIncomingEmailError('No issue id found: %s', subject)\n issue_id = int(match.groupdict()['id'])\n issue = models.Issue.get_by_id(issue_id)\n if issue is None:\n raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)\n sender = email.utils.parseaddr(incoming_msg.sender)[1]\n\n body = None\n for _, payload in incoming_msg.bodies('text/plain'):\n # FIXME(andi): Remove this when issue 2383 is fixed.\n # 8bit encoding results in UnknownEncodingError, see\n # http://code.google.com/p/googleappengine/issues/detail?id=2383\n # As a workaround we try to decode the payload ourselves.\n if payload.encoding == '8bit' and payload.charset:\n body = payload.payload.decode(payload.charset)\n # If neither encoding not charset is set, but payload contains\n # non-ASCII chars we can't use payload.decode() because it returns\n # payload.payload unmodified. The later type cast to db.Text fails\n # with a UnicodeDecodeError then.\n elif payload.encoding is None and payload.charset is None:\n # assume utf-8 but set replace flag to go for sure.\n body = payload.payload.decode('utf-8', 'replace')\n else:\n body = payload.decode()\n break\n if body is None or not body.strip():\n raise InvalidIncomingEmailError('Ignoring empty message.')\n elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:\n # see issue325, truncate huge bodies\n trunc_msg = '... (message truncated)'\n end = django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)\n body = body[:end]\n body += trunc_msg\n\n # If the subject is long, this might come wrapped into more than one line.\n subject = ' '.join([x.strip() for x in subject.splitlines()])\n msg = models.Message(issue_key=issue.key, parent=issue.key,\n subject=subject,\n sender=sender,\n recipients=[x for x in recipients],\n date=datetime.datetime.now(),\n text=body,\n draft=False)\n\n # Add sender to reviewers if needed.\n all_emails = [str(x).lower()\n for x in ([issue.owner.email()] +\n issue.reviewers +\n issue.cc +\n issue.collaborator_emails())]\n if sender.lower() not in all_emails:\n query = models.Account.query(models.Account.lower_email == sender.lower())\n account = query.get()\n if account is not None:\n issue.reviewers.append(account.email) # e.g. account.email is CamelCase\n else:\n issue.reviewers.append(db.Email(sender))\n\n issue.calculate_updates_for(msg)\n issue.put()\n msg.put()",
"def carve_email(self, payload):\n\n regex = re.compile(b\"\\r\\nDATA\\r\\n(.*?)(?:\\r\\n.\\r\\n|\\Z)\", re.M | re.S)\n matches = re.findall(regex, payload)\n if matches:\n for match in matches:\n yield match\n else:\n yield payload",
"def transform(self, email_path):\n mail = open(email_path, 'r')\n content = mail.read(self.max_read_len)\n i = 0\n while not(content[i] == '\\n' and content[i + 1] == '\\n') and i < len(content) - self.ngram:\n i += 1\n header = content[:i]\n # TODO find a smarter way deal with the header-body problem\n body = content[i + 2:]\n if len(body) + len(header) > self.max_read_len:\n body = body[:max(1000, self.max_read_len - len(header))]\n header_set = self.tokenize(header)\n body_set = self.tokenize(body)\n mail.close()\n return (header_set, body_set)",
"def parse_inline_attachments(self, post_html):\n if 'inline-attachment' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_inline_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts",
"def __init__(self, fp, boundary=None, recurse=False, binary_size=None):\n self.hdr = MIMEHeader({})\n self.body = None\n in_hdr = True\n binary_type = False\n multipart_type = False\n # Note, need to use readline() rather than iterating over file\n # because we need to recover file positions and seek ahead.\n # The \"for line in file\" type loop reads ahead so is not compatible\n # with this approach.\n while True:\n\n line = fp.readline().decode('utf-8').replace('\\r', '')\n\n # hit EOF\n if line == '':\n return\n\n # Check for multipart boundary marker\n if boundary is not None:\n if in_hdr:\n # If we are starting, ignore a 'start' marker,\n # quit on a 'done' marker\n if line == '--'+boundary+'\\n':\n continue\n elif line == '--'+boundary+'--\\n':\n self.hdr = MIMEHeader({})\n self.body = None\n return\n else:\n # This marks the end of a part, rewind so that the\n # next part can be parsed, and return results\n if line.startswith('--' + boundary):\n fp.seek(-len(line), 1)\n return\n\n if line == '\\n':\n # Got blank line, the next part will be body. We\n # want to skip it if this is a binary part, otherwise\n # read and return the body.\n in_hdr = False\n if binary_type:\n # Note the location within the file and skip\n # ahead by the correct amount. For BDF files,\n # use Content-Location to get type of binary part.\n try:\n bin_name = basename_noext(\n self.hdr['Content-Location'][0])\n except KeyError:\n bin_name = None\n self.body = fp.tell()\n # If the list of binary sizes is not given, or\n # if the bin_name is unknown, we read the data\n # until the boundary marker is found to determine\n # the size.\n if ((binary_size is None) or\n (bin_name not in list(binary_size.keys()))):\n # raise RuntimeError(\"Unknown binary type '%s' found\"\n # % bin_name)\n bl = len(boundary)+2 # length of boundary string\n bs = 1024*1024 # block size for scanning\n gotit = False\n while not gotit:\n junk = fp.read(bs)\n bloc = junk.find(bytes('--'+boundary, 'utf-8'))\n br = len(junk)\n eof = (br < bs)\n if bloc < 0:\n if eof:\n raise RuntimeError(\n \"Missing boundary string '%s'\"\n % boundary)\n else:\n fp.seek(-bl, 1)\n else:\n gotit = True\n fp.seek(-br + bloc, 1)\n else:\n # Seek ahead to the correct place.\n # Need to add one extra byte for the newline.\n fp.seek(binary_size[bin_name]+1, 1)\n # Note size of the binary part\n self.size = fp.tell() - self.body\n elif multipart_type:\n if recurse:\n # Parse the parts and add to a list\n while True:\n pmime = MIMEPart(fp, boundary=boundary,\n recurse=True,\n binary_size=binary_size)\n if pmime.hdr == {}:\n return\n else:\n self.body.append(pmime)\n continue\n\n if in_hdr:\n # Still in the header, parse the line as MIME key/val\n self.hdr.addline(line)\n if 'Content-Type' in line:\n vals = self.hdr['Content-Type']\n if vals[0].startswith('multipart/'):\n multipart_type = True\n boundary = self.hdr.boundary\n self.body = []\n elif (vals[0] == 'application/octet-stream' or\n vals[0] == 'binary/octet-stream'):\n binary_type = True\n else:\n if not binary_type:\n # In body part of a non-binary type\n if self.body is None:\n self.body = line\n else:\n self.body += line\n else:\n # Should not really get here, means size calculation\n # failed or file is otherwise messed up... what to do?\n raise RuntimeError('MIME parsing failure')",
"def test_fetchSimplifiedBodyMultipart(self):\n self.function = self.client.fetchSimplifiedBody\n self.messages = '21'\n\n # A couple non-multipart messages to use as the inner-most payload\n singles = [\n FakeyMessage(\n {'content-type': 'text/plain'},\n (), b'date', b'Stuff', 54321, None),\n FakeyMessage(\n {'content-type': 'text/html'},\n (), b'date', b'Things', 32415, None)]\n\n # A multipart/alternative message containing the above non-multipart\n # messages. This will be the payload of the outer-most message.\n alternative = FakeyMessage(\n {'content-type': 'multipart/alternative'},\n (), b'', b'Irrelevant', 12345, singles)\n\n # The outer-most message, also with a multipart type, containing just\n # the single middle message.\n mixed = FakeyMessage(\n # The message is multipart/mixed\n {'content-type': 'multipart/mixed'},\n (), b'', b'RootOf', 98765, [alternative])\n\n self.msgObjs = [mixed]\n\n self.expected = {\n 0: {'BODY': [\n [['text', 'plain', None, None, None, None, '5', '1'],\n ['text', 'html', None, None, None, None, '6', '1'],\n 'alternative'],\n 'mixed']}}\n\n return self._fetchWork(False)",
"def test_multiPartExtended(self):\n oneSubPart = FakeyMessage({\n b'content-type': b'image/jpeg; x=y',\n b'content-id': b'some kind of id',\n b'content-description': b'great justice',\n b'content-transfer-encoding': b'maximum',\n }, (), b'', b'hello world', 123, None)\n\n anotherSubPart = FakeyMessage({\n b'content-type': b'text/plain; charset=us-ascii',\n }, (), b'', b'some stuff', 321, None)\n\n container = FakeyMessage({\n 'content-type': 'multipart/related; foo=bar',\n 'content-language': 'es',\n 'content-location': 'Spain',\n 'content-disposition': 'attachment; name=monkeys',\n }, (), b'', b'', 555, [oneSubPart, anotherSubPart])\n\n self.assertEqual(\n [imap4.getBodyStructure(oneSubPart, extended=True),\n imap4.getBodyStructure(anotherSubPart, extended=True),\n 'related', ['foo', 'bar'], ['attachment', ['name', 'monkeys']],\n 'es', 'Spain'],\n imap4.getBodyStructure(container, extended=True))",
"def process_messages(imap, messages):\n for i in messages:\n # fetch the email message by ID\n res, msg = imap.fetch(str(i), \"(RFC822)\")\n for response in msg:\n if isinstance(response, tuple):\n # parse bytes email into a message object\n msg = email.message_from_bytes(response[1])\n #print(msg.keys())\n\n # decode the email subject\n subject = decode_header(msg[\"Subject\"])[0][0]\n if isinstance(subject, bytes):\n # if it's a bytes, decode to str\n subject = subject.decode()\n\n # decode email sender\n From, encoding = decode_header(msg.get(\"From\"))[0]\n if isinstance(From, bytes):\n From = From.decode(encoding)\n\n # decode email Date\n Date, encoding = decode_header(msg.get(\"Date\"))[0]\n if isinstance(From, bytes):\n Date = Date.decode(encoding)\n\n print(\"Subject: \", subject)\n print(\"From: \", From)\n print(\"Date: \", Date)\n\n print(\"=\"*100)",
"def test_multiPart(self):\n oneSubPart = FakeyMessage({\n 'content-type': 'image/jpeg; x=y',\n 'content-id': 'some kind of id',\n 'content-description': 'great justice',\n 'content-transfer-encoding': 'maximum',\n }, (), b'', b'hello world', 123, None)\n\n anotherSubPart = FakeyMessage({\n 'content-type': 'text/plain; charset=us-ascii',\n }, (), b'', b'some stuff', 321, None)\n\n container = FakeyMessage({\n 'content-type': 'multipart/related',\n }, (), b'', b'', 555, [oneSubPart, anotherSubPart])\n\n self.assertEqual(\n [imap4.getBodyStructure(oneSubPart),\n imap4.getBodyStructure(anotherSubPart),\n 'related'],\n imap4.getBodyStructure(container))",
"def get_plaintext_parts(cls, msg):\n\n if msg.is_multipart():\n for payload in msg.get_payload():\n yield from cls.get_plaintext_parts(payload)\n else:\n if msg.get_content_type() == \"text/plain\":\n yield msg",
"def handleMsg(mailbox, msg, is_subpart=False, strdate=\"\"):\r\n global text\r\n global attachments\r\n global fieldFrom, fieldSubject, fieldTime\r\n\r\n # Message/RFC822 parts are bundled this way ==============\r\n while isinstance(msg.get_payload(),email.Message.Message):\r\n msg=msg.get_payload()\r\n\r\n if not is_subpart:\r\n fieldFrom = \"\"\r\n fieldSubject = \"\"\r\n fieldTime = None # fieldTime is a 9-item tuple\r\n text = \"\" # the text contents of a message\r\n attachments = \"\"\r\n\r\n ## Set the \"From\" Field ==================================\r\n if fieldFrom == \"\" and msg['From'] != None:\r\n text += \"To: %s\\n\" % decode_field(msg['To'])\r\n if msg['Cc'] != None:\r\n text += \"Cc: %s\\n\" % decode_field(msg['Cc'])\r\n if msg['Bcc'] != None:\r\n text += \"Bcc: %s\\n\" % decode_field(msg['Bcc'])\r\n text += \"From: %s\\n\" % decode_field(msg['From'])\r\n fieldFrom = decode_field(msg['From'])\r\n\r\n ## Set the \"Subject\" Field ===============================\r\n if fieldSubject == \"\" and msg['Subject'] != None:\r\n fieldSubject = decode_field(msg['Subject'])\r\n text += \"Subject: %s\\n\" % fieldSubject\r\n\r\n ## Set the \"Date\" Field ==================================\r\n if fieldTime == None and msg['Date'] != None:\r\n fieldTime = string2time(msg['Date'])\r\n strdate = time.strftime(\"%Y%m%d%H%M\", fieldTime)\r\n\r\n ## Handle multipart messages recursively =================\r\n if msg.is_multipart():\r\n for submsg in msg.get_payload():\r\n handleMsg(mailbox, submsg, True, strdate)\r\n else:\r\n fname = msg.get_filename()\r\n if fname == None:\r\n if msg.get_content_type() == 'text/plain':\r\n text += \"\\n%s\" % msg.get_payload(decode=1)\r\n else:\r\n fname = \"message.htm\"\r\n\r\n ## Save an attachment to a file ========================\r\n if not fname == None:\r\n fname = decode_field(fname)\r\n filename = \"%s\\\\att_%s\\\\%s_%s\" % (mailboxdir, mailbox, strdate, fname)\r\n org_filename = filename\r\n i = 1\r\n while os.path.exists(filename):\r\n path, ext = os.path.splitext(org_filename)\r\n filename = \"%s (%d)%s\" % (path, i, ext)\r\n i = i + 1\r\n\r\n print \" Found part: %s\" % filename # for debugging purposes\r\n attachments += \"%s\\n\" % filename\r\n fd = open (filename, \"wb\")\r\n data = msg.get_payload(decode=1)\r\n fd.write(data)\r\n\r\n # convert an html message to text\r\n if fname == \"message.htm\":\r\n try:\r\n strio = cStringIO.StringIO()\r\n html2text.html2text_file(data, out=strio.write)\r\n text += strio.getvalue()\r\n strio.close()\r\n except sgmllib.SGMLParseError, e:\r\n print e\r\n\r\n fd.close()\r\n\r\n # if this is the toplevel message (the first function that was called by\r\n # fetch_mailbox, then return the title of the message\r\n if not is_subpart and fieldTime != None:\r\n title = buildTitle(fieldTime, fieldFrom, fieldSubject)\r\n return title",
"def parse_email(self, email_file, file_descriptor):\n\t\t# Open the email file and parse its contents\n\t\tprint(\"[+] Opening source email file: {}\".format(email_file))\n\t\twith open(email_file, 'r') as input_file:\n\t\t\t# Read-in the raw email content\n\t\t\ttry:\n\t\t\t\te = email.message_from_string(input_file.read())\n\t\t\texcept Exception as err:\n\t\t\t\tprint(\"[!] Failed to open the email file!\")\n\t\t\t\tprint(\"L.. Details: {}\".format(err))\n\n\t\t\ttry:\n\t\t\t\t# Check if the email is a multipart MIME message or not\n\t\t\t\tsource = \"\"\n\t\t\t\tif e.is_multipart():\n\t\t\t\t\tprint(\"[+] Processing multi-part email message...\")\n\t\t\t\t\t# Walk through the multi-part MIME message\n\t\t\t\t\tfor payload in e.walk():\n\t\t\t\t\t\t# Check the content disposition, such as \"attachment\"\n\t\t\t\t\t\tcontent_disposition = payload.get_content_disposition()\n\t\t\t\t\t\tattachment = None\n\t\t\t\t\t\tattachment = payload.get_filename()\n\t\t\t\t\t\t# We need to ditch the attachments, so detect and drop them\n\t\t\t\t\t\tif attachment is not None:\n\t\t\t\t\t\t\tprint(\"[+] Attachment detected and discarded: {}, {}\".format(content_disposition, attachment))\n\t\t\t\t\t\t# Find the plaintext and HTML parts\n\t\t\t\t\t\telif payload.get_content_type() == \"text/html\":\n\t\t\t\t\t\t\tsource += payload.get_payload(decode=True).decode(payload.get_content_charset())\n\t\t\t\t\t\telif payload.get_content_type() == \"plain/text\":\n\t\t\t\t\t\t\tsource += payload.get_payload(decode=True).decode(payload.get_content_charset())\n\n\t\t\t\t\t# Replace the URLs\n\t\t\t\t\tsoup = BeautifulSoup(source, \"html.parser\")\n\t\t\t\t\tif self.email_replacement_url != \"\":\n\t\t\t\t\t\tprint(\"[+] Replacing any URLs in the email content: {}\".format(self.email_replacement_url))\n\t\t\t\t\t\tfor link in soup.findAll('a', href=True):\n\t\t\t\t\t\t\tlink['href'] = self.email_replacement_url\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"[-] Warning: No URL provided for email_replacement_url in config file, so the email's links will be preserved.\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# Prettify update source from a blob of HTML to human readable source\n\t\t\t\t\t\tsource = soup.prettify()\n\t\t\t\t\t\t# Fix/unescape characters translated to ;lt ;gt ;amp\n\t\t\t\t\t\tsource = xml.sax.saxutils.unescape(source)\n\n\t\t\t\t\t\tsource = self.add_tracker_to_email(source)\n\n\t\t\t\t\t\t# Write the updated source while removing the added [' and ']\n\t\t\t\t\t\tfile_descriptor.write(source.replace('[','').replace(']',''))\n\t\t\t\t\t\tprint(\"[+] All operations are complete and the output written to {}\".format(self.output_file_name))\n\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\tprint(\"[!] Could not write to the output file!\")\n\t\t\t\t\t\tprint(\"L.. 
Details: {}\".format(err))\n\t\t\t\telse:\n\t\t\t\t\t# We have a non-multipart message, so write out what we have\n\t\t\t\t\tprint(\"[+] Processing non-multipart email message...\")\n\t\t\t\t\tfor payload in e.walk():\n\t\t\t\t\t\tsource += payload.get_payload(decode=True).decode(payload.get_content_charset())\n\n\t\t\t\t\t# Replace the URLs\n\t\t\t\t\tsoup = BeautifulSoup(source, \"html.parser\")\n\t\t\t\t\tif self.email_replacement_url != \"\":\n\t\t\t\t\t\tprint(\"[+] Replacing any URLs in the email content: {}\".format(self.email_replacement_url))\n\t\t\t\t\t\tfor link in soup.findAll('a', href=True):\n\t\t\t\t\t\t\tlink['href'] = self.email_replacement_url\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"[-] Warning: No URL provided for email_replacement_url in config file, so the email's links will be preserved.\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# Prettify update source from a blob of HTML to human readable source\n\t\t\t\t\t\t# This also makes it a string we can use for this next part\n\t\t\t\t\t\tsource = soup.prettify()\n\t\t\t\t\t\tsource = self.add_tracker_to_email(source)\n\t\t\t\t\t\t# Fix/unescape characters translated to ;lt ;gt ;amp\n\t\t\t\t\t\tsource = xml.sax.saxutils.unescape(source)\n\t\t\t\t\t\t# Write the updated source while removing the added [' and ']\n\t\t\t\t\t\tfile_descriptor.write(source.replace('[','').replace(']',''))\n\t\t\t\t\t\tprint(\"[+] All operations are complete and the output written to {}\".format(self.output_file_name))\n\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\tprint(\"[!] Could not write to the output file!\")\n\t\t\t\t\t\tprint(\"L.. Details: {}\".format(err))\n\n\t\t\t\tprint(\"[+] All processes are complete! Check your output file: {}\".format(self.output_file_name))\n\t\t\texcept Exception as err:\n\t\t\t\tprint(\"[!] Failed to write out the email contents!\")\n\t\t\t\tprint(\"L.. Details: {}\".format(err))"
] | [
"0.67589504",
"0.66122556",
"0.6362437",
"0.6281379",
"0.6234594",
"0.6232879",
"0.6215236",
"0.5973215",
"0.5962559",
"0.5891035",
"0.5818458",
"0.5751515",
"0.57384187",
"0.57365113",
"0.56827193",
"0.5681532",
"0.5662649",
"0.5645171",
"0.5585304",
"0.5525507",
"0.5516139",
"0.5511002",
"0.5509832",
"0.5505874",
"0.54722404",
"0.5458001",
"0.54283226",
"0.54132944",
"0.5408922",
"0.5387382"
] | 0.7683323 | 0 |
Parse email `part` and add its contents to `text_content`, `html_content` or `attachments`. | def process_part(self, part):
content_type = part.get_content_type()
filename = part.get_filename()
if content_type == 'text/plain' and not filename:
self.text_content += self.get_payload(part)
elif content_type == 'text/html':
self.html_content += self.get_payload(part)
elif content_type == 'message/rfc822':
# sent by outlook when another email is sent as an attachment to this email
self.show_attached_email_headers_in_content(part)
elif content_type == 'text/calendar':
self.set_calendar_invite(part)
elif filename or 'image' in content_type:
self.get_attachment(part) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_attachment(message_part):\n content_disposition = message_part.get(\"Content-Disposition\", None)\n if content_disposition:\n dispositions = content_disposition.strip().split(\";\")\n if bool(content_disposition and\n dispositions[0].lower() == \"attachment\"):\n\n file_data = message_part.get_payload(decode=True)\n attachment = StringIO(file_data)\n attachment.content_type = message_part.get_content_type()\n attachment.size = len(file_data)\n attachment.name = None\n attachment.create_date = None\n attachment.mod_date = None\n attachment.read_date = None\n\n for param in dispositions[1:]:\n name, value = param.split(\"=\")\n name = name.lower()\n\n if name == \"filename\":\n attachment.name = value\n elif name == \"create-date\":\n attachment.create_date = value # TODO: datetime\n elif name == \"modification-date\":\n attachment.mod_date = value # TODO: datetime\n elif name == \"read-date\":\n attachment.read_date = value # TODO: datetime\n return attachment\n # no attachment\n return None",
"def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)",
"def _single_body(part):\n content_type = part.get_content_type()\n try:\n body = part.get_payload(decode=True)\n except Exception:\n return ''\n\n if content_type == 'text/html':\n return BeautifulSoup(body, 'html.parser').text\n elif content_type == 'text/plain':\n return body\n return ''",
"def get_html_part(parts):\n for part in parts:\n if part[\"mimeType\"] == \"text/html\":\n return part[\"body\"][\"data\"]\n return \"\"",
"def parse_content(content):\n attachments = []\n body = None\n html = None\n\n for part in content.walk():\n if part.get('Content-Disposition') is not None:\n decoded_data = decode_attachment(part)\n\n attachment = parse_attachment(part)\n if attachment:\n attachments.append(attachment)\n elif part.get_content_type() == \"text/plain\":\n if body is None:\n body = \"\"\n body += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n elif part.get_content_type() == \"text/html\":\n if html is None:\n html = \"\"\n html += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n # return the parsed data\n return {\n 'body': body,\n 'html': html,\n 'filename': decoded_data['filename']\n # 'attachments': attachments\n }",
"def process(self) -> None:\n self.parsed = email.message_from_bytes(self.rawmailcontent, policy=email.policy.EmailPolicy()) # type: email.message.EmailMessage\n\n self.subject = self.parsed[\"subject\"]\n\n if self.parsed[\"X-Jicket-Initial-ReplyID\"] is not None and self.parsed[\"X-Jicket-Initial-ReplyID\"] == self.parsed[\"In-Reply-To\"]:\n self.threadstarter = True\n elif self.config.ticketAddress in self.parsed[\"From\"]: # Take more heuristic approach\n self.threadstarter = True\n\n self.rawmailcontent = None # No need to store after processing\n\n self.get_text_bodies(self.parsed)\n self.textfrombodies()",
"def handleMsg(mailbox, msg, is_subpart=False, strdate=\"\"):\r\n global text\r\n global attachments\r\n global fieldFrom, fieldSubject, fieldTime\r\n\r\n # Message/RFC822 parts are bundled this way ==============\r\n while isinstance(msg.get_payload(),email.Message.Message):\r\n msg=msg.get_payload()\r\n\r\n if not is_subpart:\r\n fieldFrom = \"\"\r\n fieldSubject = \"\"\r\n fieldTime = None # fieldTime is a 9-item tuple\r\n text = \"\" # the text contents of a message\r\n attachments = \"\"\r\n\r\n ## Set the \"From\" Field ==================================\r\n if fieldFrom == \"\" and msg['From'] != None:\r\n text += \"To: %s\\n\" % decode_field(msg['To'])\r\n if msg['Cc'] != None:\r\n text += \"Cc: %s\\n\" % decode_field(msg['Cc'])\r\n if msg['Bcc'] != None:\r\n text += \"Bcc: %s\\n\" % decode_field(msg['Bcc'])\r\n text += \"From: %s\\n\" % decode_field(msg['From'])\r\n fieldFrom = decode_field(msg['From'])\r\n\r\n ## Set the \"Subject\" Field ===============================\r\n if fieldSubject == \"\" and msg['Subject'] != None:\r\n fieldSubject = decode_field(msg['Subject'])\r\n text += \"Subject: %s\\n\" % fieldSubject\r\n\r\n ## Set the \"Date\" Field ==================================\r\n if fieldTime == None and msg['Date'] != None:\r\n fieldTime = string2time(msg['Date'])\r\n strdate = time.strftime(\"%Y%m%d%H%M\", fieldTime)\r\n\r\n ## Handle multipart messages recursively =================\r\n if msg.is_multipart():\r\n for submsg in msg.get_payload():\r\n handleMsg(mailbox, submsg, True, strdate)\r\n else:\r\n fname = msg.get_filename()\r\n if fname == None:\r\n if msg.get_content_type() == 'text/plain':\r\n text += \"\\n%s\" % msg.get_payload(decode=1)\r\n else:\r\n fname = \"message.htm\"\r\n\r\n ## Save an attachment to a file ========================\r\n if not fname == None:\r\n fname = decode_field(fname)\r\n filename = \"%s\\\\att_%s\\\\%s_%s\" % (mailboxdir, mailbox, strdate, fname)\r\n org_filename = filename\r\n i = 1\r\n while os.path.exists(filename):\r\n path, ext = os.path.splitext(org_filename)\r\n filename = \"%s (%d)%s\" % (path, i, ext)\r\n i = i + 1\r\n\r\n print \" Found part: %s\" % filename # for debugging purposes\r\n attachments += \"%s\\n\" % filename\r\n fd = open (filename, \"wb\")\r\n data = msg.get_payload(decode=1)\r\n fd.write(data)\r\n\r\n # convert an html message to text\r\n if fname == \"message.htm\":\r\n try:\r\n strio = cStringIO.StringIO()\r\n html2text.html2text_file(data, out=strio.write)\r\n text += strio.getvalue()\r\n strio.close()\r\n except sgmllib.SGMLParseError, e:\r\n print e\r\n\r\n fd.close()\r\n\r\n # if this is the toplevel message (the first function that was called by\r\n # fetch_mailbox, then return the title of the message\r\n if not is_subpart and fieldTime != None:\r\n title = buildTitle(fieldTime, fieldFrom, fieldSubject)\r\n return title",
"def attach_text(self, text_part):\n part = MIMEText(text_part)\n self.alternative.attach(part)",
"def contentEmail(email_one):\n parse = email.message_from_string(email_one)\n if parse.is_multipart():\n for payload in parse.walk():\n email_three = payload.get_payload()\n try:\n email_three = email_three\n except AttributeError:\n continue\n return email_three \n else:\n email_two = parse.get_payload()\n email_two = email_two\n return email_two",
"def add_body_part(self, part):\n part.part_number = len(self.parts)\n self.parts.append(part)",
"def multipart_nested():\n msg = MIMEMultipart(\"mixed\")\n msg[\"From\"] = sender\n msg[\"To\"] = recipient\n msg[\"Subject\"] = \"Nested multipart email\"\n\n part_1 = MIMEMultipart(\"alternative\")\n part_1_text = MIMEText(\"This is the **first** part\\n\", \"plain\")\n part_1_html = MIMEText(\"This is the <strong>first</strong> part\\n\", \"html\")\n part_1.attach(part_1_text)\n part_1.attach(part_1_html)\n\n part_2 = MIMEText(\"This is the second part\\n\", \"plain\")\n\n msg.attach(part_1)\n msg.attach(part_2)\n\n return msg",
"def parse_email(self, email_file, file_descriptor):\n\t\t# Open the email file and parse its contents\n\t\tprint(\"[+] Opening source email file: {}\".format(email_file))\n\t\twith open(email_file, 'r') as input_file:\n\t\t\t# Read-in the raw email content\n\t\t\ttry:\n\t\t\t\te = email.message_from_string(input_file.read())\n\t\t\texcept Exception as err:\n\t\t\t\tprint(\"[!] Failed to open the email file!\")\n\t\t\t\tprint(\"L.. Details: {}\".format(err))\n\n\t\t\ttry:\n\t\t\t\t# Check if the email is a multipart MIME message or not\n\t\t\t\tsource = \"\"\n\t\t\t\tif e.is_multipart():\n\t\t\t\t\tprint(\"[+] Processing multi-part email message...\")\n\t\t\t\t\t# Walk through the multi-part MIME message\n\t\t\t\t\tfor payload in e.walk():\n\t\t\t\t\t\t# Check the content disposition, such as \"attachment\"\n\t\t\t\t\t\tcontent_disposition = payload.get_content_disposition()\n\t\t\t\t\t\tattachment = None\n\t\t\t\t\t\tattachment = payload.get_filename()\n\t\t\t\t\t\t# We need to ditch the attachments, so detect and drop them\n\t\t\t\t\t\tif attachment is not None:\n\t\t\t\t\t\t\tprint(\"[+] Attachment detected and discarded: {}, {}\".format(content_disposition, attachment))\n\t\t\t\t\t\t# Find the plaintext and HTML parts\n\t\t\t\t\t\telif payload.get_content_type() == \"text/html\":\n\t\t\t\t\t\t\tsource += payload.get_payload(decode=True).decode(payload.get_content_charset())\n\t\t\t\t\t\telif payload.get_content_type() == \"plain/text\":\n\t\t\t\t\t\t\tsource += payload.get_payload(decode=True).decode(payload.get_content_charset())\n\n\t\t\t\t\t# Replace the URLs\n\t\t\t\t\tsoup = BeautifulSoup(source, \"html.parser\")\n\t\t\t\t\tif self.email_replacement_url != \"\":\n\t\t\t\t\t\tprint(\"[+] Replacing any URLs in the email content: {}\".format(self.email_replacement_url))\n\t\t\t\t\t\tfor link in soup.findAll('a', href=True):\n\t\t\t\t\t\t\tlink['href'] = self.email_replacement_url\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"[-] Warning: No URL provided for email_replacement_url in config file, so the email's links will be preserved.\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# Prettify update source from a blob of HTML to human readable source\n\t\t\t\t\t\tsource = soup.prettify()\n\t\t\t\t\t\t# Fix/unescape characters translated to ;lt ;gt ;amp\n\t\t\t\t\t\tsource = xml.sax.saxutils.unescape(source)\n\n\t\t\t\t\t\tsource = self.add_tracker_to_email(source)\n\n\t\t\t\t\t\t# Write the updated source while removing the added [' and ']\n\t\t\t\t\t\tfile_descriptor.write(source.replace('[','').replace(']',''))\n\t\t\t\t\t\tprint(\"[+] All operations are complete and the output written to {}\".format(self.output_file_name))\n\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\tprint(\"[!] Could not write to the output file!\")\n\t\t\t\t\t\tprint(\"L.. 
Details: {}\".format(err))\n\t\t\t\telse:\n\t\t\t\t\t# We have a non-multipart message, so write out what we have\n\t\t\t\t\tprint(\"[+] Processing non-multipart email message...\")\n\t\t\t\t\tfor payload in e.walk():\n\t\t\t\t\t\tsource += payload.get_payload(decode=True).decode(payload.get_content_charset())\n\n\t\t\t\t\t# Replace the URLs\n\t\t\t\t\tsoup = BeautifulSoup(source, \"html.parser\")\n\t\t\t\t\tif self.email_replacement_url != \"\":\n\t\t\t\t\t\tprint(\"[+] Replacing any URLs in the email content: {}\".format(self.email_replacement_url))\n\t\t\t\t\t\tfor link in soup.findAll('a', href=True):\n\t\t\t\t\t\t\tlink['href'] = self.email_replacement_url\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"[-] Warning: No URL provided for email_replacement_url in config file, so the email's links will be preserved.\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# Prettify update source from a blob of HTML to human readable source\n\t\t\t\t\t\t# This also makes it a string we can use for this next part\n\t\t\t\t\t\tsource = soup.prettify()\n\t\t\t\t\t\tsource = self.add_tracker_to_email(source)\n\t\t\t\t\t\t# Fix/unescape characters translated to ;lt ;gt ;amp\n\t\t\t\t\t\tsource = xml.sax.saxutils.unescape(source)\n\t\t\t\t\t\t# Write the updated source while removing the added [' and ']\n\t\t\t\t\t\tfile_descriptor.write(source.replace('[','').replace(']',''))\n\t\t\t\t\t\tprint(\"[+] All operations are complete and the output written to {}\".format(self.output_file_name))\n\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\tprint(\"[!] Could not write to the output file!\")\n\t\t\t\t\t\tprint(\"L.. Details: {}\".format(err))\n\n\t\t\t\tprint(\"[+] All processes are complete! Check your output file: {}\".format(self.output_file_name))\n\t\t\texcept Exception as err:\n\t\t\t\tprint(\"[!] Failed to write out the email contents!\")\n\t\t\t\tprint(\"L.. Details: {}\".format(err))",
"def __store_part(self, definition, pnum, multisubtype):\n pnum = \"1\" if pnum is None else pnum\n params = {\n \"pnum\": pnum,\n \"params\": definition[2],\n \"cid\": definition[3],\n \"description\": definition[4],\n \"encoding\": definition[5],\n \"size\": definition[6]\n }\n mtype = definition[0].lower()\n subtype = definition[1].lower()\n ftype = \"%s/%s\" % (definition[0].lower(), subtype)\n if ftype in (\"text/plain\", \"text/html\"):\n if subtype not in self.contents:\n self.contents[subtype] = [params]\n else:\n self.contents[subtype].append(params)\n return\n elif multisubtype in [\"related\"]:\n self.inlines[params[\"cid\"].strip(\"<>\")] = params\n return\n\n params[\"Content-Type\"] = ftype\n if len(definition) > 7:\n extensions = [\"md5\", \"disposition\", \"language\", \"location\"]\n if mtype == \"text\":\n extensions = [\"textlines\"] + extensions\n elif ftype == \"message/rfc822\":\n extensions = [\n \"envelopestruct\",\n \"bodystruct\",\n \"textlines\"] + extensions\n for idx, value in enumerate(definition[7:]):\n params[extensions[idx]] = value\n\n self.attachments += [params]",
"def attach_html(self, html_part):\n part = MIMEText(html_part, 'html')\n self.alternative.attach(part)",
"def multipart_mixed():\n msg = MIMEMultipart(\"mixed\")\n msg[\"From\"] = sender\n msg[\"To\"] = recipient\n msg[\"Subject\"] = \"Multipart mixed\"\n\n part_1 = MIMEText(\"This is the first part (plaintext)\\n\", \"plain\")\n part_2 = MIMEText(\"This is the second part (HTML)\\n\", \"html\")\n part_3 = MIMEText(\"This is the third part (plaintext)\\n\", \"plain\")\n part_4 = MIMEText(\"This is the fourth part (HTML)\\n\", \"html\")\n\n msg.attach(part_1)\n msg.attach(part_2)\n msg.attach(part_3)\n msg.attach(part_4)\n\n return msg",
"def __init__(self, content):\n\t\tself.raw = content\n\t\tself.mail = email.message_from_string(self.raw)\n\n\t\tself.text_content = ''\n\t\tself.html_content = ''\n\t\tself.attachments = []\n\t\tself.cid_map = {}\n\t\tself.parse()\n\t\tself.set_content_and_type()\n\t\tself.set_subject()\n\t\tself.set_from()\n\t\tself.message_id = self.mail.get('Message-ID')\n\n\n\t\tself.unique_id = get_unique_id(self.mail)\n\n\t\t# gmail mailing-list compatibility\n\t\t# use X-Original-Sender if available, as gmail sometimes modifies the 'From'\n\t\t# _from_email = self.mail.get(\"X-Original-From\") or self.mail[\"From\"]\n\t\t# \n\t\t# self.from_email = extract_email_id(_from_email)\n\t\t# if self.from_email:\n\t\t# \tself.from_email = self.from_email.lower()\n\t\t# \n\t\t# #self.from_real_name = email.utils.parseaddr(_from_email)[0]\n\t\t# \n\t\t# _from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])\n\t\t# self.from_real_name = decode_header(email.utils.parseaddr(_from_email)[0])[0][0] or \"\"\n\t\t# \n\t\t# try:\n\t\t# \tif _from_real_name[0][1]:\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(_from_real_name[0][1])\n\t\t# \telse:\n\t\t# \t\t# assume that the encoding is utf-8\n\t\t# \t\tself.from_real_name = self.from_real_name.decode(\"utf-8\")\n\t\t# except UnicodeDecodeError,e:\n\t\t# \tprint e\n\t\t# \tpass\n\n\t\t#self.from_real_name = email.Header.decode_header(email.utils.parseaddr(_from_email)[0])[0][0]\n\t\tself.To = self.mail.get(\"To\")\n\t\tif self.To:\n\t\t\tto = u\"\"\n\t\t\tfor name, encoding in decode_header(self.To):\n\t\t\t\tif encoding:\n\t\t\t\t\tto += name.decode(encoding)\n\t\t\t\telse:\n\t\t\t\t\tto += name\n\t\t\tself.To = to.lower()\n\t\tself.CC = self.mail.get(\"CC\")\n\t\tif self.CC:\n\t\t\tself.CC = self.CC.lower()\n\t\tif self.mail[\"Date\"]:\n\t\t\ttry:\n\t\t\t\tutc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail[\"Date\"]))\n\t\t\t\tutc_dt = datetime.datetime.utcfromtimestamp(utc)\n\t\t\t\tself.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\texcept:\n\t\t\t\tself.date = now()\n\t\telse:\n\t\t\tself.date = now()\n\t\tif self.date > now():\n\t\t\tself.date = now()",
"def fetchpart(self, uid, mbox, partnum):\n self.select_mailbox(mbox, False)\n data = self._cmd(\"FETCH\", uid, \"(BODYSTRUCTURE BODY[%s])\" % partnum)\n bs = BodyStructure(data[int(uid)][\"BODYSTRUCTURE\"])\n attdef = bs.find_attachment(partnum)\n return attdef, data[int(uid)][\"BODY[%s]\" % partnum]",
"def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]",
"def extract_metadata(self, msg, payload, text, part):\n\n if part.get_content_maintype() == \"image\":\n\n name = part.get_param(\"name\")\n subtype = part.get_content_subtype()\n\n self._add_name(msg, name)\n self._update_counts(msg, subtype, by=1)\n self._save_stats(msg, part.get_payload(decode=True), subtype)",
"def payload_parse(self, mail):\n\t\tif mail.is_multipart():\n\t\t\tfor payload in mail.get_payload():\n\t\t\t\tif payload.get_content_maintype() == \"multipart\":\n\t\t\t\t\tself.payload_parse(payload)\n\t\t\t\telse:\n\t\t\t\t\tself.payload_handle(payload, mail)\n\t\t\t# Post deletion of payloads:\n\t\t\tself.payload_delete(mail)",
"def send_html_email(\n textpart, htmlpart, subject, recipients,\n server=\"localhost\",\n sender=\"Foodmaster <foodmaster@localhost>\",\n):\n\n msgtpl = email.mime.multipart.MIMEMultipart('alternative')\n msgtpl['Subject'] = subject\n msgtpl['From'] = email.utils.formataddr(email.utils.parseaddr(sender))\n msgtpl['Date'] = email.utils.formatdate(localtime=True)\n msgtpl.attach(email.mime.text.MIMEText(textpart))\n msgtpl.attach(email.mime.text.MIMEText(htmlpart, 'html'))\n\n with smtplib.SMTP(server) as smtp:\n for r in recipients:\n msg = copy.deepcopy(msgtpl)\n msg['Message-id'] = email.utils.make_msgid('poledni-menu')\n msg['To'] = email.utils.formataddr(email.utils.parseaddr(r))\n smtp.send_message(msg)",
"def get_mail_html_part(self,mail):\n return mail.alternatives[0][0]",
"def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n\n # Split in parts\n for line in text.splitlines():\n if line.startswith((\"# \", \"## \", \"### \", \"#### \", \"##### \")):\n # Finish pending lines\n parts.append(\"\\n\".join(lines))\n lines = []\n # Process header\n level = len(line.split(\" \")[0])\n title = line.split(\" \", 1)[1]\n title_short = title.split(\"(\")[0].split(\"<\")[0].strip().replace(\"`\", \"\")\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append(\"\\n\".join(lines))\n\n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + \"\\n\\n\"",
"def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)",
"def test_textPart(self):\n body = b'hello, world\\nhow are you?\\ngoodbye\\n'\n major = 'text'\n minor = 'jpeg'\n charset = 'us-ascii'\n identifier = 'some kind of id'\n description = 'great justice'\n encoding = 'maximum'\n msg = FakeyMessage({\n 'content-type': major + '/' + minor +\n '; charset=' + charset + '; x=y',\n 'content-id': identifier,\n 'content-description': description,\n 'content-transfer-encoding': encoding,\n }, (), b'', body, 123, None)\n structure = imap4.getBodyStructure(msg)\n self.assertEqual(\n [major, minor, [\"charset\", charset, 'x', 'y'], identifier,\n description, encoding, len(body), len(body.splitlines())],\n structure)",
"def get_and_send_attachments(self, session, mid, message_payload_parts, context, m_chat_id):\r\n\r\n store_dir_1 = os.getcwd()\r\n\r\n for part in message_payload_parts:\r\n if part['filename']:\r\n attachment_id = part['body']['attachmentId']\r\n\r\n response = session.get(f'https://www.googleapis.com/gmail/v1/users/me/'\r\n f'messages/{mid}/attachments/{attachment_id}')\r\n\r\n data = response.content\r\n encoded_data_dict = ast.literal_eval(data.decode('utf-8'))\r\n file_data = base64.urlsafe_b64decode(encoded_data_dict['data'].encode('UTF-8'))\r\n\r\n path = os.path.join(store_dir_1, part['filename'])\r\n\r\n # запись данных в файловую систему, чтение, отправка и удаление\r\n with open(path, 'wb') as file_object:\r\n file_object.write(file_data)\r\n with open(path, 'rb') as f:\r\n context.bot.send_document(m_chat_id, f)\r\n os.remove(path)",
"def extract_email_address(logpart):\n # print \"Parsing for email address: {}\".format(logpart)\n return(logpart.split('<')[1].split('>')[0])",
"def message_with_attachments(self, session, mid, context, zero_part, message_payload_parts,\r\n from_who, to_whom, subject):\r\n\r\n zero_part_parts = zero_part['parts']\r\n sub_zero_part = zero_part_parts[0]\r\n body_of_part = sub_zero_part['body']\r\n\r\n # декодируем\r\n encoded_text = body_of_part['data']\r\n decodedBytes = base64.urlsafe_b64decode(encoded_text)\r\n decoded_text = str(decodedBytes, \"utf-8\") # текст сообщения сохраняем в переменную\r\n\r\n if self.SECRET_KEY in subject or self.SECRET_KEY in decoded_text:\r\n\r\n telebot_message_text = f'Sender: {from_who}.\\n' \\\r\n f'Receiver: {to_whom}.\\n' \\\r\n f'Subject: {subject}.\\n' \\\r\n f'Text of message: {decoded_text}'\r\n\r\n with open('managers.json') as obj:\r\n managers = json.load(obj)\r\n\r\n for m_chat_id in managers.values():\r\n try:\r\n context.bot.send_message(chat_id=m_chat_id, text=telebot_message_text) # отправка сообщения в бот\r\n except:\r\n pass\r\n\r\n self.get_and_send_attachments(session, mid, message_payload_parts, context, m_chat_id)",
"def on_part(self, raw_msg, source, **kwargs):",
"def decode_message_part(message_part):\n return base64.urlsafe_b64decode(message_part['body']['data']).decode().strip()"
] | [
"0.65008265",
"0.62675315",
"0.61053777",
"0.6043412",
"0.59855354",
"0.5756961",
"0.5730942",
"0.56908387",
"0.55686265",
"0.5391659",
"0.53889847",
"0.53342295",
"0.5289718",
"0.5285181",
"0.52509284",
"0.5240541",
"0.5207104",
"0.51973563",
"0.519061",
"0.51662457",
"0.50913465",
"0.50576967",
"0.5038293",
"0.500471",
"0.50002676",
"0.49957126",
"0.49901813",
"0.4964197",
"0.49374586",
"0.48959765"
] | 0.7365437 | 0 |
Save email attachments in the given document. | def save_attachments_in_doc(self, doc):
saved_attachments = []
for attachment in self.attachments:
try:
file_data = save_file(attachment['fname'], attachment['fcontent'],
doc.doctype, doc.name, is_private=1)
saved_attachments.append(file_data)
if attachment['fname'] in self.cid_map:
self.cid_map[file_data.name] = self.cid_map[attachment['fname']]
except MaxFileSizeReachedError:
# WARNING: bypass max file size exception
pass
except frappe.DuplicateEntryError:
# same file attached twice??
pass
return saved_attachments | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, *args, **kwargs):\n attachments_to_save = []\n for attachment_attr in self.attachment_list:\n attachments_to_save.append(self.validated_data.pop(attachment_attr))\n response = super().save(*args, **kwargs)\n for attachment in attachments_to_save:\n if attachment.content_object is None:\n attachment.content_object = self.instance\n attachment.save()\n return response",
"def _save_attachments(self, messages, attachment_format,\n replace_spaces_in_filename):\n\n output_format = ATTACHMENT_FORMAT[attachment_format]\n replace_spaces_in_filename = REPLACE_SPACE.get(\n replace_spaces_in_filename, None)\n att_result_list = list()\n\n for message in messages:\n # Only *email* messages are handled.\n if not isinstance(message, Message):\n err_msg = (\"Message ID '{id}' is not an email message \"\n \"(item type: {item_type}).\".format(\n id=str(message.item_id),\n item_type=str(message.item_type)))\n self.logger.error(err_msg)\n raise TypeError(err_msg)\n # Save each attachment, if any\n att_filename_list = list()\n for attachment in message.attachments:\n if isinstance(attachment, FileAttachment):\n output_file = self._get_unique_filename(\n attachment_name=attachment.name,\n attachment_sent=message.datetime_sent,\n replace_spaces_in_filename=replace_spaces_in_filename)\n self.logger.debug(\"File attachment: {f}\"\n .format(f=output_file))\n with open(os.path.abspath(output_file), output_format) \\\n as f:\n f.write(attachment.content)\n self.logger.info(\"Saved attachment '{att_name}'.\"\n .format(att_name=output_file))\n att_filename_list.append(output_file)\n else:\n self.logger.info(\"Attachment '{att_name}' on email \"\n \"'{email}' is not a *file* attachment. \"\n \"Skipping...\".format(\n att_name=str(attachment.name),\n email=str(message.subject)))\n\n # Append to result list ONLY if one or more attachments are saved.\n if att_filename_list:\n att_result_list.append(dict([\n (\"email_subject\", str(message.subject)),\n (\"email_sent\", str(message.datetime_sent)),\n (\"sender_email_address\",\n str(message.sender.email_address)),\n (\"attachment_files\", att_filename_list)\n ]))\n\n return att_result_list",
"def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMessage(to=to, subject=subject)\n message.to = to\n message.subject = subject\n message.body = render_to_string(template, {'document': document})\n message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)\n\n # Send the message\n message.send()",
"def wrapup(self):\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc[\"_rev\"] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc,\n attachment[\"content\"],\n filename=attachment[\"filename\"],\n content_type=attachment[\"mimetype\"])",
"def _write_attachment(self, root, context=None):\n fecha = time.strftime('%Y_%m_%d_%H%M%S')\n name = 'IVA_' + fecha + '.' + 'txt'\n self.env['ir.attachment'].create({\n 'name': name,\n 'datas': base64.encodestring(root),\n 'datas_fname': name,\n 'res_model': 'txt.iva',\n 'res_id': self.ids[0],\n })\n msg = _(\"File TXT %s generated.\") % (name)\n self.message_post(body=msg)",
"def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()",
"def save_doc(self, doc, encode_attachments=True, force_update=False,\n **params):\n if doc is None:\n doc1 = {}\n else:\n doc1, schema = _maybe_serialize(doc)\n\n if '_attachments' in doc1 and encode_attachments:\n doc1['_attachments'] = resource.encode_attachments(doc['_attachments'])\n\n if '_id' in doc1:\n docid = doc1['_id'] if six.PY3 else doc1['_id'].encode('utf-8')\n couch_doc = Document(self.cloudant_database, docid)\n couch_doc.update(doc1)\n try:\n # Copied from Document.save to ensure that a deleted doc cannot be saved.\n headers = {}\n headers.setdefault('Content-Type', 'application/json')\n put_resp = couch_doc.r_session.put(\n couch_doc.document_url,\n data=couch_doc.json(),\n headers=headers\n )\n put_resp.raise_for_status()\n data = put_resp.json()\n super(Document, couch_doc).__setitem__('_rev', data['rev'])\n except HTTPError as e:\n if e.response.status_code != 409:\n raise\n\n if force_update:\n couch_doc['_rev'] = self.get_rev(docid)\n couch_doc.save()\n else:\n raise ResourceConflict\n res = couch_doc\n else:\n res = self.cloudant_database.create_document(doc1)\n\n if 'batch' in params and ('id' in res or '_id' in res):\n doc1.update({ '_id': res.get('_id')})\n else:\n doc1.update({'_id': res.get('_id'), '_rev': res.get('_rev')})\n\n if schema:\n for key, value in six.iteritems(doc.__class__.wrap(doc1)):\n doc[key] = value\n else:\n doc.update(doc1)\n return {\n 'id': res['_id'],\n 'rev': res['_rev'],\n 'ok': True,\n }",
"def _extract_inline_attachments(doc, files):\n for attr, f in files.items():\n if f.b64:\n data = f.file.replace('\\n', '')\n else:\n data = base64.encodestring(f.file.read()).replace('\\n','')\n f.file.close()\n del f.file\n del f.b64\n del f.inline\n del f.doc_id\n doc.setdefault('_attachments',{})[f.id] = {'content_type': f.mimetype,'data': data}",
"def attachments(self, attachments):\n\n self._attachments = attachments",
"def save_form(self, request, form, change):\n\n document = form.instance\n self.send_notification_email(document, request, \n 'email/document_modified.txt.django')\n\n document = super(DocumentAdmin, self).save_form(request, form, change)\n document.uploader = request.user\n return document",
"def upload_latest(self, project_id, document):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' + str(document.get_id()) + '/' \n data = parser.to_json(document)\n if document.get_upload_doc():\n file_list = []\n for value in document.get_upload_doc():\n attachment = {\n 'uploaddoc': {\n 'filename': basename(value), \n 'content':open(value).read()\n } \n }\n file_list.append(attachment)\n else:\n file_list = []\n response = zoho_http_client.post(url, self.details, data, None, file_list)\n return parser.get_documents(response)[0]",
"def get_and_send_attachments(self, session, mid, message_payload_parts, context, m_chat_id):\r\n\r\n store_dir_1 = os.getcwd()\r\n\r\n for part in message_payload_parts:\r\n if part['filename']:\r\n attachment_id = part['body']['attachmentId']\r\n\r\n response = session.get(f'https://www.googleapis.com/gmail/v1/users/me/'\r\n f'messages/{mid}/attachments/{attachment_id}')\r\n\r\n data = response.content\r\n encoded_data_dict = ast.literal_eval(data.decode('utf-8'))\r\n file_data = base64.urlsafe_b64decode(encoded_data_dict['data'].encode('UTF-8'))\r\n\r\n path = os.path.join(store_dir_1, part['filename'])\r\n\r\n # запись данных в файловую систему, чтение, отправка и удаление\r\n with open(path, 'wb') as file_object:\r\n file_object.write(file_data)\r\n with open(path, 'rb') as f:\r\n context.bot.send_document(m_chat_id, f)\r\n os.remove(path)",
"def _handle_separate_attachments(session, deletions, additions):\n # XXX This needs to cope with files moving when sequences are re-numbered. We need\n # XXX to talk to matt about what a renumbering like this looks like\n\n for id, attrfiles in additions.items():\n doc = session.get(id)\n stubdoc = {'_id':doc['_id'], '_rev':doc['_rev']}\n for attr, f in attrfiles.items():\n data = ''\n if f.file:\n if f.b64:\n data = base64.decodestring(f.file)\n else:\n data = f.file.read()\n f.file.close()\n session._db.put_attachment(stubdoc, data, filename=f.id, content_type=f.mimetype)\n del f.file\n del f.b64\n del f.inline\n del f.doc_id\n\n for id, attrfiles in deletions.items():\n # XXX had to use _db because delete attachment freeaked using session version. \n doc = session._db.get(id)\n for attr, f in attrfiles.items():\n session._db.delete_attachment(doc, f.id)\n\n additions = {}\n deletions = {}",
"def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()",
"def attachments_create(self,\r\n document_id,\r\n request):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id,\r\n request=request)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json; charset=utf-8'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, AttachmentResponse.from_dictionary)",
"def saveas(self, model, field, id=None, filename_field=None,file_name=None, **kw):\n Model = request.env[model]\n fields = [field]\n if filename_field:\n fields.append(filename_field)\n if id:\n id = int(id)\n res = Model.browse(id).read(fields)[0]\n else:\n res = Model.default_get(fields)\n filecontent = base64.b64decode(res.get(field) or '')\n content_type = kw.get('content_type', 'application/octet-stream')\n if not filecontent:\n return request.not_found()\n else:\n filename = '%s_%s' % (model.replace('.', '_'), id)\n if file_name:\n filename = file_name \n elif filename_field:\n filename = res.get(filename_field, '') or filename\n \n if id and kw.get(\"delete_document\",False):\n Model.sudo().browse(id).write({field:False})\n return request.make_response(filecontent,\n [('Content-Type', content_type),\n ('Content-Disposition', content_disposition(filename))])",
"def save_documents(event, transcript_data):\n documents = [\n ('transcript_url', \"transcript\"),\n ('opening_statement_chair', \"chair opening statement\"),\n ('opening_statement_rm', \"ranking member opening statement\")\n ]\n\n for (field, note) in documents:\n url = transcript_data[field]\n save_document(url, note, event)",
"def test_publish_with_add_first_file_attachment(self):\n draft = self._get_draft()\n draft.target_people.add(\n User.objects.create_user(username='testuser'))\n review_request = draft.review_request\n self.assertEqual(draft.file_attachments_count, 0)\n self.assertEqual(draft.inactive_file_attachments_count, 0)\n self.assertEqual(review_request.file_attachments_count, 0)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)\n\n attachment = self.create_file_attachment(review_request,\n draft=draft,\n caption='',\n draft_caption='Test Caption')\n self.assertEqual(draft.file_attachments_count, 1)\n self.assertEqual(draft.inactive_file_attachments_count, 0)\n self.assertEqual(review_request.file_attachments_count, 0)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)\n\n changes = draft.publish()\n\n attachment = FileAttachment.objects.get(pk=attachment.pk)\n self.assertEqual(attachment.caption, 'Test Caption')\n\n fields = changes.fields_changed\n\n self.assertEqual(fields['files'], {\n 'new': [\n (attachment.display_name,\n attachment.get_absolute_url(),\n attachment.pk)\n ],\n 'added': [\n (attachment.display_name,\n attachment.get_absolute_url(),\n attachment.pk)\n ],\n 'old': [],\n 'removed': [],\n })\n self.assertEqual(review_request.file_attachments_count, 1)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)",
"def dealDocument(update: Update, _: CallbackContext) -> None:\n file_name = update.message.document.file_name\n file_name = uuid.uuid4().hex + \".\" + \\\n secure_filename(file_name).split(\".\")[-1]\n imagePath = os.path.join(args.input, file_name)\n update.message.document.get_file().download(custom_path=imagePath)\n add_mark(imagePath, mark, args)\n resultPath = os.path.join(args.out, file_name)\n with open(resultPath,\"rb\") as file:\n update.message.reply_document(file)",
"def add_attachment(self, name, content, type=None):\n if not type:\n type = guess_mimetype(name)\n\n # attachment[0]\n self.files['attachment[' + str(len(self.files)) + ']'] = (name, content, type)",
"def download_attachment(self, msg):\n path = None\n for part in msg.walk():\n if part.get_content_type() == 'application/pdf':\n\n time_prefix = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n filename = time_prefix+\"-\"+part.get_filename()\n path = os.path.join(self._DOWNLOAD_FOLDER, filename)\n\n if not os.path.isfile(path):\n with open(path, 'wb') as fb:\n fb.write(part.get_payload(decode=True))\n\n self._processed = True\n return path, self.get_company(msg['From'], msg['To'])",
"def test_attachments(self):\n data = mailgun_payload\n attachment_1 = open(self.test_upload_txt, 'r').read()\n attachment_2 = open(self.test_upload_png, 'rb').read()\n data['attachment-1'] = open(self.test_upload_txt, 'r')\n data['attachment-2'] = open(self.test_upload_png, 'rb')\n request = self.factory.post(self.url, data=data)\n email = self.parser.parse(request)\n\n self._assertEmailParsedCorrectly(email, data)\n\n # for each attachmen, check the contents match the input\n self.assertEqual(len(email.attachments), 2)\n\n # convert list of 3-tuples into dict so we can lookup by filename\n attachments = {k[0]: (k[1], k[2]) for k in email.attachments}\n self.assertEqual(smart_bytes(attachments['attachment-1'][0]), smart_bytes(attachment_1))\n self.assertEqual(attachments['attachment-1'][1], 'text/plain')\n self.assertEqual(attachments['attachment-2'][0], attachment_2)\n self.assertEqual(attachments['attachment-2'][1], 'image/jpeg')",
"def put_attach_document(filename: str, entry_hash: str) -> str:\n g.ledger.file.insert_metadata(entry_hash, \"document\", filename)\n return f\"Attached '{filename}' to entry.\"",
"def perform_create(self, serializer):\n\n attachment = serializer.save()\n attachment.user = self.request.user\n attachment.save()",
"def _store(self):\n self._post_item.save()\n self._attachment_item.save()\n self._marshaller.marshall(self._post_item)",
"def output_attachment(self, path, content):\n\t\twith open(path, \"w+b\") as fd:\n\t\t\tfd.write(content)",
"def attachments_update(self,\r\n document_id,\r\n attachment_id,\r\n request):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id,\r\n attachment_id=attachment_id,\r\n request=request)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments/{attachmentId}'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id,\r\n 'attachmentId': attachment_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json',\r\n 'content-type': 'application/json; charset=utf-8'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.patch(_query_url, headers=_headers, parameters=APIHelper.json_serialize(request))\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, AttachmentResponse.from_dictionary)",
"def attach(self, filename, file_contents, **extra):\n\t\tself._client.add_attachment(self, filename, file_contents, **extra)",
"def test_publish_with_delete_file_attachment(self):\n user = User.objects.create_user(username='testuser')\n review_request = self.create_review_request(target_people=[user])\n attachment = self.create_file_attachment(review_request,\n caption='File 1')\n review_request.publish(review_request.submitter)\n\n draft = ReviewRequestDraft.create(review_request)\n self.assertEqual(draft.file_attachments_count, 1)\n self.assertEqual(draft.inactive_file_attachments_count, 0)\n self.assertEqual(review_request.file_attachments_count, 1)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)\n\n draft.file_attachments.remove(attachment)\n draft.inactive_file_attachments.add(attachment)\n\n self.assertEqual(draft.file_attachments_count, 0)\n self.assertEqual(draft.inactive_file_attachments_count, 1)\n self.assertEqual(review_request.file_attachments_count, 1)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)\n\n changes = draft.publish()\n fields = changes.fields_changed\n\n self.assertEqual(fields['files'], {\n 'new': [],\n 'added': [],\n 'old': [\n (attachment.display_name,\n attachment.get_absolute_url(),\n attachment.pk),\n ],\n 'removed': [\n (attachment.display_name,\n attachment.get_absolute_url(),\n attachment.pk),\n ],\n })\n self.assertEqual(review_request.file_attachments_count, 0)\n self.assertEqual(review_request.inactive_file_attachments_count, 1)",
"def put(self):\n if 'file' not in self.request.POST:\n self.request.errors.add('body', 'file', 'Not Found')\n self.request.errors.status = 404\n return\n tender = TenderDocument.load(self.db, self.tender_id)\n if not tender:\n self.request.errors.add('url', 'tender_id', 'Not Found')\n self.request.errors.status = 404\n return\n data = self.request.POST['file']\n bids = [i for i in tender.bids if i.id == self.bid_id]\n if not bids:\n self.request.errors.add('url', 'bid_id', 'Not Found')\n self.request.errors.status = 404\n return\n bid = bids[0]\n documents = [i for i in bid.documents if i.id == self.request.matchdict['id']]\n if not documents:\n self.request.errors.add('url', 'id', 'Not Found')\n self.request.errors.status = 404\n return\n src = tender.serialize(\"plain\")\n document = Document()\n document.id = self.request.matchdict['id']\n document.title = data.filename\n document.format = data.type\n document.datePublished = documents[0].datePublished\n key = uuid4().hex\n document.url = self.request.route_url('Tender Bid Documents', tender_id=self.tender_id, bid_id=self.bid_id, id=document.id, _query={'download': key})\n bid.documents.append(document)\n filename = \"{}_{}\".format(document.id, key)\n tender['_attachments'][filename] = {\n \"content_type\": data.type,\n \"data\": b64encode(data.file.read())\n }\n patch = make_patch(tender.serialize(\"plain\"), src).patch\n tender.revisions.append(revision({'changes': patch}))\n try:\n tender.store(self.db)\n except Exception, e:\n return self.request.errors.add('body', 'data', str(e))\n return {'data': document.serialize(\"view\")}"
] | [
"0.62436914",
"0.59777683",
"0.59250724",
"0.57655126",
"0.5627985",
"0.5609621",
"0.5494929",
"0.54716134",
"0.54674774",
"0.53962153",
"0.53697306",
"0.5364998",
"0.53576803",
"0.531556",
"0.52292156",
"0.5151667",
"0.5141445",
"0.51180977",
"0.5107516",
"0.50661355",
"0.5015498",
"0.5013278",
"0.5004967",
"0.4973413",
"0.49719343",
"0.49474177",
"0.494058",
"0.4932681",
"0.49282208",
"0.49194726"
] | 0.73739046 | 0 |
Extract thread ID from `[]` | def get_thread_id(self):
        # Lookbehind grabs the word/slash/hyphen token right after '[' in the subject;
        # a raw string avoids the invalid-escape warning for '\[' on modern Python.
        l = re.findall(r'(?<=\[)[\w/-]+', self.subject)
        return l[0] if l else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_thread_id() -> str:\n with open('post_id.txt', 'r') as files:\n thread_id = files.read()\n\n return thread_id",
"def thread_id(self):\n return self._thread_id",
"def get_id(self):\n for id, thread in threading._active.items(): \n if thread is self: \n return id",
"def get_id(self):\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id",
"def get_id(self):\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id",
"def _get_thread_id() -> int:\n # NOTICE:\n # we do not use threading.get_ident() to identify a thread, as Python recycles these identifiers\n return id(threading.current_thread())",
"def read_thread(thread_num):\n pass# TODO",
"def thread_id_filter(record):\n record.thread_id = threading.get_native_id()\n return record",
"def get_name(thread_id):\r\n for thread in threading.enumerate():\r\n if thread.ident == thread_id:\r\n return thread.name",
"def get_thread_for_message(id):\n query = 'SELECT thread_id from messages WHERE id like %s'\n return __perform__(query, (id,), method='fetchone')",
"def get_build_id(build_line):\n match = re.search(r'\\[(\\d+)\\]', build_line)\n if match:\n return match.group(1)",
"def zend_thread_id():\n raise NotImplementedError()",
"def process_id_from(self):\r\n return self._tokens[1]",
"def _get_job_id(self) -> str:\n return self.split_name[2][3:]",
"def _extract_thread_stack_trace(\n self, thread: str, lines: List[str]\n ) -> Optional[List[str]]:\n thread_str = f\"Thread {thread} \"\n i: int = 0\n while i < len(lines) and thread_str not in lines[i]:\n i += 1\n if i != len(lines) and thread_str in lines[i]:\n j: int = i\n while j < len(lines) and lines[j] != \"\\n\":\n j += 1\n start = i - 1\n end = j\n return lines[start:end]\n return None",
"def src_task_id(self):\n return struct.unpack('<H', self.pkt.payload[4:6])[0]",
"def getSpimThreadID(self):\n\n\t\ttry:\n\t\t\tapp = api.getDesktopObject()\n\n\t\t\t# Eliminate items with a \"None\" name - these cause the list comprehension to fail.\n\t\t\tapp = filter(lambda x: x.name != None, app.children)\n\t\t\tapp = filter(lambda x: x.name[0:6] == \"PCSpim\", app)[0] # Drill down to the app itself\n\n\t\t\treturn app.windowThreadID\n\t\texcept:\n\t\t\treturn -1 # failure",
"def tag_ids(self, convthread_id=None):\n if None == convthread_id:\n return [tag[0] for tag in self.dfs[\"tags\"][[\"tag_id\"]].values]\n else :\n df = self.dfs[\"convthreads_with_tags\"]\n tag_records = df[df.convthread_id == convthread_id]\n return tag_records[\"tag_id\"].values",
"def log_threadid(msg):\n thread_id = threading.currentThread().ident\n logger.debug(\"Msg: %s, ThreadID: %s\", msg, thread_id)",
"def convthread(self, convthread_id):\n\n df = self.dfs[\"convthreads\"]\n tag_records = df[df.id == convthread_id]\n if 1 == len(tag_records): \n return tag_records.values[0]\n elif 1 < len(tag_records): \n raise Exception(\"More than one record exist by convthread_id\")\n else :\n import warnings\n warnings.warn(\"No record matched with convthread_id\", Warning)\n return None",
"def _get_my_tid(self):\n\t\tif not self.isAlive():\n\t\t\traise threading.ThreadError(\"the thread is not active\")\n\t\t# do we have it cached?\n\t\tif hasattr(self, \"_thread_id\"):\n\t\t\treturn self._thread_id\n\t\t# no, look for it in the _active dict\n\t\tfor tid, tobj in threading._active.items():\n\t\t\tif tobj is self:\n\t\t\t\tself._thread_id = tid\n\t\t\t\treturn tid\n\t\traise AssertionError(\"could not determine the thread's id\")",
"def get_thread(self):\n return self.threads[self.thread_id]",
"def extract_current_thread(maybe_thread_str: str) -> Optional[str]:\n match = CURRENT_THREAD_RE.search(maybe_thread_str)\n if match is not None:\n return match.group(1)\n return None",
"def get_jobs_id(self, ti) -> None:\n return self.get_hook().get_jobs_id(ti)",
"def get_threadbased_id(guarantee_uniq=False):\n\n return '{}:{}:{}:{}'.format(platform.node(), os.getpid(), str(threading.get_ident()),uuid.uuid4().hex if guarantee_uniq else '')",
"def extract_term_id( text ):\n if ('[' in text) and (']' in text):\n term_id = text.split('[')[1].split(']')[0]\n elif re.match(INT_IN_STRING, text):\n term_id = text\n else:\n term_id = text\n return term_id",
"def get_task_id(tag):\n cmd = [\"rally\", \"task\", \"list\", \"--tag\", tag, \"--uuids-only\"]\n output = subprocess.check_output(cmd).decode(\"utf-8\").rstrip()\n LOGGER.info(\"%s: %s\", \" \".join(cmd), output)\n return output",
"def _get_my_tid(self):\n if not self.isAlive():\n raise threading.ThreadError(\"the thread is not active\")\n\n # do we have it cached?\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n\n # no, look for it in the _active dict\n for tid, tobj in threading._active.items():\n if tobj is self:\n self._thread_id = tid\n return tid\n\n raise AssertionError(\"could not determine the thread's id\")",
"def _get_my_tid(self):\n if not self.is_alive():\n raise threading.ThreadError(\"the thread is not active\")\n\n # do we have it cached?\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n\n # no, look for it in the _active dict\n for tid, tobj in threading._active.items():\n if tobj is self:\n self._thread_id = tid\n return tid\n\n raise AssertionError(\"could not determine the thread's id\")",
"def getjobid(self, job_dir):\n input = os.path.join(job_dir,'job_id.txt')\n result = None\n if os.path.exists(input):\n contents = file(input).read()\n for c in contents.split('\\n'):\n if c and re.match('^Job <\\\\d*> is submitted to queue <.*>',c) is not None:\n try:\n result = c.split('<')[1].split('>')[0]\n except Exception as e:\n print('Job ID parsing error',str(e),c, file=sys.stderr)\n return result"
] | [
"0.6504339",
"0.63808286",
"0.6294525",
"0.6185133",
"0.6185133",
"0.6119811",
"0.59977484",
"0.5948242",
"0.59146464",
"0.5836307",
"0.57658243",
"0.5746452",
"0.5704008",
"0.56813455",
"0.5670495",
"0.56559366",
"0.56556565",
"0.56350636",
"0.5596078",
"0.5571945",
"0.55274785",
"0.55266434",
"0.55083597",
"0.5505245",
"0.54852724",
"0.5484048",
"0.54700476",
"0.5438917",
"0.54273343",
"0.54080856"
] | 0.7584884 | 0 |
Fetches all expenses for the house | def expenses(self):
return Expenses.objects.filter(
house=self.house,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)",
"def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports",
"def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list",
"def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])",
"def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)",
"def return_expenses():\r\n g.db.execute(\"SELECT * FROM monthly_data ORDER BY Sr\")\r\n rows = g.db.fetchall()\r\n data = []\r\n for x in rows:\r\n data.append({'sr':x[0],'name':x[1], 'id':x[2], 'item':x[3], 'price':x[5], 'date':x[4]})\r\n return jsonify(data)",
"def get_expense(self, resp):\n expense = resp['expense']\n expense_obj = Expense()\n expense_obj.set_expense_id(expense['expense_id'])\n expense_obj.set_expense_item_id(expense['expense_item_id'])\n expense_obj.set_account_id(expense['account_id'])\n expense_obj.set_account_name(expense['account_name'])\n expense_obj.set_paid_through_account_id(expense[\\\n 'paid_through_account_id'])\n expense_obj.set_paid_through_account_name(expense[\\\n 'paid_through_account_name'])\n expense_obj.set_vendor_id(expense['vendor_id'])\n expense_obj.set_vendor_name(expense['vendor_name'])\n expense_obj.set_date(expense['date'])\n expense_obj.set_tax_id(expense['tax_id'])\n expense_obj.set_tax_name(expense['tax_name'])\n expense_obj.set_tax_percentage(expense['tax_percentage'])\n expense_obj.set_currency_id(expense['currency_id'])\n expense_obj.set_currency_code(expense['currency_code'])\n expense_obj.set_exchange_rate(expense['exchange_rate'])\n expense_obj.set_tax_amount(expense['tax_amount'])\n expense_obj.set_sub_total(expense['sub_total'])\n expense_obj.set_total(expense['total'])\n expense_obj.set_bcy_total(expense['bcy_total'])\n expense_obj.set_amount(expense['amount'])\n expense_obj.set_is_inclusive_tax(expense['is_inclusive_tax'])\n expense_obj.set_reference_number(expense['reference_number'])\n expense_obj.set_description(expense['description'])\n expense_obj.set_is_billable(expense['is_billable'])\n expense_obj.set_customer_id(expense['customer_id'])\n expense_obj.set_customer_name(expense['customer_name'])\n expense_obj.set_expense_receipt_name(expense['expense_receipt_name'])\n expense_obj.set_created_time(expense['created_time'])\n expense_obj.set_last_modified_time(expense['last_modified_time'])\n expense_obj.set_status(expense['status'])\n expense_obj.set_invoice_id(expense['invoice_id'])\n expense_obj.set_invoice_number(expense['invoice_number'])\n expense_obj.set_project_id(expense['project_id'])\n expense_obj.set_project_name(expense['project_name'])\n return expense_obj",
"def set_list_of_expenses(self):\n fix_exp = DB.get_fixed_expenses(self.customer.email)\n var_exp = DB.get_variable_expenses(self.customer.email)\n self.listOfExpensesSEK.item(2).setText(str(fix_exp[\"subscription\"]))\n self.listOfExpensesSEK.item(3).setText(str(fix_exp[\"insurance\"]))\n self.listOfExpensesSEK.item(4).setText(str(fix_exp[\"rent\"]))\n self.listOfExpensesSEK.item(5).setText(str(fix_exp[\"others\"]))\n\n self.listOfExpensesSEK.item(11).setText(str(var_exp[\"food\"]))\n self.listOfExpensesSEK.item(12).setText(str(var_exp[\"bills\"]))\n self.listOfExpensesSEK.item(13).setText(str(var_exp[\"transportation\"]))\n self.listOfExpensesSEK.item(14).setText(str(var_exp[\"hygien\"]))\n self.listOfExpensesSEK.item(15).setText(str(var_exp[\"clothes\"]))\n self.listOfExpensesSEK.item(16).setText(str(var_exp[\"entertainment\"]))\n self.listOfExpensesSEK.item(17).setText(str(var_exp[\"others\"]))",
"def set_list_of_expenses(self):\n fix_exp = DB.get_fixed_expenses(self.customer.email)\n var_exp = DB.get_variable_expenses(self.customer.email)\n self.listOfExpensesSEK.item(2).setText(str(fix_exp[\"subscription\"]))\n self.listOfExpensesSEK.item(3).setText(str(fix_exp[\"insurance\"]))\n self.listOfExpensesSEK.item(4).setText(str(fix_exp[\"rent\"]))\n self.listOfExpensesSEK.item(5).setText(str(fix_exp[\"others\"]))\n\n self.listOfExpensesSEK.item(11).setText(str(var_exp[\"food\"]))\n self.listOfExpensesSEK.item(12).setText(str(var_exp[\"bills\"]))\n self.listOfExpensesSEK.item(13).setText(str(var_exp[\"transportation\"]))\n self.listOfExpensesSEK.item(14).setText(str(var_exp[\"hygien\"]))\n self.listOfExpensesSEK.item(15).setText(str(var_exp[\"clothes\"]))\n self.listOfExpensesSEK.item(16).setText(str(var_exp[\"entertainment\"]))\n self.listOfExpensesSEK.item(17).setText(str(var_exp[\"others\"]))",
"def get(self, updated_at=None, settled_at=None, reimbursed_at=None, approved_at=None, state=None, offset=None,\n verified=None, limit=None, fund_source=None, settlement_id=None):\n return self._get_request({\n 'updated_at': updated_at,\n 'offset': offset,\n 'limit': limit,\n 'settled_at': settled_at,\n 'reimbursed_at': reimbursed_at,\n 'approved_at': approved_at,\n 'state': state,\n 'verified': verified,\n 'fund_source': fund_source,\n 'settlement_id': settlement_id\n }, Expenses.GET_EXPENSES)",
"def get(self, expense_id):\n url = base_url + expense_id\n resp = zoho_http_client.get(url, self.details, self.headers)\n return parser.get_expense(resp)",
"def get_all(self, settlement_id=None, updated_at=None, settled_at=None, reimbursed_at=None, approved_at=None,\n state=None, verified=None, fund_source=None):\n expenses = []\n\n if settlement_id and len(settlement_id) > 40:\n pages = range(0, len(settlement_id), 40)\n chunks = []\n\n for i in range(0, len(pages)-1):\n chunks.append(settlement_id[pages[i]:pages[i+1]])\n chunks.append(settlement_id[pages[len(pages)-1]:])\n\n for chunk in chunks:\n count = self.count(settlement_id=chunk, updated_at=updated_at, settled_at=settled_at,\n reimbursed_at=reimbursed_at, approved_at=approved_at, state=state,\n verified=verified, fund_source=fund_source)['count']\n page_size = 300\n for i in range(0, count, page_size):\n segment = self.get(\n offset=i, limit=page_size, settlement_id=chunk, updated_at=updated_at,\n settled_at=settled_at,\n reimbursed_at=reimbursed_at, approved_at=approved_at, state=state,\n verified=verified, fund_source=fund_source\n )\n expenses = expenses + segment['data']\n return expenses\n\n count = self.count(settlement_id=settlement_id, updated_at=updated_at, settled_at=settled_at,\n reimbursed_at=reimbursed_at, approved_at=approved_at, state=state,\n verified=verified, fund_source=fund_source)['count']\n page_size = 300\n for i in range(0, count, page_size):\n segment = self.get(\n offset=i, limit=page_size, settlement_id=settlement_id, updated_at=updated_at, settled_at=settled_at,\n reimbursed_at=reimbursed_at, approved_at=approved_at, state=state,\n verified=verified, fund_source=fund_source\n )\n expenses = expenses + segment['data']\n return expenses",
"def test_get_all_upcoming_expenses(self):\n print()\n print(\"Get all expenses will still occur\")\n user = CustomUser.objects.get(username = \"Test User\")\n actual_result = get_all_upcoming_budget_expenses(user = user)\n for ele in actual_result:\n print(ele)\n expected_result = [ BudgetExpense.objects.get(id=100),\n BudgetExpense.objects.get(id=150), \n BudgetExpense.objects.get(id=200), \n BudgetExpense.objects.get(id=600), \n BudgetExpense.objects.get(id=700),\n BudgetExpense.objects.get(id=500),\n BudgetExpense.objects.get(id=800)]\n print(\"====================\")\n print()\n self.assertEquals(expected_result, list(actual_result))",
"def get_expenses(budget):\n return sum(expense['bgt'] for expense in budget['spend'])",
"def get_queryset(self):\n user = self.request.user\n expenses = Expense.objects.filter(\n Q(userexpense__in=user.userexpense_set.all())\n | Q(group__in=user.group_set.all()))\n\n if self.request.query_params.get('q', None) is not None:\n expenses = expenses.filter(\n description__icontains=self.request.query_params.get(\n 'q', None))\n return expenses",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado",
"def get_offers(street_id: int, house_number: str) -> List[Dict[str, Any]]:\n\n url = 'https://api.n1.ru/api/v1/offers/'\n params = _offers_params.copy()\n params['filter_or[addresses][0][street_id]'] = street_id\n params['filter_or[addresses][0][house_number]'] = house_number\n offset, count, offers = 0, 1, []\n\n while offset < count: # while do\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n count = response['metadata']['resultset']['count']\n except requests.RequestException as e:\n raise ParserException(\n f'Fail make request. street_id: {street_id}, house_number: {house_number}'\n ) from e\n except KeyError as e:\n raise ParserException('It was not possible to get the number of offers') from e\n \n offers.extend(response.get('result', []))\n offset += 25\n time.sleep(0.5)\n \n return offers",
"def refresh_offenses(record):\n for jurisdiction, header in constants.OFFENSE_HEADERS:\n offenses = record.data.get(header, {})\n\n # delete existing offenses in this jurisdiction\n record.offenses.filter(jurisdiction=jurisdiction).delete()\n for data_offense in offenses:\n offense = record.offenses.create(\n jurisdiction=jurisdiction,\n disposed_on=data_offense.get(\"Disposed On\", None),\n disposition_method=data_offense.get(\"Disposition Method\", \"\"),\n plea=data_offense.get(\"Plea\", \"\"),\n verdict=data_offense.get(\"Verdict\", \"\"),\n )\n for data_offense_record in data_offense.get(\"Records\", []):\n offense.offense_records.create(\n count=data_offense_record.get(\"Count\"),\n law=data_offense_record.get(\"Law\", \"\"),\n action=data_offense_record.get(\"Action\", \"\"),\n severity=data_offense_record.get(\"Severity\", \"\"),\n description=data_offense_record.get(\"Description\", \"\"),\n )",
"def test_get_expenses_amount(self) -> None:\n category = Category.objects.first()\n result = get_expenses_amount(category)\n self.assertEqual(\n result, Expense.objects.filter(category=category).count()\n )",
"def get_by_id(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_BY_ID.format(expense_id))",
"def living_expenses(self):\n # Prepare arguments for call to `living_expenses_strategy`\n # NOTE: This is a pretty brittle way to determine the\n # retirement year. Issues #15 and #28 will require this\n # code to be changed in a future version.\n retirement_year = min(\n person.retirement_date.year for person in self.people)\n return self.living_expenses_strategy(\n year=self.this_year,\n people=self.people,\n retirement_year=retirement_year)",
"def form_expensive_list_goods(self): \n\n self.database.truncate_all_tables()\n\n self.database.add(GoodInfo(\"рыба мороженая, Кета 1кг\", \n \"400\", \"5\", \"2020-12-30\", \"90\", \"2020-12-30\"))\n \n most_expensive_test_list = self.database.get_all_goods()\n\n\n return most_expensive_test_list",
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def get_annual_energy_demand(cfg):\n houses_dict = cfg['houses']\n houses_list = sorted(houses_dict.keys())\n\n # Calculate annual energy demand of houses\n # and store the result in the dict containing the house info\n for house_name in houses_list:\n house_type = houses_dict[house_name]['house_type']\n N_Pers = houses_dict[house_name].get('N_Pers', None)\n N_WE = houses_dict[house_name].get('N_WE', None)\n\n # Assign defaults if values are not defined\n if house_type == 'EFH' and pd.isna(N_Pers):\n N_Pers = 3\n houses_dict[house_name]['N_Pers'] = N_Pers\n logger.warning('N_Pers not defined for ' + str(house_name)\n + '. Using default ' + str(N_Pers))\n if house_type == 'MFH' and pd.isna(N_WE):\n N_WE = 2\n houses_dict[house_name]['N_WE'] = N_WE\n logger.warning('N_WE not defined for ' + str(house_name)\n + '. Using default ' + str(N_WE))\n\n # Implement the restrictions defined on page 3:\n if house_type == 'EFH' and N_Pers > 12:\n logger.warning('VDI 4655 is only defined for N_Pers <= 12. '\n + str(house_name) + ' uses N_Pers = ' + str(N_Pers)\n + '. Proceeding with your input...')\n if house_type == 'MFH' and N_WE > 40:\n logger.warning('VDI 4655 is only defined for N_WE <= 40. '\n + str(house_name) + ' uses N_WE = ' + str(N_WE)\n + '. Proceeding with your input...')\n\n # Calculate annual energy demand estimates\n if house_type == 'EFH':\n # (6.2.2) Calculate annual electrical energy demand of houses:\n if N_Pers < 3:\n W_a = N_Pers * 2000 # kWh\n elif N_Pers <= 6:\n W_a = N_Pers * 1750 # kWh\n else:\n W_a = N_Pers * 1500 # kWh\n\n # (6.2.3) Calculate annual DHW energy demand of houses:\n Q_TWW_a = N_Pers * 500 # kWh\n\n elif house_type == 'MFH':\n # (6.2.2) Calculate annual electrical energy demand of houses:\n W_a = N_WE * 3000 # kWh\n\n # (6.2.3) Calculate annual DHW energy demand of houses:\n Q_TWW_a = N_WE * 1000 # kWh\n\n else:\n # No house category given. Just use annual demand of 1 kWh\n W_a = 1\n Q_TWW_a = 1\n\n # If W_a and/or Q_TWW_a were already defined by the user in the yaml\n # file, we use those values instead of the calculated ones:\n W_a = houses_dict[house_name].get('W_a', W_a)\n Q_TWW_a = houses_dict[house_name].get('Q_TWW_a', Q_TWW_a)\n\n # Store the results in the dict\n houses_dict[house_name]['W_a'] = W_a\n houses_dict[house_name]['Q_TWW_a'] = Q_TWW_a\n\n # Assign defaults if values are not defined\n if houses_dict[house_name].get('Q_Heiz_a', None) is None:\n Q_Heiz_a = 1 # kWh\n houses_dict[house_name]['Q_Heiz_a'] = Q_Heiz_a\n logger.warning('Q_Heiz_a not defined for ' + house_name\n + '. Using default ' + str(Q_Heiz_a) + ' kWh')\n\n # Apply the adjustment factors\n houses_dict[house_name]['Q_Heiz_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_Q_Heiz', 1)\n\n houses_dict[house_name]['W_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_W', 1)\n\n houses_dict[house_name]['Q_TWW_a'] *= \\\n cfg.get('adjustment_factors', dict()).get('f_Q_TWW', 1)\n\n return houses_dict",
"def iter_all(self):\n return self.opportunities.find()",
"def get_daily_energy_demand_houses(houses_dict, cfg):\n settings = cfg['settings']\n typtage_combinations = settings['typtage_combinations']\n houses_list = settings['houses_list_VDI']\n\n # Load the file containing the energy factors of the different typical\n # radiation year (TRY) regions, house types and 'typtage'. In VDI 4655,\n # these are the tables 10 to 24.\n # For the 'noarch' conda build, access the file as pkg resource object\n with pkg_resources.resource_stream('lpagg', cfg['data']['energy_factors']\n ) as resource:\n energy_factors_df = pd.read_excel(resource,\n sheet_name='Faktoren',\n index_col=[0, 1, 2])\n\n if settings.get('zero_summer_heat_demand', None) is not None:\n # Reduze the value of 'F_Heiz_TT' to zero.\n # For modern houses, this eliminates the heat demand in summer\n energy_factors_df.loc[(slice(None), slice(None), 'F_Heiz_TT'),\n ('SWX', 'SSX')] = 0\n\n # Create a new DataFrame with multiindex.\n # It has two levels of columns: houses and energy\n # The DataFrame stores the individual energy demands for each house in\n # each time step\n energy_demands_types = ['Q_Heiz_TT', 'W_TT', 'Q_TWW_TT']\n settings['energy_demands_types'] = energy_demands_types\n iterables = [houses_dict.keys(), energy_demands_types]\n multiindex = pd.MultiIndex.from_product(iterables, names=['house',\n 'energy'])\n daily_energy_demand_houses = pd.DataFrame(index=multiindex,\n columns=typtage_combinations)\n\n # Fill the DataFrame daily_energy_demand_houses\n for house_name in houses_list:\n house_type = houses_dict[house_name]['house_type']\n N_Pers = houses_dict[house_name]['N_Pers']\n N_WE = houses_dict[house_name]['N_WE']\n try:\n TRY = houses_dict[house_name]['TRY']\n except KeyError:\n raise KeyError('Key \"TRY\" (Region) missing from house '+house_name)\n\n # Savety check:\n if TRY not in energy_factors_df.index.get_level_values(0):\n logger.error('Error! TRY '+str(TRY)+' not contained in file ' +\n cfg['data']['energy_factors'])\n logger.error(' Skipping house \"'+house_name+'\"!')\n continue # 'Continue' skips the rest of the current for-loop\n\n # Get yearly energy demands\n Q_Heiz_a = houses_dict[house_name]['Q_Heiz_a']\n W_a = houses_dict[house_name]['W_a']\n Q_TWW_a = houses_dict[house_name]['Q_TWW_a']\n\n # (6.4) Do calculations according to VDI 4655 for each 'typtag'\n for typtag in typtage_combinations:\n F_Heiz_TT = energy_factors_df.loc[TRY, house_type,\n 'F_Heiz_TT'][typtag]\n F_el_TT = energy_factors_df.loc[TRY, house_type, 'F_el_TT'][typtag]\n F_TWW_TT = energy_factors_df.loc[TRY, house_type,\n 'F_TWW_TT'][typtag]\n\n Q_Heiz_TT = Q_Heiz_a * F_Heiz_TT\n\n if house_type == 'EFH':\n N_Pers_WE = N_Pers\n elif house_type == 'MFH':\n N_Pers_WE = N_WE\n\n W_TT = W_a * (1.0/365.0 + N_Pers_WE * F_el_TT)\n Q_TWW_TT = Q_TWW_a * (1.0/365.0 + N_Pers_WE * F_TWW_TT)\n\n if W_TT < 0:\n logger.warning('Warning: W_TT for '+house_name+' and ' +\n typtag + ' was negative, see VDI 4655 page 16')\n W_TT = W_a * (1.0/365.0 + N_Pers_WE * 0)\n\n if Q_TWW_TT < 0:\n logger.warning('Warning: Q_TWW_TT for '+house_name+' and ' +\n typtag + ' was negative, see VDI 4655 page 16')\n Q_TWW_TT = Q_TWW_a * (1.0/365.0 + N_Pers_WE * 0)\n\n # Write values into DataFrame\n daily_energy_demand_houses.loc[house_name,\n 'Q_Heiz_TT'][typtag] = Q_Heiz_TT\n daily_energy_demand_houses.loc[house_name,\n 'W_TT'][typtag] = W_TT\n daily_energy_demand_houses.loc[house_name,\n 'Q_TWW_TT'][typtag] = Q_TWW_TT\n\n# print(daily_energy_demand_houses)\n return daily_energy_demand_houses",
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def employees_earning(table):\n\n product_index = 1\n employee_id_index = 2\n amount_sold_index = 4\n\n person_id_index = 0\n person_name_index = 1\n\n game_index = 0\n price_index = 3\n\n store_table = store.get_table()\n store.check_table(store_table)\n hr_table = hr.get_table('model/hr/persons.csv')\n money_earned = {}\n for person in hr_table:\n person_id = person[person_id_index]\n person_name = person[person_name_index]\n money_earned[person_name] = 0\n for record in table:\n product_id = record[product_index]\n employee_id = record[employee_id_index]\n amount_sold = int(record[amount_sold_index])\n if person_id == employee_id:\n for game in store_table:\n game_id = game[game_index]\n if game_id == product_id:\n game_price = int(game[price_index])\n money_earned[person_name] += int(amount_sold * game_price)\n return money_earned",
"def get_expenses_for_rows(df, stor_exp_data_path, stor_data_path, budg_path, bankconfig):\n print(\"\\nIterating your transactions. If you want to quit halfway, type ctrl c to save!\\n\")\n\n # initialize the objects for tracking changes\n exp_stor_db = data_help.read_jsonFile(stor_exp_data_path)\n stor_db = data_help.read_jsonFile(stor_data_path)\n budg_db = data_help.read_jsonFile(budg_path)\n try:\n for idx, row in df.iterrows():\n # iterate through only the data which has no expenses declared.\n if pd.isnull(row[env.EXPENSE]):\n # get relevant expenses for that month set by the user.\n month_end_date = util.get_month_from_timestamp(\n row[env.DATE], start=False)\n if type(row[env.BANK_STORENAME]) is str:\n match = bankconfig.regex_str.search(row[env.BANK_STORENAME])\n\n if match:\n\n processed_text = util.process_text(match.group(0))\n print(\n f\"Was able to filter - {row[env.BANK_STORENAME]} -> {processed_text}\")\n storename = processed_text\n\n else:\n print(f\"Unable to filter - {row[env.BANK_STORENAME]}\")\n storename = row[env.BANK_STORENAME]\n \n else: # default case use empty str\n print(\"No storename exists for this transaction.\")\n storename = \"\"\n\n print(\"Curr Transaction: %-10s | %-10s | %-10s | %-10s \" %\n (row[env.DATE], row[env.AMOUNT], storename, row[env.TYPE]))\n selected_exp, exp_stor_db, stor_db, storename = search_store_relationships(storename, exp_stor_db, budg_db[month_end_date],\n stor_exp_data_path, stor_db, stor_data_path)\n df.at[idx, env.FILT_STORENAME] = storename\n df.at[idx, env.EXPENSE] = selected_exp\n\n except KeyboardInterrupt:\n print(\"\\n\\nQuitting to main menu. Your data inputs will be saved, and you can resume where you left off by restarting and selecting 'v' for view data!\\n\")\n\n return df",
"def get_all_desserts():\n return get_data_from_category_name(\"Desserts\")"
] | [
"0.75546044",
"0.70506036",
"0.6988394",
"0.6264646",
"0.61877763",
"0.6160968",
"0.6022803",
"0.5791499",
"0.5791499",
"0.5714145",
"0.5632406",
"0.55389637",
"0.5473047",
"0.5467186",
"0.54195464",
"0.5417473",
"0.540191",
"0.53685397",
"0.5358989",
"0.5358788",
"0.5358726",
"0.5346249",
"0.53134024",
"0.53094774",
"0.5298802",
"0.5273003",
"0.5267964",
"0.52623844",
"0.5258102",
"0.5235071"
] | 0.7608281 | 0 |
Fetches all approved jobs for the house | def approved_jobs(self):
return Job.objects.filter(
house=self.house,
approved=True,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n pass",
"async def get_jobs(): \n return mngr.getAllJobs()",
"def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())",
"def get_jobs_list(self, response):\n pass",
"def get_queryset(self):\n jobs = Job.objects.all()\n if not self.kwargs.get(\"pk\"):\n jobs = jobs.filter(\n status=choices.APPROVED, submission_deadline__gte=datetime.today()\n ).order_by(\"-created_at\")\n if not self.request.user.is_anonymous():\n # if user is logged in, exclude his/her applied jobs.\n # also append ignored jobs at the end of job listing.\n jobs = jobs.exclude(\n ~Q(application__state=\"ignored\"),\n application__user=self.request.user,\n ).order_by(\"-created_at\")\n\n if self.request.user.user_type == User.PERSON:\n # If user is of type \"person\",\n # show only jobs related to his/her gender along with not_specified jobs.\n if self.request.user.person.gender != \"NS\":\n jobs = jobs.filter(\n required_gender__in=[\n self.request.user.person.gender,\n choices.NOT_SPECIFIED,\n ]\n )\n return jobs",
"def get_jobs(self, *, params: Optional[dict] = None) -> \"resource_types.Jobs\":\n\n return communicator.Jobs(self.__requester).fetch(parameters=params)",
"def list(self, request):\n jobs = Job.objects.all()\n\n city = self.request.query_params.get('city', None)\n state = self.request.query_params.get('state', None)\n\n # Support filtering jobs by user id\n job = self.request.query_params.get('user', None)\n if job is not None:\n jobs = jobs.filter(user=request.user)\n\n if city is not None:\n jobs = jobs.filter(city=city)\n\n if state is not None:\n jobs = jobs.filter(state=state)\n\n serializer = JobSerializer(\n jobs, many=True, context={'request': request})\n return Response(serializer.data)",
"def get_pr_jobs():\n res = requests.get(\n uri + \"/view/Pull%20Requests/api/json\",\n headers={\"accept\": \"application/json\"},\n auth=requests.auth.HTTPBasicAuth(user, password),\n verify=verify,\n )\n if res.status_code != 200:\n raise RuntimeError(\"Received non 200 status code from jenkins\")\n data = res.json()\n for job in data[\"jobs\"]:\n yield job",
"def get_all_jobs(self):\n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n # for job in all_jobs:\n # job.check_exists()\n\n # get the list of jobs listed in the database as running and update them.\n dbrunning = all_jobs.filter(state__in=['in queue', 'started'])\n for runningjob in dbrunning: runningjob.update();\n\n # get the updated list \n all_jobs = self.job_set.all().order_by(\"-time_last_updated\", \"project__name\", \"-id\")\n\n return all_jobs",
"def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n res = []\n query = QSqlQuery()\n q = \"select id, hours, price, job from jobs\"\n if bill_id > 0:\n q += \" where b_id=?\"\n q += \" order by id desc\"\n if limit > 0:\n q += \" limit ?\"\n query.prepare(q)\n if bill_id > 0:\n query.addBindValue(bill_id)\n if limit > 0:\n query.addBindValue(limit)\n query.exec_()\n while query.next():\n res.append(_extract_job(query))\n return res",
"def list_jobs(arn=None, nextToken=None):\n pass",
"def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))",
"def jobs(self):\n return self.get_jobs()",
"def listJobs():\n logger.debug('[FLASKWEB /jobs] Request for job listing')\n jobs = db.getJobs(numdays=2)\n for job in jobs:\n job['time'] = datetime.datetime.strptime(job['time'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if job['complete']:\n job['complete'] = datetime.datetime.strptime(job['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n\n # Garbage Collect Orpahened jobs\n compiles = db.getCompiles()\n for compile in compiles:\n if compile['submit']:\n compile['submit'] = datetime.datetime.strptime(compile['submit'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if compile['complete']:\n compile['complete'] = datetime.datetime.strptime(compile['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n # for c in compiles:\n # if c['uid'] not in compile_tasks.keys():\n # db.updateCompile(c['uid'], status='KILLED', done=True)\n # compiles = db.getCompiles()\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200\n else:\n return render_template(\"jobs.html\", joblist=jobs, compilelist=compiles)",
"def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"def getJobcardsAll(request):\n #GOLITODO add the extra field in models for the village and use it here for filtring\n if request.method == 'GET':\n jcEnd=request.GET.get('jobend', '')\n jcContains=request.GET.get('vcode', '')\n ptid=request.GET.get('ptid', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n\n if ptid == '':\n error = {\"response\": \"Sorry, you need to provide Panchayat id to get jobcards.\"}\n res = JsonResponse(error, safe=False)\n else:\n if jcContains == '':\n jobcards = Jobcard.objects.filter(panchayat__id = ptid, jobcard__endswith = jcEnd)\n else:\n jobcards = Jobcard.objects.filter(panchayat__id = ptid, jobcard__endswith = jcEnd, jobcard__icontains = jcContains)\n\n jobcards = jobcards[:limit]\n serializer = JobcardSerializer(jobcards, many=True)\n res = JsonResponse(serializer.data, safe=False)\n return res",
"def get_workflow_pending_approval_jobs(workflow_id, headers):\n\n for current_job in get_all_items(f\"/workflow/{workflow_id}/job\", headers):\n if (current_job.get(\"type\") == \"approval\") and (current_job.get(\"status\") == \"on_hold\"):\n yield current_job",
"def get_queryset(self):\n project = ProjectPermissionsMixin.get_object(self)\n object_list = project.jobs.all()\n\n object_list = self._get_status({}, object_list)\n object_list = self._get_method({}, object_list)\n object_list = self._get_users({}, project, object_list)\n\n return object_list.order_by(\"-id\")",
"def get_jobs(self):\n return list(self._jobs.values())",
"def get_queryset(self):\n return Job.objects.all()",
"def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]",
"def _get_jobs():\n return _get_bigquery_service().jobs()",
"def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs",
"def query(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be type list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n req = list()\n if len(jobs) > 1:\n for r in self._batch_request(jobs):\n req.append(\n ''.join([self._scheduler_endpoint, '?', '&'.join(r)]))\n else:\n req = \"{}?job={}\".format(\n self._scheduler_endpoint, jobs[0])\n\n try:\n ret = list()\n for resp in self._api_get(req):\n ret.extend(resp.json())\n return ret\n except HTTPError as e:\n raise JobClientError(e.message)",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"async def jobs(request):\n\n job_list = await get_jobs(request)\n return template('jobs.html',\n jobs=job_list)",
"def jobs(ctx, page):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n page = page or 1\n try:\n response = PolyaxonClient().experiment.list_jobs(\n user, project_name, _experiment, page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Jobs for experiment `{}`.'.format(_experiment))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment))\n\n objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))\n for o in response['results']]\n objects = list_dicts_to_tabulate(objects)\n if objects:\n Printer.print_header(\"Jobs:\")\n objects.pop('experiment', None)\n dict_tabulate(objects, is_list_dict=True)",
"def active_jobs():\n\n jobs = Job.get_all_active()\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, True, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"def get_jobs(self, age=1):\n jobs_for_reaper = []\n try: \n api_response = self.kube_v1_batch_client.list_namespaced_job(namespace=self.project, label_selector='job-origin=pman', include_uninitialized=True)\n for item in api_response.items:\n # Checking if job has finished running, either failed or succeeded\n if item.status.conditions and (item.status.failed or item.status.succeeded):\n # Using start_time because failed jobs have no completion_time\n start_time = item.status.start_time\n current_time = datetime.datetime.now(datetime.timezone.utc)\n diff = current_time-start_time\n # 86400 = number of seconds in a day. \"divmod\" returns quotient and remainder as tuple e.g (1, 5.74943)\n # means 1 day and 5.74943 sec have passed between current_time and start_time of the job\n diff_in_seconds = divmod(diff.total_seconds(), 86400)\n if diff_in_seconds[0] >= 1:\n jobs_for_reaper.append(item.metadata.name)\n \n except ApiException as e:\n print(\"Exception when calling BatchV1Api->list_namespaced_job: %s\\n\" % e)\n exit(1)\n return jobs_for_reaper",
"def GetJobs(self, bulk=False):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n\n if bulk:\n return self._SendRequest(HTTP_GET,\n \"/%s/jobs\" % GANETI_RAPI_VERSION,\n query, None)\n else:\n return [int(j[\"id\"])\n for j in self._SendRequest(HTTP_GET,\n \"/%s/jobs\" % GANETI_RAPI_VERSION,\n None, None)]"
] | [
"0.64975166",
"0.64335525",
"0.6395426",
"0.6390087",
"0.6355305",
"0.6228912",
"0.61817175",
"0.6170959",
"0.61449194",
"0.6067417",
"0.60418403",
"0.6040219",
"0.6010533",
"0.6010371",
"0.6001975",
"0.5995996",
"0.5973105",
"0.59290487",
"0.5902574",
"0.5896722",
"0.5885378",
"0.5855661",
"0.58426565",
"0.5835844",
"0.5834499",
"0.58325183",
"0.5831773",
"0.57786727",
"0.5746274",
"0.57456356"
] | 0.79753345 | 0 |
Checks if the house has any active jobs. | def has_active_jobs(self, **kwargs):
        # A job counts as active when it is approved and its annotated balance (balance1) is still positive.
        if Job.objects.add_balance().filter(house=self.house, balance1__gt=0, approved=True, **kwargs).exists():
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isEmpty(self):\n return len(self.jobs) == 0",
"def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e",
"def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True",
"def isEmpty(self):\n\t\tself.logger.debug('Check if queue job is empty')\n\t\tisEmpty = self.queue.empty()\n\t\tself.logger.debug('Queue job is empty ?: %s'%(isEmpty))\n\t\treturn isEmpty",
"def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False",
"def active(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/state\" % (\n self.sessionid, self.name))\n if resp.body == b'1':\n return True\n return False",
"def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True",
"def check(args, session: Session = NEW_SESSION) -> None:\n if args.allow_multiple and not args.limit > 1:\n raise SystemExit(\"To use option --allow-multiple, you must set the limit to a value greater than 1.\")\n if args.hostname and args.local:\n raise SystemExit(\"You can't use --hostname and --local at the same time\")\n\n query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())\n if args.job_type:\n query = query.where(Job.job_type == args.job_type)\n if args.hostname:\n query = query.where(Job.hostname == args.hostname)\n if args.local:\n query = query.where(Job.hostname == get_hostname())\n if args.limit > 0:\n query = query.limit(args.limit)\n\n alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]\n\n count_alive_jobs = len(alive_jobs)\n if count_alive_jobs == 0:\n raise SystemExit(\"No alive jobs found.\")\n if count_alive_jobs > 1 and not args.allow_multiple:\n raise SystemExit(f\"Found {count_alive_jobs} alive jobs. Expected only one.\")\n if count_alive_jobs == 1:\n print(\"Found one alive job.\")\n else:\n print(f\"Found {count_alive_jobs} alive jobs.\")",
"def should_keep_running(self):\n return len(self.party.active_users())",
"def has_pending_jobs(instance_properties, max_size):\n try:\n max_cluster_slots = max_size * instance_properties.get(\"slots\")\n pending_jobs = get_pending_jobs_info(max_slots_filter=max_cluster_slots, skip_if_state=SGE_HOLD_STATE)\n logging.info(\"Found the following pending jobs:\\n%s\", pending_jobs)\n return len(pending_jobs) > 0, False\n except Exception as e:\n log.error(\"Failed when checking for pending jobs with exception %s. Reporting no pending jobs.\", e)\n return False, True",
"def active(self):\n return len(self.queue) > 0",
"def pending_work(self) -> bool:\n return len(self.ongoing) > 0",
"def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')",
"def isJobRunning ( self ):\n #cmd = \"qstat \" + str(self.jobid)\n \n #magicString='Unknown Job Id' ### magicString _might_ need to be changed if Torque version changes\n #(output, error) = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n\n if self.ofile_exists(): #output.find(magicString) >=0 or redhawkStatsRe.search(output):\n self.status = \"finished\"\n return False\n \n\n return True",
"def is_alive(self):\n try:\n stdout, stderr = self.run(0, \"rabbitmqctl\", \"list_queues\")\n for lines in stdout, stderr:\n for line in lines:\n if \"no_exists\" in line:\n return False\n return True\n except Exception:\n return False",
"def is_done(self):\n return not any((agent.is_alive() for agent in self.agents))",
"def is_done(self):\n return not any(agent.is_alive() for agent in self.agents)",
"def is_done(self):\n return not any(agent.is_alive() for agent in self.agents)",
"def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False",
"def is_running(self):\n # do we have a job ID to work with?\n if self.jobid == None:\n return False\n else:\n q_status = self.queue.get_status(self.jobid)\n\n if q_status == self.queue.state[\"active\"]:\n self.meta[\"status\"] = 'PENDING'\n return True\n else:\n return False",
"def _include_job(self, job: Job) -> bool:\n if not super()._include_job(job):\n return False\n max_days = int(cast(str, self._parameter(\"inactive_job_days\")))\n actual_days = days_ago(self._latest_build_date_time(job))\n return actual_days > max_days",
"def has_rejected_jobs(self, **kwargs):\n if Job.objects.filter(house=self.house, rejected=True, **kwargs).exists():\n return True\n\n return False",
"def CheckJobComplete(self, name):\n request = self.messages.AiplatformProjectsLocationsCustomJobsGetRequest(\n name=name)\n response = self._service.Get(request)\n\n def ShouldContinue(periods_without_logs):\n if periods_without_logs <= 1:\n return True\n return response.endTime is None\n\n return ShouldContinue",
"def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()",
"def isEmpty(self):\n return len(self.worklist) == 0",
"def is_running(self):\n data = self._poll()\n return data.get('building', False)",
"def _check_jobs_submitted(status, module):\n\n submitted = False\n if module in status.data:\n jobs = status.data[module]\n for job in jobs.keys():\n if job != 'pipeline_index':\n submitted = True\n break\n return submitted",
"def is_job_running(self, condor_id):\n\n classads = self.get_classads(\"OSGRSVUniqueName==\\\"%s\\\"\" % condor_id)\n\n if classads is None:\n self.rsv.log(\"ERROR\", \"Could not determine if job is running\")\n return False\n\n for classad in classads:\n # We put the attribute into the classad in quotes, so search for it accordingly\n if classad[\"OSGRSVUniqueName\"] == '\"' + condor_id + '\"':\n return True\n\n return False",
"def test_matching_jobs_existing(self):\n self.assertEquals(\n self.query_api.get_matching_jobs(\n \"try\", \"146071751b1e\",\n 'Linux x86-64 try build'), json.loads(JOBS_SCHEDULE))",
"def is_empty(self):\n return len(self.__queue) > 0"
] | [
"0.7014045",
"0.66785365",
"0.657027",
"0.65535456",
"0.6546421",
"0.6463334",
"0.6436376",
"0.64134765",
"0.6412882",
"0.6411166",
"0.63975406",
"0.6359158",
"0.63180745",
"0.63032097",
"0.6299841",
"0.6295104",
"0.62680423",
"0.62680423",
"0.6257935",
"0.62511295",
"0.62323356",
"0.6211038",
"0.61633277",
"0.6153902",
"0.61534",
"0.6134543",
"0.61268586",
"0.6060436",
"0.60424733",
"0.6032355"
] | 0.7794764 | 0 |
Checks if the house has any rejected jobs. | def has_rejected_jobs(self, **kwargs):
if Job.objects.filter(house=self.house, rejected=True, **kwargs).exists():
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_job_completeness(self, jobs):\n for job in concurrent.futures.as_completed(jobs):\n if job.exception():\n raise (job.exception())",
"def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e",
"def has_pending_jobs(instance_properties, max_size):\n try:\n max_cluster_slots = max_size * instance_properties.get(\"slots\")\n pending_jobs = get_pending_jobs_info(max_slots_filter=max_cluster_slots, skip_if_state=SGE_HOLD_STATE)\n logging.info(\"Found the following pending jobs:\\n%s\", pending_jobs)\n return len(pending_jobs) > 0, False\n except Exception as e:\n log.error(\"Failed when checking for pending jobs with exception %s. Reporting no pending jobs.\", e)\n return False, True",
"def can_fit_more(self):\n\n return len(self._requeue_jobs) < MAX_NUM",
"def jobHealthy(self, count):\n job = self.tester.submission_result.job\n for idx in range(count - 1):\n if (job.health == 'healthy'):\n return True\n print(\"health check fail : %d\" % idx )\n time.sleep(1)\n job.refresh()\n self.assertEqual('healthy', job.health)\n return False",
"def check_missed_job_completion_notifications(self):\n logger.info(\"Checking for missed job completion notifications\")\n #ten_min_ago = int((time.time() - 600) * 1e6)\n operating = self.instances.find({\n #'mtime': {'$lt': ten_min_ago},\n 'operation' : {'$exists': True, '$ne': None}\n })\n\n for fix_doc in operating:\n service = self.axops_client.get_service(fix_doc['operation']['id'])\n if ServiceStatus.completed(service['status']):\n # Keep this consistent with expectation in process_action_result() and axops/service/service.go\n payload = {\n \"id\": service['id'],\n \"name\": service['name'],\n \"status\": service['status'],\n \"annotations\": service.get('annotations', {}),\n \"user\": service['user']\n }\n try:\n logger.info(\"Detected missed job notification: %s\", payload)\n self.process_action_result(payload)\n except Exception:\n logger.exception(\"Failed to process completion event\")",
"def _check_jobs_submitted(status, module):\n\n submitted = False\n if module in status.data:\n jobs = status.data[module]\n for job in jobs.keys():\n if job != 'pipeline_index':\n submitted = True\n break\n return submitted",
"def check_auto_reject(self):\r\n for pr in self:\r\n if not pr.line_ids.filtered(lambda l: l.cancelled is False):\r\n pr.write({'state': 'rejected'})",
"def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True",
"def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled tasks completed and removed.\n assert len(state.schedule) == 0",
"def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")",
"def check(args, session: Session = NEW_SESSION) -> None:\n if args.allow_multiple and not args.limit > 1:\n raise SystemExit(\"To use option --allow-multiple, you must set the limit to a value greater than 1.\")\n if args.hostname and args.local:\n raise SystemExit(\"You can't use --hostname and --local at the same time\")\n\n query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())\n if args.job_type:\n query = query.where(Job.job_type == args.job_type)\n if args.hostname:\n query = query.where(Job.hostname == args.hostname)\n if args.local:\n query = query.where(Job.hostname == get_hostname())\n if args.limit > 0:\n query = query.limit(args.limit)\n\n alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]\n\n count_alive_jobs = len(alive_jobs)\n if count_alive_jobs == 0:\n raise SystemExit(\"No alive jobs found.\")\n if count_alive_jobs > 1 and not args.allow_multiple:\n raise SystemExit(f\"Found {count_alive_jobs} alive jobs. Expected only one.\")\n if count_alive_jobs == 1:\n print(\"Found one alive job.\")\n else:\n print(f\"Found {count_alive_jobs} alive jobs.\")",
"def _check_job_status(self):\n try:\n status = self.ee2.check_job_canceled({\"job_id\": self.job_id})\n except Exception as e:\n self.logger.error(\n f\"Warning: Job cancel check failed due to {e}. However, the job will continue to run.\"\n )\n return True\n if status.get(\"finished\", False):\n return False\n return True",
"def abort_unnecessary_jobs(self):\n self._update_candidate_range()\n for r in self.revisions:\n if r == self.lkgr:\n break\n if not r.tested or r.failed:\n r.good = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover\n for r in self.revisions[self.fkbr.list_index + 1:]:\n if not r.tested or r.failed:\n r.bad = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover",
"def _is_done_illegal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) < servers_used_mem)",
"def test_matching_jobs_invalid(self):\n self.assertEquals(\n self.query_api.get_matching_jobs(\n \"try\", \"146071751b1e\",\n 'Invalid buildername'), [])",
"def pending_work(self) -> bool:\n return len(self.ongoing) > 0",
"def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')",
"def check_queue(st):\n\n logging.info(\"Checking queue...\")\n check_time = time.time()\n n_waiting_jobs = BatchPlugin.poll_queue()\n\n if n_waiting_jobs is not None:\n\n # Correction factor\n corr = st['vms_allegedly_running'] * cf['elastiq']['n_jobs_per_vm']\n logging.info(\"Jobs: waiting=%d | allegedly running=%d | considering=%d\" % \\\n (n_waiting_jobs, corr, n_waiting_jobs-corr))\n n_waiting_jobs -= corr\n\n if n_waiting_jobs > cf['elastiq']['waiting_jobs_threshold']:\n if st['first_seen_above_threshold'] != -1:\n if (check_time-st['first_seen_above_threshold']) > cf['elastiq']['waiting_jobs_time_s']:\n # Above threshold time-wise and jobs-wise: do something\n logging.info(\"Waiting jobs: %d (above threshold of %d for more than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n list_ok = scale_up( math.ceil(n_waiting_jobs / float(cf['elastiq']['n_jobs_per_vm'])), valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n st['first_seen_above_threshold'] = -1\n else:\n # Above threshold but not for enough time\n logging.info(\"Waiting jobs: %d (still above threshold of %d for less than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n else:\n # First time seen above threshold\n logging.info(\"Waiting jobs: %d (first time above threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = check_time\n else:\n # Not above threshold: reset\n logging.info(\"Waiting jobs: %d (below threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = -1\n else:\n logging.error(\"Cannot get the number of waiting jobs this time, sorry\")\n\n return {\n 'action': 'check_queue',\n 'when': time.time() + cf['elastiq']['check_queue_every_s']\n }",
"def is_done(self):\n return not any((agent.is_alive() for agent in self.agents))",
"def is_all_done():\n for job_set in job_sets:\n if job_set.status != SetStatus.COMPLETED:\n return False\n return True",
"def _check_completed(self):\n current_rung_df = self.sieve_board.loc[\n self.sieve_board['status'].isin(\n [StatusType.WAITTING, StatusType.RUNNING])\n ]\n if current_rung_df.empty:\n return True\n else:\n return False",
"def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True",
"def has_active_jobs(self, **kwargs):\n if Job.objects.add_balance().filter(house=self.house, balance1__gt=0, approved=True, **kwargs).exists():\n return True\n\n return False",
"def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')",
"def test_check_opt_crashed(self):\n self.assertEqual(check_opt(self.jobset2.job), 'ocrashed')",
"def missing_expected_delivery(self):\n from devilry.apps.core.models import Delivery\n from devilry.apps.core.models import Deadline\n if self.assignment.is_electronic and self.get_status() == \"waiting-for-feedback\":\n return not Delivery.objects.filter(\n deadline__assignment_group=self,\n deadline=Deadline.objects.filter(assignment_group=self).order_by('-deadline')[0]\n ).exists()\n return False",
"def _check_can_submit(self):\n if not self.parallel and self.last_submitted_i != self.highest_continuous_done_i:\n raise CannotSubmitNewTask(\n f\"Attempt to get task for {self} \"\n f\"out of order: last submitted {self.last_submitted_i}, \"\n f\"but highest_continuous_done_i is {self.highest_continuous_done_i}.\")\n if self.all_results_arrived:\n raise CannotSubmitNewTask(\n f\"Can't get {self} task: all results already arrived\")\n if self.final_task_submitted:\n raise CannotSubmitNewTask(\n \"Can't get {self} task: final task already submitted\")",
"def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')",
"def is_done(self):\n return not any(agent.is_alive() for agent in self.agents)"
] | [
"0.66733396",
"0.63713294",
"0.62130815",
"0.61487967",
"0.6122383",
"0.60535616",
"0.59987134",
"0.5935412",
"0.591256",
"0.5887511",
"0.5864966",
"0.5847546",
"0.58341813",
"0.58134335",
"0.5810112",
"0.5805247",
"0.58052236",
"0.5803409",
"0.58024246",
"0.57649076",
"0.57469803",
"0.5735888",
"0.5734674",
"0.57148653",
"0.57141715",
"0.56996256",
"0.5683361",
"0.5682952",
"0.5682856",
"0.5672645"
] | 0.80535066 | 0 |
Calculates the budget for the house based on three variables | def budget(self):
    budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee
    return float(round(budget, 2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs",
"def budget_problem3(balance, annualInterestRate):\r\n remaining = balance\r\n\r\n # creating the following bounds assists with bisection search\r\n lo = balance/12\r\n hi = ((balance * (annualInterestRate/12))**12)/12\r\n payment = (lo + hi)/2\r\n\r\n while remaining != 0:\r\n for month in range(12):\r\n remaining = (remaining - payment) * (1 + (annualInterestRate/12))\r\n if remaining > 0:\r\n lo = payment\r\n elif round(remaining,2) < -0.11:\r\n hi = payment\r\n else:\r\n break\r\n payment = (lo + hi)/2\r\n remaining = balance\r\n print 'Lowest Payment: ' + str(round(payment,2))\r\n return round(payment,2)",
"def box_budget(da_i, da_j, left_i, right_i, lower_j, upper_j, bs=None):\n\n upper = da_j.isel(XC=slice(left_i, right_i), YG=upper_j).rename('upper')\n lower = da_j.isel(XC=slice(left_i, right_i), YG=lower_j).rename('lower')\n right = da_i.isel(XG=right_i, YC=slice(lower_j, upper_j)).rename('right')\n left = da_i.isel(XG=left_i, YC=slice(lower_j, upper_j)).rename('left')\n \n if bs=='upper':\n return upper\n elif bs=='lower':\n return lower\n elif bs=='right':\n return right\n elif bs=='left':\n return left\n else:\n None \n \n return -upper.sum(('Z', 'XC')) + lower.sum(('Z', 'XC')) - right.sum(('Z', 'YC')) + left.sum(('Z', 'YC'))",
"def calculate_profit(self):",
"def budget_balance(self):\n budget_balance = round(self.budget() - self.total_spent(), 2)\n budget_balance_degree = round( (9000 * self.total_spent()) / (self.budget()), 4) #convert to degrees and round to four decimal places\n return (budget_balance, budget_balance_degree)",
"def budget_for_necessities():\n print(\"========== Displaying hotel options ==========\")\n for i in range(len(hotel_list)):\n print(\" -- Enter\", i+1, \"to choose -- \")\n hotel_list[i].print_hotel()\n print(\" \")\n while True:\n try:\n hotel_num = int(input(\"Please choose your hotel option (Enter a number between 1 to 7): \"))\n hotel_num -= 1\n if hotel_num in range(len(hotel_list)): break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n while True:\n try:\n hotel_stay = int(input(\"Please enter the duration (in days) of your stay: \"))\n if hotel_stay > 0: break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n user_hotel = hotel_list[hotel_num]\n user_hotel_price = user_hotel.get_price()\n user_hotel_name = user_hotel.get_name()\n # display car option and ask for user input\n print(\"\\n======== Displaying rental car options =========\")\n for i in range(len(car_list)):\n print(\" -- Enter\", i+1, \"to choose -- \")\n car_list[i].print_car()\n print(\" \")\n while True:\n try:\n car_num = int(input(\"Please choose your car rental option (Enter a number between 1 to 6): \"))\n car_num -= 1\n if car_num in range(len(hotel_list)): break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n while True:\n try:\n car_rental_day = int(input(\"Please enter the duration (in days) of your car rental: \"))\n if car_rental_day > 0: break\n except ValueError:\n print(\"Please enter an positive integer. Try again... \")\n # calculate user's total cost for car rental and hotel\n user_car = car_list[car_num]\n user_car_price = user_car.get_price()\n user_car_name = user_car.get_name()\n total_hotel_cost = hotel_stay * user_hotel_price\n total_car_rental_cost = car_rental_day * user_car_price\n print(\"\\n=== Displaying your hotel and car rental information ===\")\n print(\"Hotel: \", user_hotel.get_name())\n print(\"Hotel total cost: $\", total_hotel_cost)\n print(\"Car Rental: \", user_car.get_name())\n print(\"Car rental total cost: $\", total_car_rental_cost)\n print(\" \")\n # calculate remaining budget based on hotel and car's cost and/or ask for higher budget\n user_budget.calculate_new_budget(total_hotel_cost + total_car_rental_cost)\n print(\" \")\n return total_hotel_cost, total_car_rental_cost, user_hotel_name, user_car_name",
"def get_broilers_budget(chickens: int = 1):\n budget = get_budget_fixture(thing_being_farmed=2)\n\n for i, segment in enumerate(budget['segments']):\n for i2, activity in enumerate(segment['activities']):\n for i3, item in enumerate(activity['inputs']):\n new_price = chickens * item['price']\n item['estimated_price'] = new_price\n item['price'] = new_price\n\n return budget",
"def budget(df, df_hist, harmonize_year=\"2015\"):\n\n harmonize_year = int(harmonize_year)\n\n df = df.set_axis(df.columns.astype(int), axis=\"columns\")\n df_hist = df_hist.set_axis(df_hist.columns.astype(int), axis=\"columns\")\n\n data_years = df.columns\n hist_years = df_hist.columns\n\n years = data_years[data_years >= harmonize_year]\n\n if data_years[0] not in hist_years:\n hist_years = hist_years.insert(bisect(hist_years, data_years[0]), data_years[0])\n df_hist = df_hist.reindex(columns=hist_years).interpolate(\n method=\"slinear\", axis=1\n )\n\n def carbon_budget(years, emissions):\n # trapezoid rule\n dyears = np.diff(years)\n demissions = np.diff(emissions)\n\n budget = (dyears * (np.asarray(emissions)[:-1] + demissions / 2)).sum()\n return budget\n\n solver = pyo.SolverFactory(\"ipopt\")\n if solver.executable() is None:\n raise RuntimeError(\n \"No executable for the solver 'ipopt' found \"\n \"(necessary for the budget harmonization). \"\n \"Install from conda-forge or add to PATH.\"\n )\n\n harmonized = []\n\n for region in df.index:\n model = pyo.ConcreteModel()\n\n \"\"\"\n PARAMETERS\n \"\"\"\n data_vals = df.loc[region, years]\n hist_val = df_hist.loc[region, harmonize_year]\n\n budget_val = carbon_budget(data_years, df.loc[region, :])\n\n if data_years[0] < harmonize_year:\n hist_in_overlap = df_hist.loc[region, data_years[0] : harmonize_year]\n budget_val -= carbon_budget(hist_in_overlap.index, hist_in_overlap)\n\n \"\"\"\n VARIABLES\n \"\"\"\n model.x = pyo.Var(years, initialize=0, domain=pyo.Reals)\n x = np.array(\n [model.x[y] for y in years]\n ) # keeps pyomo VarData objects, ie. modelling vars not numbers\n\n \"\"\"\n OBJECTIVE FUNCTION\n \"\"\"\n delta_years = np.diff(years)\n delta_x = np.diff(x)\n delta_m = np.diff(data_vals)\n\n def l2_norm():\n return pyo.quicksum((delta_m / delta_years - delta_x / delta_years) ** 2)\n\n model.obj = pyo.Objective(expr=l2_norm(), sense=pyo.minimize)\n\n \"\"\"\n CONSTRAINTS\n \"\"\"\n model.hist_val = pyo.Constraint(expr=model.x[harmonize_year] == hist_val)\n\n model.budget = pyo.Constraint(expr=carbon_budget(years, x) == budget_val)\n\n \"\"\"\n RUN\n \"\"\"\n results = solver.solve(model)\n\n assert (results.solver.status == pyo.SolverStatus.ok) and (\n results.solver.termination_condition == pyo.TerminationCondition.optimal\n ), (\n f\"ipopt terminated budget optimization with status: \"\n f\"{results.solver.status}, {results.solver.termination_condition}\"\n )\n\n harmonized.append([pyo.value(model.x[y]) for y in years])\n\n df_harm = pd.DataFrame(\n harmonized,\n index=df.index,\n columns=years.astype(str),\n )\n\n return df_harm",
"def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs",
"def daffodils(flNeeded,amtPaid, dzCost):\n\n\n import math\n\n dz = flNeeded / 12\n dozens = math.ceil (dz) #Rounds up to the nearest dozen\n\n totCost = dzCost * dozens\n toPay = totCost - amtPaid\n\n print (\"You will need to contribute\", toPay)",
"def buy(stage, budget, items):\n\n temptab = []\n\n if stage == 0:\n values = 0\n else:\n i = stage - 1 # align the stage with index of items\n if items[i].lim == 0:\n limit = int(math.floor(budget/items[i].cost)) + 1\n else:\n limit = items[i].lim + 1\n\n for n in xrange(\n min(limit, int(math.floor(budget/items[i].cost))+1)):\n temptab.append(treat(items[i], n) +\n buy(stage-1, budget - n*items[i].cost, items))\n\n values = max(temptab)\n # the index in temptab is the amount of item to buy\n AMOUNT[(stage, budget)] = temptab.index(values)\n\n return values",
"def calc_capital_costs (self):\n powerhouse_control_cost = 0\n if not self.cd['switchgear suitable for renewables']:\n powerhouse_control_cost = self.cd['switchgear cost']\n\n #~ road_needed = self.comp_specs['road needed for transmission line']\n\n\n if str(self.comp_specs['transmission capital cost'])\\\n != 'UNKNOWN':\n transmission_line_cost = \\\n int(self.comp_specs['transmission capital cost'])\n else:\n if str(self.comp_specs['distance to resource']) \\\n != 'UNKNOWN':\n distance = \\\n float(self.comp_specs\\\n ['distance to resource'])\n transmission_line_cost = \\\n distance*self.comp_specs['est. transmission line cost']\n\n secondary_load_cost = 0\n if self.comp_specs['secondary load']:\n secondary_load_cost = self.comp_specs['secondary load cost']\n\n if str(self.comp_specs['generation capital cost']) \\\n != 'UNKNOWN':\n wind_cost = \\\n int(self.comp_specs['generation capital cost'])\n self.cost_per_kw = np.nan\n else:\n for i in range(len(self.comp_specs['estimated costs'])):\n if int(self.comp_specs['estimated costs'].iloc[i].name) < \\\n self.load_offset_proposed:\n if i == len(self.comp_specs['estimated costs']) - 1:\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n continue\n\n cost = float(self.comp_specs['estimated costs'].iloc[i])\n break\n\n wind_cost = self.load_offset_proposed * cost\n self.cost_per_kw = cost\n\n #~ print powerhouse_control_cost\n #~ print transmission_line_cost\n #~ print secondary_load_cost\n #~ print wind_cost\n self.capital_costs = powerhouse_control_cost + transmission_line_cost +\\\n secondary_load_cost + wind_cost\n\n #~ print 'self.capital_costs',self.capital_costs",
"def declare_new_budget(date, exp_data):\n\n exp_list = exp_data[env.EXPENSE_DATA_KEY]\n local_budget = {}\n month_total = util.get_float_input(\n f\"Please input your total for the month ending {date}: \", force_pos=True)\n budg_remaining = month_total\n\n for i, exp in enumerate(exp_list):\n if i == len(exp_list) - 1:\n print(\"I got the last one for you :) MATH!\")\n budg_amnt = budg_remaining\n budg_remaining = 0\n\n elif budg_remaining == 0: # elif skips this condition if budget remaining is set above\n budg_amnt = 0\n local_budget[env.BUDGET_TOTAL_KEY] = month_total\n else:\n prompt = f\"Enter your budget for: [{exp}] - Total Budget Re. ${budg_remaining} - Exp's Re. [{len(exp_list) - i - 1}]: \"\n budg_amnt = prompt_for_budget_amnt(\n prompt, budg_remaining, exp_data)\n local_budget.update({exp: budg_amnt})\n budg_remaining = round(month_total - sum_budget(local_budget), 2)\n print(local_budget)\n return local_budget",
"def find_balanced_budget_tax(c):\n def steady_state_budget(t):\n e, u, w = compute_steady_state_quantities(c, t)\n return t - u * c\n\n tau = brentq(steady_state_budget, 0.0, 0.9 * c)\n return tau",
"def calculate_costs(data, costs, simulation_parameters, site_radius, environment):\n inter_site_distance = site_radius * 2\n site_area_km2 = math.sqrt(3) / 2 * inter_site_distance ** 2 / 1e6\n sites_per_km2 = 1 / site_area_km2\n\n for key, value in simulation_parameters.items():\n if key == 'backhaul_distance_km_{}'.format(environment):\n backhaul_distance = value\n\n cost_breakdown = {\n 'single_sector_antenna_2x2_mimo_dual_band': (\n costs['single_sector_antenna_2x2_mimo_dual_band'] *\n simulation_parameters['sectorization'] * sites_per_km2\n ),\n 'single_remote_radio_unit': (\n costs['single_remote_radio_unit'] *\n simulation_parameters['sectorization'] * sites_per_km2\n ),\n 'single_baseband_unit': (\n costs['single_baseband_unit'] * sites_per_km2\n ),\n 'router': (\n costs['router'] * sites_per_km2\n ),\n 'tower': (\n costs['tower'] * sites_per_km2\n ),\n 'civil_materials': (\n costs['civil_materials'] * sites_per_km2\n ),\n 'transportation': (\n costs['transportation'] * sites_per_km2\n ),\n 'installation': (\n costs['installation'] * sites_per_km2\n ),\n 'battery_system': (\n costs['battery_system'] * sites_per_km2\n ),\n 'fiber_backhaul_{}'.format(environment): (\n costs['fixed_fiber_backhaul_per_km'] * backhaul_distance * sites_per_km2\n ),\n 'microwave_backhaul_1m': (\n costs['microwave_backhaul_1m'] * sites_per_km2\n )\n }\n\n total_deployment_costs_km2 = 0\n for key, value in cost_breakdown.items():\n total_deployment_costs_km2 += value\n\n output = {\n 'environment': environment,\n 'inter_site_distance': inter_site_distance,\n 'site_area_km2': site_area_km2,\n 'sites_per_km2': sites_per_km2,\n 'results_type': data['results_type'],\n 'path_loss': data['path_loss'],\n 'received_power': data['received_power'],\n 'interference': data['interference'],\n 'sinr': data['sinr'],\n 'spectral_efficiency': data['spectral_efficiency'],\n 'capacity_mbps': data['capacity_mbps'],\n 'capacity_mbps_km2': data['capacity_mbps'],\n 'total_deployment_costs_km2': total_deployment_costs_km2,\n 'sector_antenna_costs_km2': cost_breakdown['single_sector_antenna_2x2_mimo_dual_band'],\n 'remote_radio_unit_costs_km2': cost_breakdown['single_remote_radio_unit'],\n 'baseband_unit_costs_km2': cost_breakdown['single_baseband_unit'],\n 'router_costs_km2': cost_breakdown['router'],\n 'tower_costs_km2': cost_breakdown['tower'],\n 'civil_material_costs_km2': cost_breakdown['civil_materials'],\n 'transportation_costs_km2': cost_breakdown['transportation'],\n 'installation_costs_km2': cost_breakdown['installation'],\n 'battery_system_costs_km2': cost_breakdown['battery_system'],\n 'fiber_backhaul_costs_km2': cost_breakdown['fiber_backhaul_{}'.format(environment)],\n 'microwave_backhaul_1m_costs_km2': cost_breakdown['microwave_backhaul_1m'],\n }\n\n return output",
"def monthly_fee(self):\n total_salary = self.base_salary\n if self.is_salary_allowances is True:\n fixed_allowances = self.summarize( self.fixed_allowances )\n non_fixed_allowances = self.summarize( self.non_fixed_allowances )\n total_salary = total_salary + non_fixed_allowances + fixed_allowances\n #end if\n\n company_old_age_insurance = 0\n individual_old_age_insurance = 0\n if self.old_age_insurance_status is True:\n company_old_age_insurance = \\\n self._company_old_age_insurance(total_salary)\n\n individual_old_age_insurance = \\\n self._individual_old_age_insurance(total_salary)\n #end if\n\n company_pension_insurance = 0\n individual_pension_insurance = 0\n if self.pension_insurance_status is True:\n company_pension_insurance = \\\n self._company_pension_insurance(total_salary)\n\n individual_pension_insurance = \\\n self._individual_pension_insurance(total_salary)\n #end if\n\n company_health_insurance = 0\n individual_health_insurance = 0\n if self.health_insurance_status is True:\n company_health_insurance = \\\n self._company_health_insurance(total_salary)\n\n individual_health_insurance = \\\n self._individual_health_insurance(total_salary)\n #end if\n\n death_insurance = 0\n if self.death_insurance_status is True:\n death_insurance = self._death_insurance(total_salary)\n #end if\n\n accident_insurance = 0\n if self.accident_insurance_status is True:\n accident_insurance = \\\n self._accident_insurance(total_salary, \\\n self.industry_risk_rate)\n #end if\n\n monthly = {\n \"old_age_insurance\" : {\n \"company\" : company_old_age_insurance,\n \"individual\" : individual_old_age_insurance,\n },\n \"pension_insurance\" : {\n \"company\" : company_pension_insurance,\n \"individual\" : individual_pension_insurance,\n },\n \"health_insurance\" : {\n \"company\" : company_health_insurance,\n \"individual\" : individual_health_insurance,\n },\n \"death_insurance\" : death_insurance,\n \"accident_insurance\" : accident_insurance\n }\n return monthly",
"def calculate_cost(self):\n info = {}\n c = self.get_collisions() * self.bomb_cost\n z = self.agent.get_position()[2] # Limit range of Drone agent\n\n # sum all costs in one total cost\n info['cost_gathered_bombs'] = c\n info['cost_out_of_range'] = 1. if z > 2 else 0.\n # limit cost to be at most 1.0\n info['cost'] = min(1, sum(v for k, v in info.items()\n if k.startswith('cost_')))\n return info",
"def buy2(budget, items):\n\n # Initialize the treatment table\n TREATMENT.append([(0, 0)] * (budget+1))\n\n for i in xrange(len(items)):\n TREATMENT.append([])\n if items[i].lim == 0:\n limit = int(math.floor(budget/items[i].cost)) + 1\n else:\n limit = items[i].lim + 1\n\n for j in xrange(budget + 1):\n temp = [treat(items[i], n) +\n TREATMENT[i][j-n*items[i].cost][1]\n for n in xrange(\n min(limit, int(math.floor(j/items[i].cost)+1)))\n ]\n\n value = max(temp)\n # the index in list 'temp' is the number of item to buy\n num = temp.index(value)\n TREATMENT[i+1].append((num, value))",
"def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")",
"def calc_agent_budget(self, agent, assignment):\n budget_spent = sum([self.agent_cost(agent, t) for t in assignment[agent]])\n return self.agent_budget(agent) - budget_spent",
"def declare_budget(model, k, relays):\n m = model\n\n m.budget = pe.Constraint(expr=sum(m.delta[r] for r in relays) <= k)",
"def declare_budget(model, k, relays):\n m = model\n\n m.budget = pe.Constraint(expr=sum(m.delta[r] for r in relays) <= k)",
"def get_expenses(budget):\n return sum(expense['bgt'] for expense in budget['spend'])",
"def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh",
"def howManyGames(p, d, m, budget):\n cost = p\n i = 0\n add = 0\n while budget >= cost:\n budget = budget - cost\n add += cost\n print(add, cost)\n\n i += 1\n\n if (p - i * d) <= m:\n cost = m\n else:\n cost = p - i * d\n print(i)\n return i",
"def double_declining_balance():\r\n cost = float(input(\"Please Enter The Cost Of Asset: \"))\r\n accdepreciation = float(input(\"Please Enter The Value Of Accumulated Depreciation: \"))\r\n life = float(input(\"Please Enter Estimated Useful Life Of Asset(Years): \"))\r\n rv = float(input(\"Please Enter Estimated Residual Value Of Asset: \"))\r\n n = 0\r\n a = (float(cost)-float(accdepreciation)) * (float(2)/float(life))\r\n bn = float(a)/float(12)\r\n print \">> Your Monthly Depreciation For First Year is\",bn\r\n while(n != (life-1)):\r\n bk = float(cost)\r\n a = ((float(cost)-float(accdepreciation)) * (float(2)/float(life)))\r\n cost -= float(a)\r\n bk -= float(a)\r\n n += 1\r\n vvv = float(bk)-float(rv)\r\n print \">> Your Depreciation For Year No.\",n,\"is\",a\r\n print \">> Your Book Value After\",n,\"Years is\",bk,\"\\n\"\r\n print \">> Your Depreciation For Year No.\",int(life),\"is\",vvv\r\n print \">> Your Book Value After\",int(life),\"Years is\",rv",
"def calculate_bonuses (the_sum_of_current_purchase):\n the_sum_of_previous_purchases = 0\n blue_card_percent = 0.05\n silver_card_percent = 0.07\n gold_card_percent = 0.1\n the_sum_of_previous_purchases = the_sum_of_previous_purchases + the_sum_of_current_purchase\n\n if the_sum_of_previous_purchases <1000:\n bonus_for_purchase = 0\n if 1000 <= the_sum_of_previous_purchases <= 15_000:\n bonus_for_purchase = the_sum_of_current_purchase * blue_card_percent\n\n if 15001 <= the_sum_of_previous_purchases < 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * silver_card_percent\n\n if the_sum_of_previous_purchases >= 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * gold_card_percent\n\n return bonus_for_purchase",
"def _compute_budget(self, kstpkper=None, totim=None):\n # Initialize an array to track where the constant head cells\n # are located.\n ich = np.zeros(self.cbc_shape, self.int_type)\n swiich = np.zeros(self.cbc_shape, self.int_type)\n\n if \"CONSTANT HEAD\" in self.record_names:\n \"\"\"\n C-----CONSTANT-HEAD FLOW -- DON'T ACCUMULATE THE CELL-BY-CELL VALUES FOR\n C-----CONSTANT-HEAD FLOW BECAUSE THEY MAY INCLUDE PARTIALLY CANCELING\n C-----INS AND OUTS. USE CONSTANT-HEAD TERM TO IDENTIFY WHERE CONSTANT-\n C-----HEAD CELLS ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF\n C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY.\n \"\"\"\n chd = self.cbc.get_data(\n text=\"CONSTANT HEAD\",\n full3D=True,\n kstpkper=kstpkper,\n totim=totim,\n )[0]\n ich[np.ma.where(chd != 0.0)] = 1\n if \"FLOW RIGHT FACE\" in self.record_names:\n self._accumulate_flow_frf(\"FLOW RIGHT FACE\", ich, kstpkper, totim)\n if \"FLOW FRONT FACE\" in self.record_names:\n self._accumulate_flow_fff(\"FLOW FRONT FACE\", ich, kstpkper, totim)\n if \"FLOW LOWER FACE\" in self.record_names:\n self._accumulate_flow_flf(\"FLOW LOWER FACE\", ich, kstpkper, totim)\n if \"SWIADDTOCH\" in self.record_names:\n swichd = self.cbc.get_data(\n text=\"SWIADDTOCH\", full3D=True, kstpkper=kstpkper, totim=totim\n )[0]\n swiich[swichd != 0] = 1\n if \"SWIADDTOFRF\" in self.record_names:\n self._accumulate_flow_frf(\"SWIADDTOFRF\", swiich, kstpkper, totim)\n if \"SWIADDTOFFF\" in self.record_names:\n self._accumulate_flow_fff(\"SWIADDTOFFF\", swiich, kstpkper, totim)\n if \"SWIADDTOFLF\" in self.record_names:\n self._accumulate_flow_flf(\"SWIADDTOFLF\", swiich, kstpkper, totim)\n\n # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE\n # ACCUMULATE THE FLOW BY ZONE\n # iterate over remaining items in the list\n for recname in self.ssst_record_names:\n self._accumulate_flow_ssst(recname, kstpkper, totim)\n\n # Compute mass balance terms\n self._compute_mass_balance(kstpkper, totim)\n\n return",
"def calculate_pool_reward(height: uint32) -> uint64:\n\n if height == 0:\n return uint64(int((7 / 8) * 21000000 * _mojo_per_chia))\n elif height < 3 * _blocks_per_year:\n return uint64(int((7 / 8) * 2 * _mojo_per_chia))\n elif height < 6 * _blocks_per_year:\n return uint64(int((7 / 8) * 1 * _mojo_per_chia))\n elif height < 9 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.5 * _mojo_per_chia))\n elif height < 12 * _blocks_per_year:\n return uint64(int((7 / 8) * 0.25 * _mojo_per_chia))\n else:\n return uint64(int((7 / 8) * 0.125 * _mojo_per_chia))",
"def construction_permitting(self):\n building_permits = 0.02 * self.input_dict['foundation_cost_usd']\n highway_permits = 20000 * self.input_dict['num_hwy_permits']\n construction_permitting_cost = building_permits + highway_permits\n return construction_permitting_cost"
] | [
"0.61505514",
"0.6089759",
"0.6038353",
"0.6015872",
"0.594914",
"0.5947916",
"0.59439886",
"0.59188956",
"0.5887294",
"0.5822022",
"0.5820701",
"0.5790669",
"0.5781134",
"0.57606393",
"0.5755206",
"0.5702439",
"0.5691797",
"0.5668918",
"0.5665423",
"0.56606954",
"0.5652122",
"0.5652122",
"0.56501",
"0.5636988",
"0.56193435",
"0.5618062",
"0.5599504",
"0.555429",
"0.5547553",
"0.55351907"
] | 0.67384696 | 0 |
Calculates the total amount spent for the house by adding all expenses and completed jobs. | def total_spent(self):
    approved_jobs = self.approved_jobs()
    expenses = self.expenses()
    total = 0
    for job in approved_jobs:
        total += job.total_paid
    for expense in expenses:
        total += expense.amount
    return float(round(total, 2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_total_paid(self):\n total = 0.0\n for line in self.loan_ids:\n if line.pay:\n total += line.amount\n self.total_paid = total",
"def cash_sum(self, room):\n self.cash = room.price\n return self.cash",
"def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs",
"def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total",
"def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")",
"def total_balance(self) -> Decimal:\n return self.incomes_from_outside + self.expenses_to_outside",
"def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price",
"def calc_annual_heating_savings (self):\n price = (self.diesel_prices + self.cd['heating fuel premium'])\n\n #~ self.base_heating_cost =\n\n #~ self.proposed_heating_cost =\n\n\n\n\n self.annual_heating_savings = self.reduction_diesel_used * price\n #~ print 'self.annual_heating_savings',self.annual_heating_savings",
"def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))",
"def total(self, desired_period: int = 12):\n self._trigger_gather()\n result = Decimal(0)\n for item in self.elements:\n result += item.income.amount(desired_period)\n return(Decimal(result))",
"def total_spent(self):\n total_sum = Order.objects.filter(\n email=self.email).aggregate(\n Sum('total_price')\n ).get('total_price__sum')\n return round(total_sum, 4) if total_sum else 0",
"def _compute_amount(self):\n for line in self:\n line.update({\n 'price_subtotal': line.price_unit * line.quantity,\n })",
"def life_insurance_to_recive_total(self):\n pass",
"def total_value(self):\n total = 0.0\n for account in self.accounts():\n total += account.available_cash()\n for asset in account.assets():\n total += asset.adjusted_value()\n return total",
"def total_cost(self):\n return (self.food_amount + self.local_transport_amount + self.other_expenses +\n self.travel_amount + self.accomodation_amount)",
"def calc_profit(self, assignment):\n return sum([self.profit(agent, task)\n for agent, tasks in assignment.items() \n for task in tasks])",
"def get_total_to_pay(self):\n self.__total_to_pay = Order.get_price_subtotals(self) + \\\n Order.get_qst_subtotals(self) + \\\n Order.get_gst_subtotals(self)\n return self.__total_to_pay",
"def get_total_expenses(self):\n return sum(self.expenses.values())",
"def total_cost(self):\n if self.goal:\n return self.goal + (self.community_contribution or 0)\n else:\n return 0",
"def total_equity(self):\n return self.total_market_value + self.cash",
"def total_paid(self) -> Decimal:\n return self.total_principal + self.total_interest",
"def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings",
"def calculate_total_cost(state):\n pass",
"def get_total(self):\n total = 0.00\n\n for _drink in self.drinks:\n total = total + _drink.get_price()\n\n for _food in self.food:\n total = total + _food.get_price()\n\n return total",
"def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total",
"def get_total_price(self):\n i = self.get_copy_with_resolved_dependencies()\n total_price = Decimal(0)\n for product in i['products']:\n billed_price = Decimal(str(product.get('price', 0))) * Decimal(str(product.get('quantity')))\n total_price += billed_price\n return total_price",
"def get_total(self):\n\n base_price = self.get_base_price()\n if self.species == \"christmas melon\":\n base_price = base_price * 1.5\n\n total = ((1 + self.tax) * self.qty * base_price)\n\n return total",
"def calc_energy_and_price(self) -> (float, float):\n\n cost_sum = 0\n energy_sum = 0\n for pump_id in self.pumps:\n pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()\n cost_sum += pump_cost\n energy_sum += pump_energy\n\n pump_id.append_index = 0\n\n assert energy_sum >= 0, \"The pumping energy cant be negative!\"\n assert cost_sum >= 0, \"The pumping cost cant be negative!\"\n return energy_sum, cost_sum",
"def extras_total(self):\n total = self.wides + self.no_balls + self.byes + self.leg_byes\n return total",
"def get_total(self):\n\n base_price = self.get_base_price()\n\n # Christmas Melons are more x1.5 expensive than other melons\n if self.species == \"Christmas Melon\":\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total"
] | [
"0.61278397",
"0.60141414",
"0.5883509",
"0.58684665",
"0.58504134",
"0.5818961",
"0.5803535",
"0.57970977",
"0.57137537",
"0.57137537",
"0.5711607",
"0.5659715",
"0.56533766",
"0.5646067",
"0.5616503",
"0.56072485",
"0.5606351",
"0.55777013",
"0.556878",
"0.5558684",
"0.5552353",
"0.55456096",
"0.5540644",
"0.55346864",
"0.55231816",
"0.551619",
"0.54996485",
"0.549161",
"0.5487316",
"0.5476552"
] | 0.7208334 | 0 |
Calculates the balance budget and balance budget degree by taking the budget amount and subtracting the total spent amount. | def budget_balance(self):
    budget_balance = round(self.budget() - self.total_spent(), 2)
    budget_balance_degree = round( (9000 * self.total_spent()) / (self.budget()), 4) #convert to degrees and round to four decimal places
    return (budget_balance, budget_balance_degree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deposit(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] += float(amount)\r\n self.balance += float(amount)",
"def withdraw(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] -= float(amount)\r\n self.balance -= float(amount)",
"def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))",
"def balance(self) -> float:\n\t\tbalance = 0\n\t\tfor transaction in self.transactions:\n\t\t\tsign = 1 if transaction.receiving_account == self.__number else -1\n\t\t\tbalance += sign*transaction.usd*transaction.completed\n\t\t# The bank has infinite money\n\t\tif self.name == Account.BANK:\n\t\t\tbalance = Decimal('Infinity')\n\t\treturn balance",
"def budget_problem3(balance, annualInterestRate):\r\n remaining = balance\r\n\r\n # creating the following bounds assists with bisection search\r\n lo = balance/12\r\n hi = ((balance * (annualInterestRate/12))**12)/12\r\n payment = (lo + hi)/2\r\n\r\n while remaining != 0:\r\n for month in range(12):\r\n remaining = (remaining - payment) * (1 + (annualInterestRate/12))\r\n if remaining > 0:\r\n lo = payment\r\n elif round(remaining,2) < -0.11:\r\n hi = payment\r\n else:\r\n break\r\n payment = (lo + hi)/2\r\n remaining = balance\r\n print 'Lowest Payment: ' + str(round(payment,2))\r\n return round(payment,2)",
"def get_balance(self):\n final_amount = 0\n for i in range(len(self.ledger)):\n final_amount += self.ledger[i]['amount']\n return final_amount",
"def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r",
"def budget_left(self):\n return max(0, self.budget_total - self.budget_spent_with_commission)",
"def total_balance(self) -> Decimal:\n return self.incomes_from_outside + self.expenses_to_outside",
"def balance(self):\n total_money = 0\n for item in self.ledger:\n total_money += item['amount']\n return total_money",
"def budget_spent_with_commission(self):\n return self.budget_spent + self.budget_spent_commission",
"def balance(self, other):\n return self.contract_balance - self.transfered_amount + other.transfered_amount",
"def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount",
"def balance(self):\n return self._rbal - self._lbal",
"def balance(self) -> Decimal:\n withdrawals = self.withdrawal_requests.filter(\n status=WithdrawalStatus.open,\n )\n if len(withdrawals) == 0:\n return self.internal_balance\n else:\n withdrawal_total = sum(map(lambda w: w.amount, withdrawals))\n return self.internal_balance - withdrawal_total",
"async def debit(ctx, *args):\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n debit = 0\n for arg in args:\n try:\n debit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] -= debit\n else:\n bals[user.id] = -debit",
"def __balance__(self) -> float:\n\n with dataset.connect(database.get_db()) as db:\n # Find last bank transaction.\n statement = statement = f\"\"\"\n SELECT opening_balance, transaction_amount\n FROM bank\n WHERE author_id = {self.user.id}\n ORDER BY id DESC\n LIMIT 1\n \"\"\"\n result = db.query(statement)\n\n for row in result:\n balance = row[\"opening_balance\"] + row[\"transaction_amount\"]\n break\n else:\n # If there was no result for the user, default balance is given.\n balance = 500\n\n return float(balance)",
"def complete(self, cr, uid, ids, context={}):\n budget_pool = self.pool.get('account.budget')\n budget_line_pool = self.pool.get('account.budget.lines')\n for r in self.browse(cr, uid, ids, context=context):\n if r.type=='transfer' and not r.line_ids:\n raise osv.except_osv(_('Error!'),_('You cannot complete Transfer Operations without any Budget line.'))\n if r.budget_type=='cash':\n budget_ids = budget_pool.search(cr, uid,[('analytic_account_id', '=', r.analytic_account_id.id), \n ('period_id', '=', r.period_id.id)], context=context)\n budget_line_id = budget_line_pool.search(cr, uid,[('general_account_id', '=', r.account_id.id), \n ('account_budget_id', 'in', tuple(budget_ids))], context=context)\n if budget_line_id:\n line=budget_line_pool.browse(cr, uid, budget_line_id, context=context)[0]\n if line.planned_amount+line.total_operation < line.cash_total_operation + r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( line.cash_total_operation+ r.amount,line.planned_amount+line.total_operation ,))\n if line.cash_residual_balance + r.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (r.amount, line.name, line.cash_residual_balance,))\n for e in r.line_ids:\n if line.planned_amount+line.total_operation < line.cash_total_operation - r.amount:\n raise orm.except_orm(_('Error!'),\n _(\"Cash budget (%s) can't be more than planned budget (%s)!\") % \\\n ( e.cash_total_operation- r.amount,line.planned_amount+line.total_operation ,))\n if e.line_id.cash_residual_balance - e.amount <=0:\n raise orm.except_orm(_('Error!'),\n _(\"The amount you try to transfer (%s) is more than %s residual (%s)!\") % \\\n (e.amount, e.line_id.name, e.line_id.cash_residual_balance,))\n return self.write(cr, uid, ids,{'state':'complete','name': r.name == '/' and \n self.pool.get('ir.sequence').get(cr, uid, 'account.budget.operation') or \n r.name, 'amount': r.type=='increase' and r.amount or sum([l.amount for l in r.line_ids])}, context=context)\n \n return super(account_budget_operation, self).complete(cr, uid, ids, context=context)",
"def getBudgetBalance(self, budgetName):\r\n assert budgetName in self.budgets, \"Specified budget doesn't exist\"\r\n return \"%.2f\" % float(self.budgets[budgetName])",
"def cash_withdrawal(amt):\r\n global withdraw_money\r\n global balance_money\r\n withdraw_money = amt\r\n print(\"Amout enetered : \", withdraw_money)\r\n balance_money = balance_money - withdraw_money\r\n print(\"Withdraw success\")",
"def dec_total(self, dif):\n if not (is_number_correct(dif)):\n raise ValueError(\"Incorrect total value!\")\n self.total -= int(dif)\n self.budget_holder[datetime.datetime.now()] = self.total",
"def get_balance(self):\n balance = 0\n for transaction in self.ledger:\n balance += transaction[\"amount\"]\n return balance",
"def calc_task_budget(self, task, assignment):\n # Get the agents assigned to this task\n agents_assigned = {a for a, tasks in assignment.items() if task in tasks}\n budget_spent = sum([self.task_cost(a, task) for a in agents_assigned])\n return self.task_budget(task) - budget_spent",
"def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit",
"def balance(self):\n return sum(self.operations.select())\n 11",
"def get_balance(self, payments):\n # calc monthly interest\n monthly_interest = self.__calculate_monthly_interest()\n m = 1 + monthly_interest\n\n # calculate balance\n balance = self.principle * (\n ((m ** self.__months) - (m ** payments)) / (\n (m ** self.__months) - 1))\n return balance",
"def calc_agent_budget(self, agent, assignment):\n budget_spent = sum([self.agent_cost(agent, t) for t in assignment[agent]])\n return self.agent_budget(agent) - budget_spent",
"def getRemainingBalance(monthlyPayment, balance, monthlyInterestRate):\r\n numberOfMOnthNeeded = 0\r\n for i in range (0, 12):\r\n numberOfMOnthNeeded += 1\r\n balance = round(balance * (1 + monthlyInterestRate) - monthlyPayment, 2)\r\n if balance < 0:\r\n print 'Number of month needed:', numberOfMOnthNeeded\r\n break\r\n return balance",
"def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)",
"def balance(self):\n #a couple of assumptions not clear in assignment\n #1) there is always an invalid transaction\n #2) there is only 1 invalid transaction\n closeBalance=0\n invalidTrans=0\n withdrawCount=0\n depositCount=0\n# print(self.numList)\n for i in range(len(self.numList)):\n addValue=0\n if self.numList[i]<0:\n if (-1*self.numList[i])>closeBalance:\n invalidTrans=self.numList[i]\n else:\n addValue=self.numList[i]\n withdrawCount+=1\n elif self.numList[i]>0:\n if i!=0:depositCount+=1\n addValue=self.numList[i]\n closeBalance+=addValue\n# print(i,addValue,closeBalance)\n print(\"Invalid transaction %.2f\" %invalidTrans)\n print(\"Closing balance = %.2f\" %closeBalance)\n print(\"Number of withdrawals = %d\" %withdrawCount)\n print(\"Number of deposits = %d\" %depositCount)"
] | [
"0.71001554",
"0.7028642",
"0.65880764",
"0.64517754",
"0.63420874",
"0.6261126",
"0.6258373",
"0.624599",
"0.62441903",
"0.6243866",
"0.6233808",
"0.61923677",
"0.6177002",
"0.61561537",
"0.61382204",
"0.61116695",
"0.6110295",
"0.60939896",
"0.60505503",
"0.60471076",
"0.60319924",
"0.6018894",
"0.59966654",
"0.59768146",
"0.5975387",
"0.59104854",
"0.5893211",
"0.58913875",
"0.5885669",
"0.5881925"
] | 0.79801476 | 0 |
Calculates how much of the budget has been used as a percentage. | def budget_used(self):
    return int(self.total_spent() / self.budget() * 100.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0",
"def percentage(count, total):\n return count / total * 100",
"def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret",
"def calculate_used_margin_percentage(bal=None):\n if bal is None:\n bal = get_margin_balance()\n if bal['total'] <= 0:\n return 0\n return float(100 - (bal['free'] / bal['total']) * 100)",
"def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def get_free_set_percentage(self, params):\n raise NotImplementedError()",
"def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number",
"def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0",
"def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"",
"def percent_of(part, whole):\n return part * 100 / whole",
"def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)",
"def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"def remaining_percent(self):\n return (self.remaining_words / self.total_words) * 100",
"def get_percentage(self):\n return self.PotTax_percentage",
"def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))",
"def percentage(a, b):\n return (a * 100.0) / b",
"def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)",
"def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0",
"def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0",
"def percent_b(self) -> float:\n return self._percent_b",
"def percentage_complete(self) -> float:\n return self.__percentage_complete",
"def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage",
"def cash_ratio(self):\n return self.cash / self.current_liabilities"
] | [
"0.7384314",
"0.7322966",
"0.7174507",
"0.7170255",
"0.7124505",
"0.7116485",
"0.6982112",
"0.6959819",
"0.6915842",
"0.68807685",
"0.68467945",
"0.68159556",
"0.6778811",
"0.67623633",
"0.6761943",
"0.67319983",
"0.672578",
"0.66812664",
"0.66812664",
"0.66636133",
"0.6662423",
"0.665427",
"0.66434777",
"0.6629591",
"0.66238195",
"0.65973437",
"0.659588",
"0.6589267",
"0.6557837",
"0.65458715"
] | 0.7920088 | 0 |
Internal function to check pivot conditions and return the intersection of pivot values on the signals | def _get_signal_pivots(self):
sig_a_info = self._parser.inspect(self._a)
sig_b_info = self._parser.inspect(self._b)
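        # Both signals must be pivoted on the same column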
if sig_a_info["pivot"] != sig_b_info["pivot"]:
            raise RuntimeError("The pivot column for both signals " +
                               "should be the same (%s, %s)"
% (sig_a_info["pivot"], sig_b_info["pivot"]))
if sig_a_info["pivot"]:
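            # Keep only the pivot values common to both signals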
pivot_vals = set(
sig_a_info["pivot_values"]).intersection(sig_b_info["pivot_values"])
pivoted = sig_a_info["pivot"]
else:
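            # No pivot column: fall back to the single default pivot value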
pivot_vals = [StatConf.GRAMMAR_DEFAULT_PIVOT]
pivoted = False
return pivot_vals, pivoted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkintersection(p1,p2,p3,p4):\n def isonsegment(i,j,k):\n return ((i.x <= k.x or j.x <= k.x) and (k.x <= i.x or k.x <= j.x) and\n (i.y <= k.y or j.y <= k.y) and (k.y <= i.y or k.x <= j.y))\n\n def computedirection(i,j,k):\n a = (k.x - i.x) * (j.y - i.y);\n b = (j.x - i.x) * (k.y - i.y);\n if a < b:\n return -1\n elif a > b:\n return 1\n else:\n return 0\n\n # return no intersection if they\n if p1.x == p3.x and p1.y == p3.y:\n return False \n if p1.x == p4.x and p1.y == p4.y:\n return False\n if p2.x == p3.x and p2.y == p3.y:\n return False\n if p2.x == p4.x and p2.y == p4.y:\n return False\n\n\n d1 = computedirection(p3,p4,p1)\n d2 = computedirection(p3,p4,p2)\n d3 = computedirection(p1,p2,p3)\n d4 = computedirection(p1,p2,p4)\n return ((((d1 > 0 and d2 < 0) or (d1 < 0 and d2 > 0)) and\n ((d3 > 0 and d4 < 0) or (d3 < 0 and d4 > 0))) or\n (d1 == 0 and isonsegment(p3,p4,p1)) or\n (d2 == 0 and isonsegment(p3,p4,p2)) or\n (d3 == 0 and isonsegment(p1,p2,p3)) or\n (d4 == 0 and isonsegment(p1,p2,p4)))",
"def intersection(self, axis2):",
"def _identify_initial_pivot(X, up_thresh, down_thresh):\n x_0 = X[0]\n max_x = x_0\n max_t = 0\n min_x = x_0\n min_t = 0\n up_thresh += 1\n down_thresh += 1\n\n for t in range(1, len(X)):\n x_t = X[t]\n\n if x_t / min_x >= up_thresh:\n return VALLEY if min_t == 0 else PEAK\n\n if x_t / max_x <= down_thresh:\n return PEAK if max_t == 0 else VALLEY\n\n if x_t > max_x:\n max_x = x_t\n max_t = t\n\n if x_t < min_x:\n min_x = x_t\n min_t = t\n\n t_n = len(X)-1\n return VALLEY if x_0 < X[t_n] else PEAK",
"def recalculate_pivots(self):\n pass",
"def test_union_intersection():\n X = np.random.randn(d, 100)\n assert np.array_equal(lincon.indicator_intersection(X), 1-lincon.indicator_union(X))",
"def intersection(x, y, f, p):",
"def conditional_compare(self, condition, **kwargs):\n\n if self._pivot:\n result = {self._pivot: {}}\n\n mask = self._parser.solve(condition)\n step = kwargs.get(\"step\", \"post\")\n\n for pivot_val in self._pivot_vals:\n\n a_piv = self._a_data[pivot_val]\n b_piv = self._b_data[pivot_val]\n\n area = area_under_curve(a_piv[mask[pivot_val]], **kwargs)\n try:\n area /= area_under_curve(b_piv[mask[pivot_val]], **kwargs)\n except ZeroDivisionError:\n area = float(\"nan\")\n\n duration = min(a_piv.last_valid_index(), b_piv.last_valid_index())\n duration -= max(a_piv.first_valid_index(),\n b_piv.first_valid_index())\n duration = interval_sum(mask[pivot_val], step=step) / duration\n\n if self._pivot:\n result[self._pivot][pivot_val] = area, duration\n else:\n result = area, duration\n\n return result",
"def test_get_pivot_in_correct_range(self):\n list = [5, 6, 7, 8, 9, 2]\n assert 0 <= get_pivot(list) <= 5 # between the first and last indices",
"def compute_intersecting(voxel, R, kdt, max_segment): \n\tsubset = np.unique(si[kdt.query_radius(voxel, r=R+max_segment)[0]]).astype(np.int)\n\treturn subset[np.array([track_roi_intersection_check(s, voxel, sq_dist_thr=R**2) for s in tracks[subset]])]",
"def intersection_over_union(gt_box, pred_box):\n combined = torch.stack((gt_box, pred_box), dim=1)\n max_0 = torch.max(combined[:, :, 0].T, dim = 0).values\n max_1 = torch.max(combined[:, :, 1].T, dim=0).values\n stacked = torch.stack((gt_box[:, 0] + gt_box[:, 2], pred_box[:, 0] + pred_box[:, 2]), dim=0)\n min_0 = torch.min(stacked, dim=0).values\n stacked = torch.stack((gt_box[:, 1] + gt_box[:, 3], pred_box[:, 1] + pred_box[:, 3]), dim=0)\n min_1 = torch.min(stacked, dim=0).values\n w = min_0 - max_0\n h = min_1 - max_1\n intersection = w*h\n union = gt_box[:,2] * gt_box[:,3] + pred_box[:,2] * pred_box[:,3] - intersection\n iou = intersection / union\n binaryIOU = iou.ge(0.5).int()\n return iou, intersection, union, binaryIOU",
"def flow_condition(p_prime, p3, triple):\n\n all_edges = set(self.arc_info.keys())\n not_p_prime = all_edges.difference(set(p_prime))\n #print(\"Not p_prime: {}\".format(not_p_prime))\n not_p3 = all_edges.difference(set(p3))\n #print(\"Not p_3: {}\".format(not_p3))\n p_prime_alone = list(set(p_prime).intersection(not_p3))\n #print(\"p_prime_alone: {}\".format(p_prime_alone))\n p3_alone = list(set(p3).intersection(not_p_prime))\n #print(\"p3 alone: {}\".format(p3_alone))\n overlap = list(set(p3).intersection(p_prime))\n #print(\"overlap alone: {}\".format(overlap))\n\n #print(\"computing L_wprime and U_wprime\")\n L_wprime, U_wprime = compute_bounds(p_prime_alone, triple)\n #print(\"computing L_w3 and U_w3\")\n L_w3, U_w3 = compute_bounds(p3_alone, triple)\n #print(\"computing L_overlap and U_overlap\")\n L_overlap, U_overlap = compute_bounds(overlap, triple)\n #print(\"L_wprime, U_wprime: {} {}\".format(L_wprime, U_wprime))\n #print(\"L_w3, U_w3: {} {}\".format(L_w3, U_w3))\n #print(\"{} <= {}\".format(L_overlap, U_wprime + U_w3))\n #print(\"{} >= {}\".format(U_overlap, L_wprime + L_w3))\n meets_conditions = (L_wprime <= U_wprime) & \\\n (L_w3 <= U_w3) & \\\n (L_overlap <= U_wprime + U_w3) & \\\n (L_wprime + L_w3 <= U_overlap)\n if meets_conditions:\n w_prime, w3 = center_flows(L_wprime, U_wprime,\n L_w3, U_w3,\n L_overlap, U_overlap)\n # change paths\n # first, delete:\n for index in sorted(triple, reverse=True):\n del self.paths[index]\n del self.weights[index]\n # now, add:\n self.paths.append(p3)\n self.paths.append(p_prime)\n self.weights.append(w3)\n self.weights.append(w_prime)\n # update weights on edges\n self.update_edge_weights()\n self.check_flow()\n self.check_paths()\n return(True)\n else:\n return(False)",
"def _intersection(x, y):\n a, b = x\n c, d = y\n return (d > a) and (c < b)",
"def getPivotPoints(df):\n resistancePivots = []\n supportPivots = []\n prevBarIsGreen = df['Close'].iloc[0] > df['Open'].iloc[0]\n prevClose = df['Close'].iloc[0]\n print(prevBarIsGreen)\n\n for index, row in df.iloc[1:].iterrows():\n if row['Close'] > row['Open'] and prevBarIsGreen == False:\n num = prevClose if prevClose < row['Open'] else row['Open']\n supportPivots.append((index.to_pydatetime(),num, row['Counter'])) # Note: You could have prev red candle as start of pivot (OR?)\n prevBarIsGreen = True\n elif row['Close'] < row['Open'] and prevBarIsGreen:\n num = prevClose if prevClose > row['Open'] else row['Open']\n resistancePivots.append((index.to_pydatetime(),num, row['Counter']))\n prevBarIsGreen = False\n prevClose = row['Close']\n\n return supportPivots, resistancePivots",
"def _intersect(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper > interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower < interval.upper:\n last += 1\n return first, last",
"def within(p, q, r):\r\n return p <= q <= r or r <= q <= p",
"def get_all_crossing_threshold(sig, thresh, front, use_numexpr = False):\n sig1 = sig[:-1]\n sig2 = sig[1:]\n if use_numexpr:\n if front == '+':\n pos_spike, = np.where(numexpr.evaluate( '(sig1<=thresh) & (sig2>thresh)'))\n elif front == '-':\n pos_spike, = np.where(numexpr.evaluate( '(sig1>=thresh) & (sig2<thresh)'))\n else :\n if front == '+':\n pos_spike, = np.where( (sig1 <= thresh) & ( sig2>thresh) )\n elif front == '-':\n pos_spike, = np.where( (sig1 >= thresh) & ( sig2<thresh) )\n return pos_spike+1",
"def _check_bound(self, q):\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n inside_bound = np.all(self._end_effector_low <= xyz) and np.all(xyz <= self._end_effector_high)\n inside_buffer_bound = (np.all(self._end_effector_low + self._box_bound_buffer <= xyz) and \\\n np.all(xyz <= self._end_effector_high - self._box_bound_buffer))\n return inside_bound, inside_buffer_bound, mat, xyz",
"def _intersect_continuous(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper >= interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower <= interval.upper:\n last += 1\n return first, last",
"def inside(i,j,im,h=H): #X\n return i-h >=0 and j-h >=0 and i+h+1<=im.shape[0] and j+h+1<=im.shape[1]",
"def hasPivots(mModule):\n _str_funcName = \"hasPivots\"\n try:\n assert mModule.isModule(),\"%s.hasPivots>>> not a module\"%mModule.getShortName()\n l_coreNames = mModule.coreNames.value\n l_found = []\n l_missing = [] \n\n if 'ball' in l_coreNames and 'ankle' in l_coreNames:\n for attr in d_pivotAttrs['foot']:\n buffer = mModule.templateNull.getMessage(attr)\n if buffer:l_found.append(buffer[0])\n else:\n l_missing.append(attr)\n log.warning(\"%s.hasPivots>>> missing : '%s'\"%(mModule.getShortName(),attr))\n if l_missing:\n log.error(\"%s.hasPivots>>> found: '%s' | missing: '%s'\"%(mModule.getShortName(),l_found,l_missing))\n return False\n return l_found\n return False \n except Exception,error:raise Exception,\"{0} | {1}\".format(_str_funcName,error)",
"def intersects(*args):\r\n if len(args) == 2:\r\n p0, p1, p2, p3 = *args[0], *args[1]\r\n elif len(args) == 4:\r\n p0, p1, p2, p3 = args\r\n else:\r\n raise AttributeError(\"Pass 2, 2-pnt lines or 4 points to the function\")\r\n #\r\n # ---- First check ---- np.cross(p1-p0, p3-p2 )\r\n p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y = *p0, *p1, *p2, *p3\r\n s10_x = p1_x - p0_x\r\n s10_y = p1_y - p0_y\r\n s32_x = p3_x - p2_x\r\n s32_y = p3_y - p2_y\r\n denom = s10_x * s32_y - s32_x * s10_y\r\n if denom == 0.0:\r\n return False\r\n #\r\n # ---- Second check ---- np.cross(p1-p0, p0-p2 )\r\n den_gt0 = denom > 0\r\n s02_x = p0_x - p2_x\r\n s02_y = p0_y - p2_y\r\n s_numer = s10_x * s02_y - s10_y * s02_x\r\n if (s_numer < 0) == den_gt0:\r\n return False\r\n #\r\n # ---- Third check ---- np.cross(p3-p2, p0-p2)\r\n t_numer = s32_x * s02_y - s32_y * s02_x\r\n if (t_numer < 0) == den_gt0:\r\n return False\r\n #\r\n if ((s_numer > denom) == den_gt0) or ((t_numer > denom) == den_gt0):\r\n return False\r\n #\r\n # ---- check to see if the intersection point is one of the input points\r\n t = t_numer / denom\r\n # substitute p0 in the equation\r\n x = p0_x + (t * s10_x)\r\n y = p0_y + (t * s10_y)\r\n # be careful that you are comparing tuples to tuples, lists to lists\r\n if sum([(x, y) == tuple(i) for i in [p0, p1, p2, p3]]) > 0:\r\n return False\r\n return True",
"def pivot(self):\n\n # Pick random pivot node\n pivot = np.random.randint(self.n_v)\n\n # Get list of neighbors\n neighbors = np.asarray(list(nx.all_neighbors(self.G, pivot)))\n\n # Return if no neighbors are available\n if len(neighbors) == 0:\n return pivot, pivot, pivot\n\n # Collect all values\n values = np.asarray(list(nx.get_node_attributes(self.G, 'value').values()))\n\n # Save pivot value and set to NaN\n pivot_val = values[pivot]\n values[pivot] = np.nan\n\n # Find candidate\n candidate = np.nanargmin(np.abs(values - pivot_val))\n\n # Compute outcast\n neighbors_values = values[neighbors]\n outcast = neighbors[np.argmax(np.abs(neighbors_values - pivot_val))]\n\n # Return pivot and candidate\n return pivot, candidate, outcast",
"def check_convexity(hull, used_pivots):\n for instance in used_pivots:\n if not check_inside_hull(hull, instance):\n return False\n return True",
"def intersect(self, *args, **kwargs): # real signature unknown\n pass",
"def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"",
"def relative_interior_contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for eq in self.equation_generator():\n if not eq.contains(p):\n return False\n\n for ine in self.inequality_generator():\n if not ine.interior_contains(p):\n return False\n\n return True",
"def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)",
"def peak_valley_pivots_candlestick(close, high, low, up_thresh, down_thresh):\n if down_thresh > 0:\n raise ValueError('The down_thresh must be negative.')\n\n initial_pivot = _identify_initial_pivot(close, up_thresh, down_thresh)\n\n t_n = len(close)\n pivots = np.zeros(t_n, dtype='i1')\n pivots[0] = initial_pivot\n\n # Adding one to the relative change thresholds saves operations. Instead\n # of computing relative change at each point as x_j / x_i - 1, it is\n # computed as x_j / x_1. Then, this value is compared to the threshold + 1.\n # This saves (t_n - 1) subtractions.\n # up_thresh += 1\n # down_thresh += 1\n\n trend = -initial_pivot\n last_pivot_t = 0\n if(trend == -1):\n last_pivot_x = high[0]\n else:\n last_pivot_x = low[0]\n for t in range(1, len(close)):\n xl = low[t]\n xh = high[t]\n rl = 1- last_pivot_x / xl\n rh = xh / last_pivot_x -1\n if trend == -1:\n # x = low[t]\n # r = x / last_pivot_x\n if rh >= up_thresh:\n pivots[last_pivot_t] = trend#\n trend = 1\n #last_pivot_x = x\n last_pivot_x = high[t]\n last_pivot_t = t\n elif xl < last_pivot_x:\n last_pivot_x = xl\n last_pivot_t = t\n else:\n # x = high[t]\n # r = x / last_pivot_x\n if rl <= down_thresh:\n pivots[last_pivot_t] = trend\n trend = -1\n #last_pivot_x = x\n last_pivot_x = low[t]\n last_pivot_t = t\n elif xh > last_pivot_x:\n last_pivot_x = xh\n last_pivot_t = t\n\n\n\n if last_pivot_t == t_n-1:\n pivots[last_pivot_t] = trend\n elif pivots[t_n-1] == 0:\n pivots[t_n-1] = trend\n\n return pivots",
"def an_intersection(v1, b1):\n try:\n return intersection(v1, b1, np.array([1,1]), 0)\n except np.linalg.linalg.LinAlgError:\n print v1\n return intersection(v1, b1, np.array([-1,1]), 0)",
"def _intersection(self, ix, iy):\n im = ix + iy # cv2.bitwise_or(ix, iy)\n # freq = cytoolz.frequencies(im.ravel())\n # hx = freq.get(1, 0) # x alone\n # hy = freq.get(2, 0) # y alone\n # ha = freq.get(3, 0) # x & y\n\n freq = np.bincount(im.ravel().astype(np.int64))\n hx = freq[1] if len(freq) > 1 else 0\n hy = freq[2] if len(freq) > 2 else 0\n ha = freq[3] if len(freq) > 3 else 0\n\n return hx, hy, ha"
] | [
"0.5798268",
"0.57260567",
"0.5718751",
"0.5637867",
"0.548589",
"0.5389251",
"0.5360851",
"0.53357774",
"0.52946544",
"0.5293057",
"0.52463186",
"0.523002",
"0.51608384",
"0.5152872",
"0.51508623",
"0.5137729",
"0.51251626",
"0.5122858",
"0.50904095",
"0.5083667",
"0.50527513",
"0.504928",
"0.5036801",
"0.5027357",
"0.5020363",
"0.50102276",
"0.50010985",
"0.49967554",
"0.4992153",
"0.49895287"
] | 0.71306324 | 0 |
Interpolate to xout. xout must be a float or an ndarray of floats. If xout.size > serial_cutoff, use the parallel version. | def __call__(self, xout, fout=None):
if isinstance(xout, np.ndarray):
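            # Array input: pick the parallel kernel above the size cutoff, otherwise the serial one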
if xout.size > serial_cutoffs[1]:
func = PAR_INTERP_1D[self.k]
else:
func = SER_INTERP_1D[self.k]
m = int(np.prod(xout.shape))
copy_made = False
if fout is None:
_out = np.empty(m, dtype=self.dtype)
else:
_out = fout.ravel()
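                # ravel() had to copy (result owns its data), so results must be written back into fout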
if _out.base is None:
copy_made = True
_xout = xout.ravel()
func(self._f, _xout, _out, self.a, self.h, self.n, self.p, self._o, self.lb, self.ub)
if copy_made:
fout[:] = _out
return _out.reshape(xout.shape)
else:
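            # Scalar input: evaluate with the serial kernel on a length-1 array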
func = SER_INTERP_1D[self.k]
_xout = np.array([xout],)
_out = np.empty(1)
func(self._f, _xout, _out, self.a, self.h, self.n, self.p, self._o, self.lb, self.ub)
return _out[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, xout, yout, fout=None):\n if isinstance(xout, np.ndarray):\n if xout.size > serial_cutoffs[2]:\n func = PAR_INTERP_2D[self.k]\n else:\n func = SER_INTERP_2D[self.k]\n m = int(np.prod(xout.shape))\n copy_made = False\n if fout is None:\n _out = np.empty(m, dtype=self.dtype)\n else:\n _out = fout.ravel()\n if _out.base is None:\n copy_made = True\n _xout = xout.ravel()\n _yout = yout.ravel()\n func(self._f, _xout, _yout, _out, self.a, self.h, self.n, self.p, self._o, self.lb, self.ub)\n if copy_made:\n fout[:] = _out\n return _out.reshape(xout.shape)\n else:\n func = SER_INTERP_2D[self.k]\n _xout = np.array([xout],)\n _yout = np.array([yout],)\n _out = np.empty(1)\n func(self._f, _xout, _yout, _out, self.a, self.h, self.n, self.p, self._o, self.lb, self.ub)\n return _out[0]",
"def interpolate_parallel(self, variables, **kwargs):\n\n if variables is ...:\n variables = [v for v in self.data]\n\n if isinstance(variables, str):\n variables = [variables]\n if isinstance(variables, tuple):\n variables = list(variables)\n\n # Need to start with a Dataset with attrs as merge() drops the attrs of the\n # passed-in argument.\n # Make sure the first variable has all dimensions so we don't lose any\n # coordinates\n def find_with_dims(first_var, dims):\n if first_var is None:\n dims = set(dims)\n for v in variables:\n if set(self.data[v].dims) == dims:\n first_var = v\n break\n return first_var\n\n tcoord = self.data.metadata.get(\"bout_tdim\", \"t\")\n zcoord = self.data.metadata.get(\"bout_zdim\", \"z\")\n first_var = find_with_dims(None, self.data.dims)\n first_var = find_with_dims(first_var, set(self.data.dims) - set(tcoord))\n first_var = find_with_dims(first_var, set(self.data.dims) - set(zcoord))\n first_var = find_with_dims(\n first_var, set(self.data.dims) - set([tcoord, zcoord])\n )\n if first_var is None:\n raise ValueError(\n f\"Could not find variable to interpolate with both \"\n f\"{self.data.metadata.get('bout_xdim', 'x')} and \"\n f\"{self.data.metadata.get('bout_ydim', 'y')} dimensions\"\n )\n variables.remove(first_var)\n ds = self.data[first_var].bout.interpolate_parallel(\n return_dataset=True, **kwargs\n )\n xcoord = ds.metadata.get(\"bout_xdim\", \"x\")\n ycoord = ds.metadata.get(\"bout_ydim\", \"y\")\n for var in variables:\n da = self.data[var]\n if xcoord in da.dims and ycoord in da.dims:\n ds = ds.merge(\n da.bout.interpolate_parallel(return_dataset=True, **kwargs)\n )\n elif ycoord not in da.dims:\n ds[var] = da\n # Can't interpolate a variable that depends on y but not x, so just skip\n\n # Apply geometry\n ds = apply_geometry(ds, ds.geometry)\n\n return ds",
"def test_isentropic_pressure_tmp_out_interp():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296., 297.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 291.4579 * units.kelvin\n assert_almost_equal(isentprs[1][1], truetmp, 3)",
"def forward(self, x):\n\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x",
"def __call__(self, xout, yout, zout, fout=None):\n if isinstance(xout, np.ndarray):\n if xout.size > serial_cutoffs[3]:\n func = PAR_INTERP_3D[self.k]\n else:\n func = SER_INTERP_3D[self.k]\n m = int(np.prod(xout.shape))\n copy_made = False\n if fout is None:\n _out = np.empty(m, dtype=self.dtype)\n else:\n _out = fout.ravel()\n if _out.base is None:\n copy_made = True\n _xout = xout.ravel()\n _yout = yout.ravel()\n _zout = zout.ravel()\n func(self._f, _xout, _yout, _zout, _out, self.a, self.h, self.n, self.p, self._o, self.lb, self.ub)\n if copy_made:\n fout[:] = _out\n return _out.reshape(xout.shape)\n else:\n func = SER_INTERP_3D[self.k]\n _xout = np.array([xout],)\n _yout = np.array([yout],)\n _zout = np.array([zout],)\n _out = np.empty(1)\n func(self._f, _xout, _yout, _zout, _out, self.a, self.h, self.n, self.p, self._o, self.lb, self.ub)\n return _out[0]",
"def _call(self, x):\n u = functional.tmp_u_prox\n v = functional.tmp_v_prox\n\n # Running generalized Sinkhorn iterations\n for j in range(functional.niter):\n # Safe-guarded u-update, to avoid divide-by-zero error.\n u_old = u.copy()\n tmp1 = functional.K_op(v)\n if np.min(tmp1) < 1e-30 or np.max(tmp1) > 1e+50:\n print('Numerical instability, truncation in Transport prox (Kv)',\n str(np.min(tmp1)), str(np.max(tmp1)))\n\n tmp = np.fmax(tmp1, 1e-30)\n\n\n u = functional.mu0 / tmp\n if np.min(u) < 1e-30 or np.max(u) > 1e+50:\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n # Safe-guarded v-update, to avoid divide-by-zero error.\n v_old = v.copy()\n\n tmp3 = functional.K_op_adjoint(u)\n if np.min(tmp3) < 1e-30 or np.max(tmp3) > 1e+50:\n print('Truncation in Transport prox (KTu)',\n str(np.min(tmp3)), str(np.max(tmp3)))\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n tmp4 = (self.const * tmp3 * np.exp(self.const * x))\n\n if np.min(tmp4) < 1e-30 or np.max(tmp4) > 1e+200:\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n v = np.exp(self.const * x - lambertw_fulfix(tmp4))\n\n v1 = np.exp(self.const * x - scipy.special.lambertw(\n tmp4))\n if (v-v1).norm() > 1e-10:\n print('diff pga ny lambderw omega funciton',\n str((v-v1).norm()))\n print('v (min/max)', str(np.min(v)), str(np.max(v)))\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n # If the updates in both u and v are small, break the loop\n if ((np.log(v)-np.log(v_old)).norm() < 1e-8 and\n (np.log(u)-np.log(u_old)).norm() < 1e-8):\n break\n\n # Store the u and v in the internal temporary variables of the\n # functional\n functional.tmp_u_prox = u\n functional.tmp_v_prox = v\n\n return x - self.sigma * functional.epsilon * np.log(v)",
"def test_isentropic_pressure_tmp_out():\n lev = [100000., 95000., 90000., 85000.] * units.Pa\n tmp = np.ones((4, 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[2, :] = 290.\n tmp[3, :] = 288.\n tmpk = tmp * units.kelvin\n isentlev = [296.] * units.kelvin\n isentprs = isentropic_interpolation(isentlev, lev, tmpk, temperature_out=True)\n truetmp = 296. * units.kelvin\n assert_almost_equal(isentprs[1], truetmp, 3)",
"def interpolate_parallel(self, N):\n with mp.Pool(processes=mp.cpu_count()) as pool: \n self.new_data = np.array(pool.starmap(interpolate_spline, \n zip(self.data, itertools.repeat(N))))",
"def select(ds, longitude, latitude, varname, T=None, Z=None, \n iT=None, iZ=None, extrap=False, extrap_val=None, locstream=False):\n \n assert not ((Z is not None) and (iZ is not None))\n assert not ((T is not None) and (iT is not None))\n \n if (isinstance(longitude, int)) or (isinstance(longitude, float)):\n longitude = [longitude]\n if (isinstance(latitude, int)) or (isinstance(latitude, float)):\n latitude = [latitude]\n latitude = np.asarray(latitude)\n longitude = np.asarray(longitude)\n\n if extrap:\n extrap_method = \"nearest_s2d\"\n else:\n extrap_method = None\n \n \n cf_var = get_var_cf(ds, varname)\n \n dr = ds.cf[cf_var]\n \n if not extrap:\n assertion = 'the input longitude range is outside the model domain'\n assert (longitude.min() >= dr.cf['longitude'].min()) and (longitude.max() <= dr.cf['longitude'].max()), assertion\n assertion = 'the input latitude range is outside the model domain'\n assert (latitude.min() >= dr.cf['latitude'].min()) and (latitude.max() <= dr.cf['latitude'].max()), assertion\n \n ## Horizontal interpolation ##\n \n # grid of lon/lat to interpolate to, with desired ending attributes\n if latitude.ndim == 1:\n ds_out = xr.Dataset(\n {\n \"lat\": ([\"lat\"], latitude, dict(axis=\"Y\", units='degrees_north', standard_name=\"latitude\")),\n \"lon\": ([\"lon\"], longitude, dict(axis=\"X\", units='degrees_east', standard_name=\"longitude\")),\n }\n )\n elif latitude.ndim == 2:\n ds_out = xr.Dataset(\n {\n \"lat\": ([\"Y\",\"X\"], latitude, dict(units='degrees_north', standard_name=\"latitude\")),\n \"lon\": ([\"Y\",\"X\"], longitude, dict(units='degrees_east', standard_name=\"longitude\")),\n }\n )\n \n\n # set up regridder, which would work for multiple interpolations if desired\n regridder = xe.Regridder(dr, ds_out, \"bilinear\", extrap_method=extrap_method, locstream_out=locstream)\n\n # do regridding\n dr_out = regridder(dr, keep_attrs=True)\n \n \n ## Time and depth interpolation or iselection ##\n if iZ is not None:\n with xr.set_options(keep_attrs=True):\n dr_out = dr_out.cf.isel(Z=iZ)\n \n if Z is not None:\n with xr.set_options(keep_attrs=True):\n dr_out = dr_out.cf.interp(Z=Z)\n\n if iT is not None:\n with xr.set_options(keep_attrs=True):\n dr_out = dr_out.cf.isel(T=iT)\n \n if T is not None:\n with xr.set_options(keep_attrs=True):\n dr_out = dr_out.cf.interp(T=T)\n \n if extrap_val is not None:\n # returns 0 outside the domain by default. Assumes that no other values are exactly 0\n # and replaces all 0's with extrap_val if chosen.\n dr_out = dr_out.where(dr_out != 0, extrap_val)\n \n return dr_out",
"def interpolate(self, x_pivot, f_pivot):\n interpolation = interp1d(x_pivot, f_pivot,\n kind=self.kind, bounds_error=False)\n return interpolation",
"def __call__(self,x):\n\n arr = np.array(x,copy=False,dtype=float)\n return self._filterfunc(arr,*self.parvals)",
"def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)",
"def interpolate_bigger(arrayin,ny,nx=None):\r\n if nx == None :\r\n nx = ny\r\n arrayout = np.array(arrayin,dtype=np.complex128)\r\n arrayout = fft2(arrayout)\r\n arrayout = padd(arrayout,ny,nx)\r\n arrayout = ifft2(arrayout)\r\n return np.array(arrayout,dtype=arrayin.dtype)",
"def __call__(self,x):\n arr = np.array(x,copy=False,dtype=float)\n res = self._filterfunc(arr.ravel(),*self.parvals)\n return res.reshape(arr.shape)",
"def forward(self, inputs, *args):\n\n x = equiangular_calculator(inputs, self.ratio)\n x = x.permute(0, 3, 1, 2)\n x = F.interpolate(x, scale_factor=(self.kernel_size, self.kernel_size), mode=\"nearest\")\n x = reformat(x)\n return x",
"def Interpolate(ax, ay, x, npoints):\r\n\r\n assert(ax[1]>ax[0]) # test for ascending order, at least for first point\r\n \r\n if (verbose): \r\n print 'interpolate/extrapolate to x=',x,', npoints=',npoints\r\n\r\n # Find best data points to use, based on which are closest to \r\n # requested point x. Will find <npoints> (or fewer) best data points and \r\n # return as an array.\r\n ibest = FindBest(ax,x,npoints)\r\n npoints = len(ibest) # make sure npoints is updated in case was reduced\r\n if (verbose): \r\n print 'ibest',ibest\r\n\r\n # Build the polynomial y(x), evaluated at the point x.\r\n y = 0.0\r\n for i in range(npoints): # do i=0,npoints-1\r\n li = 1.0\r\n ni = ibest[i] # index to ith best point\r\n # build up li[x] term, evaluated at the point x\r\n for j in range(npoints): # do j=0,npoints-1\r\n if (i != j): # exclude j=i term\r\n nj = ibest[j] # index to jth best point\r\n li = li*(x-ax[nj])/(ax[ni]-ax[nj])\r\n y = y+ay[ni]*li\r\n \r\n return y",
"def getxx(self,whichsol_,xx_):\n _xx_minlength = self.getnumvar()\n if self.getnumvar() > 0 and xx_ is not None and len(xx_) != self.getnumvar():\n raise ValueError(\"Array argument xx is not long enough: Is %d, expected %d\" % (len(xx_),self.getnumvar()))\n if isinstance(xx_,numpy.ndarray) and not xx_.flags.writeable:\n raise ValueError(\"Argument xx must be writable\")\n if xx_ is None:\n raise ValueError(\"Argument xx may not be None\")\n if isinstance(xx_, numpy.ndarray) and xx_.dtype is numpy.dtype(numpy.float64) and xx_.flags.contiguous:\n _xx_copyarray = False\n _xx_tmp = ctypes.cast(xx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif xx_ is not None:\n _xx_copyarray = True\n _xx_np_tmp = numpy.zeros(len(xx_),numpy.dtype(numpy.float64))\n _xx_np_tmp[:] = xx_\n assert _xx_np_tmp.flags.contiguous\n _xx_tmp = ctypes.cast(_xx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _xx_copyarray = False\n _xx_tmp = None\n \n res = __library__.MSK_XX_getxx(self.__nativep,whichsol_,_xx_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _xx_copyarray:\n xx_[:] = _xx_np_tmp",
"def forward(self, x):\r\n # return x.repeat_interleave(self.kernel_size, dim=1)\r\n x = x.permute(0, 2, 1)\r\n x = torch.nn.functional.interpolate(x, scale_factor=self.kernel_size, mode='nearest')\r\n return x.permute(0, 2, 1)",
"def test_interpolation(self):\n\n ndx1, ndx2 = self.find_partition()\n tessellation = Delaunay(self.grid[ndx2,:])\n\n # initialisation\n results = []\n ndim = self.ndim+1\n\n for j in ndx1:\n nmodels = len(self.tracks[j].models)\n aResult = np.empty((nmodels,ndim+nglb+6),dtype=gtype)\n pt = self.tracks[j].params + [0.0,]\n\n for i in range(nmodels):\n aModel1 = self.tracks[j].models[i]\n pt[-1] = aModel1.glb[iage]\n aModel2 = interpolate_model(self,pt,tessellation,ndx2)\n aResult[i,0:ndim] = pt\n if (aModel2 is None):\n aResult[i,ndim:ndim+nglb+6] = np.nan\n else:\n aResult[i,ndim:ndim+nglb+6] = compare_models(aModel1,aModel2)\n\n results.append(aResult)\n\n return results, ndx1, ndx2, tessellation",
"def forward(self, x, *args):\n # return x.repeat_interleave(self.kernel_size, dim=1)\n x = x.permute(0, 2, 1)\n x = torch.nn.functional.interpolate(x, scale_factor=self.kernel_size, mode='nearest')\n return x.permute(0, 2, 1)",
"def interpData(ampData,xVals,zVals):\r\n\r\n #Find the max and min of xVals and zVals to find the limits of the interpolation grid\r\n xmin=np.min(xVals)\r\n xmax=np.max(xVals)\r\n zmin=np.min(zVals)\r\n zmax=np.max(zVals)\r\n\r\n #Create the target grid of the interpolation\r\n xi=np.linspace(xmin,xmax,2001)\r\n zi=np.linspace(zmin,zmax,2001)\r\n xi,zi=np.meshgrid(xi,zi)\r\n\r\n #Interpolate the data\r\n interpData=griddata((xVals,zVals),ampData,(xi,zi),method='linear')\r\n\r\n return interpData,xi,zi",
"def InOutCubicFunction_getValueAt(*args):\n return _osgAnimation.InOutCubicFunction_getValueAt(*args)",
"def effective_latitude_xr(self):\n\n grid_areas_ddf = self.grid_area_xr.to_dataframe().reset_index()\n grid_areas_ddf = grid_areas_ddf[\n ['temp_bucket', 'cdf_eff_lat_deg', 'time']\n ]\n\n merge_ddf = (\n self.data_array_dask_df\n .reset_index(drop=True)\n #.repartition(npartitions=100)\n .merge(grid_areas_ddf,\n on=['time', 'temp_bucket'],\n how='left')\n )\n\n eff_lat_xr = self.dask_data_to_xarray(merge_ddf,\n var='cdf_eff_lat_deg')\n\n eff_lat_xr.name = 'effective_latitude'\n\n return eff_lat_xr",
"def calc_x(x, ALD,PL): ## jit works\n\n row, col = cuda.grid(2)\n if row < ALD.shape[0] and col < ALD.shape[1]:\n if PL[row,col] != 0 :\n x[row,col] = (ALD[row,col] / PL[row,col]) - 1",
"def interpolate( h, x, y=None, z=None, outOfRangeValue=30 ):\n\n if x != x: return outOfRangeValue\n if x <= h.GetXaxis().GetBinCenter(1) or x >= h.GetXaxis().GetBinCenter(h.GetXaxis().GetNbins()): return outOfRangeValue\n \n if y != None:\n if y != y: return outOfRangeValue\n if y <= h.GetYaxis().GetBinCenter(1) or y >= h.GetYaxis().GetBinCenter(h.GetYaxis().GetNbins()): return outOfRangeValue\n if z != None:\n if z != z: return outOfRangeValue\n if z <= h.GetZaxis().GetBinCenter(1) or z >= h.GetZaxis().GetBinCenter(h.GetZaxis().GetNbins()): return outOfRangeValue\n \n if y != None and z != None: return h.Interpolate( x, y, z )\n if y != None: return h.Interpolate( x, y )\n return h.Interpolate( x )",
"def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan",
"def test_isentropic_interpolation_dataarray():\n temp = xr.DataArray([[[296.]], [[292.]], [[290.]], [[288.]]] * units.K,\n dims=('isobaric', 'y', 'x'),\n coords={'isobaric': (('isobaric',), [1000., 950., 900., 850.],\n {'units': 'hPa'}),\n 'time': '2020-01-01T00:00Z'})\n\n rh = xr.DataArray([[[100.]], [[80.]], [[40.]], [[20.]]] * units.percent,\n dims=('isobaric', 'y', 'x'), coords={\n 'isobaric': (('isobaric',), [1000., 950., 900., 850.], {'units': 'hPa'}),\n 'time': '2020-01-01T00:00Z'})\n\n isentlev = [296., 297.] * units.kelvin\n press, rh_interp = isentropic_interpolation(isentlev, temp.isobaric, temp, rh)\n\n assert_array_almost_equal(press, np.array([[[1000.]], [[936.213]]]) * units.hPa, 3)\n assert_array_almost_equal(rh_interp, np.array([[[100.]], [[69.19706]]]) * units.percent, 3)",
"def interpolate(self, t):\n try:\n n = len(self.t)\n except TypeError:\n # self.t is not a sequence. Re-raise the exception\n # with an appropriate error message.\n raise TypeError(\"Please run the simulation first\")\n else:\n if (n < 2):\n raise ValueError(\"Not enough simulation steps\")\n tmin = self.t[0]\n tmax = self.t[n-1]\n if t < tmin or t > tmax:\n raise ValueError(\"Requested time is outside the simulated interval\")\n if self.adaptiveStepSize:\n nbelow = bisect_right(self.t, t) - 1 \n else:\n dt = (tmax - tmin)*1.0/(n - 1)\n nbelow = int(math.floor((t - tmin)/dt))\n nabove = nbelow + 1\n if nabove >= n:\n nabove = n - 1\n nbelow = nabove - 1\n x = interpolate_Hermite(t, self.t[nbelow], self.x[nbelow], self.v[nbelow],\n self.t[nabove], self.x[nabove], self.v[nabove])\n v = interpolate_linear(t, self.t[nbelow], self.v[nbelow],\n self.t[nabove], self.v[nabove])\n return x, v",
"def __call__(self,\n x0: Union[float, np.ndarray],\n xt: Union[float, np.ndarray],\n t: float) -> Union[float, np.ndarray]:\n raise NotImplementedError",
"def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2"
] | [
"0.55407476",
"0.55230695",
"0.5466127",
"0.5298338",
"0.5192844",
"0.5180324",
"0.51104575",
"0.5090676",
"0.50120974",
"0.49926928",
"0.4980818",
"0.49528757",
"0.49429956",
"0.49377602",
"0.49207413",
"0.4907824",
"0.490474",
"0.49029276",
"0.49015373",
"0.48381254",
"0.48274744",
"0.47445643",
"0.473819",
"0.47133818",
"0.46874663",
"0.46873608",
"0.46872124",
"0.4678946",
"0.46607667",
"0.46437594"
] | 0.5808691 | 0 |
Test navigates through the 'Documentation' tabs and verifies the tab links by asserting the expected titles against the actual ones. | def test_documentation_path_links(self):
main_page = DogMainPage(self.driver)
dog_page = main_page.navigate_documentation()
# Switch to 'List all breeds' tab
all_breeds_page = dog_page.switch_tab(dog_page.ALL_BREEDS)
all_breeds_expected = all_breeds_page.get_expected_header()
all_breeds_header = all_breeds_page.get_header()
# Assert the title to verify the page
self.assertEqual(all_breeds_expected, all_breeds_header,
('%s expected, instead found: %s. Page is wrong' % (all_breeds_expected, all_breeds_header)))
# Switch to 'Random image' tab
random_page = dog_page.switch_tab(dog_page.RANDOM)
random_expected_header = random_page.get_expected_header()
random_header = random_page.get_header()
# Assert the title to verify the page
self.assertEqual(random_expected_header, random_header,
('%s expected, instead found: %s. Page is wrong' % (random_expected_header, random_header)))
# Switch to 'By breed' tab
breed_page = dog_page.switch_tab(dog_page.BREED)
breed_expected_header = breed_page.get_expected_header()
breed_header = breed_page.get_header()
# Assert the title to verify the page
self.assertEqual(breed_expected_header, breed_header,
('%s expected, instead found: %s. Page is wrong' % (breed_expected_header, breed_header)))
# Switch to 'By sub-breed' tab
sub_breed_page = dog_page.switch_tab(dog_page.SUB_BREED)
sub_expected_header = sub_breed_page.get_expected_header()
sub_header = sub_breed_page.get_header()
# Assert the title to verify the page
self.assertEqual(sub_expected_header, sub_header,
('%s expected, instead found: %s. Page is wrong' % (sub_expected_header, sub_header))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_menu_path_links(self):\r\n main_page = DogMainPage(self.driver)\r\n # Navigate to Title (main) page\r\n title_page = main_page.navigate_main()\r\n title_expected_header = title_page.get_expected_header()\r\n title_header = title_page.get_header()\r\n # Assert the title by sub-string to verify page\r\n self.assertTrue(title_expected_header in title_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (title_expected_header, title_header)))\r\n # Navigate to 'Documentation' page\r\n doc_page = main_page.navigate_documentation()\r\n doc_expected = doc_page.get_expected_header()\r\n doc_header = doc_page.get_header()\r\n # Assert the title to verify page\r\n self.assertEqual(doc_expected, doc_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (doc_expected, doc_header)))\r\n # Navigate to 'Breed list' page\r\n breed_list_page = main_page.navigate_breeds_list()\r\n breed_list_expected_header = breed_list_page.get_expected_header()\r\n breed_list_header = breed_list_page.get_header()\r\n # Assert the title to verify page\r\n self.assertEqual(breed_list_expected_header, breed_list_header,\r\n ('%s expected, instead found: %s. Page is wrong' %\r\n (breed_list_expected_header, breed_list_header)))\r\n # Navigate to 'About' page\r\n about_page = main_page.navigate_about()\r\n about_expected_header = about_page.get_expected_header()\r\n about_header = about_page.get_header()\r\n # Assert the title to verify page\r\n self.assertEqual(about_expected_header, about_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (about_expected_header, about_header)))",
"def test_navigates_to_about_page_then_index_page_then_about_page_success(w_driver):\n #1.) Navigate to about page, click link for index page\n w_driver.get('localhost:8000/about')\n\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n #find index page info\n results=w_driver.page_source\n text_found1=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n #2.) Click link for index page\n element=w_driver.find_element_by_link_text('About our team').click()\n #find about page info\n results=w_driver.page_source\n text_found2=re.search(r'About the Kasner Search Engine',results)\n\n #3.) Verify info we found is not None\n assert(text_found1 != None)\n assert(text_found2 != None)",
"def runTest(self):\n pagename = self._tester.create_wiki_page(content=\"\"\"\n{{{\n#!rst\nHello\n=====\n\n.. trac:: wiki:WikiStart Some Link\n}}}\n \"\"\")\n self._tester.go_to_wiki(pagename)\n tc.find(\"Some Link\")\n tc.find(r'<h1[^>]*>Hello')\n tc.notfind(\"wiki:WikiStart\")\n tc.follow(\"Some Link\")\n tc.url(self._tester.url + \"/wiki/WikiStart\")",
"def test_navigates_to_index_page_then_about_page_then_index_page_success(w_driver):\n #1.) Navigate to index page, click link for about page\n w_driver.get('localhost:8000')\n\n element=w_driver.find_element_by_link_text('About our team').click()\n #find about page info\n results=w_driver.page_source\n text_found1=re.search(r'About the Kasner Search Engine',results)\n\n #2.) Click link for index page\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n #find index page info\n results=w_driver.page_source\n text_found2=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n #3.) Verify info we found is not None\n assert(text_found1 != None)\n assert(text_found2 != None)",
"def test_scrape(self):\n self.assertEqual(self.scraped.title, 'Heading!')\n self.assertEqual(self.scraped.link_text, 'Go to Google')\n self.assertEqual(self.scraped.link_url, 'http://Google.com')",
"def test_navigates_to_index_page_about_page(w_driver):\n #Index Page\n w_driver.get('localhost:8000')\n results=w_driver.page_source\n text_found1=re.search(r'Welcome to the Kasner Micro Search Engine', results)\n\n #About Page\n w_driver.get('localhost:8000/about')\n results=w_driver.page_source\n text_found2=re.search(r'About the Kasner Search Engine',results)\n \n assert(text_found1 != None and text_found2 != None)",
"def test_title(names):",
"def test_title(self):\n self.driver.get(\"https://demo.testchameleon.com/\")\n assert \"Gentellela Alela!\" in self.driver.title",
"def test_navigates_to_index_page_about_page_index_page(w_driver):\n #Index Page\n w_driver.get('localhost:8000')\n results=w_driver.page_source\n text_found1=re.search(r'Welcome to the Kasner Micro Search Engine', results)\n\n #About Page\n w_driver.get('localhost:8000/about')\n results=w_driver.page_source\n text_found2=re.search(r'About the Kasner Search Engine',results)\n\n #Index Page\n w_driver.get('localhost:8000')\n results=w_driver.page_source\n text_found3=re.search(r'Welcome to the Kasner Micro Search Engine', results)\n\n assert(text_found1 != None)\n assert(text_found2 != None)\n assert(text_found3 != None)",
"def test_basic():\n browser = Browser(URL, \"Firefox\")\n # actions = Actions(browser)\n delay_between_pages = 1.5\n navigation_bar = NavigationBar(browser)\n # sequential clicking on all new elements\n navigation_bar.show_printers()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Printers\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_scanners()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Scanners\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_webcams()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Web Cameras\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_all_phones_and_pdas()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Phones & PDAs\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_pdas()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"PDAs\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_phones()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Phones\"\n\n time.sleep(delay_between_pages)\n\n time.sleep(3)\n browser.shutdown()",
"def test_navigates_to_index_page_link_about_page(w_driver):\n w_driver.get('localhost:8000')\n \n element=w_driver.find_element_by_link_text('About our team').click()\n results=w_driver.page_source\n text_found=re.search(r'About the Kasner Search Engine',results)\n\n assert(text_found != None)",
"def test_navigates_to_about_page_link_index_page(w_driver):\n w_driver.get('localhost:8000/about')\n\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n results=w_driver.page_source\n text_found=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n assert(text_found != None)",
"def test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})",
"def test_with_links_cases_and_issues():\n pass",
"def testDocTest(self):\n import doctest\n failures, tests = doctest.testmod(m=windowlayout)\n del tests # Just to remove the eclipse warning on the unused variable.\n self.assertEquals(failures, 0)",
"def test_toc(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/toc.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data:\n if 'items' in item:\n self.assertEqual(\n item['items'][0]['name'],\n 'example.enum_type.EnumFoo'\n )\n break",
"def test_viewTermsOfServicePage(self):\r\n print('========================================================================')\r\n print('Test for check redirect on TermsOfService page after link TermsOfService click')\r\n #Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n #cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n self.reg_page.click_terms_lnk()\r\n terms_page = page_TermsAndService.Page_TermsAndService(driver)\r\n\r\n\r\n driver.get(terms_page.TERMS_URL)\r\n wait = WebDriverWait(driver, 20)\r\n element = wait.until(EC.title_is(terms_page.get_terms_title()))\r\n assert terms_page.get_terms_title() == 'Snovio terms and conditions', \"Terms title page doesn't match\"\r\n\r\n print('--------- SUCCESS test_viewTermsOfServicePage-----------')\r\n driver.quit()",
"def test_Navigate_To_Formy_Home(browser):\n # Create an instanced Class object from the FormyHomePage Class\n home_page = FormyHomePage(browser)\n # Call the FormyHomePage load() method and navigate to the Formy Home Page\n home_page.load()\n \n \"\"\" Assert / THEN Section \"\"\"\n # Verify that the Formy Home Page Heading Text matches the WELCOME_GREETING_TEXT variable\n assert home_page.welcome_greeting_text() == WELCOME_GREETING_TEXT",
"def test_page_links(inspire_app):\n create_record(\"lit\", data={\"titles\": [{\"title\": \"Solenoid\"}]})\n create_record(\"lit\", data={\"titles\": [{\"title\": \"Solenoid\"}]})\n with inspire_app.test_client() as client:\n # Limit records\n response = client.get(\n \"/api/literature\", query_string=dict(size=1, page=1, q=\"Solenoid\")\n )\n response_json = response.json\n assert len(response_json[\"hits\"][\"hits\"]) == 1\n\n data = response_json[\"links\"]\n assert \"self\" in data\n assert \"next\" in data\n assert \"prev\" not in data\n\n # Assert next URL before calling it\n first_url = data[\"self\"]\n next_url = data[\"next\"]\n parsed_url = parse_url(next_url)\n assert parsed_url[\"qs\"][\"size\"] == [\"1\"]\n assert parsed_url[\"qs\"][\"page\"] == [\"2\"]\n\n # Access next URL\n response = client.get(next_url)\n response_json = response.json\n assert len(response_json[\"hits\"][\"hits\"]) == 1\n data = response.json[\"links\"]\n assert data[\"self\"] == next_url\n assert \"next\" not in data\n assert \"prev\" in data and data[\"prev\"] == first_url",
"def test_homepage_it(self):\n\n self.driver.get(self.url_ + '/?hl=it')\n\n title_present = EC.text_to_be_present_in_element(\n (By.XPATH, '//*[@id=\"main-nav\"]/div/div[1]/a'), 'Data Commons')\n WebDriverWait(self.driver, self.TIMEOUT_SEC).until(title_present)\n\n hero_msg = self.driver.find_elements_by_class_name('lead')[0]\n self.assertTrue(\n hero_msg.text.startswith(\n 'Data Commons è un repository di conoscenza aperto che combina i dati provenienti'\n ))\n\n explore_callout_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/p')\n self.assertTrue(\n explore_callout_msg.text.startswith(\n 'Abbiamo pulito ed elaborato i dati al tuo posto, così non dovrai farlo tu.'\n ))\n\n nyc_health = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[1]/ul/li[2]/a')\n self.assertEqual(nyc_health.text, 'Salute a New York, New York')\n self.assertEqual(nyc_health.get_attribute('href'),\n self.url_ + '/place/geoId/3651000?topic=Health&hl=it')\n\n schema_org = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[5]/ul/li[2]')\n self.assertEqual(schema_org.text,\n 'Progetto open source realizzato con Schema.org.')\n\n more_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[3]/ul/li[4]/a')\n self.assertEqual(more_msg.text, 'altro…')",
"def test_link_to_documentation(\n self,\n _needs_unindent,\n _is_link_requested,\n _get_source_code_from_object,\n ):\n _needs_unindent.return_value = False\n _is_link_requested.return_value = True\n _get_source_code_from_object.return_value = \"\"\n\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"MyKlass.get_method\",\n )\n content = self._get_fake_project_method()\n nodes = self._get_nodes(data, content) # pylint: disable=no-value-for-parameter\n\n self.assertEqual(2, len(nodes))\n self.assertTrue(any(node for node in nodes if isinstance(\n node,\n extension._DocumentationHyperlink, # pylint: disable=protected-access\n )))",
"def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))",
"def test_fac_admin_page(self):\n self.login(self.fac_admin.user.username)\n self._got_to_fac_admin_page()\n self.check_page_title(self.admin_config.get('FAC_ADMIN').get('PAGE_TITLE'))\n self.check_page_contains_ids(self.admin_config.get('FAC_ADMIN').get('ADMIN_LINKS'))",
"def test_page_titles(self, rf, projects):\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n context = view.get_context_data(request=request)\n assert context[\"page_title\"] == \"My Projects\"\n assert context[\"past_title\"] == \"My Past Projects\"",
"def test_switch_to_tab(self):\n\n # locators\n open_tab_button = 'opentab'\n new_tab_course_listing = '//div[@class=\"course-listing-title\"]'\n new_tab_course_title = '//h1[@class=\"course-title\"]'\n\n # actions\n locate_open_tab_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, open_tab_button))\n )\n locate_open_tab_button.click()\n new_window_handle = self.driver.window_handles\n self.driver.switch_to.window(new_window_handle[1])\n locate_new_tab_courses_listing = WebDriverWait(self.driver, 10).until(\n ec.presence_of_all_elements_located((By.XPATH, new_tab_course_listing))\n )\n\n # loop through each course and check if link of the selected course are clickable.\n for element in range(len(locate_new_tab_courses_listing)):\n print(\"Course from new tab: \" + locate_new_tab_courses_listing[element].text)\n locate_new_tab_courses_listing[element].click()\n locate_course_title = WebDriverWait(self.driver, 10).until(\n lambda driver: self.driver.find_element_by_xpath(new_tab_course_title)\n )\n print(\"Course title from course page: \" + locate_course_title.text)\n self.driver.execute_script(\"window.history.go(-1);return false;\")\n locate_new_tab_courses_listing = WebDriverWait(self.driver, 10).until(\n ec.presence_of_all_elements_located((By.XPATH, new_tab_course_listing))\n )\n if element == len(locate_new_tab_courses_listing) - 1:\n break\n else:\n locate_new_tab_courses_listing[element] = locate_new_tab_courses_listing[element + 1]",
"def test_about_page(self):\n response = self.testapp.get('/about')\n self.assertEqual(response.status_int, 200)\n self.assertEqual(response.content_type, 'text/html')\n response.mustcontain('Community Guidelines', 'Lifecycle', 'License')",
"def test_tags_faq(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n start_url = po.current_url()\n\n # the link brings up a popup, so we need to get the\n # handle of that window and check the switch to it\n parent_h = self.browser._browser.current_window_handle\n\n # press the FAQ link\n self.browser.proxy_client.new_har(\"page\")\n po.goto_faq()\n\n # click on the link that opens a new window\n handles = self.browser._browser.window_handles\n handles.remove(parent_h)\n self.browser._browser.switch_to_window(handles.pop())\n\n # get the page load details of the window\n har_entry = self.browser.page_load_details()\n end_url = po.current_url()\n\n # switch back to the main window\n self.browser._browser.switch_to_window(parent_h)\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri: %s. http archive unavailable.\" \\\n % (end_url)\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"while on the tags page %s,\" % (start_url) \\\n + \" pressing the Tags FAQ link returned error\" \\\n + \" response code on page %s.\" % (end_url) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))",
"def test_doc():\n pass",
"def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)",
"def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)"
] | [
"0.68019795",
"0.67768544",
"0.6687646",
"0.6620972",
"0.64275634",
"0.63891387",
"0.63046104",
"0.62972707",
"0.6277566",
"0.62297547",
"0.621474",
"0.6169982",
"0.6162793",
"0.6142867",
"0.61180085",
"0.61172885",
"0.6100911",
"0.60956544",
"0.6092831",
"0.6066643",
"0.60659426",
"0.6065035",
"0.60620385",
"0.60307616",
"0.60157293",
"0.6008845",
"0.6005189",
"0.5981097",
"0.5980481",
"0.5980481"
] | 0.79340965 | 0 |
Tests navigation through the menu options and verifies the page links by asserting the expected titles against the actual ones. | def test_menu_path_links(self):
main_page = DogMainPage(self.driver)
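        # The same pattern repeats for each menu entry below: navigate to the
        # page, read its header, and compare it with the page object's expected header.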
# Navigate to Title (main) page
title_page = main_page.navigate_main()
title_expected_header = title_page.get_expected_header()
title_header = title_page.get_header()
# Assert the title by sub-string to verify page
self.assertTrue(title_expected_header in title_header,
('%s expected, instead found: %s. Page is wrong' % (title_expected_header, title_header)))
# Navigate to 'Documentation' page
doc_page = main_page.navigate_documentation()
doc_expected = doc_page.get_expected_header()
doc_header = doc_page.get_header()
# Assert the title to verify page
self.assertEqual(doc_expected, doc_header,
('%s expected, instead found: %s. Page is wrong' % (doc_expected, doc_header)))
# Navigate to 'Breed list' page
breed_list_page = main_page.navigate_breeds_list()
breed_list_expected_header = breed_list_page.get_expected_header()
breed_list_header = breed_list_page.get_header()
# Assert the title to verify page
self.assertEqual(breed_list_expected_header, breed_list_header,
('%s expected, instead found: %s. Page is wrong' %
(breed_list_expected_header, breed_list_header)))
# Navigate to 'About' page
about_page = main_page.navigate_about()
about_expected_header = about_page.get_expected_header()
about_header = about_page.get_header()
# Assert the title to verify page
self.assertEqual(about_expected_header, about_header,
('%s expected, instead found: %s. Page is wrong' % (about_expected_header, about_header))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_navigates_to_about_page_then_index_page_then_about_page_success(w_driver):\n #1.) Navigate to about page, click link for index page\n w_driver.get('localhost:8000/about')\n\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n #find index page info\n results=w_driver.page_source\n text_found1=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n #2.) Click link for index page\n element=w_driver.find_element_by_link_text('About our team').click()\n #find about page info\n results=w_driver.page_source\n text_found2=re.search(r'About the Kasner Search Engine',results)\n\n #3.) Verify info we found is not None\n assert(text_found1 != None)\n assert(text_found2 != None)",
"def test_ui_menu(test):\n assert hl.test_help_ui_menu(test) == test",
"def test_sub_navigation(self):\n self.open_url('/group/list')\n self.wd.find_element(By.ID, \"subnav-list\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Group List', self.wd.title)\n \n self.open_url('/group/list')\n self.wd.find_element(By.ID, \"subnav-create\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Add Group', self.wd.title)\n \n # Copy/paste to check the other page\n self.open_url('/group/add')\n self.wd.find_element(By.ID, \"subnav-list\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Group List', self.wd.title)\n \n self.open_url('/group/add')\n self.wd.find_element(By.ID, \"subnav-create\").click()\n time.sleep(0.5) # FIXME: Need to figure out how to wait on page loads; this is supposed to happen automatically ...\n self.assertEquals('Add Group', self.wd.title)",
"def test_documentation_path_links(self):\r\n main_page = DogMainPage(self.driver)\r\n dog_page = main_page.navigate_documentation()\r\n # Switch to 'List all breeds' tab\r\n all_breeds_page = dog_page.switch_tab(dog_page.ALL_BREEDS)\r\n all_breeds_expected = all_breeds_page.get_expected_header()\r\n all_breeds_header = all_breeds_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(all_breeds_expected, all_breeds_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (all_breeds_expected, all_breeds_header)))\r\n # Switch to 'Random image' tab\r\n random_page = dog_page.switch_tab(dog_page.RANDOM)\r\n random_expected_header = random_page.get_expected_header()\r\n random_header = random_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(random_expected_header, random_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (random_expected_header, random_header)))\r\n # Switch to 'By breed' tab\r\n breed_page = dog_page.switch_tab(dog_page.BREED)\r\n breed_expected_header = breed_page.get_expected_header()\r\n breed_header = breed_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(breed_expected_header, breed_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (breed_expected_header, breed_header)))\r\n # Switch to 'By sub-breed' tab\r\n sub_breed_page = dog_page.switch_tab(dog_page.SUB_BREED)\r\n sub_expected_header = sub_breed_page.get_expected_header()\r\n sub_header = sub_breed_page.get_header()\r\n # Assert the title to verify the page\r\n self.assertEqual(sub_expected_header, sub_header,\r\n ('%s expected, instead found: %s. Page is wrong' % (sub_expected_header, sub_header)))",
"def test_navigates_to_index_page_then_about_page_then_index_page_success(w_driver):\n #1.) Navigate to index page, click link for about page\n w_driver.get('localhost:8000')\n\n element=w_driver.find_element_by_link_text('About our team').click()\n #find about page info\n results=w_driver.page_source\n text_found1=re.search(r'About the Kasner Search Engine',results)\n\n #2.) Click link for index page\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n #find index page info\n results=w_driver.page_source\n text_found2=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n #3.) Verify info we found is not None\n assert(text_found1 != None)\n assert(text_found2 != None)",
"def test_viewTermsOfServicePage(self):\r\n print('========================================================================')\r\n print('Test for check redirect on TermsOfService page after link TermsOfService click')\r\n #Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n #cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n self.reg_page.click_terms_lnk()\r\n terms_page = page_TermsAndService.Page_TermsAndService(driver)\r\n\r\n\r\n driver.get(terms_page.TERMS_URL)\r\n wait = WebDriverWait(driver, 20)\r\n element = wait.until(EC.title_is(terms_page.get_terms_title()))\r\n assert terms_page.get_terms_title() == 'Snovio terms and conditions', \"Terms title page doesn't match\"\r\n\r\n print('--------- SUCCESS test_viewTermsOfServicePage-----------')\r\n driver.quit()",
"def test_basic():\n browser = Browser(URL, \"Firefox\")\n # actions = Actions(browser)\n delay_between_pages = 1.5\n navigation_bar = NavigationBar(browser)\n # sequential clicking on all new elements\n navigation_bar.show_printers()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Printers\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_scanners()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Scanners\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_webcams()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Web Cameras\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_all_phones_and_pdas()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Phones & PDAs\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_pdas()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"PDAs\"\n\n time.sleep(delay_between_pages)\n navigation_bar.show_phones()\n section_title = Element(browser, By.XPATH, \"//*[@id='content']/h2\").get_text()\n assert section_title == \"Phones\"\n\n time.sleep(delay_between_pages)\n\n time.sleep(3)\n browser.shutdown()",
"def test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})",
"def test_extractMenu(self):\n doc = lunchr.parseHtml(self.html)\n self.assertEquals(self.menu, lunchr.extractMenu(doc))",
"def test_dropdown_menu_tops(self):\n\n # Test Setup:\n with open(\"../mystore.json\") as f:\n data = json.load(f)\n\n\n # Actual test:\n women_page = WomenPage(self.driver)\n women_page.go()\n women_page.maximize_window()\n if self.driver.title == data[\"error_page\"]:\n self.driver.refresh()\n else:\n assert self.driver.title == \"Women - My Store\"\n women_page.element(data[\"open_menu\"]).click()\n dropdown_txt = women_page.element(data[\"assert_dropdown_tshirts\"]).text\n assert dropdown_txt == 'T-shirts'\n dropdown_txt_b = women_page.element(data[\"assert_dropdown_blouses\"]).text\n assert dropdown_txt_b == 'Blouses'",
"def test_getMenu(self):\n urllib2.urlopen = self.urlopen_mock\n self.assertEquals(u'\\xc4RTSOPPA & PANNKAKOR m. sylt & gr\\xe4dde# SEJ m. \\xe4gg- & persiljes\\xe5s samt kokt potatis* \\xa4 KYCKLINGFAJITASm. paprika, salsa & tortillas* VEG: QUORNFAJITASm. paprika, salsa & tortillas', lunchr.getMenu('http://www.example.com', 3))",
"def test_next_page_returns_correct_menu(self):\n current_page_start_before = 1\n self.menu.current_page_start = current_page_start_before\n\n result = self.menu.next_page()\n\n expected_result = self.menu.present_results\n\n self.assertEqual(result, expected_result)",
"def test_options_menu_autoopen(self):\n self.login()\n page = Page.objects.create(owner=self.user)\n path = page.get_absolute_url()\n self.selenium.get('%s%s' % (self.live_server_url, path))\n dropdown_menu_btn = self._get_one('.page-options-dropdown .dropdown-toggle')\n show_opts_btn = self._get_one('.options-btn')\n options_menu = self._get_one('.pageOptions')\n self.assertTrue(dropdown_menu_btn.is_displayed())\n self.assertFalse(show_opts_btn.is_displayed())\n self.assertTrue(options_menu.is_displayed())",
"def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))",
"def test_present_results_loads_correct_next_menu(self):\n # this can be just like the previous tests that return menus\n # with the only wrinkle that we need to handle the different\n # values for self.menu.current_page_start do determine whether\n # next or previous menus are available\n # Add an entry to the database\n # to test this we don't actually need to write to the database,\n # we just need a list of ordered_dicts in menu.records\n test_records = [\n OrderedDict([\n ('name', 'Test Employee 1'),\n ('date', datetime.date(2018, 5, 1)),\n ('task_name', 'Test Task 1'),\n ('duration', 1),\n ('notes', 'This is a note for the first test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 2'),\n ('date', datetime.date(2018, 5, 2)),\n ('task_name', 'Test Task 2'),\n ('duration', 2),\n ('notes', 'This is a note for the second test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 3'),\n ('date', datetime.date(2018, 5, 3)),\n ('task_name', 'Test Task 3'),\n ('duration', 3),\n ('notes', 'This is a note for the third test task')\n ]),\n ]\n old_entries_per_page = self.menu.OPTIONS['entries per page']\n self.menu.OPTIONS['entries per page'] = 1\n self.menu.records = test_records\n self.menu.current_page_start = 1\n user_inputs = {\n 'n': self.menu.next_page,\n 'p': self.menu.previous_page,\n 'v': self.menu.select_detail,\n 'e': self.menu.edit_record,\n 'd': self.menu.delete_record,\n 'm': self.menu.main_menu,\n 'q': self.menu.quit_program,\n }\n results = []\n expected_results = []\n for key, value in user_inputs.items():\n expected_results.append(value)\n with patch('builtins.input', side_effect=key):\n results.append(self.menu.present_results())\n\n self.assertEqual(expected_results, results)\n self.menu.OPTIONS['entries per page'] = old_entries_per_page",
"def test_scrape(self):\n self.assertEqual(self.scraped.title, 'Heading!')\n self.assertEqual(self.scraped.link_text, 'Go to Google')\n self.assertEqual(self.scraped.link_url, 'http://Google.com')",
"def test_homepage_it(self):\n\n self.driver.get(self.url_ + '/?hl=it')\n\n title_present = EC.text_to_be_present_in_element(\n (By.XPATH, '//*[@id=\"main-nav\"]/div/div[1]/a'), 'Data Commons')\n WebDriverWait(self.driver, self.TIMEOUT_SEC).until(title_present)\n\n hero_msg = self.driver.find_elements_by_class_name('lead')[0]\n self.assertTrue(\n hero_msg.text.startswith(\n 'Data Commons è un repository di conoscenza aperto che combina i dati provenienti'\n ))\n\n explore_callout_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/p')\n self.assertTrue(\n explore_callout_msg.text.startswith(\n 'Abbiamo pulito ed elaborato i dati al tuo posto, così non dovrai farlo tu.'\n ))\n\n nyc_health = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[1]/ul/li[2]/a')\n self.assertEqual(nyc_health.text, 'Salute a New York, New York')\n self.assertEqual(nyc_health.get_attribute('href'),\n self.url_ + '/place/geoId/3651000?topic=Health&hl=it')\n\n schema_org = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[5]/ul/li[2]')\n self.assertEqual(schema_org.text,\n 'Progetto open source realizzato con Schema.org.')\n\n more_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[3]/ul/li[4]/a')\n self.assertEqual(more_msg.text, 'altro…')",
"def test_reward_user_login(browser):\n\n login_page = LoginPage(browser)\n login_page.start()\n\n login_page.login(email=\"[email protected]\",\n password=\"reward_admin\")\n \n reward_admin_page = RewardAdminPage(browser)\n reward_admin_page.start()\n\n # Check that reward_admin has can only have priviledge to create reward\n assert reward_admin_page.get_side_bar_menu_item(RewardAdminPage.REWARDS_MENU_ITEM).size != 0\n\n # Check that rest of the menu_item element that can be found in admin page cannot be found in\n # reward admin page\n with pytest.raises(NoSuchElementException):\n reward_admin_page.get_side_bar_menu_item(AdminPage.REPORTS_MENU_ITEM)\n reward_admin_page.get_side_bar_menu_item(AdminPage.CAMPAIGNS_MENU_ITEM) \n reward_admin_page.get_side_bar_menu_item(AdminPage.LOYALTIES_MENU_ITEM) \n reward_admin_page.get_side_bar_menu_item(AdminPage.TRANSACTION_RULES_MENU_ITEM) \n reward_admin_page.get_side_bar_menu_item(AdminPage.MERCHANTS_MENU_ITEM) \n reward_admin_page.get_side_bar_menu_item(AdminPage.CUSTOMER_MANAGEMENT_MENU_ITEM) \n reward_admin_page.get_side_bar_menu_item(AdminPage.SETTINGS_MENU_ITEM) \n reward_admin_page.get_side_bar_menu_item(AdminPage.BUSINESS_INTELLIGENCE_MENU_ITEM)",
"def test_courseware_nav(self):\r\n # Navigate to the courseware page from the info page\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Courseware')\r\n\r\n # Check that the courseware navigation appears correctly\r\n EXPECTED_SECTIONS = {\r\n 'Test Section': ['Test Subsection'],\r\n 'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']\r\n }\r\n\r\n actual_sections = self.course_nav.sections\r\n for section, subsections in EXPECTED_SECTIONS.iteritems():\r\n self.assertIn(section, actual_sections)\r\n self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])\r\n\r\n # Navigate to a particular section\r\n self.course_nav.go_to_section('Test Section', 'Test Subsection')\r\n\r\n # Check the sequence items\r\n EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']\r\n\r\n actual_items = self.course_nav.sequence_items\r\n self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))\r\n for expected in EXPECTED_ITEMS:\r\n self.assertIn(expected, actual_items)",
"def test_import_menu():\n assert sc.menu != {}",
"def test_menu_4(test):\n m1 = Menu()\n m1.add(\"1\", \"foo\")\n m1.add(\"2\", \"bar\")\n\n m2 = Menu()\n m2.add(\"a\", \"AAA\")\n m2.add(\"b\", \"BBB\")\n m2.add(\"c\", \"CCC\")\n\n test.assertEqual(len(m1.entries), 2)\n test.assertEqual(len(m2.entries), 3)",
"def test_anon_page_admin(self):\n self._make_quick_page()\n\n dropdown_menu_btn = self._get_one('.dropdown-toggle')\n show_opts_btn = self._get_one('.options-btn')\n options_menu = self._get_one('.pageOptions')\n\n self.assertTrue(dropdown_menu_btn.is_displayed())\n dropdown_menu_btn.click()\n self.assertTrue(show_opts_btn.is_displayed())\n\n # Something weird related to auto-open options menu.\n # Maybe this conditional will fix it? <:)\n if not options_menu.is_displayed():\n show_opts_btn.click()\n self.assertTrue(options_menu.is_displayed())\n\n settings_btn = self._find_one_with_text('li.tabs-tab', 'Settings')\n self.assertTrue(settings_btn.is_displayed())\n settings_btn.click()\n\n title_input = self._get_one('input.title')\n title_input.clear()\n TITLE = 'A Title Most Titular'\n title_input.send_keys(TITLE)\n\n # Make it published, so that our anon viewer can access it\n published_input = self._get_one('.published')\n published_input.click()\n\n # TODO: better solution. need to wait for autosave\n # This may have to wait for request-queuing in the Backbone model.\n import time; time.sleep(1) \n\n # Now pretend we're someone else\n self.selenium.delete_all_cookies()\n self.selenium.refresh()\n for cookie in self.selenium.get_cookies():\n self.assertFalse(cookie['name'].startswith('claim'))\n\n # make sure we aren't admins\n self.assertFalse(self._find('.dropdown-toggle')) \n\n # check that we got the new title\n self.assertEqual(self.selenium.title, TITLE)",
"def test_homepage_en(self):\n\n self.driver.get(self.url_ + '/')\n\n title_present = EC.text_to_be_present_in_element(\n (By.XPATH, '//*[@id=\"main-nav\"]/div/div[1]/a'), 'Data Commons')\n WebDriverWait(self.driver, self.TIMEOUT_SEC).until(title_present)\n\n hero_msg = self.driver.find_elements_by_class_name('lead')[0]\n self.assertTrue(\n hero_msg.text.startswith(\n 'Data Commons is an open knowledge repository'))\n\n explore_callout_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/p')\n self.assertTrue(\n explore_callout_msg.text.startswith(\n 'We cleaned and processed the data so you don\\'t have to'))\n\n nyc_health = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[1]/ul/li[2]/a')\n self.assertEqual(nyc_health.text, 'New York City, NY Health')\n self.assertEqual(nyc_health.get_attribute('href'),\n self.url_ + '/place/geoId/3651000?topic=Health')\n\n schema_org = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[5]/ul/li[2]')\n self.assertEqual(schema_org.text,\n 'Open sourced, built using Schema.org.')\n\n more_msg = self.driver.find_element_by_xpath(\n '//*[@id=\"homepage\"]/section[3]/ul/li[3]/ul/li[4]/a')\n self.assertEqual(more_msg.text, 'more ...')",
"def test_chooser_multiple_choices(self):\n root_page = Page.objects.get(pk=2)\n news1 = root_page.add_child(instance=NewsIndex(\n title='Normal News 1', slug='news-1'))\n news2 = root_page.add_child(instance=NewsIndex(\n title='Normal News 2', slug='news-2'))\n secondary_news = root_page.add_child(instance=SecondaryNewsIndex(\n title='Secondary News', slug='secondary-news'))\n\n response = self.client.get(reverse('wagtailnews:choose'))\n self.assertContains(response, news1.title)\n self.assertContains(response, news2.title)\n self.assertNotContains(response, secondary_news.title)",
"def about_page_test(self):\r\n # default for ENABLE_MKTG_SITE is False.\r\n self.assertEquals(self.get_about_page_link(), \"//localhost:8000/courses/mitX/101/test/about\")",
"def test_menu(self) -> None:\n actions_registry.register(self.test_menu_action)\n actions_registry.register(self.test_menu_item_action)\n\n self.assertIn(self.test_menu_item_action,\n self.test_menu_action.child_actions)\n\n actions_registry.unregister(self.test_menu_item_action)\n\n self.assertNotIn(self.test_menu_item_action,\n self.test_menu_action.child_actions)",
"def test_tags_browse_click_page_links_check_items_displayed(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n\n # change the display limit to 5\n new_display_limit = '5'\n po.form.footer.display_limit(new_display_limit)\n\n # get the updated display limit\n display_limit = int(po.form.footer.display_limit())\n\n assert display_limit == int(new_display_limit), \\\n \"updated display limit does not match the display\" \\\n + \" limit set by user: updated display limit =\" \\\n + \" '%s', user set display limit = '%s'\" \\\n % (display_limit,new_display_limit)\n\n # get the updated page number links\n page_numbers = po.get_link_page_numbers()\n\n page_url = po.current_url()\n\n for p in page_numbers:\n # click the page number link\n po.goto_page_number(p)\n\n po2 = self.catalog.load_pageobject('TagsBrowsePage')\n\n # get the number of items that should be displayed\n # according to the pagination counts\n (start,end,total) = po2.get_pagination_counts()\n num_pag = (end-start+1)\n\n # get the number of items that are actually displayed\n num_rows = po2.form.search_results.num_rows()\n\n # compare that is should be displayed to what is displayed\n assert num_pag == num_rows, \\\n \"after clicking page link #%s on %s,\" % (p,page_url) \\\n + \" the number of items displayed does not match the\" \\\n + \" number of items listed in the pagination counts:\" \\\n + \" displayed = %s, start = %s,\" % (num_rows,start) \\\n + \" end = %s, end-start+1 (what should be displayed) = %s\" \\\n % (end,num_pag)\n\n # return back to our original page\n self.browser._browser.back()",
"def test_property_page(self):\n self.property_page.proceed_to_property_page()\n\n \"\"\"Step2 - Check rooms section\n Exp2 - Property page opened \"\"\"\n self.property_page.check_rooms_section()\n\n \"\"\"Step3 - Check other section\n Exp3 - Each item works well \"\"\"\n self.property_page.check_other_section()",
"def test_fac_admin_page(self):\n self.login(self.fac_admin.user.username)\n self._got_to_fac_admin_page()\n self.check_page_title(self.admin_config.get('FAC_ADMIN').get('PAGE_TITLE'))\n self.check_page_contains_ids(self.admin_config.get('FAC_ADMIN').get('ADMIN_LINKS'))",
"def test_navigates_to_about_page_link_index_page(w_driver):\n w_driver.get('localhost:8000/about')\n\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n results=w_driver.page_source\n text_found=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n assert(text_found != None)"
] | [
"0.6868659",
"0.6848145",
"0.6782984",
"0.67550534",
"0.6732094",
"0.65861976",
"0.6546698",
"0.6540386",
"0.6496603",
"0.64781463",
"0.64476573",
"0.64316386",
"0.6419403",
"0.6395801",
"0.6340467",
"0.63370305",
"0.6316351",
"0.6306604",
"0.6289248",
"0.62697625",
"0.62560403",
"0.6249799",
"0.6217734",
"0.62171763",
"0.6215703",
"0.6210562",
"0.61952543",
"0.61931527",
"0.6192114",
"0.61543983"
] | 0.79684585 | 0 |
For testing the email text field | def test_email_form(self):
dummy_email = '[email protected]'
main_page = DogMainPage(self.driver)
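        # Fill the email field, then check that the value read back matches what was typed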
main_page.populate_email(dummy_email)
self.assertEqual(dummy_email, main_page.get_value_email(), 'Expected conditions failed.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_email(self):\r\n \r\n self.assertEqual('[email protected]', self.user.email)",
"def test_text_field():",
"def test_compose_email_good(self): \n pass",
"def get_email(self,text):\r\n return self.driver.find_element(*SinginPage.email).send_keys(text)",
"def test_compose_email_somebad(self):\n pass",
"def enter_email(self, email):\n self.selib.input_text(self.locator.email, email)",
"def test_empty_email_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the email field please\")",
"def test_is_valid_email(self):\n self.assertTrue(is_valid_email('[email protected]'))",
"def test_email_address(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_address'\n )\n self.assertEqual(u'[email protected]', key)",
"def test_empty_email():\n expect_error(register, InputError, \"a\", \"abdkjjd\", \"a\", \"A\", \"\")",
"def test_invalid_email_when_logging_in(self):\n pass",
"def test_email_search(self):\n # A name in the database\n search_string = \"[email protected]\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the e_mail field of the result\n self.assertEqual(search_string,search_result[0]['e_mail'],\"It doesn't return the user with the email {}\".format(search_string))",
"def test_is_invalid_email(self):\n self.assertFalse(is_valid_email('helloworld'))",
"def test_blank_email(self):\n rv = self.signup('Bo', 'Theo', '', 'Bo1995', 'Bo1995')\n self.assertIn(b'Field must be between 6 and 30 characters long.', rv.data)",
"def test_valid_email():\n # find and click on sign in button\n sign_in_btn = my_store.element(sign_in_button)\n sign_in_btn.find()\n sign_in_btn.click()\n\n # find and populate email input field\n email_input = my_store.element(email_input_field)\n email_input.find()\n email_input.populate_field(valid_email)\n\n # find and click on create account button\n create_acc_btn = my_store.element(create_account_button)\n create_acc_btn.find()\n create_acc_btn.click()\n\n # compare current url with url after click on create account button\n create_acc_btn.url_to_be(create_acc_url)\n current_url = browser.current_url\n assert current_url == create_acc_url\n\n # close Google Chrome browser\n browser.close()",
"def testEmailRequired(self):\r\n res = self.app.post('/signup_process')\r\n self.assertIn('Please supply', res.body)",
"def test_text_email_only(self):\n data = mailgun_payload\n del data['stripped-html']\n request = self.factory.post(self.url, data=data)\n email = self.parser.parse(request)\n self._assertEmailParsedCorrectly(email, mailgun_payload)",
"def test_email():\n assert is_email(None) is None\n assert is_email('[email protected]') is None\n assert is_email('other')",
"def test_email_text():\n new_text = mailroom.compose_email(\"Willie Nelson\", 12.34)\n reference_text = \"\\nDear Willie Nelson,\\n\\\nThank you for your generous gift of $12.34! It will help Local Charity\\\n achieve our mission.\\n\\\nBest regards,\\n\\\nLocal Charity\\n\\n\"\n assert new_text == reference_text",
"def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)",
"def test_invitation_email(self):\n queryset = models.Invitation.objects.filter(id=self.invitation.id)\n self.admin_instance.send_new_activation_email(self.some_request, queryset)\n # check whether there is a mail in the outbox\n self.assertEqual(len(mail.outbox), 1)\n # check subject\n self.assertEqual(\n mail.outbox[0].subject,\n \"Er is een account voor u aangemaakt op sso.lizard.net\",\n )\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n # check mail starts with 'Hallo Reinout,'\n self.assertTrue(mail.outbox[0].body.startswith(\"Hallo Reinout,\"))",
"def test_compose_email2_good(self):\n pass",
"def test_clean_email(self):\n\n raw_email = 'from=<[email protected]>'\n result = clean_email(raw_email)\n self.assertEqual(result, '[email protected]')",
"def test_for_email_attribute_by_name(self):\n name = u\"__TestUser__\"\n password = u\"ekfdweurwerh\"\n email = \"__TestUser__@moinhost\"\n self.createUser(name, password, email=email)\n theuser = user.User(self.request, name=name)\n assert theuser.email == \"\"",
"def test_confirmation_username_not_email(self):\n pass",
"def test_display_form(self):\n\n result = self.client.get(\"/login\")\n self.assertIn(b\"Email address\", result.data)",
"def controls(email):",
"def test_get_form_label_email(self):\n self.assertEqual(\n self.user.get_form_label(email=True),\n 'testuser <[email protected]>',\n )",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_get_user_by_emailuser_email_get(self):\n pass"
] | [
"0.7379854",
"0.73666507",
"0.7336404",
"0.71672654",
"0.71310836",
"0.71121943",
"0.70770097",
"0.7010115",
"0.6885396",
"0.6848203",
"0.68398523",
"0.68151253",
"0.68076175",
"0.67020476",
"0.6701801",
"0.666361",
"0.66532904",
"0.66501975",
"0.6637231",
"0.66144276",
"0.65948546",
"0.6564626",
"0.6529537",
"0.65259403",
"0.6497644",
"0.64854676",
"0.64452636",
"0.6441745",
"0.64111906",
"0.63580644"
] | 0.771166 | 0 |
Run the simulation and write results to report_file_name. This function runs a turn-by-turn simulation and writes the results to report_file_name. | def simulate(self, report_file_name):
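        # current_turn is the turn currently being simulated; last_turn is
        # pushed forward as customers arrive and marks the last turn on which
        # a customer could still be waiting in the restaurant.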
current_turn = 1
last_turn = 0
        # Process each customer's entry
for next_customer in self._scenario:
            # Until we reach the turn on which this customer enters the
            # restaurant, process turns one by one
while current_turn < next_customer.entry_turn():
# Ask all approaches to process this turn
for approach in self._approaches:
approach.process_turn(current_turn)
current_turn += 1
            # Ask all approaches to add this customer.
            # Only one customer is added per iteration of the outer loop, even
            # though several customers may enter the restaurant on the same turn.
for approach in self._approaches:
approach.add_customer(next_customer)
# Update the last turn that we should simulate based on when
# this customer may leave the restaurant
next_customer_exit_turn = next_customer.entry_turn() + next_customer.patience()
            if next_customer_exit_turn > last_turn:
last_turn = next_customer_exit_turn
        # Continue the simulation until no customer can still be waiting
        # in the restaurant
while current_turn <= last_turn:
# Ask all approaches to process this turn
for approach in self._approaches:
approach.process_turn(current_turn)
current_turn += 1
        # Now write the report of every approach to report_file_name
report_file = open(report_file_name, "w")
for approach in self._approaches:
approach.write_report(report_file)
report_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))",
"def run_simulation(self):\n\n # Create agents for simulation\n self.spawn_agents(self.num_agents)\n\n if self.force_personalities != None:\n self.force_personalities(self)\n\n if self.visualizer == True:\n V.Visualizer.createVisualizer(types=self.visualizerOptions, showAtEnd=True)\n\n TM.TimeManager.createManager()\n for x in range (self.time_to_run):\n for agent in self.agents:\n agent.take_turn()\n while self.agents_to_settle:\n self.agents_to_settle.pop().settle_reposts()\n if self.data_collector != None:\n self.data_collector.collector_turn(x, agent)\n if self.visualizer == True:\n self.generate_visualizations(x)\n TM.TimeManager.sharedManager.increaseTime()\n if self.data_collector != None:\n self.data_collector.collector_round(x)\n self.generate_statistics(x)\n\n if self.visualizer == True:\n V.Visualizer.sharedVisualizer.updateEverything()\n\n if self.data_collector != None:\n self.data_collector.finalize()",
"def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()",
"def performSimulation(self):\n \n if self.parameters['verbose']:\n print(\"=====================\\nStarting simulation with parameters\\n\",self.parameters)\n print(\"=====================\\nInitial Graph\\n\")\n self.showState()\n print(\"=====================\")\n\n while self.parameters['steps'] > 0:\n if self.parameters['verbose']: print(\"Performing step\")\n self.performStep()\n if self.parameters['verbose']: self.showState()\n\n if self.parameters['verbose']:\n print(\"=====================\\nFinished Simulation\\n\\nResult graph:\")\n self.showState()\n #self.showGraph(self.parameters['file_name'])\n #self.showState()\n #self.showStats()",
"def run_simulation(**kwargs):\n print(\"executing run_simulation() in file\", __file__)\n print(\"got the dictionary kwargs =\", kwargs)\n\n # HERE is where you would usually run your simulation (e.g. DMRG).\n # simulate some heavy calculations:\n for i in range(30):\n print(\"step \", i, flush=True) # (remove `flush=True` for Python 2)\n # the flush=True makes the output appear immediately\n time.sleep(5)\n\n results = {'kwargs': kwargs, 'example_data': np.random.random((2, 2))}\n\n output_filename = kwargs['output_filename']\n print(\"save results to \", output_filename)\n with open(output_filename, 'wb') as f:\n pickle.dump(results, f)",
"def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())",
"def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir \"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n #\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)",
"def run_sim(self):\n t0 = time.time()\n print('Beginning AR coating simulation')\n self._d_converter()\n self._interconnect()\n f_list = []\n t_list = []\n r_list = []\n for f in self.freq_sweep:\n results = self.sim_single_freq(f)\n f_list.append(f)\n t_list.append(results['T'])\n r_list.append(results['R'])\n fs = np.asarray(f_list)\n ts = np.asarray(t_list)\n rs = np.asarray(r_list)\n results = np.array([fs, ts, rs])\n t = time.ctime(time.time())\n data_name = self._make_save_path(self.save_path, self.save_name)\n header = 'Frequency (Hz)\\t\\tTransmission amplitude\\t\\tReflection amplitude'\n# log_name = self._make_save_path(self.save_path, self.log_name)\n# log = self._make_log()\n with open(data_name, 'wb') as f:\n np.savetxt(f, np.c_[fs, ts, rs], delimiter='\\t', header=header)\n# with open(log_name, 'wb') as f:\n# for line in log:\n# f.writelines(line)\n# f.write('\\n')\n print('Finished running AR coating simulation')\n t1 = time.time()\n t_elapsed = t1-t0\n print('Elapsed time: {t}s\\n'.format(t=t_elapsed))\n return results",
"def generate(self):\n\n # create temporary folder for copying image files\n try:\n os.mkdir(ReportGenerator.TEMP_FOLDER)\n except FileExistsError:\n shutil.rmtree(ReportGenerator.TEMP_FOLDER)\n os.mkdir(ReportGenerator.TEMP_FOLDER)\n\n # find all simulations to be included in report\n group_directory = self.config['group_directory']\n sim_directory = self.config['sim_directory']\n assert (group_directory or sim_directory) and not(group_directory and sim_directory), \\\n \"A group directory or a simulation directory must be specified, but not both\"\n\n if group_directory:\n assert os.path.exists(f'results/{group_directory}'), \"ERROR: Group directory does not exist\"\n\n sim_list = os.listdir(f'results/{group_directory}')\n sim_label = group_directory.split('-')[0]\n sim_list = [f'{group_directory}/{x}' for x in sim_list if sim_label in x]\n\n # filters\n if self.config['filters']:\n \n seed_filters = self.config['filters']['seed']\n dilution_filters = self.config['filters']['dilution']\n order_filters = self.config['filters']['order']\n\n for symbol, filters in zip(['S', 'D', 'O'], [seed_filters, dilution_filters, order_filters]):\n if type(filters) == int:\n sim_list = [x for x in sim_list if f'{symbol}{filters}' in x.split('-')[2]]\n elif type(filters) == list:\n sim_list = [x for x in sim_list if any([f\"{symbol}{f}\" in x.split('-')[-2] for f in filters])]\n\n if len(sim_list) == 0:\n raise Exception('Simulation filters resulted in no satisfactory simulations')\n\n else:\n assert os.path.exists(f'results/{sim_directory}'), \"ERROR: Simulation directory does not exist\"\n sim_list = [sim_directory]\n\n # REPORT HEADER\n self.add_heading(level=1, text=self.config['title'])\n now = datetime.now()\n self.add_text(text=f'Report generated on {now.strftime(\"%B %d, %Y\")} at {now.strftime(\"%H:%M:%S\")}')\n\n # REPORT CONTENTS\n for i, sim in enumerate(sim_list):\n for content in self.config['content']:\n content_type = list(content.keys())[0]\n params = content[content_type]\n\n self.add_content(content_type, params, sim)\n\n if i+1 < len(sim_list):\n self.add_content(content_type='break')\n\n # CREATE HTML TEMPLATE\n base_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/base.html')\n base_output = base_template.render(content='\\n\\n'.join(self.contents))\n with open(f'{ReportGenerator.TEMP_FOLDER}/template.html', 'w') as html_file:\n html_file.write(base_output)\n\n # CREATE PDF FROM TEMPLATE\n if group_directory:\n HTML(f'{ReportGenerator.TEMP_FOLDER}/template.html').write_pdf(\n f'results/{group_directory}/{self.config[\"title\"]}.pdf', stylesheets=['reports/style.css'])\n else:\n HTML(f'{ReportGenerator.TEMP_FOLDER}/template.html').write_pdf(\n f'results/{sim_directory}/{self.config[\"title\"]}.pdf', stylesheets=['reports/style.css'])\n\n # delete temp folder for storing image files\n shutil.rmtree(f'{ReportGenerator.TEMP_FOLDER}')\n return None",
"def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())",
"def report(self, output_dir):",
"def main():\n file_txt = open('results.txt','w+')\n positions = [1,10,100,1000]\n num_trials = 10000\n \n # Simulate the investment and plot histogram for different positions\n for position in positions:\n daily_ret = simulation(position, num_trials)\n plt.figure()\n plt.hist(daily_ret, 100, range=[-1,1])\n plt.title('The histogram of daily return for position ={}'.format(position))\n plt.xlabel('Daily return')\n plt.ylabel('The number of trials')\n plt.savefig('histogram_{}_pos.pdf'.format(str(position).zfill(4)))\n \n # Save the results of the simulation into a txt file \n file_txt.write('Position: {}\\n'.format(position))\n file_txt.write('Mean: {}; Std: {}\\n'.format(np.mean(daily_ret),np.std(daily_ret)))\n file_txt.write('\\n')\n file_txt.close()",
"def report():\n Robot.report()",
"def run_report_generation(**kwargs):\n out = run_python_script_helper(\n os.path.dirname(__file__), \"report_generation_example.py\", **kwargs\n )\n return out",
"def _auto_run(args):\n\n # TDH (2020-01-13) For developement testing the following section\n # replicates the functionality of \"standard_analysis.py\" so that\n # json_results can be created and used to create the graph image\n # files.\n import benchmark_postprocessing as bmpp\n file_list = bmpp.get_benchmark_files(args.benchmark_results_dir)\n json_results = bmpp.parse_files(file_list)\n json_results = bmpp.parse_and_add_benchmark_metadata(json_results)\n run_id_list = get_unique_run_ids(json_results)\n\n # TDH (2020-01-13) - Create unqiue reports for each run ID found.\n # Even a single results directory can contain results from multiple\n # run IDs.\n for run_id in run_id_list:\n output_path = os.path.join(\n args.benchmark_results_dir,\n '{}_report'.format(run_id))\n\n # TDH: Thorough attempt to safely create the results directory and\n # provide good error reporting if something went wrong.\n try:\n os.mkdir(output_path)\n except OSError:\n logging.error('Failed to create directory for report at {}'.format(\n output_path))\n create_standard_analysis_report(output_path,\n json_results,\n run_id)",
"def main():\n\t#Necessary Parameters for Simulation\n\tAmplitudes = ['230','260','290']\n\tConditions = ['No EES','EES','EES+A08','EES+A08+ProIncrease']\n\n\n\n\t#eesAmplitude = \"230\"\n\teesAmplitudeName = \"230\"\n\tdelay = \"2\"\n\ttoAddname = \"\"\n\tspecies = \"rat\"\n\t#Paramters initialization\n\ttotSimTime = rp.get_tot_sim_time()\n\tgaitCyclesFileName = rp.get_gait_cycles_file()\n\tmuscles = rp.get_muscles()\n\ttemplateFile = \"templateFrwSimRORaReal.txt\"\n\tw1 = 0.011\n\tw2 = -0.005\n\n\ttemplateFile = \"A08.txt\"\n\n\ttls.modify_network_structure(templateFile,templateFile,delay,[w1,w2])\n\n\teesFrequencies = range(0,41,40)\n\tnProc = 4\n\tseed = \"1\"\n\n\tnSim = len(eesFrequencies)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\n\t# run simulations\n\tfor j,eesAmplitude in enumerate(Amplitudes):\n\t\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t\tfor condition in Conditions:\n\t\t\t\t#name = \"Tonic_FFS_\"+inputFileName+\"_freq_\"+str(eesFrequency)\n\t\t\t\tinputFileName = condition\n\t\t\t\tinputFile = \"generatedStructures/\"+inputFileName+\".txt\"\n\t\t\t\tname = \"Tonic_FFS_\"+condition+\"_freq_\"+str(eesFrequency)\n\t\t\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\t\t\tif not resultFile:\n\t\t\t\t\tprogram = ['python','./scripts/runForSimMuscleSpindles_RORa.py',\\\n\t\t\t\t\t\tstr(eesFrequency),eesAmplitude,inputFile,name,\"--simTime\",str(totSimTime),\"--seed\",seed,\"--noPlot\"]\n\n\t\t\t\tif not resultFile: gt.run_subprocess(program)\n\n\t\t\t\tcount+=1\n\t\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\n\n\n\t\"\"\" create plots \"\"\"\n\terrParams = dict(lw=0.5, capsize=1, capthick=0.5)\n\twith open(gaitCyclesFileName, 'r') as pickle_file:\n\t\theelStrikes = pickle.load(pickle_file)\n\t\tfootOffs = pickle.load(pickle_file)\n\n\n\t# Figure 5 plot all gait cycles- afferent and efferents\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tprint name\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tprint resultFile\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# get gait 
cycles\n\t\tif not 'heelStrikeSamples' in locals():\n\t\t\tnSamples = len(meanFr[muscles[0]][\"Mn\"])\n\t\t\tdtMeanFr = float(totSimTime)/nSamples\n\t\t\theelStrikeSamples = [int(x) for x in heelStrikes*1000./dtMeanFr]\n\t\t\tfootOffSamples = [int(x) for x in footOffs*1000./dtMeanFr]\n\t\t\tsamples = range(nSamples)\n\t\t\tstance = np.zeros(nSamples).astype(bool)\n\t\t\tfor strike,off in zip(heelStrikeSamples,footOffSamples):\n\t\t\t\tif strike>nSamples: break\n\t\t\t\tstance[strike:off]=True\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'],color=colors[i])\n\t\t\tax[j,0].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'],color=colors[i])\n\t\t\tax[j,1].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].plot(meanFr[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,2].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,3].plot(estimatedEmg[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,3].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,200])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time (ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,200])\n\t\tax[j,2].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,2].set_xlabel(\"Time (ms)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,200])\n\t\tax[j,3].set_title(\"EMG - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n# FIgure 5 plot 2 single gait cycles- afferent and efferents + mn phasicity score\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 6,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# compute stats\n\t\tiaIntModDepth = 
{}\n\t\tactiveMnFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepth[muscle]=[]\n\t\t\tactiveMnFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaIntModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tmnActivityDuringCycle = meanFr[muscle]['Mn'][heelStrikeSamples[j]:heelStrikeSamples[j+1]]\n\t\t\t\tactiveMnFr[muscle].append(\\\n\t\t\t\t\tmnActivityDuringCycle[mnActivityDuringCycle>=0.8*mnActivityDuringCycle.max()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=1.5*mnActivityDuringCycle.std()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=np.percentile(mnActivityDuringCycle,90)].mean())\n\t\tiaIntModDepthStats = {}\n\t\tactiveMnFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepthStats[muscle] = {\"mean\":np.mean(iaIntModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaIntModDepth[muscle])/(np.sqrt(len(iaIntModDepth[muscle])-1))}\n\t\t\tactiveMnFrStats[muscle] = {\"mean\":np.mean(activeMnFr[muscle]),\n\t\t\t\t\"sem\":np.std(activeMnFr[muscle])/(np.sqrt(len(activeMnFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 200, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,1].fill_between(reducedSamples, 0, 250, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].bar(eesFrequency,iaIntModDepthStats[muscle][\"mean\"],bar_width,yerr=iaIntModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaIntModDepth[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,2].scatter(xValsScatter,iaIntModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,3].plot(meanFr[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,3].fill_between(reducedSamples, 0, 40, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,4].bar(eesFrequency,activeMnFrStats[muscle][\"mean\"],bar_width,yerr=activeMnFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,4].scatter(xValsScatter,activeMnFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,5].plot(estimatedEmg[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,5].fill_between(reducedSamples, -50, 50, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,250])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time 
(ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,250])\n\t\tax[j,2].set_title(\"Mean IaInr Fr while active\")\n\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,40])\n\t\tax[j,3].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,4].set_ylim([0,40])\n\t\tax[j,4].set_title(\"Mean Mn Fr while active\")\n\t\tax[j,4].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,4].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,5].set_ylim([-50,50])\n\t\tax[j,5].set_title(\"EMG - \"+muscle)\n\t\tax[j,5].set_xlabel(\"Time (ms)\")\n\t\tax[j,5].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n\n\n\n\t# FIgure 2-7 plot\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tmeanPerEraserApIaf = []\n\toffsetMeanFr = 0\n\toffsetMeanModDepth = 0\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\t\t\tmeanPerEraserApIaf.append(pickle.load(pickle_file))\n\n\t\t# compute stats\n\t\tiaModDepth = {}\n\t\tiaMeanFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepth[muscle]=[]\n\t\t\tiaMeanFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tiaMeanFr[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].mean())\n\t\tiaModDepthStats = {}\n\t\tiaMeanFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepthStats[muscle] = 
{\"mean\":np.mean(iaModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaModDepth[muscle])/(np.sqrt(len(iaModDepth[muscle])-1))}\n\t\t\tiaMeanFrStats[muscle] = {\"mean\":np.mean(iaMeanFr[muscle]),\n\t\t\t\t\"sem\":np.std(iaMeanFr[muscle])/(np.sqrt(len(iaMeanFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 125, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].bar(eesFrequency,iaMeanFrStats[muscle][\"mean\"],bar_width,yerr=iaMeanFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaMeanFr[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,1].scatter(xValsScatter,iaMeanFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,2].bar(eesFrequency,iaModDepthStats[muscle][\"mean\"],bar_width,yerr=iaModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,2].scatter(xValsScatter,iaModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,3].bar(eesFrequency,meanPerEraserApIaf[-1],5,color=colors[i])\n\n\t\t\tax[j,0].set_ylim([0,125])\n\t\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\t\tax[j,1].set_ylim([0,125])\n\t\t\tax[j,1].set_title(\"Mean Ia firing rate \")\n\t\t\tax[j,1].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,1].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,2].set_ylim([0,80])\n\t\t\tax[j,2].set_title(\"modulation depth\")\n\t\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,2].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,3].set_ylim([0,100])\n\t\t\tax[j,3].set_title(\"Percentage erased APs\")\n\t\t\tax[j,3].set_xlabel(\"Stimulation frequency (Hz)\")\n\t\t\tax[j,3].set_ylabel(\"Percentage\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)",
"def save(self, simulation_results: list):\n\n if self._dir_path is None or len(self._dir_path) < 1 or not os.path.isdir(self._dir_path):\n raise ValueError('Invalid dir path')\n\n best_sol = min(simulation_results, key=lambda x: x.result.best_fitness)\n\n self._file_name = best_sol.result.algorithm_title\n full_path = os.path.join(self._dir_path, self._file_name + '.gif')\n\n if os.path.isfile(full_path):\n raise ValueError('File already exists')\n\n img_arr = self._simulate_route(best_sol)\n imageio.mimsave(full_path, img_arr, fps=55, loop=0, duration=4)",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create learning agent\n # a = e.create_agent(RandomAgent) # create random agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01)\n # reduce update_delay to speed up simulation\n sys.stdout = open(\"./output.txt\", \"w\")\n tic = time()\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n toc = time()\n sys.stdout = sys.__stdout__\n\n print \"Totoal time used: {}.\".format(toc - tic)\n parse(\"./output.txt\")",
"def report():\n pass",
"def test_simulation(self):\n\t\tprint \"Simulation is being tested\"\n\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Debug Flag Set!\"\n\t\t\tprint self.getConfig()\n\n\t\tif toggles.PACKING:\n\t\t\ttoggles.OUTPUT_PATH = toggles.OUTPUT_PATH+toggles.RUN_NAME+'/'\n\t\t\tpackageMaker(toggles.OUTPUT_PATH,self.getConfig())\n\t\tif toggles.IDEAL_GRID:\n\t\t\tself.consensusGrid()\n\n\t\tif toggles.REAL_DATA:\n\t\t\tsampleData = self.load_data()\n\t\t\tif toggles.RUN_DATA_STATS:\n\t\t\t\tself.output_data_stats(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_AVERAGE_COST:\n\t\t\t\tself.sim_average_cost(sampleData)\n\t\t\t\tself.reset_database()\n\t\t\tif toggles.RUN_SINGLE_PAIR:\n\t\t\t\tself.sim_single_pair_cost(sampleData, pending_eddy(self.pick_worker([0], [0])))\n\t\t\t\tself.reset_database()\n\t\telse:\n\t\t\tsampleData = {}\n\t\t\tsyn_load_data()\n\n\t\tif toggles.RUN_ITEM_ROUTING and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: item Routing\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif PRED_SCORE_COUNT and not (RUN_TASKS_COUNT or RUN_MULTI_ROUTING):\n\t\t\tif DEBUG_FLAG:\n\t\t\t\tprint \"Running: Pred Score count\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\n\n\t\tif toggles.COUNT_TICKETS and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: ticket counting\"\n\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\tself.reset_database()\n\n\t\tif toggles.SELECTIVITY_GRAPH and not (toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING):\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Running: selectivity amounts over time\"\n\t\t\tself.run_sim(sampleData)\n\t\t\tself.reset_database()\n\n\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\tcorrectAnswers = self.get_correct_answers(toggles.INPUT_PATH + toggles.ITEM_TYPE + '_correct_answers.csv')\n\t\t\tpassedItems = self.get_passed_items(correctAnswers)\n\n\n\t\tif toggles.RUN_OPTIMAL_SIM:\n\t\t\tcountingArr=[]\n\t\t\tself.reset_database()\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running optimal_sim \" +str(i)\n\t\t\t\tself.num_tasks = self.optimal_sim(sampleData)\n\t\t\t\tcountingArr.append(self.num_tasks)\n\t\t\t\tself.reset_database()\n\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_optimal_tasks'\n\t\t\tgeneric_csv_write(dest+'.csv',[countingArr])\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \" + dest+'.csv'\n\n\n\n\t\tif toggles.RUN_TASKS_COUNT or toggles.RUN_MULTI_ROUTING or toggles.RUN_CONSENSUS_COUNT:\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\t#print \"Running: task_count\"\n\t\t\t\t#f = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv', 'a')\n\t\t\t\t#f1 = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_incorrect_count.csv', 'a')\n\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\toutputArray = []\n\n\t\t\trunTasksArray = []\n\t\t\tgoodArray, badArray = [], []\n\t\t\tgoodPoints, badPoints = [], []\n\t\t\taccCount = []\n\t\t\tlocArray = [[],[],[],[]]\n\n\t\t\tfor i in range(toggles.NUM_SIM):\n\t\t\t\tprint \"running simulation \" + str(i+1)\n\t\t\t\tself.run_sim(deepcopy(sampleData))\n\t\t\t\trunTasksArray.append(self.num_tasks)\n\n\t\t\t\t#____FOR LOOKING AT ACCURACY OF RUNS___#\n\t\t\t\tif toggles.TEST_ACCURACY and toggles.REAL_DATA:\n\t\t\t\t\tnum_incorrect = 
self.final_item_mismatch(passedItems)\n\t\t\t\t\taccCount.append(num_incorrect)\n\t\t\t\tif toggles.RUN_CONSENSUS_COUNT or toggles.VOTE_GRID:\n\t\t\t\t\tdonePairs = IP_Pair.objects.filter(Q(num_no__gt=0)|Q(num_yes__gt=0))\n\t\t\t\t\tif toggles.TEST_ACCURACY:\n\t\t\t\t\t\tgoodPairs, badPairs = [], []\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tval = bool((pair.num_yes-pair.num_no)>0)\n\t\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\t\tcorrect = ((correctAnswers[(pair.item,pair.predicate)]) == val)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcorrect = (pair.true_answer == val)\n\t\t\t\t\t\t\tif correct:\n\t\t\t\t\t\t\t\tgoodArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbadArray.append(pair.num_no+pair.num_yes)\n\t\t\t\t\t\t\t\tbadPoints.append((pair.num_no,pair.num_yes))\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor pair in donePairs:\n\t\t\t\t\t\t\tgoodArray.append(pair.num_no + pair.num_yes)\n\t\t\t\t\t\t\tgoodPoints.append((pair.num_no,pair.num_yes))\n\n\t\t\t\t\t#print \"This is number of incorrect items: \", num_incorrect\n\n\t\t\t\tself.reset_database()\n\n\t\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\t\tgeneric_csv_write(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_tasks_count.csv',[runTasksArray])\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.csv'\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tif len(runTasksArray)>1:\n\t\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_tasks_count.png'\n\t\t\t\t\t\ttitle = toggles.RUN_NAME + ' Cost distribution'\n\t\t\t\t\t\thist_gen(runTasksArray, dest, labels = ('Cost','Frequency'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + dest\n\t\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"only ran one sim, not running hist_gen\"\n\n\t\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME + '_Eddy_sys_' + str(toggles.EDDY_SYS) + '_multi_routing.png'\n\t\t\t\t\ttitle = toggles.RUN_NAME + ' Average Predicate Routing'\n\t\t\t\t\tquestions = toggles.CHOSEN_PREDS\n\t\t\t\t\tarrayData = []\n\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\tarrayData.append([])\n\t\t\t\t\tfor routingL in ROUTING_ARRAY:\n\t\t\t\t\t\tfor i in range(len(questions)):\n\t\t\t\t\t\t\tarrayData[i].append(routingL[i])\n\t\t\t\t\tmrsavefile = open(toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv','w')\n\t\t\t\t\tmrwriter = csv.writer(mrsavefile)\n\t\t\t\t\tmrwriter.writerow(questions)\n\t\t\t\t\tfor row in arrayData:\n\t\t\t\t\t\tmrwriter.writerow(row)\n\t\t\t\t\tmrsavefile.close()\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \"+toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\tstats_bar_graph_gen(arrayData, questions, dest, labels = ('Predicate','# of Items Routed'), title = title)\n\t\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH+toggles.RUN_NAME+'_multi_routing.png'\n\t\t\tif toggles.ACCURACY_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH+toggles.RUN_NAME+'_acc_count'\n\t\t\t\tgeneric_csv_write(dest+'.csv',[accCount])\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\thist_gen(accCount, dest+'.png')\n\n\t\t\tif toggles.RUN_CONSENSUS_COUNT:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_consensus_count'\n\t\t\t\tif len(goodArray)>1:\n\t\t\t\t\tif len(badArray) == 
0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray])\n\t\t\t\t\t\t#print goodArray\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',[goodArray,badArray])\n\t\t\t\t\t\t#print goodArray,badArray\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Wrote File: \" + dest + '.csv'\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = 'Normalized Distribution of Tasks before Consensus'\n\t\t\t\t\t\tlabels = ('Number of Tasks', 'Frequency')\n\t\t\t\t\t\tif len(badArray) < 2:\n\t\t\t\t\t\t\thist_gen(goodArray, dest+'.png',labels=labels,title=title)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tleg = ('Correctly Evaluated IP pairs','Incorrectly Evaluated IP pairs')\n\t\t\t\t\t\t\tmulti_hist_gen([goodArray,badArray],leg,dest+'.png',labels=labels,title=title)\n\t\t\t\telif toggles.DEBUG_FLAG:\n\t\t\t\t\tprint \"only ran one sim, ignoring results\"\n\t\t\tif toggles.VOTE_GRID:\n\t\t\t\tdest = toggles.OUTPUT_PATH + toggles.RUN_NAME+'_vote_grid'\n\t\t\t\tif len(goodPoints)>1:\n\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\tgeneric_csv_write(dest+'.csv',goodPoints)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgeneric_csv_write(dest+'_good.csv',goodPoints)\n\t\t\t\t\t\tgeneric_csv_write(dest+'_bad.csv',badPoints)\n\t\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\t\ttitle = \"Vote Grid Graph\"\n\t\t\t\t\t\tlabels = (\"Number of No Votes\",\"Number of Yes Votes\")\n\t\t\t\t\t\tif len(badPoints)==0:\n\t\t\t\t\t\t\txL,yL=zip(*goodPoints)\n\t\t\t\t\t\t\tline_graph_gen(xL,yL,dest+'.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tgX,gY = zip(*goodPoints)\n\t\t\t\t\t\t\tbX,bY = zip(*badPoints)\n\t\t\t\t\t\t\tmulti_line_graph_gen((gX,bX),(gY,bY),('Correct','Incorrect'),dest+'_both.png',title=title,labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(gX,gY,dest+'_good.png',title=title+\" goodPoints\",labels=labels,scatter=True,square=True)\n\t\t\t\t\t\t\tline_graph_gen(bX,bY,dest+'_bad.png',title=title+\" badPoints\",labels=labels,scatter=True,square=True)\n\t\tif toggles.TIME_SIMS:\n\t\t\tself.timeRun(sampleData)\n\n\t\tif toggles.RUN_ABSTRACT_SIM:\n\t\t\tself.abstract_sim(sampleData, toggles.ABSTRACT_VARIABLE, toggles.ABSTRACT_VALUES)",
"def _run_simulator(self):\n os.chdir(self.test_cases_path)\n\n simulator_config_filename = self.simulator_config_filename\n script, options = runner.parse_commands(simulator_config_filename)\n\n if sys.platform.startswith('win'):\n subprocess.call([script] + options, shell=True)\n else:\n subprocess.call([script] + options)\n\n os.chdir(self.this_file_path)",
"def run(num_trials):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.1, display=True) \n # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=num_trials) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n a.performace_report(num_trials)",
"def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()",
"def simul_and_export(file, config, i):\n\n simulate_UVSPEC(file, config)\n\n load_skymap(config)\n\n sim = files_sim(config)[i]\n export_sim_rad(sim, config)",
"def make_report(self, report_name, id_test, x_test, y_test, country_test, frame_test):\n if not os.path.exists('Reports/' + report_name):\n os.mkdir('Reports/' + report_name)\n results = self.predict(x_test)\n\n # Generate detailied evaluation report\n header = 'Country,Child,Frame'\n for output_layer in self.get_config()['output_layers']:\n header += ',{}_Actual'.format(output_layer[0])\n for output_layer in self.get_config()['output_layers']:\n header += ',{}_Prediction'.format(output_layer[0]) \n header += '\\n'\n\n with open('Reports/{}/evaluation_report.txt'.format(report_name), 'a') as f:\n if os.stat('Reports/{}/evaluation_report.txt'.format(report_name)).st_size == 0:\n f.write(header)\n for row in range(len(results)):\n entry = ','.join([str(i) for i in country_test[row]]) + ','\n entry += ','.join([str(i) for i in id_test[row]]) + ','\n entry += ','.join([str(i) for i in frame_test[row]]) + ','\n entry += ','.join([str(i) for i in y_test[row]]) + ','\n entry += ','.join([str(i) for i in results[row]]) + '\\n'\n f.write(entry)\n\n # Generate report of summary statistics\n cultures = np.unique(country_test)\n for c in cultures:\n culture_rows = np.where(country_test == c)[0] # get row numbers for culture c \n culture_ids = id_test[culture_rows] # get ID rows for culture c \n unique_ids = np.unique(culture_ids) # get unique IDs for culture c \n\n for u in unique_ids: \n all_id_rows = np.where(id_test == u)[0]\n id_rows = np.intersect1d(all_id_rows, culture_rows) # get ID rows for child u \n\n id_icc = icc(results[id_rows], y_test[id_rows])[0] # compute ICC for child u \n id_pcc = pcc(results[id_rows], y_test[id_rows])[0][0] # compute PCC for child u \n id_ccc = ccc(results[id_rows], y_test[id_rows]) # compute CCC for child u \n id_mae = mae(results[id_rows], y_test[id_rows]) # compute MAE for child u \n\n icc_entry = '{},{},{}\\n'.format(c, u, id_icc)\n pcc_entry = '{},{},{}\\n'.format(c, u, id_pcc)\n ccc_entry = '{},{},{}\\n'.format(c, u, id_ccc)\n mae_entry = '{},{},{}\\n'.format(c, u, id_mae)\n \n with open('Reports/{}/icc_report.txt'.format(report_name), 'a') as f:\n f.write(icc_entry)\n\n with open('Reports/{}/pcc_report.txt'.format(report_name), 'a') as f:\n f.write(pcc_entry)\n\n with open('Reports/{}/ccc_report.txt'.format(report_name), 'a') as f:\n f.write(ccc_entry)\n\n with open('Reports/{}/mae_report.txt'.format(report_name), 'a') as f:\n f.write(mae_entry)\n\n return results",
"def save_simulation_results(file_name, **kwargs):\n\n # Insert debugging assertions\n assert type(file_name) is str, \"The 'filw_name' must be string.\"\n \n # Save a dictionary of names and arrays into a MATLAB-style .mat file\n sio.savemat(file_name, kwargs)",
"def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)",
"def test_run(self):\n rig_analysis_dir = \"rig_analysis\"\n analysis_root = os.path.join(self.io_args.output_root, rig_analysis_dir)\n os.makedirs(analysis_root, exist_ok=True)\n\n self.io_args.output_obj = os.path.join(analysis_root, \"final.obj\")\n self.io_args.output_equirect = os.path.join(analysis_root, \"equirect.ppm\")\n self.io_args.output_camera = os.path.join(analysis_root, \"camera.ppm\")\n self.io_args.output_camera_id = \"0\"\n self.io_args.output_cross_section = os.path.join(analysis_root, \"cross.ppm\")\n\n self.run_app(\"RigAnalyzer\")\n self.check_against_truth(\n truth=os.path.join(self.io_args.truth_dir, rig_analysis_dir),\n output=analysis_root,\n )",
"def simulate(): \n \n # Create tmpdir to hold all steerfiles and log files \n SimObj = Simulation(steerfiles=steerfiles, name=os.path.splitext(os.path.basename(rawfile_alu))[0] + '-sim' )\n\n # Set Beam energy\n SimObj.set_beam_momentum(beamenergy)\n\n # Create steerfiles for processing\n simpath = create_sim_path_air(SimObj)\n\n # Get gearfile\n localgearfile = SimObj.get_filename('gear.xml')\n\n # Misalign gear file\n randomize_telescope(gearfile=localgearfile, mean_list=mean_list, sigma_list=sigma_list, sensorexception_list=sensorexception_list, modeexception_list=modeexception_list)\n\n localtruthdb_filename=SimObj.create_dbfilename(truthdb_filename)\n\n # Convert gear file to alignmentDB root file, which will be stored in the sim folder\n Create_AlignmentDBFile_From_Gear(gearfile=SimObj.get_filename('gear.xml'), truthdbfilename=localtruthdb_filename)\n\n # Copy gearfile\n SimObj.copy_file('gear.xml','gear_air.xml')\n\n # Get air gearfile\n gearfile_air = SimObj.get_filename('gear_air.xml')\n\n # Change DUT in copied gearfile\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=gearfile_air, sensorID=11, parametername='radLength', value=304000.0)\n\n\n # Create caltag for the truthdb\n localcaltag = os.path.splitext(os.path.basename(rawfile_air))[0] + '-test'\n simcaltag=localcaltag+ '-truthdb'\n\n # Run simulation to create rawfile with simulated digits \n SimObj.simulate(path=simpath,caltag=simcaltag)"
] | [
"0.6596672",
"0.6571582",
"0.64502853",
"0.6428246",
"0.64214563",
"0.63550687",
"0.630784",
"0.6306099",
"0.6249517",
"0.62216187",
"0.6202602",
"0.6120247",
"0.6079604",
"0.5939629",
"0.5922156",
"0.5920404",
"0.58797896",
"0.5826667",
"0.58249176",
"0.581223",
"0.5811084",
"0.5776924",
"0.5775157",
"0.57704616",
"0.57695943",
"0.5767226",
"0.57538974",
"0.57349384",
"0.5734729",
"0.573363"
] | 0.66220045 | 0 |
Wrapper for single or multigene figures. If multiple genes are provided, each will be rendered as its own axis in a vertical stack and each will be labeled with a single uppercase letter, as would be suitable for a multipanel figure in a manuscript. | def plot_figure(
df: pd.DataFrame, genes: Union[Mapping, Sequence[Mapping]]
) -> matplotlib.figure.Figure:
if isinstance(genes, Mapping): # convert single genes into a list of length 1
genes = [genes]
    fig = plt.figure(figsize=(10, 2 * len(genes)), tight_layout=True)
spec = gridspec.GridSpec(ncols=1, nrows=len(genes), figure=fig)
for i, g in enumerate(genes):
ax = fig.add_subplot(spec[i, 0])
if len(genes) > 1:
ax.text(0, 1, chr(ord("A") + i), transform=ax.transAxes, size="xx-large")
plot_variants(ax, df, g)
if i == 0:
fig.legend()
if i == len(genes) - 1:
ax.set_xlabel("amino acid position")
return fig | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Figure4Main(self, supplemental1=False):\n if not supplemental1:\n example_cells = [5, 9, 17, 30]\n else:\n example_cells = [2, 6, 10, 11, 13, 18]\n\n start_letter = \"A\"\n parent_figure = None\n\n if not supplemental1:\n sizer = {\n \"D\": {\"pos\": [6.5, 2.2, 4.25, 2.5], \"labelpos\": (-0.15, 1.02),},\n \"E\": {\"pos\": [9.5, 2.2, 4.25, 2.5], \"labelpos\": (-0.15, 1.02),},\n \"F\": {\"pos\": [6.5, 2.2, 0.5, 2.5], \"labelpos\": (-0.15, 1.02),},\n \"G\": {\"pos\": [9.5, 2.2, 0.5, 2.5], \"labelpos\": (-0.15, 1.02),},\n }\n figsize = (12,8)\n else:\n sizer = {}\n figsize = (9, 8)\n xw = 1.1\n trace_axes = []\n for j in range(len(example_cells)):\n i = j + 1\n xl = j * 1.25 + 0.75\n axn = f\"A{i:d}\"\n trace_axes.append(axn)\n sizer[axn] = {\n \"pos\": [xl, xw, 3.25, 4.25],\n \"labelpos\": (-0.15, 1.02),\n \"noaxes\": True,\n }\n sizer[f\"B{i:d}\"] = {\n \"pos\": [xl, xw, 2.0, 1.0],\n \"labelpos\": (-0.15, 1.02),\n # \"noaxes\": True,\n }\n sizer[f\"C{i:d}\"] = {\n \"pos\": [xl, xw, 0.5, 1.0],\n \"labelpos\": (-0.15, 0.9),\n \"noaxes\": True,\n }\n # dict pos elements are [left, width, bottom, height] for the axes in the plot. gr = [(a, a+1, 0, 1) for a in range(0, 8)] # just generate subplots - shape do not matter axmap = OrderedDict(zip(sizer.keys(), gr))\n P = PH.arbitrary_grid(\n sizer,\n order=\"columnsfirst\",\n units=\"in\",\n figsize=figsize,\n label=True,\n showgrid=False,\n parent_figure=parent_figure,\n )\n # Efficacy plot\n if not supplemental1:\n EFP = EF.EfficacyPlots(parent_figure=P)\n EFP.plot_efficacy(\"Full\", ax=P.axdict[\"D\"], figuremode=\"clean\")\n # participation plots\n synperum2 = 0.7686 # taken from cell_config.py, line 127 (11/15/2021)\n\n def plot_participation(ax, n, a, b, dB=0, color=None):\n ap = a[n][0].participation / a[n][0].npost_spikes\n bp = b[n][0].participation / b[n][0].npost_spikes\n ax.plot(\n [a[n][0].sites / synperum2, a[n][0].sites / synperum2],\n [ap, bp],\n \"-\",\n color=color,\n )\n ax.scatter(a[n][0].sites / synperum2, ap, marker=\"o\", color=color)\n ax.scatter(a[n][0].sites / synperum2, bp, marker=\"x\", color=color)\n ax.set_xlabel(r\"Input ASA (${\\mu m^2}$)\")\n ax.set_xlim(0, 300)\n ax.set_ylim(0, 1.0)\n ax.set_ylabel(f\"Participation at 0 and {dB:2d} dBSPL\")\n PH.talbotTicks(ax, floatAdd={\"x\": 0, \"y\": 2})\n\n def plot_diff_participation(ax, n, a, b, dB=0, color=None, legend=True):\n ap = a[n][0].participation / a[n][0].npost_spikes\n bp = b[n][0].participation / b[n][0].npost_spikes\n ax.scatter(\n a[n][0].sites / synperum2,\n bp / ap,\n marker=\"o\",\n color=color,\n label=f\"VCN_c{n:02d}\",\n )\n ax.set_xlabel(r\"Input ASA (${\\mu m^2}$)\")\n ax.set_xlim(0, 300)\n ax.set_ylim(0, 3)\n ax.set_ylabel(f\"Participation ratio {dB:2d}/{0:2d} dBSPL\")\n PH.talbotTicks(ax, floatAdd={\"x\": 0, \"y\": 2})\n if legend:\n ax.legend(fontsize=8, loc=\"upper right\", ncol=2)\n\n dB = 30\n if not supplemental1:\n ds = self._load_rcdata(\"Spont\")\n drc = self._load_rcdata(f\"{dB:2d}dB\")\n palette = sns.color_palette(None, len(ds.keys()))\n for i, c in enumerate(ds.keys()):\n # plot_participation(P.axdictax[0], c, ds, drc, dB=dB, color=palette[i])\n plot_diff_participation(\n P.axdict[\"E\"], c, ds, drc, dB=dB, color=palette[i], legend=False\n )\n\n axl = [P.axdict[axi] for axi in trace_axes]\n self.plot_stacked_traces(cells=example_cells, figure=P, axes=axl, maxstack=10)\n if not supplemental1:\n self.plot_revcorr_compare(\n parent_figure=P,\n axlist=[P.axdict[\"F\"], P.axdict[\"G\"]],\n dBSPLs=[\"Spont\", \"30dB\"],\n legend=False,\n )\n 
synlabel_num = 5\n else:\n synlabel_num = 2\n self.plot_revcorr_supplement(cells=example_cells, parent_figure=P, dBSPL=\"30dB\", synlabel_num=synlabel_num)\n # self.plot_efficacy_supplement(cells=example_cells, parent_figure=P, traces=False)\n\n for j in range(len(example_cells)):\n ax = P.axdict[f\"B{j+1:d}\"]\n ax.set_ylim(0, 0.8)\n ax.set_xlim(-5.0, 0.0)\n\n if j > 0:\n PH.noaxes(ax, whichaxes=\"y\")\n else:\n ax.set_ylabel(\"Coinc. Rate (Hz)\")\n ax.xaxis.set_minor_locator(MultipleLocator(1))\n ax.tick_params(which=\"major\", length=4, direction=\"in\")\n ax.tick_params(which=\"minor\", length=2, direction=\"in\")\n fig = FigInfo()\n if parent_figure is not None:\n fig.P = parent_figure\n else:\n fig.P = P\n if not supplemental1:\n fig.filename = \"Figure4_Ephys2_main_v4.pdf\"\n fig.title[\n \"title\"\n ] = \"SBEM Project Figure 4 Modeling: Singles, Efficacy and Revcorr\"\n else:\n fig.filename = \"Figure4-Supplemental1_Revcorr.pdf\"\n fig.title[\n \"title\"\n ] = \"SBEM Project Figure 4 Modeling: other cells Singles and Revcorr\"\n\n title2 = {\"title\": f\"\", \"x\": 0.99, \"y\": 0.01}\n fig.title2 = title2\n print(\"returnin fig: \", fig)\n return fig",
"def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)\n cx, cy, _ = module[0].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [7, 8, 15]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('LPD-1M detector geometry ({})'.format(self.filename))\n return ax",
"def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)\n cx, cy, _ = module[4].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [0, 7]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('AGIPD-1M detector geometry ({})'.format(self.filename))\n return ax",
"def Te_ne_P_panel(**kwargs):\n\n GR = glo.global_results()\n gal_indices = np.arange(GR.N_gal)\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n for gal_index in gal_indices:\n fig = plt.figure(figsize=(15,7),constrained_layout=False)\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n cell_data = gal_ob.cell_data.get_dataframe()\n\n gs1 = fig.add_gridspec(nrows=1, ncols=3, wspace=0.0, hspace=0.0)\n\n ax = fig.add_subplot(gs1[0,0])\n h = np.histogram(np.log10(cell_data.Te_mw),bins=100)\n bin_size = (h[1][1]-h[1][0])/2\n ax.fill_between(h[1][0:-1] + bin_size,h[0],color='orange', step='pre',alpha=0.6,label='G%i' % gal_index)\n ax.set_xlabel('log mass-weighted T$_{e}$ per cell')\n ax.set_ylabel('Mass fraction')\n\n ax = fig.add_subplot(gs1[0,1])\n h = np.histogram(np.log10(cell_data.ne_mw_grid),bins=100)\n bin_size = (h[1][1]-h[1][0])/2\n ax.fill_between(h[1][0:-1] + bin_size,h[0],color='orange', step='pre',alpha=0.6,label='G%i' % gal_index)\n ax.set_xlabel('log mass-weighted n$_{e}$ per cell')\n ax.set_ylabel('Mass fraction')\n\n ax = fig.add_subplot(gs1[0,2])\n h = np.histogram(np.log10(cell_data.P_HII),bins=100)\n bin_size = (h[1][1]-h[1][0])/2\n ax.fill_between(h[1][0:-1] + bin_size,h[0],color='orange', step='pre',alpha=0.6,label='G%i' % gal_index)\n ax.set_xlabel('log mass-weighted P$_{HII}$ per cell')\n ax.set_ylabel('Mass fraction')\n\n plt.tight_layout()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'cell_data/pressure/'): os.mkdir(p.d_plot + 'cell_data/pressure/')\n plt.savefig(p.d_plot + 'cell_data/pressure/G%i' % gal_index, dpi=250, facecolor='w')\n plt.close()",
"def init_plot_emg(nb_emg, muscle_names=()):\n app = pg.mkQApp()\n remote = []\n layout = pg.LayoutWidget()\n layout.resize(800, 800)\n label = QtGui.QLabel()\n box = []\n rplt = []\n row_count = 0\n col_span = 4 if nb_emg > 8 else 8\n for emg in range(nb_emg):\n remote.append(rgv.RemoteGraphicsView())\n remote[emg].pg.setConfigOptions(antialias=True)\n app.aboutToQuit.connect(remote[emg].close)\n if len(muscle_names) == 0:\n box.append(QtGui.QCheckBox(f\"muscle_{emg}\"))\n else:\n box.append(QtGui.QCheckBox(muscle_names[emg]))\n if emg >= 8:\n layout.addWidget(box[emg], row=1, col=emg - 8)\n layout.addWidget(remote[emg], row=emg - 8 + 2, col=4, colspan=col_span)\n else:\n layout.addWidget(box[emg], row=0, col=emg)\n layout.addWidget(remote[emg], row=emg + 2, col=0, colspan=col_span)\n rplt.append(remote[emg].pg.PlotItem())\n rplt[emg]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n remote[emg].setCentralItem(rplt[emg])\n layout.addWidget(label)\n layout.show()\n emg += 1\n row_count += 1\n\n return rplt, layout, app, box",
"def inspect(self, axis_units='px', frontview=True):\n ax = super().inspect(axis_units=axis_units, frontview=frontview)\n scale = self._get_plot_scale_factor(axis_units)\n\n # Label modules and tiles\n for ch, module in enumerate(self.modules):\n s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)\n cx, cy, _ = module[0].centre() * scale\n ax.text(cx, cy, s, fontweight='bold',\n verticalalignment='center',\n horizontalalignment='center')\n\n for t in [1]:\n cx, cy, _ = module[t].centre() * scale\n ax.text(cx, cy, 'T{}'.format(t + 1),\n verticalalignment='center',\n horizontalalignment='center')\n\n ax.set_title('DSSC detector geometry ({})'.format(self.filename))\n return ax",
"def create_four_subplots():\n pass",
"def fig_template(name):\n fig, axs = plt.subplots(ncols=2, figsize=(9, 4))\n if name == \"circle\":\n for ax in axs:\n circle = plt.Circle(\n (0, 0), np.sqrt(2 / np.pi), color=\"black\", fill=False, zorder=10\n )\n ax.add_artist(circle)\n\n elif name == \"3_circles\":\n centers = np.array([[-1, 1], [1, 0], [-0.5, -0.5]])\n radii = np.array([1, np.sqrt(6 / np.pi - 1), 1 / 2])\n for c, r in zip(centers, radii):\n for ax in axs:\n circle = plt.Circle(c, r, color=\"black\", fill=False, zorder=10)\n ax.add_artist(circle)\n\n elif name == \"square\":\n p = 0.5 * np.sqrt(2)\n for ax in axs:\n ax.plot([-p, p, p, -p, -p], [-p, -p, p, p, -p], color=\"black\", zorder=10)\n\n elif name == \"4_squares\":\n for ax in axs:\n ax.plot([0, 0], [-1, 1], color=\"black\", zorder=10)\n ax.plot([-1, 1], [0, 0], color=\"black\", zorder=10)\n\n elif name == \"crown\" or name == \"tricrown\":\n centers = [[0, 0], [0, 0]]\n radii = [np.sqrt(0.8), np.sqrt(0.8 - 2 / np.pi)]\n for c, r in zip(centers, radii):\n for ax in axs:\n circle = plt.Circle(c, r, color=\"black\", fill=False, zorder=10)\n ax.add_artist(circle)\n\n elif name == \"wavy_lines\":\n freq = 1\n\n def fun1(s):\n return s + np.sin(freq * np.pi * s)\n\n def fun2(s):\n return -s + np.sin(freq * np.pi * s)\n\n x = np.linspace(-1, 1)\n for ax in axs:\n ax.plot(x, np.clip(fun1(x), -1, 1), color=\"black\", zorder=10)\n ax.plot(x, np.clip(fun2(x), -1, 1), color=\"black\", zorder=10)\n\n axs[0].set(xlabel=r\"$x_0$\", ylabel=r\"$x_1$\", xlim=[-1, 1], ylim=[-1, 1])\n axs[0].axis(\"equal\")\n axs[1].set(xlabel=r\"$x_0$\", xlim=[-1, 1], ylim=[-1, 1])\n axs[1].axis(\"equal\")\n\n return fig, axs",
"def viz_gene_quanti(self):\n # Create output folders for each species\n project_creator = ProjectCreator()\n project_creator.create_subfolders(\n self._pathcreator.required_viz_gene_quanti_folders()\n )\n\n for sp in self._species_folder_prefixes:\n # Set output folder and files paths for each species\n gene_wise_quanti_combined_path = self._pathcreator.gene_quanti_files_by_species[\n sp\n ][\n \"gene_wise_quanti_combined_path\"\n ]\n viz_gene_quanti_scatter_plot_path = self._pathcreator.viz_gene_quanti_files_by_species[\n sp\n ][\n \"viz_gene_quanti_scatter_plot_path\"\n ]\n rna_classes_plot_path = self._pathcreator.viz_gene_quanti_files_by_species[\n sp\n ][\n \"rna_classes_plot_path\"\n ]\n\n # Create plots\n gene_quanti_viz = GeneQuantiViz(\n gene_wise_quanti_combined_path,\n self._pathcreator.get_lib_names_single_end()\n if not self._args.paired_end\n else self._pathcreator.get_lib_names_paired_end(),\n )\n gene_quanti_viz.parse_input_table()\n gene_quanti_viz.plot_correlations(viz_gene_quanti_scatter_plot_path)\n gene_quanti_viz.plot_annotation_class_quantification(\n rna_classes_plot_path\n )",
"def generate_plot(self, xlimg = None , ylimg =None , exname = '' , prefix = True , save = True):\n print ('start with the generation of plots')\n #plot of condensation energy\n self.plotwrap(0,2, 'energy (a.u.)' , name = 'ge'+ exname, titel = 'the energy (a.u.)', xlim = xlimg , ylim = ylimg , prefix = prefix ,save = save )\n self.plotwrap(0,1, 'condensation energy (a.u.)' , name = 'ce' + exname ,titel = 'the condensation energy (a.u.)',xlim = xlimg , ylim = ylimg , prefix = prefix,save = save )",
"def render_sample(cls,\n nusc: 'NuScenes',\n radar_data_gp: 'RadarDataGroup',\n front_cam_data: 'CameraData',\n host_data:'HostData',\n fig_info:dict,\n box_vis_level: BoxVisibility = BoxVisibility.ANY,\n out_path: str = None) -> None:\n cls.colors=radar_data_gp.colors\n fig, axes = plt.subplots(1, 2, figsize=(16, 24))\n #left diagram to show front view\n ax1 = axes[0]\n cls.render_front_view(cls, nusc=nusc, radar_data_gp=radar_data_gp, front_cam_data=front_cam_data,\n ax =ax1, use_flat_vehicle_coordinates=False, with_anns=False, with_radar=True)\n #right diagram to show bird view\n ax2 = axes[1] \n \n cls.render_bird_view(cls, nusc=nusc, radar_data_gp=radar_data_gp, front_cam_data=front_cam_data, \n host_data=host_data, ax =ax2, use_flat_vehicle_coordinates=True)\n #figure information\n fig.text(0.01, 0.97, 'Figure information: ', weight='bold', color='green',fontsize = 15) \n fig.text(0.01, 0.93, 'package: '+nusc.version, fontsize = 12) \n fig.text(0.01, 0.9, 'scene name: '+fig_info['scene_name'], fontsize = 12) \n fig.text(0.01, 0.87, 'timestamp: '+str(front_cam_data.time_stamp*1e-6)+'(s)', fontsize = 12)\n fig.text(0.01, 0.84, 'elapse time: : '+str((fig_info['elapse_time'])*1e-3)+'(ms)', fontsize = 12)\n \n plt.tight_layout()\n fig.subplots_adjust(wspace=0, hspace=0)\n if out_path is not None:\n plt.savefig(out_path,dpi=200)\n if fig_info['verbose']: \n plt.show()",
"def spider(inputfiles, names, type=\"M1\", threshold=0.1, spinwindow=[], Eg_low=0, Eg_high=1e9, Ex_low=0, Ex_high=1e9, scale=2):\n\n Nsp = np.ceil(np.sqrt(len(inputfiles))).astype(int)\n f, ax_list = plt.subplots(Nsp,Nsp,squeeze=False,sharex='col', sharey='row')\n\n for i in range(len(inputfiles)):\n inputfile = inputfiles[i]\n name = names[i]\n ax = ax_list[i%Nsp][int((i-i%Nsp)/Nsp)]\n\n\n levels = read_energy_levels(inputfile)\n Egs = levels[0,0]\n\n Ex_high = min(Ex_high, levels[:,0].max()-Egs)\n Eg_high = min(Eg_high, levels[:,0].max()-Egs)\n \n \n levels_plot_J = []\n levels_plot_Ex = []\n for iEx in range(len(levels[:,0])):\n # print levels[iEx,:]\n J2 = levels[iEx,1]\n par = levels[iEx,2]\n if len(spinwindow) > 0 and not ([J2,par] in spinwindow or [J2-2,par] in spinwindow or [J2+2,par] in spinwindow):\n continue\n Ex = levels[iEx,0]-Egs\n if Ex < Ex_low or Ex > Ex_high:\n continue\n \n levels_plot_J.append(J2/2)\n levels_plot_Ex.append(Ex)\n \n ax.plot(levels_plot_J, levels_plot_Ex, 'o', color='grey', linewidth=0.5)\n ax.set_xlim([levels[:,1].min()/2-1,levels[:,1].max()/2+1])\n ax.set_title(name+r'$\\,E_\\gamma\\in[{:.1f},{:.1f}]$'.format(Eg_low,Eg_high))\n ax.set_ylabel(r'$E_x\\,\\mathrm{[MeV]}$')\n ax.set_xlabel(r'$J\\,\\mathrm{[\\hbar]}$')\n \n \n \n transitions = read_transition_strengths(inputfile, type=type)\n for iEx in range(len(transitions[:,0])):\n J2i = int(transitions[iEx,0])\n pari = int(transitions[iEx,1])\n if len(spinwindow) > 0 and not [J2i,pari] in spinwindow:\n continue\n B = transitions[iEx,7]\n Eg = transitions[iEx,6]\n if B < threshold or Eg<Eg_low or Eg>Eg_high:\n continue\n Ei = transitions[iEx,2]-Egs\n if Ei < Ex_low or Ei > Ex_high:\n continue\n J2f = int(transitions[iEx,3])\n parf = int(transitions[iEx,4])\n Ef = transitions[iEx,5]-Egs\n ax.plot([J2i/2,J2f/2],[Ei,Ef], color='teal', linewidth=(scale*B))\n \n return f, ax_list",
"def getSetup(figsize, gridd, multz=None, empts=None):\n sns.set(style=\"whitegrid\", font_scale=0.7, color_codes=True, palette=\"colorblind\", rc={\"grid.linestyle\": \"dotted\", \"axes.linewidth\": 0.6})\n\n # create empty list if empts isn't specified\n if empts is None:\n empts = []\n\n if multz is None:\n multz = dict()\n\n # Setup plotting space and grid\n f = plt.figure(figsize=figsize, constrained_layout=True)\n gs1 = gridspec.GridSpec(*gridd, figure=f)\n\n # Get list of axis objects\n x = 0\n ax = list()\n while x < gridd[0] * gridd[1]:\n if x not in empts and x not in multz.keys(): # If this is just a normal subplot\n ax.append(f.add_subplot(gs1[x]))\n elif x in multz.keys(): # If this is a subplot that spans grid elements\n ax.append(f.add_subplot(gs1[x: x + multz[x] + 1]))\n x += multz[x]\n x += 1\n\n return (ax, f)",
"def pomegranate(args):\n p = OptionParser(pomegranate.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"9x7\")\n\n if len(args) != 5:\n sys.exit(not p.print_help())\n\n seqidsfile, klayout, datafile, bedfile, slayout = args\n\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Karyotype(fig, root, seqidsfile, klayout)\n Synteny(fig, root, datafile, bedfile, slayout)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.42, 0.52, 0.48)\n\n labels = ((0.04, 0.96, \"A\"), (0.04, 0.52, \"B\"))\n panel_labels(root, labels)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"pomegranate-karyotype\"\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)",
"def Main_Sequence(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n method = p.method\n fig,ax = plt.subplots(figsize = (8,6))\n \n # Plot all galaxies in simulation volume\n try:\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/z0_all_galaxies%s' % p.sim_runs[0])\n print('%i galaxies in Simba-%s' % (len(df_all),p.sim_runs[0]))\n df_all1 = df_all[(df_all['SFR_'+method] > 0) & (df_all['SFR_'+method] != 1)]\n hb = ax.hexbin(df_all1['M_star_'+method],df_all1['SFR_'+method],bins='log',xscale='log',yscale='log',\\\n cmap='binary',lw=0,gridsize=70)\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/z0_all_galaxies%s' % p.sim_runs[1])\n print('%i galaxies in Simba-%s' % (len(df_all),p.sim_runs[1]))\n df_all2 = df_all[df_all['SFR_'+method] > 0]\n df_all = df_all1.append(df_all2, ignore_index=True)\n hb = ax.hexbin(df_all['M_star_'+method],df_all['SFR_'+method],bins='log',xscale='log',yscale='log',\\\n cmap='binary',lw=0,gridsize=(50,70))\n except:\n print('Missing file to plot all galaxies in Simba%s' % (p.sim_runs[0]))\n\n # Plot 25 Mpc box? \n if p.select == '_25Mpc':\n GR = glo.global_results(sim_run='_25Mpc',nGal=240,grid_ext='_ext_ism_BPASS')\n M_star,SFR,Zsfr = getattr(GR,'M_star'),getattr(GR,'SFR'),getattr(GR,'Zsfr')\n ax.plot(1,1,'^',color='forestgreen',label='Simba-25 galaxy sample',ms=10)\n sc = ax.scatter(M_star,SFR,\\\n marker='^',s=50,alpha=0.8,c=np.log10(Zsfr),vmin=np.log10(0.01),vmax=np.log10(2),cmap='summer',zorder=10)\n\n # Plot current sample\n GR = glo.global_results()\n M_star,SFR,Zsfr = getattr(GR,'M_star'),getattr(GR,'SFR'),getattr(GR,'Zsfr')\n if p.select == '_MS':\n indices = aux.select_salim18(GR.M_star,GR.SFR)\n M_star = M_star[indices]\n SFR = SFR[indices]\n Zsfr = Zsfr[indices]\n print('With MS selection criteria: only %i galaxies' % (len(M_star)))\n ax.plot(1,1,'o',color='forestgreen',label='Simba-100 galaxy sample',ms=10)\n sc = ax.scatter(M_star,SFR,\\\n marker='o',s=20,alpha=0.8,c=np.log10(Zsfr),vmin=np.log10(0.01),vmax=np.log10(2),cmap='summer',zorder=10)\n\n # Plot observations\n if p.zred == 0:\n MS_salim = pd.read_csv('data/observations/MS/salim2018_ms_v1.dat',\\\n names=['logMstar','logsSFR','logsSFR_1','logsSFR_2'],sep=' ')\n ax.fill_between(10.**MS_salim.logMstar,10.**MS_salim.logMstar*10.**MS_salim.logsSFR_1,\\\n 10.**MS_salim.logMstar*10.**MS_salim.logsSFR_2,color='royalblue',alpha=0.3)\n ax.plot(10.**MS_salim.logMstar,10.**MS_salim.logMstar*10.**MS_salim.logsSFR,\\\n '--',color='mediumblue',label='[Salim+18] SF MS')\n # MS_salim = pd.read_csv('data/observations/MS/salim2018_ms_v2.dat',names=['logMstar','logsSFR'],sep=' ')\n # ax.plot(10.**MS_salim.logMstar,10.**MS_salim.logMstar*10.**MS_salim.logsSFR,'--',label='[Salim+18] SF MS')\n cosmo = FlatLambdaCDM(H0=0.68*100 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=0.3)\n t = cosmo.age(0).value\n fit_speagle = 10.**((0.84-0.026*t)*np.log10(ax.get_xlim())-(6.51-0.11*t))\n #ax.fill_between(ax.get_xlim(),10.**(np.log10(fit_speagle)-0.3),\\\n # 10.**(np.log10(fit_speagle)+0.3),alpha=0.2,color='grey')\n fit_speagle = 10.**((0.84-0.026*t)*np.log10(ax.get_xlim())-(6.51-0.11*t))\n # Convert from Kroupa to Chabrier: https://ned.ipac.caltech.edu/level5/March14/Madau/Madau3.html\n #ax.plot(ax.get_xlim(),fit_speagle*0.63/0.67,':',color='grey',label='[Speagle+14] \"mixed\" fit')\n \n ax.set_ylabel('SFR [M$_{\\odot}$/yr]')\n ax.set_xlabel('M$_*$ [M$_{\\odot}$]')\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim([1e7,1e12])\n ax.set_ylim([10**(-2),1e2])\n \n cb 
= fig.colorbar(sc, ax=ax)\n cb.set_label(r'log $\\langle$Z$\\rangle_{\\rm{SFR}}$ [Z$_{\\odot}$]')\n handles,labels = ax.get_legend_handles_labels()\n try:\n handles = [handles[_] for _ in [1,0,2]]#np.flip(handles)\n labels = [labels[_] for _ in [1,0,2]]#np.flip(labels)\n except:\n handles = [handles[_] for _ in [1,0]]#np.flip(handles)\n labels = [labels[_] for _ in [1,0]]#np.flip(labels)\n ax.legend(handles,labels,fontsize=12)\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig('plots/sim_data/SFR_Mstar_%s_%s%s' % (method,p.sim_name,p.sim_run),dpi=250,facecolor='w')",
"def litchi(args):\n p = OptionParser(litchi.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"9x6\")\n\n if len(args) != 4:\n sys.exit(not p.print_help())\n\n datafile, bedfile, slayout, switch = args\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Synteny(fig, root, datafile, bedfile, slayout, switch=switch)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.4, 0.7, 0.82)\n\n # On the left panel, make a species tree\n fc = \"lightslategrey\"\n\n coords = {}\n xs, xp = 0.16, 0.03\n coords[\"lychee\"] = (xs, 0.37)\n coords[\"clementine\"] = (xs, 0.5)\n coords[\"cacao\"] = (xs, 0.6)\n coords[\"strawberry\"] = (xs, 0.7)\n coords[\"grape\"] = (xs, 0.8)\n xs -= xp\n coords[\"Sapindales\"] = join_nodes(root, coords, \"clementine\", \"lychee\", xs)\n xs -= xp\n coords[\"Rosid-II\"] = join_nodes(root, coords, \"cacao\", \"Sapindales\", xs)\n xs -= xp\n coords[\"Rosid\"] = join_nodes(root, coords, \"strawberry\", \"Rosid-II\", xs)\n xs -= xp\n coords[\"crown\"] = join_nodes(root, coords, \"grape\", \"Rosid\", xs, circle=False)\n\n # Names of the internal nodes\n for tag in (\"Rosid\", \"Rosid-II\", \"Sapindales\"):\n nx, ny = coords[tag]\n nx, ny = nx - 0.01, ny - 0.02\n root.text(nx, ny, tag, rotation=90, ha=\"right\", va=\"top\", color=fc)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"litchi\"\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)",
"def create_preview(name):\n file_type = os.path.splitext(name)[1]\n\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n dir = os.path.dirname(os.path.realpath(__file__))\n file = open(dir+'/instances/'+name)\n if file_type == '.csv':\n\n for nodeNo,line in enumerate(file): #enumerate used to obtain line numbers and thus node numbers\n coords = line.rsplit()[0].split(\",\")\n\n x = int(coords[0])\n y = int(coords[1])\n axis.scatter(x, y, c = 'b', label = nodeNo)\n axis.set_title(name)\n axis.text(x+5,y+5, str(nodeNo))\n else:\n file.readline()\n file.readline()\n file.readline()\n no_nodes = int(file.readline().strip().split()[1])\n file.readline()\n file.readline()\n file.readline()\n\n for i in range(0, no_nodes):\n\n coords = file.readline().strip().split()[1:]\n x = float(coords[0])\n y = float(coords[1])\n axis.scatter(x, y, c = 'b', label = i)\n axis.set_title(name)\n axis.text(x,y, str(i))\n\n return fig",
"def plot_multiple_venn(\n sizes: List[List[int]],\n labels: List[List[str]],\n figname: str,\n titles: List[str],\n suptitle: str = '',\n gridspec_kw: dict = {},\n figsize: Iterable = (8, 4.5),\n **kwargs,\n) -> None:\n\n assert len(sizes) == len(labels), 'Length of labels & sizes dont match.'\n assert len(sizes) == len(titles), 'Length of titles & sizes dont match.'\n assert len(sizes) > 1, 'At least 2 items should be provided.'\n assert all(list(map(lambda x: len(x) in [2, 3], labels))), 'Wrong label sizes.'\n assert all(list(map(lambda x: len(x) in [3, 7], sizes))), 'Wrong label sizes.'\n\n fig, axes = plt.subplots(1, len(sizes), gridspec_kw=gridspec_kw, figsize=figsize)\n plt.suptitle(suptitle, size=18, fontweight='bold')\n\n figname = titles[0].lower().replace(' vs. ', '_') if figname == '' else figname\n\n for idx, (size, label, title) in enumerate(zip(sizes, labels, titles)):\n if len(label) == 2:\n plot_venn_two(size, label, title=title, ax=axes[idx])\n elif len(label) == 3:\n plot_venn_three(size, label, title=title, ax=axes[idx])\n\n plt.savefig(f'{figname}.pdf')",
"def amborella(args):\n p = OptionParser(amborella.__doc__)\n p.add_option(\"--tree\", help=\"Display trees on the bottom of the figure\")\n p.add_option(\"--switch\", help=\"Rename the seqid with two-column file\")\n opts, args, iopts = p.set_image_options(args, figsize=\"8x7\")\n\n if len(args) != 5:\n sys.exit(not p.print_help())\n\n seqidsfile, klayout, datafile, bedfile, slayout = args\n switch = opts.switch\n tree = opts.tree\n\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Karyotype(fig, root, seqidsfile, klayout)\n Synteny(fig, root, datafile, bedfile, slayout, switch=switch, tree=tree)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.5, 0.68, 0.5)\n\n # annotate the WGD events\n fc = \"lightslategrey\"\n x = 0.05\n radius = 0.012\n TextCircle(root, x, 0.86, r\"$\\gamma$\", radius=radius)\n TextCircle(root, x, 0.95, r\"$\\epsilon$\", radius=radius)\n root.plot([x, x], [0.83, 0.9], \":\", color=fc, lw=2)\n pts = plot_cap((x, 0.95), np.radians(range(-70, 250)), 0.02)\n x, y = zip(*pts)\n root.plot(x, y, \":\", color=fc, lw=2)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"amborella\"\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)",
"def render(self, axes=None):\n raise NotImplementedError()",
"def plotDistributionWithGeneHistogram(lXs, lYs, lZs, lZOthers,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\", yax2=\"yax2\"):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n gs = gridspec.GridSpec(2, 1, width_ratios=[1],height_ratios=[1, 3], hspace=0.1) \n ax1 = fig.add_subplot(gs[0])\n ax1.plot(lXs,lZOthers)\n lZmin = [0] * len(lZs)\n ax1.vlines(lXs,lZmin,lZOthers, colors='grey', alpha=0.15)\n if max(lZOthers) <= 0:\n ax1.set_ylim(0,1)\n ax1.set_xlim(lXs[0],lXs[-1])\n lZmax = lZs\n lZmin2 = [300] * len(lZs)\n ax2 = fig.add_subplot(gs[1])\n ax2.vlines(lXs,lZmin,lZmax, colors='grey', alpha=0.15)\n ax3 = ax2.twinx()\n ax3.plot(lXs,lYs)\n ax2.set_xlim(lXs[0],lXs[-1])\n ax2.set_ylim(0,max(lZs)+int(max(lZs)*0.05))\n #ax3.set_ylim(min(lYs)-1,max(lYs)+1)\n axis_font = {'size':'28'}\n ax2.set_xlabel(xax, **axis_font)\n ax3.set_ylabel(yax2, **axis_font)\n ax2.set_ylabel(yax, **axis_font)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def plot_genes_and_coverage(genes,\n coverage_df,\n *,\n genes_relheight=0.3,\n gene_features=None,\n full_gene_feature='exon',\n site_tick_interval=250,\n color_mutations=False,\n mutation_colors=None,\n figwidth=17,\n figtitle='',\n ):\n if gene_features is None:\n gene_features = {'exon': '#999999',\n 'viral_barcode': '#E69F00',\n 'viral_tag': '#56B4E9',\n }\n\n if mutation_colors is None:\n mutation_colors = {'A': '#009E73',\n 'C': '#F0E442',\n 'G': '#0072B2',\n 'T': '#D55E00',\n }\n\n # check validity of genes and coverage\n gene_names = {gene.id for gene in genes}\n if len(genes) != len(set(gene_names)):\n raise ValueError('genes in `genes` do not have unique IDs')\n if set(coverage_df['gene']) != set(gene_names):\n raise ValueError('`coverage_df` and `genes` have different genes')\n\n # set up grid of subplots\n gene_lengths = [len(gene) for gene in genes]\n samples = list(coverage_df['sample'].unique())\n fig, axes = plt.subplots(\n nrows=1 + len(samples),\n ncols=len(genes),\n sharex='col',\n gridspec_kw={'width_ratios': gene_lengths,\n 'height_ratios': ([genes_relheight] +\n [1] * len(samples))\n },\n figsize=(figwidth,\n 1.5 * (len(samples) + genes_relheight) + 2),\n squeeze=False,\n )\n\n # Convert genes to plottable graphic records\n class ViralGeneTranslator(dna_features_viewer.BiopythonTranslator):\n \"\"\"Translate BioPython SeqRecord into GraphicRecord.\"\"\"\n\n def compute_feature_color(self, feature):\n for regex, color in gene_features.items():\n if re.search(regex, feature.type):\n return color\n\n def compute_feature_label(self, feature):\n return None\n\n def compute_filtered_features(self, features):\n return [f for f in features if\n any(re.search(regex, f.type)\n for regex in gene_features.keys())\n ]\n\n # plot the genes in the first row of the subplots\n for ax, seqrecord in zip(axes[: 1, ].ravel(), genes):\n graphic_record = ViralGeneTranslator().translate_record(seqrecord)\n for f in graphic_record.features:\n f.linecolor = f.color\n graphic_record.plot(ax=ax, with_ruler=False, draw_line=False)\n ax.set_title(seqrecord.id, fontsize=15)\n\n # plot coverage on subplots not in first row\n full_gene_feature_regex = [regex for regex in gene_features.keys()\n if re.search(regex, full_gene_feature)]\n if len(full_gene_feature_regex) != 1:\n raise ValueError('no `full_gene_feature` in `gene_features`')\n else:\n full_gene_feature_regex = full_gene_feature_regex[0]\n for ax, (sample, seqrecord) in zip(axes[1:, ].ravel(),\n itertools.product(samples, genes)):\n gene = seqrecord.id # noqa: F841\n\n # data frame of coverage for each site by nt, whether it is wildtype\n df = (coverage_df\n .query('(sample == @sample) & (gene == @gene)')\n .melt(id_vars=['sample', 'gene', 'site'],\n var_name='nucleotide',\n value_name='nt_coverage')\n .rename(columns={'nt_coverage': 'coverage'})\n .merge(pd.DataFrame.from_records(\n enumerate(str(seqrecord.seq), start=1),\n columns=['site', 'wildtype']),\n on='site')\n .assign(\n wildtype=lambda x: (\n x['wildtype']\n .map(Bio.Data.IUPACData.ambiguous_dna_values)\n ),\n is_wildtype=lambda x: x.apply(lambda r: (r['nucleotide']\n in r['wildtype']),\n axis=1)\n )\n )\n\n # plot coverage over full gene\n (df\n .query('nucleotide == \"coverage\"')\n .plot(ax=ax,\n x='site',\n y='coverage',\n kind='area',\n legend=False,\n color=gene_features[full_gene_feature_regex],\n )\n )\n\n # optionally overlay a plot for mutations\n if color_mutations:\n (df\n .query('nucleotide != \"coverage\"')\n .query('not is_wildtype')\n .pivot_table(index='site',\n 
columns='nucleotide',\n values='coverage')\n .fillna(0)\n [list(mutation_colors)]\n .reset_index()\n .plot(ax=ax,\n x='site',\n kind='area',\n stacked=False,\n color=list(mutation_colors.values()),\n legend=False,\n )\n )\n\n # set axes formatting\n ax.set_ylabel(None)\n ax.set_xlabel(None)\n ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0),\n useMathText=True)\n if sample == samples[-1]:\n xticklocs = list(range(1, int(ax.get_xlim()[1]),\n site_tick_interval))\n ax.set_xticks(ticks=xticklocs)\n ax.tick_params(axis='x', labelrotation=90)\n else:\n ax.set_xticklabels([])\n\n # plot rectangles for features of interest\n ymin, ymax = ax.get_ylim()\n for f in seqrecord.features:\n for regex in gene_features.keys():\n if re.search(regex, f.type) and (regex !=\n full_gene_feature_regex):\n color = gene_features[regex]\n break\n else:\n continue\n start = f.location.start\n end = f.location.end\n rect = matplotlib.patches.Rectangle(\n xy=(start - 0.5, ymin),\n width=end - start + 1,\n height=ymax,\n color=color,\n zorder=4,\n alpha=0.4)\n ax.add_patch(rect)\n\n # shared x-, y-labels following here: https://stackoverflow.com/a/53172335\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False,\n right=False)\n plt.xlabel('\\nsite', size=14)\n plt.ylabel('coverage ', size=14)\n\n # add legend for features\n feature_legend = plt.legend(\n handles=[matplotlib.patches.Patch(facecolor=color,\n edgecolor=color,\n label=regex)\n for regex, color in gene_features.items()\n if regex != full_gene_feature_regex],\n bbox_to_anchor=(1.006, ((len(samples) + 0.6 * genes_relheight) /\n (len(samples) + genes_relheight))),\n bbox_transform=plt.gcf().transFigure,\n fontsize=13,\n handlelength=0.5,\n title='features',\n title_fontsize=14,\n )\n\n # add legend for mutation colors\n if color_mutations:\n plt.gca().add_artist(feature_legend)\n plt.legend(handles=[matplotlib.patches.Patch(facecolor=color,\n edgecolor=color,\n label=nt)\n for nt, color in mutation_colors.items()],\n bbox_to_anchor=(1, ((len(samples) - 0.2) /\n (len(samples) + genes_relheight))),\n bbox_transform=plt.gcf().transFigure,\n fontsize=13,\n handlelength=0.5,\n title='mutation',\n title_fontsize=14,\n ncol=2,\n )\n\n if figtitle:\n fig.suptitle(figtitle, size=18)\n\n fig.tight_layout(w_pad=0)\n return fig, axes",
"def embed_matplotlib(self):",
"def grid(*args, default: bool=True, displayAxes: bool=True, displayAxesBold: bool=True,\n displayDivisionLines: bool=True, displayGridLines: bool=True,\n displayOrthographicLabels: bool=True, displayPerspectiveLabels: bool=True, divisions:\n Union[int, bool]=5, orthographicLabelPosition: Union[AnyStr, bool]=\"\",\n perspectiveLabelPosition: Union[AnyStr, bool]=\"\", reset: bool=True, size: Union[float,\n bool]=12, spacing: Union[float, bool]=5, style: Union[int, bool]=0, toggle: bool=True,\n q=True, query=True, **kwargs)->Union[None, Any]:\n pass",
"def create_axes(s, both=False, text=True):\n # Se convierte la distancia a un entero positivo\n s = abs(s)\n\n if s > 0: # Si es una distancia valida\n\n # Vectores de dibujo\n x = Point3(s, 0, 0)\n y = Point3(0, s, 0)\n z = Point3(0, 0, s)\n o = Point3()\n\n # Se crea nueva lista\n lista = glGenLists(1)\n glNewList(lista, GL_COMPILE)\n\n # Se agregan los vectores al dibujo\n glBegin(GL_LINES)\n\n glColor4fv([1, 0, 0, 1])\n drawVertexList([o, x])\n glColor4fv([0, 1, 0, 1])\n drawVertexList([o, y])\n glColor4fv([0, 0, 1, 1])\n drawVertexList([o, z])\n\n if both: # Se dibujan los ejes en ambos sentidos\n x = Point3(-s, 0, 0)\n y = Point3(0, -s, 0)\n z = Point3(0, 0, -s)\n\n glColor4fv([1, 0, 0, 1])\n drawVertexList([o, x])\n glColor4fv([0, 1, 0, 1])\n drawVertexList([o, y])\n glColor4fv([0, 0, 1, 1])\n drawVertexList([o, z])\n\n glEnd()\n\n if text: # Se dibujan los nombres de los ejes\n draw_text(\"x\", Point3(s + 60, 0, -15), [1, 0, 0],\n GLUT_BITMAP_HELVETICA_18)\n draw_text(\"y\", Point3(0, s + 50, -15), [0, 1, 0],\n GLUT_BITMAP_HELVETICA_18)\n draw_text(\"z\", Point3(+0, +0, s + 50), [0, 0, 1],\n GLUT_BITMAP_HELVETICA_18)\n\n if both:\n draw_text(\"-x\", Point3(-s - 60, 0, -15), [1, 0, 0],\n GLUT_BITMAP_HELVETICA_18)\n draw_text(\"-y\", Point3(0, -s - 70, -15), [0, 1, 0],\n GLUT_BITMAP_HELVETICA_18)\n draw_text(\"-z\", Point3(+0, +0, -s - 80), [0, 0, 1],\n GLUT_BITMAP_HELVETICA_18)\n\n # Se retorna la lista\n glEndList()\n return lista\n\n else:\n raise Exception(\"la dimension de los ejes debe ser mayor a cero\")",
"def ratings_genres(Y, genres):\n plt.subplot(311)\n ratings_genre(Y, genres, 1)\n plt.subplot(312)\n ratings_genre(Y, genres, 2)\n plt.subplot(313)\n ratings_genre(Y, genres, 3)\n plt.tight_layout()\n plt.show()",
"def gen_figure(snf_nmi, rbf_nmi, snf_mod, rbf_mod):\n xticklabels = ['CT', 'SV', 'DB', 'CSF', 'CLIN']\n yticklabels = ['68', '114', '219', '448', '1000']\n\n fig, axes = plt.subplots(2, 2, figsize=(10, 8), sharex=True, sharey=True)\n fig.subplots_adjust(wspace=-0.05, hspace=0.15)\n\n # make four circleplots\n ax1 = plotting.circleplot(snf_nmi, vmin=-0.01, vmax=1.01, ax=axes[0][0],\n xticklabels=[], yticklabels=yticklabels)\n ax2 = plotting.circleplot(rbf_nmi, vmin=-0.01, vmax=1.01, ax=axes[0][1],\n xticklabels=[], yticklabels=yticklabels,\n cbar_kws={'ticks': [0.00, 1.00]})\n ax3 = plotting.circleplot(snf_mod, vmin=-0.01, vmax=0.36, ax=axes[1][0],\n xticklabels=xticklabels, yticklabels=yticklabels)\n ax4 = plotting.circleplot(rbf_mod, vmin=-0.01, vmax=0.36, ax=axes[1][1],\n xticklabels=xticklabels, yticklabels=yticklabels,\n cbar_kws={'ticks': [0, 0.35]})\n\n for ax in axes.flatten():\n for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n\n # set axis titles\n ax1.set_title('Similarity network fusion', pad=20)\n ax2.set_title('Data concatenation', pad=20)\n\n # set axis labels\n ax1.set_ylabel('Dimensionality of\\ncortical thickness data',\n labelpad=15, x=0, y=0)\n ax3.set_xlabel('Data type', labelpad=15, x=1.1)\n\n # turn off colorbars on lefthand plots\n ax1.collections[0].colorbar.ax.set_visible(False)\n ax3.collections[0].colorbar.ax.set_visible(False)\n\n # correct colorbar appearance for righthand plots\n ax2.collections[0].colorbar.ax.tick_params(size=0, labelsize=14)\n ax4.collections[0].colorbar.ax.tick_params(size=0, labelsize=14)\n ax2.collections[0].colorbar.ax.set_ylabel('Normalized mutual\\ninformation',\n rotation=270, labelpad=30)\n ax4.collections[0].colorbar.ax.set_ylabel('Modularity',\n rotation=270, labelpad=15)\n\n # plot small gray lines to better differentiate plots\n plt.plot([0.4725, 0.4725], [0.55, 0.85], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n plt.plot([0.4725, 0.4725], [0.15, 0.45], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n plt.plot([0.155, 0.415], [0.5, 0.5], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n plt.plot([0.525, 0.795], [0.5, 0.5], color='gray', lw=0.5,\n transform=fig.transFigure, clip_on=False)\n\n return fig",
"def metagene(ribo, \n site, \n out, \n experiments, \n title,\n normalize,\n lowerlength, \n upperlength, \n dump ):\n\n return plot_metagene_wrapper( \n ribo_file = ribo, \n output_file = out,\n site_type = site, \n range_lower = lowerlength, \n range_upper = upperlength,\n experiment_list = experiments,\n title = title,\n normalize = normalize,\n dump_to_file = dump)",
"def setup_figure_1ax(x_label='', y_label='', size=(13, 9), shrink_ax=True):\n\n matplotlib.rcParams.update({'font.size': 20})\n fig, ax = plt.subplots()\n fig.set_size_inches(size)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n # Shrink current axis by 20%\n if shrink_ax:\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.grid()\n return fig, ax",
"def createFigure(self):\n\n SMALL_SIZE = 14\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 36\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n fig, axes = plt.subplots()\n fig.set_size_inches(10, 6, forward=True)\n serialNumber = self.spectrometer.getSerialNumber()\n model = self.spectrometer.model\n fig.canvas.manager.set_window_title('Spectrometer [serial # {0}, model {1}]'.format(serialNumber, model))\n axes.set_xlabel(\"Wavelength [nm]\")\n axes.set_ylabel(\"Intensity [arb.u]\")\n return fig, axes"
] | [
"0.62391675",
"0.5573007",
"0.55273825",
"0.5471501",
"0.5401241",
"0.5400579",
"0.53350043",
"0.5321127",
"0.52325386",
"0.5185536",
"0.51851785",
"0.5162964",
"0.5139158",
"0.5105493",
"0.50971764",
"0.5093184",
"0.5092872",
"0.5075228",
"0.5068497",
"0.5068263",
"0.50643927",
"0.5044636",
"0.5015103",
"0.5014386",
"0.50112563",
"0.50070596",
"0.49963456",
"0.49821407",
"0.4972345",
"0.49713215"
] | 0.6132679 | 1 |
purge cache of a host | def purge(hostname):
config.purge(hostname)
log.info('OK') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _purge():\r\n _cache.clear()",
"def clear_cache():\n sudo('service varnish restart')",
"def purge_cache(self):\n\n self.local_store.purge_cache()",
"def destroy_cache():\n # TODO\n pass",
"def decache(self):",
"def purgeAndDeleteCache(webhook_cache, url):\n \n totalRecords = webhook_cache.__len__() # not used currently. \n print \"Now posting offline cache\"\n for data in webhook_cache.all():\n try:\n result = post_result(url, data) \n except Exception,e:\n print \"Unable to post record from cache. Message was: {0}\".format(e)\n # clear cache \n webhook_cache.purge()\n print \"Webhook cache cleared\"",
"def clear_cache():\n # TODO\n pass",
"def clear_cache():\n run(\"rm -rf ~/public_html/var/cache/mage*\")\n run(\"redis-cli FLUSHALL\")",
"def cache_clear():\n # type: () -> None\n with Cache(CACHE_URI) as c:\n c.clear()",
"def delete(self, cache_key):\r\n pass",
"def sipserver_purge(self) -> None:",
"def purge(self):\n pass",
"def clear_cache():\n run(\"/etc/init.d/memcached restart\")",
"def delete_cache(self, dbname=None, system=None):\n # Use flush_all, which\n # expire all data currently in the memcache servers.\n self.memcache.flush_all()",
"def clearcache():\n g.pafs = {}\n g.streams = {}\n g.url_memo = collections.OrderedDict()\n dbg(\"%scache cleared%s\", c.p, c.w)\n g.message = \"cache cleared\"",
"def purge_cache():\n from voiceplay.config import Config\n logger.debug('Purging cache...')\n cache_dir = Config.cfg_data().get('cache_dir', '')\n if os.path.exists(cache_dir) and os.path.isdir(cache_dir):\n files = glob(os.path.join(cache_dir, '*'))\n for fname in files:\n try:\n os.remove(fname)\n except Exception as exc:\n logger.debug('Removal of %r failed, please check permissions', exc)",
"def clear_cache(self):\n pass",
"def purge() -> None:\r\n _purge_func(False)",
"def clear(self, cacheDir):",
"def clear_cache():\n cache = Cache()\n cache.reset()",
"def cache_clear(self):\n\t\tself.__cache = {}",
"def flush():\n for k in cache._thecache.keys():\n del cache._thecache[k]",
"def clear_cache(self):\n requests.get(url=self.proxy_url+'/clear_cache')",
"def clear_cache():\n os.remove(CACHE_FILE)",
"def cache_clear():\n # type: () -> None\n with Cache() as c:\n c.clear()",
"def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]",
"def cache_cleanup(user_data, cache):\n LocalRepositoryCache().evict_expired()\n cache_files_cleanup()\n return jsonify({\"result\": \"ok\"})",
"def delete_cache(self, key):\n self.r.delete(key)",
"def clear_scache(cls) -> None:\n cls.scache = {}",
"def clean(self):\r\n with self.mutex:\r\n now = time.time()\r\n if self.last_clean_time + self.CLEAN_INTERVAL < now:\r\n to_remove = []\r\n for (host, pool) in self.host_to_pool.items():\r\n pool.clean()\r\n if pool.size() == 0:\r\n to_remove.append(host)\r\n for host in to_remove:\r\n del self.host_to_pool[host]\r\n self.last_clean_time = now"
] | [
"0.7727345",
"0.73509437",
"0.7080226",
"0.7028563",
"0.69827265",
"0.6830749",
"0.68005484",
"0.6735831",
"0.67053866",
"0.66968954",
"0.66672283",
"0.6632626",
"0.66293",
"0.6593723",
"0.65836966",
"0.65826976",
"0.6574661",
"0.65719706",
"0.65687996",
"0.6507312",
"0.64658755",
"0.645879",
"0.6432165",
"0.6418666",
"0.6417419",
"0.64008003",
"0.6388784",
"0.6370028",
"0.6299056",
"0.629187"
] | 0.7531293 | 1 |
install ca to system's certificate chain | def install_ca():
require_root()
config.proxy.install_ca_cert()
log.info('OK') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_ca():\n require_root()\n\n config.proxy.gen_ca_certs()\n log.info('OK')",
"def ca():\n return trustme.CA()",
"def initial_setup():\n\n if os.path.exists(cfg.ca_private_key_path()):\n pkey = _try_load_ca_private_key(cfg.ca_private_key_path())\n else:\n pkey = _generate_ca_private_key(cfg.ca_private_key_path())\n\n if os.path.exists(cfg.ca_cert_path()):\n _try_load_ca_cert(cfg.ca_cert_path())\n else:\n _generate_ca_cert(cfg.ca_cert_path(), pkey)",
"def save_ca():\n cert_file = os.environ.get('HOME') + '/.cat_installer/ca.pem'\n debug(\"saving cert\")\n with open(cert_file, 'w') as cert:\n cert.write(Config.CA + \"\\n\")",
"def initca(ca_dir):\n click.echo('Initiliasing new CA in %s' % ca_dir)\n sca = SimpleCA(ca_dir)\n try:\n sca.init_ca()\n except FileExistsError as err:\n click.echo('The CA directory (%s) exists, not doing anything' %\n err.filename)\n exit(1)",
"def create_CA(dn):\n cmd_genrsa = [\"openssl\",\n \"genrsa\",\n \"-aes256\",\n \"-out\", f'{pki_dir}/ca.key',\n \"-passout\", f'pass:{ca_password}',\n f'{rsa_keysize}']\n cmd_req = [\"openssl\",\n \"req\",\n \"-new\",\n \"-x509\",\n \"-days\", \"999999\",\n \"-sha256\",\n \"-key\", f'{pki_dir}/ca.key',\n \"-out\", server_key_files[\"ca\"],\n \"-subj\", f'{dn}',\n \"-passin\", f'pass:{ca_password}']\n cmds = [cmd_genrsa, cmd_req]\n for cmd in cmds:\n exec_cmd(cmd)",
"def test_one_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.add_client_ca(cacert)\n return [cadesc]\n\n self._check_client_ca_list(single_ca)",
"def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))",
"def test_set_and_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def mixed_set_add_ca(ctx):\n ctx.set_client_ca_list([cadesc, sedesc])\n ctx.add_client_ca(clcert)\n return [cadesc, sedesc, cldesc]\n\n self._check_client_ca_list(mixed_set_add_ca)",
"def test_set_after_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n\n def set_replaces_add_ca(ctx):\n ctx.add_client_ca(clcert)\n ctx.set_client_ca_list([cadesc])\n ctx.add_client_ca(secert)\n return [cadesc, sedesc]\n\n self._check_client_ca_list(set_replaces_add_ca)",
"def install_certbot_ssl():\n run('wget https://dl.eff.org/certbot-auto')\n run('chmod a+x certbot-auto')\n run('./certbot-auto certonly --standalone -d indabom.com')",
"def _generate_ca_cert(path, pkey):\n crt = _make_base_cert(pkey, 5000, socket.gethostname(),\n random.randrange(0, 2**64))\n crt.set_issuer(crt.get_subject())\n crt.sign(pkey, 'sha256')\n\n data = crypto.dump_certificate(crypto.FILETYPE_PEM, crt)\n open(path, 'wb').write(data)",
"def dcos_ca_bundle():\n resp = sdk_cmd.cluster_request('GET', '/ca/dcos-ca.crt')\n cert = resp.content.decode('ascii')\n assert cert is not None\n return cert",
"def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert",
"def create_cert(commonname, ca_dir):\n sca = SimpleCA(ca_dir)\n sca.new_cert(commonname)",
"def AddCaCertificateFlag(parser, required=False):\n help_text = \"\"\"\\\n x509 PEM-encoded certificate of the CA that signed the database\n server's certificate. The replica will use this certificate to verify\n it's connecting to the correct host. Database Migration Service encrypts the\n value when storing it.\n \"\"\"\n parser.add_argument('--ca-certificate', help=help_text, required=required)",
"def add_ca_certs(s3_client, certs):\n logger.info(\"Fetching CA certs and writing to filesystem\")\n\n # Determine which update-ca command to use and directory to store CAs in\n if command_exists(\"update-ca-trust\"):\n logger.info(\"update-ca-trust available\")\n update_ca_cmd = \"update-ca-trust\"\n ca_dir = \"/etc/pki/ca-trust/source/anchors/\"\n elif command_exists(\"update-ca-certificates\"):\n logger.info(\"update-ca-certificates available\")\n update_ca_cmd = \"update-ca-certificates\"\n ca_dir = \"/usr/local/share/ca-certificates/\"\n else:\n logger.error(\"Environment is missing required CA commands\")\n raise OSError(\n \"OS is missing a required command for CA trust. Either update-ca-trust or \"\n \"update-ca-certificates is required.\"\n )\n\n for cert_entry in certs:\n alias = cert_entry[\"alias\"]\n entry = cert_entry[\"cert\"]\n source = cert_entry[\"source\"]\n logger.info(\"...Processing cert with alias = {} from {}\".format(alias, source))\n\n pem_cert_body = fetch_cert(source, entry, s3_client)\n logger.debug(\"...cert body = {}\".format(pem_cert_body))\n\n with open(ca_dir + alias + \".crt\", \"a\") as f:\n f.write(str(pem_cert_body))\n\n logger.info(\"Updating CA trust\")\n os.system(update_ca_cmd)",
"def test_set_one_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(single_ca)",
"def test_load_client_ca(self, context, ca_file):\n context.load_client_ca(ca_file)",
"def ca_cert_path(self, ca_cert_path: str):\n\n self._ca_cert_path = ca_cert_path",
"def console_ca_cert(self, console_ca_cert):\n\n self._console_ca_cert = console_ca_cert",
"def fusion_api_import_external_ca_certificates(self, body, api=None, headers=None):\n return self.ca.add(body, api=api, headers=headers)",
"def test_multiple_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n\n def multiple_ca(ctx):\n ctx.add_client_ca(cacert)\n ctx.add_client_ca(secert)\n return [cadesc, sedesc]\n\n self._check_client_ca_list(multiple_ca)",
"def init_ca(self):\n self._init_dir()\n self._init_serial()\n self._init_keys()",
"def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):\n\n raise NotImplementedError()",
"def __check_opts(self):\n self.ca_cert_file = os.environ['HOME'] + '/.cat_installer/ca.pem'\n self.pfx_file = os.environ['HOME'] + '/.cat_installer/user.p12'\n if not os.path.isfile(self.ca_cert_file):\n print(Messages.cert_error)\n sys.exit(2)",
"def generate_ca(properties, host, isoverwrite):\n java_home = read_conf_file(properties, \"env\", \"JAVA_HOME\")\n java = java_home+'/bin/java'\n logger.info(\"Using JAVA {0}...\".format(java))\n\n try:\n os.path.exists(CA_DIR)\n except OSError:\n raise\n logger.info(\"Using {0} as base path.\".format(CA_DIR))\n if os.path.exists(properties):\n ca_props = read_ca_conf_file(properties, \"caprops\")\n logger.debug(\"CA properties are:\".format(ca_props))\n opdir = os.path.abspath(read_conf_file(properties, \"caprops\", \"outputDirectory\"))\n toolkit_cmd = [java, '-jar', '-Xms12m', '-Xmx24m', CA_DIR + '/lib/ssl_manager-1.5.0-jar-with-dependencies.jar'\n , 'standalone', '--certificateAuthorityHostname', read_conf_file(properties, \"caprops\", \"caName\")]\n if isoverwrite is True:\n toolkit_cmd.append(\"--isOverwrite\")\n create_ca = toolkit_cmd + ca_props\n logger.debug(\"tls toolkit args are : {0}\".format(create_ca))\n cacmd = subprocess.Popen(create_ca)\n cacmd.communicate()\n returncode = cacmd.poll()\n if not returncode == 0:\n logger.error(\"Unable to execute: {0}\".format(create_ca))\n sys.exit(1)\n generate_ambari_specific(properties, host, opdir)\n return",
"def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")",
"def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")",
"def _get_ca_bundle():\n try:\n import certifi\n return certifi.where()\n except ImportError:\n pass"
] | [
"0.7331956",
"0.6866141",
"0.6793719",
"0.6792014",
"0.6662008",
"0.6598612",
"0.64483553",
"0.63963974",
"0.6261553",
"0.6227196",
"0.6218202",
"0.61274976",
"0.61183465",
"0.6107496",
"0.6034503",
"0.6000411",
"0.5953046",
"0.59481186",
"0.59253514",
"0.58889574",
"0.588639",
"0.5882736",
"0.58010936",
"0.5768002",
"0.5730426",
"0.5727313",
"0.56876194",
"0.5633792",
"0.5633792",
"0.5625217"
] | 0.85667384 | 0 |
Populate relevant tables with formatted data stored in dictionary structures. The data will already be properly formatted in dictionary form (retrieved from a .csv file), so this function takes the preformatted data and stores it in Book and Author tables, since those should be populated upon initialization. | def populate_tables(self, data_book, data_author, datafile_name, initial_stock=20):
print("\nPopulating book table with input data from", datafile_name, "...", end='')
count = 0
failed_books = []
for book in data_book:
try:
date = datetime.datetime.strptime(book[7], '%m/%d/%Y').date()
t = (book[0], book[1], book[8], book[3], date,
int(book[4]), initial_stock, book[9])
self.cursor.execute(
"""INSERT INTO book (ISBN, title, publisher, lang, publicationDate, pageCount, stock, price)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""", t)
except Exception as e:
count = count + 1
failed_books.append(t[1])
if failed_books:
print("\nSome books were not added to the database because they had an invalid format:")
for book in failed_books:
print(book)
print("\nTotal books not included in database: ", count)
self.cursor.execute(
"""SELECT COUNT(*)
FROM book""")
num_successful = self.cursor.fetchall()
print(num_successful[0][0], "books successfully inserted into table \"Book\".")
self.db.commit()
print("done")
# Now we populate authors. First need to get all ISBNs of books that were added to the book table
print("\nAdding authors to \"Author\" table...", end='')
self.cursor.execute("SELECT ISBN FROM Book")
list_books = [book[0] for book in self.cursor.fetchall()]
for author in data_author:
self.cursor.execute("INSERT INTO author (name) VALUES (%s)", (author,))
self.db.commit()
for book in data_author[author]:
if book in list_books:
self.cursor.execute("SELECT ID FROM author WHERE name = %s", (author,))
auth_id = self.cursor.fetchone()[0]
self.cursor.execute("INSERT IGNORE INTO wrote VALUES (%s,%s)", (auth_id, book))
self.db.commit()
print("done")
# # Finally, populate HasKeyword table. For now just add words in title and author names
# print("\nGenerating keywords for \"HasKeyword\" table...", end='')
# for book in list_books:
# self.cursor.execute("SELECT title from book WHERE ISBN = %s", (book,))
# keywords = [i[0].split(' ') for i in self.cursor.fetchall()]
# self.cursor.execute("SELECT name FROM author A, wrote W WHERE A.ID = W.authorID AND W.ISBN = %s", (book,))
# authors = [i[0].split(' ') for i in self.cursor.fetchall()]
#
# keywords.extend(authors)
# for word_subset in keywords:
# for word in word_subset:
# if not word.isspace() and word:
# self.cursor.execute("INSERT IGNORE INTO HasKeyword VALUES(%s,%s)", (book, word))
# self.db.commit()
# print("done") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _populate(self):\n\n # Assume the first word is what we want, and we can find well formed years\n # This sucks, but will work for these ones.\n # Roll on bibtex for citations in the CIM.\n\n citation_detail = self.doc.citation_detail\n author = citation_detail.split(',')[0]\n match = '([^\\w])19|20\\d\\d([^\\w])*?'\n m = re.search(match, citation_detail)\n if m:\n year = m.group(0)\n else:\n year = None\n\n # one error in existing es-doc content to be fixed:\n if 'van Vuuren DP' in author:\n author = 'van Vuuren'\n print 'applying vv fix'\n\n self.year = int(year)\n\n # We assume that this table will have entries which ne\n\n # I use the first three letters of a an authors name, and for\n # three or more authors, EA, and then the year for my bibtex citation string\n self.citeguess = author[0:3] + 'EA' + year[2:]\n # This is what will appear in the table:\n self.citestring = '%s et al. (%s)' % (author, year)\n # Keep this for a reference list for checking against the eventual bibtex reference list.\n self.text = citation_detail",
"def prep_data(data: list):\n book = {\n 'title': data['title'],\n 'authors': [],\n 'categories': []\n }\n try:\n for author in data['authors']:\n author_obj, created = Author.objects.get_or_create(name=author)\n book['authors'].append(author_obj.id)\n except KeyError:\n pass\n try:\n for category in data['categories']:\n category_obj, created = Category.objects.get_or_create(name=category)\n book['categories'].append(category_obj.id)\n except KeyError:\n pass\n book['published_date'] = data.get('publishedDate', None)\n book['average_rating'] = data.get('averageRating', None)\n book['ratings_count'] = data.get('ratingsCount', None)\n try:\n book['thumbnail'] = data['imageLinks']['thumbnail']\n except KeyError:\n book['thumbnail'] = None\n return book",
"def populate(library):\n # Clears table\n table.delete(*table.get_children())\n\n # Inserts each book into the table\n # where text is the key field\n for book in library:\n table.insert(\"\", int(book[0]), text=book[0], values=(book[1], book[2], book[3], book[4]))",
"def fill_book_table(self, statistics, path, filemoving, conn, logg, parser):\n logg.writing_log(conn, 'Starting filling book table')\n c = conn.cursor()\n results = (statistics.book_name(path, filemoving, parser), statistics.paragraphs(path, filemoving, parser),\n statistics.words(path, filemoving, parser), statistics.letters(path, filemoving, parser),\n statistics.words_capital_letter(path, filemoving, parser), statistics.words_lower_case(path, filemoving, parser))\n sql = \"INSERT INTO stats VALUES(?,?,?,?,?,?)\"\n c.execute(sql, results)\n logg.writing_log(conn, 'Book table is filled')\n conn.commit()",
"def __cut_book_data(cls, data):\n book = {\n 'title': data['title'],\n # use '、' to connect the elements of list\n 'author': '、'.join(data['author']),\n 'publisher': data['publisher'],\n 'price': data['price'],\n # if current key's value of data is none,\n # put the empty string in this key, or put the original value in it.\n 'pages': data['pages'] or \"\",\n 'summary': data['summary'] or \"\",\n 'image': data['image']\n }\n return book",
"def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()",
"def setup(self):\n self.table = prettytable.PrettyTable()\n self.table.field_names = self.titles\n if self.convert_columns:\n self.rows = self.convert_columns_to_rows(self.rows)\n if self.colour:\n self.colour = self.convert_columns_to_rows(self.colour)",
"def populate_table(self, data):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"\"\"\n INSERT INTO film (title, film_id, year, director, cast, rating, poster_url) \n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n cur.execute(sql, data)\n db.commit()\n except:\n print(\"An error occurred when saving the data!\")\n\n db.close()",
"def transform_book_details(self, books_dict):\n for i, book_dict in enumerate(books_dict):\n print(f'Transforming data for book {i + 1}')\n with open(f'{PATH_TO_DATA}/{i}.html', 'r') as f:\n content = f.read()\n self._add_book_details(book_dict, content)",
"def raw_data_to_table(raw_data, cursor):\n table = PrettyTable()\n # setting table field names\n table.field_names = [column[0] for column in cursor.description]\n for row in raw_data:\n table.add_row(row)\n print(table)",
"def make_dict(data_for_dict): \n \n column_name_list = data_for_dict[0]\n db_list = data_for_dict[1:]\n \n column_list1 = []\n column_list2 = []\n column_list3 = []\n column_list4 = []\n column_list5 = []\n column_list6 = []\n column_list7 = []\n column_list8 = []\n column_list9 = []\n column_list10 = []\n column_list11 = []\n hmdb_dict = {}\n for line in db_list:\n my_string1 = '' \n my_string2 = ''\n my_string3 = ''\n my_string4 = ''\n my_string5 = ''\n my_string6 = ''\n my_string7 = ''\n my_string8 = ''\n my_string9 = ''\n my_string10 = ''\n my_string11 = ''\n\n my_string1 = line[0]\n column_list1 += [my_string1]\n my_string2 += line[1]\n column_list2 += [my_string2]\n my_string3 += line[2]\n column_list3 += [my_string3]\n my_string4 += line[3]\n column_list4 += [my_string4]\n my_string5 += line[4]\n column_list5 += [my_string5]\n my_string6 += line[5]\n column_list6 += [my_string6]\n my_string7 += line[6]\n column_list7 += [my_string7]\n my_string8 += line[7]\n column_list8 += [my_string8]\n my_string9 += line[8]\n column_list9 += [my_string9]\n my_string10 += line[9]\n column_list10 += [my_string10]\n my_string11 += line[10]\n column_list11 += [my_string11] \n \n hmdb_dict[column_name_list[0]] = column_list1\n hmdb_dict[column_name_list[1]] = column_list2\n hmdb_dict[column_name_list[2]] = column_list3\n hmdb_dict[column_name_list[3]] = column_list4\n hmdb_dict[column_name_list[4]] = column_list5\n hmdb_dict[column_name_list[5]] = column_list6\n hmdb_dict[column_name_list[6]] = column_list7\n hmdb_dict[column_name_list[7]] = column_list8\n hmdb_dict[column_name_list[8]] = column_list9\n hmdb_dict[column_name_list[9]] = column_list10\n hmdb_dict[column_name_list[10]] = column_list11\n \n return (hmdb_dict)",
"def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass",
"def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()",
"def pre_process_data():\n data_list, header_list = Parser.__parse_csv_data(Parser.training_data_file)\n table = pandas.DataFrame(data_list, columns=header_list)\n table.drop(['date', 'employee id'], axis=1, inplace=True)\n unique_categories = table['category'].unique()\n unique_expense_desc = table['expense description'].unique()\n unique_tax_name = table['tax name'].unique()\n\n column_index = {\n 'input': {},\n 'output': {}\n }\n\n column_index['input']['pre-tax amount'] = {\n 'column_index': 0,\n 'type': 'int'\n }\n\n column_index['input']['tax amount'] = {\n 'column_index': 1,\n 'type': 'int'\n }\n\n index = 2\n\n for i in range(len(unique_expense_desc)):\n column_index['input'][unique_expense_desc[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n index += len(unique_expense_desc)\n\n for i in range(len(unique_tax_name)):\n column_index['input'][unique_tax_name[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n for i in range(len(unique_categories)):\n column_index['output'][unique_categories[i]] = {'value': i}\n\n Parser.__save_column_index(column_index)",
"def create_tables(cur, country_json, xml_state, body_json):\n print(\"Creating the 3 first tables...\")\n cur.execute('CREATE TABLE IF NOT EXISTS country_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_total_purchases(idx INTEGER PRIMARY KEY, state TEXT, amount INTEGER)')\n with open(\"country_total_purchases.csv\", 'r') as f:\n for idx, line in enumerate(f.read().split('\\n')):\n line_splt = line.split(',')\n try:\n cur.execute('INSERT INTO country_total_purchases VALUES(\"%s\", \"%s\", \"%s\")' % (idx, line_splt[0], line_splt[1]))\n except IndexError:\n pass\n cur.execute('CREATE TABLE IF NOT EXISTS country_albums(ID INTEGER PRIMARY KEY, state TEXT, year INTEGER, genre TEXT, album TEXT, amount INTEGER)')\n for idx, album in enumerate(country_json[body_json['state']]):\n cur.execute('INSERT INTO country_albums VALUES (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' %\n (idx, body_json['state'], body_json['year'], body_json['genre'], album,\n xml_state[0][0][idx].text))",
"def prepare_record_data_for_DB_insert(record_data: Dict) -> Dict:\n if record_data[\"artist\"] is None or record_data[\"title\"] is None:\n raise AssertionError(\"Artist and / or Title cannot be None.\")\n\n artist_list = [art.strip() for art in record_data[\"artist\"].split(\";\")]\n artist_country_list = [\n co.strip() for co in record_data[\"artist_country\"].split(\";\")\n ]\n label_list = [lab.strip() for lab in record_data[\"label\"].split(\";\")]\n\n if len(artist_list) != len(artist_country_list):\n raise AssertionError(\n \"Need the same number of artists and artist countries.\"\n )\n\n record_data[\"artist\"] = artist_list\n record_data[\"artist_country\"] = artist_country_list\n record_data[\"label\"] = label_list\n return record_data",
"def table_maker():\r\n try:\r\n off_copy = off.copy()\r\n man_copy = man.copy()\r\n exe_copy = exe.copy()\r\n ceo_copy = ceo.copy()\r\n list_of_lists = [off_copy, man_copy, exe_copy, ceo_copy]\r\n\r\n for i in list_of_lists:\r\n for j in i:\r\n if type(j) == str:\r\n continue\r\n else:\r\n raise ValueError('All elements must be strings')\r\n\r\n row_num = max(len(off_copy), len(man_copy),\r\n len(exe_copy), len(ceo_copy))\r\n for i in list_of_lists:\r\n if len(i) != row_num:\r\n diff = row_num - len(i)\r\n for j in range(diff):\r\n i.append('')\r\n\r\n t = PrettyTable(\r\n ['Office Workers', 'Managers', 'Executives', 'CEO'])\r\n for i in range(row_num):\r\n t.add_row([off_copy[i], man_copy[i], exe_copy[i], ceo_copy[i]])\r\n\r\n with open('Employee Table.txt', 'w') as f:\r\n f.write(str(t))\r\n\r\n except FileNotFoundError:\r\n print(\"Error: No file entered\")",
"def insert_books_data():\n # Get data from csv file\n print(\"Getting data from csv..\")\n file = open(\"books.csv\")\n reader = csv.reader(file)\n\n # Insert csv data into table\n print(\"Inserting data into 'books' table..\")\n for isbn, title, author, year in reader:\n try:\n db.execute(\"INSERT INTO books (isbn, title, author, year)\\\n VALUES (:isbn, :title, :author, :year)\", {\n \"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year })\n except exc.DataError as err:\n print(\"Invalid entry in csv file\")\n db.commit()\n print(\"Data inserted\")",
"def insertData(self, table, title, rating, authorinfo, pubinfo):\n\n\t\tsql = \"insert into %s (bookname, authorinfo, pubinfo, rating) \\\n\t\t\tvalues('%s', '%s', '%s', '%s')\" %(table, title, authorinfo,\n\t\t\tpubinfo, rating)\n\t\ttry:\n\t\t\tself.cursor.execute(sql)\n\t\t\tself.conn.commit()\n\t\texcept Exception, e:\n\t\t\tsys.exit()",
"def populate(table_name, date):\n\tlog_msg3(\"Populando \" + table_name)\n\n\twsq_to_txt(table_name, date)\n\n\t# si es un nuevo año se crea una nueva tabla\n\tif(is_new_year(table_name) and not new_tables_created):\n\t\tcreate_tables()\n\n\ttxt_to_table(table_name)\n\n\tlog_msg_ok3()",
"def fill_table(info):\n # extrac attributes from info struct\n data = info[\"data\"]\n table = info[\"table\"]\n header = info[\"header\"]\n row_num = info[\"row_num\"]\n\n currency_type_num = row_num - 1\n row_index = 0\n col_index = 0\n i = 0\n while i < len(data):\n if data[i].find(\"%\") > 0:\n # stat data\n while i < len(data) and row_index < currency_type_num:\n table[row_index+1].append(data[i])\n row_index += 1\n i += 1\n # Reset row_index\n row_index = 0\n else:\n if i < row_num - 1:\n # currency Type\n table[i+1].append(data[i])\n else:\n # time marker\n if data[i] != header:\n table[0].append(data[i])\n i += 1\n\n # End loop\n return None",
"def _format_data(self) -> None:\n for row in self._db_data:\n if row['age_start'] is None:\n continue\n # entry = {'x': 'Celkem', 'y': int(row['count'])}\n elif row['age_start'] == 95:\n entry = {'x': f\"{int(row['age_start'])}+\", 'y': int(row['count'])}\n else:\n entry = {'x': f\"{int(row['age_start'])}-{int(row['age_start'])+4}\", 'y': int(row['count'])}\n self.return_data['data'].append(entry)",
"def cars_dict_to_table(car_data):\n table_data = [[\"ID\", \"Car\", \"Price\", \"Total Sales\"]]\n for item in car_data:\n table_data.append([item[\"id\"], format_car(item[\"car\"]), item[\"price\"], item[\"total_sales\"]])\n return table_data",
"def prepare_student_data(self) -> dict:\n self._filename_pre_data()\n empty_student = {}\n empty_student[\"scoreTimestamp\"] = \"N/A\"\n for i in self.draft_out:\n empty_student[i] = \"N/A\"\n for i in self.pre_data:\n empty_student[i] = self.pre_data[i]\n self.pre_data = empty_student",
"def _format_sample_sheet(sample_sheet_dict, sep=','):\n template = (\n '{comments}[Header]\\nIEMFileVersion{sep}{IEMFileVersion}\\n'\n 'Investigator Name{sep}{Investigator Name}\\n'\n 'Experiment Name{sep}{Experiment Name}\\nDate{sep}{Date}\\n'\n 'Workflow{sep}{Workflow}\\nApplication{sep}{Application}\\n'\n 'Assay{sep}{Assay}\\nDescription{sep}{Description}\\n'\n 'Chemistry{sep}{Chemistry}\\n\\n[Reads]\\n{read1}\\n{read2}\\n\\n'\n '[Settings]\\nReverseComplement{sep}{ReverseComplement}\\n\\n'\n '[Data]\\n{data}')\n\n if sample_sheet_dict['comments']:\n sample_sheet_dict['comments'] = re.sub(\n '^', '# ', sample_sheet_dict['comments'].rstrip(),\n flags=re.MULTILINE) + '\\n'\n sample_sheet = template.format(**sample_sheet_dict, **{'sep': sep})\n return sample_sheet",
"def process_table_init(self):\n logging.debug(\"Processing table initialization, %d entries\",\n len(self.table_initialization))\n\n for init_entry in self.table_initialization:\n for table_name, entry_desc in init_entry.items():\n self.air_table[table_name].add_entry(\n table_entry.description_to_entry(entry_desc))",
"def populate_from_samples():\n\n # Tags\n try:\n for row in get_csv_data('samples/tags.csv'):\n tag = Tag(name=row['Name'], desc=row['Description'])\n db_session.add(tag)\n finally:\n db_session.commit()\n\n # Organizations\n try:\n for row in get_csv_data('samples/organizations.csv'):\n org = Organization(desc=row['Name'])\n db_session.add(org)\n finally:\n db_session.commit()\n\n # Departments\n try: \n for row in get_csv_data('samples/departments.csv'):\n org = db_session.query(Organization).filter_by(desc=row['Organization']).one()\n dpt = Department(desc=row['Department'], org=org)\n\n db_session.add(dpt)\n finally:\n db_session.commit()\n\n # Application types\n try:\n for row in get_csv_data('samples/apptypes.csv'):\n apptype = AppType(desc=row['Name'])\n db_session.add(apptype)\n finally:\n db_session.commit()\n\n # Applications\n try:\n for row in get_csv_data('samples/applications.csv'):\n apptype = db_session.query(AppType).filter_by(desc=row['AppType']).one()\n dpt = db_session.query(Department).join(Organization).\\\n filter(Department.desc==row['Department']).\\\n filter(Organization.desc==row['Organization']).\\\n one()\n\n app = App(desc=row['Application'], \n app_type=apptype, \n department=dpt,\n version=row['Version'],\n environment=row['Environment'],\n platform=row['Platform']\n )\n\n db_session.add(app)\n finally:\n db_session.commit()\n\n # Connections and Headers\n try:\n for row in get_csv_data('samples/connections.csv'):\n conn = Connection(conn_type=row['Type'], url=row['URL'], port=row['Port'], answer=row['Answer'])\n header = Header(conn_id=conn.id, header=row['Header'], value=row['Value'], conn=conn)\n\n db_session.add(conn)\n db_session.add(header)\n finally:\n db_session.commit()",
"def populate_contents(self):\n\n data_table = self.data_table\n world = self.world\n\n self.add_text_row('World Name', data_table.world_name_label.text())\n self.add_text_row('Coordinates', data_table.world_coords_label.text())\n self.add_text_row('World Type', data_table.world_type_label.text())\n if data_table.world_extra_label.text() != '':\n self.add_text_row('Extra Info', data_table.world_extra_label.text())\n self.add_text_row('Filename', world.base_filename)\n self.add_text_row('Size', '{}x{}'.format(*world.info.size))\n\n if len(world.info.dungeons) > 0:\n dungeons = self.add_text_row('Dungeons', '<br/>'.join(sorted(world.info.dungeons)))\n else:\n self.add_text_row('Dungeons', '-')\n\n if len(world.info.biomes) > 0:\n biomes = self.add_text_row('Biomes', '<br/>'.join(sorted(world.info.biomes)))\n else:\n self.add_text_row('Biomes', '-')",
"def __init__(self, books_filename, authors_filename, books_authors_link_filename): \n with open(books_filename, newline='') as booksFile:\n books_reader = csv.reader(booksFile)\n try:\n bookArray = []\n newBookArray = []\n idCounter = 0\n for row in books_reader:\n newBookArray = [idCounter, row[0], int(row[1])]\n bookArray.append(newBookArray)\n idCounter = idCounter + 1\n self.simpleBookArray = bookArray\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(books_filename, books_reader.line_num, e))\n \n \"\"\"\n This function takes in the author_reader and puts the csv file into an array of arrays, in which\n each \"author\" is defined as an inner array, and each inner array is organized like so:\n [id, last name, first name, birth year, death year]\n \"\"\" \n with open(authors_filename, newline='') as authorsFile:\n authors_reader = csv.reader(authorsFile)\n try:\n authorArray = []\n newAuthorArray = []\n for row in authors_reader:\n if row[4] == \"NULL\":\n endDate = None\n else:\n endDate = row[4]\n if endDate == None:\n newAuthorArray = [int(row[0]), row[1], row[2], int(row[3]), endDate]\n else:\n newAuthorArray = [int(row[0]), row[1], row[2], int(row[3]), int(row[4])]\n authorArray.append(newAuthorArray)\n self.simpleAuthorArray = authorArray\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(authors_filename, authors_reader.line_num, e))\n \n \"\"\"\n This function takes in the link_reader and puts the csv file into an array of arrays, in which each\n \"book\" is defined as an inner array, and each inner array is organized as the following:\n [book id, author id]\n \"\"\"\n with open(books_authors_link_filename, newline='') as linkFile:\n link_reader = csv.reader(linkFile)\n try:\n linkArray = []\n newLinkArray = []\n for row in link_reader:\n newLinkArray = [int(row[0]), int(row[1])]\n linkArray.append(newLinkArray)\n self.simpleLinkArray = linkArray\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(books_authors_link_filename, link_reader.line_num, e))",
"def handle_dict(data: list, output_path: str, title: str) -> None:\n output = []\n for row in data:\n heading = get_heading(row)\n output.append(f'# {heading}')\n output.append('')\n for _, header in utils.srg_export.data.COLUMN_MAPPINGS.items():\n output.append(f'## {header}')\n content = get_content(header, row)\n output.append(content)\n output.append('')\n output.append('')\n output.append('\\\\newpage')\n output.append('')\n\n write_md_file(output, output_path)"
] | [
"0.67089665",
"0.6166051",
"0.61170274",
"0.6050481",
"0.5880672",
"0.5851863",
"0.5669068",
"0.5660942",
"0.5614498",
"0.5612204",
"0.5433636",
"0.5422656",
"0.54159504",
"0.5413617",
"0.5391802",
"0.53912425",
"0.53677887",
"0.5349724",
"0.5343896",
"0.5338233",
"0.5330698",
"0.5302253",
"0.52404815",
"0.5228877",
"0.52244836",
"0.52169794",
"0.5210149",
"0.5200284",
"0.5194208",
"0.51679605"
] | 0.703236 | 0 |
Take in a (valid) set of user information and create a new manager. Also need to check if the user is currently a customer, and remove the user if so. | def add_manager(self, info):
self.cursor.execute("""SELECT COUNT(*) FROM managerpersonal WHERE phone=%s""", (int(info['phone']),))
if not self.cursor.fetchone()[0]:
self.cursor.execute("""INSERT INTO managerpersonal VALUES (%s,%s)""", (int(info['phone']), info['address']))
self.cursor.execute("""INSERT INTO managercredentials (loginID, firstName, lastName, salt, pass_key, phone)
VALUES (%s,%s,%s,%s,%s,%s)""", (info['loginID'], info['firstName'], info['lastName'], info['salt'],
info['key'], int(info['phone'])))
self.db.commit()
self.cursor.execute("""SELECT COUNT(*) FROM customercredentials WHERE loginID=%s""", (info['loginID'],))
result = self.cursor.fetchone()
if result[0]:
self.cursor.execute("""DELETE FROM customerCredentials WHERE loginID=%s""", (info['loginID'],))
self.db.commit()
self.cursor.execute("""SELECT COUNT(*) FROM customerCredentials WHERE phone=%s""", (int(info['phone']),))
phone_count = self.cursor.fetchone()
if not phone_count[0]:
self.cursor.execute("""DELETE FROM customerPersonal WHERE phone=%s""", (int(info['phone']),))
self.db.commit()
self.update_book_scores()
self.update_comment_usefulness() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')",
"def test_with_manager_not_in_state(self):\n user_key = Key()\n user_id = user_key.public_key\n manager_key = Key()\n manager_id = manager_key.public_key\n name = self.test.user.name()\n\n message = protobuf.user_transaction_pb2.CreateUser(\n user_id=user_id, name=name, metadata=None, manager_id=manager_id\n )\n inputs, outputs = self.rbac.user.make_addresses(\n message=message, signer_keypair=user_key\n )\n payload = self.rbac.user.batch.make_payload(\n message=message,\n message_type=self.rbac.user.message_type,\n inputs=inputs,\n outputs=outputs,\n )\n _, status = self.rbac.user.send(signer_keypair=user_key, payload=payload)\n self.assertStatusInvalid(status)",
"def test_with_self_manager(self):\n user_key = Key()\n user_id = user_key.public_key\n name = self.test.user.name()\n\n message = protobuf.user_transaction_pb2.CreateUser(\n user_id=user_id, name=name, metadata=None, manager_id=user_id\n )\n inputs, outputs = self.rbac.user.make_addresses(\n message=message, signer_keypair=user_key\n )\n payload = self.rbac.user.batch.make_payload(\n message=message,\n message_type=self.rbac.user.message_type,\n inputs=inputs,\n outputs=outputs,\n )\n _, status = self.rbac.user.send(signer_keypair=user_key, payload=payload)\n self.assertStatusInvalid(status)",
"def create_manager(self, username, tenancy):\n raise NotImplementedError",
"def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()",
"def create(user):\n # if the user does not have an email manager yet, create it\n if not EmailManager.objects.filter(user=user).exists():\n\n # create a new email manager obj\n new = EmailManager(\n key=EmailManager.generate_key(),\n user=user\n )\n new.save()\n\n # Send confirmation email\n welcome_email(new)\n\n return user.emailmanager",
"def new_user(cls, user):\r\n pass",
"def new_user(cls, user):\n pass",
"def promote_to_manager(self, loginID):\n self.cursor.execute(\"\"\"SELECT * FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n creds = self.cursor.fetchone()\n self.cursor.execute(\"\"\"SELECT * FROM customerpersonal WHERE phone=%s\"\"\", (int(creds[5]),))\n personal = self.cursor.fetchone()\n\n info = {'phone': creds[5], 'address': personal[1], 'loginID': creds[0], 'firstName': creds[1],\n 'lastName': creds[2],\n 'salt': creds[3], 'key': creds[4]}\n self.add_manager(info)",
"def users_create():",
"def new_user():\n db = get_db()\n users = db.users\n data = request.json\n \n # Validate that the data has a 'role' field\n if not \"role\" in data:\n raise APIException(status_code=400, message='data must have a role field')\n elif not isinstance(data['role'], str):\n raise APIException(status_code=400, message='role must be a string')\n \n if data['role'].lower() == \"mentee\":\n # Validate that the data has a user_id in it, and that there isn't already a user with the same user_id\n if not \"user_id\" in data:\n raise APIException(status_code=400, message='data must have a user_id field for a new Mentee')\n cursor = users.find({\"user_id\": data['user_id']})\n if cursor.count() is 1:\n raise APIException(status_code=403, message='a user with user_id already exists')\n elif cursor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple users with same user_id exist, which is not allowed')\n elif data['role'].lower() == \"mentor\":\n data['user_id'] = \"\"\n else:\n raise APIException(status_code=400, message=\"user has to be either a Mentor or a Mentee\")\n\n # Remove user_id from data so before the validation function, then add it back after\n user_id = data['user_id']\n del data['user_id']\n validate_user_data(data, is_adding_new_user=True)\n data['user_id'] = user_id\n \n # Insert user and return the newly created user_id\n postid = users.insert_one(data)\n return_data = {\"user_id\": user_id, \"mongo_id\": str(postid.inserted_id)}\n return flask.jsonify(**return_data), 200",
"def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)",
"def assign_store_manager(user_name: str, new_store_manager_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.ADD_MANAGER.value, store_name)\n permission_handler.assign_store_employee(action.MANAGER_INITIAL_PERMISSIONS,\n new_store_manager_name,\n store_name)\n user_handler.assign_store_employee(user_name, new_store_manager_name, store_name)",
"def createUser(self, user):\n dao = UserDAO()\n if not dao.userExist(user.address):\n DatabaseCollections.userCollection.insert_one(\n {\n \"address\": user.address,\n \"index\": user.index,\n \"messageFlow\": user.messageFlow,\n \"lotteryList\": user.lotteryList,\n \"count\": user.count,\n \"newUser\": user.newUser\n }\n )\n else:\n dao.updateUserElder(user.address, \"False\")\n count = dao.getUser(user.address).count\n dao.updateUserCount(user.address, count+1)\n dao.updateUserMessageFlow(user.address, 1)",
"def new_user():\n pass",
"def create_user(data):\n return woo_request_helper().post_details(wc_endpoint='customers', params=data)",
"def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False",
"def test_unit_with_self_manager(self):\n user_key = Key()\n user_id = user_key.public_key\n name = self.test.user.name()\n with self.assertRaises(ValueError):\n message = self.rbac.user.make(\n user_id=user_id, name=name, metadata=None, manager_id=user_id\n )\n\n message = protobuf.user_transaction_pb2.CreateUser(\n user_id=user_id, name=name, metadata=None, manager_id=user_id\n )\n with self.assertRaises(ValueError):\n payload = self.rbac.user.make_payload(message=message)\n\n with self.assertRaises(ValueError):\n payload = self.rbac.user.create(signer_keypair=user_key, message=message)",
"def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')",
"def create_user(\n *,\n user_in: schemas.UserCreate,\n) -> schemas.User:\n next_user_id = users[-1].id + 1 # type: ignore\n user = schemas.User(\n id=next_user_id,\n email=user_in.email,\n is_active=user_in.is_active,\n is_superuser=user_in.is_superuser,\n full_name=user_in.full_name,\n )\n users.append(user)\n return user",
"def createNewUser(self, userList, UserObj):\n if(self.adminAccess):\n userList.append(UserObj)\n \n return userList",
"def create_manager(self, name, pos, dept):\n self.manager[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'senior': [],\n 'junior': [],\n 'trainee': []\n }\n )",
"def create_user_using_manager(username,password):\n manager = UserManager()\n return manager.create_user(username=username, password=password)",
"def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)",
"def create(self, validated_data):\n admin_id = validated_data.pop('admin_id', None)\n users = validated_data.pop('users', None)\n group = Group.objects.create(\n admin=admin_id, **validated_data\n )\n if users is not None:\n group.users.set(users)\n group.save()\n return group",
"def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user",
"def create(self, validated_data:tuple):\n user = user_details.objects.create(user_name=validated_data[0], email=validated_data[1], password=validated_data[2])\n return user",
"def create_user_by_id(cls, m_id):",
"def create(self, validated_data):\n user = super(UserSerializer, self).create(validated_data)\n for i in user.groups.all():\n if i.accountants.exists():\n company = get_object_or_404(\n models.Company, pk=i.accountants.all().first().id)\n assign_perm(\"change_user\", company.admins, user)\n assign_perm(\"view_user\", company.admins, user)\n assign_perm(\"delete_user\", company.admins, user)\n assign_perm(\"change_user\", user, user)\n assign_perm(\"view_user\", user, user)\n assign_perm(\"delete_user\", user, user)\n\n user.user_permissions.add(\n Permission.objects.get(name='Can add sale'))\n user.user_permissions.add(\n Permission.objects.get(name='Can delete sale'))\n user.user_permissions.add(\n Permission.objects.get(name='Can add purchase'))\n user.user_permissions.add(\n Permission.objects.get(name='Can change sale'))\n user.user_permissions.add(\n Permission.objects.get(name='Can change purchase'))\n user.user_permissions.add(\n Permission.objects.get(name='Can delete purchase'))\n user.user_permissions.add(\n Permission.objects.get(name='Can add media'))\n user.user_permissions.add(\n Permission.objects.get(name='Can delete media'))\n user.set_password(validated_data['password'])\n user.save()\n return user",
"def _create(cls, model_class, *args, **kwargs):\n manager = cls._get_manager(model_class)\n # The default would use ``manager.create(*args, **kwargs)``\n return manager.create_user(*args, **kwargs)"
] | [
"0.65041286",
"0.64282334",
"0.6421729",
"0.6411341",
"0.6370367",
"0.6361717",
"0.6351803",
"0.628153",
"0.62290317",
"0.6224785",
"0.62097484",
"0.6164947",
"0.60557103",
"0.6002894",
"0.59604114",
"0.5946951",
"0.5930125",
"0.5885216",
"0.58720917",
"0.58694744",
"0.5866902",
"0.58313817",
"0.5818309",
"0.5815366",
"0.580738",
"0.5806594",
"0.57897246",
"0.5789302",
"0.57876337",
"0.57815385"
] | 0.6900736 | 0 |
Given a valid login ID, promote the user to a manager by removing their credentials from the customer tables and adding it to the manager tables. | def promote_to_manager(self, loginID):
self.cursor.execute("""SELECT * FROM customercredentials WHERE loginID=%s""", (loginID,))
creds = self.cursor.fetchone()
self.cursor.execute("""SELECT * FROM customerpersonal WHERE phone=%s""", (int(creds[5]),))
personal = self.cursor.fetchone()
info = {'phone': creds[5], 'address': personal[1], 'loginID': creds[0], 'firstName': creds[1],
'lastName': creds[2],
'salt': creds[3], 'key': creds[4]}
self.add_manager(info) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_manager(self, loginID):\n try:\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM managercredentials WHERE loginID=%s\"\"\", (loginID,))\n if not self.cursor.fetchone()[0]:\n return False\n self.cursor.execute(\"\"\"DELETE FROM managercredentials WHERE loginID=%s\"\"\", (loginID,))\n self.db.commit()\n self.cursor.execute(\"\"\"DELETE FROM managerpersonal WHERE phone NOT IN \n (SELECT phone FROM managercredentials)\"\"\")\n self.db.commit()\n return True\n except Exception as e:\n return False",
"def add_manager(self, info):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM managerpersonal WHERE phone=%s\"\"\", (int(info['phone']),))\n if not self.cursor.fetchone()[0]:\n self.cursor.execute(\"\"\"INSERT INTO managerpersonal VALUES (%s,%s)\"\"\", (int(info['phone']), info['address']))\n self.cursor.execute(\"\"\"INSERT INTO managercredentials (loginID, firstName, lastName, salt, pass_key, phone)\n VALUES (%s,%s,%s,%s,%s,%s)\"\"\", (info['loginID'], info['firstName'], info['lastName'], info['salt'],\n info['key'], int(info['phone'])))\n\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customercredentials WHERE loginID=%s\"\"\", (info['loginID'],))\n result = self.cursor.fetchone()\n if result[0]:\n self.cursor.execute(\"\"\"DELETE FROM customerCredentials WHERE loginID=%s\"\"\", (info['loginID'],))\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customerCredentials WHERE phone=%s\"\"\", (int(info['phone']),))\n phone_count = self.cursor.fetchone()\n if not phone_count[0]:\n self.cursor.execute(\"\"\"DELETE FROM customerPersonal WHERE phone=%s\"\"\", (int(info['phone']),))\n self.db.commit()\n self.update_book_scores()\n self.update_comment_usefulness()",
"def promote_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(id) + \"admin\").encode()).hexdigest()\n curs.execute(\"INSERT INTO admins(id) VALUES(?)\", (encrypted_id,))\n header.commit()\n self.__update_admin_cache()",
"def promote(self):\n if self.is_admin == True:\n pass\n self.is_admin = True\n User.save(self)",
"def remove_customer(self, loginID):\n try:\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n if not self.cursor.fetchone()[0]:\n return False\n self.cursor.execute(\"\"\"DELETE FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n self.db.commit()\n self.cursor.execute(\"\"\"DELETE FROM customerpersonal WHERE phone NOT IN \n (SELECT phone FROM customercredentials)\"\"\")\n self.db.commit()\n self.update_book_scores()\n self.update_comment_usefulness()\n return True\n except Exception as e:\n return False",
"def promote_user(username):\n user = User.get_user_by_username(username)\n user.is_admin = True\n user.save()",
"def promote(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n if not self.data_base.check_admin():\n output = \"This command is only accessible to admins\"\n\n elif len(message) == 2:\n if not self.data_base.check_admin(id=message[1]):\n self.data_base.promote_admin(id=message[1])\n output = f\"Promoted user with ID: {message[1]} to Admin role.\"\n else:\n output = \"The user you're trying to Promote is already an admin\"\n else:\n output = \"you might have made a syntax mistake, the correct form of promoting someone is /promote {his id number}\"\n\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)",
"def assign_store_manager(user_name: str, new_store_manager_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.ADD_MANAGER.value, store_name)\n permission_handler.assign_store_employee(action.MANAGER_INITIAL_PERMISSIONS,\n new_store_manager_name,\n store_name)\n user_handler.assign_store_employee(user_name, new_store_manager_name, store_name)",
"def change(login):\n try:\n manager = Actions()\n manager.change_user(login)\n except Exception as e:\n print(e)",
"def update_user_login(sender, user, **kwargs):\n user.userlogin_set.create(timestamp=timezone.now())\n user.save()\n\n bonus_wallet = BonusWallet.objects.filter(user=user)\n if not bonus_wallet.exists():\n bonus_wallet = BonusWallet.objects.create(user=user)\n bonus_wallet.save()\n else:\n bonus_wallet = bonus_wallet[0]\n\n login_bonus = LoginBonus.objects.create(wallet=bonus_wallet)\n bonus_wallet.value += Decimal(login_bonus.value)\n bonus_wallet.save()",
"def test_swap_customer_on_login(self):\n request = self.factory.post('/shop/auth/login/', follow=True)\n request.user = self.bart\n old_customer = Customer()\n old_customer.save()\n request.session = {\n 'session_key': 'bart_swap',\n SESSION_KEY: old_customer.pk,\n }\n request.customer = self.bart.customer\n user_logged_in.send(sender=self.bart.__class__, request=request, user=self.bart)\n try:\n Customer.objects.get_customer(request, force_unauth=True)\n except Customer.DoesNotExist:\n pass\n else:\n self.fail(\"\"\"Unauthenticated customer should be deleted on login\n by a User with existing Customer\"\"\")\n self.assertEqual(request.customer, self.bart.customer)",
"def __promote_admin_pressed(self):\n\n id = self.__admin_controls_entry.get()\n if not id.isdigit():\n print(\"failed to promote the user, the given id isn't a number\")\n return\n self.__admin_controls_entry.delete(0, 'end')\n self.__data_base.promote_admin(id=id)",
"def update_customer(customer_id, login, name, email, phone=\"\", permission=0):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Customers\n SET login=?, customer_name=?, phone=?, email=?, perm=?\n WHERE id_customer=?\n \"\"\",\n (login, name, phone, email, permission, customer_id))",
"def delete_sql_login(user, server, userdata):\n global servers_to_remove\n betterprint(\"Removing LOGIN {} from server {}\".format(user, server))\n sql = \"DROP LOGIN [{}]\".format(user)\n try:\n betterprint(\"SQL: \" + sql)\n rows, userdata = execute_sql(sql, server, None, False, userdata)\n betterprint(\"LOGIN removal successful.\")\n\n if rows:\n servers_to_remove.append(server)\n return True, userdata\n except Exception as e:\n print (e)\n return False, userdata",
"def clear_previous_ministry_login(request, user, *args, **kwargs):\n user.logged_in_as = None\n user.save()",
"def demote_admin(self, user=None, id=None):\n\n if id == None:\n id = user.id\n if self.check_admin(id=id):\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(id) + \"admin\").encode()).hexdigest()\n curs.execute(\"DELETE FROM admins WHERE id = (?)\", (encrypted_id,))\n header.commit()\n self.__update_admin_cache()",
"def notifyLoginsChanged(self, oldLogins, principal):\n # A user with the new login already exists\n for login in principal.logins:\n if (login not in oldLogins) and (login in self.__id_by_login):\n raise ValueError('Principal Login already taken!, '+ login)\n\n for login in oldLogins:\n del self.__id_by_login[login]\n\n for login in principal.logins:\n self.__id_by_login[login] = principal.__name__",
"def test_associate_customer_on_login(self):\n request = self.factory.post('/shop/auth/login/', follow=True)\n request.user = self.lisa\n customer = Customer()\n customer.save()\n request.session = {\n 'session_key': 'lisa_swap',\n SESSION_KEY: customer.pk,\n }\n request.customer = Customer.objects.get_customer(request)\n user_logged_in.send(sender=self.lisa.__class__, request=request, user=self.lisa)\n self.assertEqual(request.customer, customer)\n self.assertEqual(request.customer.user, self.lisa)",
"def change_user(self, login):\n self.task_storage.change_user_config(login)",
"def unmake_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.remove_role_from_user(self, 'admin')\n db.session.commit()",
"def maps_login(self, role):\n from Testing.ZopeTestCase.PortalTestCase import user_name, user_password\n if role == 'manager':\n self.loginAsPortalOwner()\n elif role == 'member':\n self.login(user_name)\n elif role == 'another_member':\n self.login('another_member')\n elif role == 'anonym':\n self.logout()",
"def delete_customer(customer_id):\n try:\n remove_user = cm.Customers.get(cm.Customers.customer_id == customer_id)\n remove_user.delete_instance()\n except cm.DoesNotExist:\n logging.info(\"Customer successfully deleted from database.\")",
"def test_replace_user_identity_mapping(self):\n pass",
"def user_management_handler(redir_page: str, new_users_field_name: str, admins: bool):\n # Get a DB connection\n db = get_db()\n # The arguments should have a key \"add\" if the user clicked the \"+\" button\n if request.form.get(\"add\") is not None:\n # In a well-formatted request, this is a comma-separated list\n new_users = request.form.get(new_users_field_name)\n new_users_l = new_users.split(\",\")\n add_user(new_users_l, admins)\n return redirect(url_for(redir_page))\n elif request.form.get(\"update-rows\") is not None:\n # If necessary, update the row counts for the plain-text log viewer\n new_rows = int(request.form.get(\"rows\"))\n if new_rows is not None and new_rows in allowed_row_counts:\n session[\"log_rows\"] = new_rows\n elif request.form.get(\"delete\") is not None:\n # Copy the request, since we need to make changes\n args_copy = dict(request.form)\n # The arguments should have a key \"delete\" if the user clicked the trash bin.\n # Delete this key, since we don't need it\n del args_copy[\"delete\"]\n # If the request also has a new_admins key, delete that too\n if new_users_field_name in args_copy.keys():\n del args_copy[new_users_field_name]\n for user in args_copy.keys():\n if validate_username(user) and db.does_user_exist(user):\n # If we're deleting admins, avoid locking everybody out\n if admins:\n if avoid_lockouts() and is_admin(user):\n db.disable_user(user)\n else:\n return redirect(url_for(\"error\"))\n else:\n # Otherwise, just delete the user\n if not is_admin(user):\n db.disable_user(user)\n else:\n session[\"last_error\"] = (\n \"%s is not an administrator, so you cannot delete them in\"\n \" administrator editing mode.\"\n % (user,)\n )\n else:\n # If it wasn't a valid username or the user wasn't in the database,\n # If the user wasn't in the database, no error will be set, so set it\n if \"last_error\" not in session.keys():\n session[\"last_error\"] = \"That user doesn't exist.\"\n # Redirect to the error page\n return redirect(url_for(\"error\"))\n return redirect(url_for(redir_page))\n return redirect(url_for(redir_page))",
"def test_delete_user_identity_mapping(self):\n pass",
"def promote_user(self, username):\n parser_promote.add_argument('isadmin', choices=[\"True\", \"False\"],\n required=True, nullable=False,\n help=\"(Accepted values: True, False)\"\n )\n args = parser_promote.parse_args()\n isAdmin = request.json.get('isadmin')\n\n query = \"\"\"UPDATE users SET isadmin=%s WHERE username=%s\"\"\"\n values = isAdmin, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True",
"def _reset_admin(self):\r\n DBSession.execute(\r\n \"UPDATE users SET activated='1' WHERE username='admin';\")\r\n Activation.query.delete()\r\n transaction.commit()",
"def reinitUser(self, id : int):\n id = self.validateID(id)\n # ensure the ID exists in the database\n if not self.userIDExists(id):\n raise KeyError(\"user not found: \" + str(id))\n # Reset the user\n self.users[id].resetUser()",
"def __addNewAdminDB(self,admin_id,username,password,name,comment,creator_id):\n query = self.__addNewAdminQuery(admin_id,username,password,name,comment,creator_id)\n query += self.__addNewAdminIASQuery(username, creator_id)\n db_main.getHandle().transactionQuery(query)",
"def customer_login_put(user_details):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n UPDATE `user_login` \n SET `old_password`= `password`, \n `password` = SHA('%(password)s'), \n `change_password_date` = NOW() \n WHERE `user_id` = \\\"%(user_id)s\\\"\n \"\"\" % (user_details)\n\n cursor = db.cursor()\n result = {\"success\" : 0, \"message\" : \"Customer Login not updated\"}\n try:\n if cursor.execute(query):\n db.commit()\n result = {\"success\" : 0, \"message\" : \"Customer Login updated Successfully\"}\n except Exception as e:\n result = {\"success\" : 1, \"message\" : \"Customer Login can not be created. Error \\\"\\'%s\\'\\\" \" % (e) }\n finally:\n cursor.close()\n db.close()\n\n return result"
] | [
"0.59607804",
"0.5587557",
"0.5577293",
"0.5532288",
"0.54025316",
"0.5340027",
"0.5235987",
"0.52357197",
"0.5205971",
"0.5111072",
"0.5108285",
"0.50876516",
"0.5061663",
"0.5035165",
"0.5018789",
"0.4973354",
"0.4956313",
"0.49558786",
"0.4954053",
"0.49522933",
"0.4943693",
"0.4922816",
"0.49199557",
"0.4901657",
"0.48983207",
"0.48749736",
"0.48615855",
"0.48504514",
"0.48333606",
"0.48187184"
] | 0.7494607 | 0 |
Utility function that returns a boolean indicating whether or not the entered login ID is the ID of the system super manager. | def is_super_manager(self, loginID):
self.cursor.execute("""SELECT managerID FROM managercredentials WHERE loginID=%s""", (loginID,))
user_key = self.cursor.fetchone()[0]
self.cursor.execute("""SELECT MIN(managerID) FROM managercredentials""")
if user_key == self.cursor.fetchone()[0]:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_superuser():\n if sys.version > \"2.7\":\n for uid in os.getresuid():\n if uid == 0:\n return True\n else:\n if os.getuid() == 0 or os.getegid() == 0:\n return True\n return False",
"def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'",
"def is_superuser(connection, window_info, kwargs):\n return window_info and window_info.is_superuser",
"def can_get_root():\n # On Vista or higher, there's the whole UAC token-splitting thing.\n # Many thanks for Junfeng Zhang for the workflow:\n # http://blogs.msdn.com/junfeng/archive/2007/01/26/how-to-tell-if-the-current-user-is-in-administrators-group-programmatically.aspx\n proc = kernel32.GetCurrentProcess()\n # Get the token for the current process.\n try:\n token = ctypes.wintypes.HANDLE()\n OpenProcessToken(proc, TOKEN_QUERY, byref(token))\n try:\n # Get the administrators SID.\n sid = ctypes.create_string_buffer(SECURITY_MAX_SID_SIZE)\n sz = ctypes.wintypes.DWORD(SECURITY_MAX_SID_SIZE)\n target_sid = WinBuiltinAdministratorsSid\n CreateWellKnownSid(target_sid, None, byref(sid), byref(sz))\n # Check whether the token has that SID directly.\n has_admin = ctypes.wintypes.BOOL()\n CheckTokenMembership(None, byref(sid), byref(has_admin))\n if has_admin.value:\n return True\n # Get the linked token. Failure may mean no linked token.\n lToken = ctypes.wintypes.HANDLE()\n try:\n cls = TokenLinkedToken\n GetTokenInformation(token, cls, byref(lToken), sizeof(lToken), byref(sz))\n except WindowsError as e:\n if e.winerror == ERROR_NO_SUCH_LOGON_SESSION:\n return False\n elif e.winerror == ERROR_PRIVILEGE_NOT_HELD:\n return False\n else:\n raise\n # Check if the linked token has the admin SID\n try:\n CheckTokenMembership(lToken, byref(sid), byref(has_admin))\n return bool(has_admin.value)\n finally:\n kernel32.CloseHandle(lToken)\n finally:\n kernel32.CloseHandle(token)\n finally:\n kernel32.CloseHandle(proc)",
"def is_system_uid(uid):\n\t\treturn uid < LMC.configuration.users.uid_min \\\n\t\t\tor uid > LMC.configuration.users.uid_max",
"def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0",
"def is_superuser(self):\n return self.is_admin",
"def is_user(id):\n return id.startswith('U')",
"def has_root():\n return bool(shell32.IsUserAnAdmin())",
"def isSuperUser(token):\n try:\n decoded = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])\n if decoded['is_superuser'] == True:\n return True\n except:\n return False",
"def is_usermanager(self):\n return False",
"def user_in_session():\n return 'user_id' in login_session",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False",
"def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})",
"def is_mgr():\n if get_cluster_vendor() == \"sgi\":\n return sgi_cluster.is_sac()\n elif get_cluster_vendor() == \"ibm\": \n return ibm_cluster.is_xcat_mgr()\n\n return False",
"def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)",
"def is_usermanager(self):\n return self.can(Permission.CRUD_USERS)",
"def check_admin_session(self):\n for session in vms.get_vm_sessions(vm_name=self.vm_name):\n if (\n session.get_console_user()\n and\n session.get_user().get_user_name().startswith(\"admin\")\n ):\n return True\n return False",
"def is_user_root():\n return (True if os.getuid() == 0 else False)",
"async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True",
"def guess_is_sys_id(value):\n return re.match(r'^[A-Za-z0-9]{32}$', value) is not None",
"def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False",
"def isSystemId(_idtf):\n if _idtf.startswith(\"@@\"): return True\n if _idtf.startswith(\"tmp_\"): return True\n if len(_idtf) == 36 and _idtf[8] == '-' and _idtf[13] == '-' and _idtf[23] == '-': return True\n \n return False",
"def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"",
"def is_logged_in() -> bool:\n is_dev_login_disabled = SETTINGS.DEV_LOGIN_DISABLED and is_localhost()\n return bool(is_dev_login_disabled or is_logged_in_user())",
"def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_administrator(self):\n return False",
"def is_correct_user(self, login, password):\n pass",
"def is_local_administrator(self):\n\t\treturn bool(call_sdk_function('PrlUsrCfg_IsLocalAdministrator', self.handle))"
] | [
"0.6768197",
"0.65031356",
"0.64134824",
"0.6318678",
"0.6299954",
"0.62984467",
"0.62790716",
"0.6232972",
"0.615097",
"0.61283505",
"0.61260724",
"0.61119485",
"0.61034226",
"0.60718",
"0.60710406",
"0.6051024",
"0.60492224",
"0.60438657",
"0.6037717",
"0.6006233",
"0.59719896",
"0.5971657",
"0.5948721",
"0.59395",
"0.5934622",
"0.59301007",
"0.5923615",
"0.5849858",
"0.5833612",
"0.58240473"
] | 0.811737 | 0 |
Given an ISBN, find the book in the database and return a boolean indicating whether or not the book exists, along with its price, title, and stock. | def valid_book(self, info):
self.cursor.execute("SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s", (info['ISBN'],))
for book in self.cursor.fetchall():
return True, float(book[2]), book[1], book[3]
return False, 0, 0, 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_book_by_isbn(isbn):\n return Book.get_book(isbn)",
"def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)",
"def searchbook(isbn):\r\n print(\"Searching for isbn \" + isbn + \" in googlebooks...\")\r\n result = _search_by_isbn(isbn)\r\n \r\n if result[\"totalItems\"] == 0:\r\n return None\r\n \r\n b = _item2book(result[\"items\"][0])\r\n return b",
"def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)",
"def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False",
"def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)",
"def restock_book(self, isbn, quantity):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM book WHERE ISBN=%s\"\"\", (isbn,))\n if self.cursor.fetchone()[0]:\n self.cursor.execute(\"\"\"UPDATE book set stock=stock+%s WHERE ISBN=%s\"\"\", (quantity, isbn))\n self.db.commit()\n return True\n return False",
"def getISBN(self):\n return self.bookISBN",
"def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]",
"def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])",
"def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn",
"def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]",
"def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0",
"def book(isbn):\n isbn = Markup.escape(isbn)\n # check if book exist in database\n book_db = db.execute(\n \"SELECT * FROM books WHERE isbn LIKE :isbn\", {\"isbn\": isbn}\n ).fetchone()\n if book_db == None:\n return render_template(\n \"error.html\", error=\"ISBN invalid or not in our Database.\"\n )\n\n # Get detail from Goodreads\n res = requests.get(\n \"https://www.goodreads.com/book/review_counts.json\",\n params={\"key\": os.getenv(\"GOODREADS_API\"), \"isbns\": isbn},\n )\n\n if res.status_code != 200:\n return render_template(\"error.html\", error=\"Not found on our API.\")\n data = res.json()\n book = data[\"books\"][0]\n\n # Get the reviews for the book.\n book_reviews = db.execute(\n \"SELECT review.*, users.nickname FROM review JOIN users ON review.user_id = users.id WHERE book_id = :book_id\",\n {\"book_id\": book_db.id},\n ).fetchall()\n\n # Get my own review\n user = session.get(\"user\")\n my_review = db.execute(\n \"SELECT * FROM review WHERE (book_id = :book_id) AND user_id = (SELECT id from users WHERE username LIKE :user)\",\n {\"book_id\": book_db.id, \"user\": user},\n ).fetchone()\n\n if my_review is not None:\n # Print results\n return render_template(\n \"book.html\",\n book=book,\n book_db=book_db,\n book_reviews=book_reviews,\n my_review=my_review,\n )\n else:\n return render_template(\n \"book.html\",\n book=book,\n book_db=book_db,\n book_reviews=book_reviews,\n my_review=None,\n )",
"def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num",
"def testgetISBN(self):\r\n ebook1 = ElectronicResources()\r\n #ebook1.setListDevices([device1, device2])\r\n ebook1.setISBN(9780316485616)\r\n #ebook1.setEBookTitle('The Night Fire')\r\n #ebook1.setEBookAuthor('Harry Bosch')\r\n self.assertEqual(ebook1.getISBN(),9780316485616)",
"def query(isbn):\r\n wq = WEBQuery(SERVICE_URL.format(isbn=isbn))\r\n r = wq.parse_data() if wq.check_data() else None\r\n if r:\r\n return _records(isbn, r)\r\n return r",
"def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)",
"def get_isbn(self):\n return self.isbn",
"def filter_publication(publication, cmp_authors=True):\n query = None\n isbn_query = False\n\n # there can be ISBN query or book title query\n if publication.optionals and publication.optionals.ISBN:\n query = aleph.ISBNQuery(publication.optionals.ISBN)\n isbn_query = True\n else:\n query = aleph.TitleQuery(publication.title)\n\n result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), \"\")\n\n if not result.records:\n return publication # book is not in database\n\n # if there was results with this ISBN, compare titles of the books\n # (sometimes, there are different books with same ISBN because of human\n # errors)\n if isbn_query:\n for record in result.records:\n epub = record.epublication\n\n # try to match title of the book\n if compare_names(epub.nazev, publication.title) >= 80:\n return None # book already in database\n\n return publication\n\n # checks whether the details from returned EPublication match Publication's\n for record in result.records:\n epub = record.epublication\n\n # if the title doens't match, go to next record from aleph\n if not compare_names(epub.nazev, publication.title) >= 80:\n continue\n\n if not cmp_authors:\n return None # book already in database\n\n # compare authors names\n for author in epub.autori:\n # convert Aleph's author structure to string\n author_str = \"%s %s %s\" % (\n author.firstName,\n author.lastName,\n author.title\n )\n\n # normalize author data from `publication`\n pub_authors = map(lambda x: x.name, publication.authors)\n if type(pub_authors) not in [list, tuple, set]:\n pub_authors = [pub_authors]\n\n # try to compare authors from `publication` and Aleph\n for pub_author in pub_authors:\n if compare_names(author_str, pub_author) >= 50:\n return None # book already in database\n\n return publication # book is not in database",
"def check_book(book_info, user_id):\n book = session.query(Book).filter(or_(Book.id == book_info,\n Book.book_name == book_info)).filter(Book.user_id == user_id).first()\n if book:\n return book",
"def book_by_isbn(ISBN):\n data = {}\n for book in root.findall('Book'):\n for elem in book:\n isbn = book.find('ISBN').text\n if isbn == ISBN:\n data['id'] = book.attrib['id']\n data[elem.tag] = elem.text\n return data",
"def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11",
"def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]",
"def api_book(isbn):\n isbn = Markup.escape(isbn)\n # check if book exist in database\n book_db = db.execute(\n \"SELECT * FROM books WHERE isbn LIKE :isbn\", {\"isbn\": isbn}\n ).fetchone()\n if book_db == None:\n return jsonify({\"error\": \"Invalid isbn or not in our database\"}), 404\n\n # Get detail from Goodreads\n res = requests.get(\n \"https://www.goodreads.com/book/review_counts.json\",\n params={\"key\": os.getenv(\"GOODREADS_API\"), \"isbns\": isbn},\n )\n\n if res.status_code != 200:\n raise Exception(\"ERROR: API request unsuccessful.\")\n data = res.json()\n book = data[\"books\"][0]\n\n # Print results\n return jsonify(\n {\n \"title\": book_db.title,\n \"author\": book_db.author,\n \"year\": book_db.year,\n \"isbn\": book_db.isbn,\n \"review_count\": book[\"work_ratings_count\"],\n \"average_score\": book[\"average_rating\"],\n }\n )",
"def get_book_data(isbn: int):\n try:\n book = next(iter(core.Book.search(('isbn', 'eq', isbn))))\n except StopIteration:\n pass # actually, I could put the whole rest of the function here\n else:\n data = core.Book.view_str(book.id)\n del data['id'], data['status'], data['return_date'], data['borrowed_by']\n del data['borrowed_by_id'], data['__str__']\n return data\n\n try:\n r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'\n + str(isbn) + '&method=simpleSearch&cqlMode=true')\n r.raise_for_status()\n except requests.exceptions.RequestException:\n raise core.BuchSchlossError('no_connection', 'no_connection')\n\n person_re = re.compile(r'(\\w*, \\w*) \\((\\w*)\\)')\n results = {'concerned_people': []}\n\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n if table is None:\n # see if we got multiple results\n link_to_first = page.select_one('#recordLink_0')\n if link_to_first is None:\n raise core.BuchSchlossError(\n 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)\n r = requests.get('https://portal.dnb.de'+link_to_first['href'])\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n\n for tr in table.select('tr'):\n td = [x.get_text('\\n').strip() for x in tr.select('td')]\n if len(td) == 2:\n if td[0] == 'Titel':\n results['title'] = td[1].split('/')[0].strip()\n elif td[0] == 'Person(en)':\n for p in td[1].split('\\n'):\n g = person_re.search(p)\n if g is None:\n continue\n g = g.groups()\n if g[1] == 'Verfasser':\n results['author'] = g[0]\n else:\n results['concerned_people'].append(g[1]+': '+g[0])\n elif td[0] == 'Verlag':\n results['publisher'] = td[1].split(':')[1].strip()\n elif td[0] == 'Zeitliche Einordnung':\n results['year'] = td[1].split(':')[1].strip()\n elif td[0] == 'Sprache(n)':\n results['language'] = td[1].split(',')[0].split()[0].strip()\n\n results['concerned_people'] = '; '.join(results['concerned_people'])\n return results",
"def get_single_book_info(self, isbn):\n self.cursor.execute(\"SELECT * FROM book WHERE ISBN=%s\", (isbn,))\n books = self.cursor.fetchall()\n for book in books:\n authors = []\n self.cursor.execute(\"\"\"SELECT name FROM Author A, Wrote W, Book B WHERE A.ID = W.authorID AND\n W.ISBN = B.ISBN AND B.ISBN = %s\"\"\", (isbn,))\n for auth in self.cursor.fetchall():\n authors.append(auth[0])\n return book, authors",
"def is_in_stock(self, bookID):\n query = f\"\"\"SELECT quantity from {TABLE} where bookID = '{bookID}';\"\"\"\n self.cursor.execute(query)\n\n q = self.cursor.fetchone()\n\n if q[0] > 0:\n return True\n else:\n return False",
"def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False",
"def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True"
] | [
"0.71685046",
"0.7099933",
"0.70221645",
"0.68731934",
"0.6789182",
"0.6749278",
"0.6529551",
"0.65249896",
"0.6510712",
"0.6385392",
"0.63603634",
"0.6330002",
"0.6277765",
"0.6264197",
"0.6224342",
"0.62236404",
"0.6164584",
"0.6158051",
"0.612079",
"0.60699725",
"0.60494524",
"0.60425496",
"0.60394377",
"0.6007894",
"0.59423804",
"0.5895645",
"0.5888706",
"0.58878607",
"0.58716947",
"0.58420336"
] | 0.72273356 | 0 |
Given a query entered by the user, return all books that match the search. Results must satisfy the provided filters. The result is built as a dict so that duplicates are avoided. Because the books may need to be sorted by a certain value, each filter check adds a subsection to the query, and only one query is executed at the end so that all of the results can be ordered together. | def find_books(self, query, filters, dates, order, descending, semantics, loginID):
if int(semantics):
# OR semantics
conjunction = ' UNION '
else:
# AND semantics
conjunction = ' INTERSECT '
results = {}
query_sections = ''
args = []
# we don't want all filters off, because that would throw a SQL error. So if user does not select
# any filters, we will assume they want all results.
if not filters:
filters['title_filt'] = 'on'
filters['author_filt'] = 'on'
filters['lang_filt'] = 'on'
filters['publisher_filt'] = 'on'
# go through each active filter and do a query based on that filter, then append results to the final
# return value
if 'title_filt' in filters and query[0]:
query_sections += "SELECT * FROM book WHERE title LIKE %s"
args.append('%' + query[0] + '%')
if 'author_filt' in filters and query[1]:
if query_sections:
query_sections += conjunction
query_sections += """SELECT B.ISBN, title, publisher, B.lang, publicationDate, pageCount,
stock, B.price, B.subject, avg_rating, total_rating_score, num_ratings FROM book B, author A, wrote W
WHERE W.ISBN = B.ISBN AND W.authorID = A.ID AND A.name LIKE %s"""
args.append('%' + query[1] + '%')
if 'lang_filt' in filters and query[2]:
if query_sections:
query_sections += conjunction
query_sections += "SELECT * FROM book WHERE lang LIKE %s"
args.append('%' + query[2] + '%')
if 'publisher_filt' in filters and query[3]:
if query_sections:
query_sections += conjunction
query_sections += "SELECT * FROM book WHERE publisher LIKE %s"
args.append('%' + query[3] + '%')
# if the query is empty, that means they did not fill out any of the forms for filters they wanted.
if not query_sections:
return results
# determine ordering method
if order == '0':
query_sections += " ORDER BY publicationDate"
# if descending is true, add descending specification
if int(descending):
query_sections += " DESC"
elif order == '1':
query_sections += " ORDER BY avg_rating"
# if descending is true, add descending specification
if int(descending):
query_sections += " DESC"
# execute final constructed query and store results in a dict
self.cursor.execute(query_sections, args)
books = self.cursor.fetchall()
for book in books:
if str(book[0]) not in results:
cur_authors = []
results[str(book[0])] = book
# now we need to find all the authors of this book so we can display them
self.cursor.execute("""SELECT name FROM author A, wrote W, book B WHERE A.ID = W.authorID AND
W.ISBN = B.ISBN AND B.ISBN = %s""", (book[0],))
for author in self.cursor.fetchall():
cur_authors.append(author[0])
results[str(book[0])] = [results[str(book[0])], cur_authors]
# filter results so only trusted comments are included in average rating without changing database
if order == '2':
actual_ratings = []
for book in books:
if not any(str(book[0]) in sub for sub in actual_ratings):
self.cursor.execute("""SELECT score FROM trusts T, comment C WHERE T.loginID = %s AND
T.otherLoginID = C.loginID AND T.trustStatus = 'TRUSTED' AND
C.ISBN = %s""", (loginID, str(book[0])))
current_sum = 0
current_num_users = 0
for score in self.cursor.fetchall():
current_num_users = current_num_users+1
current_sum = current_sum+score[0]
final_score = None
if current_num_users:
final_score = current_sum/current_num_users
else:
final_score = None
actual_ratings.append([str(book[0]), final_score])
if int(descending):
is_reverse = True
else:
is_reverse = False
actual_ratings = sorted(actual_ratings, key=lambda l:-1*float('inf') if l[1] is None else l[1],
reverse=is_reverse)
sorted_results = {}
for [book, score] in actual_ratings:
unfiltered_data = results[book]
t = unfiltered_data[0]
new_data = [(t[0],t[1],t[2],t[3],t[4],t[5],t[6],t[7],t[8],round(score,2) if score is not None else score,
t[9],t[10]), unfiltered_data[1]]
sorted_results[book] = new_data
results = sorted_results
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_for_books(words, filters):\n db = get_db()\n books = {}\n\n if not filters:\n filters.append('title')\n\n for filter in filters:\n if filter == 'title':\n temp = Book(db).search(create_search_query(words))\n for book in temp:\n books[book['id']] = book\n elif filter == 'author':\n temp = Author(db).search_for_books(create_search_query(words, 'first_name', 'last_name'))\n for book in temp:\n if book['id'] not in books:\n books[book['id']] = book\n elif filter == 'genre':\n temp = Genre(db).search_for_books(create_search_query(words, 'name'))\n for book in temp:\n if book['id'] not in books:\n books[book['id']] = book\n\n return [book for book in books.values()]",
"def find_books(self):\n search_query = unicode(self.search_input.data)\n q = u'%{}%'.format(search_query)\n\n # used for dummy emulation of caseinsensetive search\n qC = u'%{}%'.format(capfirst(search_query))\n\n books = Book.query.filter(db.or_(\n Book.authors.any(db.or_(\n Author.name.like(q),\n Author.name.like(qC))),\n Book.title.like(q),\n Book.title.like(qC)),)\n\n return books",
"def filter_books():\n if request.method != \"POST\":\n return render_template(\"error.html\", message=\"First Login with your username.\")\n else:\n \n #according to the selected field, we did a search\n book_field = request.form.get(\"book_field\")\n book_field = book_field.lower()\n field_value = request.form.get(\"field_value\")\n field_value = '%' + field_value + '%'\n\n stmt = \"SELECT * FROM book WHERE \" + book_field +\" LIKE :field_value\"\n\n filter_books = db.execute(stmt, {\"book_field\":book_field , \"field_value\":field_value}).fetchall()\n \n\n # Get the all the books values\n books = db.execute(\"SELECT * FROM book\").fetchall()\n search_options = [\"ISBN\",\"Title\", \"Author\"]\n \n return render_template(\"search.html\", books=books, filter_books = filter_books,search_options=search_options)",
"def genSearch(request):\n \n assert isinstance(request, HttpRequest)\n booklist=[]\n form = request.GET.copy();\n searchvalue =form['query']\n for k,v in get_valid_Books().items():\n if searchvalue.lower() in v.title.lower() or searchvalue.lower() in v.desc.lower() or searchvalue.lower() in v.a_id.name.lower():\n booklist.append(v)\n if booklist is None:\n clearfilter=\"False\"\n else:\n clearfilter=\"True\"\n\n return render(\n request,\n 'app/about.html',\n {\n 'title':'Books',\n 'books':booklist,\n 'clearfilter':clearfilter,\n 'year':datetime.now().year,\n }\n )",
"def search():\n\n # TO DO: refine with wildcard to curb superfluous results\n \n # logged in users can search for books\n # via 'isbn', 'author', or 'title'\n query = request.form.get(\"search\")\n if not query:\n return render_template(\"home.html\", result=0, name=session[\"name\"],result_head=\"Results\")\n \n # query 'isbn'\n if query.isdigit():\n res = db.execute(\"SELECT * FROM books WHERE isbn LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n else:\n # query 'author'\n res = db.execute(\"SELECT * FROM books WHERE author LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n # If no result from author, query 'title'\n if len(res) == 0:\n res = db.execute(\"SELECT * FROM books WHERE title LIKE :query\",\n {\"query\": f\"{query}%\"}).fetchall()\n if len(res) == 0:\n res = 0\n return render_template(\"home.html\", result=res, name=session[\"name\"], result_head=\"Results\")",
"def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])",
"def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books",
"def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)",
"def library_searched():\n\n searched_result = []\n \n updated_books = duplicated_code()\n\n if request.method == 'POST':\n if request.form['type_search'] == 'book':\n book_title = request.form['search']\n for book in updated_books:\n if book['title'] == book_title:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'genre':\n book_genre = request.form['search']\n for book in updated_books:\n if book['genre'] == book_genre:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'author':\n book_author = request.form['search']\n for book in updated_books:\n if book['author_name'] == book_author:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n else:\n return render_template(\"library_searched.html\")",
"def get_queryset(self):\n queryset = Book.objects.all()\n title = self.request.query_params.get(\"title\", None)\n if title:\n queryset = queryset.filter(title__icontains=title)\n author = self.request.query_params.get(\"author\", None)\n if author:\n queryset = queryset.filter(author__icontains=author)\n publication_language = self.request.query_params.get(\n \"publication_language\", None\n )\n if publication_language:\n queryset = queryset.filter(\n publication_language__icontains=publication_language\n )\n publication_date_start = self.request.query_params.get(\n \"publication_date_start\", None\n )\n if publication_date_start:\n queryset = queryset.filter(publication_date__gte=publication_date_start)\n publication_date_end = self.request.query_params.get(\n \"publication_date_end\", None\n )\n if publication_date_end:\n queryset = queryset.filter(publication_date__lte=publication_date_end)\n return queryset",
"def search(self, title=\"\", author=\"\", year=\"\", isbn=\"\"):\n self.cursor.execute(\"SELECT * FROM Book WHERE Title = ? OR Author = ? \\\n OR Year = ? OR ISBN = ?\", (title, author, year, isbn))\n rows = self.cursor.fetchall()\n return rows",
"def search(self, filter: str = None) -> dict:\n r = requests.get(self.url, headers=self.headers)\n\n if filter:\n data = r.json()\n return filter_list(data=data, filter_by=filter)\n\n return r.json()",
"def search_all_bookings(q, county, dicipline, max_price, show_full_boked, show_canceled, show_done, ordering):\n sqs = SearchQuerySet().models(Booking)\n\n # TODO: Find out what this field should be used for O.o\n # if q is not None and q != '':\n # sqs = sqs.filter(name=AutoQuery(q))\n\n if max_price is not None and max_price not in ('', '0'):\n sqs = sqs.filter(price__lt=max_price)\n\n states = [Booking.STATE_PLANNING]\n\n if show_canceled:\n states.append(Booking.STATE_CANCELED)\n\n if show_done:\n states.append(Booking.STATE_DONE)\n\n sqs = sqs.filter(state__in=states)\n\n if show_full_boked:\n sqs = sqs.filter_or(full=to_es_bool(show_full_boked))\n else:\n sqs = sqs.filter(full=to_es_bool(show_full_boked))\n\n if county is not None and county not in ('', '0'):\n sqs = sqs.filter(county=county)\n\n if dicipline is not None and dicipline not in ('', '0'):\n sqs = sqs.filter(dicipline=dicipline)\n\n # Ordering for the sql query\n ordering_mapping = {\n '': '-id', # Default ordering by id\n 'A': '-id', # Datum\n 'B': '-free_spots', # Free spots\n 'C': '-id', # Booked spots\n 'D': '-price', # Most Expensive\n 'E': 'price', # Cheapest Price\n }\n sqs = sqs.order_by(ordering_mapping[ordering])\n\n print(sqs.query)\n\n # Extract all objects based on ES search\n # return order_by_id(Booking, sqs.values_list('pk', flat=True), ordering=ordering_mapping[ordering])\n return sqs.values_list('pk', flat=True)",
"def search():\n try:\n query = request.args.get(\"q\").lower()\n except AttributeError:\n query = request.args.get(\"q\")\n\n # Adding browse functionality\n browse = request.args.get(\"browse\")\n\n if browse is None:\n # Select all rows with a column value that includes query\n results = db.execute(\"SELECT * FROM books \"\n \"WHERE LOWER(isbn) LIKE CONCAT('%', :q, '%')\"\n \"OR LOWER(title) LIKE CONCAT('%', :q, '%') \"\n \"OR LOWER(author) LIKE CONCAT('%', :q, '%') \"\n \"ORDER BY title LIMIT 100\", {'q': query}).fetchall()\n else:\n # Select titles starting with letter\n results = db.execute(\n \"SELECT * FROM books \"\n \"WHERE LOWER(title) LIKE CONCAT(:q, '%') \"\n \"ORDER BY title\", {'q': query}).fetchall()\n\n return render_template(\"search.html\", browse=browse, query=query, results=results)",
"def search_books_body(collection_of_books: tuple) -> list:\r\n search_tag, search_keyword = search_info()\r\n try:\r\n found_books = search(collection_of_books, search_tag, search_keyword)\r\n except KeyError as err:\r\n print(f\"Invalid input: {err}\\n\"\r\n f\"{err} raised KeyError. Please follow the instruction carefully.\\n\")\r\n else:\r\n print_as_ordered_list(found_books)\r\n\r\n return found_books",
"def filter(self, objects_query, page):\n\n if page < 1:\n return {'books': {}}\n\n end = page * self.books_per_pege\n start = end - self.books_per_pege\n books_query = objects_query.order_by('title')\n books = books_query[start:end]\n\n context = {\n 'books': books,\n }\n return context",
"def search(self, filters=None):\n raise NotImplementedError",
"def within_book_search_json(request, book_id):\n query = request.GET.get('q')\n term = query # todo: meta options?\n book = Book.objects.get(pk=book_id)\n\n if not query or len(query) < 3:\n return\n\n # todo: method on objectmanager to search by keyword\n notes = book.notes.filter(\n Q(subject__icontains=term) |\n Q(quote__icontains=term) |\n Q(comment__icontains=term)\n )\n terms = book.terms.filter(\n Q(term__text__icontains=term) |\n Q(term__definition__icontains=term) |\n Q(quote__icontains=term) |\n Q(quote__icontains=term)\n )\n sections = book.sections.filter(\n Q(title__icontains=term) |\n Q(authors__name__icontains=term) |\n Q(subtitle__icontains=term) |\n Q(summary__icontains=term)\n )\n\n results = {'notes': [], 'terms': [], 'sections': []}\n for note in notes:\n results['notes'].append({\n 'title': highlighter.highlight(note.subject, query),\n 'description': highlighter.highlight(note.quote, query, 200),\n 'price': note.get_page_display(),\n 'url': note.get_absolute_url(),\n })\n\n for term in terms:\n results['terms'].append({\n 'title': highlighter.highlight(term.term.text, query),\n 'description': highlighter.highlight(term.quote, query, 200),\n 'price': term.get_page_display(),\n 'url': term.get_absolute_url(),\n })\n\n for section in sections:\n authors = ', '.join(a.name for a in section.authors.all())\n results['sections'].append({\n 'title': highlighter.highlight(section.title, query),\n 'description': highlighter.highlight(authors, query),\n 'price': section.get_page_display(),\n 'url': section.get_absolute_url(),\n })\n\n return JsonResponse({\n 'results': {\n 'books': {\n 'name': 'Notes',\n 'results': results['notes'],\n },\n 'authors': {\n 'name': 'Terms',\n 'results': results['terms'],\n },\n 'sections': {\n 'name': 'Sections',\n 'results': results['sections'],\n },\n }\n })",
"def get_books_data():\n entry = mongo.db.Books\n output = list()\n look_up_type = None\n if 'title' in request.args:\n look_up_type = 'title'\n if len(request.args['title']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['title'].strip('\"')\n title = entry.find({'title': {'$regex': value}})\n if title:\n for book in title:\n output.append({'title': book['title']})\n elif 'related_books' in request.args:\n look_up_type = 'similar_books'\n if len(request.args['related_books']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['related_books'].strip('\"')\n related_books = entry.find(\n {'similar_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for link in related['similar_books']:\n if value in link:\n output.append(({'similar_books': link}))\n elif 'author' in request.args:\n look_up_type = 'author'\n if len(request.args['author']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['author'].strip('\"')\n authors = entry.find({'author': {'$regex': value}})\n if authors:\n for name in authors:\n output.append({'author': name['author']})\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenBooks.html', output=output, look_up_type=look_up_type), 200",
"def create_search_query(words, *filters):\n if not filters:\n filters = filters + ('title',)\n\n query = ''\n\n for filter in filters:\n query += filter + ' LIKE \"%'\n\n for letter in words:\n if letter == '%':\n letter = '%\" AND ' + filter + ' LIKE \"%'\n elif letter == ' ':\n letter = '%\" OR ' + filter + ' LIKE \"%'\n\n query += letter\n\n if filter == filters[-1]:\n query += '%\"'\n else:\n query += '%\" OR '\n\n return query",
"def filter_by_query_params(self, request):\n items = self\n company = request.GET.get('company', None)\n main_contractor = request.GET.get('main_contractor', None)\n main_sub_contractor = request.GET.get('main_sub_contractor', None)\n client = request.GET.get('client', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n # filter\n if main_contractor:\n items = items.filter(main_contractor=main_contractor).distinct()\n if main_sub_contractor:\n items = items.filter(main_sub_contractor=main_sub_contractor).distinct()\n if client:\n items = items.filter(client=client).distinct()\n if company:\n items = items.filter(companies_linked__in=[company]).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(name__icontains=str)).distinct()\n return items",
"def search(title = \"\", author = \"\", year = \"\", isbn = \"\"):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"SELECT * \"\n \"FROM book \"\n \"WHERE title = %s OR author = %s OR year = %s OR isbn = %s\", \n (title, author, year, isbn))\n rows = cur_obj.fetchall()\n conn_obj.close()\n return rows",
"def book_search_results(key, title):\n\n payload = {\"key\": key, \"q\": title}\n query = requests.get(\"https://www.goodreads.com/search.xml\", params=payload)\n\n doc = untangle.parse(query.content)\n\n results = doc.GoodreadsResponse.search.results\n\n books = []\n\n if len(results) > 0:\n for work in results.work:\n book = {}\n\n book['title'] = work.best_book.title.cdata\n book['book_id'] = int(work.best_book.id.cdata.encode('utf8'))\n book['author_id'] = int(work.best_book.author.id.cdata.encode('utf8'))\n book['author_fname'] = work.best_book.author.name.cdata\n book['image_url'] = work.best_book.image_url.cdata.encode('utf8')\n books.append(book)\n\n return books",
"def fetch_querydict(self):\n query = dict()\n query[\"filtered\"] = dict()\n if self.q_dict and isinstance(self.q_dict, dict):\n query_list, filter_list = self.build_query_structure()\n if query_list:\n query[\"filtered\"][\"query\"] = {\"bool\": {\"must\": query_list}}\n if filter_list:\n query[\"filtered\"][\"filter\"] = {\"bool\": {\"must\": filter_list}}\n return query",
"def search_results():\n search = False\n if session['patron']:\n search = False\n try:\n page = int(request.args.get('page', 1))\n except ValueError:\n page = 1\n\n search_criteria = request.args.get('search')\n patron_id = session['patron']\n session['search_criteria'] = search_criteria\n\n if search_criteria != '':\n print \"do a search\"\n list_of_books = booksearch.search(search_criteria, patron_id)\n pagination = Pagination(page=page, \n total=len(list_of_books), \n search=search, \n record_name='list_of_books')\n return render_template('book_list.html', search=search_criteria,\n list_of_books=list_of_books,\n pagination=pagination,\n )\n else:\n flash(\"Please enter an author or a title.\")\n return render_template('index.html')",
"def search_book():\n\n title = request.form.get(\"search\")\n books = book_search_results(GR_KEY, title)\n acct = get_current_account(session['acct'])\n search = True\n\n return render_template(\"index.html\", books=books, acct=acct, search=search)",
"def do_search(queries):\n global documents, list_document\n results = {}\n query = tokenize(queries)\n if query == []:\n sys.exit()\n # find document ids containing all query terms. Works by\n # intersecting the posting lists for all query terms.\n relevant_document_ids = intersection(\n [set(postings[term].keys()) for term in query])\n if not relevant_document_ids:\n documents.clear()\n list_document[:] = []\n flash('empty')\n else:\n scores = sorted([(id,similarity(query,id))\n for id in relevant_document_ids],\n key=lambda x: x[1],\n reverse=True)\n print \"Score: filename\"\n global total_document_found\n total_document_found = 0\n for (id,score) in scores:\n print str(score)+\": \"+document_filenames[id]\n results[document_filenames[id]] = score\n total_document_found += 1\n flash(\"Total document found : \" + str(total_document_found) + \" of \" + str(N))\n return results",
"def show_books():\n# need + or %20 for spaces in author (set encoding?)\n\n args = request.args\n column_names = get_column_names()\n\n sql_cmd = [\"SELECT title, author FROM books\"]\n if len(args) > 0:\n for j, arg in enumerate(args):\n if arg not in column_names: # return empty list\n sql_cmd = []\n break\n else:\n if not \" WHERE \" in sql_cmd:\n sql_cmd.append(\" WHERE \")\n sql_cmd.append(\"%s='%s'\" % (arg, args[arg]))\n if j+1 < len(args):\n sql_cmd.append(\" AND \")\n sql_cmd.append(\";\")\n sql_cmd = \"\".join(sql_cmd)\n# print('sql_cmd: ', sql_cmd)\n\n books = []\n if len(sql_cmd) > 1:\n cur = g.db.cursor()\n cur.execute(sql_cmd)\n if cur:\n books = [dict(title=row[0], author=row[1]) for row in cur.fetchall()]\n# return jsonify({'results': books})\n return json_dumps({'results': books}, indent=4)",
"def build_query_structure(self):\n query_list = list()\n filter_list = list()\n for key, val in self.q_dict.items():\n if key in self.es_query_keys:\n query_list.append(\n {\"match\": {\".\".join(key.split(\"_\")): val[0]}})\n elif key in self.es_date_keys:\n filter_list.append(\n {\"range\": {\".\".join(key.split(\"_\")): val}})\n elif \":\" in val[0]:\n #for handling queries like dd_dct=gte:1\n range_val = val[0].split(\":\")\n filter_list.append({\"range\": {\".\".join(key.split(\"_\")): {\n range_val[0]: int(range_val[1])}}})\n else:\n filter_list.append(\n {\"terms\": {\".\".join(key.split(\"_\")): val}})\n return query_list, filter_list",
"def search_for_redbooks(book_codes: tuple):\n\n book_dict = {}\n\n global setup\n\n for book_code in book_codes:\n URI_string = build_URI_string(book_code)\n search_web_page = requests.get(URI_string)\n if search_web_page.status_code != 200:\n print(\"Book with code {} not found! Continuing...\".format(book_code))\n continue\n web_page_content = search_web_page.content\n soup = BS(web_page_content, 'html.parser')\n book_name = soup.find('h1',{'class':'ibm-h1','id':'ibm-pagetitle-h1'}).text\n book_dict[book_code] = book_name\n\n return book_dict"
] | [
"0.7645307",
"0.6790778",
"0.6598476",
"0.641544",
"0.6351727",
"0.63514113",
"0.6336503",
"0.6308661",
"0.62955046",
"0.62509865",
"0.6181599",
"0.61077005",
"0.6104971",
"0.6099935",
"0.6044743",
"0.6006426",
"0.5989734",
"0.597283",
"0.58954346",
"0.5888524",
"0.58327734",
"0.582546",
"0.5807261",
"0.5796085",
"0.57755715",
"0.57663435",
"0.5761106",
"0.57340324",
"0.5726285",
"0.5698283"
] | 0.8094429 | 0 |
Given an author name and a degree of separation, return a list of books written by authors who are that many degrees of separation away from the specified author. | def find_books_by_author_separation(self, name, degree):
self.cursor.execute("""SELECT ID FROM author WHERE name=%s""", (name,))
original_author_id = int(self.cursor.fetchone()[0])
self.cursor.execute("""SELECT ISBN FROM wrote WHERE authorID=%s""", (original_author_id,))
first_degree_authors = []
for original_author_books in self.cursor.fetchall():
self.cursor.execute("""SELECT authorID FROM wrote WHERE ISBN=%s AND authorID <> %s""",
(original_author_books[0], original_author_id))
for author_id in self.cursor.fetchall():
first_degree_authors.append(author_id[0])
first_degree_results = {}
for author in first_degree_authors:
self.cursor.execute("""SELECT ISBN FROM wrote WHERE authorID=%s""",(author,))
for ISBN in self.cursor.fetchall():
book, author_list = self.get_single_book_info(ISBN[0])
first_degree_results[ISBN[0]] = [book, author_list]
if int(degree) == 1:
return first_degree_results
second_degree_results = {}
authors_to_check = []
all_authors = []
self.cursor.execute("""SELECT authorID FROM wrote""")
for author in self.cursor.fetchall():
all_authors.append(int(author[0]))
for author1 in first_degree_authors:
for author2 in all_authors:
if (self.is_one_degree_separated(author1, author2) and author1 != author2
and author2 != original_author_id):
authors_to_check.append(author2)
second_degree_authors = [a for a in authors_to_check if a not in first_degree_authors]
for author in second_degree_authors:
self.cursor.execute("""SELECT ISBN FROM wrote WHERE authorID=%s""", (author,))
for ISBN in self.cursor.fetchall():
book, author_list = self.get_single_book_info(ISBN[0])
second_degree_results[ISBN[0]] = [book, author_list]
return second_degree_results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]",
"def books_by_author(self, author):\n request_url = \"%s?author=%s\" % (self.API_URL, author)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books = []\n for book in json_data['docs']:\n books.append(book['title_suggest'])\n return books",
"def filterListOfAuthors(name: str, birth: str, page: int, perPage: int) -> [Author]:\n authors = Author.query.filter(Author.name.ilike(\n f\"%{name}%\")).filter(Author.birth.ilike(f\"%{birth}%\")).paginate(page=page, per_page=perPage)\n if authors:\n return [author.serialize() for author in authors.items]\n abort(400)",
"def is_one_degree_separated(self,author1, author2):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM wrote W WHERE W.authorID = %s AND EXISTS\n (SELECT * FROM wrote W2 WHERE W2.authorID = %s AND W.ISBN = W2.ISBN)\"\"\", (author1, author2))\n if int(self.cursor.fetchone()[0]):\n return True\n return False",
"def dfs_author(author, nodes, edges, papers, retval, max_level=1):\n for paper in author.paper_set.all():\n if paper.id in papers:\n continue\n papers.append(paper.id)\n for cite in paper.citations.all():\n for cite_author in cite.authors.all():\n add_author_node(cite_author, nodes, retval)\n edges.append( (author.id, cite_author.id) )\n if max_level > 0:\n dfs_author(cite_author, nodes, edges, papers, retval, max_level-1)\n for cite in Paper.objects.filter(citations__id__exact=paper.id):\n for cite_author in cite.authors.all():\n add_author_node(cite_author, nodes, retval)\n edges.append( (author.id, cite_author.id) )\n if max_level > 0:\n dfs_author(cite_author, nodes, edges, papers, retval, max_level-1)",
"def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break",
"def print_author_books(author, lang):\n def sort_by_page_count(book):\n return book['volumeInfo'].get('pageCount', 0)\n books = get_google_books_data(author, lang)\n books.sort(key=sort_by_page_count, reverse=True)\n\n line_fmt = '{:>4} | {:>5} | {:.65}\\n'\n lines = [\n '{sep}{h1}{sep}{h2}'.format(\n h1='{:^80}\\n'.format('\"%s\" ebooks (lang=%s)' % (author, lang)),\n h2=line_fmt.format('#', 'Pages', 'Title'),\n sep='{:=<80}\\n'.format('')\n )]\n for idx, book in enumerate(books, 1):\n accessInfo = book['accessInfo']\n if not accessInfo['epub']['isAvailable']:\n continue\n volumeInfo = book['volumeInfo']\n title = volumeInfo['title']\n subtitle = volumeInfo.get('subtitle')\n if subtitle is not None:\n title += ' / ' + subtitle\n count = volumeInfo.get('pageCount')\n pages = '{:,}'.format(count) if count is not None else ''\n lines.append(line_fmt.format(idx, pages, title))\n\n return ''.join(lines)",
"def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def query_by_author(cls,author):\n bakey = 'BookByAuthor_%s'%author\n bks = from_cache(bakey)\n if not bks:\n bks = map(lambda e:str(e.id()), SuiBook.all(keys_only=True).filter('authors =',author).fetch(100))\n to_cache(bakey,bks)\n return bks",
"def splitAuthor(authors, sep='and', first=True):\n tmp = [k.strip() for k in authors.split(sep)]\n if first:\n return tmp[0].split(',')[0].strip()\n else:\n return tmp",
"def author_name(text):\n tag = text.split()\n\n \"\"\"\n We take the beginning of the text since the\n author name will likely be there\n \"\"\"\n\n tag = tag[:100]\n author = []\n\n current_tag = 0\n \"\"\"\n We go through each word until we find the first instance\n of the word 'by' or 'author', which should mean the author\n will be written right after that.\n We save the first word after 'by' or 'author' since it should\n be the authors first name\n \"\"\"\n\n for word in tag:\n if (word.lower() == ('by') or\n word.lower() == ('author') or\n word.lower() == ('author:')):\n\n author.append(tag[current_tag+1].decode(encoding='UTF8',\n errors='ignore'))\n current_tag += 1\n tag = tag[current_tag+1:]\n break\n current_tag += 1\n\n \"\"\"\n We go through each word after the first name of the author\n until we find a word that is not capitalized. We assume that\n it marks the end of the author name.\n We then return a list of the author's name split up.\n \"\"\"\n current_tag = 0\n for word in tag:\n if tag[current_tag].lower() == 'this':\n break\n if tag[current_tag].istitle():\n author.append(tag[current_tag].decode(encoding='UTF8',\n errors='ignore'))\n current_tag += 1\n\n return author",
"def get_publications_by_author(cached_list, cached_set, author_name):\n publications = { 'dblp': [], 'cdblp': [] }\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n\n if author['dblp'].__contains__('publications'):\n publications['dblp'] = author['dblp']['publications']\n# for pub in author['dblp']['publications']:\n# print(pub)\n\n if author['cdblp'].__contains__('publications'):\n publications['cdblp'] = author['cdblp']['publications']\n# for pub in author['cdblp']['publications']:\n# print(pub)\n return publications",
"def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. '\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors",
"def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)",
"def get_ars(self, author):\n return self.divided[author][:-1]",
"def find_author(author):\n case = author_case(author)\n queries = author_queries(case, author)\n author_uri_set = set([])\n for query in queries:\n result = vivo_sparql_query(query.encode('utf-8'))\n count = len(result['results']['bindings'])\n if count == 1:\n author_uri_set = set([result['results']['bindings'][0]\\\n ['uri']['value']])\n break\n elif count > 1 and count < len(author_uri_set):\n author_uri_set = set([])\n for row in result['results']['bindings']:\n author_uri_set.add(row['uri']['value'])\n return author_uri_set",
"def filter_publication(publication, cmp_authors=True):\n query = None\n isbn_query = False\n\n # there can be ISBN query or book title query\n if publication.optionals and publication.optionals.ISBN:\n query = aleph.ISBNQuery(publication.optionals.ISBN)\n isbn_query = True\n else:\n query = aleph.TitleQuery(publication.title)\n\n result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), \"\")\n\n if not result.records:\n return publication # book is not in database\n\n # if there was results with this ISBN, compare titles of the books\n # (sometimes, there are different books with same ISBN because of human\n # errors)\n if isbn_query:\n for record in result.records:\n epub = record.epublication\n\n # try to match title of the book\n if compare_names(epub.nazev, publication.title) >= 80:\n return None # book already in database\n\n return publication\n\n # checks whether the details from returned EPublication match Publication's\n for record in result.records:\n epub = record.epublication\n\n # if the title doens't match, go to next record from aleph\n if not compare_names(epub.nazev, publication.title) >= 80:\n continue\n\n if not cmp_authors:\n return None # book already in database\n\n # compare authors names\n for author in epub.autori:\n # convert Aleph's author structure to string\n author_str = \"%s %s %s\" % (\n author.firstName,\n author.lastName,\n author.title\n )\n\n # normalize author data from `publication`\n pub_authors = map(lambda x: x.name, publication.authors)\n if type(pub_authors) not in [list, tuple, set]:\n pub_authors = [pub_authors]\n\n # try to compare authors from `publication` and Aleph\n for pub_author in pub_authors:\n if compare_names(author_str, pub_author) >= 50:\n return None # book already in database\n\n return publication # book is not in database",
"def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)",
"def _get_authors_list():\n\n articles = os.listdir(\"../data/\")\n authors = []\n for article in articles:\n with open(\"../data/\" + article, 'r') as file:\n lines = file.readlines()\n author = tuple(\n line.replace(\"\\n\", \"\").split()[1] for line in lines\n if \"Автор:\" in line\n )[0]\n authors.append(author)\n\n return authors",
"def get_author_name_urls(dept_name, dept_url):\n\t# Change to \"School of Humanities\" to match the name used in Enlighten\n\t# Done because the string obtained from http://www.gla.ac.uk/schools/ contains the Gaelic name as well\n\tif \"Humanities\" in dept_name:\n\t\tdept_name = \"School of Humanities\"\n\n\t# get list of names of researchers in department\n\tnames = get_names(dept_url)\n\n\twinning_name_urls = set()\n\n\t# loop through each name\n\tfor name in names:\n\t\tname = initialise_first_name(name)\n\t\t# Get Enlighten page on which author name will be found (page for the letter of author's last name)\n\t\tfull_url = author_list_base + \"index.\"+ name.split(\" \")[0][0] + \".html\"\n\t\ttree = get_tree(full_url)\n\t\t# Get all candidate authors which match the name\n\t\tname_urls = get_name_url_matches(name, tree)\n\t\t# If candidates were found\n\t\tif name_urls:\n\t\t\t# Filter out authors that have already been scraped\n\t\t\tname_urls = [name_url for name_url in name_urls if name_url not in winning_name_urls]\n\t\t\t# Get the first ranked (name, url) tuple for the target name from the remaining candidates\n\t\t\twinning_name_url = get_winning_url(name_urls, dept_name)\n\t\t\tif winning_name_url:\n\t\t\t\twinning_name_urls.add(winning_name_url)\n\n\treturn winning_name_urls",
"def get_quotes_for_author(self, author: str) -> List[Quote]:\n params = (f'%{author}%',)\n query = '''\n SELECT *\n FROM quotes\n WHERE author LIKE ?\n ORDER BY created_at DESC\n '''\n\n ret = self.__execute_query(query, params)\n\n return self.__build_quotes_from_query_result(ret.fetchall())",
"def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def get_paper_authors(tree):\n\tpath = '//table/tr/th[text() = \"Glasgow Author(s) Enlighten ID:\"]/following-sibling::td/a'\n\t# Get list of <a> elements, each an author\n\tauthors = tree.xpath(path)\n\t# Make list of (author name, author url) pairs to return\n\tauthors = [(author.text, author.get(\"href\")) for author in authors]\n\n\treturn authors",
"def get_external_authors_between(base, head):\n\n # Get all authors\n authors = git(\"log\", f\"{base}..{head}\", \"--format=%aN|%aE\")\n authors = set(\n author.partition(\"|\")[0].rstrip()\n for author in authors if not (author.endswith((\"@google.com\"))))\n\n # Get all co-authors\n contributors = git(\n \"log\", f\"{base}..{head}\", \"--format=%(trailers:key=Co-authored-by)\"\n )\n\n coauthors = []\n for coauthor in contributors:\n if coauthor and not re.search(\"@google.com\", coauthor):\n coauthors.append(\n \" \".join(re.sub(r\"Co-authored-by: |<.*?>\", \"\", coauthor).split())\n )\n return \", \".join(sorted(authors.union(coauthors), key=str.casefold))",
"def get_author_data():\n entry = mongo.db.Authors\n output = list()\n look_up_type = None\n if 'name' in request.args:\n look_up_type = 'name'\n print(request.args)\n if len(request.args['name']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['name'].strip('\"')\n name = entry.find({'name': {'$regex': value}})\n if name:\n for author in name:\n output.append({'name': author['name']})\n elif 'booktitle' in request.args:\n look_up_type = 'related_books'\n if len(request.args['booktitle']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['booktitle'].strip('\"')\n related_books = entry.find(\n {'author_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for title in related['author_books']:\n if value in title:\n output.append(({'related_books': title}))\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenAuthors.html', output=output, look_up_type=look_up_type), 200",
"def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")",
"def split_author(author: str) -> Tuple[str, str]:\n author = author.split(' ')\n if len(author) == 1:\n author_first, author_last = None, author[0]\n else:\n # If len is more than 2, it may be initials or a middle name; group these\n # into the first name.\n *author_first, author_last = author\n author_first = ' '.join(author_first)\n return author_first, author_last",
"def get_author_titles(author_url):\n\t# Get the html tree for the author page\n\tauthor_page_tree = get_tree(author_url)\n\t# Get the <a> elements for the papers on the author's page\n\ta_elems = get_a_elems_for_papers(author_page_tree)\n\n\tall_titles = []\n\tlinks = []\n\t# Loop through a elements and put associated href and text into respective lists\n\tfor a in a_elems:\n\t\tall_titles.append(a.text_content())\n\t\tlinks.append(a.get(\"href\"))\n\n\t# Create list of (title, url) tuples\n\ttitles_links = zip(all_titles, links)\n\t# Get the list of titles of papers that have been tagged with a subject\n\ttagged_titles = get_tagged_titles(titles_links)\n\t# Return the 2 lists in a tuple\n\treturn (all_titles, tagged_titles)"
] | [
"0.63213474",
"0.583642",
"0.576012",
"0.55486053",
"0.5524639",
"0.5456475",
"0.54263955",
"0.54165334",
"0.54048514",
"0.54046386",
"0.53530794",
"0.5344667",
"0.53124434",
"0.5257112",
"0.52389055",
"0.5231252",
"0.5223985",
"0.5173741",
"0.51708895",
"0.5150454",
"0.51119715",
"0.5110372",
"0.5101695",
"0.5067067",
"0.5058792",
"0.50475323",
"0.50443786",
"0.5039072",
"0.5032424",
"0.50301635"
] | 0.796402 | 0 |
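The find_books_by_author_separation record above derives first- and second-degree co-author sets through repeated SQL round trips. As a reading aid, here is a minimal in-memory Python sketch of the same co-authorship idea, with purely hypothetical author IDs and ISBNs standing in for the wrote table:

```python
# Hypothetical stand-in for the `wrote` table: ISBN -> author IDs.
wrote = {
    "978-0": [1, 2],
    "978-1": [2, 3],
    "978-2": [3, 4],
}

def coauthors(author_id):
    """Authors sharing at least one ISBN with author_id (one degree apart)."""
    out = set()
    for authors in wrote.values():
        if author_id in authors:
            out.update(authors)
    out.discard(author_id)
    return out

def authors_at_degree(author_id, degree):
    """First- or second-degree co-authors, mirroring the record's two branches."""
    first = coauthors(author_id)
    if degree == 1:
        return first
    second = set()
    for a in first:
        second.update(coauthors(a))
    return second - first - {author_id}

print(authors_at_degree(1, 1))  # {2}
print(authors_at_degree(1, 2))  # {3}
```

Books at a given degree then follow by collecting the ISBNs written by those authors, which is what the record's final loops do against the database.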
Utility function that determines if two authors are one degree separated. | def is_one_degree_separated(self,author1, author2):
self.cursor.execute("""SELECT COUNT(*) FROM wrote W WHERE W.authorID = %s AND EXISTS
(SELECT * FROM wrote W2 WHERE W2.authorID = %s AND W.ISBN = W2.ISBN)""", (author1, author2))
if int(self.cursor.fetchone()[0]):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)",
"def isComrade(self, other): # are the pieces comrades ?\r\n \r\n if self.name == other.name: \r\n return True\r\n else:\r\n return False",
"def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False",
"def _degree_has_changed(first, second):\n return len(set(first) ^ set(second)) != 0",
"def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass",
"def has_duplicates_authors(L):\r\n # make a copy of t to avoid modifying the parameter\r\n s = L[:]\r\n s.sort()\r\n\r\n # check for adjacent elements that are equal\r\n for i in range(len(s)-1):\r\n if s[i] == s[i+1]:\r\n return True\r\n return False",
"def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False",
"def is_equivalence(self) -> bool:",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)",
"def _pareto_dominance(self, party1: int, party2: int, profile: list[set[int]]) -> bool:\n strict_preference = False\n for ballot in profile:\n if party2 in ballot and party1 not in ballot:\n return False\n if party1 in ballot and party2 not in ballot:\n strict_preference = True\n return strict_preference",
"def matchAuthorBySign(sign = None):\n if not sign:\n return\n citations = allCitationsKnown(Graph())\n\n #print(citations + \" == \" + sign)\n if citations.find(sign.strip()) != -1:\n return True\n else:\n return False",
"def parallel(self, other):\n return other.angle == self.angle",
"def parallel(self, other):\n return other.angle == self.angle",
"def _is_authored_by_target(self, speech):\n speaker = speech.find(\"p\", klasse=\"redner\").redner.find(\"name\")\n\n if not speaker.vorname or not speaker.nachname:\n return False\n\n first_name = str(speaker.vorname.string)\n last_name = str(speaker.nachname.string)\n\n return first_name == self.first_name and last_name == self.last_name",
"def _coincident(a,b):\n return np.array_equal(a, b) or np.array_equal(np.flipud(a),b)",
"def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)",
"def test_accidentals(score1, score2, measure = 0, part = 0):\n\tdiff = ScoreDiff(score1, score2, path)\n\treturn diff.have_same_accidentals(measure, part)",
"def isIsosceles(self):\n\t\treturn self.a == self.b or self.a == self.c or self.b == self.c",
"def disjoint(self, other): # -> bool:\n ...",
"def is_hom(self) -> bool:\n if self.is_null():\n return False\n if self.allele1 == -1 or self.allele2 == -1:\n return True\n return self.allele1 == self.allele2",
"def _are_assumed_equal(a, b, assumed_equivs):\n\n # Are we just assuming that the two are equal?\n equiv = (id(a), id(b))\n if equiv in assumed_equivs:\n return True\n\n # If we see these two again assume they're equal. If they're not then the\n # traversal will detect it.\n assumed_equivs.add(equiv)\n return False",
"def _has_only_dropped_degrees(first, second):\n has_dropped = len(set(first) - set(second)) > 0\n has_added = len(set(second) - set(first)) > 0\n return (has_dropped and not has_added)",
"def same_grade(self, other):\n if self._year == other.year:\n return True\n else:\n return False",
"def __eq__(self, other):\n if isinstance(other, GutenbergAuthor):\n equals = self.get_gutenberg_id() == other.get_gutenberg_id()\n else:\n equals = super().__eq__(other)\n return equals",
"def compare_dominance(parta, partb):\n if list(parta) == list(partb):\n return '=='\n if parta.degree() != partb.degree():\n return 'Non-comparable'\n else:\n out = _compare_tuples(tuple(parta), tuple(partb))\n return out",
"def is_inverse(self, other):\n return (self * other).is_identity() and (other * self).is_identity()",
"def splitAuthor(authors, sep='and', first=True):\n tmp = [k.strip() for k in authors.split(sep)]\n if first:\n return tmp[0].split(',')[0].strip()\n else:\n return tmp",
"def _chain_equal(a,b):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.array_equal(a_seg, b_seg):\n return False\n return True",
"def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'eotrts_student' or \\\n obj2._meta.app_label == 'eotrts_student':\n return True\n return None"
] | [
"0.60600275",
"0.6000368",
"0.5961616",
"0.5815673",
"0.57547796",
"0.5678407",
"0.5665919",
"0.5462363",
"0.5437373",
"0.54316366",
"0.5408115",
"0.54073846",
"0.53969944",
"0.53969944",
"0.53861034",
"0.5378126",
"0.537463",
"0.5349241",
"0.53332996",
"0.5314922",
"0.52900755",
"0.52802026",
"0.5267606",
"0.52664155",
"0.52650654",
"0.52269113",
"0.52065253",
"0.51777405",
"0.5151419",
"0.515002"
] | 0.7722055 | 0 |
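The is_one_degree_separated record reduces to a correlated EXISTS over the wrote table. A self-contained sketch of the same test, run against an in-memory SQLite database with made-up rows (SQLite uses ? placeholders where the record uses MySQL-style %s):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE wrote (authorID INTEGER, ISBN TEXT)")
cur.executemany("INSERT INTO wrote VALUES (?, ?)",
                [(1, "978-0"), (2, "978-0"), (3, "978-1")])

def one_degree_separated(a1, a2):
    # Count rows of author a1 whose ISBN also appears for author a2.
    cur.execute(
        """SELECT COUNT(*) FROM wrote W
           WHERE W.authorID = ? AND EXISTS
                 (SELECT 1 FROM wrote W2
                  WHERE W2.authorID = ? AND W2.ISBN = W.ISBN)""",
        (a1, a2))
    return cur.fetchone()[0] > 0

print(one_degree_separated(1, 2))  # True: both wrote 978-0
print(one_degree_separated(1, 3))  # False: no shared ISBN
```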
Given the ISBN of a book, retrieve the entire tuple of that book as well as its authors. | def get_single_book_info(self, isbn):
self.cursor.execute("SELECT * FROM book WHERE ISBN=%s", (isbn,))
books = self.cursor.fetchall()
for book in books:
authors = []
self.cursor.execute("""SELECT name FROM Author A, Wrote W, Book B WHERE A.ID = W.authorID AND
W.ISBN = B.ISBN AND B.ISBN = %s""", (isbn,))
for auth in self.cursor.fetchall():
authors.append(auth[0])
return book, authors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def book_by_isbn(ISBN):\n data = {}\n for book in root.findall('Book'):\n for elem in book:\n isbn = book.find('ISBN').text\n if isbn == ISBN:\n data['id'] = book.attrib['id']\n data[elem.tag] = elem.text\n return data",
"def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)",
"def get_book_by_isbn(isbn):\n return Book.get_book(isbn)",
"def get_book_data(isbn: int):\n try:\n book = next(iter(core.Book.search(('isbn', 'eq', isbn))))\n except StopIteration:\n pass # actually, I could put the whole rest of the function here\n else:\n data = core.Book.view_str(book.id)\n del data['id'], data['status'], data['return_date'], data['borrowed_by']\n del data['borrowed_by_id'], data['__str__']\n return data\n\n try:\n r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'\n + str(isbn) + '&method=simpleSearch&cqlMode=true')\n r.raise_for_status()\n except requests.exceptions.RequestException:\n raise core.BuchSchlossError('no_connection', 'no_connection')\n\n person_re = re.compile(r'(\\w*, \\w*) \\((\\w*)\\)')\n results = {'concerned_people': []}\n\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n if table is None:\n # see if we got multiple results\n link_to_first = page.select_one('#recordLink_0')\n if link_to_first is None:\n raise core.BuchSchlossError(\n 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)\n r = requests.get('https://portal.dnb.de'+link_to_first['href'])\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n\n for tr in table.select('tr'):\n td = [x.get_text('\\n').strip() for x in tr.select('td')]\n if len(td) == 2:\n if td[0] == 'Titel':\n results['title'] = td[1].split('/')[0].strip()\n elif td[0] == 'Person(en)':\n for p in td[1].split('\\n'):\n g = person_re.search(p)\n if g is None:\n continue\n g = g.groups()\n if g[1] == 'Verfasser':\n results['author'] = g[0]\n else:\n results['concerned_people'].append(g[1]+': '+g[0])\n elif td[0] == 'Verlag':\n results['publisher'] = td[1].split(':')[1].strip()\n elif td[0] == 'Zeitliche Einordnung':\n results['year'] = td[1].split(':')[1].strip()\n elif td[0] == 'Sprache(n)':\n results['language'] = td[1].split(',')[0].split()[0].strip()\n\n results['concerned_people'] = '; '.join(results['concerned_people'])\n return results",
"def get_book_info(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books_info = []\n for book in json_data['docs']:\n info = {'title': book['title']}\n if 'publisher' in book:\n info.update({'publisher': book['publisher']})\n if 'publish_year' in book:\n info.update({'publish_year': book['publish_year']})\n if 'language' in book:\n info.update({'language': book['language']})\n books_info.append(info)\n return books_info",
"def demo_get_all_books(self):\n results = []\n self.cursor.execute(\"\"\"SELECT ISBN FROM book\"\"\")\n for book in self.cursor.fetchall():\n results.append(book[0])\n return results",
"def get_books_in_order(self, orderNumber):\n self.cursor.execute(\"\"\"SELECT ISBN, quantity FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND O.orderNumber=%s\"\"\",(orderNumber,))\n result = []\n for i in self.cursor.fetchall():\n result.append([i[0],i[1]])\n return result",
"def get_author_titles(author_url):\n\t# Get the html tree for the author page\n\tauthor_page_tree = get_tree(author_url)\n\t# Get the <a> elements for the papers on the author's page\n\ta_elems = get_a_elems_for_papers(author_page_tree)\n\n\tall_titles = []\n\tlinks = []\n\t# Loop through a elements and put associated href and text into respective lists\n\tfor a in a_elems:\n\t\tall_titles.append(a.text_content())\n\t\tlinks.append(a.get(\"href\"))\n\n\t# Create list of (title, url) tuples\n\ttitles_links = zip(all_titles, links)\n\t# Get the list of titles of papers that have been tagged with a subject\n\ttagged_titles = get_tagged_titles(titles_links)\n\t# Return the 2 lists in a tuple\n\treturn (all_titles, tagged_titles)",
"def get_author_data(authors):\n\n try:\n author = authors.author.name.cdata.encode(\"utf8\")\n author_id = int(authors.author.id.cdata.encode(\"utf8\"))\n except: # FIXME: running into errors when book has multiple authors\n author = authors.author[0].cdata.encode(\"utf8\")\n author_id = authors.author[0].cdata.encode(\"utf8\")\n\n return (author, author_id)",
"def searchbook(isbn):\r\n print(\"Searching for isbn \" + isbn + \" in googlebooks...\")\r\n result = _search_by_isbn(isbn)\r\n \r\n if result[\"totalItems\"] == 0:\r\n return None\r\n \r\n b = _item2book(result[\"items\"][0])\r\n return b",
"def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]",
"def _get_book(s):\n\n s = s.strip()\n if s == '': raise SyntaxError('The book identification string is empty')\n \n author = ''\n author_start = 0\n author_found = False \n for match in author_regex.finditer(s):\n author_found = True\n author_start = match.start()\n author = match.group(1).strip()\n \n if author_found:\n title = s[:author_start].strip()\n else:\n title = s\n # Sometimes titles have a byte order mark prepended. The Kindle behavior\n # changed in 2011 to stop doing this to entries in My Clippings.txt. \n # Ensure the title is consistent, strip this character if it exists.\n BOM = '\\ufeff'\n if title.startswith(BOM):\n title = title[1:] \n \n return title,author",
"def search(self, title=\"\", author=\"\", year=\"\", isbn=\"\"):\n self.cursor.execute(\"SELECT * FROM Book WHERE Title = ? OR Author = ? \\\n OR Year = ? OR ISBN = ?\", (title, author, year, isbn))\n rows = self.cursor.fetchall()\n return rows",
"def getISBN(self):\n return self.bookISBN",
"def get_book_statistics(self, n, startDate, endDate):\n book_results = []\n author_results = []\n publisher_results = []\n\n self.cursor.execute(\"\"\"SELECT title, B.ISBN, SUM(quantity) as total FROM productof P, book B WHERE \n B.ISBN = P.ISBN AND orderNumber IN \n (SELECT orderNumber FROM orderlog WHERE orderDate >= %s AND orderDate <= %s) GROUP BY ISBN \n ORDER BY total DESC LIMIT %s\"\"\", (startDate, endDate, n))\n for book in self.cursor.fetchall():\n book_results.append(book)\n\n self.cursor.execute(\"\"\"SELECT name, SUM(quantity) as total FROM productof P, author A, wrote W\n WHERE ID=authorID AND W.ISBN = P.ISBN AND orderNumber IN \n (SELECT orderNumber FROM orderlog WHERE orderDate >= %s AND orderDate <= %s) GROUP BY name \n ORDER BY total DESC LIMIT %s\"\"\", (startDate, endDate, n))\n for author in self.cursor.fetchall():\n author_results.append(author)\n\n self.cursor.execute(\"\"\"SELECT publisher, SUM(quantity) as total FROM productof P, book B\n WHERE B.ISBN = P.ISBN AND orderNumber IN \n (SELECT orderNumber FROM orderlog WHERE orderDate >= %s AND orderDate <= %s) GROUP BY publisher \n ORDER BY total DESC LIMIT %s\"\"\", (startDate, endDate, n))\n for publisher in self.cursor.fetchall():\n publisher_results.append(publisher)\n\n return book_results, author_results, publisher_results",
"def books_by_year(year):\n lst = []\n for book in root.findall('Book'):\n published_year = book.find('Year_of_publishing').text\n if int(published_year) == year:\n lst.append(book)\n return len(lst)",
"def question_2():\n cursor.execute(mostPopAuthors)\n output = cursor.fetchall()\n return output",
"def query_by_author(cls,author):\n bakey = 'BookByAuthor_%s'%author\n bks = from_cache(bakey)\n if not bks:\n bks = map(lambda e:str(e.id()), SuiBook.all(keys_only=True).filter('authors =',author).fetch(100))\n to_cache(bakey,bks)\n return bks",
"def get_recommended_books(self, orderNumber, loginID):\n invalid_isbn_list = []\n books_in_order = []\n possible_isbn_list = []\n self.cursor.execute(\"\"\"SELECT orderNumber FROM orderlog WHERE loginID=%s\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (order[0],))\n for ISBN in self.cursor.fetchall():\n invalid_isbn_list.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (orderNumber,))\n for ISBN in self.cursor.fetchall():\n books_in_order.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT P.ISBN FROM productof P WHERE EXISTS \n (SELECT orderNumber FROM productof P2 WHERE ISBN = %s AND P2.orderNumber = P.orderNumber)\"\"\", (ISBN[0],))\n for valid_isbn in self.cursor.fetchall():\n possible_isbn_list.append(valid_isbn[0])\n valid_isbn_list = [i for i in possible_isbn_list if i not in invalid_isbn_list]\n return_list = []\n for book in valid_isbn_list:\n book, author = self.get_single_book_info(book)\n return_list.append([book, author])\n return return_list",
"def get_ebooks(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n ebooks = []\n for book in json_data['docs']:\n if book['ebook_count_i'] >= 1:\n ebooks.append({'title': book['title'], 'ebook_count': book['ebook_count_i']})\n return ebooks",
"def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0",
"def get_book_details(self):\n\n try:\n # gives response for the request from the API url\n response = requests.get(self.book_url)\n\n \n # using ElementTree to store the response content in a tree\n root = ET.fromstring(response.content)\n book = root.find('book')\n\n # getting the required details\n self.book_details[\"title\"] = book.find('title').text\n self.book_details[\"average_rating\"] = book.find('average_rating').text\n self.book_details[\"ratings_count\"] = book.find('ratings_count').text\n self.book_details[\"num_pages\"] = book.find('num_pages').text\n self.book_details[\"image_url\"] = book.find('image_url').text\n self.book_details[\"publication_year\"] = book.find('publication_year').text\n\n # getting list of all the authors\n authors = book.find('authors')\n if authors:\n author_names_list = []\n for author in authors.iter('author'):\n author_names_list.append(author.find('name').text)\n author_names_sentence = \", \".join(author_names_list)\n self.book_details[\"authors\"] = author_names_sentence\n except:\n raise Exception(\"invalid XML response\")",
"def getGRAuthorByID(id, book_callee=None, series_callee=None, printout=True): \n author_entry = session.query(author).get(id)\n if author_entry is None:\n request = requests.get('https://www.goodreads.com/author/show/'+str(id)+'.xml?key='+API_KEY['GOODREADS'])\n if request.status_code == 200:\n data = xmltodict.parse(request.text)['GoodreadsResponse']['author']\n \n auth = {}\n auth['id'] = int(data['id'])\n auth['author'] = data['name']\n auth['description'] = data['about']\n auth['hometown'] = data['hometown']\n auth['small_img'] = data['small_image_url']\n auth['large_img'] = data['image_url']\n \n author_entry = author(**auth) \n session.add(author_entry)\n session.commit() \n for key, book in data['books'].items():\n while type(book) is list:\n book = book[0]\n if type(book) is OrderedDict and (book_callee is None or book_callee != int(book['id']['#text'])):\n a_book = getGRBookByID(int(book['id']['#text']), id)\n if a_book is not None:\n session.query(author).get(id).books.append(a_book)\n session.commit()\n if(printout):\n print(author_entry)\n \n return author_entry",
"def search(title = \"\", author = \"\", year = \"\", isbn = \"\"):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"SELECT * \"\n \"FROM book \"\n \"WHERE title = %s OR author = %s OR year = %s OR isbn = %s\", \n (title, author, year, isbn))\n rows = cur_obj.fetchall()\n conn_obj.close()\n return rows",
"def find_books_by_author_separation(self, name, degree):\n self.cursor.execute(\"\"\"SELECT ID FROM author WHERE name=%s\"\"\", (name,))\n original_author_id = int(self.cursor.fetchone()[0])\n self.cursor.execute(\"\"\"SELECT ISBN FROM wrote WHERE authorID=%s\"\"\", (original_author_id,))\n first_degree_authors = []\n for original_author_books in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT authorID FROM wrote WHERE ISBN=%s AND authorID <> %s\"\"\",\n (original_author_books[0], original_author_id))\n for author_id in self.cursor.fetchall():\n first_degree_authors.append(author_id[0])\n first_degree_results = {}\n for author in first_degree_authors:\n self.cursor.execute(\"\"\"SELECT ISBN FROM wrote WHERE authorID=%s\"\"\",(author,))\n for ISBN in self.cursor.fetchall():\n book, author_list = self.get_single_book_info(ISBN[0])\n first_degree_results[ISBN[0]] = [book, author_list]\n if int(degree) == 1:\n return first_degree_results\n\n second_degree_results = {}\n authors_to_check = []\n all_authors = []\n self.cursor.execute(\"\"\"SELECT authorID FROM wrote\"\"\")\n for author in self.cursor.fetchall():\n all_authors.append(int(author[0]))\n for author1 in first_degree_authors:\n for author2 in all_authors:\n if (self.is_one_degree_separated(author1, author2) and author1 != author2\n and author2 != original_author_id):\n authors_to_check.append(author2)\n second_degree_authors = [a for a in authors_to_check if a not in first_degree_authors]\n for author in second_degree_authors:\n self.cursor.execute(\"\"\"SELECT ISBN FROM wrote WHERE authorID=%s\"\"\", (author,))\n for ISBN in self.cursor.fetchall():\n book, author_list = self.get_single_book_info(ISBN[0])\n second_degree_results[ISBN[0]] = [book, author_list]\n return second_degree_results",
"def get_paper_authors(tree):\n\tpath = '//table/tr/th[text() = \"Glasgow Author(s) Enlighten ID:\"]/following-sibling::td/a'\n\t# Get list of <a> elements, each an author\n\tauthors = tree.xpath(path)\n\t# Make list of (author name, author url) pairs to return\n\tauthors = [(author.text, author.get(\"href\")) for author in authors]\n\n\treturn authors",
"def consult_books(self, bar_code: str):\n try:\n book_data = []\n self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))\n for i in self.db.cursor.fetchall():\n book_data.append(i)\n except Exception as error:\n print(error)\n else:\n print(f\"ID BOOK: {book_data[0][0]}\\n\"\n f\"TITLE: {book_data[0][1]}\\n\"\n f\"AUTHOR: {book_data[0][2]}\\n\"\n f\"PRICE: R$:{book_data[0][3]}\\n\"\n f\"BAR CODE: {book_data[0][4]}\\n\"\n f\"STOCK: {book_data[0][5]}\")",
"def get_author_data():\n entry = mongo.db.Authors\n output = list()\n look_up_type = None\n if 'name' in request.args:\n look_up_type = 'name'\n print(request.args)\n if len(request.args['name']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['name'].strip('\"')\n name = entry.find({'name': {'$regex': value}})\n if name:\n for author in name:\n output.append({'name': author['name']})\n elif 'booktitle' in request.args:\n look_up_type = 'related_books'\n if len(request.args['booktitle']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['booktitle'].strip('\"')\n related_books = entry.find(\n {'author_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for title in related['author_books']:\n if value in title:\n output.append(({'related_books': title}))\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenAuthors.html', output=output, look_up_type=look_up_type), 200",
"def read_book(url,book_num):\n\t#calls open_url function to open the url\n\tbook_contents = open_url(url)\n\tif book_contents != None:\n\t\t#calls filter data function to clean the data\n\t\tclean_data = filter_data(book_contents)\n\t\t#create dictionary for all the words in this book with 0's filling for count in all the books\n\t\tcreate_dict(clean_data)\n\t\treturn clean_data\n\telse:\n\t\treturn []",
"def get_books_by_read_value(self, read):\n try:\n cur = self._db.cursor()\n cur.execute('SELECT rowid, * FROM books WHERE read = ?', (read,))\n return self._cursor_to_booklist(cur)\n\n except sqlite3.Error as e:\n raise BookError(f'Error getting books with read = {read}') from e"
] | [
"0.683065",
"0.6526513",
"0.6487258",
"0.625673",
"0.6255212",
"0.6182602",
"0.61770684",
"0.60605705",
"0.6013382",
"0.6001543",
"0.58569443",
"0.5854576",
"0.58373415",
"0.5809132",
"0.5758391",
"0.56655586",
"0.56175834",
"0.55987334",
"0.5571843",
"0.5569836",
"0.55333585",
"0.55267066",
"0.5509423",
"0.5499541",
"0.5495721",
"0.5475535",
"0.5474431",
"0.54670113",
"0.5452748",
"0.5420273"
] | 0.7923752 | 0 |
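The get_single_book_info record fetches the book row and then joins author names through wrote. A hedged, self-contained version of that lookup against an in-memory SQLite schema with invented rows:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.executescript("""
    CREATE TABLE book   (ISBN TEXT PRIMARY KEY, title TEXT);
    CREATE TABLE author (ID INTEGER PRIMARY KEY, name TEXT);
    CREATE TABLE wrote  (authorID INTEGER, ISBN TEXT);
    INSERT INTO book   VALUES ('978-0', 'Example Title');
    INSERT INTO author VALUES (1, 'A. Writer'), (2, 'B. Coauthor');
    INSERT INTO wrote  VALUES (1, '978-0'), (2, '978-0');
""")

def single_book_info(isbn):
    cur.execute("SELECT * FROM book WHERE ISBN = ?", (isbn,))
    book = cur.fetchone()                      # the whole book tuple
    cur.execute("""SELECT A.name FROM author A JOIN wrote W ON A.ID = W.authorID
                   WHERE W.ISBN = ?""", (isbn,))
    authors = [row[0] for row in cur.fetchall()]
    return book, authors

print(single_book_info("978-0"))
# e.g. (('978-0', 'Example Title'), ['A. Writer', 'B. Coauthor'])
```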
Given an order number, return a list of several (we'll say 10 max) suggestions of books for the user to purchase. Need to get all the books from this order, then search all other orders and recommend books those customers ordered. Also need to find all of the books this user has ordered in the past, since we do not want to recommend books they already purchased. | def get_recommended_books(self, orderNumber, loginID):
invalid_isbn_list = []
books_in_order = []
possible_isbn_list = []
self.cursor.execute("""SELECT orderNumber FROM orderlog WHERE loginID=%s""", (loginID,))
for order in self.cursor.fetchall():
self.cursor.execute("""SELECT ISBN FROM productof WHERE orderNumber=%s""", (order[0],))
for ISBN in self.cursor.fetchall():
invalid_isbn_list.append(ISBN[0])
self.cursor.execute("""SELECT ISBN FROM productof WHERE orderNumber=%s""", (orderNumber,))
for ISBN in self.cursor.fetchall():
books_in_order.append(ISBN[0])
self.cursor.execute("""SELECT P.ISBN FROM productof P WHERE EXISTS
(SELECT orderNumber FROM productof P2 WHERE ISBN = %s AND P2.orderNumber = P.orderNumber)""", (ISBN[0],))
for valid_isbn in self.cursor.fetchall():
possible_isbn_list.append(valid_isbn[0])
valid_isbn_list = [i for i in possible_isbn_list if i not in invalid_isbn_list]
return_list = []
for book in valid_isbn_list:
book, author = self.get_single_book_info(book)
return_list.append([book, author])
return return_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_books_in_order(self, orderNumber):\n self.cursor.execute(\"\"\"SELECT ISBN, quantity FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND O.orderNumber=%s\"\"\",(orderNumber,))\n result = []\n for i in self.cursor.fetchall():\n result.append([i[0],i[1]])\n return result",
"def get_order_books(self):\n return self.execute_http_call(\"/api/order_books\", \"GET\", headers=None)",
"def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])",
"def returnOrderBook(self, limit=25):\n orders = self.dpay.rpc.get_order_book(limit, api=\"market_history\")\n r = {\"asks\": [], \"bids\": []}\n for side in [\"bids\", \"asks\"]:\n for o in orders[side]:\n r[side].append({\n 'price': float(o[\"price\"]),\n 'bbd': o[\"bbd\"] / 10 ** 3,\n 'dpay': o[\"dpay\"] / 10 ** 3,\n })\n return r",
"def query_top_recommended(cls,N=10):\n brkey = 'BooksMostRecommended'\n bks = from_cache(brkey)\n if not bks:\n bks = map(lambda e:str(e.id()), SuiBook.all(keys_only=True).order('-recommends').fetch(N))\n to_cache(brkey,bks)\n return bks",
"def books_by_author(self, author):\n request_url = \"%s?author=%s\" % (self.API_URL, author)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books = []\n for book in json_data['docs']:\n books.append(book['title_suggest'])\n return books",
"def get_books_read():\n search_criteria = request.args.get('search')\n if search_criteria == \"\":\n list_of_books = []\n\n flash(\"Please enter an author or a title.\")\n\n return render_template('finished_book_list.html',\n list_of_books=list_of_books\n )\n else:\n print \"get a selection of books\"\n list_of_books = model.get_finished_books_by_criteria(search_criteria, \n session['patron'])\n\n return render_template('finished_book_list.html',\n list_of_books=list_of_books\n )",
"def search_for_books(search_criteria, product_url, headers):\n\tprint \"od api in search_for_books \"\n\tlist_of_books = []\n\tlist_book = []\n\tq = search_criteria\n\tlimit = 300\t\t# 25 by default 300 max\n\toffset = 0\t\t# number of titles to skip\n\tformats = \"\"\n\tsort = \"Author:desc\" \t\t# :desc\n\tlastupdatetime = \"\" \n\tseries = \"\" \n\tsearch_parms = \"?q=%s&limit=%s&offset=0&formats=%s&sort=%s\" % (q, limit, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formats, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sort)\n\tod_url=\"%s%s\" % (product_url, search_parms)\n\n\tprint \"overdrive url = \", od_url, \"\\n\"\n\tod_url = od_url.replace(' ', '%20')\n\tbook_response = requests.get(od_url, headers=headers)\n\n\tprint \"book search response == \", book_response, \"reason = \", book_response.reason, \"\\n\"\n\tif book_response.status_code == 401:\n\t print \"Patron is not authorize to use this library == \", od_url, \"\\n\"\n\telif book_response.status_code > 201:\n\t\tprint \"Get request failed == \", book_response.reason\n\telif book_response.status_code == 200 or book_response.status_code == 201:\n\t\tprint \"Get request to get the a list of books was successful\", \"\\n\"\n\n\t\tbook_response_data = json.loads(book_response.content)\n\t\tprint \"OverDrive book count == \", book_response_data['totalItems'], \"\\n\"\n\n\t\tif book_response_data['totalItems'] > 0:\n\t\t\tproducts = book_response_data['products']\n\t\t\tfor product in products:\n\t\t\t\tbook_data = {}\t\n\t\t\t\tbook_data['images'] = product['images']['thumbnail']['href']\n\t\t\t\tbook_data['title'] = product['title']\n\t\t\t\tbook_data['author'] = product['primaryCreator']['name']\n\t\t\t\tbook_data['availableToDownload'] = product['links']['availability']['href']\n\t\t\t\tbook_data['id'] = product['id']\n\t\t\t\tbook_data['metadata'] = product['links']['metadata']['href']\n\t\t\t\tbook_data['origin'] = 'ODCOM'\n\t\t\t\tlist_book = [book_data]\n\t\t\t\tlist_of_books.extend(list_book)\n\t\t\t#end for\n\t\t#end if\n\t#end if\n\n\treturn list_of_books",
"def create_get_order_book_request(self, symbol: str,\n limit: Optional[int] = None\n ) -> Request:",
"def order_book_fetch(self, symbol):\n orderbook = self.orderbooks[symbol]\n asks = [[float(price), float(stats[0]) * float(stats[1])] for price, stats in orderbook['asks'].items()]\n bids = [[float(price), float(stats[0]) * float(stats[1])] for price, stats in orderbook['bids'].items()]\n return asks, bids, orderbook",
"def get_books(**kwargs):\n\n global book_list\n\n if len(kwargs) == 0:\n return book_list\n elif 'read' in kwargs:\n read_books = [book for book in book_list if book.read == kwargs['read']]\n return read_books\n else:\n found_books = [book for book in book_list if book.title == kwargs['title']]\n return found_books",
"def order_book(symbol: str,\n number_of_data_points: int = 1,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True):\n try:\n check_exchange_existence(exchange=exchange)\n return asyncio.get_event_loop().run_until_complete(\n getOrderBook(symbol=symbol,\n number_of_data_points=number_of_data_points,\n exchange=exchange,\n rate_limit=rate_limit))\n except Exception as exception:\n logger.error('Oops! An error Occurred ⚠️')\n raise exception",
"def book(self, irc, msg, args, thing):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n results = self.db.getCurrencyBook(thing)\n if len(results) == 0:\n irc.error(\"No orders for this currency present in database.\")\n return\n if len(results) > self.registryValue('maxOrdersInBookList'):\n irc.error(\"Too many orders to list on IRC. Visit the web \"\n \"order book, http://bitcoin-otc.com/vieworderbook.php?eitherthing=%s \"\n \"to see list of orders for this item.\" % (thing,))\n return\n self._getMtgoxQuote()\n L = [\"#%s %s %s %s %s %s @ %s %s (%s)\" % (id,\n time.ctime(refreshed_at),\n nick,\n buysell,\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes) \\\n for (id,\n created_at,\n refreshed_at,\n buysell,\n nick,\n host,\n amount,\n thing,\n price,\n otherthing,\n notes) in results]\n irc.replies(L, joiner=\" || \")",
"def book(args: list, update: Update) -> None:\n\n book_list = []\n if len(args) > 1:\n update.message.reply_text('fetching books, this may take a while...')\n book_list = scrape(' '.join(args[1:]))\n update.message.reply_text(f'found {len(book_list)} books')\n counter = 0\n msg = ''\n\n if len(book_list) > 0:\n for book in book_list:\n msg = msg + f'{book.Title} - {book.Author}\\n'\n counter += 1\n if counter == 5:\n msg = msg + '...'\n break\n update.message.reply_text(msg)\n\n else:\n update.message.reply_text(\n 'please add the name of the book after /book')",
"async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n await self.load_markets()\n precision = self.safe_value(self.options, 'precision', 'R0')\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n 'precision': precision,\n }\n if limit is not None:\n request['len'] = limit # 25 or 100\n fullRequest = self.extend(request, params)\n orderbook = await self.publicGetBookSymbolPrecision(fullRequest)\n timestamp = self.milliseconds()\n result = {\n 'symbol': market['symbol'],\n 'bids': [],\n 'asks': [],\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'nonce': None,\n }\n priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0\n for i in range(0, len(orderbook)):\n order = orderbook[i]\n price = self.safe_number(order, priceIndex)\n signedAmount = self.safe_string(order, 2)\n amount = Precise.string_abs(signedAmount)\n side = 'bids' if Precise.string_gt(signedAmount, '0') else 'asks'\n result[side].append([price, self.parse_number(amount)])\n result['bids'] = self.sort_by(result['bids'], 0, True)\n result['asks'] = self.sort_by(result['asks'], 0)\n return result",
"def order_book(self, pairs, limit=100):\n if isinstance(pairs, (list, tuple, set)):\n pairs = ','.join(pairs)\n if not isinstance(pairs, str):\n raise ValueError('The `pairs` argument must be a list, tuple, set or a string.')\n pairs = pairs.upper()\n\n max_positions = 1000\n limit = min(limit, max_positions)\n response = self.query('order_book', params={'pair': pairs, 'limit': limit})\n return response",
"def get_orderbooks(self, pair='btc_jpy'):\n url = 'https://coincheck.com/api/order_books'\n r = requests.get(url, {'pair': pair}, timeout=self.timeout)\n\n return json.loads(r.text)",
"def get_books_by_ids(self, book_ids):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bins_by_ids_template\n # NOTE: This implementation currently ignores plenary view\n # Also, this should be implemented to use get_Book() instead of direct to database\n if self._catalog_session is not None:\n return self._catalog_session.get_catalogs_by_ids(catalog_ids=book_ids)\n catalog_id_list = []\n for i in book_ids:\n catalog_id_list.append(ObjectId(i.get_identifier()))\n collection = JSONClientValidated('commenting',\n collection='Book',\n runtime=self._runtime)\n result = collection.find({'_id': {'$in': catalog_id_list}}).sort('_id', DESCENDING)\n\n return objects.BookList(result, runtime=self._runtime, proxy=self._proxy)",
"def get_book_list(session):\n debug('Getting the book list')\n\n purchased_packages = session.get('https://leanpub.com/api/v1/purchased_packages?include=book&archived=false&type=library').json()\n\n books_to_download = []\n\n for purchased_package in purchased_packages['data']:\n book_to_download = {\n 'id': purchased_package['attributes']['short_url']\n }\n\n book = None\n\n for included in purchased_packages['included']: # Get the book data\n if included['id'] == purchased_package['relationships']['book']['data']['id'] and included['type'] == 'Book':\n book = included['attributes']\n\n if not book:\n debug('Book not found for id #' + purchased_package['relationships']['book']['data']['id'], err=True)\n continue\n\n book_to_download['name'] = book['title']\n book_to_download['format'] = get_format_to_download(book, env('PREFERED_FORMAT'))\n\n books_to_download.append(book_to_download)\n\n debug('{} books to download'.format(len(books_to_download)))\n\n return books_to_download",
"async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n if limit is not None:\n request['depth'] = limit\n response = await self.publicGetOrderBookL2(self.extend(request, params))\n result = {\n 'symbol': symbol,\n 'bids': [],\n 'asks': [],\n 'timestamp': None,\n 'datetime': None,\n 'nonce': None,\n }\n for i in range(0, len(response)):\n order = response[i]\n side = 'asks' if (order['side'] == 'Sell') else 'bids'\n amount = self.convert_from_raw_quantity(symbol, self.safe_string(order, 'size'))\n price = self.safe_number(order, 'price')\n # https://github.com/ccxt/ccxt/issues/4926\n # https://github.com/ccxt/ccxt/issues/4927\n # the exchange sometimes returns null price in the orderbook\n if price is not None:\n result[side].append([price, amount])\n result['bids'] = self.sort_by(result['bids'], 0, True)\n result['asks'] = self.sort_by(result['asks'], 0)\n return result",
"def get_od_books(search_criteria, od_library_rows):\n\tprint \" in get_od_books \"\n\tbook_lists = []\n\toverdrive_client_app_fields, response = log_into_overdrive()\n\taccess_token = overdrive_client_app_fields['access_token']\n\tprint response.status_code, \" == \", response.reason\n\tif response.status_code > 201:\n\t flash((\"Action was not successful. %s == %s\\n\") % \n\t (response.status_code, response.reason))\n\telif response.status_code == 200 or response.status_code == 201:\n\t client_credentials = response.content\n\t print \"Post to get access token was successful\"\n\t#end \n\n\tfor library_fields in od_library_rows:\n\t\tbook_list = []\n\t\tproduct_url, headers = get_library_product_url(library_fields, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\taccess_token)\n\t\tbook_list = search_for_books(search_criteria, product_url, headers)\n\t\tbook_lists.extend(book_list)\n\t#end for\n\tprint \" number of books in book list == \", len(book_lists), \"\\n\"\n\treturn book_lists",
"def get_ebooks(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n ebooks = []\n for book in json_data['docs']:\n if book['ebook_count_i'] >= 1:\n ebooks.append({'title': book['title'], 'ebook_count': book['ebook_count_i']})\n return ebooks",
"def search_books_body(collection_of_books: tuple) -> list:\r\n search_tag, search_keyword = search_info()\r\n try:\r\n found_books = search(collection_of_books, search_tag, search_keyword)\r\n except KeyError as err:\r\n print(f\"Invalid input: {err}\\n\"\r\n f\"{err} raised KeyError. Please follow the instruction carefully.\\n\")\r\n else:\r\n print_as_ordered_list(found_books)\r\n\r\n return found_books",
"def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books",
"def get_books_by_genus_type(self, book_genus_type):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bins_by_genus_type_template\n # NOTE: This implementation currently ignores plenary view\n if self._catalog_session is not None:\n return self._catalog_session.get_catalogs_by_genus_type(catalog_genus_type=book_genus_type)\n collection = JSONClientValidated('commenting',\n collection='Book',\n runtime=self._runtime)\n result = collection.find({\"genusTypeId\": str(book_genus_type)}).sort('_id', DESCENDING)\n\n return objects.BookList(result, runtime=self._runtime, proxy=self._proxy)",
"def search_by_chosen_option(library: list, chosen_option: str) -> None:\n user_input = input(f'What is the name of the {chosen_option} you want to search for?')\n found_books = []\n for book in library:\n if user_input.lower() in str(getattr(book, chosen_option.lower())).lower():\n found_books.append(book)\n print(f'We found {len(found_books)} book(s) that matched this search in your library.\\n')\n for num, book in enumerate(found_books, 1):\n print(f'{num} - {book.__repr__()}')\n if len(found_books) > 0 and not return_to_main_menu():\n move_book(library, found_books)",
"def get_n_most_read_books(self, n):\n if type(n) != int:\n print(\"The argument n = {n} is not an integer. Try again with an integer\".format(n=n))\n else:\n sorted_books = [ book for book in sorted(self.books, key=self.books.get, reverse=True)]\n return sorted_books",
"def get_books():\n\n acct = get_current_account(session['acct'])\n user = get_user_by_acct(acct)\n search = False\n get_all_books_for_user(user, GR_KEY)\n\n return render_template(\"index.html\", acct=acct, search=search)",
"def get_user_books(user_id):\n return session.query(Book).filter(Book.user_id == user_id).all()",
"def find_books(self):\n search_query = unicode(self.search_input.data)\n q = u'%{}%'.format(search_query)\n\n # used for dummy emulation of caseinsensetive search\n qC = u'%{}%'.format(capfirst(search_query))\n\n books = Book.query.filter(db.or_(\n Book.authors.any(db.or_(\n Author.name.like(q),\n Author.name.like(qC))),\n Book.title.like(q),\n Book.title.like(qC)),)\n\n return books"
] | [
"0.66760135",
"0.6438365",
"0.58706945",
"0.5765255",
"0.5621472",
"0.5576812",
"0.55767345",
"0.55294985",
"0.550628",
"0.5500204",
"0.54980177",
"0.54707557",
"0.5468065",
"0.5464387",
"0.5456998",
"0.5440048",
"0.5420678",
"0.5417453",
"0.53468436",
"0.529879",
"0.52903426",
"0.52875775",
"0.52621436",
"0.5248135",
"0.52375937",
"0.52071315",
"0.52006197",
"0.5193683",
"0.5182514",
"0.5180961"
] | 0.72850907 | 0 |
Given the ISBN of a book and a quantity, add that many copies of the book to inventory. If successful, return True. Otherwise, return False (this means the user entered an ISBN that is not present in the database). | def restock_book(self, isbn, quantity):
self.cursor.execute("""SELECT COUNT(*) FROM book WHERE ISBN=%s""", (isbn,))
if self.cursor.fetchone()[0]:
self.cursor.execute("""UPDATE book set stock=stock+%s WHERE ISBN=%s""", (quantity, isbn))
self.db.commit()
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)",
"def add_to_cart(db, itemid, quantity):",
"def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)",
"def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)",
"def is_in_stock(self, bookID):\n query = f\"\"\"SELECT quantity from {TABLE} where bookID = '{bookID}';\"\"\"\n self.cursor.execute(query)\n\n q = self.cursor.fetchone()\n\n if q[0] > 0:\n return True\n else:\n return False",
"def add_cartons(db, cartons, tag):\n # stock cartons\n for key, value in cartons.iteritems():\n length, width, height = value.split('x')\n d = Box('box'+key, tags={tag}, length=int(float(length)), width=int(float(width)), height=int(float(height)))\n db.session.add(d)\n\n try:\n db.session.commit()\n except IntegrityError:\n return 1\n print(\"Duplicate boxes found!\")\n return 0",
"def add_book(code: str, name: str, author: str, quantity: int):\n pass",
"def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0",
"def add_to_basket(request, item_id):\n\n artwork = get_object_or_404(Artwork, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n\n basket = request.session.get('basket', {})\n\n if item_id in list(basket.keys()):\n basket[item_id] += quantity\n messages.success(\n request, f'Added {artwork.title} to {basket[item_id]}.' +\n 'Please make sure your' +\n ' basket total is not more than the quantity available.')\n\n else:\n basket[item_id] = quantity\n messages.success(request, f'Added {artwork.title} to your basket')\n request.session['basket'] = basket\n return redirect(redirect_url)",
"def update_isbn(genome):\n itemid = genome.get('metadata').get('identifier')\n # Checks if ia item already has isbn\n item = ia.get_item(itemid)\n metadata = item.item_metadata['metadata']\n if 'isbn' in metadata:\n item_isbn = item.item_metadata['metadata']['isbn'][0]\n else:\n item_isbn = False\n genome_isbn = get_canonical_isbn(genome)\n if genome_isbn:\n db_isbn_extracted(itemid, genome_isbn)\n if not item_isbn:\n try:\n update = item.modify_metadata(dict(isbn=genome_isbn))\n if update.status_code == 200:\n db_update_succeed(itemid)\n except Exception as e:\n db_update_failed(itemid)\n raise e\n else:\n db_update_conflict(itemid)\n else:\n db_isbn_none(itemid)",
"def store_inventory(self, batch, location, quantity, inventory_stock):\n # no transaction needed\n logger.info('ReleaseDiscard store inventory initiated')\n to_inventory = self.Inventory.search([('location', '=', location.id), ('batch_number', '=', batch)])\n if to_inventory:\n return self.update_store(to_inventory[0], quantity, inventory_stock)\n inventory = self.Inventory()\n inventory.location = location\n inventory.batch_number = batch\n inventory.save()\n inventory_line = self.InventoryLine()\n inventory_line.product = inventory_stock.product\n inventory_line.quantity = float(quantity)\n inventory_line.uom = inventory_stock.uom\n inventory_line.supplier = inventory_stock.supplier\n inventory_line.expiry_date = inventory_stock.expiry_date\n inventory_line.inventory = inventory\n inventory_line.save()\n # transaction.cursor.commit()\n inventory.state = 'done'\n inventory.save()\n return True",
"def add_quantity(\n quantity, ticket_type, item_id, bag):\n if quantity:\n if item_id in list(bag.keys()):\n if f'{ticket_type}' in bag[item_id]:\n bag[item_id][f'{ticket_type}'] += quantity\n else:\n bag[item_id][f'{ticket_type}'] = quantity\n else:\n bag[item_id] = {f'{ticket_type}': quantity}",
"def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)",
"def add_book(self):\r\n self.addbook = input(\"\\nInput the name of book:\")\r\n if self.addbook in self.books:\r\n print(f\"{self.addbook} is already in the list\")\r\n else:\r\n books = self.books.append(self.addbook)\r\n print(f\"The book {self.addbook} added successfully\")",
"def validate_product_quantity(item, qty):\n return True",
"def cart_add(request,book_id):\r\n cart = Cart(request)\r\n book = get_object_or_404(Book, id=book_id)\r\n form = CartAddProductForm(request.POST)\r\n if book.get_discounted_price()>0 :\r\n if form.is_valid():\r\n \r\n cd = form.cleaned_data\r\n if book.has_inventory(cd['quantity']):\r\n cart.add(book=book,\r\n quantity=cd['quantity'],\r\n override_quantity=cd['override'])\r\n \r\n return redirect('cart_detail')\r\n \r\n \r\n return redirect('book_detail',pk = book.id)",
"def test_book_isbn_length_must_be_ten(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn length must be 10', str(res2))",
"def add_to_inventory(item, location, quantity, user=None):\n\n try:\n inventory = Inventory.objects.get(item=item, location=location)\n inventory.quantity += quantity\n inventory.save()\n except ObjectDoesNotExist:\n inventory = Inventory.objects.create(item=item, location=location, quantity=quantity)\n\n transaction = InventoryTransaction.objects.create(inventory=inventory, quantity=quantity, user=user)\n\n return transaction",
"def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity",
"def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False",
"def add_shelf_books(edition_ids, shelf):\n\n for ed_id in edition_ids:\n try:\n shelfbook_match = db.session.query(ShelfBook).filter(ShelfBook.ed_id == ed_id, ShelfBook.shelf_id == shelf.shelf_id).one()\n print \"This shelfbook already exists!\"\n except:\n new_shelfbook = ShelfBook(ed_id=ed_id, shelf_id=shelf.shelf_id)\n db.session.add(new_shelfbook)\n\n db.session.commit()",
"def add_item(self, item):\n item_exists = self.get_item(item.id)\n\n if item_exists:\n item_exists._increment_quantity(item.quantity)\n else:\n self.items.append(item)",
"def add_ingredient_to_shop_list (self, ingredient) :\n found = False\n qty_available = self.quantity_in_fridge (ingredient)\n for ing in self.shop_list :\n if ing.equals(ingredient) :\n qty_needed = ingredient.quantity - qty_available\n ing.add_quantity (qty_needed)\n found = True\n if found == False :\n ingredient.set_quantity(ingredient.quantity - qty_available)\n self.shop_list.append(ingredient)",
"def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')",
"def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))",
"def add_to_bag(request, item_id):\n product = get_object_or_404(Product, pk=item_id)\n\n # Get quantity of item and add to current bag\n quantity = int(request.POST.get('quantity'))\n redirect_url = request.POST.get('redirect_url')\n\n # get products\n select = None\n if 'product_select' in request.POST:\n select = request.POST['product_select']\n\n current_bag = request.session.get('current_bag', {})\n\n if item_id in list(current_bag.keys()):\n # if item is currently in bag\n if select in current_bag[item_id]['items_by_select'].keys():\n # if item is same size/time, increment quantity\n current_bag[item_id]['items_by_select'][select] += quantity\n messages.success(\n request,\n f'{product.name} qty updated')\n else:\n # if item is different size/time, add new item\n current_bag[item_id]['items_by_select'][select] = quantity\n messages.success(request, f'Added {product.name} to bag')\n else:\n # if not currently in bag, add new item\n current_bag[item_id] = {'items_by_select': {select: quantity}}\n messages.success(request, f'Added {product.name} to bag')\n\n # override session variable with update\n request.session['current_bag'] = current_bag\n return redirect(redirect_url)",
"def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False",
"def add_item_to_inventory(life, item_uid, equip=False):\n\tif not isinstance(item_uid, str) and not isinstance(item_uid, unicode):\n\t\traise Exception('Deprecated: String not passed as item UID')\n\t\n\titem = items.get_item_from_uid(item_uid)\n\tbrain.remember_item(life, item)\n\t\n\tunlock_item(life, item_uid)\n\titem['parent_id'] = life['id']\n\titem['owner'] = life['id']\n\t\n\tif 'stored_in' in item:\n\t\titems.remove_item_from_any_storage(item_uid)\n\t\n\tif equip:\n\t\tif can_wear_item(life, item_uid):\n\t\t\tlife['inventory'].append(item_uid)\n\t\t\tequip_item(life,item_uid)\n\t\t\n\telif not add_item_to_storage(life, item_uid):\n\t\tif not can_wear_item(life, item_uid):\n\t\t\tlogging.warning('%s cannot store or wear item. Discarding...' % ' '.join(life['name']))\n\t\t\t\n\t\t\titem['pos'] = life['pos'][:]\n\t\t\t\n\t\t\tdel item['parent_id']\n\t\t\titem['owner'] = None\n\t\t\t\n\t\t\treturn False\n\t\telse:\n\t\t\tlife['inventory'].append(item_uid)\n\t\t\tequip_item(life,item_uid)\n\telse:\n\t\tlife['inventory'].append(item_uid)\n\t\n\tif 'max_capacity' in item:\n\t\tfor uid in item['storing'][:]:\n\t\t\t_item = items.get_item_from_uid(uid)\n\t\n\tlogging.debug('%s got \\'%s\\'.' % (life['name'][0],item['name']))\n\t\n\treturn item_uid"
] | [
"0.6657328",
"0.6105469",
"0.5910948",
"0.58885527",
"0.58784324",
"0.58568716",
"0.5847984",
"0.58228725",
"0.58056134",
"0.576493",
"0.57219106",
"0.5702976",
"0.56621385",
"0.55984473",
"0.5585517",
"0.55787504",
"0.5577667",
"0.55656403",
"0.5536493",
"0.5530911",
"0.5522777",
"0.55186033",
"0.5513638",
"0.5501447",
"0.5496181",
"0.54953474",
"0.5494588",
"0.5417748",
"0.54054433",
"0.5399721"
] | 0.73372024 | 0 |
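
A minimal, self-contained sketch of the check-then-update pattern used by restock_book in the record above. It swaps the record's MySQL-style cursor for Python's built-in sqlite3, so the ? placeholders, the in-memory database, and the sample book row are assumptions made only to keep the example runnable, not part of the original application.

import sqlite3

def restock_book(cur, isbn, quantity):
    # Only touch stock when the ISBN is actually present, mirroring the record above.
    cur.execute("SELECT COUNT(*) FROM book WHERE ISBN = ?", (isbn,))
    if cur.fetchone()[0]:
        cur.execute("UPDATE book SET stock = stock + ? WHERE ISBN = ?", (quantity, isbn))
        return True
    return False

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE book (ISBN TEXT PRIMARY KEY, title TEXT, stock INTEGER)")
cur.execute("INSERT INTO book VALUES ('0123456789', 'Example Title', 2)")

print(restock_book(cur, '0123456789', 5))   # True, stock becomes 7
print(restock_book(cur, '9999999999', 5))   # False, unknown ISBN
conn.commit()
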
Given a unique login ID, find the details of all of the orders associated with that user and return them in a dictionary keyed by order number. | def get_user_orders(self, loginID):
order_details = {}
self.cursor.execute("""SELECT orderNumber, orderDate FROM orderlog WHERE loginID=%s
ORDER BY orderDate DESC, orderNumber DESC""", (loginID,))
for order in self.cursor.fetchall():
order_details[str(order[0])] = {'title': [], 'quantity': [], 'ISBN': []}
# this line only needs to execute once, but it's easier to do it like this.
order_details[str(order[0])]['date'] = order[1]
self.cursor.execute("""SELECT ISBN FROM orderlog O INNER JOIN productof P ON O.orderNumber = P.orderNumber
WHERE O.orderNumber=%s""", (order[0],))
for book in self.cursor.fetchall():
self.cursor.execute("""SELECT title, quantity FROM book B, productof P, orderlog O WHERE P.ISBN=%s
AND P.orderNumber = O.orderNumber AND P.ISBN = B.ISBN AND O.orderNumber = %s""", (book[0], order[0]))
for details in self.cursor.fetchall():
title = details[0]
quantity = details[1]
order_details[str(order[0])]['title'].append(title)
order_details[str(order[0])]['quantity'].append(quantity)
order_details[str(order[0])]['ISBN'].append(book[0])
return order_details | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_one_user_orders(self,user_id):\n\n sql = \"SELECT * FROM parcel_orders WHERE user_id='{}'\".format(user_id)\n self.db_object.cursor.execute(sql)\n placed_orders = self.db_object.cursor.fetchall()\n return placed_orders",
"def test_get_all_orders_by_user(self):\n # Test with accessing other users parcels\n response = self.client.get(\n 'api/v1/users/35530/parcels', headers=self.user_token_dict)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(json.loads(response.data), {\n 'message': 'Cannot perform this operation'})\n # Test with wrong format user id\n response = self.client.get(\n 'api/v1/users/35fsv530/parcels', headers=self.user_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})\n # Test with user with no orders\n response = self.client.get(\n 'api/v1/users/104/parcels', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(data, {'message': 'No orders by that user'})\n self.assertEqual(response.status_code, 400)",
"def account_order(self, orderid):\n return self.get(f'orders/{orderid}', auth=True)",
"def get_all_orders():",
"def get_all_orders():\n response = requests.get(\n settings.SHOPIFY_ORDERS_URL,\n auth=(settings.SHOPIFY_API_KEY, settings.SHOPIFY_PASSWORD),\n )\n return response.json()[\"orders\"]",
"def get_all_orders(): \n data = order_obj.get_all_orders()\n return data",
"def get_order_by_id(access_token,order_ID):\r\n\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n headers={'Authorization': 'Bearer {}'.format(access_token)}\r\n\r\n #Make the get request to TD Ameritrade\r\n orders_data_json = requests.get(url=orders_url,headers=headers)\r\n return orders_data_json.json()",
"def return_customer_orders(customer_id):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_order, id_customer, id_product, quantity, total_price,\n payment_status, send_status, order_date, location\n FROM Orders\n Where id_customer=?\n \"\"\",\n (customer_id,))\n return cursor.fetchall()",
"def test_order_by_user(self):\n self.fill_session_cart()\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n self.assertEqual(OrderInfo.objects.get().user,\n USER_MODEL.objects.get())",
"def test_get_orders_by_caterer(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tres = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().get(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token})\n\t\tself.assertEqual(response.status_code, 200)",
"def users(self):\n from sagas.ofbiz.entities import OfEntity as e, oc\n rs=e().allUserLogin()\n for r in rs:\n print(r['userLoginId'])",
"def get_customer_orders(customerId):\n data = user_obj.get_customer_orders(customerId)\n return data",
"def show_orders(self):\n\n data = cur.execute(\"\"\"SELECT * FROM orders\"\"\").fetchall()\n print(tabulate(data, headers=[\"Order ID\", \"Status\", \"Customer\", \"Address\", \"Delivery Method\"]))",
"def _get_order_data_from_shopify(api_key:str, password:str, shop_name:str):\n\n response = requests.get(f\"https://{api_key}:{password}@{shop_name}.myshopify.com/admin/orders.json?created_at_min=2016-01-01&created_at_max=2016-12-31\")\n\n return response.json()",
"def get(self):\n return DAO.orders",
"def get_all_orders(self):\n self.query = \"SELECT * FROM public.orders\"\n self.message = \"Successfully fetched all orders.\"\n self.error = \"Unable to fetch all orders\"\n self.event = \"admin_get_all_orders\"",
"def show_order_detail(self, order_id):\n\n data = cur.execute(\"\"\"SELECT productid, productname, quantity, location FROM orderitems WHERE orderid = ?\"\"\",\n (order_id,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Quantity\", \"Location\"]))",
"def get_entries(order):\n users_entries = {}\n for item in order.items.all():\n entries_per_order = []\n entries = Entries.objects.filter(orderItem=item.id)\n for ent in entries:\n entries_per_order.append(ent.ticket_number)\n n_order = {\n item.id: entries_per_order\n }\n users_entries.update(n_order)\n return users_entries",
"def get_order_detail(orderid): \n data = order_obj.get_order_detail(orderid)\n return data",
"def get_basic_userinfo(self, loginID, my_id):\n info = {'loginID': '', 'firstName': '', 'lastName': '', 'orderCount': 0, 'books_purchased': 0,\n 'num_comments': 0,\n 'comments': [], 'books_commented': [], 'trusted': 0, 'untrusted': 0, 'personalStatus': ''}\n self.cursor.execute(\"\"\"SELECT DISTINCT C.loginID, firstName, lastName, COUNT(DISTINCT orderNumber),\n COUNT(DISTINCT commentID) FROM customercredentials C, comment CO, orderlog O \n WHERE C.loginID = %s AND O.loginID = %s AND CO.loginID = %s\"\"\", (loginID, loginID, loginID))\n\n result = self.cursor.fetchone()\n info['loginID'] = result[0]\n info['firstName'] = result[1]\n info['lastName'] = result[2]\n info['orderCount'] = result[3]\n info['num_comments'] = result[4]\n\n self.cursor.execute(\"\"\"SELECT SUM(quantity) FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND loginID=%s\"\"\", (loginID,))\n result = self.cursor.fetchone()\n info['books_purchased'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT * FROM comment WHERE loginID = %s ORDER BY commentDate DESC\"\"\", (loginID,))\n result = self.cursor.fetchall()\n for comment in result:\n info['comments'].append(comment)\n\n for comment in info['comments']:\n info['books_commented'].append(self.get_single_book_info(comment[1]))\n self.cursor.execute(\"\"\"SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='TRUSTED'\"\"\",\n (loginID,))\n result = self.cursor.fetchone()\n info['trusted'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='UNTRUSTED'\"\"\",\n (loginID,))\n result = self.cursor.fetchone()\n info['untrusted'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT trustStatus FROM trusts WHERE loginID=%s AND otherLoginID=%s\"\"\",\n (my_id, loginID))\n result = self.cursor.fetchone()\n if result:\n info['personalStatus'] = result[0]\n return info",
"def orders_product_info(customer_id):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT o.id_order,p.product_name,o.quantity,o.total_price\n FROM Orders AS o\n NATURAL JOIN Products AS p\n WHERE o.id_customer=?\n \"\"\",\n (customer_id,))\n return cursor.fetchall()",
"def get_orders(self, oid=None, include_expired=False, orderid=None):\n return self.request(\n 'get',\n '%sorders/%s%s' % (\n safeformat('objects/{:int}/', oid) if oid else \"\",\n \"all/\" if include_expired else \"\",\n safeformat('{:int}', orderid) if orderid else \"\"\n )\n )",
"def user_info(client_id, email):\n um = logic.UserManager()\n try:\n user = um.lookup_user_by_email(email)\n orders = om.get_orders_of_user(user.id)\n except ex.TickeeError as e:\n transaction.abort()\n return marshalling.error(e)\n except Exception as e:\n transaction.abort()\n return marshalling.internal_error(e)\n else:\n result = dict(first_name=user.first_name,\n last_name=user.last_name,\n email=user.email,\n orders=map(lambda o: dict(id=o.id,\n tickets=map(lambda t: marshalling.ticket_to_dict(t, include_scanned=True,\n include_user=False), \n o.get_tickets()),\n status=o.status,\n date=marshalling.date(o.session_start)), \n orders))\n return result",
"def loadOrderInfo(self, orderID):\n cursor = self.__db.cursor()\n cursor.execute(\n \"SELECT orders.*, JSON_ARRAYAGG(orderItems.itemID) as items FROM orders LEFT JOIN orderItems ON orderItems.orderID = orders.orderID WHERE orders.orderID = %s GROUP BY orders.orderID;\",\n orderID)\n if cursor.rowcount == 1:\n self.__orderinfo = cursor.fetchone()\n # Unjsonify the order items\n self.__orderinfo['items'] = json.loads(self.__orderinfo['items'])\n # Grab the customer data\n self.__orderinfo['customer'] = self.__customer.getCustomer(self.__orderinfo['customerID'])\n del self.__orderinfo['customerID']\n\n return True\n else:\n raise Exception(\"Error: OrderID not found.\")",
"def test_get_order_by_customer(self):\n test_order = self._create_orders(1)[0]\n print(test_order.customer_id)\n resp = self.app.get('/orders/customers/{}'.format(test_order.customer_id),\n content_type='application/json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()[0]\n self.assertEqual(data['uuid'], test_order.uuid)",
"async def get_order(cls, session, account, order_id):\n if not session.logged_in:\n raise Exception('Tastyworks session not logged in.')\n\n url = '{}/accounts/{}/orders/{}'.format(\n session.API_url,\n account.account_number,\n order_id\n )\n\n async with aiohttp.request('GET', url, headers=session.get_request_headers()) as resp:\n if resp.status != 200:\n raise Exception('Could not retreive the order')\n data = (await resp.json())['data']\n order = cls.from_dict(data)\n return order",
"def load_records(orders: List[Dict[str, Any]]):\n with session_scope() as session:\n for record in orders:\n try:\n users = [user[\"account\"] for user in get_table_records(Users)]\n if record[\"account\"] not in users:\n user = {\n \"account\": record[\"account\"],\n \"active\": True,\n \"is_demo\": True,\n }\n row = Users(**user)\n session.add(row)\n session.commit()\n except IntegrityError:\n print(\"User is already in the database\")\n\n try:\n orders = [order[\"order_number\"] for order in get_table_records(Orders)]\n row = Orders(**record)\n session.add(row)\n session.commit()\n except IntegrityError:\n print(\"Order is already in the database\")",
"def test_get_all_orders_authentication(self):\n # Test with user token\n response = self.client.get(\n 'api/v1/parcels', headers=self.user_token_dict)\n data = json.loads(response.data)\n self.assertEqual(data, {message: 'Cannot perform this operation'})\n self.assertEqual(response.status_code, 401)",
"def GetOrder(order_id): \n\t\"\"\"Method to get order\"\"\"\n\trequest = OrdersGetRequest(order_id)\n\tresponse = client.execute(request)\n\treturn response.result.__dict__[\"_dict\"]",
"def get_order(self, orderid):\n return self.get_orders(orderid=orderid)"
] | [
"0.6686448",
"0.64870703",
"0.6377364",
"0.63203555",
"0.59901",
"0.59260833",
"0.58946323",
"0.58818346",
"0.58209866",
"0.581566",
"0.5790411",
"0.5785189",
"0.57793796",
"0.57544464",
"0.5745315",
"0.5741415",
"0.569958",
"0.56847036",
"0.5682288",
"0.5681163",
"0.5678344",
"0.56680864",
"0.5659307",
"0.5655962",
"0.565456",
"0.56381726",
"0.5637787",
"0.56219214",
"0.55904776",
"0.55799645"
] | 0.75124234 | 0 |
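
get_user_orders in the record above issues one query per order and another per book. The sketch below produces the same result shape (order number mapped to date, titles, quantities and ISBNs) from a single three-table JOIN; it is written against Python's sqlite3, and the miniature schema and sample rows are illustrative assumptions rather than the original database.

import sqlite3

def get_user_orders(cur, login_id):
    # One JOIN instead of the record's nested per-order and per-book queries.
    cur.execute(
        """SELECT O.orderNumber, O.orderDate, B.ISBN, B.title, P.quantity
           FROM orderlog O
           JOIN productof P ON O.orderNumber = P.orderNumber
           JOIN book B ON P.ISBN = B.ISBN
           WHERE O.loginID = ?
           ORDER BY O.orderDate DESC, O.orderNumber DESC""",
        (login_id,),
    )
    orders = {}
    for order_no, order_date, isbn, title, qty in cur.fetchall():
        entry = orders.setdefault(str(order_no),
                                  {'date': order_date, 'title': [], 'quantity': [], 'ISBN': []})
        entry['title'].append(title)
        entry['quantity'].append(qty)
        entry['ISBN'].append(isbn)
    return orders

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.executescript("""
    CREATE TABLE orderlog (orderNumber INTEGER PRIMARY KEY, loginID TEXT, orderDate TEXT);
    CREATE TABLE book (ISBN TEXT PRIMARY KEY, title TEXT);
    CREATE TABLE productof (ISBN TEXT, orderNumber INTEGER, quantity INTEGER);
    INSERT INTO orderlog VALUES (1, 'alice', '2024-01-02');
    INSERT INTO book VALUES ('0123456789', 'Example Title');
    INSERT INTO productof VALUES ('0123456789', 1, 3);
""")
print(get_user_orders(cur, 'alice'))
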
Utility function to return a list of all ISBNs and their quantities in an order, given an order number. | def get_books_in_order(self, orderNumber):
self.cursor.execute("""SELECT ISBN, quantity FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber
AND O.orderNumber=%s""",(orderNumber,))
result = []
for i in self.cursor.fetchall():
result.append([i[0],i[1]])
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def book(self, irc, msg, args, thing):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n results = self.db.getCurrencyBook(thing)\n if len(results) == 0:\n irc.error(\"No orders for this currency present in database.\")\n return\n if len(results) > self.registryValue('maxOrdersInBookList'):\n irc.error(\"Too many orders to list on IRC. Visit the web \"\n \"order book, http://bitcoin-otc.com/vieworderbook.php?eitherthing=%s \"\n \"to see list of orders for this item.\" % (thing,))\n return\n self._getMtgoxQuote()\n L = [\"#%s %s %s %s %s %s @ %s %s (%s)\" % (id,\n time.ctime(refreshed_at),\n nick,\n buysell,\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes) \\\n for (id,\n created_at,\n refreshed_at,\n buysell,\n nick,\n host,\n amount,\n thing,\n price,\n otherthing,\n notes) in results]\n irc.replies(L, joiner=\" || \")",
"async def process_orderbook(self, data):\n for item in data:\n symbol = item.get(\"symbol\")\n orderbook = {\n \"platform\": self._platform,\n \"symbol\": symbol,\n \"asks\": item.get(\"asks\"),\n \"bids\": item.get(\"bids\"),\n \"timestamp\": tools.utctime_str_to_mts(item[\"timestamp\"])\n }\n EventOrderbook(**orderbook).publish()\n logger.debug(\"symbol:\", symbol, \"orderbook:\", orderbook, caller=self)",
"def get_all_orders():",
"def get_recommended_books(self, orderNumber, loginID):\n invalid_isbn_list = []\n books_in_order = []\n possible_isbn_list = []\n self.cursor.execute(\"\"\"SELECT orderNumber FROM orderlog WHERE loginID=%s\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (order[0],))\n for ISBN in self.cursor.fetchall():\n invalid_isbn_list.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (orderNumber,))\n for ISBN in self.cursor.fetchall():\n books_in_order.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT P.ISBN FROM productof P WHERE EXISTS \n (SELECT orderNumber FROM productof P2 WHERE ISBN = %s AND P2.orderNumber = P.orderNumber)\"\"\", (ISBN[0],))\n for valid_isbn in self.cursor.fetchall():\n possible_isbn_list.append(valid_isbn[0])\n valid_isbn_list = [i for i in possible_isbn_list if i not in invalid_isbn_list]\n return_list = []\n for book in valid_isbn_list:\n book, author = self.get_single_book_info(book)\n return_list.append([book, author])\n return return_list",
"def order_book_fetch(self, symbol):\n orderbook = self.orderbooks[symbol]\n asks = [[float(price), float(stats[0]) * float(stats[1])] for price, stats in orderbook['asks'].items()]\n bids = [[float(price), float(stats[0]) * float(stats[1])] for price, stats in orderbook['bids'].items()]\n return asks, bids, orderbook",
"def _send_order_book_snapshot(state, client, symbol):\n state.lock.acquire()\n\n # Try to find order book corresponding to symbol\n try:\n lob = state.get_current_lob_state(symbol)\n except KeyError as exc:\n state.lock.release()\n # TODO: create error message\n return\n\n messages = []\n # Send sell orders\n if (lob.asks is not None) and (len(lob.asks) > 0):\n for price, order_list in reversed(lob.asks.price_map.items()):\n head_order = order_list.get_head_order()\n for _ in range(0, order_list.length):\n messages.append(_create_add_message_from_order(head_order))\n head_order = head_order.next_order\n\n # Send buy orders\n if (lob.bids is not None) and (len(lob.bids) > 0):\n for price, order_list in reversed(lob.bids.price_map.items()):\n head_order = order_list.get_head_order()\n for _ in range(0, order_list.length):\n messages.append(_create_add_message_from_order(head_order))\n head_order = head_order.next_order\n\n for message in messages:\n message = json.dumps(message)\n messaging.send_data(client.socket, message, client.encoding)\n time.sleep(0.0001)\n\n client.snapshot_sent = True\n\n state.lock.release()",
"async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n await self.load_markets()\n precision = self.safe_value(self.options, 'precision', 'R0')\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n 'precision': precision,\n }\n if limit is not None:\n request['len'] = limit # 25 or 100\n fullRequest = self.extend(request, params)\n orderbook = await self.publicGetBookSymbolPrecision(fullRequest)\n timestamp = self.milliseconds()\n result = {\n 'symbol': market['symbol'],\n 'bids': [],\n 'asks': [],\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'nonce': None,\n }\n priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0\n for i in range(0, len(orderbook)):\n order = orderbook[i]\n price = self.safe_number(order, priceIndex)\n signedAmount = self.safe_string(order, 2)\n amount = Precise.string_abs(signedAmount)\n side = 'bids' if Precise.string_gt(signedAmount, '0') else 'asks'\n result[side].append([price, self.parse_number(amount)])\n result['bids'] = self.sort_by(result['bids'], 0, True)\n result['asks'] = self.sort_by(result['asks'], 0)\n return result",
"def get_isbn_items(query=\"\"):\n url = \"https://archive.org/advancedsearch.php?q=\" + query\n r = requests.get(url)\n isbn_items = r.json()[\"response\"][\"docs\"]\n print(f\"Length of isbn_items: {len(isbn_items)}\")\n return isbn_items",
"async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n if limit is not None:\n request['depth'] = limit\n response = await self.publicGetOrderBookL2(self.extend(request, params))\n result = {\n 'symbol': symbol,\n 'bids': [],\n 'asks': [],\n 'timestamp': None,\n 'datetime': None,\n 'nonce': None,\n }\n for i in range(0, len(response)):\n order = response[i]\n side = 'asks' if (order['side'] == 'Sell') else 'bids'\n amount = self.convert_from_raw_quantity(symbol, self.safe_string(order, 'size'))\n price = self.safe_number(order, 'price')\n # https://github.com/ccxt/ccxt/issues/4926\n # https://github.com/ccxt/ccxt/issues/4927\n # the exchange sometimes returns null price in the orderbook\n if price is not None:\n result[side].append([price, amount])\n result['bids'] = self.sort_by(result['bids'], 0, True)\n result['asks'] = self.sort_by(result['asks'], 0)\n return result",
"def order_book(self, order_details):\n order_date = datetime.date.today()\n self.cursor.execute(\"INSERT INTO orderlog (loginID, orderDate) VALUES (%s, %s)\",\n (order_details['loginID'], order_date))\n order_id = self.cursor.lastrowid\n for i in range(len(order_details['ISBN'])):\n self.cursor.execute(\"INSERT INTO productof Values (%s, %s, %s)\",\n (order_details['ISBN'][i], order_id, order_details['quantity'][i]))\n self.cursor.execute(\"UPDATE book SET stock=stock-%s WHERE ISBN=%s\",\n (order_details['quantity'][i], order_details['ISBN'][i]))\n self.db.commit()\n return order_id",
"def send_order(self, p_order, p_in_out, count):\n pass",
"def notifyBuyer(self,ISBN):\n\t\tans=Wishlist.objects.filter(ISBN=ISBN).values('userid')\n\t\tb=Book.objects.get(ISBN=ISBN)\n\t\timport smtplib\n\t\tfromaddr = '[email protected]'#sender's email\t\t\n\t\tserver=smtplib.SMTP('smtp.gmail.com:587')\n\t\tfor i in ans:\n\t\t\tmail=None\n\t\t\tprint \"hi \"\n\t\t\tprint i['userid']\n\t\t\tmail=User.objects.filter(userid=i['userid']).values('email')\n\t\t\tprint mail\n\t\t\tif not mail is None:\n\t\t\t\ttoaddr = mail[0]['email'] #receiver's email\n\t\t\t\tmsg = 'The book '+ b.title +' is available.Add to cart and checkout as quick as possible.'#The message\n\t\t\t\t\t\n\t\t\t\t#gmail credentials\n\t\t\t\tusername = 'booksonwheelsteam'\n\t\t\t\tpassword = 'books^**'\n\t\t\t\t\n\t\t\t\tserver.starttls()\n\t\t\t\ttry:\n\t\t\t\t\tserver.login(username,password)\n\t\t\t\t\tserver.sendmail(fromaddr,toaddr,msg)\n\t\t\t\texcept:\n\t\t\t\t\tprint \"not send mail\"\n\t\t\t\t\t#pass\n\n\t\tserver.quit()",
"def do_orders(self,args):\n try:\n orders = bitstamp.open_orders()\n orders = sorted(orders, key=lambda x: float(x['price']))\n buytotal,selltotal = 0,0\n numbuys,numsells = 0,0\n amtbuys,amtsells = 0,0\n buyavg,sellavg = 0,0\n numorder = 0 \n for order in orders:\n ordertype=\"Sell\" if order['type'] == 1 else \"Buy\"\n numorder += 1\n print '%s = %s | $%s @ %s BTC %s' % (numorder,ordertype,order['price'],order['amount'],order['id']) \n if order['type'] == 0:\n buytotal += D(order['price'])*D(order['amount'])\n numbuys += D('1')\n amtbuys += D(order['amount'])\n elif order['type'] == 1:\n selltotal += D(order['price'])*D(order['amount'])\n numsells += D('1')\n amtsells += D(order['amount'])\n if amtbuys:\n buyavg = D(buytotal/amtbuys).quantize(cPrec)\n if amtsells:\n sellavg = D(selltotal/amtsells).quantize(cPrec)\n print \"There are %s Buys. There are %s Sells\" % (numbuys,numsells)\n print \"Avg Buy Price: $%s. Avg Sell Price: $%s\" % (buyavg,sellavg)\n except Exception as e:\n print e",
"def view(self, irc, msg, args, optlist, query):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n raw = False\n for (option, arg) in optlist:\n if option == 'raw':\n raw = True\n if raw:\n f = lambda x: '\"%s\"' % x\n else:\n self._getMtgoxQuote()\n f = self._getIndexedValue\n if query is None:\n if gpgauth is None:\n nick = msg.nick\n else:\n nick = gpgauth['nick']\n results = self.db.getByNick(nick)\n elif isinstance(query, int):\n results = self.db.getById(query)\n else:\n nick = query\n results = self.db.getByNick(nick)\n if len(results) == 0:\n irc.error(\"No orders found matching these criteria.\")\n return\n if len(results) > self.registryValue('maxOrdersInBookList'):\n irc.error(\"Too many orders to list on IRC. Visit \"\n \"http://bitcoin-otc.com/vieworderbook.php?nick=%s \"\n \"to see the list of matching orders.\" % (nick,))\n return\n L = [\"#%s %s %s %s %s %s @ %s %s (%s)\" % (id,\n time.ctime(refreshed_at),\n nick,\n buysell,\n amount,\n thing,\n f(price),\n otherthing,\n notes) \\\n for (id,\n created_at,\n refreshed_at,\n buysell,\n nick,\n host,\n amount,\n thing,\n price,\n otherthing,\n notes) in results]\n\n irc.replies(L, joiner=\" || \")",
"def returnOrderBook(self, limit=25):\n orders = self.dpay.rpc.get_order_book(limit, api=\"market_history\")\n r = {\"asks\": [], \"bids\": []}\n for side in [\"bids\", \"asks\"]:\n for o in orders[side]:\n r[side].append({\n 'price': float(o[\"price\"]),\n 'bbd': o[\"bbd\"] / 10 ** 3,\n 'dpay': o[\"dpay\"] / 10 ** 3,\n })\n return r",
"async def buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if number and item:\n out = item_helpers.buy(ctx.author.id, item, number=number)\n await ctx.send(out)",
"def getOrderBookPrice(exchange, symbol, side, quantity, order_book=None):\n # TODO test it\n # print(\"obap1\")\n order_book_side = order_book['asks'] \\\n if side == exchange.SIDE_SELL else order_book['bids']\n\n quantity = Decimal(quantity)\n i, orders, price = 0, [], Decimal(0)\n accounted_for_quantity = Decimal(0)\n qtdif = Decimal(1)\n # print(\"obap2\")\n while accounted_for_quantity < quantity or qtdif > Decimal(0.0001):\n try:\n order = order_book_side[i]\n except IndexError:\n raise Exception(\"There are not enough orders in the Order Book.\")\n # return False\n qty = min(Decimal(order[1]), quantity - accounted_for_quantity)\n price += Decimal(order[0]) * qty\n accounted_for_quantity += qty\n qtdif = abs(Decimal(1) - accounted_for_quantity / quantity)\n i += 1\n\n # print(\"obap3\")\n return price / quantity",
"def list_order(self, orderNo):\r\n param = {}\r\n param['orderNo'] = orderNo\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/order', param, self.timeout)",
"def post(cls):\n data = request.get_json() # token ,item_ids [1, 3, 3, 5, 5, 5]\n items = []\n item_id_quantities = Counter(data[\"item_ids\"])\n\n for _id, count in item_id_quantities.most_common(): # [(5,3),(3,2),(1,1)]\n item = ItemModel.find_by_id(_id)\n if not item:\n return {\"message\": gettext(\"order_item_by_id_not_found\").format(_id)}, 404\n\n \"\"\"ItemsInOrder get item_id and quantity, however\n order_id will be set later on,\n when items is passed into OrderModel, because back_populates=\"order\"\n it goes over to order column of ItemsInOrder table,\n and set order_id for each of those item in OrderModel\n to be the order to which you have added those items\"\"\"\n items.append(ItemsInOrder(item_id=_id, quantity=count))\n\n # items is a list of ItemsInOrder obj\n order = OrderModel(items=items, status=\"pending\") # pending until send to Stripe\n order.save_to_db() # this does not submit to Stripe\n\n try:\n order.set_status(\"failed\") # assume the order would fail until it's completed\n order.charge_with_stripe(data[\"token\"])\n order.set_status(\"complete\") # charge succeeded\n return order_schema.dump(order), 200\n # the following error handling is advised by Stripe, although the handling implementations are identical,\n # we choose to specify them separately just to give the students a better idea what we can expect\n except error.CardError as e:\n # Since it's a decline, stripe.error.CardError will be caught\n return e.json_body, e.http_status\n except error.RateLimitError as e:\n # Too many requests made to the API too quickly\n return e.json_body, e.http_status\n except error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n return e.json_body, e.http_status\n except error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you changed API keys recently)\n return e.json_body, e.http_status\n except error.APIConnectionError as e:\n # Network communication with Stripe failed\n return e.json_body, e.http_status\n except error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n return e.json_body, e.http_status\n except Exception as e:\n # Something else happened, completely unrelated to Stripe\n print(e)\n return {\"message\": gettext(\"order_error\")}, 500",
"def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities",
"def list_orders(self, symbol):\r\n param = {}\r\n param['symbol'] = self.__transfer_symbol(symbol)\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/processing-orders', param, self.timeout)",
"def ISBNs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('isbns', default)\n return [HEP.ISBNObject(i) for i in tmp]",
"def get_order_books(self):\n return self.execute_http_call(\"/api/order_books\", \"GET\", headers=None)",
"def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")",
"def test_get_order_items(self):\n pass",
"def order_book_fetch(self, symbol):\n pass",
"def request_return(self, orderNumber, ISBN, quantity):\n\n date = datetime.date.today()\n\n self.cursor.execute(\"\"\"INSERT INTO returnrequest (orderNumber, requestDate, ISBN, quantity)\n VALUES (%s,%s,%s,%s)\"\"\", (orderNumber, date, ISBN, quantity))\n self.db.commit()",
"def create_get_order_book_request(self, symbol: str,\n limit: Optional[int] = None\n ) -> Request:",
"def returnOrderTrades(self, order_number):",
"def proposed_order_print(proposed_order_list):\n for item_details in proposed_order_list:\n proposed_order_item_print(item_details)"
] | [
"0.6369327",
"0.5828879",
"0.5790139",
"0.57637495",
"0.5735976",
"0.5667943",
"0.5600581",
"0.55803293",
"0.5573463",
"0.5555479",
"0.5546125",
"0.5471802",
"0.5466978",
"0.54046434",
"0.5401237",
"0.5364825",
"0.5308784",
"0.53080535",
"0.5262634",
"0.5256943",
"0.52493423",
"0.5240516",
"0.52106714",
"0.5203482",
"0.51720566",
"0.5165161",
"0.515833",
"0.51536345",
"0.5099803",
"0.50580406"
] | 0.66823083 | 0 |
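
A sketch of the get_books_in_order lookup from the record above, again against sqlite3 for the sake of a runnable example. Because productof already carries both ISBN and quantity, the join to orderlog is dropped here; the table definition and sample rows are assumptions for illustration only.

import sqlite3

def get_books_in_order(cur, order_number):
    # Same result shape as the record above: a list of [ISBN, quantity] pairs.
    cur.execute("SELECT ISBN, quantity FROM productof WHERE orderNumber = ?",
                (order_number,))
    return [[isbn, qty] for isbn, qty in cur.fetchall()]

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE productof (ISBN TEXT, orderNumber INTEGER, quantity INTEGER)")
cur.executemany("INSERT INTO productof VALUES (?, ?, ?)",
                [("0123456789", 7, 2), ("9876543210", 7, 1)])
print(get_books_in_order(cur, 7))   # [['0123456789', 2], ['9876543210', 1]]
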
Utility function to check if an order specified by orderNumber has no books associated with it. This is only ever called internally, so no need for validity checks. | def is_empty_order(self, orderNumber):
self.cursor.execute("""SELECT COUNT(*) FROM productof WHERE orderNumber=%s""", (orderNumber,))
if self.cursor.fetchone()[0]:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_book(self, book):\n return self.books.filter(lists_books.c.book_id == book.id).count() > 0",
"def is_book_available(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if json_data and len(json_data['docs']) >= 1:\n return True\n return False",
"def has_book(self, book):\n return self.books.filter(users_books.c.book_id == book.id).count() > 0",
"def get_books_in_order(self, orderNumber):\n self.cursor.execute(\"\"\"SELECT ISBN, quantity FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND O.orderNumber=%s\"\"\",(orderNumber,))\n result = []\n for i in self.cursor.fetchall():\n result.append([i[0],i[1]])\n return result",
"def get_recommended_books(self, orderNumber, loginID):\n invalid_isbn_list = []\n books_in_order = []\n possible_isbn_list = []\n self.cursor.execute(\"\"\"SELECT orderNumber FROM orderlog WHERE loginID=%s\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (order[0],))\n for ISBN in self.cursor.fetchall():\n invalid_isbn_list.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (orderNumber,))\n for ISBN in self.cursor.fetchall():\n books_in_order.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT P.ISBN FROM productof P WHERE EXISTS \n (SELECT orderNumber FROM productof P2 WHERE ISBN = %s AND P2.orderNumber = P.orderNumber)\"\"\", (ISBN[0],))\n for valid_isbn in self.cursor.fetchall():\n possible_isbn_list.append(valid_isbn[0])\n valid_isbn_list = [i for i in possible_isbn_list if i not in invalid_isbn_list]\n return_list = []\n for book in valid_isbn_list:\n book, author = self.get_single_book_info(book)\n return_list.append([book, author])\n return return_list",
"def validBookObject(bookObject):\n return (\"name\" in bookObject and\n \"price\" in bookObject and\n \"isbn\" in bookObject)",
"def valid_page_in_book(arch, **kwargs):\n return not arch.xpath('//page[not(ancestor::notebook)]')",
"def validate_bookid(self,book_id):\r\n if int(book_id) in [i.book_id for i in self.issued_books]:\r\n return True\r\n else:\r\n return False",
"def has(self, order_id):\n return Library.functions.has(self._book, order_id)",
"def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False",
"def is_book_exist(self, book_info):\n for type, link in book_info.links.items():\n try:\n bookfile = BookFile.objects.get( link_hash = md5(link).hexdigest() )\n books = bookfile.book_set.all()\n if books:\n return True, books[0]\n except BookFile.DoesNotExist:\n continue\n try:\n book = Book.objects.get(author__name=book_info.authors, title=book_info.title)\n return True, book\n except Book.DoesNotExist:\n continue\n return False, None",
"def test_get_unexisting_book(self):\n\n response1 = self.client.get(\n '/api/v1/books/NJCF4057', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Book Not Found')\n assert response1.status_code == 404",
"def test_index_view_with_no_books(self):\n response = self.client.get(reverse('booklist:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No books available\")\n self.assertQuerysetEqual(response.context['all_books'],[])",
"def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False",
"def has_items(self):\r\n return self.orderitem_set.exists() # pylint: disable=E1101\r",
"def test_empty_phonebook_is_consistent(self):\n self.assertTrue(self.phonebook.is_consistent())",
"def delete_orderbooks(self):\n counter = 0 \n orderbooksListlen = 0 \n if self.stored_query:\n queryInstruments = self.db_ops.get_instruments_from_stored_query(self.stored_query)\n else:\n logger.LOG(\"If deleting all order books on all instruments, please write and quiery for that. You should be sure of what you are doing.\")\n \n if queryInstruments:\n logger.DLOG(\"Deleting order books for instruments in market segment <%s> in the stored query <%s>\"%(self.market_segment, self.stored_query))\n \n orderbooksList = [] \n if self.market_segment and self.market_place:\n for ob in acm.FOrderBook.Select(\"marketPlace='%s' \"%(self.market_place)):#instrument, marketPlace, currency, externalType are indexes that can be used, the Oid also, but it s unique key index\n for gmp in ob.GroupMaps():#check if there is a leaf on this orderbook \n if gmp.Group().Name() == self.market_segment: \n orderbooksList.append(ob)\n orderbooksListlen =len(orderbooksList)\n if not orderbooksList:\n logger.LOG(\"No OrderBooks on Segment:'%s' and Market:'%s'\"%(self.market_segment, self.market_place)) \n else:\n for each_orderbook in orderbooksList: \n if queryInstruments.Includes(each_orderbook.Instrument()): \n isDeleted = self.db_ops.Delete_SingleOrderBookWithReference(each_orderbook, self.market_segment)\n if isDeleted: \n counter=counter+1\n \n logger.DLOG(\"**%s order books** were deleted for the following including '%s' instruments: %s\"%(str(counter), str(orderbooksListlen), queryInstruments))",
"def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)",
"def checkout_book(book):\n\tno_token = 'Y'\n\tif no_token == 'Y':\n\t\tsuccessful = 200\n\t\treturn successful\n\telse:\n\t\tlist_of_books = check_out_book(book)\n\t\treturn list_of_books\n\t#end if",
"def check_if_exists(self, bookID):\n query = f\"\"\"SELECT * from {TABLE} WHERE bookID = '{bookID}';\"\"\"\n res = self.cursor.execute(query)\n\n if self.cursor.fetchall():\n return True\n else:\n return False",
"def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass",
"def verify_if_basket_is_empty(self):\n self._basket.verify_if_basket_is_empty()",
"def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True",
"def is_valid_book(current_author, inputed_name, availale_books):\n\tbook_info = []\n\tauthor_book = {}\n\n\tfor book in availale_books:\n\t\tauthor = book.author.username\n\t\tauthor_book[author] = book.book_name\n\t\tbook_info.append(author_book)\n\t\tauthor_book = {}\n\n\tfor book in book_info:\n\t\tfor author, book_name in book.items():\n\t\t\tif book_name == inputed_name and author == current_author:\n\t\t\t\treturn False\n\n\treturn True",
"def check(self, number: int) -> bool:\n return (number in self.numbers_set)",
"def check(self, number):\n return number in self.numbers",
"def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0",
"def is_booked(self):\n return self.booking_set.filter(confirmed=True).count() > 0",
"def get_order_books(self):\n return self.execute_http_call(\"/api/order_books\", \"GET\", headers=None)",
"def test_get_book_with_id_does_not_exist(self):\n\t\tlogin_data = self.register_and_login_in_user()\n\t\ttoken = login_data['auth_token']\n\n\t\t# get book id\n\t\tbook = self.client.get(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json'\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book not found')\n\t\tself.assertEqual(book.status_code, 404)"
] | [
"0.5795934",
"0.5631788",
"0.5619639",
"0.56194794",
"0.5501485",
"0.5421667",
"0.52619696",
"0.52269924",
"0.5222976",
"0.51843566",
"0.5094807",
"0.5090388",
"0.49968106",
"0.49886924",
"0.49697027",
"0.49391702",
"0.49239758",
"0.49178872",
"0.48695588",
"0.48668623",
"0.4851435",
"0.48248172",
"0.4821226",
"0.47459173",
"0.47403604",
"0.47317526",
"0.47233936",
"0.4695376",
"0.46712098",
"0.467105"
] | 0.68257135 | 0 |
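
is_empty_order in the record above counts every matching row just to learn whether at least one exists. The sketch below makes the same decision with a LIMIT 1 probe, which can stop at the first hit; the sqlite3 in-memory productof table is an assumption used only to make the example executable.

import sqlite3

def is_empty_order(cur, order_number):
    # Fetch at most one row; the order is empty exactly when nothing comes back.
    cur.execute("SELECT 1 FROM productof WHERE orderNumber = ? LIMIT 1", (order_number,))
    return cur.fetchone() is None

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE productof (ISBN TEXT, orderNumber INTEGER, quantity INTEGER)")
cur.execute("INSERT INTO productof VALUES ('0123456789', 1, 2)")
print(is_empty_order(cur, 1))   # False: order 1 contains a book
print(is_empty_order(cur, 2))   # True: no rows for order 2
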
Add a new comment from a particular user to a particular book. Since only one comment per user per book is allowed, this function first checks whether the user has already commented on the book. If not, it simply adds a new comment. Otherwise, it updates the original comment, since users are allowed to update their own comments. | def add_comment(self, comment_info):
self.cursor.execute("""SELECT commentID, score FROM comment WHERE loginID = %s AND ISBN = %s""",
(comment_info['loginID'], comment_info['ISBN']))
result = self.cursor.fetchall()
if result:
# found a comment, need to update it
self.cursor.execute("""UPDATE comment SET score=%s, message=%s WHERE commentID=%s""",
(comment_info['score'], comment_info['message'], result[0][0]))
self.cursor.execute("""UPDATE book SET total_rating_score=total_rating_score+%s WHERE ISBN=%s""",
(int(comment_info['score']) - result[0][1], comment_info['ISBN']))
return_code = 0
else:
# no comment found, create a new one
self.cursor.execute("""INSERT INTO comment (ISBN, loginID, score, message, commentDate)
VALUES (%s,%s,%s,%s,%s)""", (comment_info['ISBN'], comment_info['loginID'], comment_info['score'],
comment_info['message'], datetime.datetime.now()))
self.cursor.execute("""UPDATE book SET total_rating_score = total_rating_score+%s,
num_ratings = num_ratings+1 WHERE ISBN = %s""", (comment_info['score'], comment_info['ISBN']))
return_code = 1
self.db.commit()
self.update_average_book_rating(comment_info['ISBN'])
return return_code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_review_comment(self, doc, comment):\n if len(doc.reviews) != 0:\n if not self.review_comment_set:\n self.review_comment_set = True\n if validations.validate_review_comment(comment):\n doc.reviews[-1].comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('ReviewComment::Comment')\n else:\n raise CardinalityError('ReviewComment')\n else:\n raise OrderError('ReviewComment')",
"def assign_comment_to_book(self, comment_id, book_id):\n # Implemented from template for\n # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin\n mgr = self._get_provider_manager('COMMENTING', local=True)\n lookup_session = mgr.get_book_lookup_session(proxy=self._proxy)\n lookup_session.get_book(book_id) # to raise NotFound\n self._assign_object_to_catalog(comment_id, book_id)",
"def add_comment(self):\n comment = Comment(\n title=self.title,\n comment=self.comment,\n rating=self.rating,\n user_from_id=g.user.id,\n user_to_id=self.user_to_id\n )\n db.session.add(comment)\n db.session.commit()\n return comment",
"def can_assign_comments_to_book(self, book_id):\n # Implemented from template for\n # osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if book_id.get_identifier() == '000000000000000000000000':\n return False\n return True",
"def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()",
"def _add_comment(self, comment, post_id, page_id, parent_comment=None):\n user_id = self._get_or_create_user(comment['from'])\n message = self._clean_message(comment)\n if len(message) > 0:\n columns = '(user, post, page, fb_id, created_time, message, like_count, comment_count'\n values = (user_id, post_id, page_id, comment['id'], comment['created_time'],\n message, comment['like_count'], comment['comment_count'])\n values_placeholder = '(%s,%s,%s,%s,%s,%s,%s,%s'\n if parent_comment is None:\n columns = columns + ')'\n values_placeholder = values_placeholder + ')'\n else:\n columns = columns + ',parent_comment)'\n values = values + (parent_comment,)\n values_placeholder = values_placeholder + ',%s)'\n return self._insert_if_possible('INSERT INTO comment {} VALUES {}'.format(columns, values_placeholder),\n values)\n else:\n return False",
"def add_book_to_user(self, book, email, rating=None):\n if email in self.users.keys():\n self.users[email].read_book(book, rating)\n if rating is not None:\n book.add_rating(rating)\n\n if book in self.books.keys():\n self.books[book] += 1\n else:\n self.books[book] = 1\n else:\n return f\"No user with email {email}!\"",
"def add_club_comment(user, club, comment):\n with open('club_comments.json') as json_file:\n comments = json.load(json_file)\n if club in comments.keys():\n if comments[club] is None: # If there are no comments associated with the club Python returns None\n comments[club] = [user + \": \" + comment] \n else:\n comments[club].append(user + \": \" + comment)\n with open('club_comments.json', 'w') as outfile:\n json.dump(comments, outfile)\n return True \n else:\n return False # If the specified club name does not exist return False so an error can be specified to the api caller.",
"def addComment(self, comment):\r\n comment.topicId = self.topicId\r\n self.comments.append(comment)\r\n return len(self.comments)-1",
"def add_book_to_user(self, book, email, rating=None):\n if self.users.get(email):\n self.users[email].read_book(book, rating)\n self.books[book] = self.books.get(book, 0) + 1\n if rating:\n book.add_rating(rating)\n else:\n print(\"{email} address not found.\".format(email=email))",
"def add_comment(cls, uid, person, email, body):\n\n try:\n qs = cls.query.add_comment(uid=uid, person=person, email=email, body=body)\n record = cls.engine.query(qs, fetch_opts='single')\n\n except (DBAPIError, SnaqlException) as error:\n raise Exception(error.args[0])\n\n return record if record else None",
"def submit_comment(book_id):\n \n #Information for inserting\n score = request.form.get(\"score\")\n comment = request.form.get(\"comment\")\n\n if score is None or comment is None:\n return render_template(\"error.html\",message=\"Please submit the complete information.\")\n\n #Inserte a new review\n db.execute(\"INSERT INTO reviewer (id_book, id_user, comment, score_user) VALUES (:id_book, :id_user, :comment, :score_user)\",\n {\"id_book\":book_id, \"id_user\":session[\"user_id\"], \"comment\": comment, \"score_user\": score})\n \n db.commit()\n\n #Get the info of the book\n book = db.execute(\"SELECT * FROM book WHERE id = :book_id\",{\"book_id\": book_id}).fetchone()\n if book is None:\n return render_template(\"error.html\", message = \"No such Book.\")\n\n #Get the reviews joined with the name of the user\n stmt = \"SELECT user_library.*, reviewer.* FROM user_library INNER JOIN reviewer ON user_library.id=reviewer.id_user WHERE id_book = :book_id\"\n reviews = db.execute(stmt,{\"book_id\": book_id}).fetchall()\n\n #Get the user_review info\n user_review = db.execute(\"SELECT * FROM reviewer WHERE id_book = :book_id AND id_user = :user_id\",\n {\"book_id\": book_id, \"user_id\": session[\"user_id\"]}).fetchone()\n\n #If this info not exist we could add a comment, else we can not.\n is_commented = True\n if user_review is None:\n is_commented = False\n\n #Insert a new score if a new comment is introduced\n average_score = db.execute(\"SELECT AVG(score_user) FROM reviewer WHERE id_book = :book_id\",{\"book_id\":book_id}).fetchone()\n average_score = average_score.items()\n average_score = average_score[0]\n average_score = float(average_score[1])\n\n db.execute(\"UPDATE book SET score = :average_score WHERE id = :book_id\", {\"average_score\":average_score, \"book_id\": book_id}) \n db.commit()\n\n #Get the info of the book\n book = db.execute(\"SELECT * FROM book WHERE id = :book_id\",{\"book_id\": book_id}).fetchone()\n if book is None:\n return render_template(\"error.html\", message = \"No such Book.\")\n\n #Proccess for rating count of Goofreaders\n goodreader_info = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": KEY, \"isbns\": book.isbn })\n goodreader_info = goodreader_info.json()\n goodreader_info = goodreader_info[\"books\"]\n\n average_rating = goodreader_info[0][\"average_rating\"]\n ratings_counts = goodreader_info[0][\"ratings_count\"]\n\n return render_template(\"book_info.html\",book=book, reviews = reviews, is_commented = is_commented\n , average_rating = average_rating, ratings_counts = ratings_counts )",
"def user_can_edit_comment(user, comment: DocumentComment):\n current_status = comment.document.status.id\n\n if user.id == comment.create_user_id and \\\n current_status == \\\n DocumentStatus.objects.get_by_natural_key('Draft').id:\n return True\n\n return False",
"def add_comment(self, comment):\n assert isinstance(comment, Comment)\n self._comments.append(comment)\n return None",
"def add_comment_to_issue(repo, issue_number, body, allow_duplicates):\n found = False\n issue = repo.issue(issue_number)\n\n if not allow_duplicates:\n for comment in issue.iter_comments():\n if comment.body == body:\n found = True\n break\n\n if allow_duplicates or not found:\n success = issue.create_comment(body)\n if success:\n click.echo(\"The comment was successfully posted to the issue.\")\n else:\n click.echo(\"There was a failure commenting on the issue.\")\n raise SystemExit(1)\n else:\n click.echo(\"An identical comment was found, skipping posting comment.\")",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def update_comment(application_id, new_comment, user_id):\n if not application_id or not new_comment:\n return {\"status\": \"Please provide an application ID and a new comment!\"}\n if not user_id:\n return {\"status\": \"Please make sure you are authenticated.\"}\n\n application = Application.query.filter_by(id=application_id, user_id=user_id).first()\n if not application:\n return {\"status\": \"Application not found!\"}\n \n application.comment = new_comment\n user_applications = Application.query.filter_by(user_id=application.user_id).all()\n return [application.to_dict() for application in user_applications]",
"def commented_by(self, user):\n return Comments.objects.filter(recipe=self, chef=user).exists()",
"def validate_user_comment(request, comment_id, is_you=True):\n if not request.user.is_authenticated():\n return forbidden_json({'error': AUTH_ERROR_TXT})\n try:\n com = Comment.objects.get(id=comment_id)\n except Comment.DoesNotExist:\n return bad_request_json({'error': \"Comment doesn't exist\"})\n if is_you and request.user != com.user:\n return bad_request_json({'error': AUTHOR_ERROR})\n return com",
"def new_comment(comment):\n attribute_state = inspect(comment).attrs.get('comment')\n # Check if the speaker has been updated\n history = attribute_state.history\n # TODO Check for insert rather than assuming the comment is immutable\n if history.has_changes():\n messages.send_new_comment(comment)",
"def _add_comment():\r\n per_page = current_app.config['FLASKY_ANSWERS_PER_PAGE']\r\n id = request.args.get('answer_id')\r\n answer = Answer.query.get_or_404(id)\r\n comment =request.args.get('comment')\r\n answers = Answer.query.get_or_404(id)\r\n page = 1\r\n result= False\r\n if current_user.can(Permission.COMMENT):\r\n comment = Comment(body=comment,\r\n author=current_user._get_current_object(),\r\n answer_id=id)\r\n db.session.add(comment)\r\n db.session.commit()\r\n page = (answer.comments.count()-1)/per_page + 1\r\n result=True\r\n pagination = Comment.query.order_by(Comment.timestamp).filter_by(answer_id=id).paginate(\r\n page,per_page=per_page,error_out=False\r\n )\r\n macro_comment = get_template_attribute(\"_comments.html\", \"render_comments\")\r\n macro_page = get_template_attribute(\"_page.html\", \"render_page\")\r\n comments = pagination.items\r\n return jsonify({'result': result,\r\n 'comment_html': macro_comment(comments),\r\n 'page_html': macro_page(pagination),\r\n 'comments_timestamp': [comment.timestamp for comment in comments],\r\n 'comments_id': [comment.id for comment in comments]\r\n })",
"def update_comment_feed(data:dict, user:User)->comments_models.CommentFeed:\n\ttry:\n\t\tcomment_feed = comments_models.CommentFeed.objects.get(user=user, id=data.get(\"id\"))\n\t\tif data.get(\"comment\") is not None:\n\t\t\taccounts_validations.validate_length(\"Comment\",data.get(\"comment\"),2,255)\n\t\telse:\n\t\t\traise ValueError(str(_(\"Comment is required\")))\n\t\tcomment_feed.comment = data.get(\"comment\")\n\t\tcomment_feed.save()\n\texcept comments_models.CommentFeed.DoesNotExist as e:\n\t\traise ValueError(str(_(\"Comment not exist\")))\n\treturn comment_feed",
"def put_comment(self, object_id, message):\n return self.put_object(object_id, \"comments\", message=message)",
"def put_comment(self, object_id, message):\n return self.put_object(object_id, \"comments\", message=message)",
"def add_comment(request, pk, redirect_user):\n\n if request.is_ajax():\n create_comment_and_status_notification(request, pk)\n if redirect_user == 'profile':\n return redirect('profile')\n if redirect_user == 'news_feed':\n return redirect('news_feed')",
"def add_awcomment(request, pk):\n\n award = get_object_or_404(Award, pk=pk)\n if request.method == \"POST\":\n form = AwardCommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.award = award\n comment.save()\n return redirect('../', pk=award.pk)\n else:\n form = AwardCommentForm()\n return render(request, {'form': form})",
"def add_comment(self, checkin_id: str, comment: str) -> Dict:\n method = \"checkin/addcomment/\" + checkin_id\n auth = self._get_access_token()\n if len(comment) > 140:\n raise ValueError(\n f\"Check-in comment is {len(comment)} characters whereas Untappd only supports comments up to 140 characters\"\n )\n params = {\"comment\": comment}\n return self._do_post(method, auth, params)",
"def on_comment(self, request, board_id):\n error = None\n if request.method == 'POST':\n creator = request.form['creator']\n comment = request.form['comment']\n if len(creator) > 30:\n error = 'creator name too long'\n elif len(comment) > 50:\n error = 'comment too long'\n else:\n self.insert_comment(request, board_id)\n return redirect('/board:' + board_id)\n return self.render_template('comment.html', error=error)",
"def create_or_update_comment(comment, message, repo, pr_number, token):\n # repo is in the form of \"org/repo\"\n if comment is not None:\n print(\"updating existing comment\")\n # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#update-an-issue-comment # noqa\n response = requests.patch(\n f\"https://api.github.com/repos/{repo}/issues/comments/{comment['id']}\",\n headers=get_headers(token),\n json={\"body\": message},\n )\n else:\n print(\"creating new comment\")\n # API doc: https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-28#create-an-issue-comment # noqa\n response = requests.post(\n f\"https://api.github.com/repos/{repo}/issues/{pr_number}/comments\",\n headers=get_headers(token),\n json={\"body\": message},\n )\n\n response.raise_for_status()"
] | [
"0.625182",
"0.6067011",
"0.6059309",
"0.6050631",
"0.59851485",
"0.5965514",
"0.5910397",
"0.59039426",
"0.58771926",
"0.5862978",
"0.58472365",
"0.58253986",
"0.57115245",
"0.5697091",
"0.5649901",
"0.5633569",
"0.5633569",
"0.55912524",
"0.55813646",
"0.5577598",
"0.5548712",
"0.5542382",
"0.55190516",
"0.55036706",
"0.55036706",
"0.55014074",
"0.5497153",
"0.54897285",
"0.5479809",
"0.5472261"
] | 0.6476383 | 0 |
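
The add_comment record above enforces one comment per user per book by selecting first and then branching into an UPDATE or an INSERT. Below is a minimal standalone sketch of the same upsert idea, not the repository's code: sqlite3 stands in for the MySQL connector used there, the table layout (comment with ISBN, loginID, score, message and a UNIQUE constraint) is an assumption modelled on that record, and the ON CONFLICT clause needs a reasonably recent SQLite.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE comment (
        commentID INTEGER PRIMARY KEY AUTOINCREMENT,
        ISBN      TEXT,
        loginID   TEXT,
        score     INTEGER,
        message   TEXT,
        UNIQUE (ISBN, loginID)   -- one comment per user per book
    );
""")

def add_or_update_comment(db, isbn, login_id, score, message):
    # The UNIQUE(ISBN, loginID) constraint lets the database perform the
    # "has this user already commented?" check that the original does with a SELECT.
    db.execute(
        """INSERT INTO comment (ISBN, loginID, score, message)
           VALUES (?, ?, ?, ?)
           ON CONFLICT (ISBN, loginID)
           DO UPDATE SET score = excluded.score, message = excluded.message""",
        (isbn, login_id, score, message),
    )
    db.commit()

add_or_update_comment(conn, "978-0000000000", "alice", 4, "Good read")
add_or_update_comment(conn, "978-0000000000", "alice", 5, "Even better on a reread")
print(conn.execute("SELECT loginID, score, message FROM comment").fetchall())
# [('alice', 5, 'Even better on a reread')]

Pushing the uniqueness check into the schema replaces the select-then-branch round trip with a single statement, although the original's approach makes it easier to adjust the book's running rating totals in the same pass.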
Maintenance function for updating the average rating of the book with the given ISBN. This should be called any time the number of ratings or the total rating score is updated. | def update_average_book_rating(self, isbn):
self.cursor.execute("""UPDATE book SET avg_rating = total_rating_score / num_ratings WHERE
ISBN=%s""", (isbn,))
self.db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_book_scores(self):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating=NULL, total_rating_score=0, num_ratings=0\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM comment\"\"\")\n for comment in self.cursor.fetchall():\n self.cursor.execute(\"\"\"UPDATE book SET total_rating_score=total_rating_score+%s,\n num_ratings=num_ratings+1 WHERE ISBN=%s\"\"\", (comment[3], comment[1]))\n self.db.commit()\n self.update_average_book_rating(comment[1])",
"def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return",
"def update_boy(self, hash, new_rate):\n image = self._db.boys.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.boys.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)",
"def get_average_rating(self):\n count = 0\n total = 0\n num_books = len(self.books)\n if num_books > 0:\n for rating in self.books.values():\n if rating:\n count += 1\n total += rating\n average = total / count\n if count > 0:\n return average\n else:\n print(\"Books with ratings not found for user {user}\".format(user=self.name))",
"def update_girl(self, hash, new_rate):\n image = self._db.girls.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.girls.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)",
"def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()",
"def update_comment_avg_score(self, commentID):\n self.cursor.execute(\"\"\"UPDATE comment SET avg_usefulness=(2*veryUseful+useful)/(veryUseful+useful+useless)\n WHERE commentID=%s\"\"\", (commentID,))\n self.db.commit()",
"def save(self, *args, **kwargs):\n self.item.rates_total += 1\n self.item.average_rate += (self.item.average_rate + self.rate) / self.item.rates_total\n self.item.save()\n super(Rate, self).save(*args, **kwargs)",
"def get_average_rating(self):\n count = 0\n total = 0\n ratings_length = len(self.ratings)\n if ratings_length > 0:\n for rating in self.ratings:\n count += 1\n total += rating\n average = total / count\n return average\n else:\n print(\"There does not seem to be any ratings for {book}\".format(book=self.title))",
"def book_api(isbn):\n\n # Get book data.\n book = db.execute(\"SELECT * FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n # Make sure book exists.\n if book is None:\n return jsonify({\"error\": \"Invalid isbn\"}), 404\n\n # Get average score\n # Count reviews for that book.\n review_count = db.execute(\"SELECT * FROM reviews WHERE book_id = :book_id\", {\"book_id\": book.id}).rowcount\n \n # Get all ratings for that book.\n ratings = db.execute(\"SELECT rating FROM reviews WHERE book_id = :book_id\", {\"book_id\": book.id}).fetchall()\n \n # Count the sum of ratings for that book.\n rating_sum = 0\n for rating in ratings:\n rating_sum += rating[0]\n\n average_score = 0\n\n # Make sure review_count is not a zero\n if review_count > 0:\n average_score = rating_sum/review_count\n\n return jsonify({\n \"title\": book.title,\n \"author\": book.author,\n \"year\": book.year,\n \"isbn\": book.isbn,\n \"review_count\": review_count,\n \"average_score\": average_score,\n \"rating_sum\": rating_sum\n })",
"def update_comment_score(self, loginID, commentID, attrib_name):\n self.cursor.execute(\"SELECT rating FROM rates WHERE loginID = %s AND commentID = %s\", (loginID, commentID))\n old_rating = self.cursor.fetchall()\n if old_rating:\n # This user already rated this comment. Change the rating.\n if old_rating[0][0] == attrib_name:\n # Remove the rating, because the user already voted for this.\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"-1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"DELETE FROM rates WHERE loginID=%s AND commentID=%s\"\"\",\n (loginID, commentID))\n else:\n self.cursor.execute(\n \"UPDATE comment SET \" + old_rating[0][0] + \"=\" + old_rating[0][0] + \"-1, \" + attrib_name\n + \"=\" + attrib_name + \"+1 WHERE commentID=%s\"\"\", (commentID,))\n self.cursor.execute(\"\"\"UPDATE rates SET rating=%s WHERE loginID=%s AND commentID=%s\"\"\",\n (attrib_name, loginID, commentID))\n else:\n # New rating, just need to update one value and add a new rating tuple to rates\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"+1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"INSERT INTO rates VALUES (%s,%s,%s)\"\"\", (loginID, commentID, attrib_name))\n self.db.commit()\n self.update_comment_avg_score(commentID)",
"def set_isbn(self, new_isbn):\n old_isbn = self.isbn\n self.isbn = new_isbn\n print(\n f\"The isbn for the book '{self.title}' has been updated from '{old_isbn}' to '{self.isbn}'.\")",
"def __init__(self, title, isbn):\n self.title = title\n self.isbn = isbn\n self.ratings = []",
"def update_movie_rating_record(movie_id, rating_number, operation):\n movie = models.Movie.objects.get(mid=movie_id)\n if operation == 'new':\n # Update the average_rating and votecount for the movie.\n movie.average_rating = (float(movie.average_rating) * float(movie.votecount) + rating_number) / (\n movie.votecount + 1)\n movie.votecount += 1\n movie.save()\n elif operation == 'delete':\n movie.average_rating = (float(movie.average_rating) * float(movie.votecount) - float(rating_number)) / (\n movie.votecount - 1)\n movie.votecount -= 1\n movie.save()\n elif operation == 'edit':\n movie.average_rating = float(movie.average_rating) + (float(rating_number) / movie.votecount)\n movie.save()",
"def update(self, id, title, author, year, isbn):\n self.cursor.execute(\"UPDATE Book SET Title = ?, Author = ?, Year = ?, \\\n ISBN = ? WHERE Id = ?\",\n (title, author, year, isbn, id))\n self.connection.commit()",
"def update(d,title, author, year, isbn,shelf,raw):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"UPDATE book where isbn=%s\"\n \"SET title = %s, \"\n \"author = %s, \"\n \"year = %s, \"\n \"shelf=%s,\"\n \"raw=%s\", \n (isbn,title, author, year,shelf,raw))\n conn_obj.commit()\n conn_obj.close()",
"def update_price_books(self, barcode, new_price):\n try:\n self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Updated Successfully!')",
"def book_api(isbn):\n\n db = get_db()\n book_info = db.execute(\"\"\"\n SELECT *\n FROM books\n WHERE isbn = :isbn;\n \"\"\", {\"isbn\": isbn}).fetchone()\n\n if book_info is None:\n return jsonify({\"error\": \"Invalid ISBN or ISBN is not available\"}), 404\n\n review_stats = db.execute(\n \"\"\"\n SELECT COUNT(*) as count,\n ROUND(AVG(rating), 2) AS avg_rating\n from reviews\n WHERE book_id = :id\n \"\"\",\n {\"id\": book_info['id']}\n ).fetchone()\n\n return jsonify({\n \"title\": book_info['title'],\n \"author\": book_info['author'],\n \"year\": book_info['year'],\n \"isbn\": isbn,\n \"review_count\": review_stats['count'],\n \"average_score\": float(review_stats['avg_rating'])\n })",
"def UpdateDataBase(self, isbn):\n self.dataBase.updateItem(isbn, 'front', self.isbnToFront[isbn])\n self.dataBase.updateItem(isbn, 'back', self.isbnToBack[isbn])",
"def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()",
"def read_book(self, book, rating=None):\n if isinstance(book, Book):\n self.books.update({book: rating})\n else:\n print(\"{book} does not appear to be a Book object.\".format(book=book))",
"def update_rating(self, name, rating):\n try:\n self.cursor.execute(\n \"\"\"UPDATE sandbox.dvds_rdbhdb_super\n SET rating = %s\n WHERE name LIKE %s\n \"\"\", \n (rating, name)\n )\n except (db.DataError, db.IntegrityError), e:\n if e[0] == '22P02':\n print 'Cannot add %s because its not a valid float' % rating\n else:\n print 'Caught Error while trying to update %s to %s' % (name, rating)\n #traceback.print_exc()",
"def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()",
"def book(isbn):\n\n if request.method == \"POST\":\n \n # Save current user info\n currentUser = session[\"user_id\"]\n \n # Fetch form data\n rating = request.form.get(\"rating\")\n comment = request.form.get(\"comment\")\n\n # Search book_id by ISBN\n row = db.execute(\"SELECT id FROM books WHERE isbn = :isbn\",\n {\"isbn\": isbn})\n\n # Save id into variable\n id_books = row.fetchone() # (id,)\n bookId = id_books[0]\n\n # Check for user submission (ONLY 1 review/user allowed per book)\n row2 = db.execute(\"SELECT * FROM reviews WHERE user_id = :user_id AND book_id = :book_id\",\n {\"user_id\": currentUser,\n \"book_id\": bookId})\n\n # A review already exists\n if row2.rowcount == 1:\n \n flash('You already submitted a review for this book', 'warning')\n return redirect(\"/book/\" + isbn)\n\n # Convert to save into DB\n rating = int(rating)\n\n db.execute(\"INSERT INTO reviews (user_id, book_id, comment, rating) VALUES \\\n (:user_id, :book_id, :comment, :rating)\",\n {\"user_id\": currentUser, \n \"book_id\": bookId, \n \"comment\": comment, \n \"rating\": rating})\n\n # Commit transactions to DB and close the connection\n db.commit()\n\n flash('Review submitted!', 'info')\n\n return redirect(\"/book/\" + isbn)\n \n # Take the book ISBN and redirect to his page (GET)\n else:\n row = db.execute(\"SELECT isbn, title, author, year FROM books WHERE \\\n isbn = :isbn\",\n {\"isbn\": isbn})\n\n # Hence this is the bookInfo[0] for using in reviews.html\n bookInfo = row.fetchall()\n\n \"\"\" GOODREADS reviews \"\"\"\n\n # Read API key from env variable\n key = os.getenv(\"GOODREADS_KEY\")\n \n # Query the api with key and ISBN as parameters\n query = requests.get(\"https://www.goodreads.com/book/review_counts.json\",\n params={\"key\": key, \"isbns\": isbn})\n \n # Convert the response to JSON\n json_response = query.json()\n\n # \"Clean\" the JSON before passing it to the bookInfo list\n response = json_response['books'][0]\n\n # Append it as the second element on the list to get bookInfo[1] for using in reviews.html\n bookInfo.append(response)\n\n \"\"\" Users reviews \"\"\"\n\n # Search book_id by ISBN\n row = db.execute(\"SELECT id FROM books WHERE isbn = :isbn\",\n {\"isbn\": isbn})\n\n # Save id into variable\n # 'id' of the user is found in both tables 'reviews' and 'users'\n # Hence this would be used for framing the JOIN query\n book = row.fetchone() # (id,)\n book = book[0]\n\n # Fetch book reviews\n # Date formatting (https://www.postgresql.org/docs/9.1/functions-formatting.html)\n results = db.execute(\"SELECT users.username, comment, rating, \\\n to_char(time, 'DD Mon YY - HH24:MI:SS') as time \\\n FROM users \\\n INNER JOIN reviews \\\n ON users.id = reviews.user_id \\\n WHERE book_id = :book \\\n ORDER BY time\",\n {\"book\": book})\n\n reviews = results.fetchall()\n\n return render_template(\"reviews.html\", bookInfo=bookInfo, reviews=reviews)",
"def test_update_review(self):\n\n user1 = User.objects.create_user('John')\n self.book.reviews.create(\n user=user1,\n rating=5,\n notes=\"It's so awesome\"\n )\n\n user2 = User.objects.create_user('Jane')\n review = self.book.reviews.create(\n user=user2,\n rating=4,\n notes=\"Love it\"\n )\n\n # update rating\n review.rating = 3\n review.save()\n\n # need to reload from database for updated rating value in book\n book = Book.objects.get(id=self.book.id)\n self.assertAlmostEqual(book.rating, 4)",
"def update_mean_user_rating(self):\n self.mean_user_rating = self.ratings.groupby(['user_id'])['rating'].mean().reset_index()",
"def updateUserRating(definition, increase):\n user = mongo.db.users.find_one({\"_id\": definition[\"submitted_by\"]})\n mongo.db.users.update_one(\n {\"_id\": user[\"_id\"]},\n {\"$inc\": {\"total_rating\": increase}})",
"def update_comment_usefulness(self):\n self.cursor.execute(\"\"\"UPDATE comment SET veryUseful=0, useful=0, useless=0, avg_usefulness=NULL\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM rates\"\"\")\n for rating in self.cursor.fetchall():\n self.update_comment_score(rating[0], rating[1], rating[2])",
"def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0",
"def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3"
] | [
"0.75978434",
"0.6865599",
"0.6308009",
"0.62638813",
"0.6142683",
"0.59501565",
"0.5932905",
"0.5863806",
"0.57942003",
"0.5734078",
"0.5721565",
"0.56398964",
"0.56119525",
"0.5606553",
"0.5602719",
"0.55716145",
"0.54950917",
"0.5487591",
"0.54631513",
"0.5462967",
"0.5453545",
"0.5429973",
"0.5414782",
"0.5367226",
"0.5352126",
"0.5332985",
"0.53094125",
"0.5300599",
"0.5292348",
"0.5281202"
] | 0.8616785 | 0 |
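
The update_average_book_rating record above simply recomputes avg_rating as total_rating_score / num_ratings; the interesting part is how the caller keeps those two counters consistent when a rating is added or revised. The following in-memory sketch illustrates that bookkeeping only; the field names are assumptions mirroring the record, not the repository's schema.

from dataclasses import dataclass
from typing import Optional

@dataclass
class BookStats:
    total_rating_score: int = 0
    num_ratings: int = 0
    avg_rating: Optional[float] = None

    def apply_new_rating(self, score: int) -> None:
        # New comment: both the total and the count move.
        self.total_rating_score += score
        self.num_ratings += 1
        self._refresh()

    def apply_rating_change(self, old: int, new: int) -> None:
        # Edited comment: only the total shifts by (new - old); the count is
        # unchanged, matching the total_rating_score adjustment path above.
        self.total_rating_score += new - old
        self._refresh()

    def _refresh(self) -> None:
        # The maintenance step the record above performs in SQL.
        self.avg_rating = self.total_rating_score / self.num_ratings

stats = BookStats()
stats.apply_new_rating(4)
stats.apply_new_rating(2)
stats.apply_rating_change(2, 5)   # the second reader revises 2 -> 5
print(stats)                      # avg_rating == 4.5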
Given the ISBN of a book, get the relevant information for the n most useful comments about that book. | def get_comments(self, isbn, n):
result = []
self.cursor.execute("""SELECT * FROM comment WHERE ISBN=%s ORDER BY avg_usefulness DESC LIMIT %s""",
(str(isbn), n))
for comment in self.cursor.fetchall():
result.append(comment)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_comments_by_book(self, book_id):\n # Implemented from template for\n # osid.resource.ResourceBinSession.get_resources_by_bin\n mgr = self._get_provider_manager('COMMENTING', local=True)\n lookup_session = mgr.get_comment_lookup_session_for_book(book_id, proxy=self._proxy)\n lookup_session.use_isolated_book_view()\n return lookup_session.get_comments()",
"def get_book_statistics(self, n, startDate, endDate):\n book_results = []\n author_results = []\n publisher_results = []\n\n self.cursor.execute(\"\"\"SELECT title, B.ISBN, SUM(quantity) as total FROM productof P, book B WHERE \n B.ISBN = P.ISBN AND orderNumber IN \n (SELECT orderNumber FROM orderlog WHERE orderDate >= %s AND orderDate <= %s) GROUP BY ISBN \n ORDER BY total DESC LIMIT %s\"\"\", (startDate, endDate, n))\n for book in self.cursor.fetchall():\n book_results.append(book)\n\n self.cursor.execute(\"\"\"SELECT name, SUM(quantity) as total FROM productof P, author A, wrote W\n WHERE ID=authorID AND W.ISBN = P.ISBN AND orderNumber IN \n (SELECT orderNumber FROM orderlog WHERE orderDate >= %s AND orderDate <= %s) GROUP BY name \n ORDER BY total DESC LIMIT %s\"\"\", (startDate, endDate, n))\n for author in self.cursor.fetchall():\n author_results.append(author)\n\n self.cursor.execute(\"\"\"SELECT publisher, SUM(quantity) as total FROM productof P, book B\n WHERE B.ISBN = P.ISBN AND orderNumber IN \n (SELECT orderNumber FROM orderlog WHERE orderDate >= %s AND orderDate <= %s) GROUP BY publisher \n ORDER BY total DESC LIMIT %s\"\"\", (startDate, endDate, n))\n for publisher in self.cursor.fetchall():\n publisher_results.append(publisher)\n\n return book_results, author_results, publisher_results",
"def get_comments_by_books(self, book_ids):\n # Implemented from template for\n # osid.resource.ResourceBinSession.get_resources_by_bins\n comment_list = []\n for book_id in book_ids:\n comment_list += list(\n self.get_comments_by_book(book_id))\n return objects.CommentList(comment_list)",
"def get_commentary_for_book_chapters(book_file_name):\n if not os.path.exists(challoner_store):\n os.mkdir(challoner_store)\n with open(book_file_name, \"r+\") as rh:\n book = json.load(rh)\n chapter_text = {}\n\n for name, chapters_dictionary in book.items():\n \n for chap, location in chapters_dictionary.items():\n norm = normalize_filename(\"{}_{}\".format(name, chap))\n \n outfile = \"{}/{}.json\".format(challoner_store, norm)\n \n if os.path.exists(outfile):\n continue\n else:\n chapter_text[name + \"__\" + chap] = get_commentary_for_chapter(location)\n with open(outfile, \"w+\") as wh:\n json.dump(chapter_text, wh)\n chapter_text = {}",
"async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count",
"def book_by_isbn(ISBN):\n data = {}\n for book in root.findall('Book'):\n for elem in book:\n isbn = book.find('ISBN').text\n if isbn == ISBN:\n data['id'] = book.attrib['id']\n data[elem.tag] = elem.text\n return data",
"def getComment(self, n = None):\n \n if n is None:\n return self._comments\n else:\n return self._comments[n]",
"def get_comment_ids_by_book(self, book_id):\n # Implemented from template for\n # osid.resource.ResourceBinSession.get_resource_ids_by_bin\n id_list = []\n for comment in self.get_comments_by_book(book_id):\n id_list.append(comment.get_id())\n return IdList(id_list)",
"def get_comentions(body):\n isbns = set(isbn for _, isbn in ISBN_REGEX.findall(body))\n for isbn1, isbn2 in permutations(isbns, 2):\n yield isbn1, isbn2",
"def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)",
"def get_book_data(isbn: int):\n try:\n book = next(iter(core.Book.search(('isbn', 'eq', isbn))))\n except StopIteration:\n pass # actually, I could put the whole rest of the function here\n else:\n data = core.Book.view_str(book.id)\n del data['id'], data['status'], data['return_date'], data['borrowed_by']\n del data['borrowed_by_id'], data['__str__']\n return data\n\n try:\n r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'\n + str(isbn) + '&method=simpleSearch&cqlMode=true')\n r.raise_for_status()\n except requests.exceptions.RequestException:\n raise core.BuchSchlossError('no_connection', 'no_connection')\n\n person_re = re.compile(r'(\\w*, \\w*) \\((\\w*)\\)')\n results = {'concerned_people': []}\n\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n if table is None:\n # see if we got multiple results\n link_to_first = page.select_one('#recordLink_0')\n if link_to_first is None:\n raise core.BuchSchlossError(\n 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)\n r = requests.get('https://portal.dnb.de'+link_to_first['href'])\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n\n for tr in table.select('tr'):\n td = [x.get_text('\\n').strip() for x in tr.select('td')]\n if len(td) == 2:\n if td[0] == 'Titel':\n results['title'] = td[1].split('/')[0].strip()\n elif td[0] == 'Person(en)':\n for p in td[1].split('\\n'):\n g = person_re.search(p)\n if g is None:\n continue\n g = g.groups()\n if g[1] == 'Verfasser':\n results['author'] = g[0]\n else:\n results['concerned_people'].append(g[1]+': '+g[0])\n elif td[0] == 'Verlag':\n results['publisher'] = td[1].split(':')[1].strip()\n elif td[0] == 'Zeitliche Einordnung':\n results['year'] = td[1].split(':')[1].strip()\n elif td[0] == 'Sprache(n)':\n results['language'] = td[1].split(',')[0].split()[0].strip()\n\n results['concerned_people'] = '; '.join(results['concerned_people'])\n return results",
"def get_comments_for_issue(owner, repo, issue_number, session=None):\n url = (\n f'{GITHUB_API_URL}/repos/{owner}/{repo}/issues/{issue_number}/comments'\n )\n return get_one_item_at_a_time(url, session=session)",
"def find_n_reviews(x, n, review_books_df):\n asin_1 = x['asin_1']\n asin_2 = x['asin_2']\n\n overall_reviews_1 = review_books_df.query('asin == @asin_1').sort_values(\n 'unixReviewTime').iloc[0:(n+1)].overall.tolist()\n overall_reviews_2 = review_books_df.query('asin == @asin_2').sort_values(\n 'unixReviewTime').iloc[0:(n+1)].overall.tolist()\n\n dic_1 = {'asin': asin_1}\n for i, val in enumerate(overall_reviews_1):\n dic_1[str(i)+\"-th-review\"] = val\n\n dic_2 = {'asin': asin_2}\n for i, val in enumerate(overall_reviews_2):\n dic_2[str(i)+\"-th-review\"] = val\n \n return [dic_1, dic_2]",
"def book(isbn):\n isbn = Markup.escape(isbn)\n # check if book exist in database\n book_db = db.execute(\n \"SELECT * FROM books WHERE isbn LIKE :isbn\", {\"isbn\": isbn}\n ).fetchone()\n if book_db == None:\n return render_template(\n \"error.html\", error=\"ISBN invalid or not in our Database.\"\n )\n\n # Get detail from Goodreads\n res = requests.get(\n \"https://www.goodreads.com/book/review_counts.json\",\n params={\"key\": os.getenv(\"GOODREADS_API\"), \"isbns\": isbn},\n )\n\n if res.status_code != 200:\n return render_template(\"error.html\", error=\"Not found on our API.\")\n data = res.json()\n book = data[\"books\"][0]\n\n # Get the reviews for the book.\n book_reviews = db.execute(\n \"SELECT review.*, users.nickname FROM review JOIN users ON review.user_id = users.id WHERE book_id = :book_id\",\n {\"book_id\": book_db.id},\n ).fetchall()\n\n # Get my own review\n user = session.get(\"user\")\n my_review = db.execute(\n \"SELECT * FROM review WHERE (book_id = :book_id) AND user_id = (SELECT id from users WHERE username LIKE :user)\",\n {\"book_id\": book_db.id, \"user\": user},\n ).fetchone()\n\n if my_review is not None:\n # Print results\n return render_template(\n \"book.html\",\n book=book,\n book_db=book_db,\n book_reviews=book_reviews,\n my_review=my_review,\n )\n else:\n return render_template(\n \"book.html\",\n book=book,\n book_db=book_db,\n book_reviews=book_reviews,\n my_review=None,\n )",
"def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments",
"def count_reddit_comments_ngram_strs(year, month, n):\n ngram_strs = ngram_extract.extract_reddit_comments_ngram_strs(year, month, n)\n return Counter(itertools.chain.from_iterable(ngram_strs))",
"def get_comments_by_country(pages, hotel, country):\n url = \"http://www.booking.com/reviewlist.es.html\"\n headers = {\n 'User-Agent': \"PostmanRuntime/7.20.1\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"4b4e2c78-12c0-42a7-807a-29f5f7378ae5,e75b58fb-25dd-4fdd-b97a-47650ed52d41\", # NOQA\n 'Host': \"www.booking.com\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"bkng=11UmFuZG9tSVYkc2RlIyh9Yaa29%2F3xUOLbca8KLfxLPeck0I1eO54zQUW2YGGgHUJ6NVSV%2BmLwJzaS5ibHX0J%2BdueF6GNDCq1X0NvEJAU9t%2FoaAC2%2FMBm39Gz0lTSWuf6zuBVIiNGAI88YDjaj4w5H8Lrv7T0Yug9jg%2FpPsONkdMVLMiYifIslIsLvFl07K%2BTKGRykCAxOsgE%3D\", # NOQA\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n params = {\n 'cc1': country,\n 'pagename': hotel,\n 'type': 'total',\n 'dist': str(1),\n 'rows': str(20)\n }\n\n def build_soup_comment_request(page: int, list_of_countries):\n if page == 0:\n params['offset'] = str(page)\n else:\n params['offset'] = str(page * 20)\n\n response = requests.get(url=url, params=params, headers=headers)\n comments_soup = BeautifulSoup(response.content, 'html.parser')\n span = comments_soup.select('.bui-avatar-block__flag img')\n [get_flags(item, list_of_countries) for item in span]\n\n countries_list = {}\n [build_soup_comment_request(page, countries_list) for page in range(pages)]\n return countries_list",
"def get_photo_comments(self, photo_id, count = 30, page = 1):\n uri = 'photos/' + photo_id + '/comments'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)",
"def get_comments(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/comments\".format(project.id,\n story.id)\n params = {\"fields\": Comment.FIELDS}\n comments = self._request(\"get\", resource, params=params)\n\n for comment in comments:\n ret_val.append(Comment(comment))\n\n return ret_val",
"def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()",
"def get_book_info(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books_info = []\n for book in json_data['docs']:\n info = {'title': book['title']}\n if 'publisher' in book:\n info.update({'publisher': book['publisher']})\n if 'publish_year' in book:\n info.update({'publish_year': book['publish_year']})\n if 'language' in book:\n info.update({'language': book['language']})\n books_info.append(info)\n return books_info",
"def get_books_by_comment(self, comment_id):\n # Implemented from template for\n # osid.resource.ResourceBinSession.get_bins_by_resource\n mgr = self._get_provider_manager('COMMENTING', local=True)\n lookup_session = mgr.get_book_lookup_session(proxy=self._proxy)\n return lookup_session.get_books_by_ids(\n self.get_book_ids_by_comment(comment_id))",
"def process_nbk_html(self, limit):\n model = Model(self.graph)\n c = 0\n books_not_found = set()\n for nbk in self.book_ids:\n c += 1\n nbk_id = 'GeneReviews:'+nbk\n book_item = self.all_books.get(nbk)\n url = '/'.join((self.rawdir, book_item['file']))\n\n # figure out if the book is there; if so, process, otherwise skip\n book_dir = '/'.join((self.rawdir, 'books'))\n book_files = os.listdir(book_dir)\n if ''.join((nbk, '.html')) not in book_files:\n # logger.warning(\"No book found locally for %s; skipping\", nbk)\n books_not_found.add(nbk)\n continue\n logger.info(\"Processing %s\", nbk)\n\n page = open(url)\n soup = BeautifulSoup(page.read())\n\n # sec0 == clinical description\n clin_summary = \\\n soup.find(\n 'div', id=re.compile(\".*Summary.sec0\"))\n if clin_summary is not None:\n p = clin_summary.find('p')\n ptext = p.text\n ptext = re.sub(r'\\s+', ' ', ptext)\n\n ul = clin_summary.find('ul')\n if ul is not None:\n item_text = list()\n for li in ul.find_all('li'):\n item_text.append(re.sub(r'\\s+', ' ', li.text))\n ptext += ' '.join(item_text)\n\n # add in the copyright and citation info to description\n ptext = \\\n ' '.join(\n (ptext,\n '[GeneReviews:NBK1116, GeneReviews:NBK138602, ' +\n nbk_id+']'))\n\n model.addDefinition(nbk_id, ptext.strip())\n\n # get the pubs\n pmid_set = set()\n pub_div = soup.find('div', id=re.compile(r\".*Literature_Cited\"))\n if pub_div is not None:\n ref_list = pub_div.find_all('div', attrs={'class': \"bk_ref\"})\n for r in ref_list:\n for a in r.find_all(\n 'a', attrs={'href': re.compile(r\"pubmed\")}):\n if re.match(r'PubMed:', a.text):\n pmnum = re.sub(r'PubMed:\\s*', '', a.text)\n else:\n pmnum = \\\n re.search(\n r'\\/pubmed\\/(\\d+)$', a['href']).group(1)\n if pmnum is not None:\n pmid = 'PMID:'+str(pmnum)\n self.graph.addTriple(\n pmid,\n model.object_properties['is_about'],\n nbk_id)\n pmid_set.add(pmnum)\n reference = Reference(\n self.graph,\n pmid, Reference.ref_types['journal_article'])\n reference.addRefToGraph()\n\n # TODO add author history, copyright, license to dataset\n\n # TODO get PMID-NBKID equivalence (near foot of page),\n # and make it \"is about\" link\n # self.gu.addTriple(\n # self.graph, pmid,\n # self.gu.object_properties['is_about'], nbk_id)\n # for example: NBK1191 PMID:20301370\n\n # add the book to the dataset\n self.dataset.setFileAccessUrl(book_item['url'])\n\n if limit is not None and c > limit:\n break\n\n # finish looping through books\n\n l = len(books_not_found)\n if len(books_not_found) > 0:\n if l > 100:\n logger.warning(\"There were %d books not found.\", l)\n else:\n logger.warning(\n \"The following %d books were not found locally: %s\",\n l, str(books_not_found))\n logger.info(\n \"Finished processing %d books for clinical descriptions\", c-l)\n\n return",
"def get_single_book_info(self, isbn):\n self.cursor.execute(\"SELECT * FROM book WHERE ISBN=%s\", (isbn,))\n books = self.cursor.fetchall()\n for book in books:\n authors = []\n self.cursor.execute(\"\"\"SELECT name FROM Author A, Wrote W, Book B WHERE A.ID = W.authorID AND\n W.ISBN = B.ISBN AND B.ISBN = %s\"\"\", (isbn,))\n for auth in self.cursor.fetchall():\n authors.append(auth[0])\n return book, authors",
"def problem_comments(self, identifier):\n return self._get(\"problems/%d/comments\" % identifier).json()",
"def get_commentary_for_chapter(location):\n chapter_commentary_dictionary = {}\n\n soup = Ripper(location, parser=\"html5lib\", save_path=save_path).soup\n text = soup.find(\"table\", class_=\"texttable\")\n\n for each in text.find_all(\"p\"):\n attributes = each.attrs\n if attributes:\n if \"desc\" in attributes[\"class\"]:\n pass\n elif \"note\" in attributes[\"class\"]:\n new_content = each.contents\n verse_header, text = export_commentary_text_as_dictionary(new_content)\n chapter_commentary_dictionary[verse_header] = text\n else:\n continue\n return chapter_commentary_dictionary",
"def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']",
"def get_comment(self, index):\r\n\r\n # Get request to get all the comments for all exercises\r\n comments = requests.get(API.url_comment, headers = self.headers).json()\r\n # Parse the response\r\n for my_comment in comments:\r\n if my_comment['id'] == index:\r\n print(my_comment['comment'])",
"def book_info(book_id):\n \n #Get the info of the book\n book = db.execute(\"SELECT * FROM book WHERE id = :book_id\",{\"book_id\": book_id}).fetchone()\n if book is None:\n return render_template(\"error.html\", message = \"No such Book.\")\n\n #Get all the reviews joined with the name of each user\n stmt = \"SELECT user_library.*, reviewer.* FROM user_library INNER JOIN reviewer ON user_library.id=reviewer.id_user WHERE id_book = :book_id\"\n reviews = db.execute(stmt,{\"book_id\": book_id}).fetchall()\n\n #Get the user_review info\n user_review = db.execute(\"SELECT * FROM reviewer WHERE id_book = :book_id AND id_user = :user_id\",\n {\"book_id\": book_id, \"user_id\": session[\"user_id\"]}).fetchone()\n\n #If this info not exist we could add a comment, else we can not.\n is_commented = True\n if user_review is None:\n is_commented = False\n\n #Proccess for rating count of Goofreaders\n goodreader_info = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": KEY, \"isbns\": book.isbn })\n goodreader_info = goodreader_info.json()\n goodreader_info = goodreader_info[\"books\"]\n\n average_rating = goodreader_info[0][\"average_rating\"]\n ratings_counts = goodreader_info[0][\"ratings_count\"]\n\n return render_template(\"book_info.html\",book=book, reviews = reviews, is_commented = is_commented\n , average_rating = average_rating, ratings_counts = ratings_counts )",
"def book(isbn, message=None):\n # Make sure book exists.\n book = db.execute(\"SELECT * FROM books WHERE isbn = :isbn\", {\"isbn\": isbn}).fetchone()\n\n if book is None:\n return render_template(\"error.html\", message=\"No such book.\")\n\n\n \"\"\" Goodreads info \"\"\"\n res = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": KEY_goodreads, \"isbns\":isbn.strip()}).json()\n\n\n\n\n \"\"\" Adding comment section \"\"\"\n comments = db.execute(\"SELECT reviews.date, reviews.review, reviews.rating, users.id FROM reviews \\\n INNER JOIN users ON reviews.comment_id=users.user_id WHERE books = :books\", {\"books\": book[0]}).fetchall()\n\n\n \"\"\" Adding review \"\"\"\n review = request.form.get(\"review\")\n rating_value = request.form.get(\"rating\")\n\n # Checking if user made a comment\n if review != None:\n # Checking if the user already made a comment\n if db.execute(\"SELECT * FROM reviews WHERE books = :books AND usr = :user\", {\"books\": book[0], \"user\":session[\"user_id\"][0]}).rowcount == 0:\n today = date.today() # adding date to the comment\n # Inserting the relevant info into the database\n db.execute(\"INSERT INTO reviews (books, usr, review, date, rating) VALUES (:books, :usr, :review, :date, :rating)\",\n {\"books\":int(book[0]), \"usr\": int(session[\"user_id\"][0]), \"review\": review, \"date\":today, \"rating\": rating_value})\n db.commit()\n\n # I have to redo this line of code, so the next time it renders the webpage the new comment shows up\n comments = db.execute(\"SELECT * FROM reviews WHERE books = :books\", {\"books\": book[0]}).fetchall()\n return render_template(\"book.html\", book=book, comments=comments, goodreads = res[\"books\"][0], message=\"Thanks for your comment!\")\n\n else:\n return render_template(\"book.html\", book=book, comments=comments, goodreads = res[\"books\"][0], message=\"You've already commented this book!\")\n\n\n\n return render_template(\"book.html\", book=book, review=review, comments=comments, goodreads = res[\"books\"][0])"
] | [
"0.6394735",
"0.58236885",
"0.57772076",
"0.56973577",
"0.566967",
"0.55916137",
"0.5555044",
"0.5552309",
"0.5548011",
"0.54580206",
"0.54438627",
"0.5385552",
"0.5383615",
"0.5363736",
"0.5341239",
"0.5337437",
"0.5321707",
"0.5316787",
"0.5306555",
"0.5299457",
"0.52939415",
"0.52858275",
"0.52837205",
"0.52805716",
"0.5244576",
"0.52433497",
"0.5232513",
"0.5230632",
"0.5221838",
"0.52205306"
] | 0.79621714 | 0 |
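
The get_comments record above delegates the ranking to SQL (ORDER BY avg_usefulness DESC LIMIT n). For the same selection performed outside the database, here is a hedged sketch over plain dictionaries; the field names are assumptions taken from that record.

import heapq

comments = [
    {"commentID": 1, "ISBN": "978-0000000000", "message": "Loved it", "avg_usefulness": 1.8},
    {"commentID": 2, "ISBN": "978-0000000000", "message": "Too long", "avg_usefulness": 0.9},
    {"commentID": 3, "ISBN": "978-0000000000", "message": "Solid",    "avg_usefulness": 1.4},
]

def top_n_comments(rows, n):
    # heapq.nlargest keeps only n candidates in memory, the in-Python
    # analogue of ORDER BY ... DESC LIMIT n.
    return heapq.nlargest(n, rows, key=lambda row: row["avg_usefulness"])

for row in top_n_comments(comments, 2):
    print(row["commentID"], row["avg_usefulness"])
# 1 1.8
# 3 1.4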
Given a comment ID, update the average usefulness. This will only ever be called internally, so no need for validity checks. | def update_comment_avg_score(self, commentID):
self.cursor.execute("""UPDATE comment SET avg_usefulness=(2*veryUseful+useful)/(veryUseful+useful+useless)
WHERE commentID=%s""", (commentID,))
self.db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_comment_usefulness(self):\n self.cursor.execute(\"\"\"UPDATE comment SET veryUseful=0, useful=0, useless=0, avg_usefulness=NULL\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM rates\"\"\")\n for rating in self.cursor.fetchall():\n self.update_comment_score(rating[0], rating[1], rating[2])",
"def calculate_score(self):\n try:\n self.score = self.__total_comment_score / float(self.num_comments)\n except ZeroDivisionError:\n self.score = float(0)",
"def update_comment_score(self, loginID, commentID, attrib_name):\n self.cursor.execute(\"SELECT rating FROM rates WHERE loginID = %s AND commentID = %s\", (loginID, commentID))\n old_rating = self.cursor.fetchall()\n if old_rating:\n # This user already rated this comment. Change the rating.\n if old_rating[0][0] == attrib_name:\n # Remove the rating, because the user already voted for this.\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"-1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"DELETE FROM rates WHERE loginID=%s AND commentID=%s\"\"\",\n (loginID, commentID))\n else:\n self.cursor.execute(\n \"UPDATE comment SET \" + old_rating[0][0] + \"=\" + old_rating[0][0] + \"-1, \" + attrib_name\n + \"=\" + attrib_name + \"+1 WHERE commentID=%s\"\"\", (commentID,))\n self.cursor.execute(\"\"\"UPDATE rates SET rating=%s WHERE loginID=%s AND commentID=%s\"\"\",\n (attrib_name, loginID, commentID))\n else:\n # New rating, just need to update one value and add a new rating tuple to rates\n self.cursor.execute(\"UPDATE comment SET \" + attrib_name + \"=\" + attrib_name + \"+1 WHERE commentID=%s\",\n (commentID,))\n self.cursor.execute(\"\"\"INSERT INTO rates VALUES (%s,%s,%s)\"\"\", (loginID, commentID, attrib_name))\n self.db.commit()\n self.update_comment_avg_score(commentID)",
"def update_book_scores(self):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating=NULL, total_rating_score=0, num_ratings=0\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM comment\"\"\")\n for comment in self.cursor.fetchall():\n self.cursor.execute(\"\"\"UPDATE book SET total_rating_score=total_rating_score+%s,\n num_ratings=num_ratings+1 WHERE ISBN=%s\"\"\", (comment[3], comment[1]))\n self.db.commit()\n self.update_average_book_rating(comment[1])",
"def analyze_comments():\n\n scores = {} # {docket_id: [comment1_score, comment2_score, ...]}\n positive_counts = {} # {docket_id: num_positive_comments}\n neutral_counts = {} # {docket_id: num_neutral_comments}\n negative_counts = {} # {docket_id: num_negative_comments}\n\n comment_sentiments = {} # {comment_id: sentiment} to write to database\n comment_complexity = {} # {comment_id: complexity} to write to database\n\n for comment in lib.mongo.retrieve_comments(1000):\n docket_id = comment['docketId']\n comment_id = comment['documentId']\n text = comment.get('commentText', '').strip()\n\n # Fill in the 'sentiment' field of this comment.\n if 'sentiment' in comment:\n score = comment['sentiment']\n else:\n score = lib.analyze_text.getSentiment(text)\n comment_sentiments[comment_id] = score\n\n logging.info('docket %s, comment %s: sentiment %s (%r)' %\n (docket_id, comment_id, score, text[:20]))\n\n # Fill in the 'complexity' field of this comment.\n if 'complexity' not in comment:\n comment_complexity[comment_id] = lib.analyze_text.get_complexity(text)\n\n # Aggregate the sentiment scores for each docket.\n scores.setdefault(docket_id, []).append(score)\n counts = positive_counts if score > 0 else (\n negative_counts if score < 0 else neutral_counts)\n counts[docket_id] = counts.get(docket_id, 0) + 1\n\n if len(comment_sentiments) >= 10:\n logging.info('updating %d comments sentiment...' % len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n comment_sentiments = {}\n\n if len(comment_complexity) >= 10:\n logging.info('updating %d comments complexity...' % len(comment_complexity))\n lib.mongo.update_comments('complexity', comment_complexity)\n comment_complexity = {}\n\n logging.info('updating %d comments...' % len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n lib.mongo.update_comments('complexity', comment_complexity)\n logging.info('done!')\n\n docket_sentiments = {} # {docket_id: sentiment} to write to database\n\n for docket in lib.mongo.dockets.find():\n docket_id = docket.get('docketId', '')\n positive_count = positive_counts.get(docket_id, 0)\n neutral_count = neutral_counts.get(docket_id, 0)\n negative_count = negative_counts.get(docket_id, 0)\n rating = compute_rating(positive_count, neutral_count, negative_count)\n logging.info('docket %s: %d positive, %d neutral, %d negative - %s' %\n (docket_id, positive_count, neutral_count, negative_count,\n rating))\n\n docket_sentiments[docket_id] = {\n 'positive': positive_count,\n 'neutral': neutral_count,\n 'negative': negative_count,\n 'rating': rating\n }\n\n logging.info('updating %d dockets...' % len(docket_sentiments))\n lib.mongo.update_dockets('sentiment', docket_sentiments)\n logging.info('done!')",
"def add_comment_score(self, comment_score: float):\n self.__total_comment_score += comment_score",
"def update_average_book_rating(self, isbn):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating = total_rating_score / num_ratings WHERE \n ISBN=%s\"\"\", (isbn,))\n self.db.commit()",
"def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return",
"def update(self, request, slug, id):\n article = ArticleInst.fetch(slug)\n updated_comment = request.data.get('comment', {})\n comment = self.check_comment(id, article)\n\n similar_comment = Comment.objects.filter(\n article=article,\n body=updated_comment.get('body')\n )\n\n if similar_comment:\n data = {'message': \"You've posted a similar comment before\"}\n status_ = status.HTTP_409_CONFLICT\n else:\n response = {'message': 'Comment Updated'}\n response['data'] = updated_comment\n serializer = self.serializer_class(\n comment,\n data=updated_comment,\n partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n data = serializer.data\n status_ = status.HTTP_200_OK\n\n return Response(data=data, status=status_)",
"def cloudnlp_analyze_comment(comment_pk):\n from .models import VideoComment # avoid circular imports\n try:\n comment = VideoComment.objects.get(pk=comment_pk)\n except VideoComment.DoesNotExist:\n logger.info(\n 'Video comment %r no longer exists! Cant analyze!', comment_pk)\n return\n try:\n client = cloudnlp.Client()\n analysis = client.analyze_sentiment(comment.comment_raw)\n except Exception:\n comment.analysis_failed = True\n comment.save()\n logger.exception(\n 'Error performing sentiment analysis on comment %r',\n comment.youtube_id)\n return\n comment.analyzed_comment = analysis\n comment.sentiment = analysis['documentSentiment']['score']\n comment.magnitude = analysis['documentSentiment']['magnitude']\n comment.save()",
"def update_comment_only(self, comment, incident_id):\n self.cursor.execute(\"\"\"UPDATE incidents SET comment='%s' WHERE incident_id='%s'\"\"\"%(comment ,incident_id))\n self.commiting()",
"def sentiment_for_one_comment(comment: str) -> float:\n try:\n ctm_blob = TextBlob(comment)\n sentiment_scores = [s.sentiment.polarity * s.sentiment.subjectivity for s in ctm_blob.sentences if\n s.sentiment.polarity != 0]\n result = sum(sentiment_scores) / len(sentiment_scores) if len(sentiment_scores) > 0 else 0\n except:\n result = 0\n return result",
"def update_comment(self, id, comment):\n sql = f\"UPDATE incidences SET comment = \\'{comment}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def update_boy(self, hash, new_rate):\n image = self._db.boys.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.boys.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)",
"def test_upvote_modifies_comment_score(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE)\n vote = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)",
"def _testCommentRating(self):\n\n try:\n host = models.Host.objects.all()[0]\n\n comment = models.Comment(text='test', host=host)\n comment.save()\n\n types = models.RatingType.objects.all()\n\n items = []\n for value, type in zip([3, 4, 5], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() - 4.0 < .0001, comment.rating()\n\n for tmp_obj in items:\n tmp_obj.delete()\n\n items = []\n for value, type in zip([3, 3], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() == 3.0, comment.rating()\n\n finally:\n for tmp_obj in items:\n tmp_obj.delete()\n\n comment.delete()",
"def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()",
"def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()",
"def update_mean_user_rating(self):\n self.mean_user_rating = self.ratings.groupby(['user_id'])['rating'].mean().reset_index()",
"def update_girl(self, hash, new_rate):\n image = self._db.girls.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.girls.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)",
"def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score",
"def _apply_comment(self, iid, comment):\n data = {\"body\" : comment._body}\n resp = self._post(\n self._base + \"/issues/{}/comments\".format(iid),\n data=self._format_data(data))",
"def overall_sentiment(self, _testing=False):\n df = self.df.copy()\n\n sentiment_scores = df[self.review_column].apply(self.sentiment_for_one_comment)\n self.sentiment_scores_all = sentiment_scores\n print(\"Average sentiment score: {}\".format(round(sentiment_scores.mean(), 2)))\n print(\"{}% of the comments are positive,; {}% of the comments are neutral; {}% of the comments are negative\".\n format(\n round(100 * sum(sentiment_scores > 0) / len(sentiment_scores), 2),\n round(100 * sum(sentiment_scores == 0) / len(sentiment_scores), 2),\n round((100 * sum(sentiment_scores < 0) / len(sentiment_scores)), 2)\n )\n )\n plt.figure(figsize=(5, 5))\n plt.rc('xtick', labelsize=15)\n plt.rc('ytick', labelsize=15)\n\n fig, ax = plt.subplots()\n ax.hist(sentiment_scores)\n ax.set_title('Sentiment scores of all comments (avg: {})'.format(round(sentiment_scores.mean(), 2)),\n fontsize = 20)\n\n if not _testing:\n plt.show()\n else:\n return fig",
"def determine_spammer_by_percentage(self, reviewer_id):\n cut_value = 0.8\n\n fake_sql = \"select count(*) from reviews_simple where reviewerID = '%s' and fake = 1\" % reviewer_id\n legitimate_sql = \"select count(*) from reviews_simple where reviewerID = '%s' and fake = 0\" % reviewer_id\n\n self.cursor.execute(fake_sql)\n fake_num = self.cursor.fetchone()[0]\n self.cursor.execute(legitimate_sql)\n legitimate_num = self.cursor.fetchone()[0]\n\n total_num = float(fake_num + legitimate_num)\n if total_num == 0:\n return 2 # 2 represents unknown label\n else:\n\n if fake_num/total_num > cut_value:\n return 1\n else:\n return 0",
"def add_comment(self, comment_info):\n self.cursor.execute(\"\"\"SELECT commentID, score FROM comment WHERE loginID = %s AND ISBN = %s\"\"\",\n (comment_info['loginID'], comment_info['ISBN']))\n result = self.cursor.fetchall()\n if result:\n # found a comment, need to update it\n self.cursor.execute(\"\"\"UPDATE comment SET score=%s, message=%s WHERE commentID=%s\"\"\",\n (comment_info['score'], comment_info['message'], result[0][0]))\n self.cursor.execute(\"\"\"UPDATE book SET total_rating_score=total_rating_score+%s WHERE ISBN=%s\"\"\",\n (int(comment_info['score']) - result[0][1], comment_info['ISBN']))\n return_code = 0\n else:\n # no comment found, create a new one\n self.cursor.execute(\"\"\"INSERT INTO comment (ISBN, loginID, score, message, commentDate)\n VALUES (%s,%s,%s,%s,%s)\"\"\", (comment_info['ISBN'], comment_info['loginID'], comment_info['score'],\n comment_info['message'], datetime.datetime.now()))\n self.cursor.execute(\"\"\"UPDATE book SET total_rating_score = total_rating_score+%s, \n num_ratings = num_ratings+1 WHERE ISBN = %s\"\"\", (comment_info['score'], comment_info['ISBN']))\n return_code = 1\n self.db.commit()\n self.update_average_book_rating(comment_info['ISBN'])\n return return_code",
"def accept_comment(self, comment_id):\n raise NotImplementedError()",
"def update_movie_rating_record(movie_id, rating_number, operation):\n movie = models.Movie.objects.get(mid=movie_id)\n if operation == 'new':\n # Update the average_rating and votecount for the movie.\n movie.average_rating = (float(movie.average_rating) * float(movie.votecount) + rating_number) / (\n movie.votecount + 1)\n movie.votecount += 1\n movie.save()\n elif operation == 'delete':\n movie.average_rating = (float(movie.average_rating) * float(movie.votecount) - float(rating_number)) / (\n movie.votecount - 1)\n movie.votecount -= 1\n movie.save()\n elif operation == 'edit':\n movie.average_rating = float(movie.average_rating) + (float(rating_number) / movie.votecount)\n movie.save()",
"def update_score():\n pass",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()"
] | [
"0.66975546",
"0.5790807",
"0.5765285",
"0.5579822",
"0.5506458",
"0.5442541",
"0.5431748",
"0.5343486",
"0.5267532",
"0.52481896",
"0.5233075",
"0.5226774",
"0.5194109",
"0.5163567",
"0.5147953",
"0.5120395",
"0.5106639",
"0.50918096",
"0.5082242",
"0.5078047",
"0.5047185",
"0.50303495",
"0.50236404",
"0.4942714",
"0.49373826",
"0.49278444",
"0.4868635",
"0.48441866",
"0.48285088",
"0.48285088"
] | 0.80525345 | 0 |
Maintenance function for updating all comment usefulness values. This should only be called in the case of a customer account being deleted. | def update_comment_usefulness(self):
self.cursor.execute("""UPDATE comment SET veryUseful=0, useful=0, useless=0, avg_usefulness=NULL""")
self.db.commit()
self.cursor.execute("""SELECT * FROM rates""")
for rating in self.cursor.fetchall():
self.update_comment_score(rating[0], rating[1], rating[2]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_issue_delete_comment_deprecated(self):\n pass",
"def standardize_comments(df, column_name):\n # create a copy of the dataframe\n df_copy = df.copy()\n # remove rows that contain '[deleted]' or '[removed]' in the comment body\n df_copy = df_copy[(df[column_name] != '[removed]') & (df[column_name] != '[deleted]')]\n # remove rows with null values\n df_copy.dropna(inplace=True)\n # remove rows with the bot comment that starts with \"Register to vote\"\n df_copy = df_copy[~df_copy[column_name].str.startswith('Register to vote')]\n # remove rows with 'Thank you for participating in /r/Politics' in the body\n df_copy = df_copy[~df_copy[column_name].str.contains('Thank you for participating in /r/Politics')]\n # remove rows that contain 'I am a bot' in the comment body\n df_copy = df_copy[~df_copy[column_name].str.contains('I am a bot')]\n # replace characters in comment bodies\n df_copy[column_name] = df_copy[column_name].str.replace(r\"http\\S+\", \"\")\n df_copy[column_name] = df_copy[column_name].str.replace(r\"http\", \"\")\n df_copy[column_name] = df_copy[column_name].str.replace(r\"@\\S+\", \"\")\n df_copy[column_name] = df_copy[column_name].str.replace(r\"[^A-Za-z0-9(),!?@\\'\\`\\\"\\_\\n]\", \" \")\n df_copy[column_name] = df_copy[column_name].str.replace(\">\", \"\")\n df_copy[column_name] = df_copy[column_name].str.replace(\" \", \" \")\n df_copy[column_name] = df_copy[column_name].str.replace(\" \", \" \")\n df_copy[column_name] = df_copy[column_name].str.replace(\" \", \" \")\n df_copy[column_name] = df_copy[column_name].str.replace(\"\\\"\", \"\")\n df_copy[column_name] = df_copy[column_name].str.replace(r\"@\", \"at\")\n df_copy[column_name] = df_copy[column_name].str.lower()\n # remove rows with empty comment strings\n df_copy = df_copy[df_copy[column_name] != '']\n # remove rows with comment strings containing only a space\n df_copy = df_copy[df_copy[column_name] != ' ']\n return df_copy",
"def _comment_check():\n for comment_row in unused_list:\n ddi_index = views_index[comment_row[15]]\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]]\\\n and comment_row[12] == '':\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]] and \\\n comment_row[12] != '':\n import_merge.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if comment_row[12] != \\\n ddi_data[ddi_index][comment_row[1]]['comment']:\n import_override.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue",
"def test_issue_edit_comment_deprecated(self):\n pass",
"def cleanup_comments(comments):\n clean_comments = []\n\n if comments:\n for comment in comments:\n cleaned_up = sub(r'\\n\\n {8}\\n {8}\\n {12}\\n {16}\\n {16}\\n {12}\\nEdit', '', comment)\n clean_comments.append(cleaned_up)\n\n return clean_comments",
"def update_comments(self):\n self.nb_comments = self.comments.count()\n self.save()",
"def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()",
"def update_comment_only(self, comment, incident_id):\n self.cursor.execute(\"\"\"UPDATE incidents SET comment='%s' WHERE incident_id='%s'\"\"\"%(comment ,incident_id))\n self.commiting()",
"def tidy(self):\n \n replies = self.bot.user.me().comments.new(\n limit=100)\n \n for reply in replies:\n \n if reply.score < 0:\n \n with open(\"deleted.csv\", \"a\", encoding = \"UTF-8\") as removed:\n \n deleted = clevercsv.writer(removed)\n \n if removed.tell() == 0:\n deleted.writerow(\n [\"Comment\", \n \"Parent\", \n \"Thread\", \n \"Subreddit\", \n \"Time\", \n \"Score\"])\n \n deleted.writerow(\n [f\"{reply.body}\", \n f\"{reply.parent().body}\", \n f\"{reply.submission.title}\", \n f\"{reply.subreddit}\", \n f\"{pendulum.from_timestamp(reply.created_utc)}\", \n f\"{reply.score}\"])\n\n reply.delete()",
"def run_standardize_comments():\n df = pd.read_csv('politics_past_30_months_comments_cleaned.csv')\n df = df.drop(['Unnamed: 0'], axis=1)\n\n standardized_df = standardize_comments(df, 'body')\n print(standardized_df.head())\n print()\n print('original length:', len(df))\n print('standardized length:', len(standardized_df))\n print('removed', len(df) - len(standardized_df), 'comments')\n\n # THIS MIGHT BRING BACK THE UTF-8 ENCODING EMOJIS. MIGHT HAVE TO WRITE TO CSV IN ASCII\n standardized_df.to_csv('politics_past_30_months_comments_cleaned_standardized.csv')",
"def purify_comments(csv_file, keep_stops=False, POS=False, lemmatize=False, popular=0):\r\n\r\n df = pd.read_csv(csv_file)\r\n df = df.loc[df[\"author\"] != \"[deleted]\"] # trim out comments whose authors have deleted their accounts\r\n df = df.loc[df[\"score\"] != \"score\"] # this is an error in the code when building new csv_files from dask\r\n\r\n # extracts only the popular comments\r\n if popular > 0:\r\n df = df.loc[pd.to_numeric(df[\"score\"]) > popular]\r\n\r\n comments = df[\"body\"]\r\n del df # no need for this anymore, and it'll merely eat up memory\r\n\r\n nlp = en_core_web_sm.load()\r\n\r\n revised_comments = []\r\n for comment in comments.astype('unicode').values:\r\n comment = comment[1:] # remove the initial 'b' bytes-representation character\r\n comment = comment.encode(\"utf-8-sig\").decode(\"utf-8-sig\") # get rid of BOM character\r\n comment = comment.lower().replace(r\"\\n\", r\"\").replace(r'\"', r'')\r\n\r\n tokens = nlp(comment)\r\n\r\n # actual specification section\r\n for sent in tokens.sents:\r\n\r\n if POS: # conversion of comments to tokens/lemmas-POS tags\r\n if lemmatize:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n else:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n elif lemmatize: # just lemmatization\r\n if keep_stops:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n else: # nothing but removal of stop words (or not)\r\n if keep_stops:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n revised_comments.append(\" \".join(revised_tokens))\r\n\r\n return pd.Series(revised_comments)",
"def comment():",
"def _put(self, data, comment_id, obj):\n comment = obj\n comment_id = int(comment_id)\n\n # Ensure that user and customer have not been changed (they can only be written once)\n if data['user_id'] != comment['user_id']:\n flask_restful.abort(400, message=f\"Bad Request - cannot change user ID in \"\n f\"comment '{comment_id}'\")\n if data['ticket_id'] != comment['ticket_id']:\n flask_restful.abort(400, message=f\"Bad Request - cannot change ticket ID in \"\n f\"comment '{comment_id}'\")\n\n # Remove keys that are not in the new resource\n keys_to_remove = [stored_key for stored_key in comment.keys()\n if stored_key not in data]\n for old_key in keys_to_remove:\n DB_COMMENT_TABLE.update(delete(old_key), doc_ids=[comment_id])\n DB_COMMENT_TABLE.update(data, doc_ids=[comment_id])\n return Comment.get_self_url(comment_id=comment_id)",
"def _testCommentRating(self):\n\n try:\n host = models.Host.objects.all()[0]\n\n comment = models.Comment(text='test', host=host)\n comment.save()\n\n types = models.RatingType.objects.all()\n\n items = []\n for value, type in zip([3, 4, 5], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() - 4.0 < .0001, comment.rating()\n\n for tmp_obj in items:\n tmp_obj.delete()\n\n items = []\n for value, type in zip([3, 3], types):\n tmp_obj = models.Rating(comment=comment, type=type, value=value)\n tmp_obj.save()\n items.append(tmp_obj)\n\n assert comment.rating() == 3.0, comment.rating()\n\n finally:\n for tmp_obj in items:\n tmp_obj.delete()\n\n comment.delete()",
"def comments(self, comments):\n\n self.container['comments'] = comments",
"def edit_comment():\n # Implement me!\n\n logger.info(\"vars: %r\" % request.vars)\n logger.info(\"vars_comment_text: %r\" % request.vars.comment_text)\n logger.info(\"vars id: %r\" % request.vars.comment_id)\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n\n #comment.comment_text = request.vars.comment_text\n #comment.edited_on = datetime.datetime.utcnow()\n db(db.Comments.id == request.vars.comment_id).update(comment_text=request.vars.comment_text, edited_on=datetime.datetime.utcnow())\n db.commit()\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n return \"ok\"",
"def can_update_comments(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_update_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def delete_comments(redditor):\n\n for index, comment in enumerate(redditor.comments.new(limit=None)):\n print(\"Deleting comment {}\".format(index))\n comment.edit(\"-\")\n comment.delete()",
"def deleteComments(self: Self, event: Event = None) -> None:\n #@+<< deleteComments docstring >>\n #@+node:ekr.20171123135625.37: *3* << deleteComments docstring >>\n #@@pagewidth 50\n #@-<< deleteComments docstring >>\n c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper\n #\n # \"Before\" snapshot.\n bunch = u.beforeChangeBody(p)\n #\n # Initial data.\n head, lines, tail, oldSel, oldYview = self.getBodyLines()\n if not lines:\n g.warning('no text selected')\n return\n # The default language in effect at p.\n language = c.frame.body.colorizer.scanLanguageDirectives(p)\n if c.hasAmbiguousLanguage(p):\n language = c.getLanguageAtCursor(p, language)\n d1, d2, d3 = g.set_delims_from_language(language)\n #\n # Calculate the result.\n changed, result = False, []\n if d1:\n # Remove the single-line comment delim in front of each line\n d1b = d1 + ' '\n n1, n1b = len(d1), len(d1b)\n for s in lines:\n i = g.skip_ws(s, 0)\n if g.match(s, i, d1b):\n result.append(s[:i] + s[i + n1b :])\n changed = True\n elif g.match(s, i, d1):\n result.append(s[:i] + s[i + n1 :])\n changed = True\n else:\n result.append(s)\n else:\n # Remove the block comment delimiters from each line.\n n2, n3 = len(d2), len(d3)\n for s in lines:\n i = g.skip_ws(s, 0)\n j = s.find(d3, i + n2)\n if g.match(s, i, d2) and j > -1:\n first = i + n2\n if g.match(s, first, ' '):\n first += 1\n last = j\n if g.match(s, last - 1, ' '):\n last -= 1\n result.append(s[:i] + s[first:last] + s[j + n3 :])\n changed = True\n else:\n result.append(s)\n if not changed:\n return\n #\n # Set p.b and w's text first.\n middle = ''.join(result)\n p.b = head + middle + tail # Sets dirty and changed bits.\n w.setAllText(head + middle + tail)\n #\n # Set the selection range and scroll position.\n i = len(head)\n j = ins = max(i, len(head) + len(middle) - 1)\n w.setSelectionRange(i, j, insert=ins)\n w.setYScrollPosition(oldYview)\n #\n # \"after\" snapshot.\n u.afterChangeBody(p, 'Indent Region', bunch)",
"def delete(self, *args, **kwargs):\n self.item.comments_total -= 1\n self.item.save()\n super(Comment, self).delete(*args, **kwargs)",
"def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)",
"def test_issue_delete_comment(self):\n pass",
"def update_comments(comments, account_name, post_url):\n inc_number = 0\n for index, comment in comments.iterrows():\n # increment + 1\n inc_number = inc_number + 1\n # get preprocessed comment\n comment_spaces, comment_no_stopwords = preprocess_comment(comment['comment'])\n # get sentiment score from comment\n sentiment_score = get_sentiment(comment_no_stopwords)\n # update collection with comments\n collection.update_one(\n {\n 'Codename': account_name,\n 'Posts.URL': post_url\n },\n {\n '$push': {\n 'Posts.$.All Comments': {'comment_id': inc_number,\n 'user': comment['user'],\n 'comment': comment['comment'],\n 'comment_no_stopwords': comment_no_stopwords,\n 'comment_spaces': comment_spaces,\n 'like': comment['like'],\n 'sentiment_score': sentiment_score\n }\n }\n }\n )",
"def update_comment_in_doc(doc):\n\n\t# only comments get updates, not likes, assignments etc.\n\tif doc.doctype == \"Comment\" and doc.comment_type != \"Comment\":\n\t\treturn\n\n\tdef get_truncated(content):\n\t\treturn (content[:97] + \"...\") if len(content) > 100 else content\n\n\tif doc.reference_doctype and doc.reference_name and doc.content:\n\t\t_comments = get_comments_from_parent(doc)\n\n\t\tupdated = False\n\t\tfor c in _comments:\n\t\t\tif c.get(\"name\") == doc.name:\n\t\t\t\tc[\"comment\"] = get_truncated(doc.content)\n\t\t\t\tupdated = True\n\n\t\tif not updated:\n\t\t\t_comments.append(\n\t\t\t\t{\n\t\t\t\t\t\"comment\": get_truncated(doc.content),\n\t\t\t\t\t# \"comment_email\" for Comment and \"sender\" for Communication\n\t\t\t\t\t\"by\": getattr(doc, \"comment_email\", None) or getattr(doc, \"sender\", None) or doc.owner,\n\t\t\t\t\t\"name\": doc.name,\n\t\t\t\t}\n\t\t\t)\n\n\t\tupdate_comments_in_parent(doc.reference_doctype, doc.reference_name, _comments)",
"def handle_free_comments(self):\r\n comments = FreeComment.objects.all()\r\n for c in comments:\r\n new = FreeThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n name = c.person_name,\r\n website = '',\r\n email = '',\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = c.approved\r\n )\r\n new.save()",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def modify_user_comment(username, comment_id, comment):\n result = get_comment_by_id(username, comment_id)\n result[\"comment\"] = comment\n result[\"date_updated\"] = datetime.datetime.now()",
"def test_set_comment_to_deleted(mocker, reddit_comment_obj):\n patched_partial_update_task = mocker.patch(\n \"search.search_index_helpers.update_document_with_partial\"\n )\n patched_increment_task = mocker.patch(\n \"search.search_index_helpers.increment_document_integer_field\"\n )\n set_comment_to_deleted(reddit_comment_obj)\n assert patched_partial_update_task.delay.called is True\n assert patched_partial_update_task.delay.call_args[0] == (\n gen_comment_id(reddit_comment_obj.id),\n {\"deleted\": True},\n COMMENT_TYPE,\n )\n assert patched_increment_task.delay.called is True\n assert patched_increment_task.delay.call_args[0] == (\n gen_post_id(reddit_comment_obj.submission.id),\n )\n assert patched_increment_task.delay.call_args[1] == {\n \"field_name\": \"num_comments\",\n \"incr_amount\": -1,\n \"object_type\": POST_TYPE,\n }",
"def test_suppress_comment_in_db(self):\n runid = self._runid\n logging.debug(\"Get all run results from the db for runid: \" +\n str(runid))\n\n expected_file_path = os.path.join(self._test_directory,\n \"suppress.expected\")\n\n hash_to_suppress_msgs = {}\n with open(expected_file_path, 'r', encoding=\"utf-8\",\n errors=\"ignore\") as expected_file:\n for line in expected_file:\n src_code_info = line.strip().split('||')\n\n status = None\n if len(src_code_info) == 4:\n # Newest source code comment format where status is given.\n bug_hash, _, msg, status = src_code_info\n elif len(src_code_info) == 3:\n # Old format where review status is not given.\n bug_hash, _, msg = src_code_info\n else:\n # Oldest source code comment format where status and file\n # name are not given.\n bug_hash, msg = src_code_info\n\n rw_status = ReviewStatus.FALSE_POSITIVE\n if status == 'confirmed':\n rw_status = ReviewStatus.CONFIRMED\n elif status == 'intentional':\n rw_status = ReviewStatus.INTENTIONAL\n\n hash_to_suppress_msgs[bug_hash] = {'message': msg,\n 'status': rw_status}\n\n run_results = get_all_run_results(self._cc_client, runid)\n logging.debug(\"Run results:\")\n [logging.debug(x) for x in run_results]\n self.assertIsNotNone(run_results)\n self.assertNotEqual(len(run_results), 0)\n\n for bug_hash in hash_to_suppress_msgs:\n logging.debug(\"tesing for bug hash \" + bug_hash)\n expected_data = hash_to_suppress_msgs[bug_hash]\n report_data_of_bug = [\n report_data for report_data in run_results\n if report_data.bugHash == bug_hash]\n self.assertEqual(len(report_data_of_bug), 1)\n report_data = report_data_of_bug[0]\n\n # Check the stored suppress comment\n self.assertEqual(report_data.reviewData.comment,\n expected_data['message'])\n self.assertEqual(report_data.reviewData.status,\n expected_data['status'])\n\n # Even review status with source code comment can change.\n review_comment = \"This is really a bug\"\n status = ReviewStatus.CONFIRMED\n success = self._cc_client.changeReviewStatus(\n report_data.reportId, status, review_comment)\n\n self.assertTrue(success)\n\n # Review status without source code comment can change.\n uncommented_report = next(filter(\n lambda r: r.reviewData.status == ReviewStatus.UNREVIEWED,\n iter(run_results)))\n self._cc_client.changeReviewStatus(\n uncommented_report.reportId,\n ReviewStatus.CONFIRMED,\n 'This is a known issue')\n\n # Get the results to compare from the primary run\n updated_results = get_all_run_results(self._cc_client, self._runid)\n # Get the results from the duplicated run\n updated_results_dup = \\\n get_all_run_results(self._cc_client, self._runid_dup)\n hash_to_report_updated = {r.bugHash: r for r in updated_results}\n self.assertIsNotNone(updated_results)\n self.assertNotEqual(len(updated_results), 0)\n\n # Review status of reports with source code comment can change\n # as they are stored as individual comments\n for bug_hash in hash_to_suppress_msgs:\n self.assertEqual(\n ReviewStatus.CONFIRMED,\n hash_to_report_updated[bug_hash].reviewData.status)\n self.assertEqual(\n \"This is really a bug\",\n hash_to_report_updated[bug_hash].reviewData.comment)\n\n # Review status of reports without source code comment changes.\n uncommented_report_updated = next(filter(\n lambda r: r.bugHash == uncommented_report.bugHash,\n iter(updated_results)))\n\n self.assertEqual(\n uncommented_report_updated.reviewData.status,\n ReviewStatus.CONFIRMED)\n self.assertEqual(\n uncommented_report_updated.reviewData.comment,\n 'This is a known issue')\n\n # Review 
status of the same report in the duplicate run must not change\n uncommented_report_updated_dup = next(filter(\n lambda r: r.bugHash == uncommented_report.bugHash,\n iter(updated_results_dup)))\n\n self.assertEqual(\n uncommented_report_updated_dup.reviewData.status,\n ReviewStatus.UNREVIEWED)\n\n # Check the same project again.\n codechecker_cfg = env.import_test_cfg(\n self._test_workspace)['codechecker_cfg']\n\n initial_test_project_name = self._run_name\n\n ret = codechecker.check_and_store(codechecker_cfg,\n initial_test_project_name,\n self._test_project_path)\n self.assertEqual(0, ret, \"Could not store test data to the server.\")\n\n # Get the results to compare.\n updated_results = get_all_run_results(self._cc_client, self._runid)\n self.assertIsNotNone(updated_results)\n self.assertNotEqual(len(updated_results), 0)\n\n for bug_hash in hash_to_suppress_msgs:\n expected_data = hash_to_suppress_msgs[bug_hash]\n report_data = [report_data for report_data in updated_results\n if report_data.bugHash == bug_hash][0]\n\n # Check that source code comments in the database are changed back\n # after storage.\n self.assertEqual(report_data.reviewData.comment,\n expected_data['message'])\n self.assertEqual(report_data.reviewData.status,\n expected_data['status'])",
"def test_update_post_removal_for_comments(mocker, reddit_submission_obj):\n patched_task = mocker.patch(\n \"search.search_index_helpers.update_field_values_by_query\"\n )\n field_name, field_value = (\"field1\", \"value1\")\n update_field_for_all_post_comments(\n reddit_submission_obj, field_name=field_name, field_value=field_value\n )\n assert patched_task.delay.called is True\n assert patched_task.delay.call_args[1] == dict(\n query={\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match\": {\"object_type\": COMMENT_TYPE}},\n {\"match\": {\"post_id\": reddit_submission_obj.id}},\n ]\n }\n }\n },\n field_dict={field_name: field_value},\n object_types=[COMMENT_TYPE],\n )"
] | [
"0.5566898",
"0.5496297",
"0.5447388",
"0.53928477",
"0.53862935",
"0.5370087",
"0.52498823",
"0.5237984",
"0.5140959",
"0.5121164",
"0.5116408",
"0.5115946",
"0.50965947",
"0.50806594",
"0.5073641",
"0.5072741",
"0.50522643",
"0.5040005",
"0.5038804",
"0.5034418",
"0.5019661",
"0.50040615",
"0.50001764",
"0.498752",
"0.4977536",
"0.497021",
"0.497021",
"0.49678075",
"0.49475238",
"0.4946945"
] | 0.6196248 | 0 |
Given a single login ID, see if that customer exists in the database. Return true if so, false if not. | def search_customers(self, loginID):
self.cursor.execute("""SELECT COUNT(*) FROM customercredentials WHERE loginID = %s""", (loginID,))
if self.cursor.fetchone()[0]:
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_customer_id_exist(customer_id) -> bool:\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Customers WHERE id_customer=?)\", (customer_id,))\n return cursor.fetchone()[0] == 1",
"def is_customer_exists(login, email):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT exists(SELECT 1 FROM Customers WHERE login=?)\", (login,))\n if cursor.fetchone()[0] == 1:\n return my_config.CUSTOMER_LOGIN\n\n cursor.execute(\"SELECT exists(SELECT 1 FROM Customers WHERE email=?)\", (email,))\n if cursor.fetchone()[0] == 1:\n return my_config.CUSTOMER_EMAIL\n return my_config.CUSTOMER_ABSENT",
"def exists(cls, customer_id):\n customer_id = int(customer_id)\n cust = DB_CUSTOMER_TABLE.get(doc_id=customer_id)\n if not cust:\n raise ValueError(f\"unknown customer '{customer_id}'\")\n return customer_id",
"def existAccount(login:str) -> bool:\n\n query = f\"SELECT * FROM {Account.tablename} WHERE {Account.loginCol} = ?\"\n\n try:\n db = DataBaseConnection()\n db.cursor.execute(query, login)\n except Exception as error:\n return {\"flag\": \"queryError\", \"message\": f\"{error}\"} \n else:\n row = db.cursor.fetchone()\n\n if row:\n return True\n else:\n return False",
"def name_exists(self, login):\n\t\treturn login in self.users_by_name",
"def user_exists(self, login):\n\t\tif login in self.users_by_name and isinstance(self.users_by_name[login], VDOM_user):\n\t\t\treturn True\n\t\treturn False",
"def is_customer(self) -> bool:\n return self.customer_id is not None",
"def has_customer(self):\n return self.customer is not None",
"def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False",
"def remove_customer(self, loginID):\n try:\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n if not self.cursor.fetchone()[0]:\n return False\n self.cursor.execute(\"\"\"DELETE FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n self.db.commit()\n self.cursor.execute(\"\"\"DELETE FROM customerpersonal WHERE phone NOT IN \n (SELECT phone FROM customercredentials)\"\"\")\n self.db.commit()\n self.update_book_scores()\n self.update_comment_usefulness()\n return True\n except Exception as e:\n return False",
"def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()",
"def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0",
"def user_exists(conn, account):\n cur = conn.cursor()\n cur.execute(f\"SELECT * FROM users WHERE account = '{account}'\")\n\n rows = cur.fetchall()\n\n if len(rows) > 0:\n return True\n else:\n return False",
"def checkIfUserExists(self, userID):\n return self.db.select_user(userID)",
"def member_in_database(uniqname, conn):\n with conn.cursor() as cur:\n cur.execute(\n 'SELECT * '\n 'FROM members '\n 'WHERE uniqname = %s',\n (uniqname,)\n )\n member_exists = cur.rowcount > 0\n\n return member_exists",
"def exists_user(self, tenant_name, username):\n base = basedn.people_dn(username, tenant_name)\n return self.exists_entry(base)",
"def exist_identity_match(client, table_id):\n try:\n client.get_table(table_id)\n return True\n except NotFound:\n return False",
"def is_customer(self):\n return self.user_type == 'C'",
"def is_customer(self):\n return self.rol == ProfileRoles.CUSTOMER",
"def delete_customer(customer_id):\n del_query = Customer.get(Customer.customer_id == customer_id)\n return bool(del_query.delete_instance())",
"def exists(username):\n if Users.query.filter_by(username=username).first():\n return True\n return False",
"def findUniqueUserID(userID):\n connector = appEngine.connect()\n userIdentifier = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID).fetchone()\n #userIdentifier = db.session.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", userID)\n if type(userIdentifier) == type(None):\n return False # this means there is no user in the database yet\n else:\n return True # this means there is a user in the database",
"def user_exists(mail_or_id) -> bool:\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n if type(mail_or_id) is int:\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE id=?\n \"\"\", (mail_or_id,))\n else: #mail\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE mail=?\n \"\"\", (mail_or_id,))\n \n conn.commit()\n \n exists = bool(len(list(c)))\n \n conn.close()\n\n return exists",
"def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def _user_exists(self, username):\n return self.db.query(User).filter_by(name=username).first() is not None",
"def test_existence(self):\n self.assertTrue(User.objects.filter(username='rcm').exists())",
"def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False",
"def exists(self, key):\n try:\n return (self.salt + str(key)) in self.DB\n except KeyError:\n return False",
"def userExists(self, username):\n data = db.session.query(User.id).filter_by(username = username).first()\n if data is None:\n return False\n else:\n return True",
"def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False"
] | [
"0.77798474",
"0.76868355",
"0.7255544",
"0.67962533",
"0.67014456",
"0.6624874",
"0.66193676",
"0.65199184",
"0.64875746",
"0.6345606",
"0.6319818",
"0.6297829",
"0.6292088",
"0.6242173",
"0.61170304",
"0.6070107",
"0.6045518",
"0.6015249",
"0.6007035",
"0.5990626",
"0.59881854",
"0.59804934",
"0.5972888",
"0.5902818",
"0.58832574",
"0.5876764",
"0.5874232",
"0.57960105",
"0.579272",
"0.5791708"
] | 0.77091205 | 1 |
Update the trust status given both usernames and the status. If a relationship between the two usernames exists, either update it when the new status differs, OR delete the relationship when the status is the same (since we are removing the trust status). Otherwise, just create a new relationship. | def update_trust_status(self, loginID, otherLoginID, status):
self.cursor.execute("""SELECT trustStatus FROM trusts WHERE loginID=%s AND otherLoginID=%s""",
(loginID, otherLoginID))
result = self.cursor.fetchone()
if result:
if result[0] == status:
self.cursor.execute("""DELETE FROM trusts WHERE loginID=%s AND otherLoginID=%s""",
(loginID, otherLoginID))
else:
self.cursor.execute("""UPDATE trusts SET trustStatus=%s WHERE loginID=%s AND otherLoginID=%s""",
(status, loginID, otherLoginID))
else:
self.cursor.execute("""INSERT INTO trusts VALUES (%s, %s, %s)""", (loginID, otherLoginID, status))
self.db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_signing_cert(self, cert_id, status, user_name=None):\r\n params = {'CertificateId' : cert_id,\r\n 'Status' : status}\r\n if user_name:\r\n params['UserName'] = user_name\r\n return self.get_response('UpdateSigningCertificate', params)",
"def _update_status(self, status: dict):\n with generate_retry_session() as session:\n session.headers.update({\n 'Authorization': 'Bearer {}'.format(self.platform_auth_token)\n })\n url = '{}/training/definitions/{}/jobs/{}/status'.format(\n ORGANIZATION_ENDPOINT, self.job_definition_name, self.training_job_id)\n res = session.put(url, json=status)\n res.raise_for_status()",
"def update_pet_status(email, pet_name, pet_status):\n\n user_id = db.session.query(User.user_id).filter(User.email==email, Pet.pet_name==pet_name).first()\n\n # Update the status of the pet\n status_update = db.session.query(Pet).filter(Pet.user_id == user_id, Pet.pet_name == pet_name).update({Pet.pet_status: pet_status})\n \n db.session.commit()\n\n return status_update",
"def change_status(publisher_name, package_name, status=PackageStateEnum.active):\n try:\n data = Package.query.join(Publisher). \\\n filter(Publisher.name == publisher_name,\n Package.name == package_name).one()\n data.status = status\n db.session.add(data)\n db.session.commit()\n return True\n except Exception as e:\n app.logger.error(e)\n return False",
"def assertCanUpdateStatus(self, user, transactions, old_status, new_status):\n self.login(user)\n run = False\n\n for transaction in [x for x in transactions if x.status == old_status]:\n res = self.patch(\n f\"/associations/transactions/{transaction.id}/\", {\"status\": new_status}\n )\n self.assertStatusCode(res, 200)\n\n transaction = Transaction.objects.get(id=transaction.id)\n self.assertEqual(\n transaction.status,\n new_status,\n msg=f\"User {user} did not manage to update the status of transaction {transaction.id} \"\n f\"from {old_status}to {new_status}.\",\n )\n # Revert the change.\n transaction.status = old_status\n transaction.save()\n\n run = True\n\n return run",
"def change_user_status(self, status, client):\n if self.verify_status(status, client):\n client.set_status(status)\n self.send_message('Estado actualizado exitosamente.', client.get_socket())",
"def set_status(trades, status):\n acm.BeginTransaction()\n try:\n for trade in trades:\n msg = \"Changing status on trade {0} ({1}) to {2}\"\n print(msg.format(trade.Oid(), trade.Instrument().Name(), status))\n trade.Status(status)\n trade.Commit()\n acm.CommitTransaction()\n print(\"Statuses successfully changed\")\n except Exception as ex:\n print(\"Failed to change statuses on pswap trades: {0}\".format(ex))\n acm.AbortTransaction()",
"def update_server_status(userStatusObj):\n oldStatusStr = get_from_db(key='status')\n if oldStatusStr:\n oldStatusObj = json.loads(oldStatusStr)\n mergeObj = {**oldStatusObj, **userStatusObj}\n set_to_db(key='status', str_value=json.dumps(mergeObj))",
"def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()",
"def save(self, **kwargs):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()",
"def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):\n print \"status changed for\",username",
"def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()",
"def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()",
"def update_from_existing(self, existing_status=None):\n if isinstance(existing_status, self.__class__):\n self.status = self.status or existing_status.status\n self.owner = self.owner or existing_status.owner\n self.urgency = self.urgency or existing_status.urgency",
"def update_status(application_id, new_status, user_id):\n if not new_status:\n return {\"status\": \"You must provide a non-empty new status.\"}\n\n application = Application.query.filter_by(id=application_id, user_id=user_id).first()\n application.status = new_status\n user_applications = Application.query.filter_by(user_id=application.user_id).all()\n return [application.to_dict() for application in user_applications]",
"def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status",
"def update_status(self, obj_type, obj_id, root_lb_id,\n provisioning_status, operating_status,\n agent_info, obj=None):\n\n msg = {'info': {'service_type': lb_const.SERVICE_TYPE,\n 'context': agent_info['context']},\n 'notification': [{'resource': agent_info['resource'],\n 'data':{'obj_type': obj_type,\n 'obj_id': obj_id,\n 'notification_type': 'update_status',\n 'root_lb_id': root_lb_id,\n 'provisioning_status':\n provisioning_status,\n 'operating_status':\n operating_status,\n obj_type: obj}}]\n }\n LOG.info(\"Sending Notification 'Update Status' \"\n \"for resource: %(resource)s with Provisioning status:\"\n \"%(p_status)s and Operating status:%(o_status)s\",\n {'resource': agent_info['resource'],\n 'p_status': provisioning_status,\n 'o_status': operating_status})\n self.notify._notification(msg)",
"def update_user(self, username):\n parser_update.add_argument('email', type=validate_email,\n required=False, nullable=False,\n help=\"Email must be formatted correctly\")\n\n parser_update.add_argument('phoneNumber', type=validate_phonenumber,\n required=False, nullable=False,\n help=\"Enter a valid phone number\")\n\n parser_update.add_argument('firstname', type=validate_characters,\n required=False, nullable=False,\n help=\"First name must be formatted correctly\")\n\n parser_update.add_argument('lastname', type=validate_characters,\n required=False, nullable=False,\n help=\"Last name must be formatted correctly\")\n\n parser_update.add_argument('othernames', type=validate_characters,\n required=False, nullable=False,\n help=\"Other name must be formatted correctly\")\n\n user = self.get_user(username)\n if user is None:\n return None\n\n args = parser_update.parse_args()\n new_data = {\n 'email': request.json.get('email', user['email']).lower(),\n 'firstname': request.json.get('firstname', user['firstname']).capitalize(),\n 'lastname': request.json.get('lastname', user['lastname']).capitalize(),\n 'othernames': request.json.get('othernames', user['othernames']).capitalize(),\n 'phoneNumber': request.json.get('phoneNumber', user['phonenumber']),\n }\n\n getEmail = self.get_user(new_data['email'])\n verification_status = True\n\n if user['email'] != new_data['email']:\n if getEmail is not None:\n return 'email exists'\n verification_status = False\n\n query = \"\"\"UPDATE users SET firstname=%s,lastname=%s,othernames=%s,\\\n email=%s,phonenumber=%s,emailverified=%s WHERE username=%s\"\"\"\n values = new_data['firstname'], new_data['lastname'], new_data['othernames'], new_data['email'], new_data['phoneNumber'], verification_status, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return new_data",
"def assertCannotUpdateStatus(self, user, transactions, old_status, new_status):\n self.login(user)\n run = False\n\n for transaction in [x for x in transactions if x.status == old_status]:\n res = self.patch(\n f\"/associations/transactions/{transaction.id}/\", {\"status\": new_status}\n )\n self.assertStatusCodeIn(res, [404, 403])\n self.assertEqual(\n Transaction.objects.get(id=transaction.id).status,\n old_status,\n msg=f\"User {user} did manage to update the status of transaction {transaction.id}\"\n f\"from {old_status}to {new_status}.\",\n )\n run = True\n\n return run",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def assign_ahj_official_status(user, ahjs):\n # The AHJPK of the AHJs the User is related to.\n all_time_ahjs = AHJUserMaintains.objects.filter(UserID=user)\n # Restore assignment for AHJs previously unassigned AHJs that have been reassigned.\n all_time_ahjs.filter(MaintainerStatus=False, AHJPK__in=ahjs).update(MaintainerStatus=True)\n current_ahjpks = all_time_ahjs.filter(MaintainerStatus=True).values_list('AHJPK', flat=True)\n # Create relations for the newly added AHJs.\n newly_assigned_ahjs = [ahj for ahj in ahjs if ahj.AHJPK not in current_ahjpks]\n AHJUserMaintains.objects.bulk_create([\n AHJUserMaintains(UserID=user, AHJPK=ahj, MaintainerStatus=True) for ahj in newly_assigned_ahjs])\n # Delete relations of the removed AHJs.\n ahjs_to_unassign = all_time_ahjs.exclude(AHJPK__in=ahjs)\n ahjs_to_unassign.update(MaintainerStatus=False)",
"def updateStatus(self, status):\n pass",
"async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code",
"def proc_status_effect(\n self,\n status_att=None,\n status_val=False,\n resist=None\n ):\n\n # If a resist attribute is passed, the player\n # will attempt to resist the status change\n\n if resist is not None:\n succ,bonus = RandomRoll(\n self,\n getattr(self,resist),\n 75\n )\n else:\n succ = False\n\n if succ:\n pass\n else:\n setattr(self,status_att,status_val)",
"def convert_dicts_in_status_to_obj(status: Status) -> Status:\n keys_to_update = [\"urls\", \"user\", \"user_mentions\", \"quoted_status\"]\n for key in keys_to_update:\n if key == \"urls\":\n status.urls = [Url(**url) for url in status.__getattribute__(key)]\n elif key == \"user\":\n status.user = User(**status.__getattribute__(key))\n elif key == \"user_mentions\":\n status.user_mentions = [\n User(**user) for user in status.__getattribute__(key)\n ]\n elif key == \"quoted_status\":\n status.quoted_status = (\n convert_dicts_in_status_to_obj(\n status=Status(**status.__getattribute__(key))\n )\n if status.__getattribute__(key)\n else None\n )\n return status",
"def update_status(request_id, status):\n pass",
"def setStatuses(self, urgency, status, comment, newOwner, currentUser, ruleUIDs, searchID, reviewTime, existing_statuses, capabilities, session_key):\n\n # Print a log message noting that an operation is about to happen\n if ruleUIDs is not None and searchID is not None:\n logger.info(\"About to edit events matching search %s (though only %d events are to be modified)\", searchID, len(ruleUIDs))\n if searchID is None and (ruleUIDs is not None and len(ruleUIDs) > 0):\n logger.info(\"About to edit events by ID (%d events are to be modified)\", searchID, len(ruleUIDs))\n else:\n logger.info(\"About to edit events matching all events matching search %s\", searchID)\n\n # Refresh the correlation searches list so we don't have to later\n self.refreshCorrelationSearches(session_key)\n\n # Perform the changes\n if searchID is None:\n result = self.setStatusByIDs(ruleUIDs, urgency, status, comment, newOwner, reviewTime, session_key, currentUser, existing_statuses=existing_statuses)\n logger.info(\"Done editing events\")\n return result\n else:\n result = self.setStatusBySearchID(searchID, urgency, status, comment, newOwner, reviewTime, capabilities, session_key, currentUser, force_refresh=False, rule_ids_to_change=ruleUIDs, existing_statuses=existing_statuses)\n logger.info(\"Done editing events matching search %s\", searchID)\n return result",
"def UpdateStatus(self, status):\r\n self.status.update(status)",
"def updateAccountStatus(login:str, status:int=0)->bool:\n\n query = f\"UPDATE {Account.tablename} SET {Account.statusCol} = ? WHERE {Account.loginCol} = ?\"\n\n try:\n db = DataBaseConnection()\n db.cursor.execute(query, status, login)\n\n if status == 1: # activation\n newActivationDate = datetime.now().date()\n newExpirationDate = (newActivationDate + datetimePack.timedelta(days=155)).date() # warning : + 5 mois\n\n newActivationDate = str(newActivationDate)\n newExpirationDate = str(newExpirationDate)\n\n query = f\"UPDATE {Account.tablename} SET {Account.activationDateCol} = ?, {Account.expirationDateCol} = ? WHERE {Account.loginCol} = ?\"\n\n db.cursor.execute(query, newActivationDate, newExpirationDate, login)\n\n except Exception as error:\n return {\"flag\": \"queryError\", \"message\": f\"{error}\"}\n else:\n db.conn.commit()\n return True"
] | [
"0.51980627",
"0.51476276",
"0.5094613",
"0.5001692",
"0.49981746",
"0.4995808",
"0.49869204",
"0.4980151",
"0.49481377",
"0.4947407",
"0.49399808",
"0.49398693",
"0.49398693",
"0.487932",
"0.48444963",
"0.48431697",
"0.4832812",
"0.48214483",
"0.47978377",
"0.47910407",
"0.47727296",
"0.4763321",
"0.47611174",
"0.475464",
"0.47436088",
"0.47040296",
"0.47027782",
"0.46897656",
"0.46853897",
"0.46757203"
] | 0.6515254 | 0 |
Given a single login ID, get basic info (name, number of orders made, number of books purchased, comments, total trust score, number trusted and untrusted, etc.). AVOID sensitive information (address, | def get_basic_userinfo(self, loginID, my_id):
info = {'loginID': '', 'firstName': '', 'lastName': '', 'orderCount': 0, 'books_purchased': 0,
'num_comments': 0,
'comments': [], 'books_commented': [], 'trusted': 0, 'untrusted': 0, 'personalStatus': ''}
self.cursor.execute("""SELECT DISTINCT C.loginID, firstName, lastName, COUNT(DISTINCT orderNumber),
COUNT(DISTINCT commentID) FROM customercredentials C, comment CO, orderlog O
WHERE C.loginID = %s AND O.loginID = %s AND CO.loginID = %s""", (loginID, loginID, loginID))
result = self.cursor.fetchone()
info['loginID'] = result[0]
info['firstName'] = result[1]
info['lastName'] = result[2]
info['orderCount'] = result[3]
info['num_comments'] = result[4]
self.cursor.execute("""SELECT SUM(quantity) FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber
AND loginID=%s""", (loginID,))
result = self.cursor.fetchone()
info['books_purchased'] = result[0]
self.cursor.execute("""SELECT * FROM comment WHERE loginID = %s ORDER BY commentDate DESC""", (loginID,))
result = self.cursor.fetchall()
for comment in result:
info['comments'].append(comment)
for comment in info['comments']:
info['books_commented'].append(self.get_single_book_info(comment[1]))
self.cursor.execute("""SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='TRUSTED'""",
(loginID,))
result = self.cursor.fetchone()
info['trusted'] = result[0]
self.cursor.execute("""SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='UNTRUSTED'""",
(loginID,))
result = self.cursor.fetchone()
info['untrusted'] = result[0]
self.cursor.execute("""SELECT trustStatus FROM trusts WHERE loginID=%s AND otherLoginID=%s""",
(my_id, loginID))
result = self.cursor.fetchone()
if result:
info['personalStatus'] = result[0]
return info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_orders(self, loginID):\n order_details = {}\n self.cursor.execute(\"\"\"SELECT orderNumber, orderDate FROM orderlog WHERE loginID=%s \n ORDER BY orderDate DESC, orderNumber DESC\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n order_details[str(order[0])] = {'title': [], 'quantity': [], 'ISBN': []}\n # this line only needs to execute once, but its easier to do it like this.\n order_details[str(order[0])]['date'] = order[1]\n self.cursor.execute(\"\"\"SELECT ISBN FROM orderlog O INNER JOIN productof P ON O.orderNumber = P.orderNumber\n WHERE O.orderNumber=%s\"\"\", (order[0],))\n for book in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT title, quantity FROM book B, productof P, orderlog O WHERE P.ISBN=%s\n AND P.orderNumber = O.orderNumber AND P.ISBN = B.ISBN AND O.orderNumber = %s\"\"\", (book[0], order[0]))\n for details in self.cursor.fetchall():\n title = details[0]\n quantity = details[1]\n order_details[str(order[0])]['title'].append(title)\n order_details[str(order[0])]['quantity'].append(quantity)\n order_details[str(order[0])]['ISBN'].append(book[0])\n return order_details",
"def login_details(cls) -> dict:\n if None in list(cls._login_details.values()):\n cls.input_login_details()\n return cls._login_details",
"def login_info(login_session):\n email, user_id, logged_in = None, None, False\n if 'email' in login_session:\n email = login_session['email']\n user_id = get_user_id(login_session['email'])\n logged_in = True\n return (email, user_id, logged_in)",
"def get_logged_info():\n user = current_identity\n return make_response(dumps({\"status\": True, \"user\": user}), 200)",
"def get_login_info(self):\n username = raw_input(\"Username: \")\n password = getpass.getpass(\"Password:\")\n return (username, password)",
"def info(self, login=None, full_name=None):\r\n params = base.get_params(None, locals())\r\n return self._get('info', params)",
"def user_info(username):\n print(json.dumps(client.user_info(username)))",
"def user_details():\n url = 'https://api.github.com/orgs/facebook/repos'\n json_obj = urllib2.urlopen(url)\n userdata = json.load(json_obj)\n if 'error' in userdata:\n print 'errors are scanned in data'\n for data in userdata:\n if 'name' in data:\n if data['name'] == 'codemod':\n print 'language used'\n print data['language']\n print 'number of watchers'\n print data['watchers']\n print 'git url'\n print data['git_url']\n print 'open issues'\n print data['open_issues']\n print 'permissions for user'\n print 'push'\n print data['permissions']['push']\n print 'pull'\n print data['permissions']['pull']",
"def info(self):\r\n cur = self.db.cursor()\r\n cur.execute(\"select * from lic where idx='USER'\")\r\n info = cur.fetchone()\r\n cur.close()\r\n return info",
"def user_info(client_id, email):\n um = logic.UserManager()\n try:\n user = um.lookup_user_by_email(email)\n orders = om.get_orders_of_user(user.id)\n except ex.TickeeError as e:\n transaction.abort()\n return marshalling.error(e)\n except Exception as e:\n transaction.abort()\n return marshalling.internal_error(e)\n else:\n result = dict(first_name=user.first_name,\n last_name=user.last_name,\n email=user.email,\n orders=map(lambda o: dict(id=o.id,\n tickets=map(lambda t: marshalling.ticket_to_dict(t, include_scanned=True,\n include_user=False), \n o.get_tickets()),\n status=o.status,\n date=marshalling.date(o.session_start)), \n orders))\n return result",
"def extractAuthGWInfo(self,dn):\n \n# dn = request.get(self.jid_auth_header, '')\n dn = transfer_codec(dn)\n userName,idNumber = split_idNumber(dn)\n loginid = idNumber\n# loginid = transfer_codec(loginid) \n# creds['remote_host'] = request.get('REMOTE_HOST', '')\n return loginid,userName,idNumber",
"def get_user_info(uid):\r\n session = tables.get_session()\r\n account_name = ''\r\n description = ''\r\n if session is None:\r\n return account_name, description\r\n try:\r\n user_account = UserAccount()\r\n account_name = user_account.get_field_by_key(UserAccount.account_name, UserAccount.user_id, uid,\r\n session)\r\n description = user_account.get_field_by_key(UserAccount.description, UserAccount.user_id, uid,\r\n session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('User login failed: %s', err)\r\n return account_name, description\r\n finally:\r\n session.close()\r\n return account_name, description",
"def fusion_api_get_login_details(self, api=None, headers=None):\n return self.logindetails.get(api=api, headers=headers)",
"def get_user_details():\n current_user = get_jwt_identity()\n\n if not current_user:\n print('uri=/login error=\"Missing username parameter\"')\n return jsonify({\"msg\": \"Missing username parameter\"}), 400\n\n try:\n username = User.get_username_by_id(current_user)\n result = UserDetail.get_printable_user_detail(username)\n\n if result['userType'] == 'adopter':\n animal_preference = Adopter.get_animal_preference(username)\n result['animalPreference'] = animal_preference\n\n dispositions = UserDetail.get_user_dispositions(User.get_username_by_id(current_user))\n result['dispositions'] = dispositions['dispositions']\n elif result['userType'] == 'shelter worker':\n result['shelter'] = ShelterWorker.get_shelter_by_username(username)\n\n except Exception as e:\n return jsonify(message='{}'.format(e)), 510\n\n if result:\n return jsonify(message=result), 200\n else:\n return jsonify(message='User {} not found'.format(username)), 511",
"def getUserInfo(data):\n\tusername = data[\"session_username\"]\n\tuser = Users.objects.filter(username=username).first()\n\n\tresponse = {}\n\n\tif not user:\n\t\treturn {\"Success\": False, \"Error\": \"Unable to retrieve the user information from database\"}\n\n\tresponse[\"Success\"] = True\n\tresponse[\"Username\"] = user.username\n\tresponse[\"Email\"] = user.email\n\tresponse[\"Verified\"] = user.verified\n\tresponse[\"Level\"] = user.level\n\tresponse[\"Experience\"] = user.experience\n\tresponse[\"Coins\"] = user.coins\n\tresponse[\"Preferences\"] = {\"Grid Opacity\": user.pref_grid}\n\n\treturn response",
"def user_info(self):\n return self.auth.get_user_by_session()",
"def getUserDetails(self,name):\n raise BorkedGetUserDetails",
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def info(name):\n if HAS_SPWD:\n try:\n data = spwd.getspnam(name)\n ret = {\n \"name\": data.sp_nam,\n \"passwd\": data.sp_pwd,\n \"lstchg\": data.sp_lstchg,\n \"min\": data.sp_min,\n \"max\": data.sp_max,\n \"warn\": data.sp_warn,\n \"inact\": data.sp_inact,\n \"expire\": data.sp_expire,\n }\n except KeyError:\n ret = {\n \"name\": \"\",\n \"passwd\": \"\",\n \"lstchg\": \"\",\n \"min\": \"\",\n \"max\": \"\",\n \"warn\": \"\",\n \"inact\": \"\",\n \"expire\": \"\",\n }\n return ret\n\n # SmartOS joyent_20130322T181205Z does not have spwd, but not all is lost\n # Return what we can know\n ret = {\n \"name\": \"\",\n \"passwd\": \"\",\n \"lstchg\": \"\",\n \"min\": \"\",\n \"max\": \"\",\n \"warn\": \"\",\n \"inact\": \"\",\n \"expire\": \"\",\n }\n\n try:\n data = pwd.getpwnam(name)\n ret.update({\"name\": name})\n except KeyError:\n return ret\n\n # To compensate for lack of spwd module, read in password hash from /etc/shadow\n s_file = \"/etc/shadow\"\n if not os.path.isfile(s_file):\n return ret\n with salt.utils.files.fopen(s_file, \"r\") as ifile:\n for line in ifile:\n comps = line.strip().split(\":\")\n if comps[0] == name:\n ret.update({\"passwd\": comps[1]})\n\n # For SmartOS `passwd -s <username>` and the output format is:\n # name status mm/dd/yy min max warn\n #\n # Fields:\n # 1. Name: username\n # 2. Status:\n # - LK: locked\n # - NL: no login\n # - NP: No password\n # - PS: Password\n # 3. Last password change\n # 4. Minimum age\n # 5. Maximum age\n # 6. Warning period\n\n output = __salt__[\"cmd.run_all\"](\"passwd -s {}\".format(name), python_shell=False)\n if output[\"retcode\"] != 0:\n return ret\n\n fields = output[\"stdout\"].split()\n if len(fields) == 2:\n # For example:\n # root NL\n return ret\n # We have all fields:\n # buildbot L 05/09/2013 0 99999 7\n ret.update(\n {\n \"name\": data.pw_name,\n \"lstchg\": fields[2],\n \"min\": int(fields[3]),\n \"max\": int(fields[4]),\n \"warn\": int(fields[5]),\n \"inact\": \"\",\n \"expire\": \"\",\n }\n )\n return ret",
"def user_info(self):\n response = self.query('user_info')\n return response",
"def GetBasicInformation(self):\n if self.cur_uid is None:\n return\n self._get_product_detail_id()",
"def get_user_info_by_id(self, user_id: int) -> dict:",
"def get_user_info_by_name(self, username: str) -> dict:",
"def view():\n login_dict = _open_cnfg()\n login_name, login_url, login_api, login_hid = ['Login name'], ['URL'], ['API key'], ['History ID']\n for lgn in login_dict['logins']:\n login_name.append(lgn)\n login_url.append(login_dict['logins'][lgn]['url'])\n login_api.append(login_dict['logins'][lgn]['api_key'])\n login_hid.append(login_dict['logins'][lgn]['hid'])\n click.echo(\"You are currently using active login: \" + click.style(login_dict['active_login'], bold=True))\n utils._tabulate([login_name, login_url, login_api, login_hid])",
"def get_session_info(site_id):\n log = current_app.log\n db = request.db\n Cred = db.tables.Cred\n user_id = SiteService.get_current_uid()\n cred = Cred.query.filter_by(cred_owner=user_id,\n site_id=site_id).first()\n res = {'ok': False}\n if cred:\n res['username'] = cred.cred_username\n res['expiry'] = cred.cred_expiry\n if cred.cred_expiry > datetime.datetime.utcnow():\n res['ok'] = True\n log.info(\"Fetched info for user %u at site %u.\", user_id, site_id)\n return jsonify(res)",
"def display_accounts_details():\n return Credentials.display_credentials()",
"def basic_stats(db):\n rps = len(list(db['rp'].keys()))\n users = len(list(db['users'].keys()))\n logins = db['logins']\n return {\"rps\": rps, \"users\": users, \"logins\": logins}",
"def extract_cookie_info():\n\t# setup cookie jar\n\tcj = cookielib.CookieJar()\n\tlogin_data = urllib.urlencode({ID_USERNAME: USERNAME, \n\t\tID_PASSWORD: PASSWORD})\n\t# create url opener\n\topener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n\tresp = opener.open(LOGIN_URL, login_data)\n\n\t# Send login info\n\tfor cookie in cj:\n\t\tprint \"----first time cookie: %s --> %s\" % (cookie.name, cookie.value)\n\tprint \"Headers: %s\" % resp.headers",
"def get_customer_statistics(self, n):\n trusted = []\n useful = []\n\n trust_dict = {}\n self.cursor.execute(\"\"\"select otherLoginID, COUNT(loginID) as score_trusted\n FROM trusts GROUP BY otherLoginID, trustStatus HAVING trustStatus='TRUSTED'\"\"\")\n for cust in self.cursor.fetchall():\n trust_dict[cust[0]] = cust[1]\n self.cursor.execute(\"\"\"SELECT otherLoginID, COUNT(loginID) as score_trusted FROM trusts\n GROUP BY otherLoginID, trustStatus HAVING trustStatus='UNTRUSTED'\"\"\")\n for cust in self.cursor.fetchall():\n if cust[0] in trust_dict:\n trust_dict[cust[0]] = trust_dict[cust[0]] - cust[1]\n else:\n trust_dict[cust[0]] = -cust[1]\n m = 0\n n_temp = n\n while n_temp > m and len(trust_dict):\n loginID = max(trust_dict.items(), key=operator.itemgetter(1))[0]\n self.cursor.execute(\"\"\"SELECT firstName, lastName FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n name = self.cursor.fetchone()\n trusted.append([loginID, name[0], name[1], trust_dict[loginID]])\n del trust_dict[loginID]\n n_temp = n_temp - 1\n\n self.cursor.execute(\"\"\"SELECT C.loginID, firstName, lastName, AVG(avg_usefulness) as total_avg\n FROM comment C, customercredentials CR WHERE C.loginID = CR.loginID GROUP BY C.loginID\n ORDER BY total_avg DESC LIMIT %s\"\"\", (n,))\n for cust in self.cursor.fetchall():\n useful.append(cust)\n return trusted, useful",
"def __display_login_info(self):\n print(f'\\nYour card has been created\\n'\n f'Your card number:\\n'\n # f'{self.__card_display()}\\n' # uncomment this line and comment out line below for pretty display\n f'{self.card_number}\\n'\n f'Your card PIN:\\n'\n f'{self.__account_pin}\\n', )"
] | [
"0.5838145",
"0.57874995",
"0.5712863",
"0.5696862",
"0.5646664",
"0.56201595",
"0.56026673",
"0.54782397",
"0.54718333",
"0.54540867",
"0.5419229",
"0.5384394",
"0.53752536",
"0.53589827",
"0.53411764",
"0.5340962",
"0.532667",
"0.53239703",
"0.53219485",
"0.5312928",
"0.5311514",
"0.53063023",
"0.53019506",
"0.5296713",
"0.52878195",
"0.5283732",
"0.52743584",
"0.5254636",
"0.52120817",
"0.5209308"
] | 0.75878596 | 0 |
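The get_basic_userinfo code above derives the trusted/untrusted counters from two parameterized COUNT queries against a trusts table. Below is a minimal, self-contained sketch of that pattern; the sqlite3 in-memory database and the pared-down schema are assumptions made only for illustration (the original uses a MySQL-style cursor with %s placeholders).

import sqlite3

db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("CREATE TABLE trusts (loginID TEXT, otherLoginID TEXT, trustStatus TEXT)")
cur.executemany("INSERT INTO trusts VALUES (?, ?, ?)",
                [("alice", "bob", "TRUSTED"),
                 ("carol", "bob", "TRUSTED"),
                 ("dave", "bob", "UNTRUSTED")])

def trust_counts(cur, login_id):
    # One COUNT query per status, mirroring the two queries in the code above.
    counts = {}
    for status in ("TRUSTED", "UNTRUSTED"):
        cur.execute("SELECT COUNT(loginID) FROM trusts WHERE otherLoginID = ? AND trustStatus = ?",
                    (login_id, status))
        counts[status.lower()] = cur.fetchone()[0]
    return counts

print(trust_counts(cur, "bob"))  # {'trusted': 2, 'untrusted': 1}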
Given the login ID of a customer, remove the customer from the database. Note that the ID passed to this function is unchecked, so proper validity checks need to be in place. | def remove_customer(self, loginID):
try:
self.cursor.execute("""SELECT COUNT(*) FROM customercredentials WHERE loginID=%s""", (loginID,))
if not self.cursor.fetchone()[0]:
return False
self.cursor.execute("""DELETE FROM customercredentials WHERE loginID=%s""", (loginID,))
self.db.commit()
self.cursor.execute("""DELETE FROM customerpersonal WHERE phone NOT IN
(SELECT phone FROM customercredentials)""")
self.db.commit()
self.update_book_scores()
self.update_comment_usefulness()
return True
except Exception as e:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_customer(customer_id):\n try:\n remove_user = cm.Customers.get(cm.Customers.customer_id == customer_id)\n remove_user.delete_instance()\n except cm.DoesNotExist:\n logging.info(\"Customer successfully deleted from database.\")",
"def delete_customer(customer_id):\n with cm.DATABASE.transaction():\n try:\n LOGGER.info(\"Searching for customer [%s]\", customer_id)\n a_customer = cm.Customer.get(\n cm.Customer.customer_id == customer_id)\n # .delete_instance() will delete the record\n a_customer.delete_instance()\n a_customer.save()\n LOGGER.info(\"Deleted customer\")\n except pw.DoesNotExist:\n LOGGER.warning(\"Customer [%s] not in database!\", customer_id)\n raise ValueError",
"def delete_customer(customer_id):\n found = search_customer(customer_id)\n if found is None:\n LOGGER.warning('Could not find customer for delete with id %d.',\n customer_id)\n else:\n found.delete_instance()",
"def delete_customer(customer_id):\n LOGGER.info(\"Deleting customer %s\", customer_id)\n try:\n db_customer = Customers.get(Customers.customer_id == customer_id)\n db_customer.delete_instance()\n LOGGER.info(\"Customer %s deleted\", customer_id)\n except DoesNotExist as e_val:\n LOGGER.warning(\n \"Customer %s does not exist: Delete operation ignored\", customer_id\n )\n LOGGER.warning(e_val)",
"def delete_customer(self, customer_to_del):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_del: #Maybe need to find a more efficient way\n customer_list.remove(customer)\n self._customer_repo.overwrite_customer_list(customer_list)\n credit_card_list = self._customer_repo.get_credit_card_list()\n for credit_card in credit_card_list:\n if credit_card.get_customer_id() == customer_to_del: #Maybe need to find a more efficient way\n credit_card_list.remove(credit_card)\n self._customer_repo.overwrite_credit_card_list(credit_card_list)",
"def delete_customer(customer_id):\n try:\n with database.transaction():\n customer = Customer.get(Customer.customer_id == customer_id)\n customer.delete_instance()\n customer.save()\n except Exception as unknown_error:\n print(f'Error. Could not delete customer {customer_id}. {unknown_error}')",
"def delete_customer(customer_id):\n try:\n with database.transaction():\n customer = Customer.get(Customer.customer_id == customer_id)\n customer.delete_instance()\n customer.save()\n logger.info(f\"Successfully deleted customer {customer_id}\")\n except Exception as unknown_error:\n logger.error(\n f\"Error. Failed to delete customer {customer_id}. {unknown_error}\"\n )\n print(\n f'Error. Could not delete customer {customer_id}. {unknown_error}'\n )",
"def delete_customer(customer_id):\n print('Deleting customer with ID {}...'.format(customer_id))\n try:\n customer_delete = Customer.get_by_id(customer_id)\n customer_delete.delete_instance()\n LOGGER.info('Customer with Customer ID %s has been deleted',\n customer_id)\n except Customer.DoesNotExist:\n print('No record of customer with Customer ID {}'.format(customer_id))\n print('No customer deleted')",
"def delete_customer(customer_id):\n del_query = Customer.get(Customer.customer_id == customer_id)\n return bool(del_query.delete_instance())",
"def delete_customer(customer_id):\n init_database()\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.delete_instance()\n logging.info('Customer with ID %s successfully deleted.', customer_id)\n return True\n except peewee.DoesNotExist:\n logging.error('Customer delete with ID %s failed, not in database..', customer_id)\n return False\n finally:\n database.close()",
"def delete_customer(customer_id):\n LOGGER.info('Delete customer with id %s', customer_id)\n try:\n a_customer = Customer.get(Customer.customer_id == customer_id)\n a_customer.delete_instance()\n LOGGER.info('Customer with id %s deleted successfully', customer_id)\n except Customer.DoesNotExist as error:\n LOGGER.info('Delete failed.')\n LOGGER.info('Customer with id %s not found.', customer_id)\n LOGGER.info(error)\n raise ValueError",
"def deleteCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('delete_customer', params)",
"def delete_customer(customer_id):\n try:\n customer = Customer.get(Customer.customer_id == customer_id)\n\n LOGGER.info('Trying to delete %s', customer.first_name)\n customer.delete_instance()\n LOGGER.info('Deleted %s', customer.first_name)\n\n return True\n\n except DoesNotExist as err:\n LOGGER.warning('Customer ID: %s does not exist', customer_id)\n LOGGER.warning(err)\n\n return False",
"def delete_customer(connection, customer_id):\n connection.command_path = 'customer/{0}'.format(customer_id)\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.delete(url, headers=extra_headers, verify=verify_ssl)\n if res.status_code == 204:\n return True\n raise CustomerDeletionException(res.content)",
"def delete_customer(customer_id):\n try:\n with customer_db.transaction():\n del_result = Customer.delete().where(Customer.customer_id == customer_id).execute()\n if del_result == 1:\n logger.info(\"Successfully deleted customer ID %s\", customer_id)\n return True\n logger.error(\"Failed to delete customer ID %s\", customer_id)\n return False\n except Exception as e:\n logger.error(\"Error deleting customer ID %s: %s\", customer_id, e)",
"def delete_customer(cls, api, id, **params):\n return api.delete_customer(id, **params)",
"def test_delete_customer(self):\n set_up_db()\n add_customer(*self.test_customer)\n delete_customer(1)\n try:\n Customer.get_by_id(1)\n except DoesNotExist:\n LOGGER.info(\"Customer was deleted.\")",
"def test_delete_customer(self):\r\n create_empty_db()\r\n add_customer(**user_1)\r\n delete_customer(user_1['customer_id'])\r\n self.assertEqual({}, search_customer(user_1['customer_id']))\r\n drop_db()",
"def delete(customer):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n http_client.delete(routes.url(routes.CUSTOMER_RESOURCE, resource_id=customer))",
"def remove_user(self, login):\n\t\tif login in self.users_by_name:\n\t\t\tuser = self.users_by_name[login]\n\t\t\tif not user.system:\n\t\t\t\tself.users.pop(user.id, None)\n\t\t\t\tdel(self.users_by_name[login])\n\t\t\t\tself.sync()",
"def delete(self, **params):\n return self._api.delete_customer(self.id, **params)",
"def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))",
"def delete_customer(self,\n customer_id):\n\n # Prepare query URL\n _url_path = '/v2/customers/{customer_id}'\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\n 'customer_id': customer_id\n })\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n _headers = {\n 'accept': 'application/json'\n }\n\n # Prepare and execute request\n _request = self.config.http_client.delete(_query_url, headers=_headers)\n OAuth2.apply(self.config, _request)\n _response = self.execute_request(_request)\n\n decoded = APIHelper.json_deserialize(_response.text)\n if type(decoded) is dict:\n _errors = decoded.get('errors')\n else:\n _errors = None\n _result = ApiResponse(_response, body=decoded, errors=_errors)\n return _result",
"def delete_user(id):\n pass",
"def delete_sql_login(user, server, userdata):\n global servers_to_remove\n betterprint(\"Removing LOGIN {} from server {}\".format(user, server))\n sql = \"DROP LOGIN [{}]\".format(user)\n try:\n betterprint(\"SQL: \" + sql)\n rows, userdata = execute_sql(sql, server, None, False, userdata)\n betterprint(\"LOGIN removal successful.\")\n\n if rows:\n servers_to_remove.append(server)\n return True, userdata\n except Exception as e:\n print (e)\n return False, userdata",
"def del_usr (conn, id):\n\n try:\n csr = conn.cursor()\n\n cmd = \"DELETE FROM {tbl} WHERE {col1} = {val1};\".\\\n format(tbl = _tbl_users,\n col1 = _tbl_users_col1, val1 = id)\n print(cmd)\n\n csr.execute(cmd)\n csr.close()\n\n except Exception as ex:\n print(\"Error - del_usr: {0}\".format(ex))\n rc_err = ex.args[0]\n return rc_err\n\n return rc_ok",
"def delete_customer_accounts_by_institution_login(self,\r\n customer_id,\r\n institution_login_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(customer_id=customer_id,\r\n institution_login_id=institution_login_id)\r\n\r\n # Prepare query URL\r\n _url_path = '/aggregation/v1/customers/{customerId}/institutionLogins/{institutionLoginId}'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, {\r\n 'customerId': customer_id,\r\n 'institutionLoginId': institution_login_id\r\n })\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += _url_path\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'Finicity-App-Key': Configuration.finicity_app_key\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.delete(_query_url, headers=_headers)\r\n CustomHeaderAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)",
"def remove(self, user_id):\n pass",
"def customer_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n customer_reference = get_object_or_404(Customer, id=id,company=company)\n\n #deletes the view and redirects to the page.\n customer_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))",
"def delUser(self, id):\n del self.users[id]\n if id in self._nameCache:\n del self._nameCache[self._nameCache[id]]\n del self._nameCache[id]\n if id in self._hostmaskCache:\n for hostmask in self._hostmaskCache[id]:\n del self._hostmaskCache[hostmask]\n del self._hostmaskCache[id]\n self.flush()"
] | [
"0.78983843",
"0.7327206",
"0.72582763",
"0.72373587",
"0.7197035",
"0.7154331",
"0.70046926",
"0.6938491",
"0.69187766",
"0.69126296",
"0.6911033",
"0.6747551",
"0.6737351",
"0.6678386",
"0.65200084",
"0.64759135",
"0.64430314",
"0.63739216",
"0.62082744",
"0.60372424",
"0.5915042",
"0.5912758",
"0.58671975",
"0.58667046",
"0.58406705",
"0.57804495",
"0.5779067",
"0.57573414",
"0.5757288",
"0.5732105"
] | 0.77048546 | 1 |
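The remove_customer document above deletes the credentials row and then purges personal rows that no longer back any credentials via a NOT IN subquery. Here is a minimal runnable sketch of that two-step cleanup, assuming sqlite3 and a simplified schema (the original commits through self.db and uses %s placeholders).

import sqlite3

db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("CREATE TABLE customerpersonal (phone TEXT PRIMARY KEY, address TEXT)")
cur.execute("CREATE TABLE customercredentials (loginID TEXT PRIMARY KEY, phone TEXT)")
cur.execute("INSERT INTO customerpersonal VALUES ('555-0100', '1 Main St')")
cur.execute("INSERT INTO customercredentials VALUES ('alice', '555-0100')")

def remove_customer(cur, login_id):
    # Refuse unknown IDs instead of silently deleting nothing.
    cur.execute("SELECT COUNT(*) FROM customercredentials WHERE loginID = ?", (login_id,))
    if not cur.fetchone()[0]:
        return False
    cur.execute("DELETE FROM customercredentials WHERE loginID = ?", (login_id,))
    # Purge personal rows whose phone no longer appears in any credentials row.
    cur.execute("DELETE FROM customerpersonal WHERE phone NOT IN "
                "(SELECT phone FROM customercredentials)")
    return True

print(remove_customer(cur, "alice"))   # True
print(remove_customer(cur, "nobody"))  # False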
Given the login ID of a manager, remove the manager from the database. Note that the ID passed to this function is unchecked, so proper validity checks need to be in place. However, this function will only be called after an authority validation has taken place, so we do not need to ensure that the caller is a supermanager. | def remove_manager(self, loginID):
try:
self.cursor.execute("""SELECT COUNT(*) FROM managercredentials WHERE loginID=%s""", (loginID,))
if not self.cursor.fetchone()[0]:
return False
self.cursor.execute("""DELETE FROM managercredentials WHERE loginID=%s""", (loginID,))
self.db.commit()
self.cursor.execute("""DELETE FROM managerpersonal WHERE phone NOT IN
(SELECT phone FROM managercredentials)""")
self.db.commit()
return True
except Exception as e:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def manager_remove(self, manager):\n self.request('/v1.1/managers/configs/%s' % manager, 'DELETE')",
"def remove_store_manager(user_name: str, store_manager_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.REMOVE_MANAGER.value, store_name)\n permission_handler.is_working_in_store(store_manager_name, store_name)\n to_remove: list = user_handler.remove_employee(user_name, store_manager_name, store_name)\n permission_handler.remove_employee(to_remove, store_name)\n for store_employee_name in to_remove:\n publisher.send_remove_employee_msg(\n f\"You are no longer an employee in {store_name} you have been removed by {user_name}\",\n store_employee_name)\n try:\n publisher.unsubscribe(store_employee_name, store_name)\n except:\n continue",
"def remove_user(self, login):\n\t\tif login in self.users_by_name:\n\t\t\tuser = self.users_by_name[login]\n\t\t\tif not user.system:\n\t\t\t\tself.users.pop(user.id, None)\n\t\t\t\tdel(self.users_by_name[login])\n\t\t\t\tself.sync()",
"def delete_podmanager(cls, podmanager_uuid):\n cls.dbdriver.delete_podmanager(podmanager_uuid)",
"def remove_employee(self, id):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('DELETE FROM employee WHERE employeeID=%s', (id,))\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to remove Employee!\\n(%s)' % (error))",
"def delUser(self, id):\n del self.users[id]\n if id in self._nameCache:\n del self._nameCache[self._nameCache[id]]\n del self._nameCache[id]\n if id in self._hostmaskCache:\n for hostmask in self._hostmaskCache[id]:\n del self._hostmaskCache[hostmask]\n del self._hostmaskCache[id]\n self.flush()",
"def test_remove_team_manager_from_team(self):\n pass",
"def remove_team_by_id(team_id: int) -> None:\n conn = db.connect()\n query = 'Delete From teams where TeamID={};'.format(team_id)\n conn.execute(query)\n conn.close()",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self.name)",
"def leave_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)",
"def delete(self):\n self.manager.delete(self)",
"def remove_member(self, request, pk):\n farm = self.get_object()\n user = request.data.get('user')\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)",
"def remove_person(self, per: str):\n if per in self._people:\n self._people.remove(per)\n else:\n raise IDDoesNotExist",
"def removeAuthorByID(id: int):\n Author.query.filter_by(id=id).delete()\n db.session.commit()\n app.logger.info(f\"The author {id} has been removed\")",
"def delete_sql_login(user, server, userdata):\n global servers_to_remove\n betterprint(\"Removing LOGIN {} from server {}\".format(user, server))\n sql = \"DROP LOGIN [{}]\".format(user)\n try:\n betterprint(\"SQL: \" + sql)\n rows, userdata = execute_sql(sql, server, None, False, userdata)\n betterprint(\"LOGIN removal successful.\")\n\n if rows:\n servers_to_remove.append(server)\n return True, userdata\n except Exception as e:\n print (e)\n return False, userdata",
"def organization_del_no_login(self, client, id):\n assert client.delete('/organizations/' + id, headers={},\n data={'name': 'Daisy'}).status == \\\n '400 BAD REQUEST'",
"def remove_member(self, team_id, user_id):\n # Ensure that the team exists. Raises error if team does not exist.\n # If the user is the team owner the constraint that the owner has to be\n # a team member is violated.\n sql = 'SELECT owner_id FROM team WHERE id = ?'\n team = self.con.execute(sql, (team_id,)).fetchone()\n if team is None:\n raise err.UnknownTeamError(team_id)\n elif team['owner_id'] == user_id:\n raise err.ConstraintViolationError('cannot remove team owner')\n sql = 'DELETE FROM team_member WHERE team_id = ? AND user_id = ?'\n self.con.execute(sql, (team_id, user_id))\n self.con.commit()",
"def fusion_api_delete_hypervisor_manager(self, name=None, uri=None, api=None, headers=None):\n return self.hypervisor_mgr.delete(name=name, uri=uri, api=api, headers=headers)",
"def fusion_api_remove_san_manager(self, name=None, uri=None, api=None, headers=None):\n return self.dm.delete(name, uri, api, headers)",
"def stop_running_manager(self) -> None:\n self.remove_value(self._manager_running_attribute)",
"def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None",
"def remove(model_class, id):\n key = build_key(model_class, id)\n logger.info(\" CACHE INVALIDATE key=%s\", key)\n cache.delete(key) # Invalidate from cache\n User.objects.filter(id=id).delete()",
"def stop_router_realm_role(self, id, role_id, details=None):\n self.log.debug(\"{}.drop_router_realm_role\".format(self.__class__.__name__),\n id=id, role_id=role_id)\n\n if id not in self.realms:\n raise ApplicationError(u\"crossbar.error.no_such_object\", \"No realm with ID '{}'\".format(id))\n\n if role_id not in self.realms[id].roles:\n raise ApplicationError(u\"crossbar.error.no_such_object\", \"No role with ID '{}' in realm with ID '{}'\".format(role_id, id))\n\n del self.realms[id].roles[role_id]",
"def remove(self, _id):\n if self.objects.get(_id):\n self.objects.pop(_id)",
"def remove_customer(self, loginID):\n try:\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n if not self.cursor.fetchone()[0]:\n return False\n self.cursor.execute(\"\"\"DELETE FROM customercredentials WHERE loginID=%s\"\"\", (loginID,))\n self.db.commit()\n self.cursor.execute(\"\"\"DELETE FROM customerpersonal WHERE phone NOT IN \n (SELECT phone FROM customercredentials)\"\"\")\n self.db.commit()\n self.update_book_scores()\n self.update_comment_usefulness()\n return True\n except Exception as e:\n return False",
"def remove_employee(self, employee):\n self.employees.remove(employee)",
"def del_pass(passlocker):\n passlocker.delete_passlocker()",
"def delete(self, id):\n # Validation\n TrainerManager._int_validator(id)\n\n # Database Query\n session = self._db_session()\n\n existing_trainer = session.query(AbstractTrainer).filter(\n AbstractTrainer.trainer_id == id).first()\n\n if existing_trainer is None:\n session.close()\n raise ValueError('Incorrect value: id not in use')\n\n session.delete(existing_trainer)\n session.commit()\n\n session.close()",
"def delete_by_id(cls, id):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.saved = False\n\t\tdb.session.commit()"
] | [
"0.6349377",
"0.6214349",
"0.60781866",
"0.59333056",
"0.57762134",
"0.564966",
"0.54429406",
"0.5420275",
"0.54007703",
"0.54007703",
"0.5385557",
"0.53853744",
"0.53794116",
"0.53059447",
"0.5249178",
"0.5243765",
"0.5243492",
"0.52429736",
"0.5232355",
"0.5206753",
"0.5203535",
"0.5185929",
"0.5170443",
"0.51478046",
"0.5106488",
"0.5089076",
"0.5087002",
"0.50759804",
"0.50519264",
"0.5044918"
] | 0.7577217 | 0 |
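The query for remove_manager stresses that the incoming login ID is unchecked. The short sketch below shows one kind of validity check a caller could run before touching the database; the format rule is purely hypothetical, since the entries never define what a valid manager login ID looks like.

import re

# Hypothetical format rule: alphanumerics and underscores, 3-32 characters.
LOGIN_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]{3,32}$")

def is_valid_login_id(login_id):
    # Cheap syntactic check to run before the COUNT(*) existence check in the database.
    return isinstance(login_id, str) and bool(LOGIN_ID_PATTERN.match(login_id))

print(is_valid_login_id("mgr_007"))      # True
print(is_valid_login_id("x'; DROP --"))  # False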
Given the details needed to locate a book from a certain order, together with a quantity, create a return request for that quantity of the book. | def request_return(self, orderNumber, ISBN, quantity):
date = datetime.date.today()
self.cursor.execute("""INSERT INTO returnrequest (orderNumber, requestDate, ISBN, quantity)
VALUES (%s,%s,%s,%s)""", (orderNumber, date, ISBN, quantity))
self.db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n order = None\n args = book_return_parser.parse_args()\n order_id = args['order_id']\n copy_id = args['copy_id']\n if order_id is not None and copy_id is not None:\n return 'Only one parameter is needed', 400\n if order_id is not None:\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if copy_id is not None:\n order = db.session.query(models.Order).filter_by(copy=copy_id).first()\n if order is None:\n return 'Please provide a correct order_id or copy_id for the book', 404\n copy = db.session.query(models.Copy).filter_by(id=order.copy).first()\n if copy is None:\n return 'Copy of the book does not exist', 404\n order = change_order_status(order.id, ORDER_STATUS_COMPLETED)\n copy.status = BOOK_COPY_STATUS_AVAILABLE\n db.session.commit()\n return {'order': order.serialize(),\n 'message': 'Book returned, Order completed!'}, 200",
"def add_book(code: str, name: str, author: str, quantity: int):\n pass",
"def create_get_order_book_request(self, symbol: str,\n limit: Optional[int] = None\n ) -> Request:",
"def test_query_orderbook(\n test_client, pydex_client, asset_infos\n):\n orderbook_params = pydex_client.make_orderbook_query(\n base_asset_data=asset_infos.VETH_ASSET_DATA,\n quote_asset_data=asset_infos.LONG_ASSET_DATA\n )\n res = test_client.get(\n pydex_client.orderbook_url,\n query_string=orderbook_params\n )\n assert res.status_code == 200\n res = res.get_json()\n assert_valid(res, \"/relayerApiOrderbookResponseSchema\")\n # expected_res = {\n # 'asks': {'page': 1, 'perPage': 20, 'records': [], 'total': 0},\n # 'bids': {'page': 1, 'perPage': 20, 'records': [], 'total': 0}}\n # assert res == expected_res",
"def getOrderBookPrice(exchange, symbol, side, quantity, order_book=None):\n # TODO test it\n # print(\"obap1\")\n order_book_side = order_book['asks'] \\\n if side == exchange.SIDE_SELL else order_book['bids']\n\n quantity = Decimal(quantity)\n i, orders, price = 0, [], Decimal(0)\n accounted_for_quantity = Decimal(0)\n qtdif = Decimal(1)\n # print(\"obap2\")\n while accounted_for_quantity < quantity or qtdif > Decimal(0.0001):\n try:\n order = order_book_side[i]\n except IndexError:\n raise Exception(\"There are not enough orders in the Order Book.\")\n # return False\n qty = min(Decimal(order[1]), quantity - accounted_for_quantity)\n price += Decimal(order[0]) * qty\n accounted_for_quantity += qty\n qtdif = abs(Decimal(1) - accounted_for_quantity / quantity)\n i += 1\n\n # print(\"obap3\")\n return price / quantity",
"def order_book(self, order_details):\n order_date = datetime.date.today()\n self.cursor.execute(\"INSERT INTO orderlog (loginID, orderDate) VALUES (%s, %s)\",\n (order_details['loginID'], order_date))\n order_id = self.cursor.lastrowid\n for i in range(len(order_details['ISBN'])):\n self.cursor.execute(\"INSERT INTO productof Values (%s, %s, %s)\",\n (order_details['ISBN'][i], order_id, order_details['quantity'][i]))\n self.cursor.execute(\"UPDATE book SET stock=stock-%s WHERE ISBN=%s\",\n (order_details['quantity'][i], order_details['ISBN'][i]))\n self.db.commit()\n return order_id",
"def return_book(self, user, book):\n r = self.get(rented_by=user, book=book, returned_on=None)\n r.returned_on = datetime.now()\n r.save()\n r.book.in_stock += 1\n r.book.save()",
"def _search_for_quantity_in_order_book(self, price, price_level, quantity_to_trade, order):\n if price_level is None: # Last price level\n return quantity_to_trade, []\n\n assert isinstance(price, Price), type(price)\n assert isinstance(price_level, PriceLevel), type(price_level)\n assert isinstance(quantity_to_trade, Quantity), type(quantity_to_trade)\n assert isinstance(order, Order), type(order)\n\n self._logger.debug(\"Searching in price level: %i\", int(price))\n\n if quantity_to_trade <= price_level.depth: # All the quantity can be matched in this price level\n quantity_to_trade, proposed_trades = self._search_for_quantity_in_price_level(price_level.first_tick,\n quantity_to_trade,\n order)\n else: # Not all the quantity can be matched in this price level\n quantity_to_trade, proposed_trades = self._search_for_quantity_in_order_book_partial(price,\n price_level,\n quantity_to_trade,\n order)\n return quantity_to_trade, proposed_trades",
"def find_purchase_qty_in_duration(self,from_date,to_date,location,product_id):\n # query=\"\"\"\n # select sum(product_uom_qty) from stock_move mv \n # Inner join stock_location sl on sl.id = mv.location_id and sl.usage='supplier'\n # and mv.location_dest_id in (%s) where state='done' and product_id = %s and date between '%s 00:00:00' and '%s 23:59:59'\n # \"\"\"\n query = \"\"\"select sum(product_uom_qty) as total,product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_id and sl.usage='supplier' \n and mv.location_dest_id in (%s) where state='done' and product_id = %s and \n date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\"\"\"%(\n ','.join(str(x) for x in location), product_id.id,from_date,to_date)\n self._cr.execute(query)\n result = self._cr.fetchall()\n uom_rec = self.env['product.uom']\n purchase_qty = 0\n for r in result:\n factor_inv = uom_rec.browse(r[1]).factor_inv\n purchase_qty += r[0] * factor_inv\n # Return Qty\n return_query = \"\"\"select sum(product_uom_qty) as total,product_uom \n from stock_move mv Inner join stock_location sl on sl.id = \n mv.location_dest_id and sl.usage='supplier' and mv.location_id in (\n %s) where state='done' and product_id = %s and date between '%s \n 00:00:00' and '%s 23:59:59' group by product_uom\"\"\" % (\n ','.join(str(x) for x in location), product_id.id, from_date,\n to_date)\n self._cr.execute(return_query)\n return_result = self._cr.fetchall()\n purchase_return_qty = 0\n for re in return_result:\n factor_inv = uom_rec.browse(re[1]).factor_inv\n purchase_return_qty += re[0] * factor_inv\n purchase_qty -= purchase_return_qty\n return purchase_qty",
"async def place_order(request: web.Request, body) -> web.Response:\n body = Order.from_dict(body)\n return web.Response(status=200)",
"def api(isbn):\n # Ensure valid isbn-10 format provided\n if len(isbn) != 10:\n response = make_response(\n jsonify(\"Please provide a valid ISBN-10\"), 404)\n response.headers['X-Error'] = \"Please provide a valid ISBN-10\"\n return response\n\n # Ensure requested book is in our database\n isInDB = db.execute(\n \"SELECT * from books \"\n \"WHERE isbn = :isbn \", {\n 'isbn': isbn\n }).fetchone()\n if isInDB is None:\n response = make_response(\n jsonify(\"Book does not exist in database\"), 404)\n response.headers['X-Error'] = \"Book does not exist in database\"\n return response\n\n # Query data for API response\n proxy = db.execute(\n \"SELECT books.title, books.author, books.year, books.isbn, \"\n \"COUNT(reviews.*) AS review_count, CAST(AVG(reviews.rating) AS float) AS average_score \"\n \"FROM books LEFT JOIN reviews ON reviews.book_id=books.id \"\n \"WHERE books.isbn=:isbn \"\n \"GROUP BY books.id\", {\n 'isbn': isbn\n }).fetchone()\n\n # Return json data\n book_data = {\n \"title\": proxy.title,\n \"author\": proxy.author,\n \"year\": int(proxy.year),\n \"isbn\": proxy.isbn,\n \"review_count\": proxy.review_count,\n \"average_score\": proxy.average_score\n }\n\n # Send requested data\n return make_response(jsonify(book_data), 200)",
"def create_order_2(i):\n # create dummy order 2\n o2 = models.Order()\n o2.inmate = i\n o2.save()\n o2.status = 'SENT'\n o2.date_closed = datetime.datetime.now()\n o2.save()\n # ...with 1 dummy book\n b2 = models.Book()\n b2.title = \"dictionary\"\n b2.order = o2\n b2.full_clean()\n b2.save()\n return o2",
"def query_om2_order():\n\n args = request.args\n\n try:\n search = args[\"q\"]\n _, orderId = search.split(\":\")\n hits = dss_lookup(orderId)\n # return result match ES\n data = {'hits': {'hits': hits}}\n return jsonify(data)\n\n except Exception as e:\n log.exception(e)\n return jsonify({'error': str(e)})",
"def get_product(location, product_id, quantity):\r\n\r\n product_array = []\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n if location == \"product_factory\":\r\n # Get product from product table, deduct the quantity\r\n ogquantity = db.execute(\r\n \"SELECT quantity FROM product WHERE product_id = ? AND for_business = ?\",\r\n (product_id, b_id,),\r\n ).fetchone()[0]\r\n newquantity = ogquantity - quantity\r\n if int(newquantity) < 0:\r\n raise Exception(\"Invalid quantity.\")\r\n query = (\r\n \"UPDATE product SET quantity = ? WHERE product_id = ? AND for_business = ?\"\r\n )\r\n db.execute(query, (newquantity, product_id, b_id))\r\n p = db.execute(\r\n \"SELECT product_id FROM product WHERE for_business = ? AND product_id = ?\",\r\n (b_id, product_id,),\r\n ).fetchone()\r\n product_array = list(p)\r\n product_array.append(quantity)\r\n db.commit()\r\n return product_array\r\n else:\r\n ogquantity = db.execute(\r\n \"SELECT qty FROM warehouse WHERE loc_id = ? AND prod_id = ? AND b_id = ?\",\r\n (location, product_id, b_id,),\r\n ).fetchone()[0]\r\n newquantity = ogquantity - quantity\r\n if int(newquantity) < 0:\r\n raise Exception(\"Invalid quantity.\")\r\n query = (\r\n \"UPDATE warehouse SET qty = ? where loc_id = ? AND prod_id = ? AND b_id = ?\"\r\n )\r\n db.execute(query, (newquantity, location, product_id, b_id,))\r\n p = db.execute(\r\n \"SELECT prod_id FROM warehouse WHERE prod_id = ? AND loc_id = ? AND b_id = ?\",\r\n (product_id, location, b_id,),\r\n ).fetchone()\r\n if int(newquantity) == 0:\r\n db.execute(\r\n \"DELETE FROM warehouse WHERE b_id = ? AND prod_id = ? AND loc_id = ?\",\r\n (b_id, product_id, location,),\r\n )\r\n product_array = list(p)\r\n product_array.append(quantity)\r\n db.commit()\r\n return product_array",
"def getquantity(prd, loc):\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n qty = {}\r\n if loc == \"Products\":\r\n if prd != \"None\":\r\n q = db.execute(\r\n \"SELECT quantity FROM product WHERE product_id = ? AND for_business = ?\",\r\n (prd, b_id,),\r\n ).fetchone()\r\n qty[\"qty\"] = str(q[\"quantity\"])\r\n else:\r\n pass\r\n else:\r\n q = db.execute(\r\n \"SELECT qty FROM warehouse WHERE prod_id = ? AND b_id = ? AND loc_id = ?\",\r\n (prd, b_id, loc,),\r\n ).fetchone()\r\n qty[\"qty\"] = str(q[\"qty\"])\r\n return qty",
"async def _book(self, msg: dict, timestamp: float):\n # PERF perf_start(self.id, 'book_msg')\n\n delta = {BID: [], ASK: []}\n # if we reset the book, force a full update\n forced = False\n pair = self.exchange_symbol_to_std_symbol(msg['data'][0]['symbol'])\n if not self.partial_received[pair]:\n # per bitmex documentation messages received before partial\n # should be discarded\n if msg['action'] != 'partial':\n return\n self.partial_received[pair] = True\n forced = True\n\n if msg['action'] == 'partial':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n price = Decimal(data['price'])\n size = Decimal(data['size'])\n order_id = data['id']\n\n self._l2_book[pair][side][price] = size\n self.order_id[pair][side][order_id] = price\n elif msg['action'] == 'insert':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n price = Decimal(data['price'])\n size = Decimal(data['size'])\n order_id = data['id']\n\n self._l2_book[pair][side][price] = size\n self.order_id[pair][side][order_id] = price\n delta[side].append((price, size))\n elif msg['action'] == 'update':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n update_size = Decimal(data['size'])\n order_id = data['id']\n\n price = self.order_id[pair][side][order_id]\n\n self._l2_book[pair][side][price] = update_size\n self.order_id[pair][side][order_id] = price\n delta[side].append((price, update_size))\n elif msg['action'] == 'delete':\n for data in msg['data']:\n side = BID if data['side'] == 'Buy' else ASK\n order_id = data['id']\n\n delete_price = self.order_id[pair][side][order_id]\n del self.order_id[pair][side][order_id]\n del self._l2_book[pair][side][delete_price]\n delta[side].append((delete_price, 0))\n\n else:\n LOG.warning(\"%s: Unexpected l2 Book message %s\", self.id, msg)\n return\n # PERF perf_end(self.id, 'book_msg')\n # PERF perf_log(self.id, 'book_msg')\n\n await self.book_callback(self._l2_book[pair], L2_BOOK, pair, forced, delta, timestamp, timestamp)",
"def change_item_quantity_from_order(detailid,newQuantity): \n data = order_obj.change_item_quantity_from_order(detailid,newQuantity)\n return data",
"def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)",
"def quantity_response(quantity,\n price_elasticity,\n aftertax_price1,\n aftertax_price2,\n income_elasticity,\n aftertax_income1,\n aftertax_income2):\n # pylint: disable=too-many-arguments\n # compute price term in log-log response equation\n if price_elasticity == 0.:\n pch_price = np.zeros(quantity.shape)\n else:\n atp1 = np.where(aftertax_price1 < 0.01, 0.01, aftertax_price1)\n atp2 = np.where(aftertax_price2 < 0.01, 0.01, aftertax_price2)\n pch_price = atp2 / atp1 - 1.\n # compute income term in log-log response equation\n if income_elasticity == 0.:\n pch_income = np.zeros(quantity.shape)\n else:\n ati1 = np.where(aftertax_income1 < 1.0, 1.0, aftertax_income1)\n ati2 = np.where(aftertax_income2 < 1.0, 1.0, aftertax_income2)\n pch_income = ati2 / ati1 - 1.\n # compute response\n pch_q = price_elasticity * pch_price + income_elasticity * pch_income\n qresponse = pch_q * quantity\n return qresponse",
"def post(self):\n body = request.get_json()\n borrower = body.get('borrower')\n borrower = query_user_by_name(borrower)\n if borrower is None:\n return 'User does not exit', 404\n if invalid_user(borrower.username):\n return 'Unauthorized user, please login as a user/borrower', 401\n copy_id = body.get('copy_id')\n copy = db.session.query(models.Copy).filter_by(id=copy_id).first()\n if copy is None:\n return 'Copy ID not found ' + str(copy_id), 409\n if copy.status == BOOK_COPY_STATUS_UNAVAILABLE:\n return 'The copy of the book is not available', 400\n copy_owner = body.get('copy_owner')\n owner = query_user_by_name(copy_owner)\n if owner is None:\n return 'Copy owner not found ' + copy_owner, 409\n new_order = models.Order()\n new_order.parse_body(body)\n new_order.status = ORDER_STATUS_REQUESTED\n db.session.add(new_order)\n db.session.commit()\n return new_order.serialize(), 201",
"def test_query_inventory_quantity(self):\n resp = self.app.get('/inventories', query_string='quantity=5')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertTrue(len(resp.data) > 0)\n self.assertTrue('conditioner' in resp.data)\n self.assertFalse('shampoo' in resp.data)\n data = json.loads(resp.data)\n query_item = data[0]\n self.assertEqual(query_item['quantity'], 5)",
"def searchOrder(order_id):\n flag = True\n generate_request = oAuth_magento()\n\n payload = {\"searchCriteria[filter_groups][0][filters][0][field]\": \"increment_id\",\n \"searchCriteria[filter_groups][0][filters][0][value]\": order_id,\n \"searchCriteria[filter_groups][0][filters][0][conditionType]\": \"eq\",\n \"fields\": \"items[increment_id,base_currency_code,grand_total,created_at,status,billing_address[company,firstname,lastname]]\",\n }\n\n try:\n response = requests.request(\"GET\", url=generate_request[0], headers=generate_request[1], params=payload)\n # with open('temp_files/magento_search_orders.json','w') as f:\n # f.write(response.text)\n json_response = json.loads(response.text)\n for ele in json_response['items']:\n for key, val in ele.items():\n if key == 'billing_address':\n name_container = ele.pop(key)\n try:\n ele['purchasing_institute'] = name_container['company']\n except:\n ele['purchasing_institute'] = name_container['firstname'] + ' ' + name_container['lastname']\n else:\n pass\n col_headers = list((json_response['items'][0]).keys())\n context = {'result': json_response['items'], 'col_headers': col_headers}\n return context\n \n except:\n flag = False\n context = {'msg': \"*Please enter a valid Order ID!\", 'flag': flag}\n return context",
"async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n await self.load_markets()\n precision = self.safe_value(self.options, 'precision', 'R0')\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n 'precision': precision,\n }\n if limit is not None:\n request['len'] = limit # 25 or 100\n fullRequest = self.extend(request, params)\n orderbook = await self.publicGetBookSymbolPrecision(fullRequest)\n timestamp = self.milliseconds()\n result = {\n 'symbol': market['symbol'],\n 'bids': [],\n 'asks': [],\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'nonce': None,\n }\n priceIndex = 1 if (fullRequest['precision'] == 'R0') else 0\n for i in range(0, len(orderbook)):\n order = orderbook[i]\n price = self.safe_number(order, priceIndex)\n signedAmount = self.safe_string(order, 2)\n amount = Precise.string_abs(signedAmount)\n side = 'bids' if Precise.string_gt(signedAmount, '0') else 'asks'\n result[side].append([price, self.parse_number(amount)])\n result['bids'] = self.sort_by(result['bids'], 0, True)\n result['asks'] = self.sort_by(result['asks'], 0)\n return result",
"def update_product_details(book_id, stock_delta, updated_cost):\n\n response = {}\n\n if stock_delta and stock_delta != 0:\n curr_stock_details = query_db('select stock from books where id='+str(book_id))\n app.logfile.info('select stock from books where id='+str(book_id))\n\n updated_stock_count = curr_stock_details[0]['stock'] + stock_delta\n update_stock_details = update_db('update books set stock='+str(updated_stock_count)+' where id='+str(book_id))\n app.logfile.info('update books set stock='+str(updated_stock_count)+' where id='+str(book_id))\n response['stock_updated'] = update_stock_details\n\n if updated_cost:\n update_cost_details = update_db('update books set cost='+str(updated_cost)+' where id='+str(book_id))\n app.logfile.info('update books set stock='+str(updated_cost)+' where id='+str(book_id))\n response['cost_updated'] = update_cost_details\n \n return(jsonify(response))",
"async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n if limit is not None:\n request['depth'] = limit\n response = await self.publicGetOrderBookL2(self.extend(request, params))\n result = {\n 'symbol': symbol,\n 'bids': [],\n 'asks': [],\n 'timestamp': None,\n 'datetime': None,\n 'nonce': None,\n }\n for i in range(0, len(response)):\n order = response[i]\n side = 'asks' if (order['side'] == 'Sell') else 'bids'\n amount = self.convert_from_raw_quantity(symbol, self.safe_string(order, 'size'))\n price = self.safe_number(order, 'price')\n # https://github.com/ccxt/ccxt/issues/4926\n # https://github.com/ccxt/ccxt/issues/4927\n # the exchange sometimes returns null price in the orderbook\n if price is not None:\n result[side].append([price, amount])\n result['bids'] = self.sort_by(result['bids'], 0, True)\n result['asks'] = self.sort_by(result['asks'], 0)\n return result",
"def purchase(self, item_type):",
"def test_make_order_with_quantity_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': 50, 'quantity': -3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Price and quantity must be ints >= 1')",
"def get_order_book(self, pair):\r\n method = self.public_endpoints['order_book']['method']\r\n url = self.base_url + self.public_endpoints['order_book']['url'].format(pairId=pair)\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def mocked_requests_get(*args, **kwargs):\n class MockResponse:\n def __init__(self, json_data, status_code):\n self.json_data = json_data\n self.status_code = status_code\n\n def json(self):\n return self.json_data\n\n query_author = \"books?q=\" + \"eduardo-mendoza\" + \"&i=\" + \"author_name\"\n query_title = \"books?q=\" + \"rina-de-gatos\" + \"&i=\" + \"title\"\n query_publisher = \"books?q=\" + \"salamandra\" + \"&i=\" + \"publisher_name\"\n\n query_subject = \"subjects?q=\" + \"computer-science\"\n query_subject_page = \"subjects?q=\" + \"computer-science\" + \"&p=2\"\n query_author_page = \"books?q=\" + \"eduardo-mendoza\" + \"&i=\" + \"author_name\" + \"&p=2\"\n query_isbn = \"book/\" + \"9788408105626\"\n ISBN_with_cover = \"9788432291395\"\n ISBN_without_cover = \"9781582346816\"\n ISBN_no_book_cover = \"0000000000000\"\n\n if args[0] == settings.ISBNDB_API_URL + query_author:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_AUTHOR_SEARCH_RESPONSE).read()), 200)\n elif args[0] == settings.ISBNDB_API_URL + query_author_page:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_AUTHOR_SEARCH_PAGE_RESPONSE).read()), 200)\n elif args[0] == settings.ISBNDB_API_URL + query_isbn:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_ISBN_SEARCH_RESPONSE).read()), 200)\n elif args[0] == settings.ISBNDB_API_URL + query_title:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_TITLE_SEARCH_RESPONSE).read()), 200)\n elif args[0] == settings.ISBNDB_API_URL + query_publisher:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_PUBLISHER_SEARCH_RESPONSE).read()), 200)\n elif args[0] == settings.ISBNDB_API_URL + query_subject:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_SUBJECT_SEARCH_RESPONSE).read()), 200)\n elif args[0] == settings.ISBNDB_API_URL + query_subject_page:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_SUBJECT_SEARCH_PAGE_RESPONSE).read()), 200)\n elif args[0] == settings.GOOGLEBOOKS_API_URL + ISBN_with_cover:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_COVER_SEARCH_RESPONSE).read()), 200)\n elif args[0] == settings.GOOGLEBOOKS_API_URL + ISBN_without_cover:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_COVER_SEARCH_NO_IMAGE_RESPONSE).read()), 200)\n elif args[0] == settings.GOOGLEBOOKS_API_URL + ISBN_no_book_cover:\n return MockResponse(json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_COVER_SEARCH_NO_BOOK_RESPONSE).read()), 200)\n\n return MockResponse({}, 404)",
"def book(book_isbn):\n\n if len(book_isbn) == 10 and book_isbn.isdigit():\n # Get book details\n book_res = db.execute(\"SELECT * FROM books WHERE isbn = :book_isbn\",\n {\"book_isbn\": book_isbn}).fetchone()\n session[\"book_res\"] = book_res\n session[\"book_id\"] = book_res.id\n session[\"book_isbn\"] = book_res.isbn\n\n res = requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"bgh2LQUdiQnnDznXzFMbg\", \"isbns\": book_isbn})\n resp = res.json()\n # return resp\n\n\n # Get reviews\n review_res = db.execute(\"SELECT * FROM reviews WHERE book_id = :book_id\",\n {\"book_id\": session[\"book_id\"]}).fetchall()\n session[\"review_res\"] = review_res\n\n return render_template(\"book.html\", book=book_res, reviews=review_res, count=resp[\"books\"][0],name=session[\"name\"])\n\n return render_template(\"book.html\", message=\"Oops, something went wrong.\",name=session[\"name\"])"
] | [
"0.6053869",
"0.6048009",
"0.6044216",
"0.5897844",
"0.58446103",
"0.5696819",
"0.55940896",
"0.55275124",
"0.54358864",
"0.5434902",
"0.5411537",
"0.54063505",
"0.5378661",
"0.5351381",
"0.5297224",
"0.5285088",
"0.52831036",
"0.5255942",
"0.525464",
"0.5244282",
"0.52399427",
"0.5231292",
"0.52144426",
"0.519949",
"0.51907706",
"0.51744515",
"0.51709914",
"0.516544",
"0.5119298",
"0.5116675"
] | 0.6794005 | 0 |
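request_return above stamps the row with datetime.date.today() and lets the database driver serialize the date. A minimal sketch of the same insert-and-read-back flow follows, assuming sqlite3 (which has no native DATE type, so the date is stored as ISO text) and a simplified returnrequest schema.

import datetime
import sqlite3

db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("CREATE TABLE returnrequest ("
            "requestID INTEGER PRIMARY KEY AUTOINCREMENT, "
            "orderNumber INTEGER, requestDate TEXT, ISBN TEXT, quantity INTEGER)")

def request_return(cur, order_number, isbn, quantity):
    # Store today's date as ISO-8601 text; a MySQL driver would accept the date object directly.
    today = datetime.date.today().isoformat()
    cur.execute("INSERT INTO returnrequest (orderNumber, requestDate, ISBN, quantity) "
                "VALUES (?, ?, ?, ?)", (order_number, today, isbn, quantity))
    return cur.lastrowid

req_id = request_return(cur, 42, "9780131103627", 1)
cur.execute("SELECT * FROM returnrequest WHERE requestID = ?", (req_id,))
print(cur.fetchone())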
Given a login ID, return a dict containing all of the return requests associated with the user. | def get_return_requests(self, loginID):
result = {'requestID': [], 'orderNumber': [], 'requestDate': [], 'ISBN': [], 'quantity': [], 'orderDate': [],
'status': [], 'title': []}
self.cursor.execute("""SELECT requestID, R.orderNumber, requestDate, B.ISBN, quantity, orderDate, status, B.title
FROM returnrequest R, orderlog O, Book B WHERE O.loginID = %s AND O.orderNumber = R.orderNumber AND
B.ISBN = R.ISBN ORDER BY requestDate DESC""", (loginID,))
for request in self.cursor.fetchall():
result['requestID'].append(request[0])
result['orderNumber'].append(request[1])
result['requestDate'].append(request[2])
result['ISBN'].append(request[3])
result['quantity'].append(request[4])
result['orderDate'].append(request[5])
result['status'].append(request[6])
result['title'].append(request[7])
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_info_by_id(self, user_id: int) -> dict:",
"def get_all_requests(user_id):\n db = get_db()\n requests = db.requests\n \n # Check if the user_id is a string\n if not isinstance(user_id, str):\n raise APIException(status_code=400, message='user_id not a string')\n \n cursor = requests.find({\"$or\":[ {\"mentor_id\": user_id}, {\"mentee_id\": user_id}]})\n context = {\"requests\": []}\n for document in cursor:\n temp = document\n temp['request_id'] = str(document['_id'])\n temp['mentee_profile'] = get_mentee(document['mentee_id'], with_partners=0)\n temp['mentor_profile'] = get_mentor(document['mentor_id'], with_partners=0)\n del temp['_id']\n del temp['mentor_id']\n del temp['mentee_id']\n context[\"requests\"].append(temp)\n \n context['url'] = \"/api/v1/\" + user_id + \"/requests/\"\n return flask.jsonify(**context)",
"def get_ids_from_login_list(khoros_object, login_list, return_type='list'):\n id_list, id_dict = [], {}\n for login in login_list:\n user_id = get_user_id(khoros_object, login=login)\n id_list.append(user_id)\n id_dict[login] = user_id\n return id_list if return_type == 'list' else id_dict",
"def fetch_logged_data(run_id: str) -> dict:\n client = mlflow.tracking.MlflowClient()\n data = client.get_run(run_id).data\n # Exclude system tags: https://www.mlflow.org/docs/latest/tracking.html#system-tags\n tags = {k: v for k, v in data.tags.items() if not k.startswith(\"mlflow.\")}\n artifacts = list(yield_artifacts(run_id))\n return {\n \"params\": data.params,\n \"metrics\": data.metrics,\n \"tags\": tags,\n \"artifacts\": artifacts,\n }",
"def get_all_access():\n\t# Get the email from the user making the request\n\temail = get_jwt_identity()\n\treturn get_all_access_helper(email)",
"def get_user_data(prs, client_id, client_secret):\n users = {}\n for owner, repo, number, pr in prs:\n username = pr.username\n\n # Initialize the User if needed\n if username not in users:\n print(pr.user_url, file=sys.stderr)\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret\n }\n resp = requests.get(pr.user_url, params=payload)\n\n # Abort if the return is an error\n out = resp.json()\n if 'message' in out:\n pprint.pprint(out, file=sys.stderr)\n raise Exception(resp.text)\n\n user = User(out)\n users[username] = user\n\n users[username].add_pr(pr)\n\n return users",
"def get_credentials_requests(request, router_id):\n router = models.Router.objects.get(pk=router_id)\n manager = get_manager(router.manufacturer, router.model)\n requests = manager.request_manager.get_login_credentials()\n serializer = serializers.RouterRequestSerializer(requests, many=True)\n return Response(serializer.data)",
"def get_basic_userinfo(self, loginID, my_id):\n info = {'loginID': '', 'firstName': '', 'lastName': '', 'orderCount': 0, 'books_purchased': 0,\n 'num_comments': 0,\n 'comments': [], 'books_commented': [], 'trusted': 0, 'untrusted': 0, 'personalStatus': ''}\n self.cursor.execute(\"\"\"SELECT DISTINCT C.loginID, firstName, lastName, COUNT(DISTINCT orderNumber),\n COUNT(DISTINCT commentID) FROM customercredentials C, comment CO, orderlog O \n WHERE C.loginID = %s AND O.loginID = %s AND CO.loginID = %s\"\"\", (loginID, loginID, loginID))\n\n result = self.cursor.fetchone()\n info['loginID'] = result[0]\n info['firstName'] = result[1]\n info['lastName'] = result[2]\n info['orderCount'] = result[3]\n info['num_comments'] = result[4]\n\n self.cursor.execute(\"\"\"SELECT SUM(quantity) FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND loginID=%s\"\"\", (loginID,))\n result = self.cursor.fetchone()\n info['books_purchased'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT * FROM comment WHERE loginID = %s ORDER BY commentDate DESC\"\"\", (loginID,))\n result = self.cursor.fetchall()\n for comment in result:\n info['comments'].append(comment)\n\n for comment in info['comments']:\n info['books_commented'].append(self.get_single_book_info(comment[1]))\n self.cursor.execute(\"\"\"SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='TRUSTED'\"\"\",\n (loginID,))\n result = self.cursor.fetchone()\n info['trusted'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT COUNT(loginID) FROM trusts WHERE otherLoginID=%s AND trustStatus='UNTRUSTED'\"\"\",\n (loginID,))\n result = self.cursor.fetchone()\n info['untrusted'] = result[0]\n\n self.cursor.execute(\"\"\"SELECT trustStatus FROM trusts WHERE loginID=%s AND otherLoginID=%s\"\"\",\n (my_id, loginID))\n result = self.cursor.fetchone()\n if result:\n info['personalStatus'] = result[0]\n return info",
"def get_user_request_by_id(self, id):\n user_request_table = Table('user_request', self.metadata, autoload=True)\n try:\n u = self.session.query(user_request_table).filter(user_request_table.c.id==id).one()\n raw_request = u._asdict()\n user_request = json.loads(DateTimeEncoder().encode(raw_request))\n return user_request\n except Exception as e:\n logger.info(f\"Error retrieving request {id}: {e}\")\n return False",
"def get_identities(environ, start_response):\n store = environ['tiddlyweb.store']\n username = environ['wsgiorg.routing_args'][1]['username']\n usersign = environ['tiddlyweb.usersign']['name']\n roles = environ['tiddlyweb.usersign']['roles']\n\n if username != usersign and 'ADMIN' not in roles:\n raise HTTP403('Bad user for action')\n\n identities = []\n try:\n mapped_bag = store.get(Bag('MAPUSER'))\n tiddlers = store.list_bag_tiddlers(mapped_bag)\n matched_tiddlers = control.filter_tiddlers(tiddlers,\n 'select=mapped_user:%s' % username, environ)\n identities = [tiddler.title for tiddler in matched_tiddlers]\n except NoBagError:\n pass\n\n start_response('200 OK', [\n ('Content-Type', 'application/json; charset=UTF-8')])\n return [simplejson.dumps(identities)]",
"def get_signatories(account_id):\n query = iroha.query(\"GetSignatories\", account_id=account_id)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)",
"def __get_user(self, login):\n\n user = {}\n\n if not login:\n return user\n\n user_raw = self.client.get_user(login)\n user = json.loads(user_raw)\n self._push_cache_queue(user_raw)\n user_orgs_raw = \\\n self.client.get_user_orgs(login)\n user['organizations'] = json.loads(user_orgs_raw)\n self._push_cache_queue(user_orgs_raw)\n self._flush_cache_queue()\n\n return user",
"def login_to_dict(login):\n return dict(\n id=login.id,\n username=login.username,\n email=login.email,\n last_login=login.last_login,\n last_action=login.last_action,\n role=login.role.name,\n userdata=login.user.to_dict()\n )",
"def get_users(user_id):\n my_user = storage.get(\"User\", user_id)\n if my_user:\n return jsonify(my_user.to_dict()), 200\n else:\n abort(404)",
"def loginAudit(self, params):\n\n sortLimitParams = self.setSortLimitParameters(params)\n \n filterObj = Q()\n\n if params.get('searchEmail'):\n user_ids = []\n users = WebUsers.objects.filter(mail__icontains=params.get('searchEmail'))\n for user in users:\n user_ids.append(user.uid)\n \n filterObj = filterObj & Q(created_by_id__in=user_ids)\n if params.get('searchIpAddress'):\n filterObj = filterObj & Q(ip_address__icontains=params.get('searchIpAddress'))\n if params.get('searchStartLoginDate'):\n filterObj = filterObj & Q(date_created__gte=params.get('searchStartLoginDate'))\n if params.get('searchEndLoginDate'):\n filterObj = filterObj & Q(date_created__lte=params.get('searchEndLoginDate'))\n if params.get('searchIds'):\n filterObj = filterObj & Q(id__in=params.get('searchIds').split(\",\"))\n\n result = LoginAudit.objects.filter(filterObj).order_by(sortLimitParams['dir'] + sortLimitParams['sort']) [sortLimitParams['start']: sortLimitParams['limit']]\n count = LoginAudit.objects.filter(filterObj).count()\n\n cursor = connection.cursor()\n records = []\n for item in result:\n record = {}\n \n record['id'] = item.id\n record['ip_address'] = item.ip_address\n record['login_date'] = item.date_created\n record['logout_date'] = item.logout_date\n #get the details of this user\n user = WebUsers.objects.get(uid=item.created_by_id)\n record['email'] = user.mail\n \n records.append(record)\n\n cursor.close()\n \n return {'totalCount': count, 'records': records}",
"def return_request(self):\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, request.json[\"net_id\"], request.json[\"request_id\"])\n request_submitted_marker = \"{0}request.submitted\".format(folder_path)\n request_processed_marker = \"{0}request.processed\".format(folder_path)\n request_returned_marker = \"{0}request.returned\".format(folder_path)\n request_voided_marker = \"{0}request.voided\".format(folder_path)\n\n if get_user_roles(current_user.net_id)[\"STFADM\"] and path.exists(request_submitted_marker):\n try:\n return_message = request.json[\"return_message\"].strip()\n\n if path.exists(request_processed_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"You must unprocess a request before returning it.\"})\n elif path.exists(request_voided_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been voided. Please refresh the page.\"})\n elif path.exists(request_returned_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been returned. Please refresh the page.\"})\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"r\") as request_details_json:\n request_details = json.load(request_details_json)\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"w\") as request_details_json:\n date_time = \"{0}\".format(datetime.now()).split()\n\n request_details[\"history\"].append({\"date\": date_time[0],\n \"time\": date_time[1],\n \"action\": \"Returned\",\n \"actor\": {\n \"first_name\": current_user.first_name,\n \"last_name\": current_user.last_name,\n \"email\": current_user.email,\n \"uta_id\": current_user.uta_id\n },\n \"metadata\": {\n \"message\": return_message\n }})\n json.dump(request_details, request_details_json)\n\n with open(request_returned_marker, mode=\"w\") as returned_marker:\n returned_marker.write(\"/n\")\n\n if return_message:\n return_message_html = \"<br><br>Message from {0}:<br>\" \\\n \"<blockquote style='border-left: 3px solid rgb(200, 200, 200); \" \\\n \"border-top-color: rgb(200, 200, 200); border-right-color: \" \\\n \"rgb(200, 200, 200); border-bottom-color: rgb(200, 200, 200); \" \\\n \"padding-left: 1ex; margin-left: 0.8ex; color: rgb(102, 102, 102);'>\" \\\n \"<div style='color: rgb(0, 0, 0);'>{1}</div>\" \\\n \"</blockquote>\".format(current_user.first_name, return_message)\n return_message = \"\\n\\nMessage from {0}:\\n{1}\".format(current_user.first_name, return_message)\n else:\n return_message_html = \"\"\n\n request_date = \"{0:02d}/{1:02d}/{2:04d}\".format(request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"],\n request_details[\"request_date\"][\"year\"])\n email_subject = \"Reimbursement Request Returned\"\n email_body = app_constants.EMAILS[\"return_request\"][\"text\"].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n \"{0}mavapps/\".format(URL_FULL_PATH), request_details[\"requester\"][\"net_id\"],\n request_details[\"folder_name\"], return_message,\n request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n email_body_html = app_constants.EMAILS[\"return_request\"][\"html\"].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n 
\"{0}mavapps/\".format(URL_FULL_PATH), request_details[\"requester\"][\"net_id\"],\n request_details[\"folder_name\"], return_message_html,\n request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n\n if SRV != \"prod\":\n emails = self.__TEST_EMAILS__\n else:\n emails = [[\"{0} {1}\".format(current_user.first_name, current_user.last_name), current_user.email],\n [\"{0} {1}\".format(request_details[\"requester\"][\"first_name\"], request_details[\"requester\"][\"last_name\"]), request_details[\"requester\"][\"email\"]]] \\\n + self.__PROD_EMAILS__\n\n self.mailer.send_mail(emails, email_subject, email_body, email_body_html, from_name=\"CSE Reimbursement App\")\n\n remove(request_submitted_marker)\n\n return jsonify({\"success\": True, \"type\": \"success\", \"message\": \"Request returned to the user successfully.\"})\n except Exception as e:\n print(e)\n return abort(400)\n return abort(403)",
"def get_user_jobs_route(id):\n user = models.User.query.filter_by(id=id).first()\n\n if user.get_id() is not None:\n _tasks = user.get_tasks_in_progress()\n running_tasks = get_running_task_dicts(_tasks)\n\n _tasks = user.get_finished_tasks()\n finished_tasks = get_finished_task_dicts(_tasks)\n\n response_object = {\n 'running_tasks': running_tasks,\n 'finished_tasks': finished_tasks\n }\n else:\n response_object = {'status': 'error'}\n print(jsonify(response_object))\n return jsonify(response_object)",
"def fetch_requests(v1):\n \n #check if user has any requests\n if len(all_requests) < 1:\n return jsonify({\n \"message\":\"You have not made any requests yet\"\n })\n \n #if user has more than one request\n if len(all_requests) >= 1:\n return jsonify({\n \"message\":\"Successfully fetched requests\",\n \"requests\":[\n a_request.__dict__ for a_request in all_requests\n ]\n })\n return jsonify({\"message\":\"Can not fetch requests now\"})",
"def login_success(user_id):\n session['user'] = {}\n u_obj = User.query.filter(User.user_id == user_id).first()\n saved_events = UserEvent.query.filter_by(user_id = user_id).all()\n user = session['user']\n user['user_id'] = user_id\n user['name'] = u_obj.fname\n user['saved'] = [event.eventbrite_id for event in saved_events]",
"def user_for_login(request):\n log = get_log(\"user_for_login\")\n\n identifier = None\n\n # standard repoze related identity:\n if 'repoze.who.identity' in request.environ:\n identity = request.environ['repoze.who.identity']\n\n if 'username' in identity:\n identifier = identity['username']\n\n elif 'repoze.who.userid' in identity:\n identifier = identity['repoze.who.userid']\n\n # token based identity:\n elif 'pp.api_access.identity' in request.environ:\n identifier = request.environ['pp.api_access.identity']\n\n else:\n log.debug(\"No identifier recovered from environment!\")\n\n if not identifier:\n raise HTTPForbidden()\n\n if _USERS.get(identifier):\n found = _USERS.get(identifier)\n\n if _NAME_TO_ID.get(identifier):\n found = _USERS.get(_NAME_TO_ID.get(identifier))\n\n return found",
"def fetch_a_request(v1, requestid):\n\n #check if user has any requests\n if len(all_requests) < 1:\n return jsonify({\n \"message\":\"You have not made any requests yet\"\n })\n \n #if user has more than one request\n if len(all_requests) >= 1:\n returned_request = []\n for a_request in all_requests:\n if a_request.request_id == int(requestid):\n returned_request.append(a_request)\n return jsonify({\n \"message\": \"Successfully fetched the request\",\n \"request\": returned_request[0].__dict__\n })\n \n return jsonify({\n \"message\":\"Request doesnt exist\"\n })",
"def view_user(self):\n\n logged_in = authenticated_userid(self.request)\n return {\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n }",
"def login(request):\r\n login_url = route_url('login', request)\r\n referrer = request.url\r\n if referrer == login_url:\r\n referrer = u'/' # never use the login form itself as came_from\r\n\r\n came_from = request.params.get('came_from', referrer)\r\n\r\n message = u''\r\n login = u''\r\n password = u''\r\n\r\n if 'form.submitted' in request.params:\r\n login = request.params['login'].lower()\r\n password = request.params['password']\r\n\r\n LOG.debug(login)\r\n auth = UserMgr.get(username=login)\r\n LOG.debug(auth)\r\n LOG.debug(UserMgr.get_list())\r\n\r\n if auth and auth.validate_password(password) and auth.activated:\r\n # We use the Primary Key as our identifier once someone has\r\n # authenticated rather than the username. You can change what is\r\n # returned as the userid by altering what is passed to remember.\r\n headers = remember(request, auth.id, max_age=60 * 60 * 24 * 30)\r\n auth.last_login = datetime.utcnow()\r\n\r\n # log the successful login\r\n AuthLog.login(login, True)\r\n\r\n # we're always going to return a user to their own /recent after a\r\n # login\r\n return HTTPFound(\r\n location=request.route_url(\r\n 'user_bmark_recent',\r\n username=auth.username),\r\n headers=headers)\r\n\r\n # log the right level of problem\r\n if auth and not auth.validate_password(password):\r\n message = \"Your login attempt has failed.\"\r\n AuthLog.login(login, False, password=password)\r\n\r\n elif auth and not auth.activated:\r\n message = \"User account deactivated. Please check your email.\"\r\n AuthLog.login(login, False, password=password)\r\n AuthLog.disabled(login)\r\n\r\n elif auth is None:\r\n message = \"Failed login\"\r\n AuthLog.login(login, False, password=password)\r\n\r\n return {\r\n 'message': message,\r\n 'came_from': came_from,\r\n 'login': login,\r\n 'password': password,\r\n }",
"def _get_data_user(self, id):\n logging.info(\"[_get_data_user] Pide la informacion del usuario al Shared server\")\n try:\n response = requests.get(ss.URL + '/users/' + str(id), headers={'token': \"superservercito-token\"})\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n logging.error('[_get_data_user] Conexión con el Shared dio error: ' + repr(response.status_code))\n abort(response.status_code)\n logging.info(\"[_get_data_user] La consulta al Shared fue correcta.\")\n return response.json()",
"def _get_data_user(self, id):\n logging.info(\"[_get_data_user] Pide la informacion del usuario al Shared server\")\n try:\n response = requests.get(ss.URL + '/users/' + str(id), headers={'token': \"superservercito-token\"})\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n logging.error('[_get_data_user] Conexión con el Shared dio error: ' + repr(response.status_code))\n abort(response.status_code)\n logging.info(\"[_get_data_user] La consulta al Shared fue correcta.\")\n return response.json()",
"def get_user_orders(self, loginID):\n order_details = {}\n self.cursor.execute(\"\"\"SELECT orderNumber, orderDate FROM orderlog WHERE loginID=%s \n ORDER BY orderDate DESC, orderNumber DESC\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n order_details[str(order[0])] = {'title': [], 'quantity': [], 'ISBN': []}\n # this line only needs to execute once, but its easier to do it like this.\n order_details[str(order[0])]['date'] = order[1]\n self.cursor.execute(\"\"\"SELECT ISBN FROM orderlog O INNER JOIN productof P ON O.orderNumber = P.orderNumber\n WHERE O.orderNumber=%s\"\"\", (order[0],))\n for book in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT title, quantity FROM book B, productof P, orderlog O WHERE P.ISBN=%s\n AND P.orderNumber = O.orderNumber AND P.ISBN = B.ISBN AND O.orderNumber = %s\"\"\", (book[0], order[0]))\n for details in self.cursor.fetchall():\n title = details[0]\n quantity = details[1]\n order_details[str(order[0])]['title'].append(title)\n order_details[str(order[0])]['quantity'].append(quantity)\n order_details[str(order[0])]['ISBN'].append(book[0])\n return order_details",
"def get_user_folders_dict(user_id):\n return { folder['full_name'] : folder['id'] for folder in canvas_api.pull_folders(user_id) }",
"def get_responses():\n\n\tresponses = []\n\tif auth.user_id is not None:\n\t\trows = db(db.response.created_by == auth.user_id).select(db.response.ALL)\n\tfor i, r in enumerate(rows):\t \n\t\t\tt = dict(\n\t\t\t\tuser_email = r.user_email,\n\t\t\t\tuser_name = get_user_name_from_email(r.user_email),\n\t\t\t\tsurvey_idx = r.survey_idx,\n\t\t\t\topt1 = r.opt1,\n\t\t\t\topt2 = r.opt2,\n\t\t\t\topt3 = r.opt3,\n\t\t\t\topt4 = r.opt4,\n\t\t\t)\n\t\t\tresponses.append(t)\n\n\tlogged_in = auth.user_id is not None\n\temail = None\n\tif logged_in:\n\t\temail = auth.user.email\n\n\treturn response.json(dict(\n\t\tresponses=responses,\n\t\tlogged_in=logged_in,\n\t\temail=email,\n\t))",
"def get(self, user_id):\n if user_id:\n return get_from_user_id(user_id)\n else:\n # No user_id given; this is a GET all users request.\n if not current_user.is_admin:\n error(403, \"Logged in user not admin \")\n\n user_db_data = user_db_util.fetchall(g.database)\n\n response_data: Dict[str, List[Dict[str, str]]] = {\"users\": []}\n for user_entry in user_db_data:\n response_data[\"users\"].append(\n {\n \"id\": user_entry[\"user_id\"],\n \"email\": user_entry[\"email\"],\n \"name\": user_entry[\"name\"],\n \"group\": user_entry[\"group_name\"],\n \"admin\": user_entry[\"admin\"],\n \"timestamp\": user_entry[\"date_created\"],\n }\n )\n\n return jsonify(response_data), 201",
"def _get(self, user_id):\n user = DB_USER_TABLE.get(doc_id=int(user_id))\n if not user:\n flask_restful.abort(404, message=f\"User '{user_id}' not found!\")\n res = {\n \"id\" : user.doc_id\n }\n res.update(user)\n res['_links'] = self.make_links({\n \"self\" : User.get_self_url(user.doc_id),\n \"contained_in\" : UserList.get_self_url(),\n \"customers\" : UserCustomerList.get_self_url(user.doc_id),\n \"tickets\" : UserTicketList.get_self_url(user.doc_id)\n })\n return res"
] | [
"0.55100137",
"0.53969973",
"0.53640866",
"0.5216878",
"0.5162285",
"0.51496017",
"0.5084875",
"0.50645024",
"0.5029313",
"0.5011257",
"0.49894708",
"0.49887967",
"0.49819955",
"0.49323818",
"0.4917424",
"0.49163958",
"0.48780292",
"0.4853705",
"0.4852659",
"0.48472676",
"0.48179904",
"0.4817123",
"0.4812745",
"0.48100424",
"0.48100424",
"0.48091218",
"0.4806723",
"0.4795596",
"0.47719213",
"0.47519863"
] | 0.6872694 | 0 |
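The query and document above describe building a dict of parallel lists from a single parameterized JOIN. Below is a minimal, self-contained sketch of that same pattern, not taken from the original codebase: sqlite3 stands in for the MySQL-style connection, and the table and column names are illustrative assumptions.

import sqlite3

def rows_to_column_dict(cursor, sql, params, columns):
    # Run one parameterized query and fan the rows out into {column: [values...]}.
    result = {name: [] for name in columns}
    cursor.execute(sql, params)
    for row in cursor.fetchall():
        for name, value in zip(columns, row):
            result[name].append(value)
    return result

if __name__ == "__main__":
    db = sqlite3.connect(":memory:")
    cur = db.cursor()
    cur.execute("CREATE TABLE returnrequest (requestID INTEGER, loginID TEXT, status TEXT)")
    cur.execute("INSERT INTO returnrequest VALUES (1, 'alice', 'PENDING')")
    db.commit()
    print(rows_to_column_dict(
        cur,
        "SELECT requestID, status FROM returnrequest WHERE loginID = ? ORDER BY requestID",
        ("alice",),
        ["requestID", "status"],
    ))

Keeping the placeholder (? here, %s in the snippet above) instead of string formatting is what keeps the loginID parameter safe from SQL injection.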
Function to find all of the return requests with a status of "PENDING". This is used by the manager view, where the manager decides whether to accept or deny each request. | def get_pending_requests(self):
result = {'requestID': [], 'orderNumber': [], 'requestDate': [], 'ISBN': [], 'quantity': [], 'orderDate': [],
'title': []}
self.cursor.execute("""SELECT requestID, R.orderNumber, requestDate, B.ISBN, quantity, orderDate, B.title
FROM returnrequest R, orderlog O, Book B WHERE status='PENDING' AND O.orderNumber = R.orderNumber AND
B.ISBN = R.ISBN ORDER BY requestDate ASC""")
for request in self.cursor.fetchall():
result['requestID'].append(request[0])
result['orderNumber'].append(request[1])
result['requestDate'].append(request[2])
result['ISBN'].append(request[3])
result['quantity'].append(request[4])
result['orderDate'].append(request[5])
result['title'].append(request[6])
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_all_pending_accounts(cls):\n return cls.query.filter_by(status=CfsAccountStatus.PENDING.value).all()",
"def get_pending_registration_requests(self,user,site):\n\n return self.filter(project=site,\n user=user,\n status=RegistrationRequest.PENDING)",
"def get_pending_orders(self):\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'pending', ''), auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"def get_pending_payments():\n try:\n database.execute(\"SELECT pymt_id, txhash, txid, amount_paid FROM payments WHERE status = %s\", (PAYMENT_STATUS_PENDING,))\n return database.fetchall()\n except database.psycopg2.Error as e:\n raise Exception(e.pgerror) from None\n except Exception as e:\n log.error('Failed to get pending payments')\n log.error(e)\n return []",
"def get_pending_jobs(self):\n try:\n result = self._session.query(JobEntity).\\\n filter(JobEntity.status == 'PENDING').\\\n order_by(asc(JobEntity.queued)).\\\n all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict",
"def get_pending_friendships(self):\n url = 'friendships/pending/'\n return self.send_request(url)",
"def pending(self):\n\n return dict([(scanId,\n len(list(filter(lambda x: x[3].status == 'pending',\n futurelist))))\n for scanId, futurelist in iteritems(self.futures)])",
"def getAllPendingTargets(self, autonomous=None):\n getTarget = \"\"\"SELECT * FROM submitted_target \n WHERE submitted = 'pending'\"\"\"\n\n inputParams = None\n\n if autonomous is not None:\n inputParams = (autonomous,)\n getTarget += \" AND autonomous = %s\"\n \n getTarget += \";\"\n\n return super(SubmittedTargetDAO, self).getResultsAsModelList(getTarget, inputParams)",
"def pending_list(cls, num=20):\n plaques = Plaque.query().filter(Plaque.approved != True\n ).order(Plaque.approved\n ).order(-Plaque.created_on\n ).fetch(limit=num)\n return plaques",
"def list(self, request):\n\n to_approve_requests = Task.objects.filter(\n Q(approver_email=request.user.email),\n Q(state=Task.SUCCESS, review_output=True)\n | Q(state=Task.ERROR, review_output=True),\n ).order_by(\"-registered_on\")\n\n own_requests = Task.objects.filter(author_email=request.user.email).order_by(\n \"-registered_on\"\n )\n for request in own_requests:\n if request.state != Task.OUTPUT_RELEASED:\n request.output = None\n\n return Response(\n {\n \"to_approve_requests\": TaskSerializer(\n to_approve_requests, many=True\n ).data,\n \"own_requests\": TaskSerializer(own_requests, many=True).data,\n }\n )",
"def is_pending(self):\n if self.status == \"PENDING\":\n return True\n else:\n return False",
"def get_requested(self):\n return BBSitting.objects.filter(booked=self).filter(booking__confirmed=False)",
"def search_pending_op():\n ops = db.session.query(Op).filter(Op.status == \"pending\").filter(Op.client_id == None).all()\n\n print(\"Ops:\", ops)\n op_found = None\n for op in ops:\n inputs = json.loads(op.inputs)\n\n not_computed = []\n for op_id in inputs:\n if db.session.query(Op).get(op_id).status != \"computed\":\n not_computed.append(op_id)\n\n if len(not_computed) == 0:\n op_found = op\n break\n\n return op_found",
"def get_pending_transactions():\n\n return History.get_pending().get()",
"def get_friend_requests(self, user):\n return self.filter(addresser_user=user, status=Friendship.STATUS_PENDING, active=True)",
"def worklist():\n from wheelcms_axle.content import Content\n pending = Content.objects.filter(state=\"pending\", node__isnull=False)\n return pending",
"def list_pending_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.reserved()",
"def users_groups_pending():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n\n # Query user's pending project requests\n project_requests = get_user_pending_project_requests(unix_name)\n project_requests = [\n project_request\n for project_request in project_requests\n if session[\"url_host\"][\"unix_name\"] in project_request[\"name\"]\n ]\n # Check user status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n return render_template(\n \"users_groups_pending.html\",\n project_requests=project_requests,\n user_status=user_status,\n )",
"def get_workflow_pending_approval_jobs(workflow_id, headers):\n\n for current_job in get_all_items(f\"/workflow/{workflow_id}/job\", headers):\n if (current_job.get(\"type\") == \"approval\") and (current_job.get(\"status\") == \"on_hold\"):\n yield current_job",
"def get_all_status():\n return \"\"",
"def approve_pending_for_list(self, groupId):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/Members/ApproveList/\"))",
"def approve_all_pending(self, groupId):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/Members/ApproveAll/\"))",
"def get_by_status(status):\n return list(tasks.find({'status': status}))",
"def getPendingBuildsets():\n # TODO: this is not implemented anywhere",
"def pending_transactions(self):\n return self._call_account_method(\n 'pendingTransactions'\n )",
"def _getQuestionsPendingResponse(date, ministry):\n status = u\"Question pending response\" #q_state.response_pending\n session = Session()\n qfilter=sql.and_(\n (domain.Question.c.ministry_submit_date < date ),\n (domain.Question.c.status == status),\n (domain.Question.c.ministry_id == ministry.ministry_id)\n )\n query = session.query(domain.Question).filter(qfilter)\n return query.all()",
"def is_pending(self):\n status = self.get_status()\n return status[\"status\"] == 3",
"def fetch_pending(self):\n pending = self.open(self.urls['pending'])\n soup = BeautifulSoup(pending.read())",
"def get_accepted_registration_requests(self,user,site):\n return self.filter(project=site,\n user=user,\n status=RegistrationRequest.ACCEPTED)",
"async def find_notifications_by_status(db_session: Session, status: str):\n notifications = None\n if status == NotificationStatusEnum.FAILURE:\n seconds = get_api_settings().DELIVERY_FAILURE_RETRY_TIME_FRAME\n notifications = await NotificaitonCRUD.find_notifications_by_status_time(db_session,\n status,\n seconds)\n else:\n notifications = await NotificaitonCRUD.find_notifications_by_status(db_session, status)\n\n return notifications"
] | [
"0.6479581",
"0.6326572",
"0.6316774",
"0.62999344",
"0.6126734",
"0.61056966",
"0.60382605",
"0.5866562",
"0.57933176",
"0.5786126",
"0.57370985",
"0.5717551",
"0.5700132",
"0.5696704",
"0.5616951",
"0.5557678",
"0.5550404",
"0.5544873",
"0.5542309",
"0.5527181",
"0.5525483",
"0.5508973",
"0.5468335",
"0.54445195",
"0.5436609",
"0.5435641",
"0.5420884",
"0.54193306",
"0.5415823",
"0.53987074"
] | 0.7002635 | 0 |
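As a contrast to the parallel-list shape used by get_pending_requests above, the short hypothetical sketch below returns one dict per pending row, deriving the keys from cursor.description; sqlite3 and the table layout are assumptions for illustration only.

import sqlite3

def fetch_pending_requests(cursor):
    # Only rows still awaiting a manager decision, oldest first.
    cursor.execute(
        "SELECT requestID, orderNumber, requestDate "
        "FROM returnrequest WHERE status = ? ORDER BY requestDate ASC",
        ("PENDING",),
    )
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]

if __name__ == "__main__":
    db = sqlite3.connect(":memory:")
    cur = db.cursor()
    cur.execute("CREATE TABLE returnrequest "
                "(requestID INTEGER, orderNumber INTEGER, requestDate TEXT, status TEXT)")
    cur.execute("INSERT INTO returnrequest VALUES (1, 10, '2024-01-02', 'PENDING')")
    cur.execute("INSERT INTO returnrequest VALUES (2, 11, '2024-01-03', 'DENIED')")
    db.commit()
    print(fetch_pending_requests(cur))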
Update the database once the manager makes a decision on a return request. The boolean parameter "approved" is passed to indicate whether the manager accepted or rejected the request. Upon approval, update the status of the return request, then update the order by removing the number of books specified by quantity and ISBN, and finally update the stock count of the book that was returned. Upon rejection, just update the status of the request. | def update_request_status(self, requestID, ISBN, quantity, approved):
if approved:
self.cursor.execute("""SELECT orderNumber FROM returnrequest WHERE requestID=%s""", (requestID,))
orderNumber = self.cursor.fetchone()[0]
self.cursor.execute("""UPDATE returnrequest SET status='APPROVED' WHERE requestID=%s""", (requestID,))
self.cursor.execute("""SELECT quantity FROM productof WHERE orderNumber=%s AND ISBN=%s""",
(orderNumber, ISBN))
remaining_books_ordered = self.cursor.fetchone()[0] - int(quantity)
if not int(remaining_books_ordered):
self.cursor.execute("""DELETE FROM productof WHERE orderNumber=%s AND ISBN=%s""", (orderNumber, ISBN))
else:
self.cursor.execute("""UPDATE productof SET quantity=quantity-%s WHERE orderNumber=%s AND ISBN=%s""",
(quantity, orderNumber, ISBN))
self.cursor.execute("""UPDATE book SET stock=stock+%s WHERE ISBN=%s""", (quantity, ISBN))
self.db.commit()
if self.is_empty_order(orderNumber):
self.cursor.execute("""DELETE FROM orderlog WHERE orderNumber=%s""", (orderNumber,))
else:
self.cursor.execute("""UPDATE returnrequest SET status='DENIED' WHERE requestID=%s""", (requestID,))
self.db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_return(self, orderNumber, ISBN, quantity):\n\n date = datetime.date.today()\n\n self.cursor.execute(\"\"\"INSERT INTO returnrequest (orderNumber, requestDate, ISBN, quantity)\n VALUES (%s,%s,%s,%s)\"\"\", (orderNumber, date, ISBN, quantity))\n self.db.commit()",
"def on_update_after_submit(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()",
"def Approve(self, request, global_params=None):\n config = self.GetMethodConfig('Approve')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def post(self):\n order = None\n args = book_return_parser.parse_args()\n order_id = args['order_id']\n copy_id = args['copy_id']\n if order_id is not None and copy_id is not None:\n return 'Only one parameter is needed', 400\n if order_id is not None:\n order = db.session.query(models.Order).filter_by(id=order_id).first()\n if copy_id is not None:\n order = db.session.query(models.Order).filter_by(copy=copy_id).first()\n if order is None:\n return 'Please provide a correct order_id or copy_id for the book', 404\n copy = db.session.query(models.Copy).filter_by(id=order.copy).first()\n if copy is None:\n return 'Copy of the book does not exist', 404\n order = change_order_status(order.id, ORDER_STATUS_COMPLETED)\n copy.status = BOOK_COPY_STATUS_AVAILABLE\n db.session.commit()\n return {'order': order.serialize(),\n 'message': 'Book returned, Order completed!'}, 200",
"def update(self, instance, validated_data):\n\n # If an order is cancelled or delivered, it cannot be modified.\n if instance.status == CANCELLED or instance.status == DELIVERED:\n raise exceptions.PermissionDenied('This order cannot be modified.')\n\n # If an order is already confirmed but UI/agent sends another confirmation request by mistake,\n # we deny it as each confirmation is a big operation that includes generating invoices/ledger entries.\n if instance.status == validated_data['status'] == CONFIRMED:\n raise exceptions.PermissionDenied('This order is already confirmed.')\n\n if instance.status == ACCEPTED and validated_data['status'] == CONFIRMED:\n # 1. Transition: accepted -> confirmed\n instance.status = validated_data.get('status')\n elif instance.status == CONFIRMED and validated_data['status'] in [CANCELLED, DELIVERED]:\n # 2. Transition: confirmed -> cancelled/delivered and return\n instance.status = validated_data.get('status')\n instance.save(update_fields=['status'])\n return instance\n else:\n # In case of any invalid transition, reject it.\n raise exceptions.PermissionDenied('There seems to be some discrepancy. Please contact your agent.')\n\n # Get exclusive lock on all relevant data rows\n orderlines = instance.orderlines.select_for_update().select_related('product').all()\n\n # Do order and product update in a single transaction\n with transaction.atomic():\n\n # Validate that order can be approved.\n self._validate_units_and_balance_in_orderlines(orderlines, instance.user)\n\n for orderline in orderlines:\n\n # Decrement product stock count by orderline(buying) requirement\n product = orderline.product\n product.units = F('units') - orderline.units\n product.save(update_fields=['units'])\n\n # Lock current standing price into the orderline, calculate sub total and lock it.\n product_price = product.price\n orderline.confirmed_price = product_price\n orderline.locked = CONFIRMED\n orderline.sub_total = product_price * F('units')\n orderline.save(update_fields=['confirmed_price', 'locked', 'sub_total'])\n\n # Mark order as confirmed.\n instance.save(update_fields=['status'])\n return instance",
"def UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])",
"def warehouse_officer_confirm_qty(self):\n if (\n self.approve_request_ids is None\n or self.approve_request_ids is False\n ):\n raise UserError(\"No line(s) defined!\")\n self._compute_confirm()\n for line in self.approve_request_ids:\n line._compute_state()\n if any(line.state != \"available\" for line in self.approve_request_ids):\n raise Warning(\n \"Please procure the items that are short in stock or process pending purchase agreements and try again!\"\n )\n else:\n self.state = 'transfer'",
"def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()",
"def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)",
"def reviewhandler():\n objectid = request.values.get('objectid', 0, type=int)\n if not objectid:\n abort(400)\n\n form = AuthorUpdateForm(formdata=request.form)\n visitor = DataExporter()\n visitor.visit(form)\n\n workflow_object = workflow_object_class.get(objectid)\n workflow_object.extra_data[\"approved\"] = True\n workflow_object.extra_data[\"ticket\"] = request.form.get('ticket') == \"True\"\n workflow_object.extra_data['formdata'] = visitor.data\n workflow_object.data = formdata_to_model(workflow_object, visitor.data)\n workflow_object.save()\n db.session.commit()\n\n resume.delay(workflow_object.id)\n\n return render_template('authors/forms/new_review_accepted.html',\n approved=True)",
"def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()",
"def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" \\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()",
"def close_access_approval(\n request: AuthenticatedHttpRequest,\n *,\n access_request_pk: int,\n entity: str,\n approval_request_pk: int,\n) -> HttpResponse:\n\n with transaction.atomic():\n if entity == \"importer\":\n access_request = get_object_or_404(ImporterAccessRequest, pk=access_request_pk)\n approval_request = get_object_or_404(\n ImporterApprovalRequest.objects.select_for_update(), pk=approval_request_pk\n )\n else:\n access_request = get_object_or_404(ExporterAccessRequest, pk=access_request_pk)\n approval_request = get_object_or_404(\n ExporterApprovalRequest.objects.select_for_update(), pk=approval_request_pk\n )\n\n case_progress.access_request_in_processing(access_request)\n\n if approval_request.requested_from != request.user:\n raise PermissionDenied\n\n if not can_user_manage_org_contacts(request.user, access_request.link):\n raise PermissionDenied\n\n if request.method == \"POST\":\n form = ApprovalRequestResponseForm(request.POST, instance=approval_request)\n\n if form.is_valid():\n approval_request = form.save(commit=False)\n approval_request.status = ApprovalRequest.Statuses.COMPLETED\n approval_request.response_date = timezone.now()\n approval_request.response_by = request.user\n approval_request.save()\n send_approval_request_completed_email()\n return redirect(reverse(\"workbasket\"))\n\n else:\n form = ApprovalRequestResponseForm(instance=approval_request)\n\n context = {\n \"process\": access_request,\n \"form\": form,\n \"entity\": entity,\n \"approval\": approval_request,\n }\n\n return render(request, \"web/domains/case/access/case-approval-respond.html\", context)",
"def update(approval_id=None,state=None):\n\n if approval_id is None or state is None: return\n \n client = Client('http://labss2.fiit.stuba.sk/pis/ws/Students/Team071approval?WSDL')\n approval = get(int(approval_id))\n approval.state = int(state)\n approval.name = \"\"\n\n client.service.update('071', 'Vreqif', approval.id, approval)\n\n # check if applicaiton is now approved or canceled, then notify employee\n a.check_state_and_notify(approval.application_id)\n\n return approval",
"def change_approval(self, status):\r\n if status == 'approve':\r\n return self.approve()\r\n elif status == 'disapprove':\r\n return self.disapprove()",
"def return_book(self, user, book):\n r = self.get(rented_by=user, book=book, returned_on=None)\n r.returned_on = datetime.now()\n r.save()\n r.book.in_stock += 1\n r.book.save()",
"def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()",
"def update_investment():\n\n user_id = session['user']\n inv_id = request.args.get('update-inv')\n input_quantity = request.args.get('quantity')\n quantity = int(str(input_quantity).replace(',', ''))\n input_cost = request.args.get('cost')\n cost = int(str(input_cost).replace(',', ''))\n date_of_investment = request.args.get('inv-date')\n\n # Query selected investment to update\n updated_inv = Investment.query.get(inv_id)\n updated_inv.quantity = quantity\n updated_inv.cost = cost\n updated_inv.date_of_investment = date_of_investment\n\n db.session.commit()\n\n return redirect('/user-%s' % user_id)",
"def editrecipes():\n \n if request.method == \"POST\":\n \n rname = request.form.get('rname')\n ingredient_id = request.form.get('ingredient_id')\n ingredient_amount = request.form.get('editAmount')\n \n version_number = request.form.get('version_number')\n \n # connect and update database\n con = db_connect()\n cur = con.cursor()\n \n sql = \"\"\"UPDATE recipes SET ingredient_amount=?/100.0 WHERE rname=? AND ingredient_id=? AND version_number=?\"\"\"\n\n cur.execute(sql, (ingredient_amount, rname, ingredient_id, version_number))\n con.commit()\n con.close()\n\n return redirect(url_for('recipes.recipesoverview', rname=rname, version_number=version_number))\n \n elif request.method == \"GET\":\n ingredient_id = request.args.get(\"product_code\")\n rname = request.args.get(\"rname\")\n version_number = request.args.get('version_number')\n is_approved = request.args.get('is_approved')\n\n # for recipe approval\n if is_approved:\n if is_approved == '0':\n approved = 1\n elif is_approved == '1':\n approved = 0\n\n # connect and update database\n con = db_connect()\n cur = con.cursor()\n \n # update approval on current version\n sql = \"\"\"UPDATE recipes SET approved=? WHERE rname=? AND version_number=?\"\"\"\n cur.execute(sql, (approved, rname, version_number))\n\n # remove approval from all other recipes (ensures users can only have one version approved)\n sql = \"\"\"UPDATE recipes SET approved=? WHERE rname=? AND NOT version_number=?\"\"\"\n cur.execute(sql, (0, rname, version_number))\n con.commit()\n con.close()\n\n # for ingredient amount\n con = db_connect()\n cur = con.cursor()\n cur.execute(\"\"\" SELECT name, rname, ingredient_id, round(ingredient_amount*100, 2) AS ingredient_amount \n FROM recipes JOIN ingredients ON ingredients.product_code = recipes.ingredient_id \n WHERE rname = :rname AND ingredient_id =:ingredient_id AND version_number =:version_number\"\"\", {'rname':rname, 'ingredient_id':ingredient_id, 'version_number':version_number})\n ingredient = cur.fetchall()\n return jsonify(serialize(cur, ingredient))",
"def update_amounts(self, save=True):\n self.amount_donated = self.get_amount_total(\n [StatusDefinition.SUCCESS, StatusDefinition.PENDING,\n StatusDefinition.PLEDGED])\n self.amount_needed = self.amount_asked - self.amount_donated\n\n if self.amount_needed < 0:\n # Should never be less than zero\n self.amount_needed = 0\n\n if save:\n self.save()",
"def return_book(self, book_id, return_date):\r\n for book in LibraryDB.book_list:\r\n if book.book_id == book_id and book_id in [i.book_id for i in self.issued_books]:\r\n book.availability = True\r\n book.return_date = return_date\r\n result = self.calculate_fine(book)\r\n self.fine += result\r\n self.issued_books.remove(book)\r\n date = book.return_date.strftime('%b %d, %Y')\r\n LibraryDB.transaction_history.append(Transaction(book, self, \"Book Returned\",str(date)))\r\n if result == 0:\r\n print(\"Book returned on time!\")\r\n else:\r\n print(\"Book returned!\" + \"\\n\" + \"Fine amount : \" + str(result) + '\\n')\r\n break\r\n else:\r\n print(\"Please enter a valid id\")",
"def handle_nr_approve(self, nr, svc) -> Request:\n return self.approve_nr(nr, svc)",
"def update_approve(to_address: str, delta_amount: int) -> int:\n raise NotImplementedError()",
"def approve(self, employee: Employee, comments: str = None) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"PENDING\" state, raise an\n # OperationForbiddenError\n if not self.is_pending:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.APPROVED.choice_display\n }\n )\n\n # If the order's item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order with no associated OrderItems cannot be '\n 'approved.'\n )\n\n # Perform db mutations in a transaction\n with transaction.atomic():\n # Adjust the stock of each item in the order's item list\n order_item: OrderItem\n for order_item in self.orderitem_set.all():\n item: Inventory = order_item.item\n item.deduct(employee.user, order_item.quantity)\n\n # Mark this order as approved\n self.update(\n employee.user,\n comments=comments,\n handler=employee,\n review_date=now(),\n state=Order.OrderState.APPROVED.choice_value\n )",
"def manage_access_approval_withdraw(\n request: AuthenticatedHttpRequest,\n *,\n access_request_pk: int,\n entity: Literal[\"importer\", \"exporter\"],\n approval_request_pk: int,\n) -> HttpResponse:\n\n with transaction.atomic():\n model_cls = ImporterAccessRequest if entity == \"importer\" else ExporterAccessRequest\n access_request = get_object_or_404(model_cls, pk=access_request_pk)\n\n case_progress.access_request_in_processing(access_request)\n\n approval_request = get_object_or_404(\n access_request.approval_requests.filter(is_active=True).select_for_update(),\n pk=approval_request_pk,\n )\n\n approval_request.is_active = False\n approval_request.status = ApprovalRequest.Statuses.CANCELLED\n approval_request.save()\n\n return redirect(\n reverse(\n \"access:case-management-access-approval\",\n kwargs={\"access_request_pk\": access_request.pk, \"entity\": entity},\n )\n )",
"def test_approve(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'mod_queue',\r\n 'op': 'approve',\r\n 1: [self.problem_id.to_deprecated_string(), '2.0', '2']})\r\n view.approve(post, self.course_id, 'mod_queue')\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='mod_queue', usage_id=self.problem_id).value\r\n self.assertTrue('2.0' not in json.loads(problem_hints) or len(json.loads(problem_hints)['2.0']) == 0)\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value\r\n self.assertTrue(json.loads(problem_hints)['2.0']['2'] == ['Hint 2', 1])\r\n self.assertTrue(len(json.loads(problem_hints)['2.0']) == 2)",
"def release_ownership_access_approval(\n request: AuthenticatedHttpRequest,\n *,\n approval_request_pk: int,\n entity: Literal[\"importer\", \"exporter\"],\n) -> HttpResponse:\n\n with transaction.atomic():\n if entity == \"importer\":\n approval_request = get_object_or_404(\n ImporterApprovalRequest.objects.select_for_update(), pk=approval_request_pk\n )\n else:\n approval_request = get_object_or_404(\n ExporterApprovalRequest.objects.select_for_update(), pk=approval_request_pk\n )\n\n case_progress.approval_request_in_processing(approval_request)\n\n if approval_request.requested_from != request.user:\n raise PermissionDenied\n\n org = approval_request.access_request.get_specific_model().link\n if not can_user_manage_org_contacts(request.user, org):\n raise PermissionDenied\n\n approval_request.requested_from = None\n approval_request.save()\n\n return redirect(reverse(\"workbasket\"))",
"def abc_confirm_invoice(self, lines, packages, data, params, res):\n invoice = params.get('invoice')\n if invoice and invoice.state == 'draft':\n self.env.cr.commit()\n env = None\n try:\n # Ne cursor doesn't time out when requesting lock.\n # Could be bad I guess? Works for now.\n # TODO: Look into setting a more reasonable lock wait time.\n new_cr = Registry(self.env.cr.dbname).cursor()\n new_cr.autocommit(True)\n env = api.Environment(new_cr, self.env.uid, self.env.context)\n # Validate invoice\n invoice.signal_workflow('invoice_open')\n res['invoice']['name'] = invoice.number\n res['messages'].append(u\"Created and confirmed invoice %s.\" % invoice.number)\n res['results']['invoice'] = 'confirmed'\n # Commit to unlock the invoice sequence\n env.cr.commit()\n except Exception as e:\n res['warnings'].append((\n _(u\"Failed to confirm invoice %s!\") % (invoice and (invoice.number or invoice.name) or 'Unknown'),\n '%s\\n\\nTraceback:\\n%s' % (e.message or 'Unknown Error', traceback.format_exc())))\n finally:\n if env:\n env.cr.close()",
"def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)",
"def approve(self, approver=None, parent_job=None):\r\n if self.status != 'PENDING':\r\n msg = _(\r\n \"Only orders that are in 'PENDING' state can be approved. \"\r\n \"Current state of order is '{status}'.\"\r\n ).format(status=self.status)\r\n raise CloudBoltException(msg)\r\n\r\n approve_this_order = False\r\n if self.is_multilevel_approval():\r\n logger.info('models.approve is multilevel!')\r\n self.approve_my_grms(approver)\r\n logger.info(f'models.approve after approve_my_grms ({approver})!')\r\n if self.is_multilevel_approval():\r\n logger.info('models.approve ml approval complete!')\r\n approve_this_order = True\r\n else:\r\n logger.info('models.approve is NOT multilevel!')\r\n #single-level approval\r\n approve_this_order = True\r\n\r\n if not approve_this_order:\r\n #should only kick off if multilevel approvals\r\n msg = _(\r\n \"Cannot fully approve this order. Multilevel approvals not complete. \"\r\n \"Current state of order is '{status}'.\"\r\n ).format(status=self.status)\r\n return [], msg\r\n\r\n try:\r\n # Raise an error to bubble up specific reason as part of the exception\r\n self.group.quota_set.can_use(raise_error=True, **self.net_usage())\r\n except QuotaSetError as quota_set_error:\r\n raise QuotaError(_(\r\n \"Cannot approve order #{order_id} because doing so would exceed the \"\r\n \"quota for group '{group}'. {error}\"\r\n ).format(order_id=self.id, group=self.group, error=quota_set_error))\r\n\r\n # Before we create job records, order the order items to make\r\n # sure decom jobs are queued before prov jobs. the job engine\r\n # may still parallelize them, that's something we can revisit\r\n # later. In the meantime, customers can set the concurrency\r\n # level to 1 to prevent this.\r\n # we're taking advantage of the fact that \"decom\" comes before\r\n # \"prov\" in the alphabet here.\r\n order_items = [oi.cast() for oi in self.top_level_items.order_by(\r\n \"real_type\", \"add_date\")]\r\n\r\n order_items, msg = self.__filter_illegal_order_items(order_items)\r\n if not order_items:\r\n msg = _(\"{message} There are no valid order items left. This order is \"\r\n \"being marked as complete.\").format(message=msg)\r\n self.complete(\"SUCCESS\")\r\n return [], msg\r\n\r\n self.status = \"ACTIVE\"\r\n self.approved_by = approver\r\n self.approve_date = get_current_time()\r\n self.save()\r\n\r\n history_msg = _(\"The '{order}' order has been approved.\").format(order=escape(self))\r\n self.add_event('APPROVED', history_msg, profile=self.owner)\r\n\r\n # run pre order execution hook\r\n try:\r\n cbhooks.run_hooks(\"pre_order_execution\", order=self)\r\n except cbhooks.exceptions.HookFailureException as e:\r\n self.status = \"FAILURE\"\r\n self.save()\r\n msg = _(\"Failed to run hook for order approval. Status: {status},\"\r\n \" Output: {output}, Errors: {errors}\").format(status=e.status, output=e.output, errors=e.errors)\r\n\r\n history_msg = _(\"The '{order}' order has failed.\").format(order=escape(self))\r\n self.add_event('FAILED', history_msg, profile=self.owner)\r\n raise CloudBoltException(msg)\r\n\r\n from jobs.models import Job\r\n # Saving job objects will cause them to be kicked off by the\r\n # job engine within a minute\r\n jobs = []\r\n\r\n for order_item in order_items:\r\n jobtype = getattr(order_item, 'job_type', None)\r\n if not jobtype:\r\n # the job type will default to the first word of the class type\r\n # ex. 
\"provision\", \"decom\"\r\n\r\n jobtype = str(order_item.real_type).split(\" \", 1)[0]\r\n quantity = 1\r\n # quantity is a special field on order_items. If an\r\n # order_item has the quantity field, kick off that many\r\n # jobs\r\n if hasattr(order_item, 'quantity') and \\\r\n order_item.quantity is not None and \\\r\n order_item.quantity != '':\r\n quantity = int(order_item.quantity)\r\n for i in range(quantity):\r\n job = Job(job_parameters=order_item,\r\n type=jobtype,\r\n owner=self.owner,\r\n parent_job=parent_job)\r\n job.save()\r\n\r\n # Associate the job with any server(s)\r\n # This may seem unnecessary because it's done when most jobs\r\n # run, but it's needed at the very least for scheduled server\r\n # modification jobs (for changing resources) so they show up on\r\n # the server as scheduled before they actually run\r\n servers = []\r\n if hasattr(order_item, \"server\"):\r\n servers = [order_item.server]\r\n elif hasattr(order_item, \"servers\"):\r\n servers = order_item.servers.all()\r\n for server in servers:\r\n server.jobs.add(job)\r\n\r\n jobs.append(job)\r\n\r\n # If it didn't make any jobs, just call it done\r\n if not jobs:\r\n self.complete(\"SUCCESS\")\r\n\r\n return jobs, msg"
] | [
"0.6181021",
"0.5821405",
"0.5728504",
"0.5678125",
"0.56594336",
"0.5642089",
"0.5628899",
"0.5625057",
"0.56249446",
"0.5601221",
"0.5569007",
"0.5563893",
"0.55225724",
"0.55221975",
"0.5515816",
"0.5506918",
"0.5492608",
"0.5466435",
"0.54306227",
"0.5397892",
"0.5373693",
"0.53709704",
"0.53665817",
"0.53434455",
"0.53367454",
"0.53341836",
"0.53270745",
"0.5325234",
"0.531787",
"0.53144675"
] | 0.76989347 | 0 |
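The approve/deny flow described above can be condensed into one function. The hypothetical sketch below mirrors that logic against any DB-API connection that uses ? placeholders (e.g. sqlite3); it assumes the returnrequest, productof and book tables already exist, and it omits the empty-order cleanup the original performs.

def decide_return(db, request_id, isbn, quantity, approved):
    cur = db.cursor()
    if not approved:
        # Rejection only flips the request status.
        cur.execute("UPDATE returnrequest SET status = 'DENIED' WHERE requestID = ?",
                    (request_id,))
        db.commit()
        return
    cur.execute("SELECT orderNumber FROM returnrequest WHERE requestID = ?", (request_id,))
    order_number = cur.fetchone()[0]
    cur.execute("UPDATE returnrequest SET status = 'APPROVED' WHERE requestID = ?",
                (request_id,))
    # Shrink the order line; drop it entirely once nothing is left.
    cur.execute("UPDATE productof SET quantity = quantity - ? "
                "WHERE orderNumber = ? AND ISBN = ?", (quantity, order_number, isbn))
    cur.execute("DELETE FROM productof WHERE orderNumber = ? AND ISBN = ? AND quantity <= 0",
                (order_number, isbn))
    # Returned copies go back into stock.
    cur.execute("UPDATE book SET stock = stock + ? WHERE ISBN = ?", (quantity, isbn))
    db.commit()

Issuing a single commit after the status change, the order-line adjustment and the restock keeps the approval branch atomic, which matches how the original defers db.commit() until all three updates have run.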
Return a list of all ISBNs in the database. This is used to randomly select books for orders in the mock data. | def demo_get_all_books(self):
results = []
self.cursor.execute("""SELECT ISBN FROM book""")
for book in self.cursor.fetchall():
results.append(book[0])
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_books_in_order(self, orderNumber):\n self.cursor.execute(\"\"\"SELECT ISBN, quantity FROM orderlog O, productof P WHERE O.orderNumber = P.orderNumber\n AND O.orderNumber=%s\"\"\",(orderNumber,))\n result = []\n for i in self.cursor.fetchall():\n result.append([i[0],i[1]])\n return result",
"def get_all(self):\n cursor = self._dbcon.cursor()\n cursor.execute(u\"select rowid,* from books\")\n result = cursor.fetchall()\n cursor.close()\n return [self._book_from_query_result(x) for x in result]",
"def getBooks(self, showAll=False):\n if showAll:\n sql = '''select ID, NAME from books;'''\n else:\n sql = '''\nselect books.id, books.name, books.author\nfrom books where exists (\nselect * from clippings where books.id = clippings.book);'''\n\n cur = self.__execute__(sql)\n return BookIter(cur)",
"def ISBNs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('isbns', default)\n return [HEP.ISBNObject(i) for i in tmp]",
"def get_all_borrowed_books():\n return BorrowBook.query.all()",
"def get_books(self):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bins_template\n # NOTE: This implementation currently ignores plenary view\n if self._catalog_session is not None:\n return self._catalog_session.get_catalogs()\n collection = JSONClientValidated('commenting',\n collection='Book',\n runtime=self._runtime)\n result = collection.find().sort('_id', DESCENDING)\n\n return objects.BookList(result, runtime=self._runtime, proxy=self._proxy)",
"def get_all_books() -> List[Dict]:\n pass",
"def get_books(self, genre_id):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT book_id FROM books WHERE genre_id=' + str(genre_id)):\n books.append(book.get_book(row[0]))\n\n return books",
"def auto_gen_isbn():\n isbn_number = []\n\n while isbn_number == []:\n\n for i in range(10):\n dig = random.randint(0, 9)\n isbn_number.append(dig)\n\n pos = 10\n addition = 0\n for num in isbn_number:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result != 0:\n isbn_number = []\n\n else:\n break\n\n string = str()\n for num in isbn_number:\n car = str(num)\n string += car\n\n string = list(string)\n\n string = string[0] + string[1] + '-' + string[2] + string[3] \\\n + string[4] + string[5] + string[6] + '-' + string[7] \\\n + string[8] + '-' + string[9]\n\n return string",
"def get_single_books(isbn):\n return_value = Book.get_book(isbn)\n return jsonify(return_value)",
"def get_book_by_isbn(isbn):\n return Book.get_book(isbn)",
"def get_isbn_items(query=\"\"):\n url = \"https://archive.org/advancedsearch.php?q=\" + query\n r = requests.get(url)\n isbn_items = r.json()[\"response\"][\"docs\"]\n print(f\"Length of isbn_items: {len(isbn_items)}\")\n return isbn_items",
"def get_order_books(self):\n return self.execute_http_call(\"/api/order_books\", \"GET\", headers=None)",
"def getISBN(self):\n return self.bookISBN",
"def get_recommended_books(self, orderNumber, loginID):\n invalid_isbn_list = []\n books_in_order = []\n possible_isbn_list = []\n self.cursor.execute(\"\"\"SELECT orderNumber FROM orderlog WHERE loginID=%s\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (order[0],))\n for ISBN in self.cursor.fetchall():\n invalid_isbn_list.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (orderNumber,))\n for ISBN in self.cursor.fetchall():\n books_in_order.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT P.ISBN FROM productof P WHERE EXISTS \n (SELECT orderNumber FROM productof P2 WHERE ISBN = %s AND P2.orderNumber = P.orderNumber)\"\"\", (ISBN[0],))\n for valid_isbn in self.cursor.fetchall():\n possible_isbn_list.append(valid_isbn[0])\n valid_isbn_list = [i for i in possible_isbn_list if i not in invalid_isbn_list]\n return_list = []\n for book in valid_isbn_list:\n book, author = self.get_single_book_info(book)\n return_list.append([book, author])\n return return_list",
"def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books",
"def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)",
"def openbooks_api(isbns):\n openbooks_volumes = []\n openbooks_endpoint = 'https://openlibrary.org/api/books'\n\n no_dash_isbns = [str(x.replace('-', '')) for x in isbns]\n\n for isbn in no_dash_isbns:\n payload = {'bibkeys': 'ISBN:' + isbn,\n 'format': 'json',\n 'jscmd': 'data'\n }\n r = requests.get(openbooks_endpoint, params=payload)\n d = {'no_dash_isbn': isbn,\n 'response': r.json()}\n openbooks_volumes.append(d)\n time.sleep(0.2)\n\n return openbooks_volumes",
"def get_book_list(session):\n debug('Getting the book list')\n\n purchased_packages = session.get('https://leanpub.com/api/v1/purchased_packages?include=book&archived=false&type=library').json()\n\n books_to_download = []\n\n for purchased_package in purchased_packages['data']:\n book_to_download = {\n 'id': purchased_package['attributes']['short_url']\n }\n\n book = None\n\n for included in purchased_packages['included']: # Get the book data\n if included['id'] == purchased_package['relationships']['book']['data']['id'] and included['type'] == 'Book':\n book = included['attributes']\n\n if not book:\n debug('Book not found for id #' + purchased_package['relationships']['book']['data']['id'], err=True)\n continue\n\n book_to_download['name'] = book['title']\n book_to_download['format'] = get_format_to_download(book, env('PREFERED_FORMAT'))\n\n books_to_download.append(book_to_download)\n\n debug('{} books to download'.format(len(books_to_download)))\n\n return books_to_download",
"def book_by_isbn(ISBN):\n data = {}\n for book in root.findall('Book'):\n for elem in book:\n isbn = book.find('ISBN').text\n if isbn == ISBN:\n data['id'] = book.attrib['id']\n data[elem.tag] = elem.text\n return data",
"def test_search_client_by_isbn(self, mock_get):\n\n response = isbn_utils.search_by(self.filter_isbn, self.ISBN)\n self.assertEqual(response.data, json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_ISBN_SEARCH_RESPONSE).read())[\"data\"])",
"def get_all_books():\n for n, book in enumerate(BOOKS, 1):\n state = 'YES' if book['read'] else 'NO'\n print(\n f\"{[n]} - {book['name'].capitalize()}, by {book['author'].capitalize()} - Read: {state}\"\n )",
"def get_single_book_info(self, isbn):\n self.cursor.execute(\"SELECT * FROM book WHERE ISBN=%s\", (isbn,))\n books = self.cursor.fetchall()\n for book in books:\n authors = []\n self.cursor.execute(\"\"\"SELECT name FROM Author A, Wrote W, Book B WHERE A.ID = W.authorID AND\n W.ISBN = B.ISBN AND B.ISBN = %s\"\"\", (isbn,))\n for auth in self.cursor.fetchall():\n authors.append(auth[0])\n return book, authors",
"def get_books_from_api(request, url='https://www.googleapis.com/books/v1/volumes?q=Hobbit'):\n response = requests.get(url)\n data = response.json()\n items = data.get('items')\n if items is None:\n items = []\n for item in items:\n book = item.get('volumeInfo')\n title = book.get('title', '--')\n authors = book.get('authors', ['unknown'])\n publishedDate = book.get('publishedDate')\n isbns = book.get('industryIdentifiers', [])\n pages = book.get('pageCount')\n cover_url = book.get('imageLinks')\n if cover_url:\n cover_url = cover_url.get('thumbnail')\n language = book.get('language')\n authors_list = []\n for author in authors:\n auth = get_author_object(author)\n authors_list.append(auth)\n isbn_10 = None\n isbn_13 = None\n for isbn in isbns:\n if isbn['type'] == 'ISBN_10':\n isbn_10 = isbn['identifier']\n elif isbn['type'] == 'ISBN_13':\n isbn_13 = isbn['identifier']\n lang = get_language_object(language)\n try:\n published = datetime.strptime(publishedDate, '%Y-%m-%d')\n except ValueError:\n year = int(publishedDate[:4])\n month = None\n day = None\n except TypeError:\n year = None\n month = None\n day = None\n else:\n year = published.year\n month = published.month\n day = published.day\n try:\n book = get_object_or_404(Book, title=title, publishedYear=year, publishedMonth=month, publishedDay=day,\n language=lang, pages=pages, cover=cover_url, isbn_10=isbn_10, isbn_13=isbn_13)\n for name in book.authors.all():\n if name not in authors_list:\n raise Http404\n except Http404:\n book = Book.objects.create(title=title, publishedYear=year, publishedMonth=month, publishedDay=day,\n language=lang, pages=pages, cover=cover_url, isbn_10=isbn_10, isbn_13=isbn_13)\n book.authors.set(authors_list)\n return redirect('all-books')",
"def test_get_all_books(self):\n\n\t\t# create book\n\t\tbook_1 = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\t\tbook_2 = {\n\t\t\t'title': 'Hello Books 2',\n\t\t\t'isbn': '8765456766'\n\t\t}\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tpost_book_1 = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(book_1)\n\t\t)\n\n\t\tpost_book_2 = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(book_2)\n\t\t)\n\n\t\tres = self.client.get(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'))\n\n\t\tres_data = json.loads(res.data.decode())\n\t\tself.assertEqual(len(res_data.get('books')), 2)",
"def consult_books(self, bar_code: str):\n try:\n book_data = []\n self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))\n for i in self.db.cursor.fetchall():\n book_data.append(i)\n except Exception as error:\n print(error)\n else:\n print(f\"ID BOOK: {book_data[0][0]}\\n\"\n f\"TITLE: {book_data[0][1]}\\n\"\n f\"AUTHOR: {book_data[0][2]}\\n\"\n f\"PRICE: R$:{book_data[0][3]}\\n\"\n f\"BAR CODE: {book_data[0][4]}\\n\"\n f\"STOCK: {book_data[0][5]}\")",
"def get_books_by_ids(self, book_ids):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bins_by_ids_template\n # NOTE: This implementation currently ignores plenary view\n # Also, this should be implemented to use get_Book() instead of direct to database\n if self._catalog_session is not None:\n return self._catalog_session.get_catalogs_by_ids(catalog_ids=book_ids)\n catalog_id_list = []\n for i in book_ids:\n catalog_id_list.append(ObjectId(i.get_identifier()))\n collection = JSONClientValidated('commenting',\n collection='Book',\n runtime=self._runtime)\n result = collection.find({'_id': {'$in': catalog_id_list}}).sort('_id', DESCENDING)\n\n return objects.BookList(result, runtime=self._runtime, proxy=self._proxy)",
"def get_user_books(user_id):\n return session.query(Book).filter(Book.user_id == user_id).all()",
"def get_book_data(isbn: int):\n try:\n book = next(iter(core.Book.search(('isbn', 'eq', isbn))))\n except StopIteration:\n pass # actually, I could put the whole rest of the function here\n else:\n data = core.Book.view_str(book.id)\n del data['id'], data['status'], data['return_date'], data['borrowed_by']\n del data['borrowed_by_id'], data['__str__']\n return data\n\n try:\n r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'\n + str(isbn) + '&method=simpleSearch&cqlMode=true')\n r.raise_for_status()\n except requests.exceptions.RequestException:\n raise core.BuchSchlossError('no_connection', 'no_connection')\n\n person_re = re.compile(r'(\\w*, \\w*) \\((\\w*)\\)')\n results = {'concerned_people': []}\n\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n if table is None:\n # see if we got multiple results\n link_to_first = page.select_one('#recordLink_0')\n if link_to_first is None:\n raise core.BuchSchlossError(\n 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)\n r = requests.get('https://portal.dnb.de'+link_to_first['href'])\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n\n for tr in table.select('tr'):\n td = [x.get_text('\\n').strip() for x in tr.select('td')]\n if len(td) == 2:\n if td[0] == 'Titel':\n results['title'] = td[1].split('/')[0].strip()\n elif td[0] == 'Person(en)':\n for p in td[1].split('\\n'):\n g = person_re.search(p)\n if g is None:\n continue\n g = g.groups()\n if g[1] == 'Verfasser':\n results['author'] = g[0]\n else:\n results['concerned_people'].append(g[1]+': '+g[0])\n elif td[0] == 'Verlag':\n results['publisher'] = td[1].split(':')[1].strip()\n elif td[0] == 'Zeitliche Einordnung':\n results['year'] = td[1].split(':')[1].strip()\n elif td[0] == 'Sprache(n)':\n results['language'] = td[1].split(',')[0].split()[0].strip()\n\n results['concerned_people'] = '; '.join(results['concerned_people'])\n return results",
"def get_all(cls):\n\t\treturn [el._to_dict() for el in Book.query.all()]"
] | [
"0.68952596",
"0.68626666",
"0.68582714",
"0.6747548",
"0.6734756",
"0.64708024",
"0.6463513",
"0.633142",
"0.63001794",
"0.62979406",
"0.6250897",
"0.62271947",
"0.6175948",
"0.61665535",
"0.6147512",
"0.61141545",
"0.6082323",
"0.6067532",
"0.60593945",
"0.6036859",
"0.6009479",
"0.60009223",
"0.59984314",
"0.5954201",
"0.5932972",
"0.5911042",
"0.58977646",
"0.5889688",
"0.58850753",
"0.5884859"
] | 0.81246585 | 0 |
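The positive document for this record boils down to a single SELECT ISBN FROM book query followed by unpacking the one-column rows, which is what distinguishes it from the near-miss negatives (per-order joins, Google Books and OpenLibrary lookups, and so on). A self-contained way to exercise the same pattern against an in-memory SQLite database (the book table and its rows are invented for the example; the original uses a MySQL-style cursor):

import random
import sqlite3

def demo_get_all_isbns(cursor):
    """Return every ISBN in the book table, mirroring the document's query."""
    cursor.execute("SELECT ISBN FROM book")
    return [row[0] for row in cursor.fetchall()]

if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("CREATE TABLE book (ISBN TEXT PRIMARY KEY, title TEXT)")
    cur.executemany(
        "INSERT INTO book VALUES (?, ?)",
        [("978-0131103627", "The C Programming Language"),
         ("978-0262033848", "Introduction to Algorithms")],
    )
    isbns = demo_get_all_isbns(cur)
    print(isbns)
    print("random pick for a mock order:", random.choice(isbns))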
Processes the tokenized articles and generates a DataFrame. Returns pd.DataFrame. | def process_wiki_tokenized() -> pd.DataFrame:
text_ids = []
text_string = []
articles = []
text_ids_intro = []
with open(WIKI_ARTICLES_TOKENIZED_PATH, "r") as json_file:
json_list = list(json_file)
for json_str in tqdm(json_list):
result = json.loads(json_str)
sections = result["tokenized_text"]
raw_text = result["raw_text"]
if not sections:
continue
# The original structure of a Wikipedia article is article <- sections <- paragraphs <- sentences <- words
# This removes the `sections` dimension
article_text_ids = list(itertools.chain.from_iterable(sections))
article_raw_text = list(itertools.chain.from_iterable(raw_text))
if not article_text_ids:
continue
if sections[0]:
article_text_ids_intro = sections[0]
else:
article_text_ids_intro = [article_text_ids[0]]
# Workaround for the cases where the introduction is null
text_ids.append(article_text_ids)
text_string.append(article_raw_text)
articles.append(clean_title(result["title"]))
text_ids_intro.append(article_text_ids_intro)
return pd.DataFrame(
list(zip(articles, text_ids, text_string, text_ids_intro)),
columns=["article", "text_ids", "raw_text", "text_ids_intro"],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tokenization(dataframe):\n nlp = English()\n tokenizer = nlp.Defaults.create_tokenizer(nlp)\n stemmer = SnowballStemmer(\"english\")\n\n dataframe.loc[:, \"tokens\"] = dataframe.loc[:, \"content\"].apply(\n lambda cell: [\n token.lemma_\n for token in tokenizer(cell)\n if token.is_stop is False\n and token.is_space is False\n and token.is_punct is False\n ]\n )\n dataframe.loc[:, \"tokens\"] = dataframe.loc[:, \"tokens\"].apply(\n lambda cell: [token for token in cell if len(token) >= 2]\n )\n dataframe[\"pos\"] = dataframe[\"content\"].apply(\n lambda cell: [\n tag for (word, tag) in nltk.pos_tag(cell.split())\n ] # spacy pos tagger does work properly, use content as important words like \"I\" or \"He\" are removed in\n # tokens (stopwords)\n )\n dataframe.loc[:, \"stems\"] = dataframe.loc[:, \"tokens\"].apply(\n lambda cell: [stemmer.stem(token) for token in cell]\n )\n return dataframe",
"def get_news():\n # empty dataframe\n df = pd.DataFrame() \n # read each url in list\n for url in inshorts_urls(): \n # add each dataframe of cards to df\n df = pd.concat([df, get_article(url)])\n # return all urls' cards\n return df",
"def dataframe_for_codeup_blog(urls):\n article_dict = []\n for url in urls:\n article_dict.append(get_article_dictionary(url))\n df = pd.DataFrame(article_dict)\n df.to_csv('./codeup_blog_posts.csv')\n return df",
"def get_data(articles): # Here, the articles will be very long strings\r\n vdictionary = {} # dictionary for tokens that are found in dictionary\r\n _odata = [0] * 12 # list collecting everything except date; last number of words=index:0\r\n word_length = 0 # initializing the value of word length; will be updated via loop\r\n tokens = re.findall('\\w+', articles) # Note that \\w+ splits hyphenated words\r\n for token in tokens: # Goes through generated tokens from articles\r\n if (not token.isdigit()) and (len(token) > 1) and (token in lm_dictionary.keys()): # conditions for checking if token is in dictionary\r\n _odata[1] += 1 # updating word count \r\n word_length += len(token) # updating word length\r\n if token not in vdictionary: # initial statement regarding steps for handling tokens not in the dictionary\r\n vdictionary[token] = 1 # count of tokens in text that show up in dictionary\r\n \r\n####### Keeping Track of Categorical Token Counts (Nonzero entry=True) also checks if word is stop word\r\n if lm_dictionary[token].positive and not lm_dictionary[token].stopword: _odata[2] += 1\r\n if lm_dictionary[token].negative and not lm_dictionary[token].stopword: _odata[3] += 1\r\n if lm_dictionary[token].uncertainty and not lm_dictionary[token].stopword: _odata[4] += 1\r\n if lm_dictionary[token].litigious and not lm_dictionary[token].stopword: _odata[5] += 1\r\n if lm_dictionary[token].weak_modal and not lm_dictionary[token].stopword: _odata[6] += 1\r\n if lm_dictionary[token].moderate_modal and not lm_dictionary[token].stopword: _odata[7] += 1\r\n if lm_dictionary[token].strong_modal and not lm_dictionary[token].stopword: _odata[8] += 1\r\n if lm_dictionary[token].constraining and not lm_dictionary[token].stopword: _odata[9] += 1\r\n #total_syllables += lm_dictionary[token].syllables # interesting parameter to measure\r\n\r\n #_odata[12] = len(re.findall('[0-9]', doc))\r\n # drop punctuation within numbers for number count\r\n articles = re.sub('(?!=[0-9])(\\.|,)(?=[0-9])', '', articles)\r\n articles = articles.translate(str.maketrans(string.punctuation, \" \" * len(string.punctuation)))\r\n #_odata[13] = len(re.findall(r'\\b[-+\\(]?[$€£]?[-+(]?\\d+\\)?\\b', doc))\r\n # _odata[14] = total_syllables / _odata[2]\r\n #print(_odata[1])\r\n _odata[10] = word_length / _odata[1] # computing average word length\r\n _odata[11] = len(vdictionary) # total vocab count\r\n \r\n # Convert counts to %\r\n for i in range(2, 9 + 1): # specifying range of percentages\r\n try:\r\n _odata[i] = (_odata[i] / _odata[1]) * 100 # updating count to percent\r\n except:\r\n print(\"zero denominator\")\r\n # Vocabulary\r\n \r\n return _odata # returning the data\r",
"def get_article1(url):\n # set agent\n agent = 'codeup ds germain' \n # query\n response = requests.get(url, headers={'User-Agent': agent}) \n # soup\n soup = BeautifulSoup(response.text) \n # get cat\n category = soup.find_all('li', {'class':'active-category selected'})[0].text \n # get raw cards\n cards = soup.select('.news-card') \n # create list of dicts for dataframe\n card_dict_list = [] \n # iterate each card\n for card in cards: \n # headline\n headline = card.find_all('span', {'itemprop':'headline'})[0].text \n # publish time\n publish_time = card.find_all('span', {'class':'time'})[0].text \n # content\n content = card.find_all('div', {'itemprop':'articleBody'})[0].text.strip() \n # create dict\n card_dict = {'headline':headline, 'publish_time':publish_time,\n 'category':category, 'content':content} \n # push dict to list\n card_dict_list.append(card_dict) \n # return dataframe\n return pd.DataFrame(card_dict_list)",
"def process_text(sentences):\n corpus = []\n for sentence in sentences:\n texts = re.sub('[^a-zA-Z]',' ', sentence).split()\n join_words = ' '.join(texts)\n corpus.append(join_words)\n return pd.DataFrame(corpus, columns=['Reviews'])",
"def create_tokens(self, dataframe):\n\n tokenize_dict = {}\n filtered_token_dict = {}\n iterator = dataframe.to_dict('dict')['line']\n stopWords = ['e1', '/e1', 'e2', '/e2', '<', '>', '<e1>', '</e1>', '<e2>', '</e2>']\n for key, val in iterator.items():\n tokenize_dict[key] = nltk.word_tokenize(val)\n\n for key, val in tokenize_dict.items():\n all_tokens = []\n filtered_tokens = []\n for i in range(len(val)):\n if val[i] == '<':\n val[i] = ''.join(val[i:i+3])\n \n all_tokens = [e for e in val if e not in ('e1', 'e2', '/e1', '/e2', '>')]\n filtered_tokens = [word for word in val if word not in stopWords]\n filtered_token_dict[key] = ', '.join(str(word) for word in filtered_tokens)\n tokenize_dict[key] = ', '.join(str(s) for s in all_tokens)\n\n tokenize_dataframe = self.create_dataframe(tokenize_dict, ['token'])\n filtered_tok_dataframe = self.create_dataframe(filtered_token_dict, ['filtered tokens'])\n \n dataframe['tokens'] = tokenize_dataframe['token']\n dataframe['filtered tokens'] = filtered_tok_dataframe['filtered tokens']\n\n return dataframe",
"def _load_all_articles_bow_preprocessed_content(self) -> pd.DataFrame:\n\n # Placeholder dataframe to store the article content\n all_preprocessed_content = pd.DataFrame(columns=['id', 'processed_content'])\n\n # Append preprocessed content from each publication\n for publication in ['daily_mail', 'the_guardian']:\n\n # If using an existing vocabulary, only pull the articles which have not yet been encoded\n if self._use_existing_vocab:\n\n sql_query = psy_sql.SQL(\"\"\"\n SELECT * FROM {source_schema_and_table}\n WHERE id NOT IN (SELECT id FROM encoded_articles.tfidf_representation);\n \"\"\").format(\n source_schema_and_table=psy_sql.Identifier(publication, 'article_content_bow_preprocessed')\n )\n\n publication_content = self._db_connection.get_dataframe(query=sql_query)\n\n # Otherwise re-load all articles to encode again\n else:\n publication_content = self._db_connection.get_dataframe(\n table_name='article_content_bow_preprocessed',\n schema=publication\n )\n\n all_preprocessed_content = pd.concat([all_preprocessed_content, publication_content])\n\n return all_preprocessed_content",
"def tokenization(news, word_to_id):\n tokenized_news = []\n lengths = []\n for date in news:\n daily_headlines = []\n daily_lengths = []\n for headline in news[date]:\n daily_lengths.append(len(headline))\n token = []\n for word in headline:\n if word in word_to_id:\n token.append(word_to_id[word])\n else:\n token.append(word_to_id['<unk>'])\n daily_headlines.append(token)\n\n lengths.append(daily_lengths)\n tokenized_news.append(daily_headlines)\n #tokens = np.array(tokens).astype('int32')\n return tokenized_news, lengths",
"def create_tokens(dataframe):\n\n tokenize_dict = {}\n iterator = dataframe.to_dict('dict')['line']\n\n for key, val in iterator.items():\n tokenize_dict[key] = nltk.word_tokenize(val)\n\n for key, val in tokenize_dict.items():\n l = []\n for i in range(len(val)):\n if val[i] == '<':\n val[i] = ''.join(val[i:i+3])\n \n l = [e for e in val if e not in ('e1', 'e2', '/e1', '/e2', '>')]\n tokenize_dict[key] = ', '.join(str(s) for s in l)\n\n tokenize_dataframe = create_dataframe(tokenize_dict, ['token'])\n\n dataframe['tokens'] = tokenize_dataframe['token']\n\n return dataframe",
"def encode_articles(self) -> None:\n\n # Retrieve all of the preprocessed version of articles\n preprocessed_content = self._load_all_articles_bow_preprocessed_content()\n\n # If a new vocabulary needs to be established, then analyse all texts\n if not self._use_existing_vocab:\n self._analyse_and_overwrite_existing_vocabulary(preprocessed_content['processed_content'].values)\n\n # Create a vectoriser using either the pre-existing vocabulary or a new one which has been extracted\n vocabulary = self._load_vocabulary()\n vectoriser = sklearn_text.TfidfVectorizer(vocabulary=vocabulary)\n\n encoded_articles_matrix = vectoriser.fit_transform(preprocessed_content['processed_content'].values)\n\n encoded_articles_dataframe = pd.DataFrame(\n # postgresql has a maximum number of columns which would be exceeded with two many words as columns,\n # so store them all as an array\n columns=['encoded'],\n index=preprocessed_content['id'].values\n )\n\n encoded_articles_dataframe['encoded'] = encoded_articles_matrix.toarray().tolist()\n\n encoded_articles_dataframe = encoded_articles_dataframe.reset_index().rename(columns={'index': 'id'})\n\n # Fully replace tf-idf table if vocabulary has been built again from scratch and the dimensions of the matrix\n # will have changed\n if not self._use_existing_vocab:\n self._db_connection.execute_database_operation('TRUNCATE TABLE encoded_articles.tfidf_representation;')\n\n self._db_connection.upload_dataframe(\n dataframe=encoded_articles_dataframe,\n table_name='tfidf_representation',\n schema='encoded_articles',\n if_exists='append',\n index=False\n )",
"def tokenize(self):\n\n self.feats = {\n 'features': [], # Lists of the `InputFeatures` objects.\n 'segments': [], # Segments of the phrase. 0: Promoun, 1: A-term, 2: B-term \n 'df_ids': [], # DataFrame index.\n 'target_token_ids': [] # Indexes of the target term in the tokens lists.\n }\n unique_id = 0 # Unique ID of the dataset.\n for _, row in tqdm(self.df.iterrows()):\n segment_tokens = self.tokenize_single_row(row)\n for j, segment in enumerate(segment_tokens):\n if segment['target_token_index'] > 0:\n features = self.tokens_to_features(unique_id, segment['tokens'])\n unique_id += 1\n self.feats['features'].append(features)\n self.feats['segments'].append(j)\n self.feats['target_token_ids'].append(segment['target_token_index'] )\n self.feats['df_ids'].append(row.ID)",
"def make_data_frame(words, years, feature_dict):\n\n temp = collections.defaultdict(list)\n feature_dict[\"word\"] = lambda word, year : word\n feature_dict[\"year\"] = lambda word, year : year\n for word in words:\n for year in years:\n for feature, feature_func in feature_dict.iteritems():\n temp[feature].append(feature_func(word, year))\n df = pd.DataFrame(temp)\n df = df.replace([np.inf, -np.inf], np.nan)\n df = df.dropna()\n return df",
"def tokenize_df(\n self, df: pd.DataFrame, text_column: AnyStr, language_column: AnyStr = \"\", language: AnyStr = \"language_column\"\n ) -> pd.DataFrame:\n self.tokenized_column = generate_unique(\"tokenized\", df.keys(), text_column)\n # Initialize the tokenized column to empty documents\n df[self.tokenized_column] = pd.Series([Doc(Vocab())] * len(df.index), dtype=\"object\")\n if language == \"language_column\":\n languages = df[language_column].dropna().unique()\n unsupported_languages = set(languages) - set(SUPPORTED_LANGUAGES_SPACY.keys())\n if unsupported_languages:\n raise TokenizationError(\n f\"Found {len(unsupported_languages)} unsupported languages in input dataset: {unsupported_languages}\"\n )\n for lang in languages: # iterate over languages\n language_indices = df[language_column] == lang\n text_slice = df.loc[language_indices, text_column] # slicing input df by language\n if len(text_slice) != 0:\n tokenized_list = self.tokenize_list(text_list=text_slice, language=lang)\n df.loc[language_indices, self.tokenized_column] = pd.Series(\n tokenized_list, dtype=\"object\", index=text_slice.index, # keep index (important)\n )\n else:\n tokenized_list = self.tokenize_list(text_list=df[text_column], language=language)\n df[self.tokenized_column] = tokenized_list\n return df",
"def create_NER(dataframe):\n\n dataframe['entities'] = dataframe['line']\n entity_dict = {}\n for i, val in enumerate(dataframe['entities']):\n e1 = re.findall('<e1>(.*?)</e1>', val)\n e2 = re.findall('<e2>(.*?)</e2>', val)\n entity_dict[i+1] = (str(e1[0]), str(e2[0]))\n\n entity_dataframe = create_dataframe(entity_dict, ['e1', 'e2'])\n dataframe = dataframe.drop(columns=['entities'])\n dataframe['e1'] = entity_dataframe['e1']\n dataframe['e2'] = entity_dataframe['e2']\n\n return dataframe",
"def text_feature_extract(df):\n return df",
"def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, \"author\": authors})",
"def _extract(texts: list[str], tokens: list[list[str]], sentences: list[list[str]], /,\n avg_words=True, avg_sentences=True, pos_distribution=True,\n foreign_words_ratio=True, lexicon=True, punctuation_distribution=True,\n n_jobs=1) -> pd.DataFrame:\n\n def process(function, objects: list, feature_name: str):\n result_ = np.vstack(Parallel(n_jobs)(delayed(function)(objects_) for objects_ in objects))\n\n # Build a list of the column names to create a features DataFrame\n n_columns = result_.shape[1]\n columns_name = [feature_name + f'_{i}' for i in range(1, n_columns + 1)]\n\n return pd.DataFrame(result_, columns=columns_name)\n\n results = []\n # Average length of words\n if avg_words:\n results.append(process(funcs.avg_length, tokens, AVG_WORDS))\n # Average length of sentences\n if avg_sentences:\n results.append(process(funcs.avg_length, sentences, AVG_SENTENCES))\n # POS distribution\n if pos_distribution:\n results.append(process(funcs.pos_distribution, tokens, POS_DISTRIBUTION))\n # Lexicon size\n if lexicon:\n results.append(process(funcs.lexicon, tokens, LEXICON_SIZE))\n # Foreign words ratio\n if foreign_words_ratio:\n results.append(process(funcs.foreign_words_ratio, tokens, FOREIGN_RATIO))\n # Punctuations distribution\n if punctuation_distribution:\n results.append(process(funcs.punctuations_distribution, texts, PUNCTUATIONS_DISTRIBUTION))\n\n if not results:\n raise ValueError(\"At least one feature must be chosen\")\n\n return pd.concat(results, axis=1)",
"def parse_gulde_news():\n # set url\n url = 'https://web-scraping-demo.zgulde.net/news' \n agent = 'codeup ds germain'\n # query\n response = requests.get(url, headers={'User-Agent': agent}) \n # soup\n soup = BeautifulSoup(response.text) \n # raw list of articles\n articles = soup.select('.grid.gap-y-12 > div') \n # list of dicts for dataframe\n article_list = [] \n # parse each article\n for article in articles: \n # grab title\n title = article.h2.text \n # grab date, author, contents of article\n date, author, contents = article.select('.py-3')[0]\\\n .find_all('p') \n # add dict of info to list\n article_list.append({'title':title, 'date':date.text,\n 'author':author.text, 'contents':contents.text}) \n # return dataframe\n return pd.DataFrame(article_list)",
"def get_top_keywords_from_articles(self, kwords_list):\n _all_keywords = []\n for a in kwords_list:\n if a != []:\n for w in a:\n _all_keywords.append([w['keyword'],w['weight'],w['label']])\n _df_g = pd.DataFrame(_all_keywords, columns=[\"Keyword\", \"Count\",\"Label\"])\n _df_g.sort_values(by=\"Count\", inplace=True, ascending=False)\n _df_g.reset_index(drop=True, inplace=True)\n _df_g.to_csv('test.csv')\n print(len(_df_g))\n\n _df_g['Keyword'] = _df_g['Keyword'].apply(self.remove_repeat_words)\n _df_g.dropna(axis=0, inplace=True)\n p1,p2 = self.pos_taggers(_df_g)\n _df_g['c_POS'] = p1\n _df_g['s_POS'] = p2\n _df_g['c_POS_score'] = _df_g['c_POS'].apply(self.combine_pos_score)\n _df_g['s_POS_score'] = _df_g['s_POS'].apply(self.specific_pos_score)\n _df_g['Count'] = _df_g['Count'] + _df_g['c_POS_score'] + _df_g['s_POS_score'] \n print(len(_df_g))\n _df_g.sort_values(by='Count',inplace=True, ascending=False)\n print(len(_df_g))\n _df_g = _df_g.reset_index(drop=True)\n _df_g = _df_g[:10]\n response_dict = dict()\n response_dict['nc'] = \", \".join(_df_g['Keyword'].to_list())\n return response_dict",
"def cleaninto_df(frame:pd) -> pd:\n # remove repeated characters EXAMPLE: DIMPLLLLEEEEE -> DIMPLE\n # nopunc = word_tokenize(nopunc) this might not work. find something else\n\n stop = stopwords.words('english')\n newStopWords = ['get', 'http','there','and','i','t','it','d']\n stop.extend(newStopWords)\n lemmatizer = WordNetLemmatizer()\n clean = []\n new_col = []\n frame['Cleaned'] = None\n for tweet in frame.content:\n if 'RT' in tweet:\n if tweet.index('RT')>5:\n tweet = tweet[:tweet.index('RT')]\n else:\n tweet = tweet[2:]\n # WHAT ARE WE TRYING TO CLEAN HERE?\n # cleaning with preprocessor library https://pypi.org/project/tweet-preprocessor/\n tweet = ' '.join(re.sub(\"(@\\w+)|([^A-Za-z]+)|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n # changes #November1 -> November: need to remove full hashtag?\n # changes @poetweatherford: -> poetweatherford\n # changes don’t -> don t, children's -> children s\n print(\"after regex:\" + str(tweet))\n clean.append(tweet.lower())\n for clean_tweet in clean:\n word_tokens = word_tokenize(clean_tweet)\n clean_tokens = [word for word in word_tokens if word not in stop]\n stems = []\n for item in clean_tokens:\n stems.append(lemmatizer.lemmatize(item))\n new_sentence = ' '.join(stems)\n new_col.append(new_sentence.lower())\n frame['Cleaned'] = new_col\n return frame",
"def textTagger():\r\n df_results=pd.DataFrame()\r\n \r\n #for word in medications:\r\n ## print (m_word)\r\n # med_list.append(m_word)\r\n # se=[]\r\n # sentence=[]\r\n counter=0\r\n results=[]\r\n for s_word in surprise_words:\r\n s_word=sent_tokenize(str(s_word))\r\n s_word=\"\".join(s_word)\r\n surprise_list.append(s_word)\r\n \r\n for row in df['full_text']:\r\n sentence=[]\r\n sentence.append(sent_tokenize(str(row)))\r\n #print (row)\r\n time.sleep(1)\r\n results=[]\r\n seen=[]\r\n word_list=[]\r\n found=[]\r\n for word in word_tokenize(str(row)):\r\n word_list.append(word)\r\n #for med in word_tokenize(str(medications)):\r\n # med_list.append(med)\r\n for n in word_list:\r\n \r\n for a in word_tokenize(str(medications)):\r\n #a=re.sub('[\\W_]+', '', a)\r\n #print (a) \r\n if a == n and a not in seen: \r\n found.append(a)\r\n tokens=word_tokenize(str(row))\r\n text=nltk.text.ConcordanceIndex(tokens)\r\n #print (a,n,text)\r\n results.append(concordance(text,str(a)))\r\n seen.append(a)\r\n print (results)\r\n continue\r\n else:\r\n continue\r\n counter+=1\r\n print (counter)\r\n \r\n \r\n df_results['Tag-Word']=found\r\n df_results['Sentence']=pd.Series(results)\r\n df_results.dropna()\r\n df_results.index.name='Index'\r\n #print(df_results)\r\n df_results.to_csv('med_tagged_step2.csv', sep='|')\r\n return df_results",
"def tokenize(self, df: DataFrame, text_col=\"text\") -> DataFrame:\n\n texts = df[text_col]\n if self.to_lower:\n texts = texts.apply(lambda text: text.lower())\n\n if self.use_remove_punctuation:\n texts = texts.apply(lambda text: self.remove_punctuation(text))\n\n # Split sentences into list of words\n df[\"tokenized\"] = texts.apply(\n lambda text: nltk.tokenize.word_tokenize(text, language=\"german\")\n )\n\n if self.use_lemmatize:\n df[\"lemmata\"] = df[\"tokenized\"].apply(lambda tokens: self.lemmatize(tokens))\n\n if self.use_stem:\n df[\"stems\"] = df[\"tokenized\"].apply(lambda tokens: self.stem(tokens))\n\n return df",
"def create_NER(self, dataframe):\n\n dataframe['entities'] = dataframe['line']\n entity_dict = {}\n entity_type = {}\n\n for i, val in enumerate(dataframe['entities']):\n e1 = re.findall('<e1>(.*?)</e1>', val)\n e2 = re.findall('<e2>(.*?)</e2>', val)\n entity_dict[i+1] = (str(e1[0]), str(e2[0]))\n doc = nlp(e1[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = ent.label_\n else:\n entity_type[i] = ('NOT RECOGNIZED')\n \n doc = nlp(e2[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = entity_type[i] + ent.label_\n else:\n entity_type[i] = entity_type[i] + ('NOT RECOGNIZED')\n\n entity_dataframe = self.create_dataframe(entity_dict, ['e1', 'e2'])\n entity_type_df = self.create_dataframe(entity_type, ['e1', 'e2'])\n\n dataframe = dataframe.drop(columns=['entities'])\n dataframe['e1'] = entity_dataframe['e1']\n dataframe['e2'] = entity_dataframe['e2']\n dataframe['e1_type'] = entity_type_df['e1']\n dataframe['e2_type'] = entity_type_df['e2']\n\n return dataframe",
"def generate_features(\n df: pd.DataFrame, spacy_model: str, language: str\n) -> pd.DataFrame:\n logging.info(\"Loading Spacy model...\")\n nlp = spacy.load(spacy_model)\n\n # Makes all tokens lowercase\n logging.info(\"Lowercase\")\n df[\"token_lower\"] = df[\"token\"].str.lower()\n\n logging.info(\"Lemma, pos\")\n spacy_pipe = nlp.pipe(df[\"token_lower\"].values, disable=[\"ner\", \"parser\"])\n features_gen = ((doc[0].lemma_, doc[0].pos_) for doc in spacy_pipe)\n df[\"lemma\"], df[\"pos\"] = zip(*features_gen)\n\n # Prepare stemmers\n logging.info(\"Loading Snowball Stemmer...\")\n snow = SnowballStemmer(language=language)\n\n logging.info(\"Snowball stemmer\")\n df[\"snowballStemmer\"] = df.apply(lambda row: snow.stem(row[\"token_lower\"]), axis=1)\n\n logging.info(\"Loading Porter Stemmer...\")\n port = PorterStemmer()\n\n logging.info(\"Porter stemmer\")\n df[\"porterStemmer\"] = df.apply(lambda row: port.stem(row[\"token_lower\"]), axis=1)\n\n # Adds columns with a binary if the word contains a possible negation prefix or suffix\n logging.info(\"Prefix\")\n df[\"possible_prefix\"] = df.apply(\n lambda row: possible_negation_prefix(row[\"token_lower\"]), axis=1\n )\n\n logging.info(\"Suffix\")\n df[\"possible_suffix\"] = df.apply(\n lambda row: possible_negation_suffix(row[\"token_lower\"]), axis=1\n )\n\n # Adds new columns for the previous and next lemma and pos-tag\n logging.info(\"Add prev/next shifts\")\n df[\"prev_Lemma\"] = df[\"lemma\"].shift(periods=1)\n df[\"next_Lemma\"] = df[\"lemma\"].shift(periods=-1)\n df[\"prev_pos\"] = df[\"pos\"].shift(periods=1)\n df[\"next_pos\"] = df[\"pos\"].shift(periods=-1)\n return df",
"def LM_sentiment(news_df):#be sure to set tick as an argument after testing\r\n OUTPUT_FILE = f'Sentiment_Data/test_file.csv' # User defined output file to write data to\r\n L=[]\r\n #D.append(OUTPUT_FIELDS)\r\n \r\n for i in range(len(news_df)): # Uses date in DataFrame as indexing loop\r\n #print(\"Sources for this day are: \"+news_df.loc[DATE]['Media']) # getting the news sources (Find better way of Collecting financial news)\r\n articles=news_df.iloc[i]['Article'] # get articles from specified date\r\n articles= re.sub('(May|MAY)', ' ', articles) # drop all May month references; avoid conflicting with \"may\" a modal word\r\n articles=articles.upper() # make everything uppercase\r\n output_data=get_data(articles) # returning sentiment scores from function as a list \r\n output_data[0]=news_df.iloc[i].name # storing the date of articles as first entry of list \r\n L.append(output_data) # appending article info to list\r\n L=pd.DataFrame(L,columns=OUTPUT_FIELDS) # constructing DataFrame from article data\r\n L.set_index('date',inplace=True) # setting the index in place\r\n return L # returning the DataFrame\r",
"def tokenize_and_explode(args, df):\n df['token'] = df.word.apply(args.tokenizer.tokenize)\n df = df.explode('token', ignore_index=True)\n df['token2word'] = df['token'].apply(\n args.tokenizer.convert_tokens_to_string).str.strip().str.lower()\n df = convert_token_to_idx(df, args.tokenizer)\n df = check_token_is_root(args, df)\n df = add_glove_embeddings(df, dim=50)\n\n return df",
"def preprocess(tmp_df, preprocess=False):\n\n # all in one go in order to just have to tokenize once\n if preprocess:\n tmp_df[\"description\"] = tmp_df[\"description\"].apply(\n clean_stop_punct_digit_n_lower)\n # words = tmp_df['description'] \\\n # .str.split(expand=True).stack().value_counts()\n # ratio = tmp_df['description'].apply(remove_duplicate)\\\n # .str.split(expand=True).stack().value_counts() \\\n # / tmp_df.shape[0]\n # words.to_csv('freq_words.csv')\n # ratio.to_csv(\"ratio.csv\")\n\n return tmp_df",
"def _calculate_similarities(self) -> pd.DataFrame:\n\n df_encoded_articles = self._db_connection.get_dataframe(\n table_name='tfidf_representation',\n schema='encoded_articles'\n ).set_index('id')\n\n # Pandas loads the array column 'encoded' as a string e.g. \"[0.0, 0.6, 0.8]\" which needs translating to an array\n encoded_representations = np.array(df_encoded_articles['encoded'].tolist())\n\n return pd.DataFrame(\n index=df_encoded_articles.index,\n columns=df_encoded_articles.index,\n data=pairwise.cosine_similarity(encoded_representations)\n )",
"def make_new_request():\n urls = [\n \"https://inshorts.com/en/read/business\",\n \"https://inshorts.com/en/read/sports\",\n \"https://inshorts.com/en/read/technology\",\n \"https://inshorts.com/en/read/entertainment\"\n ]\n\n output = []\n \n for url in urls:\n # We use .extend in order to make a flat output list.\n output.extend(get_articles_from_topic(url))\n \n df = pd.DataFrame(output)\n df.to_csv('inshorts_news_articles.csv')\n return df"
] | [
"0.65940297",
"0.65564024",
"0.65153795",
"0.64343864",
"0.6338675",
"0.6319045",
"0.62618953",
"0.6175134",
"0.61724496",
"0.6104356",
"0.60394055",
"0.60166335",
"0.5988761",
"0.59751725",
"0.5969283",
"0.59643435",
"0.5955965",
"0.5925812",
"0.592225",
"0.5913911",
"0.5912447",
"0.59062",
"0.58452207",
"0.58419806",
"0.5827578",
"0.58239084",
"0.5823167",
"0.58142644",
"0.58000445",
"0.5797373"
] | 0.76601696 | 0 |
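The core step in process_wiki_tokenized is collapsing the sections dimension of each article with itertools.chain.from_iterable and then falling back to the first paragraph whenever the introduction section is empty. A small sketch of that reshaping on hand-made data (the nested lists are invented, not real Wikipedia output):

import itertools
import pandas as pd

# article -> sections -> paragraphs (token ids); section 0 is the introduction
fake_article = {
    "title": "Example_Article",
    "tokenized_text": [
        [[1, 2, 3], [4, 5]],     # introduction section, two paragraphs
        [[6, 7], [8, 9, 10]],    # a later section
    ],
}

sections = fake_article["tokenized_text"]
paragraphs = list(itertools.chain.from_iterable(sections))  # drop the section level
intro = sections[0] if sections[0] else [paragraphs[0]]     # fallback used in the document

df = pd.DataFrame(
    [(fake_article["title"], paragraphs, intro)],
    columns=["article", "text_ids", "text_ids_intro"],
)
print(df.iloc[0]["text_ids"])   # [[1, 2, 3], [4, 5], [6, 7], [8, 9, 10]]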
Get list of installed kernel-devel packages. | def kdevel():
return subprocess.check_output([
"rpm", "-q", "-a", "kernel-devel"]).splitlines() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_installed_jdk_packages():\n # Convert to a set and back to a list again to uniqueify.\n return sorted(list(set(rpm_query_whatprovides('java-devel', 'java7-devel', 'jdk'))))",
"def get_installed_jre_packages():\n # Convert to a set and back to a list again to uniqueify.\n return sorted(list(set(rpm_query_whatprovides('java', 'java7', 'jdk'))))",
"def getInstalledPackages():\n reqs = subprocess.check_output([sys.executable,\n '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0]\n for r in reqs.split()]\n return installed_packages",
"def get_installed_packages():\n global INSTALLED_PACKAGES\n chk = Popen(\"{} -m pip freeze\".format(sys.executable),\n shell=True, stdout=PIPE)\n installed = chk.communicate()[0].decode().splitlines()\n for pkg in installed:\n item = pkg.split(\"==\")\n INSTALLED_PACKAGES[item[0]] = item[1]",
"def get_not_installed_rpm_packages():\n def is_installed(elem):\n return elem in PMDK_TOOLS and elem in listdir('/usr/bin/') or\\\n elem == \"pmdk\" or elem + '.so' in listdir('/usr/lib64/')\n\n elements = get_libraries_names()\n not_installed_packages = []\n for elem in elements:\n if not is_installed(elem):\n not_installed_packages.append(elem)\n return not_installed_packages",
"def list_packages():\n\n shelf_dir = settings.shelf_dir\n\n package_list = os.listdir(shelf_dir)\n\n package_list.sort()\n\n return package_list",
"def getInstalledPackages(self) -> PackageContainer:\n\t\tself.getPackageManager()\n\t\tif self.package_manager == \"apt\":\n\t\t\tpackages = subprocess.check_output([\"apt\", \"list\", \"--installed\"], encoding='UTF-8', universal_newlines=True)\n\t\t\tpackages = packages.split(\"\\n\")[1:-1]\n\t\telse:\n\t\t\tlogger.error(\"Package manager not supported for extracting packages.\")\n\t\t\traise ValueError(\"Package manager unsupported\")\n\n\t\t# Parse packages to self.installed_packages\n\t\tself.parsePackages(packages)\n\n\t\tlogger.info(\"Installed packages collected\")\n\t\treturn self.installed_packages",
"def installed_packages():\n with open(os.path.join(_DIRECTORY, 'package.json'), 'r') as f:\n packagejson = json.load(f)\n return packagejson['dependencies'].keys()",
"def get_installed_packages(cache=False,\n output_dir='.',\n output_filename='installed.pkgs.txt'):\n output = os.path.join(output_dir, output_filename)\n cmd = '''aptitude search '~i !~M' -F '%%p' | sort -u > %r''' % (\n output)\n ensure_file(cmd, output, shell=True, overwrite=not(cache))\n installed = list(read_lines(output))\n return installed",
"def packages(self):\n return []",
"def list_packages(self):\n\n # First extract loaded module names from sys.modules\n sys_modules = sys.modules.keys()\n\n packages = {}\n\n # First add moduels in sys.modules (built-ins,\n # preloads and already loaded ones)\n for name in sys_modules:\n d = self.find_package(name)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] }\n\n #import site\n # Loop through all directories in sys.path and check for modules\n # Dont iterate through <prefix>/lib directory\n libdir = os.path.join(sys.prefix, 'lib')\n\n walked = []\n for top_level in self.paths:\n if not os.path.isdir(top_level):\n continue\n\n # Dont iterate through libdir\n if os.path.abspath(top_level) == os.path.abspath(libdir):\n continue\n\n walked.append(top_level)\n for item in os.listdir(top_level):\n\n fullpath = os.path.join(top_level, item)\n if fullpath in walked: continue\n\n walked.append(fullpath)\n # Remove the extension\n idx = item.find('.')\n if idx != -1: item = item[:idx]\n d = self.find_package(item)\n if not d: continue\n try:\n pkginfo = packages[d['type']]\n pkginfo[d['name']] = d['path']\n except Exception, e:\n packages[d['type']] = { d['name'] : d['path'] } \n\n for key,item in packages.items():\n print\n print self.pkgTypeInfo(key)\n print\n\n # Print sorted\n listofitems = item.keys()\n listofitems.sort()\n\n for key2 in listofitems:\n print key2,':',item[key2]",
"def deb_installed_kernel(installed, kernel_version, arch):\n packages = (\"linux-image-\", \"linux-headers-\")\n to_keep = tuple(\n deb_kernel_package(name.rstrip(\"-\"), kernel_version, arch) for name in packages\n )\n\n to_remove = []\n for line in installed[\"stdout\"].splitlines():\n if \" linux-\" not in line:\n continue\n package = line.split()[1].strip()\n if any(package.startswith(name) for name in packages) and not any(\n package.startswith(name) for name in to_keep\n ):\n to_remove.append(package)\n return to_remove",
"def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list",
"def get_incompatible_packages():\n pkgconfig_directory = '/usr/lib64/pkgconfig/'\n incompatibe_packages = []\n libraries = get_libraries_names() - set(NO_PKG_CONFIGS)\n for library in libraries:\n with open(pkgconfig_directory + library + '.pc') as f:\n out = f.readlines()\n for line in out:\n if 'version=' in line:\n version = line.split('=')[1].strip(linesep)\n if not version in PMDK_VERSION.replace('~', '-'):\n incompatibe_packages.append(library)\n return incompatibe_packages",
"def list_packages(self):\n for tag, pkg in PACKAGES.iteritems():\n print \"{tag} - {label}\".format(tag=tag, label=pkg['label'])",
"def get_packages():\n\n packages = find_packages()\n packages = ['{}.{}'.format('uniq', package) for package in packages]\n packages.append('uniq')\n return packages",
"def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]",
"def get_installed_packages() -> List['Package']:\n repo_packages_names = set(expac(\"-S\", ['n'], []))\n\n # packages the user wants to install from aur\n aur_names = packages_from_other_sources()[0]\n repo_packages_names -= aur_names\n\n installed_packages_names = set(expac(\"-Q\", ['n'], []))\n installed_repo_packages_names = installed_packages_names & repo_packages_names\n unclassified_installed_names = installed_packages_names - installed_repo_packages_names\n\n return_list = []\n\n # installed repo packages\n if installed_repo_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_repo_packages_names), PossibleTypes.REPO_PACKAGE)\n )\n\n # installed aur packages\n installed_aur_packages_names = set(\n [package.name for package in Package.get_packages_from_aur(list(unclassified_installed_names))]\n )\n\n # package names the user gave us must be in the aur\n for name in aur_names:\n if name not in installed_aur_packages_names:\n aurman_error(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n raise InvalidInput(\"Package {} not found in AUR!\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n\n if installed_aur_packages_names:\n return_list.extend(\n Package.get_packages_from_expac(\"-Q\", list(installed_aur_packages_names), PossibleTypes.AUR_PACKAGE)\n )\n\n unclassified_installed_names -= installed_aur_packages_names\n\n # installed not repo not aur packages\n if unclassified_installed_names:\n return_list.extend(\n Package.get_packages_from_expac(\n \"-Q\", list(unclassified_installed_names),\n PossibleTypes.PACKAGE_NOT_REPO_NOT_AUR\n )\n )\n\n return return_list",
"def packages(self):\r\n return self._packages",
"def get_available_packages():\n all_providers_yaml = load_package_data()\n provider_package_names = [\n provider[\"package-name\"] for provider in all_providers_yaml if not provider.get(\"suspended\")\n ]\n return [\n \"apache-airflow\",\n \"docker-stack\",\n *provider_package_names,\n \"apache-airflow-providers\",\n \"helm-chart\",\n ]",
"def get_packages():\n packages = []\n for repo in repositories:\n packages.extend(repo.get_packages())\n return packages",
"def get_system_modules():\n # print(\"## \" + \"System modules \" + \"#\"*60)\n import sys\n\n system_modules = sorted(sys.modules.keys())\n # for m in system_modules:\n # print(m)\n\n # print(\"## \" + \"pkg_resources \" + \"#\"*60)\n pkg_resources_pkgs = []\n for dist in __import__(\"pkg_resources\").working_set:\n if dist.project_name not in system_modules:\n pkg_resources_pkgs.append(dist.project_name)\n\n pkg_resources_pkgs = sorted(pkg_resources_pkgs)\n\n # for p in pkg_resources_pkgs:\n # print(p)\n\n # print(\"## \" + \"pkgutil \" + \"#\"*60)\n import pkgutil\n\n pkg_utils = []\n for m in pkgutil.iter_modules():\n if m[1] not in (system_modules + pkg_resources_pkgs):\n pkg_utils.append(m[1])\n pkg_utils = sorted(pkg_utils)\n # for m in pkg_utils:\n # print(m)\n return sorted(system_modules + pkg_resources_pkgs + pkg_utils)",
"def py2_pkgs():\n fedoras = {}\n for version in range(FIRST, RAWHIDEVER+1):\n fedoras[version] = set()\n for dependency in ('python(abi) = 2.7',\n 'libpython2.7.so.1.0()(64bit)',\n 'libpython2.7_d.so.1.0()(64bit)'):\n pkgs = repoquery(version=version,\n whatrequires=dependency)\n news = {f'{p.name} {p.evr}' for p in pkgs}\n if news:\n print(f'{len(news)} pkgs require {dependency} in Fedora {version}',\n file=sys.stderr)\n fedoras[version] |= set(news)\n names = {nevr.split(' ')[0] for nevr in fedoras[version]}\n for older_version in fedoras:\n if older_version == version:\n continue\n for nevr in set(fedoras[older_version]):\n if nevr.split(' ')[0] in names:\n fedoras[older_version].remove(nevr)\n return fedoras",
"def list_compute_packages(self):\n return set(self.compute_packages.keys())",
"def list_packages(self):\n if not self.is_adb_available():\n return None\n\n packages = self._do_adb_command('shell pm list packages -f')\n if packages:\n packages = packages.split('\\n')\n else:\n packages = []\n ret = []\n for package in packages:\n parts = package.split(':')\n if len(parts) < 2:\n continue\n needed = parts[1].split('.apk=')\n _p = AndroidPackage()\n _p.path = needed[0] + '.apk'\n _p.package = needed[1]\n _p.package = _p.package.join(_p.package.split())\n ret.append(_p)\n return ret",
"def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info",
"def get_dep_map(kerneldir):\n\n\tf = open(os.path.join(kerneldir, 'modules.dep'))\n\tdeps = {}\n\tfor l in f:\n\t\t#print repr(l)\n\t\tmod, dep_list_str = l.strip().split(':', 1)\n\t\tassert mod not in deps\n\n\t\tkmod = KModuleName(mod)\n\t\tdep_list = [KModuleName(x) for x in dep_list_str.strip().split()]\n\t\tdep_list.insert(0, kmod)\t# prepend ourself as a dependency\n\n\t\tdeps[kmod] = dep_list\n\n\tf.close()\n\treturn deps",
"def get_required_packages(self) -> list:\n\t\tret = []\n\t\tlocal_packages = ChocoInfo.get_local_packages(\n\t\t\tPUSHED_PACKAGES_PATH)\n\n\t\tprint(\"local_packages\", local_packages)\n\n\t\treturn [c_package for c_package in self._community_packages if c_package not in local_packages]",
"def required_packages(cls) -> List[Text]:\n return []",
"def get_used_release_specs(package, installed_version=None):"
] | [
"0.73078793",
"0.6860375",
"0.6792763",
"0.66517985",
"0.6592715",
"0.6348581",
"0.63128823",
"0.6310863",
"0.6231832",
"0.6145637",
"0.61211216",
"0.61205536",
"0.61111844",
"0.60494787",
"0.6042742",
"0.598851",
"0.5966998",
"0.5907414",
"0.58974636",
"0.58636194",
"0.5860474",
"0.58306193",
"0.5810718",
"0.57934356",
"0.5782976",
"0.5774966",
"0.57745",
"0.5758255",
"0.5745343",
"0.57326543"
] | 0.7870529 | 0 |
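The kdevel document simply returns the raw output lines of rpm -q -a kernel-devel; under Python 3 those lines are bytes, and rpm may not exist at all on non-RPM systems. A slightly more defensive variant is sketched below (the error handling is an added assumption, not part of the original):

import subprocess

def kernel_devel_packages():
    """List installed kernel-devel packages, or [] if rpm is unavailable."""
    try:
        out = subprocess.check_output(
            ["rpm", "-q", "-a", "kernel-devel"], text=True
        )
    except (OSError, subprocess.CalledProcessError):
        return []
    return [line for line in out.splitlines() if line]

if __name__ == "__main__":
    for pkg in kernel_devel_packages():
        print(pkg)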
Gets statistics about every English league game players competed in for the specified year (year must be between 2005 and 2010). If use_local is True and shelved player games already exist for the year, they are returned without re-parsing the fixtures for each team. | def get_player_games(self, year, use_local=True):
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_player_stats_from_game(team, year, week):",
"def getPlayerBaseStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashplayerstats?College=&'\\\r\n 'Conference=&Country=&DateFrom=&DateTo=&Division=&'\\\r\n 'DraftPick=&DraftYear=&GameScope=&GameSegment=&Height=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&Month=0&'\\\r\n 'OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season='+ season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision=&Weight='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n baseStat_df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n return baseStat_df",
"def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df",
"def get_player_stats(df_players, url, headers):\n\tfor i, pid in enumerate(df_players['PERSON_ID']):\n\t\tif i==0:\n\t\t\tdf_stats=get_player_career_reg_season_stats(pid, url, headers)\n\t\telse:\n\t\t\tdf_stats=df_stats.append(\n\t\t\t\tget_player_career_reg_season_stats(pid, url, headers)\n\t\t\t)\n\t\tprint('i={} Added player stats for ID={}'.format(i, pid))\n\t\ttime.sleep(2) # sleep so we don't get blocked\n\n\treturn df_players.merge(df_stats, left_on=\"PERSON_ID\", right_on=\"PLAYER_ID\", how='left')",
"def get_players_df(year: int) -> pd.DataFrame:\n endpoint = _BASE_ENDPOINT + _PLAYER_ENDPOINT\n r = requests.get(endpoint.format(year=year)).json()\n standard_stats = r.get(\"league\").get(\"standard\")\n result_dict = [{\n \"FirstName\": s.get(\"firstName\"),\n \"LastName\": s.get(\"lastName\"),\n \"PlayerID\": s.get(\"personId\"),\n \"TeamID\": s.get(\"teamId\"),\n } for s in standard_stats]\n return pd.DataFrame(result_dict)",
"def basic_player_stats(\n self, player_name: str,\n platform: ALPlatform,\n skip_tracker_rank=False) -> list:\n params: dict = {'platform': platform.value, 'player': player_name}\n if skip_tracker_rank:\n params.update({'skipRank': True})\n return self._make_request(additional_params=params)",
"def get_player_stats(season_start_year, csv_file_name = None):\n season_year_full = convert_season_start_to_season_years(\n starting_year = season_start_year\n )\n \n players_df = playergamelogs.PlayerGameLogs(\n season_nullable = season_year_full\n ).player_game_logs.get_data_frame()\n \n # Keep the relevant columns\n players_df = players_df[[\n \"SEASON_YEAR\", \"PLAYER_ID\", \"PLAYER_NAME\", \"TEAM_NAME\",\n \"GAME_ID\", \"GAME_DATE\", \"MATCHUP\", \"WL\", \"MIN\",\n \"FGM\", \"FGA\", \"FTM\", \"FTA\", \"FG3M\", \"PTS\", \"REB\",\n \"AST\", \"STL\", \"BLK\", \"TOV\"]]\n \n # Convert GAME_DATE to datetime\n players_df[\"GAME_DATE\"] = pd.to_datetime(\n players_df[\"GAME_DATE\"]\n )\n \n # Save the data frame to a csv if a file name exists\n if csv_file_name != None:\n # Save to current directory\n csv_path = Path(\"./data/\" + csv_file_name + \".csv\")\n players_df.to_csv(path_or_buf = csv_path,\n index = False,\n na_rep = 'NULL')\n \n return players_df",
"def get_season_player_stats(self, year=None, years=None, stat_type=None, stat_types=None):\n # Call parent class' get_stats() method, then perform our own extra commands.\n df = super(ProFootballReference, self).get_season_player_stats(year, years, stat_type, stat_types)\n\n # Fill in missing data for main columns (year, team, etc.) and remove extraneous\n # columns created when merging data frames (such as year_receiving, team_rushing, etc.).\n for column_prefix in ['player_', 'team_', 'year_', 'age_', 'pos_', 'g_', 'gs_']:\n self.__clean_repeated_columns(df, column_prefix)\n\n # Create columns for Pro Bowl and All-Pro appearances, and remove the symbols from each player's name.\n self.__create_accolade_columns(df)\n df['player'] = df['player'].apply(self.__remove_accolade_chars)\n\n # If we have kicking data, rename some columns so field goal distance is obvious.\n df = self.__rename_field_goal_columns(df, stat_type, stat_types)\n\n return df",
"def player_stats_query(week, player_list, session=s): \n #initialize lists\n pos_list = []\n team_list = []\n \n #cycle thru each player that is currently available\n for player in avail_player_key:\n #build the API url for the unique player key\n url_player = base_query_url+'league/'+leagueID+'/players;player_keys='+player+'/stats;type=week;week='+str(week)\n #convert API call to json\n raw = s.get(url_player, params={'format': 'json'}).json()\n #parse out the players details info (e.g. position, owned, etc.)\n player_details = raw['fantasy_content']['league'][1]['players']['0']['player'][0]\n #parse out position from player details\n pos = player_details[9]['display_position'].upper()\n \n ## FILTER OUT NON-OFFENSE POSITIONS\n if pos not in ['QB', 'WR', 'RB', 'TE']:\n continue\n else:\n \n #parse out team from player_details\n team = player_details[6]['editorial_team_abbr'].upper()\n #append data to lists\n pos_list.append(pos)\n team_list.append(team)\n \n #initialize a stats list\n stats_list = []\n #parse out the player stats\n player_stats = raw['fantasy_content']['league'][1]['players']['0']['player'][1]['player_stats']['stats']\n #loop thru all of the various stats\n for stat in player_stats:\n stat_dict = stat['stat']\n stats_list.append(stat_dict)\n \n return stats_list",
"def _load_player_map(self) -> None:\n # Loading people that have had ab appearance in the year specified\n # This might not be general enough as some players get paid even if they don't play\n sql = \"\"\"\\\n select p.playerid, p.namefirst, p.namelast, p.namegiven, a.team_id\n from people p\n INNER JOIN appearances a ON p.playerid = a.playerid and a.yearid = %s\n \"\"\"\n\n self._cursor.execute(sql, (self._yearid,))\n duplicates = 0\n all_players = self._cursor.fetchall()\n for player in all_players:\n r = {'playerid': player[0], 'namefirst': player[1], 'namelast': player[2],\n 'namegiven': player[3], 'team_id': player[4]}\n\n # Build a key from namefirst, namelast and team_id, then remove all spaces\n # Make sure we don't already have the player loaded, count and report duplicates.\n key = \"{}{}{}\".format(player[1], player[2], player[4]).replace(\" \", \"\")\n if self._player_map.get(key) is None:\n self._player_map[key] = r\n else:\n duplicates += 1\n\n # We'll add the player again using his given first name if different from namefirst\n given_first = player[3].split()[0]\n if given_first != player[1]:\n key2 = \"{}{}{}\".format(given_first, player[2], player[4]).replace(\" \", \"\")\n if self._player_map.get(key2) is None:\n self._player_map[key2] = r\n else:\n duplicates += 1\n\n if duplicates > 0:\n raise RuntimeError(\"Duplicates found building player map: \" + str(duplicates))",
"def available_players_query():\n\t#start the calculation timer\n\tcalc_start = time.time()\n\n\t#initialize everything\n\tlast_first_names = []\n\tfull_names = []\n\tplayer_key = []\n\tplayer_pos = []\n\tstart = 1\n\tdone = False\n\n\t#this is where the data is actually created\n\t#loop thru to get all of the players available\n\twhile(not done):\n\t\tquery_url = base_query_url + 'league/' + leagueID + '/players;status=A;sort=PTS;start=%s;count=25' %start\n\t\t\n\t\tr = s.get(query_url, params={'format': 'json'})\n\t\toutput = r.json()\n\t\toutput = output['fantasy_content']\n\t\toutput = output['league']\n\t\toutput = output[1]\n\t\toutput = output['players']\n\t\tcount = output['count']\n\t\tplayer_num = list(output.keys())\n\t\tplayer_num = player_num[0:len(player_num)-1]\n\t\t#grab the names for each of the players in this batch of players\n\t\tfor i in player_num:\n\t\t\t#get to player details\n\t\t\toutput1 = output[i]\n\t\t\toutput1 = output1['player']\n\t\t\toutput1 = output1[0]\n\t\t\t#get player name\n\t\t\toutput_name = output1[2]\n\t\t\toutput_name = output_name['name']\n\t\t\tfirst = output_name['first']\n\t\t\tlast = output_name['last']\n\t\t\tfull = output_name['full']\n\t\t\tlast_first = last + ', ' + first\n\t\t\t#get player key\n\t\t\toutput_key = list(output1[0].values())[0]\n\t\t\t#get player position\n\t\t\toutput_pos = list(output1[9].values())[0]\n #add items to lists\n\t\t\tlast_first_names.append(last_first)\n\t\t\tfull_names.append(full)\n\t\t\tplayer_key.append(output_key)\n\t\t\tplayer_pos.append(output_pos)\n\t\t\n\t\t#stopping rule: if the number of players on the page is less than 25, then stop\n\t\tstart += 25\n\t\tif count < 25:\n\t\t\tdone = True\n\n\t#stop the timer\n\tcalc_end = time.time()\n\t#print the calculation time\n\tprint('Process complete')\n\tprint('Calculation time for all available players: {0:0.2f} seconds'.format((calc_end-calc_start)))\n\t#return the players name and player key lists\n\treturn full_names, player_key, player_pos",
"def get_team_stats(self, team_name, year):\n \n base_url = 'http://www.sports-reference.com/cbb/schools/' + \\\n team_name + '/' + str(year) + '.html'\n\n response = urllib2.urlopen(base_url)\n content = response.read()\n soup = BeautifulSoup(content)\n soup_results = soup.find('td', text='Team')\n team_stats = []\n \n if soup_results:\n soup_results = soup_results.parent()\n \n for result in soup_results[1::]:\n if result.string:\n team_stats.append(float(result.string))\n else:\n team_stats.append(None)\n else:\n team_stats += [None]*21\n\n return team_stats",
"def add_players(game: LolGame, players: List[dict], add_page_id: bool = False) -> LolGame:\n\n for team_side in game[\"teams\"]:\n team_side_leaguepedia = \"1\" if team_side == \"BLUE\" else \"2\"\n\n for idx, game_player in enumerate(game[\"teams\"][team_side][\"players\"]):\n try:\n # We get the player object from the Leaguepedia players list\n player_latest_data = next(\n p\n for p in players\n if p[\"Side\"] == team_side_leaguepedia\n and lit.get_id(p[\"Champion\"], object_type=\"champion\") == game_player[\"championId\"]\n )\n\n game_player[\"role\"] = role_translation[player_latest_data[\"gameRoleNumber\"]]\n\n unique_identifiers = LeaguepediaPlayerIdentifier(\n name=player_latest_data.get(\"currentGameName\"),\n irlName=player_latest_data.get(\"irlName\"),\n country=player_latest_data.get(\"Country\"),\n residency=player_latest_data.get(\"Residency\"),\n age=player_latest_data.get(\"Age\"),\n role=player_latest_data.get(\"Role\"),\n team=player_latest_data.get(\"Team\"),\n kills=player_latest_data.get(\"Kills\"),\n deaths=player_latest_data.get(\"Deaths\"),\n assists=player_latest_data.get(\"Assists\"),\n ss=player_latest_data.get(\"SummonerSpells\"),\n gold=player_latest_data.get(\"Gold\"),\n cs=player_latest_data.get(\"CS\"),\n items=player_latest_data.get(\"Items\"),\n trinket=player_latest_data.get(\"Trinket\"),\n keystoneMastery=player_latest_data.get(\"KeystoneMastery\"),\n keystoneRune=player_latest_data.get(\"KeystoneRune\"),\n runes=player_latest_data.get(\"Runes\"),\n )\n\n if add_page_id:\n unique_identifiers[\"pageId\"] = int(player_latest_data[\"pageId\"])\n\n game_player[\"uniqueIdentifiers\"] = {\"leaguepedia\": unique_identifiers}\n\n except StopIteration:\n # Since we cannot get the role properly, we try to infer it\n game_player[\"role\"] = list(role_translation.values())[idx]\n\n return game",
"def collect_teams(year: int = 2005) -> None:\n\n\twith open('../resources/config.json') as config_file, open('../resources/secrets.json') as secrets_file:\n\t\tconfig_json = json.load(config_file)\n\t\tsecrets_json = json.load(secrets_file)\n\n\t\turl = '/'.join(['http:', '', config_json['base_url'], config_json['fbs_teams_endpoint']])\n\t\tapi_key = secrets_json['api_key']\n\n\theaders = {'Authorization': api_key}\n\tparams = {'year': year}\n\n\tresponse = requests.get(url, headers = headers, params = params).json()\n\n\t# dict of one array for json dump\n\tteam_names = {'teamNames': list(map(lambda r: r['school'], response))}\n\n\twith open('../resources/teams.json', 'w') as teams_file:\n\t\tjson.dump(team_names, teams_file)",
"def get_leagues_and_countries(source=utils.get_native_source):\n if not isinstance(source, games.models.Source):\n # If I used source=native_source() or if native_source was a global variable then\n # during db initialization (running command initialize) you would get an error since\n # it gets its value when the database is empty.\n source = source()\n logger.info(\"getting leagues and countries from source %s...\", source)\n if not source:\n return [], []\n data, meta, status_code = sportmonks.countries.all(include='leagues.seasons')\n if not data:\n # if the status code is not 200 data and meta are None\n return [], []\n # with open('sportmonks/response_texts/aws_01.txt', 'w') as outfile:\n # json.dump(meta, outfile, indent=4)\n # json.dump(data, outfile, indent=4)\n\n pre_countries, pre_competitions = [], []\n\n try:\n # Notice that only the first supported sport will be processed (currently this is is acceptable since we only\n # support football and so the first supported sport will always be football)\n sport_sids = parse_sport(meta)\n sports = []\n for sport_sid in sport_sids:\n sport = games.models.Sport.by_sid(sid=sport_sid, source=source)\n if not sport:\n logger.info(\"Sport contained in the response with sid {} is not supported\".format(sport_sid))\n continue\n sports.append(sport)\n if not sports:\n logger.error(\"No supported sport in the response\")\n return [], []\n football_gname = games.naming.sport_names.get('football', None)\n football = games.models.Sport.objects.get(name=football_gname)\n if football not in sports:\n logger.info(\"Football is not in response\")\n return [], []\n # logger.debug(\"Trying to get sport from source: %s and sid: %s\", source, sport_sid)\n sport_gname = football_gname\n for item in data:\n try:\n country_sid = item.get('id')\n # logger.debug('country_sid: %s', country_sid)\n country_sname = item.get('name')\n # logger.debug('country_sname: %s', country_sname)\n extra = item.get('extra')\n # logger.debug('extra: %s', extra)\n leagues = item.get('leagues').get('data')\n # logger.debug('leagues: %s', leagues)\n try:\n fifa_code = extra.get('fifa') # some countries might lack extra information\n except AttributeError:\n fifa_code = None\n except Exception as e:\n logger.data_error('%s', e)\n continue\n pre_country = pre_models.PreCountry(source=source, sname=country_sname, sid=country_sid, fifa_code=fifa_code)\n pre_countries.append(pre_country)\n for league in leagues:\n try:\n # sportmonks uses sgname for leagues. 
I use this sgname as an sname (comp_season_specific name)\n competition_sname = league.get('name')\n # logger.debug('competition_sname: %s', competition_sname)\n sid = league.get('id')\n # logger.debug('sid: %s', sid)\n seasons = league.get('seasons').get('data')\n # logger.debug('seasons: %s', seasons)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n competition_season_utils = []\n # comp_seas_sids = []\n for season in seasons:\n try:\n season_name = season.get('name')\n # logger.debug('season_name: %s', season_name)\n # season_name = seasons_special_treatment(season_name)\n competition_season_sid = season.get('id')\n # logger.debug('competition_season_sid: %s', competition_season_sid)\n is_current_season = season.get('is_current_season', False)\n # logger.debug('is_current_season: %s', is_current_season)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n # comp_seas_sids.append(competition_season_sid)\n zak_season_name = games.models.Season.zakandify_season_string(season_name)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n competition_season_type = get_competition_season_type(season)\n competition_season_util = pre_models.CompetitionSeasonUtil(season, competition_season_sid, competition_sname, competition_season_type)\n competition_season_utils.append(competition_season_util)\n # logger.debug(\"competition season sids: %s\", comp_seas_sids)\n pre_competition = pre_models.PreCompetition(\n source=source, sname=competition_sname, sid=sid, sport_name=sport_gname,\n competition_season_utils=competition_season_utils, pre_country=pre_country)\n pre_competitions.append(pre_competition)\n\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.countries.all from source %s', e, source)\n logger.info(\"%s pre countries and %s pre competitions were created\", len(pre_countries), len(pre_competitions))\n return pre_countries, pre_competitions",
"def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result",
"def mlbstats(self, irc, msg, args, optlist, optplayer):\n\n (first, last) = optplayer.split(\" \", 1) #playername needs to be \"first-last\"\n searchplayer = first + '-' + last\n\n optyear = False\n for (option, arg) in optlist:\n if option == 'year':\n optyear = arg\n \n url = self._b64decode('aHR0cDovL3NlYXJjaC5lc3BuLmdvLmNvbS8=') + '%s' % searchplayer\n \n #self.log.info(url)\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n \n if not soup.find('li', attrs={'class':'result mod-smart-card'}):\n irc.reply(\"I didn't find a link for: %s. Perhaps you should be more specific and give a full playername\" % optplayer)\n return\n else: \n playercard = soup.find('li', attrs={'class':'result mod-smart-card'})\n \n if 'http://espn.go.com/mlb/players/stats?playerId=' not in playercard.renderContents():\n irc.reply(\"Could not find a link to career stats for: %s\" % optplayer)\n return\n else:\n #if playercard.find('a', attrs={'href':re.compile('.*?espn.go.com/mlb/players/stats.*?')}):\n link = playercard.find('a', attrs={'href':re.compile('.*?espn.go.com/mlb/players/stats.*?')})['href']\n \n if not link:\n irc.reply(\"I didn't find the link I needed for career stats. Did something break?\")\n return\n else:\n try:\n req = urllib2.Request(link)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % link)\n return\n \n soup = BeautifulSoup(html)\n playerName = soup.find('title')\n table = soup.find('table', attrs={'class':'tablehead'}) # everything stems from the table.\n header = table.find('tr', attrs={'class':'colhead'}).findAll('td') # columns to reference.\n\n if optyear:\n seasonrows = table.findAll('tr', attrs={'class':re.compile('^oddrow$|^evenrow$')}) # find all outside the season+totals\n season_data = collections.defaultdict(list) # key will be the year.\n \n for row in seasonrows: \n tds = row.findAll('td')\n for i,td in enumerate(tds):\n season_data[str(tds[0].getText())].append(str(ircutils.bold(header[i].getText()) + \": \" + td.getText()))\n \n outyear = season_data.get(str(optyear), None)\n \n if not outyear:\n irc.reply(\"No stats found for %s in %s\" % (optplayer, optyear))\n else:\n outyear = string.join([item for item in outyear], \" | \")\n irc.reply(\"{0} :: {1}\".format(optplayer,outyear)) \n else:\n endrows = table.findAll('tr', attrs={'class':re.compile('^evenrow bi$|^oddrow bi$')})\n \n for total in endrows:\n if total.find('td', text=\"Total\"):\n totals = total.findAll('td')\n if total.find('td', text=\"Season Averages\"):\n seasonaverages = total.findAll('td')\n \n del seasonaverages[0] #remove the first td, but match up header via j+2\n del totals[0:2]\n\n seasonstring = string.join([header[i+2].getText() + \": \" + td.getText() for i,td in enumerate(seasonaverages)], \" | \")\n totalstring = string.join([header[i+2].getText() + \": \" + td.getText() for i,td in enumerate(totals)], \" | \")\n \n irc.reply(\"{0} Season Averages :: {1}\".format(ircutils.bold(optplayer), seasonstring))\n irc.reply(\"{0} Career Totals :: {1}\".format(ircutils.bold(optplayer), totalstring))",
"def players_onsale(self):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/teamInfo.phtml?tid=' + str(self.community_id),\r\n headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n\r\n current_year = dt.today().year\r\n current_month = dt.today().month\r\n on_sale = list()\r\n year_flag = 0\r\n for i in soup.find_all('table', {'class', 'tablecontent03'})[2].find_all('tr')[1:]:\r\n columns = i.find_all('td')\r\n player_id = int(re.findall('\\d+', columns[0].img['src'])[0])\r\n playername = columns[1].text.strip()\r\n team_id = int(re.findall('\\d+', columns[2].img['src'])[0])\r\n team = columns[2].a['title'].strip()\r\n min_price = float(columns[3].text.replace(\".\", \"\").strip())\r\n market_price = float(columns[4].text.replace(\".\", \"\").strip())\r\n points = int(columns[5].text.strip().strip())\r\n # Controlamos el cambio de año, ya que comunio no lo dá\r\n if current_month <= 7 < int(columns[6].text[3:5]):\r\n year_flag = 1\r\n date = datetime.strptime(str(current_year - year_flag) + columns[6].text[3:5] + columns[6].text[:2], '%Y%m%d').date()\r\n owner = columns[7].text.strip()\r\n position = self.translate_position(columns[8].text.strip())\r\n # Comprobamos si solamente queremos los de la computadora o no\r\n on_sale.append([player_id, playername, team_id, team, min_price, market_price, points, date, owner, position])\r\n\r\n return on_sale",
"def get_player_win_loss_stats(player_name: str) -> PlayerWinLossRecords:\n parsed_name = parse_player_name(player_name)\n player_bio = get_player_bio(parsed_name)\n # try:\n # if player_link_cache.__contains__(parsed_name):\n # print(list(map(lambda x: x[0], player_link_cache.__iter__())))\n # player_bio = player_link_cache[parsed_name]\n # else:\n # player_bio = get_player_bio(parsed_name)\n # except ValueError as e:\n # logError(e)\n # # return empty records object\n # return PlayerWinLossRecords()\n player_win_loss_records = {}\n win_loss_types = [\"tour\", \"challenger\", \"itf\"]\n for win_loss_type in win_loss_types:\n player_win_loss_records[\n win_loss_type] = get_player_win_loss_stats_for_tour(\n parsed_name, tour_type=win_loss_type)\n return PlayerWinLossRecords(**player_win_loss_records)",
"def collect_data_by_year(year):\n \n # Format page request\n url = 'http://nflcombineresults.com/nflcombinedata_expanded.php'\n headers = {'user-agent': UserAgent().random}\n params = {'year': year}\n \n soup = get_soup(url, headers=headers, params=params)\n \n # Check if the database contains the requested year\n possible_set = set([])\n possible_years = soup.find('select', attrs={'id': 'year'}).find_all('option')\n \n for y in possible_years:\n possible_set.add(y.get_text())\n \n if str(year) not in possible_set:\n return {} # Return empty dict if the year is incompatible\n \n rows = soup.find_all('tr', attrs={'class': 'tablefont'})\n full_dict = {}\n counter = 1\n\n for row in rows:\n tds = row.find_all('td')\n row_dict = {}\n \n for i, td in enumerate(tds):\n is_hidden = td.find('div', attrs={'style': 'visibility:hidden;'})\n \n # Check if the player has any data for each cell\n if is_hidden:\n text = ''\n else:\n text = td.get_text()\n \n # Add data to a preliminary dict\n row_dict[i] = text\n \n # Assign relevant keys to data\n row_dict['year'] = row_dict.pop(0)\n row_dict['name'] = row_dict.pop(1)\n row_dict['school'] = row_dict.pop(2)\n row_dict['POS'] = row_dict.pop(3)\n row_dict['height'] = row_dict.pop(4)\n row_dict['weight'] = row_dict.pop(5)\n row_dict['handSize'] = row_dict.pop(6)\n row_dict['armLength'] = row_dict.pop(7)\n row_dict['wonderlic'] = row_dict.pop(8)\n row_dict['forty'] = row_dict.pop(9)\n row_dict['bench'] = row_dict.pop(10)\n row_dict['vert'] = row_dict.pop(11)\n row_dict['broad'] = row_dict.pop(12)\n row_dict['shuttle'] = row_dict.pop(13)\n row_dict['3cone'] = row_dict.pop(14)\n row_dict['60shuttle'] = row_dict.pop(15)\n \n # Create unique player code\n name = row_dict['name']\n name_code = name.replace(\"'\", '').replace('.', '').replace(' ', '-').lower()\n key = '{}-{}-{}'.format(name_code, counter, year)\n counter += 1\n \n # Create dict of dicts containing all data\n full_dict[key] = row_dict\n \n return full_dict",
"def getPlayerAdvStat(self, stat, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_call = stat.lower()\r\n stat_dict = {'touch':'Possessions', 'possession':'Possessions',\r\n 'speed':'SpeedDistance', 'distance':'SpeedDistance'}\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashptstats?College=&'\\\r\n 'Conference=&Country=&DateFrom=&DateTo=&Division=&'\\\r\n 'DraftPick=&DraftYear=&GameScope=&Height=&LastNGames=0&'\\\r\n 'LeagueID=00&Location=&Month=0&OpponentTeamID=0&Outcome=&'\\\r\n 'PORound=0&PerMode=PerGame&PlayerExperience=&PlayerOr'\\\r\n 'Team=Player&PlayerPosition=&PtMeasureType=' + \\\r\n stat_dict[stat_call] + '&Season=' + season + \\\r\n '&SeasonSegment=&SeasonType=Regular+Season&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision=&Weight='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n advStat_df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n return advStat_df",
"def returnPlayerStats(self):\n\t\tplayerStats = [self.name, \n\t\t\t\t\t self.agility, \n\t\t\t\t\t self.personality, \n\t\t\t\t\t self.sanity, \n\t\t\t\t\t self.strength, \n\t\t\t\t\t self.progress]\n\t\treturn playerStats",
"def season_games(year):\n\tLOG.debug('Getting season %d', year)\n\tdata = read_html(io=season_games_url(year),\n\t\t\t\t\t attrs={'id': 'games'},\n\t\t\t\t\t infer_types=False,\n\t\t\t\t\t header=0)\n\tif len(data) != 1:\n\t\traise CantFindTheRightTable\n\tdata = data.pop()\n\n\t# Cleaning.\n\tdel data[\"Unnamed: 3\"]\n\t# The code below issues \"UserWarning: \" So we catch UserWarnings.\n\twith warnings.catch_warnings():\n\t\twarnings.filterwarnings(action='ignore', category=UserWarning,\n\t\t\t\t\t\t\t\tmodule=r'pandas\\.core\\.frame',\n\t\t\t\t\t\t\t\tmessage=(r\"Boolean Series key will be reindexed\"\n\t\t\t\t\t\t\t\t\t\t r\" to match DataFrame index\\.\"))\n\t\t# These rows are mid-table header rows.\n\t\tdata = data[data.Week != \"Week\"][data.Week != \"nan\"]\n\n\tdata['week'] = (data.Week\n\t\t\t\t\t.replace(\"WildCard\", \"wild-card\")\n\t\t\t\t\t.replace(\"Division\", \"divisional\")\n\t\t\t\t\t.replace(\"ConfChamp\", \"conference\")\n\t\t\t\t\t.replace(\"SuperBowl\", \"super-bowl\")\n\t\t\t\t\t.apply(\n\t\t\t\t\t\tlambda s: (int(s)\n\t\t\t\t\t\t\t\t if all(c in '1234567890' for c in s)\n\t\t\t\t\t\t\t\t else s)))\n\tdel data['Week']\n\n\tdata['season'] = year\n\tdata['game_date'] = pd.to_datetime(\n\t\tdata.Date\n\t\t.replace(r\"$\", r\", %d\" % year, regex=True)\n\t\t.replace(r\"^(January|February) (\\d+), \\d+$\", r\"\\1 \\2, %d\" % (year + 1),\n\t\t\t\t regex=True))\n\tdel data['Date']\n\n\tfor column in \"PtsW\", \"PtsL\", \"YdsW\", \"TOW\", \"YdsL\", \"TOL\":\n\t data[column] = data[column].apply(int)\n\n\tdata['WatL'] = data['Unnamed: 5'].apply(lambda x: x == '@')\n\tdel data['Unnamed: 5']\n\tdata['hometeam'] = (~data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\tdata.WatL * data['Loser/tie'])\n\tdata['awayteam'] = (data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\t~data.WatL * data['Loser/tie'])\n\tdata['winner'] = data['Winner/tie']\n\tfor column in 'Winner/tie', 'Loser/tie', \"WatL\":\n\t\tdel data[column]\n\tfor column in 'hometeam', 'awayteam', 'winner':\n\t\tdata[column] = data[column].apply(lambda s: s.split()[-1].lower())\n\n\treturn data",
"def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats",
"def load_fixture_player_stats(self):\n stats_list = []\n\n print(\"Getting fixture players..\")\n with Pool(self.pool) as p:\n fixture_info = list(tqdm(p.imap(self.fixture_info_singel, self.fixture_ids, chunksize=1), total=len(self.fixture_ids)))\n print('Getting data from workers..')\n i = 0\n for info in fixture_info:\n stats = {}\n if info:\n stats = {info['id']: []}\n if 'teamLists' in info:\n team_list = info['teamLists']\n for lineups in team_list:\n if lineups:\n team_id = lineups['teamId']\n lineup = lineups['lineup']\n substitutes = lineups['substitutes']\n for l in lineup:\n stats[info['id']].append(l['id'])\n for s in substitutes:\n stats[info['id']].append(s['id'])\n else:\n i += 1\n if stats:\n stats_list.append(stats)\n print('Completed')\n if i >0:\n print(f'{i} games retreived had no stats')\n return stats_list",
"def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players",
"def find_all_by_player(self, player):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE player=? ORDER BY level'\n cursor.execute(command, [player])\n return cursor.fetchall()",
"def get_player_data(self, player, season, mtgs=None, past=None, future=None, single=False):\n\n avail = []\n scheduled = []\n\n # Should be empty arrays if None\n if past is None:\n past = []\n if future is None:\n future = []\n\n nplayed = Schedule.objects.filter(meeting__in=past, player=player).count()\n nscheduled = Schedule.objects.filter(meeting__in=future, player=player).count()\n\n av = PlayerAvailability.objects.get_for_season_player(player, season)\n\n p = {\n 'name': player.first + ' ' + player.last,\n 'id': player.id,\n 'isavail': av.available,\n 'scheduled': av.scheduled,\n 'played': av.played,\n 'nplayed': nplayed,\n 'nscheduled': nscheduled + nplayed,\n 'single': single\n }\n\n return p",
"def fixture_player_stats(self):\n stats_list = []\n fixture_tuples = []\n fixture_player_ids = self.load_fixture_player_stats()\n i = 0\n for fixture in fixture_player_ids:\n for fixture_id, value in fixture.items():\n if value:\n for player_id in value:\n fixture_tuples.append((fixture_id, player_id))\n print(\"Getting player info for all fixtures..\")\n with Pool(self.pool) as p:\n fixture_stats = list(tqdm(p.imap(self.fixture_player_stats_singel_wrapper, fixture_tuples, chunksize=1), total=len(fixture_tuples)))\n for fixture in fixture_stats:\n if fixture:\n stats_list.append(fixture)\n else:\n i += 1\n print('Completed')\n if i >0:\n print(f'{i} games retreived had no stats')\n self.save_completed('player_fixture', stats_list, StorageConfig.STATS_DIR)",
"def scrape_all_world_cup_games():\n\n def scrape_scores_year(year):\n urls = scrape_world_cup_scoreboard(year)\n scores = [scrape_fifa_game(url, 'FIFA World Cup') for url in urls]\n return scores\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_scores_year(year))\n return l"
] | [
"0.6519908",
"0.5899349",
"0.57082665",
"0.5681794",
"0.56795835",
"0.56269175",
"0.5610314",
"0.55635273",
"0.5557057",
"0.5533035",
"0.5482166",
"0.5445572",
"0.53892154",
"0.5381348",
"0.53558725",
"0.53406656",
"0.5335924",
"0.5334206",
"0.5321553",
"0.5291166",
"0.5270813",
"0.51805544",
"0.5157184",
"0.5111913",
"0.50924546",
"0.5064316",
"0.5059153",
"0.49850345",
"0.49701026",
"0.49630007"
] | 0.7862858 | 0 |
Iterates over the cart items and fetches the products from the database | def __iter__(self):
ids_productos = self.carro.keys()
#fetch the product objects and add them to the cart
productos = Producto.objects.filter(id__in=ids_productos)
for producto in productos:
self.carro[str(producto.id)]['producto'] = producto
for item in self.carro.values():
item['precio']=Decimal(item['precio'])
item['precio_total'] = item['precio']*item['cantidad']
yield item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def obtener_productos():\n\n # Se crea la lista de objetos Producto()\n productos = [\n Producto(\"Caja chica\", 5, 100.0),\n Producto(\"Caja mediana\", 3, 185.0),\n Producto(\"Caja grande\", 1, 299.0)\n ]\n\n return productos",
"def get_data(self):\n products_list = []\n for category in CATEGORIES:\n json_data = self.url_to_json(category)\n pages_nb = self.retrieve_cat_pages_nb(json_data)\n for page in range(pages_nb):\n page_json_data = self.page_to_json(category, page+1)\n products = page_json_data[\"products\"]\n for p in products:\n params = {\n 'brands': \"\",\n 'product_name_fr': \"\",\n 'nutrition_grades': \"\",\n 'stores': \"\",\n 'url': \"\",\n 'categories': \"\"\n }\n for key in params:\n try:\n params[key] = p[key]\n except KeyError:\n continue\n if params['product_name_fr'] != \"\" and params['nutrition_grades'] != \"\" and params['url'] != \"\" and params['categories'] != \"\":\n product = Product(brand=params['brands'],\n name=params['product_name_fr'],\n nutrition_grade=params['nutrition_grades'],\n stores=params['stores'], url=params['url'],\n category=params['categories'])\n products_list.append(product)\n try:\n self.manager.save_all(self.clean_data(products_list))\n print(f\"\\n La base de données |{DB_NAME}| a été peuplée \\n\")\n except:\n print(\"\\n Une erreur s'est produite lors \"\n \"du peuplement de la base de données \\n\")",
"def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products",
"def __iter__(self):\n #gets product data keys e.g price, quantity\n product_ids = self.cart.keys()\n\n #checks if the product exist in the database by filtering by product_ids\n products = Product.objects.filter(id__in=product_ids)\n cart = self.cart.copy()\n\n #loop through the products 1 by 1 and re-assigns them to the product.id in the cart\n for product in products:\n cart[str(product.id)][\"product\"] = product\n\n # get price and quatity of items and mutiplies price by quantity to get total price of items\n for item in cart.values():\n item[\"price\"] = Decimal(item[\"price\"])\n item[\"total_price\"] = item[\"price\"] * item[\"qty\"]\n yield item",
"def __iter__(self):\n product_ids = self.basket.keys()\n products = Product.products.filter(id__in=product_ids)\n basket = self.basket.copy()\n\n for product in products:\n basket[str(product.id)]['product'] = product\n\n for item in basket.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['qty']\n yield item",
"def get_all_products(self):\n\t\tpass",
"def get_products(self):\n\n lst = []\n for product in self.products.findall('product'):\n id = product.find('id').text\n name = product.find('name').text\n dispensary_id = product.find('dispensary_id').text\n dispensary_name = product.find('dispensary_name').text\n canabis_brand = product.find('canabis_brand').text\n canabis_strain = product.find('canabis_strain').text\n category = product.find('category').text\n subcategory = product.find('subcategory').text\n thc_level = product.find('thc_level').text\n cbd_level = product.find('cbd_level').text\n cbn_level = product.find('cbn_level').text\n thc_level_type = product.find('thc_level_type').text\n cbd_level_type = product.find('cbd_level_type').text\n cbn_level_type = product.find('cbn_level_type').text\n\n description = product.find('description').text\n created_at = product.find('created_at').text\n updated_at = product.find('updated_at').text\n\n prices = []\n urls = []\n images = []\n\n for child in product:\n if child.tag == 'prices':\n for cost in child.findall('cost'):\n prices.append(Price(cost.attrib['unit'], cost.text))\n\n if child.tag == 'urls':\n admin = child.find('admin').text\n public = child.find('public').text\n urls.append(UrlInfo(admin, public))\n\n if child.tag == 'images':\n for image in child.findall('image'):\n images.append(Image(image.attrib['main'], image.text,))\n\n lst.append(Product(id, name, dispensary_id, dispensary_name,\n canabis_brand, canabis_strain,\n category, subcategory, thc_level, cbd_level,\n cbn_level, thc_level_type, cbd_level_type,\n cbn_level_type, prices, urls, images,\n description, created_at, updated_at))\n\n return lst",
"def __iter__(self):\n product_ids = self.cart.keys()\n # get the product objects and add them to the cart\n products = Product.objects.filter(id__in=product_ids)\n\n cart = self.cart.copy()\n for product in products:\n cart[str(product.id)]['product'] = product\n\n for item in cart.values():\n item['price'] = Decimal(item['price'])\n if item['duration']!=None:\n item['total_price'] = Decimal(item['price']) * item['quantity'] * Decimal(item['duration'])\n else:\n item['total_price'] = Decimal(item['price']) * item['quantity']\n yield item",
"def cargarProductosSinObra(self):\n\n self.limpiarTabla(self.tableProductos)\n\n ##Cnsulta para obtener todos los productos del sistema, con su correspondiente\n ##codigo de barra, monodroga, descuento, importe\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n ##Se cargan los datos obtenidos en la tabla de Producto\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))\n self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))\n self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))\n self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))\n self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))\n self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))\n\n ##Se carga la cantidad de cada producto en la tabla\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))",
"def __iter__(self): \n item_ids = self.cart.keys()\n\n # getting product objects and adding them to the cart\n items = Item.objects.filter(id__in=item_ids)\n for item in items:\n self.cart[str(item.id)]['item'] = item\n # iterating over the cart items and convert the item prices back to the decimal adding a total price attribute to each item\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item",
"def get_products(self) -> dict:\n\t\tproducts = dict()\n\n\t\tdb = Database()\n\t\tdb.create_connection(self._file_path)\n\t\trows = db.get_products()\n\t\tdb.close_connection()\n\n\t\tfor row in rows:\n\t\t\tif row[0] not in products:\n\t\t\t\ttry:\n\t\t\t\t\tproducts[row[0]] = Product(row[0], row[1], row[2], row[3]) # code, price, lastupdate, currency\n\t\t\t\texcept Exception as e: \n\t\t\t\t\t# IF the database was not correct parsed, the item will be discarted, \n\t\t\t\t\t# the event will be logged in the log file and the program will continue\n\t\t\t\t\tlogging.error(str(datetime.now())+': ' + e)\n\t\t\t\t\tcontinue\n\n\t\treturn products",
"def products(self):\r\n return self._products",
"def get_product_data_off(self):\n list_products_name = []\n for x in self.list_categories: \n \"\"\"get products' data from openfoodfacts api with string as paramaters\"\"\"\n parameters = {\n 'action': 'process',\n 'json': 1,\n 'countries': 'France',\n 'page_size': 100,\n 'page': 1,\n 'tagtype_0': 'categories',\n 'tag_contains_0': 'contains',\n 'tag_0': x\n }\n r = requests.get('https://fr.openfoodfacts.org/cgi/search.pl',\n params=parameters) # passing parameters in URL\n print(r.url)\n data = r.json() # r. from requests module decodes json file\n products = data['products'] #access dictionnary items by referring to its key name, products ordered by id\n list_products_name.append(products) \n self.list_products = list_products_name # list_categories_name is passed in the instance property",
"def cargar_productos(self, obraSocial):\n self.limpiarTabla(self.tableProductos)\n\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,DescuentoModel.descuento,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n join(DescuentoModel).filter(DescuentoModel.producto==ProductoModel.codigo_barra).\\\n filter(DescuentoModel.obra_social==obraSocial,ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n for m, campo in enumerate(obj):\n self.tableProductos.setItem(n, m, QtGui.QTableWidgetItem(str(campo)))\n\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))",
"def generateProducts(self):\r\n\r\n # Creates items in each category\r\n for i in range(self.num_of_items):\r\n self.ID_DICT[i+self.num_of_items] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*2] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*3] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*4] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*5] = random.randint(1, 10)\r\n self.ID_DICT[i+self.num_of_items*6] = random.randint(1, 10)\r\n\r\n\r\n # Sort for easy selection\r\n sorted(self.ID_DICT)\r\n\r\n for product in self.ID_DICT.keys():\r\n temp_int = self.ID_DICT[product]\r\n self.c.execute(\"INSERT INTO Products (ProductID, Price) VALUES (?, ?)\", (product, self.ID_DICT[product]))\r\n self.conn.commit()\r\n\r\n if self.print_items:\r\n print(\"\\nAll items in store:\")\r\n print(self.ID_DICT)\r\n print()",
"def buscarProd(self):\n medicamento = str(self.lineMedicamento.text())\n monodroga = str(self.lineMonodroga.text())\n data = self.getAllTabla(self.tableProductos)\n\n if medicamento != \"\":\n dataMedic = filter(lambda x: x[1].upper() == medicamento.upper(), data.values())\n else:\n dataMedic = data.values()\n if monodroga != \"\":\n dataMono = filter(lambda x: x[3].upper() == monodroga.upper(), dataMedic)\n else:\n dataMono = dataMedic\n\n for dato in data:\n self.tableProductos.setRowHidden(dato,False)\n\n for dato in data:\n if not data[dato] in dataMono:\n self.tableProductos.setRowHidden(dato,True)",
"def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list",
"def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})",
"def __iter__(self):\n return self._products.__iter__()",
"def fill_products(self):\n cursor = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n categories = dict()\n for page in range(1, 2):\n result = requests.get(\n 'https://fr.openfoodfacts.org/cgi/search.pl?page_size=1000&page={}&action=process&json=1'.format(\n page)).json()\n for element in result['products']:\n try:\n cursor.execute(\n \"INSERT INTO product (name, store, nutrition_grade, url) VALUES (%s, %s, %s, %s) RETURNING \"\n \"id, name\",\n (element[\"product_name\"], element[\"stores\"], element[\"nutrition_grade_fr\"], element[\"url\"]))\n # un except pour éviter les erreurs de clés\n query_result = cursor.fetchone()\n for category in element[\"categories_tags\"]:\n try:\n cursor.execute(\"INSERT INTO product_category(product_id, category_id) VALUES (%s, %s)\",\n (query_result[0], self.categories[category]))\n except KeyError:\n print(\"Categorie insertion failed\")\n\n print(element[\"product_name\"])\n except KeyError:\n print(f'product insertion failed:')\n\n self.conn.commit()\n cursor.close()",
"def get_products(self):\n page = 1\n out = []\n while True:\n resp = self.get_session().Product.find(limit=10,page=page)\n if not len(resp):\n return\n yield resp\n page += 1",
"def get_products(self, data, category):\r\n for product_information in data['products']:\r\n name = product_information.get('product_name', None)\r\n # in order to remove linebreak from product name\r\n # print(\"WITH LINEBREAK : \", repr(name))\r\n if name:\r\n name = name.replace('\\n', '')\r\n # print(\"WITHOUT LINEBREAK : \", repr(name))\r\n category = Categories.objects.get(name=category)\r\n nutriscore = product_information.get('nutrition_grades', None)\r\n link = product_information.get('url', None)\r\n image = product_information.get('image_url', None)\r\n nutrition_image = product_information.get\\\r\n ('image_nutrition_url', None)\r\n if category is None \\\r\n or name is None \\\r\n or len(name) > 75 \\\r\n or nutriscore is None \\\r\n or link is None \\\r\n or image is None \\\r\n or nutrition_image is None:\r\n continue\r\n else:\r\n try:\r\n product, created = Products.objects.get_or_create(\r\n name=str(name),\r\n category=category,\r\n nutriscore=nutriscore,\r\n link=link,\r\n image=image,\r\n nutrition_image=nutrition_image,\r\n )\r\n if created:\r\n product.save()\r\n print(product.name)\r\n\r\n except Products.DoesNotExist:\r\n raise CommandError(\"Products %s could not been reached\"\r\n % name)\r\n except IntegrityError:\r\n continue",
"def get_items_from_category(save_db=False):\n query_result = pd.read_sql_query(\"\"\"SELECT c1.id, c1.name, c1.parent_id, c1.url\n FROM categories c1 LEFT OUTER JOIN categories c2\n ON c1.id = c2.parent_id\n WHERE c2.parent_id IS NULL\n LIMIT 1400 OFFSET 1300\"\"\", conn)\n for i in query_result.itertuples():\n name = i.name[:-10].strip()\n cat_url = i.url\n cat_id = i.id\n quantity = i.name[-10:].strip()\n \n for i in range(100):\n url = cat_url + f'&page={i+1}'\n print(url)\n soup = get_url(url)\n \n result = []\n \"\"\" item: div 'product-item' > div 'content'\n img: img 'product-imgage'\n title: p 'title'\n price: span 'price-regular'\n sale-tag: span 'sale-tag'\n final-price: span 'final-price'\n \"\"\"\n try:\n div_container = soup.find_all('div', {'class': 'product-item'})\n except Exception as err:\n print('ERROR BY DIV FINDALL: ', err)\n if div_container:\n for div in div_container:\n # it = {'item_id':'','name':'', 'brand':'', 'url':'', 'img_url':'', 'price':'', 'sale-tag':'', 'final-price':''}\n item_id = None\n item_path = div['data-category']\n item_name = div.a['title']\n brand = div['data-brand']\n item_url = div.a['href']\n img_url = div.img['src']\n regular_price = div.find('span', {'class': 'price-regular'}).text\n sale_tag = div.find('span', {'class': 'final-price'}).text[-5:-1]\n final_price = div.find('span', {'class': 'final-price'}).text[:-5].strip()\n\n item = Items(item_id, item_path, cat_id, item_name, brand, item_url,\n img_url, regular_price, sale_tag, final_price)\n if save_db:\n item.save_into_db()\n print(f'SAVE {item_name} INTO DTB')\n result.append(item)\n else:\n break",
"def fill_data_product(self):\n self.product.fill_data_product(self.list_products, self.mycursor, self.my_database)",
"def products(self):\n return list(Product.select())",
"def get_all_products():\n data = order_obj.get_all_products()\n return data",
"def poster_list_products(products):\r\n print('\\n Choisir un produit : ')\r\n dict_product = {}\r\n index = 1\r\n\r\n for i in products:\r\n poster_products = cl.Food(i, index)\r\n dict_product[poster_products.index] = poster_products.name\r\n print(index, \" : \", poster_products.name)\r\n index += 1\r\n return dict_product",
"def products(self):\n response = requests.get(self._url(self._PRODUCTS_PATH), headers=self._headers)\n return response.json()",
"def __iter__(self):\n conta_ids = self.cart.keys()\n # get the conta objects and add them to the cart\n contas = Conta.objects.filter(id__in=conta_ids)\n for conta in contas:\n self.cart[str(conta.id)]['conta'] = conta\n\n for item in self.cart.values():\n item['conta'] = item['conta']\n item['valor'] = item['valor']\n item['d_c'] = item['d_c']\n item['codigo_historico'] = item['codigo_historico']\n item['historico'] = item['historico']\n yield item",
"def poster_product_list(product):\r\n print('\\n Séléctionner un product : ')\r\n dict_produit = {}\r\n index = 1\r\n for i in product:\r\n poster_product = cl.Food(i, index)\r\n dict_produit[poster_product.index] = poster_product.name\r\n print(index, \" : \", poster_product.name)\r\n index += 1\r\n return dict_produit"
] | [
"0.7394557",
"0.7128512",
"0.70565516",
"0.69151694",
"0.686523",
"0.6854671",
"0.68118054",
"0.6673467",
"0.66018593",
"0.65965927",
"0.65736145",
"0.6560011",
"0.6546499",
"0.6536913",
"0.653328",
"0.6517128",
"0.65117145",
"0.6508801",
"0.6440741",
"0.6420369",
"0.6409673",
"0.6396618",
"0.6355537",
"0.63461363",
"0.6340616",
"0.6326671",
"0.6300908",
"0.6292127",
"0.6268347",
"0.62517005"
] | 0.74252695 | 0 |
Add a product to the cart or update its quantity. | def add(self, producto, cantidad = 1, actualizar_cantidad = False):
id_producto = str(producto.id)
if id_producto not in self.carro:
self.carro[id_producto] = {"cantidad":0,"precio":str(producto.precio)}
if actualizar_cantidad:
self.carro[id_producto]["cantidad"]= cantidad
else:
self.carro[id_producto]["cantidad"] += cantidad
self.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def agregar_producto(self, producto):\n\n self.productos.append(producto)",
"def agregarProducto(self):\n itemActual=self.tableFactura.currentItem()\n producto = int(self.tableFactura.item(itemActual.row(),0).text())\n descuento = DescuentoModel.buscar(DescuentoModel.obra_social,self.sesion,self.obraSocial).\\\n filter(DescuentoModel.producto==producto)[0].descuento\n cantidad = int(self.tableFactura.item(itemActual.row(), 1).text())\n importe = float(self.tableFactura.item(itemActual.row(), 2).text()) * descuento\n row = self.tableNC.rowCount()\n self.tableNC.insertRow(row)\n self.tableNC.setItem(row, 0, QtGui.QTableWidgetItem(str(producto)))\n self.tableNC.setItem(row, 1, QtGui.QTableWidgetItem(str(cantidad)))\n self.tableNC.setItem(row, 2, QtGui.QTableWidgetItem(str(importe)))\n self.detallesReintegrables.append([int(self.numeroFacturaActual),itemActual.row()+1,descuento,importe])\n self.detallesImprimibles.append([producto,cantidad,descuento,importe])\n self.tableFactura.hideRow(itemActual.row())",
"def actualizar(self, producto, cantidad):\n producto_id = str(producto)\n\n if producto_id in self.carro:\n self.carro[producto_id]['cantidad'] = cantidad\n \n self.guardar()",
"def add(self, product):\n pass",
"def agregarProducto(self):\n itemActual=self.tableProductos.currentItem()\n cantidad, ok = QtGui.QInputDialog.getInt(self,\"Cantidad\",\"Ingrese cantidad del producto\",1,1,2000,5)\n if not ok:\n self.showMsjEstado(\"No se ha seleccionado cantidad del producto\")\n else:\n cantidadProducto=int(self.tableProductos.item(itemActual.row(),6).text())\n if cantidad>cantidadProducto:\n QtGui.QMessageBox.information(self,\"Aviso\",\"La cantidad ingresada es mayor que la del stock\")\n else:\n if self.productosAgregados == 0 and self.factura == None:\n self.factura=FacturaModel(FacturaModel.generarNumero(self.sesion))\n self.factura.guardar(self.sesion)\n self.productosAgregados+=1\n rowItemActual=itemActual.row()\n rows=self.tableFactura.rowCount()\n self.tableFactura.insertRow(rows)\n\n #--Carga de items en la tabla--*\n producto = int(self.tableProductos.item(rowItemActual,0).text())\n importeActual=float(self.tableProductos.item(rowItemActual,5).text())\n descuentoActual=float(self.tableProductos.item(rowItemActual,4).text())\n subtotal=importeActual*(1-descuentoActual)\n ####-------------------------#####\n detalleFactura=DetalleFacturaModel(self.factura.numero,producto,cantidad,\n subtotal*cantidad,descuentoActual,self.productosAgregados\n )\n self.descontarCantidad(detalleFactura,producto,cantidad)\n self.tableFactura.setItem(rows,0,QtGui.QTableWidgetItem(str(detalleFactura.producto)))\n self.tableFactura.setItem(rows,1,QtGui.QTableWidgetItem(str(detalleFactura.cantidad)))\n self.tableFactura.setItem(rows, 2, QtGui.QTableWidgetItem(str(\"%.2f\"%(subtotal*cantidad))))\n\n detalleFactura.guardar(self.sesion)\n self.detallesTabla[rows] = detalleFactura\n\n self.data[rows] = [\n producto, cantidad, subtotal*cantidad, descuentoActual\n ]\n\n self.actualizar()\n self.objectModified.emit()",
"def add(self, product, product_qty):\n product_id = str(product.id)\n if product_id in self.cart:\n self.cart[product_id][\"qty\"] = product_qty\n else:\n self.cart[product_id] = {'price': str(product.price), 'qty':int(product_qty)}\n self.save()",
"def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)",
"def add(self, product, qty):\n product_id = str(product.id)\n\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n else:\n self.basket[product_id] = {'price': str(product.price), 'qty': qty}\n\n self.save()",
"def insert(self, product):\n pass",
"def add_product(self, product: Product):\n log.debug(\"Adding a new product\")\n product_parameters = product.to_db()\n try:\n with DBCursor(self.host) as cursor:\n cursor.execute(\"INSERT INTO items VALUES (?, ?, ?, ?, ?)\", (product_parameters['name'].lower(), product_parameters['units'], product_parameters['last_buy'], product_parameters['cost'], product_parameters['price']))\n except sqlite3.IntegrityError:\n log.critical(\"An integrity error was raised. Maybe a matching name or id.\")\n raise DatabaseIntegrityError(\"There's a matching name or id already stored.\")\n else:\n log.info(f\"{product.__repr__} was added successfully.\")",
"def add_product(cls, product_name, price, quantity):\n Product.insert(product_name=product_name,\n product_price=price,\n product_quantity=quantity,\n date_updated=date.today()).on_conflict(\n conflict_target=[Product.product_name],\n preserve=[Product.product_price,\n Product.product_quantity,\n Product.date_updated]).execute()\n print(f'\\nProduct added successfully!')\n print(f'Product: {product_name} ' +\n f'Price: ${int(price) / 100:.2f} ' +\n f'Quantity: {quantity}\\n')",
"def add(self, product):\n product_id = str(product.id)\n self.wishlist[product_id] = {'price': str(product.price)}\n self.save()",
"def add_product():\n name = input(\"\\nPlease enter the name of the new product: \")\n\n quantity = input(\"Please enter the quantity of the new product: \")\n while quantity.isdigit() == False:\n print(\"Please enter a valid number.\")\n quantity = input(\"Please enter the quantity of the new product: \")\n quantity = int(quantity)\n\n price = input(\"Please enter the price of the new product(in dollars): \").strip(\"$\")\n while True:\n try:\n price = float(price)\n break\n except ValueError:\n print(\"Please enter a valid price\")\n price = input(\"Please enter the price of the new product: \")\n\n price = price * 100\n\n try:\n Product.create(product_name=name,\n product_price=price,\n product_quantity=quantity)\n latest_item = Product.select().order_by(Product.product_id.desc()).get()\n print(f\"You just added {latest_item.product_name} as the {latest_item.product_id}th item in the inventory.\\n\")\n\n except IntegrityError:\n to_update = Product.get(product_name=name)\n to_update.product_name = name\n to_update.product_price = price\n to_update.product_quantity = quantity\n to_update.date_updated = datetime.datetime.now()\n to_update.save()\n print(f\"You just updated {to_update.product_name}\\n\")\n input(\"\\nPress ENTER to continue\")\n clear()",
"def add_item(self, obj): # deprecated\n logger.info('ItemProduct adding item initiated')\n try:\n if not obj['edit']:\n unit, = self.ProductUom.find([('name', '=', obj['units'])])\n template = self.ProductTemplate()\n try:\n if self.Product.find([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n if self.Product.find([('name', '=', obj['name']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n except Exception:\n pass\n template.category = self.ProductCategory.find([('name', '=', obj['category'])])[-1]\n template.default_uom = unit\n template.purchase_uom = unit\n template.type = 'goods'\n else:\n product = self.Product.find([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n template = product.template\n unit, = self.ProductUom.find([('name', '=', obj['units'])])\n template.default_uom = unit\n template.purchase_uom = unit\n template.category = self.ProductCategory.find([('name', '=', obj['category'])])[-1]\n\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price = Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n product = self.Product.find([('name', '=', template.name),\n ('description', '=', 'Stock'), ('type', '=', 'goods')])\n if product:\n product = product[-1]\n else:\n product = self.Product.find([('name', '=', template.name), ('type', '=', 'goods')])\n ids = []\n for i in product:\n ids.append(i.id)\n ids.sort()\n print \"ids\", ids\n product = self.Product(id=ids[-1])\n product.code = obj['id']\n product.description = 'Stock'\n product.save()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False",
"def insert_products(self):\n logic = ProductLogic()\n \n try:\n # We create the list of product objects\n products = self.objects_factory.create_product_object_list()\n products = set(products)\n\n for product in products:\n logic.insert(product)\n except:\n print('Il y a eu un problème lors de la récupération des données, veuillez rééssayer')",
"def register_product(p: Product) -> ExecRet:\n market = get_market()\n pid = p.pid\n if pid in market.products.keys():\n return ExecRet.err(message='pid %d already exists' % pid)\n market.add_product(p)\n LOGGER.info('added product %s' % p.json())\n return ExecRet.ok()",
"def create_new_product(self):\n if len(self.lineEdit_name.text()) != 0 and len(self.lineEdit_desc.text()) != 0 and len(\n self.lineEdit_cost.text()) != 0 and len(self.lineEdit_cat.text()) != 0:\n try:\n cost = float(self.lineEdit_cost.text())\n list = self.product_list()\n try:\n add_product(list)\n self.frame_3.show()\n self.label_16.setText('NEW PRODUCT CREATE SUCCESSFULLY!')\n except:\n self.frame_3.show()\n self.label_16.setText('ERROR CREATE NEW PRODUCT!')\n\n except ((ValueError)):\n self.frame_3.show()\n self.label_16.setText('IN THE COST FIELDS: JUST NUMBERS!')\n\n '''else:\n self.frame_3.show()\n self.label_16.setText('IN THE COST FIELDS: JUST NUMBERS!')'''\n else:\n self.frame_3.show()\n self.label_16.setText('THERE CAN BE NO BLANCK FIELDS!')",
"def id_produto(self, id_produto):\n self._id_produto = id_produto",
"def id_produto(self, id_produto):\n self._id_produto = id_produto",
"def add_product(self, name, energy_points):\n now = datetime.datetime.now()\n date = \"{}-{}-{}\".format(now.year, now.month, now.day)\n Product(productName=name, energyPoints=energy_points, date=date)",
"def add_product(self, product):\n return self._make_post_request(self._urls['products'],\n data=dict(name=product))",
"def add_item(self, product_code: str, quantity: int):\n if quantity>0:\n if product_code in self._product_prices.keys():\n if product_code not in self._items:\n self._items[product_code] = quantity\n else:\n q = self._items[product_code]\n self._items[product_code] = q + quantity\n else:\n logging.warning(str(datetime.now())+\" Product Code: '\"+product_code+\"' is incorrect.\")\n else:\n logging.warning(str(datetime.now())+\" Quantity: '\" +str(quantity) +\"' for product code: '\"+product_code+\"' is incorrect.\")",
"def cargarProductosSinObra(self):\n\n self.limpiarTabla(self.tableProductos)\n\n ##Cnsulta para obtener todos los productos del sistema, con su correspondiente\n ##codigo de barra, monodroga, descuento, importe\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n ##Se cargan los datos obtenidos en la tabla de Producto\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))\n self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))\n self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))\n self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))\n self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))\n self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))\n\n ##Se carga la cantidad de cada producto en la tabla\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))",
"def add_product(block, product):\n block.append(Product(product['Номер'], product['Продукт'],\n product['ккал'].replace(',', '.'),\n product['білок'].replace(',', '.'),\n product['жири'].replace(',', '.'),\n product['вуглеводи'].replace(',', '.'),\n product['холестерин'].replace(',', '.'),\n product['1 ХO є в граммах продукта (Старыи розрах)'].replace(',', '.'),\n product['1 ХO є в граммах продукта (новий розрах)'].replace(',', '.')))",
"def agregar_elem(self, e):\r\n\r\n indice = e.key % self.n\r\n if self.tabla[indice] is None:\r\n self.elementos += 1\r\n self.tabla[indice] = dl.lista_dobleE()\r\n self.tabla[indice].insertar_elem(e)\r\n else: \r\n if self.tabla[indice].insertar_elem(e) == True:\r\n pass\r\n else:\r\n self.elementos += 1\r\n if self.factor_carga() > 0.7:\r\n self.mostrar()\r\n print(\"Factor de carga: {}\".format(round(self.factor_carga(),3)))\r\n self.__rehashing()",
"def change_product_qty(self):\n Inventory = self.env['stock.inventory']\n\n\n for wizard in self:\n product = wizard.product_id.with_context(location=wizard.location_id.id, lot_id=wizard.lot_id.id)\n line_data = wizard._action_start_line()\n\n\n if wizard.product_id.id and wizard.lot_id.id:\n inventory_filter = 'none'\n elif wizard.product_id.id:\n inventory_filter = 'product'\n else:\n inventory_filter = 'none'\n inventory = Inventory.create({\n 'name': _('INV: %s') % tools.ustr(wizard.product_id.display_name),\n 'filter': inventory_filter,\n 'product_id': wizard.product_id.id,\n 'location_id': wizard.location_id.id,\n 'lot_id': wizard.lot_id.id,\n 'line_ids': [(0, 0, line_data)],\n })\n inventory.action_done()\n return {'type': 'ir.actions.act_window_close'}",
"def add_item(product, price):\n ADD_PRODUCTS[product] = price",
"def add_product(self, label):\n print('Adding product:', label)\n client = self.application.__init_blockchain_client__()\n response = client.addProduct(label)\n client.close()\n\n return response",
"def add_product(self, name, cost, stock, location):\n\n cur.execute(\"\"\"INSERT INTO catalogue(vendorname, productname, unitcost, stock, location) \n VALUES (?, ?, ?, ?, ?)\"\"\", (self.vendorname, name, cost, stock, location))",
"def save_product(cls, product: dict, substitutes: (tuple, None) = None,\n user=None):\n\n nutriments = product.get('nutriments', {})\n with transaction.atomic():\n product_db, created = Product.objects.update_or_create(\n bar_code=product['code'],\n defaults={\n 'name': product.get('product_name', None),\n 'generic_name': product.get('generic_name', None),\n 'nutrition_grades': product.get('nutrition_grades', None),\n 'fat': str(nutriments.get('fat_100g', None)),\n 'saturated_fat': str(nutriments.get('saturated-fat_100g',\n None)),\n 'sugars': str(nutriments.get('sugars_100g', None)),\n 'salt': str(nutriments.get('salt_100g', None)),\n 'image_url': product.get('image_url', None)\n }\n )\n\n if created:\n categories = []\n for category in reversed(\n product.get('categories_hierarchy', ())):\n category_db, created = Category.objects.get_or_create(\n name=category)\n categories.append(category_db)\n ProductCategory.objects.bulk_create(\n [ProductCategory(product=product_db, category=category,\n hierarchy=i)\n for i, category in enumerate(categories, start=1)]\n )\n\n ingredients = []\n iteration_ingredients = ()\n if product.get('ingredients_text_fr', ()):\n iteration_ingredients = product.get('ingredients_text_fr')\n elif product.get('ingredients', ()):\n iteration_ingredients = product.get('ingredients')\n\n for ingredient in iteration_ingredients:\n ingredient = ingredient[:200]\n ingredient_db, created = Ingredient.objects.get_or_create(\n name=ingredient)\n ingredients.append(ingredient_db)\n\n product_db.ingredients.add(*ingredients)\n\n brands = []\n for brand in product.get('brands_tags', ()):\n brand_db, created = Brand.objects.get_or_create(name=brand)\n brands.append(brand_db)\n product_db.brands.add(*brands)\n\n stores = []\n for store in product.get('stores_tags', ()):\n store_db, created = Store.objects.get_or_create(name=store)\n stores.append(store_db)\n product_db.stores.add(*stores)\n\n if substitutes is not None and user is not None:\n for substitute in substitutes:\n substitute_db = cls.save_product(substitute)\n cls.save_link_p_s_p(user, product_db, substitute_db)\n\n return product_db"
] | [
"0.74455756",
"0.73769337",
"0.72791195",
"0.68235534",
"0.68147254",
"0.6459701",
"0.6367066",
"0.63326365",
"0.62502146",
"0.6228306",
"0.6128186",
"0.60275525",
"0.5993757",
"0.5993371",
"0.58744925",
"0.5865621",
"0.5846246",
"0.58283526",
"0.58283526",
"0.5824645",
"0.58242",
"0.5819324",
"0.57939434",
"0.5779488",
"0.5778944",
"0.57648677",
"0.5745716",
"0.57327825",
"0.57295114",
"0.5724175"
] | 0.79951173 | 0 |
Test that enumerations are properly detected | def test_enum_detection():
grammar = """
IsEnum: "keyword1" | "keyword2" | "keyword3";
IsNotEnum: val="keyword1" | val="keyword2" | val="keyword3";
StillNotEnum: val="keyword1" | "keyword2" | "keyword3";
// identified as EDatatype with object type
NotEnumAgain: SubEnum | SubEnum2;
// this is an enumeration
SubEnum: "keyword1" | "keyword2";
SubEnum2: "keyword3" | "keyword4";
"""
mm = metamodel_from_str(grammar)
IsEnum = mm['IsEnum']
assert isinstance(IsEnum, ecore.EEnum)
assert IsEnum.name == 'IsEnum'
assert all((x in IsEnum for x in ("keyword1", "keyword2", "keyword3")))
IsNotEnum = mm['IsNotEnum']
assert IsNotEnum.name == 'IsNotEnum'
assert isinstance(IsNotEnum, ecore.EClass)
StillNotEnum = mm['StillNotEnum']
assert StillNotEnum.name == 'StillNotEnum'
assert isinstance(StillNotEnum, ecore.EClass)
NotEnumAgain = mm['NotEnumAgain']
assert isinstance(NotEnumAgain, ecore.EDataType)
assert NotEnumAgain.name == 'NotEnumAgain'
SubEnum = mm['SubEnum']
assert isinstance(SubEnum, ecore.EEnum)
assert SubEnum.name == 'SubEnum'
assert all((x in IsEnum for x in ("keyword1", "keyword2"))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_test_enum_parameters(self):\n pass",
"def test_driver_enums(self):\n\n self.assert_enum_has_no_duplicates(DataParticleType())\n self.assert_enum_has_no_duplicates(ProtocolState())\n self.assert_enum_has_no_duplicates(ProtocolEvent())\n self.assert_enum_has_no_duplicates(Parameter())\n self.assert_enum_has_no_duplicates(Command())\n\n # Test capabilities for duplicates, them verify that capabilities is a subset of protocol events\n self.assert_enum_has_no_duplicates(Capability())\n self.assert_enum_complete(Capability(), ProtocolEvent())",
"def testRepeatedInt(self):\n self.assertRaises(messages.EnumDefinitionError,\n messages.Enum.def_enum,\n {'Ok': 1, 'Repeated': 1},\n 'BadEnum')",
"def check_enum(enumerator, value):\n is_valid = False\n for data in enumerator:\n if data == value:\n is_valid = True\n break\n\n if is_valid:\n return value\n else:\n my_banner(\"Value must be from enum \" + enumerator +\" Value has been set to N/A\")\n return \"na\"",
"def check_enumeration_style(ctx, stmt):\n elemtype = stmt.search_one(\"type\")\n if elemtype is None or elemtype.arg != \"enumeration\":\n return\n\n for enum in elemtype.search(\"enum\"):\n if re.match(r\"[a-z]\", enum.arg):\n err_add(ctx.errors, stmt.pos, \"OC_ENUM_CASE\",\n (enum.arg, enum.arg.upper().replace(\"-\", \"_\")))\n elif not re.match(r\"^[A-Z0-9][A-Z0-9\\_\\.]{0,}$\", enum.arg):\n err_add(ctx.errors, stmt.pos, \"OC_ENUM_UNDERSCORES\",\n (enum.arg, enum.arg.upper().replace(\"-\", \"_\")))",
"def testNonInt(self):\n self.assertRaises(messages.EnumDefinitionError,\n messages.Enum.def_enum,\n {'Bad': '1'},\n 'BadEnum')",
"def test_get_enum_by_value():\n assert BusinessType.get_enum_by_value('CP') == BusinessType.COOPERATIVE\n assert BusinessType.get_enum_by_value('FM') == BusinessType.PARTNERSHIP_AND_SOLE_PROP\n assert BusinessType.get_enum_by_value('NOT_FOUND') is None",
"def test_check_enumtype(self):\n self.cursor.execute(self.imp_seg)\n insert = \"\"\"\n INSERT INTO imports.import_segment(\n avancement, statut, id_import)\n VALUES (\n 'Tracé arrêté','VV', 2);\n \"\"\"\n self.cursor.execute(insert)\n self.cursor.execute(\"SELECT veloroutes.import_veloroutes_segment()\")\n self.cursor.execute(\"SELECT avancement FROM veloroutes.segment\")\n result = self.cursor.fetchall()\n self.assertEqual('02', result[0][0])",
"def test_enum_datatypes(self) -> None:\n directory = os.path.join(PAYLOAD_DIRECTORY, 'enum_payloads')\n type_name = 'NULL'\n\n def get_id(type_: Dict[str, str]) -> int:\n \"\"\"A helper function to improve test case readability.\"\"\"\n return int(type_[f'{type_name}_id'])\n\n # ArmorFacing\n filepath = os.path.join(directory, 'armor_facing.json')\n type_name = 'armor_facing'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.ArmourFacing.FRONT)\n self.assertEqual(get_id(type_list[1]), ps2.ArmourFacing.RIGHT)\n self.assertEqual(get_id(type_list[2]), ps2.ArmourFacing.TOP)\n self.assertEqual(get_id(type_list[3]), ps2.ArmourFacing.REAR)\n self.assertEqual(get_id(type_list[4]), ps2.ArmourFacing.LEFT)\n self.assertEqual(get_id(type_list[5]), ps2.ArmourFacing.BOTTOM)\n self.assertEqual(get_id(type_list[6]), ps2.ArmourFacing.ALL)\n\n # FireModeType\n filepath = os.path.join(directory, 'fire_mode_type.json')\n type_name = 'fire_mode_type'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.FireModeType.PROJECTILE)\n self.assertEqual(get_id(type_list[1]), ps2.FireModeType.IRON_SIGHT)\n self.assertEqual(get_id(type_list[2]), ps2.FireModeType.MELEE)\n self.assertEqual(get_id(type_list[3]),\n ps2.FireModeType.TRIGGER_ITEM_ABILITY)\n self.assertEqual(get_id(type_list[4]), ps2.FireModeType.THROWN)\n\n # MetagameEventState\n filepath = os.path.join(directory, 'metagame_event_state.json')\n type_name = 'metagame_event_state'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.MetagameEventState.STARTED)\n self.assertEqual(get_id(type_list[1]),\n ps2.MetagameEventState.RESTARTED)\n self.assertEqual(get_id(type_list[2]),\n ps2.MetagameEventState.CANCELLED)\n self.assertEqual(get_id(type_list[3]), ps2.MetagameEventState.ENDED)\n self.assertEqual(get_id(type_list[4]),\n ps2.MetagameEventState.XP_BONUS_CHANGED)\n\n # TargetType\n filepath = os.path.join(directory, 'target_type.json')\n type_name = 'target_type'\n with open(filepath, encoding='utf-8') as payload_file:\n payload: Dict[str, Any] = json.load(payload_file)\n type_list: List[Dict[str, str]] = payload[f'{type_name}_list']\n self.assertEqual(get_id(type_list[0]), ps2.TargetType.SELF)\n self.assertEqual(get_id(type_list[1]), ps2.TargetType.ANY)\n self.assertEqual(get_id(type_list[2]), ps2.TargetType.ENEMY)\n self.assertEqual(get_id(type_list[3]), ps2.TargetType.ALLY)",
"def test_domain_and_target_type(self):\n t = Enumerate([2, \"asfa\", \"ipsi\"])\n assert t.domain_type == \"categorical\"\n assert t.target_type == \"integer\"",
"def test_direction(self):\n self.check_validation_error(\"value is not a valid enumeration member; permitted: '<', '>'\", direction=\"<>\")",
"def test_enum(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.enum_type.EnumFoo.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data['items']:\n if item['uid'] == 'example.enum_type.EnumFoo':\n self.assertEqual(\n item['children'],\n ['example.enum_type.EnumFoo.VALUE0', 'example.enum_type.EnumFoo.VALUE1']\n )\n if item['uid'] == 'example.enum_type.EnumFoo.VALUE0':\n self.assertEqual(\n item['syntax'],\n {'content': 'VALUE0 = 0', 'return': {'type': ['example.enum_type.EnumFoo']}}\n )\n self.assertEqual(\n item['type'],\n 'attribute'\n )\n if item['uid'] == 'example.enum_type.EnumFoo.VALUE1':\n self.assertEqual(\n item['syntax'],\n {'content': 'VALUE1 = 1', 'return': {'type': ['example.enum_type.EnumFoo']}}\n )\n self.assertEqual(\n item['type'],\n 'attribute'\n )",
"def test_enum(self):\n\n # XXX should test null or empty lists, ill-formed names\n name = 'george'\n pairs = [('abc', 3), ('def', 5), ('ghi', 7)]\n enum = M.EnumSpec.create(name, pairs)\n # self.assertEqual( ','.join(pairs), enum.__repr__())\n self.assertEqual(3, enum.value('abc'))\n self.assertEqual(5, enum.value('def'))\n self.assertEqual(7, enum.value('ghi'))",
"def test_additional_properties_with_array_of_enums(self):\n pass",
"def test_enum_out_of_range(self):\n @converters.wrap\n def inner_test(param: enums.DisconnectReason):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(lambda: inner_test(param='4'), 3114)",
"def test_enum_aliases():\n class RedundantStatus(OrderedStrEnum):\n\n draft = 'draft'\n unpublished = 'draft'\n published = 'published'\n archived = 'archived'\n\n __order__ = 'draft, unpublished, published, archived'\n\n yield (tools.eq_, RedundantStatus.draft.ordinal, RedundantStatus.unpublished.ordinal)\n yield (tools.eq_, RedundantStatus.draft, RedundantStatus.unpublished)\n yield (tools.assert_less, RedundantStatus.unpublished, RedundantStatus.archived)",
"def testNegativeInt(self):\n self.assertRaises(messages.EnumDefinitionError,\n messages.Enum.def_enum,\n {'Bad': -1},\n 'BadEnum')",
"def test_enumerated_initialization(enumerated_design_space):\n assert enumerated_design_space.name == 'enumerated'\n assert enumerated_design_space.description == 'desc'\n assert len(enumerated_design_space.descriptors) == 2\n assert enumerated_design_space.descriptors[0].key == 'x'\n assert enumerated_design_space.descriptors[1].key == 'color'\n assert enumerated_design_space.data == [{'x': 0.0, 'color': 'r'}, {'x': 1.0, 'color': 'b'}]",
"def enumeration(self):\n raise exceptions.NotImplementedError()",
"def test_03_visit_special(self):",
"def test_enum(self):\n i = Organism(state='LIVING')\n print(i)\n print(i.state)\n print(i.state.code)\n print(i.state.code.text)\n print(type(i.state))\n print(StateEnum.LIVING)\n assert str(i.state) == 'LIVING'\n assert i.state.code == StateEnum.LIVING\n obj = json.loads(json_dumper.dumps(i))\n assert obj['state'] == 'LIVING'\n obj = yaml.safe_load(yaml_dumper.dumps(i))\n assert obj['state'] == 'LIVING'\n reconstituted = json_loader.loads(json_dumper.dumps(i), target_class=Organism)\n print(f'RECONSTITUTED = {reconstituted}')\n assert reconstituted.state.code == StateEnum.LIVING",
"def test_enumerations( self ):\n with self.app.app_context():\n url = '/donation/enumeration/{}/{}'\n\n response = self.test_client.get( url.format( 'giftmodel', 'given_to' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n response = self.test_client.get( url.format( 'transactionmodel', 'type' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n response = self.test_client.get( url.format( 'transactionmodel', 'status' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n response = self.test_client.get( url.format( 'agentmodel', 'type' ), headers=self.headers )\n self.assertGreater( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )",
"def is_enum(self):\n return False",
"def test_error_type(self):\n\n value = 0\n\n iter_given_code = self.test_error_type.__iter__()\n length = self.test_error_type.__len__()\n\n while value < self.MAX_ERROR_TYPE_VALUE or length > 0:\n\n if value == 18:\n value = 0xffff\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_ERROR_TYPE_VALUE:\n value += 1\n\n length -= 1",
"def _assert_enum_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(type1.func_descriptor, type2.func_descriptor)",
"def enum(self):\r\n raise NotImplementedError",
"def test_enum_log(self):\n dt = h5t.special_dtype(enum=('i', {'a': 1, 'b': 2}))\n htype = h5t.py_create(dt, logical=True)\n self.assertIsInstance(htype, h5t.TypeEnumID)",
"def test_enumerate_list(self) -> None:\n self.assertEqual(list(my_enumerate('Janki Patel')), list(enumerate('Janki Patel')))\n \"\"\"test that in one side it has janki patel but in another side it has blank string so this is not equla function\"\"\"\n self.assertNotEqual(list(my_enumerate('Janki Patel')), list(enumerate('')))\n self.assertNotEqual(list(my_enumerate('Janki')), list(enumerate('Janak')))",
"def is_enumeration_type(self):\n raise exceptions.NotImplementedError()",
"def testTooLargeInt(self):\n self.assertRaises(messages.EnumDefinitionError,\n messages.Enum.def_enum,\n {'Bad': (2 ** 29)},\n 'BadEnum')"
] | [
"0.72406197",
"0.6865994",
"0.678129",
"0.6744498",
"0.65896946",
"0.6527991",
"0.64988136",
"0.6457832",
"0.6426472",
"0.64170265",
"0.6393479",
"0.6368146",
"0.63569844",
"0.63335544",
"0.62593937",
"0.6241549",
"0.62227577",
"0.622051",
"0.62172335",
"0.6112095",
"0.60840493",
"0.6059113",
"0.6043429",
"0.6019125",
"0.59928966",
"0.5980354",
"0.59799534",
"0.593065",
"0.5895129",
"0.5869339"
] | 0.70447934 | 1 |
sign of x, i.e. +1 or -1; returns 1 for x == 0 | def sign(x):
if x >= 0:
return 1
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign(x):\n if x >= 0:\n return 1\n else:\n return -1",
"def signum(x: float) -> float:\n if x < 0:\n return -1.0\n elif x > 0:\n return 1.0\n return 0.0",
"def sign(x):\n return(copysign(1, x))",
"def sign(num: float):\n return 1 if num >= 0 else -1",
"def sign(x):\n if x < 0.0:\n sign = -1\n elif x == 0.0:\n sign = 0\n elif x > 0.0:\n sign = 1\n return sign",
"def signum ( x ) :\n ### for integers\n from ostap.core.ostap_types import is_integer as _is_integer \n if _is_integer ( x ) : return 0 if 0 == x else +1 if 0<x else -1\n ## for floating numbers\n return 0 if iszero ( x ) else +1 if 0 < x else -1",
"def sign(a):\n return (a > 0) - (a < 0)",
"def sign(a) :\n return (a>0) - (a<0)",
"def sign(n: float) -> int:\n return 1 if n > 0 else -1",
"def sign(n):\n return (n > 0) - (n < 0)",
"def sign(v):\n return np.where(v < 0, -1.0, 1.0)",
"def _sign(self, number):\n return cmp(number,0)",
"def invert0(x):\n return 0 if x > 0 else 1",
"def signal(x):\r\n if x >= 0.0:\r\n return 1.0\r\n return -1.0",
"def isnegative(x):\n if x < 0 :\n return True\n return False",
"def sign(self):\n return 1 - 2 * self._ltz()",
"def sgn(x) -> int:\n if x > 0:\n return 1\n if x < 0:\n return -1\n return 0",
"def sign(d):\n if d > 0:\n return 1\n if d == 0:\n return 0\n if d < 0:\n return -1\n return None",
"def sign(e, x):\n if not isinstance(e, Basic):\n raise TypeError(\"e should be an instance of Basic\")\n\n if e.is_positive:\n return 1\n elif e.is_negative:\n return -1\n elif e.is_zero:\n return 0\n\n elif not e.has(x):\n from sympy.simplify import logcombine\n e = logcombine(e)\n return _sign(e)\n elif e == x:\n return 1\n elif e.is_Mul:\n a, b = e.as_two_terms()\n sa = sign(a, x)\n if not sa:\n return 0\n return sa * sign(b, x)\n elif isinstance(e, exp):\n return 1\n elif e.is_Pow:\n if e.base == S.Exp1:\n return 1\n s = sign(e.base, x)\n if s == 1:\n return 1\n if e.exp.is_Integer:\n return s**e.exp\n elif isinstance(e, log):\n return sign(e.args[0] - 1, x)\n\n # if all else fails, do it the hard way\n c0, e0 = mrv_leadterm(e, x)\n return sign(c0, x)",
"def _call(self, x):\n return x.ufuncs.sign()",
"def _sign(a):\n if a == 0.0:\n return 1\n else:\n return a/abs(a)",
"def nonzero_sign(\n x: type_alias.TensorLike,\n name: str = 'nonzero_sign') -> tf.Tensor:\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x)\n\n one = tf.ones_like(x)\n return tf.where(tf.greater_equal(x, 0.0), one, -one)",
"def negate(x):\n return x ^ 1",
"def fun(self, x):\n if np.any(x < 0):\n return np.inf\n else:\n return 0",
"def replace_lowest_one_with_zero(x):\n return x & (x-1)",
"def p(x):\n if x<0 or x>1:\n return 0\n else:\n return 1",
"def opposite(x):\n return -1*x",
"def fun(self, x):\n if np.any(x > 0):\n return np.inf\n else:\n return 0",
"def sign_st(x):\n from tframe import hub as th\n def sign(v):\n return (tf.cast(tf.math.greater_equal(v, 0), th.dtype) - 0.5) * 2\n def grad(dy):\n return dy * tf.cast(tf.logical_and(\n tf.greater_equal(x, -1.0), tf.less_equal(x, 1.0)), dtype=th.dtype)\n return sign(x), grad",
"def absolute(x):\n return -x if x < 0 else x"
] | [
"0.923454",
"0.83496517",
"0.8333785",
"0.82949734",
"0.8244531",
"0.8236217",
"0.7956602",
"0.78698367",
"0.7838812",
"0.7807624",
"0.75421077",
"0.742348",
"0.74220794",
"0.7420152",
"0.72951996",
"0.7290333",
"0.7282625",
"0.72503245",
"0.71639484",
"0.71388626",
"0.70881534",
"0.7084784",
"0.70101655",
"0.6948384",
"0.69056135",
"0.68849087",
"0.68745726",
"0.6844042",
"0.68016744",
"0.67221415"
] | 0.92341864 | 1 |
gives the real roots of x^2 + a1 x + a0 = 0 | def _realroots_quadratic(a1, a0):
D = a1*a1 - 4*a0
if D < 0:
return []
SD = math.sqrt(D)
return [0.5 * (-a1 + SD), 0.5 * (-a1 - SD)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _realroots_cubic(a2, a1, a0):\n # see http://mathworld.wolfram.com/CubicFormula.html for details\n\n Q = (3*a1 - a2*a2) / 9.0\n R = (9*a2*a1 - 27*a0 - 2*a2*a2*a2) / 54.0\n D = Q*Q*Q + R*R\n\n if D > 0: # one real and two complex roots\n SD = math.sqrt(D)\n if R + SD >= 0:\n S = (R + SD)**(1/3.0)\n else:\n S = -(-R - SD)**(1/3.0)\n if R - SD >= 0:\n T = (R - SD)**(1/3.0)\n else:\n T = -(SD - R)**(1/3.0)\n return [S + T - a2/3.0]\n elif D == 0:\n if Q == 0: # one real root (R==0)\n return [-a2/3.0]\n else: # two real roots (R>0, Q<0)\n S = -math.sqrt(-Q)\n return [2*S - a2/3.0, -S - a2/3.0]\n else: # three real roots (Q<0)\n SQ = math.sqrt(-Q)\n arg = R / (SQ**3)\n if arg >= 1:\n theta = 0\n elif arg <= -1:\n theta = math.pi\n else:\n theta = math.acos(R/(SQ**3))\n return [2 * SQ * math.cos((theta + 2*2*i*math.pi)/3.0) - a2/3.0 for i in range(3)]",
"def root_2(a, b, c):\n p1 = sqrt(b * b - 4. * a * c)\n p2 = -2. * a\n\n x1 = (b - p1) / p2\n x2 = (b + p1) / p2\n\n return x1, x2",
"def roots_quadratic(f):\n\n a, b, c = f.all_coeffs()\n dom = f.get_domain()\n\n def _sqrt(d):\n # remove squares from square root since both will be represented\n # in the results; a similar thing is happening in roots() but\n # must be duplicated here because not all quadratics are binomials\n co = []\n other = []\n for di in Mul.make_args(d):\n if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0:\n co.append(Pow(di.base, di.exp//2))\n else:\n other.append(di)\n if co:\n d = Mul(*other)\n co = Mul(*co)\n return co*sqrt(d)\n return sqrt(d)\n\n def _simplify(expr):\n if dom.is_Composite:\n return factor(expr)\n else:\n from sympy.simplify.simplify import simplify\n return simplify(expr)\n\n if c is S.Zero:\n r0, r1 = S.Zero, -b/a\n\n if not dom.is_Numerical:\n r1 = _simplify(r1)\n elif r1.is_negative:\n r0, r1 = r1, r0\n elif b is S.Zero:\n r = -c/a\n if not dom.is_Numerical:\n r = _simplify(r)\n\n R = _sqrt(r)\n r0 = -R\n r1 = R\n else:\n d = b**2 - 4*a*c\n A = 2*a\n B = -b/A\n\n if not dom.is_Numerical:\n d = _simplify(d)\n B = _simplify(B)\n\n D = factor_terms(_sqrt(d)/A)\n r0 = B - D\n r1 = B + D\n if a.is_negative:\n r0, r1 = r1, r0\n elif not dom.is_Numerical:\n r0, r1 = [expand_2arg(i) for i in (r0, r1)]\n\n return [r0, r1]",
"def getRoots(self):\n a, b, c = self.getCoefficients()[2], self.getCoefficients()[1], self.getCoefficients()[0]\n delta = b**2 - 4*a*c\n if delta >= 0:\n roots = sorted([(-b - math.sqrt(delta))/(2*a), (-b + math.sqrt(delta))/(2*a)])\n else:\n roots = sorted([(-b - math.sqrt(-delta)*1j)/(2*a), (-b + math.sqrt(-delta)*1j)/(2*a)], key=lambda x: (x.real, x.imag))\n return roots",
"def analyticSol (x):\n\treturn x*(1-x);",
"def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)",
"def find_roots(a: int, b: int, c: int):\n if a == 0:\n x = -c / b\n print(f\"Solution is {x}\")\n return x\n\n d = b ** 2 - (4 * a * c)\n\n if d < 0:\n print(\"No solutions\")\n return None\n elif d == 0:\n x = (-b + sqrt(d)) / (2 * a)\n print(f\"Solution is {x}\")\n return x\n\n x1 = (-b + sqrt(d)) / (2 * a)\n x2 = (-b - sqrt(d)) / (2 * a)\n\n print(f\"Solutions are {x1} and {x2}\")\n return x1, x2",
"def rkf( f, a, b, x0, tol, hmax, hmin ):\n\n # Coefficients used to compute the independent variable argument of f\n\n a2 = 2.500000000000000e-01 # 1/4\n a3 = 3.750000000000000e-01 # 3/8\n a4 = 9.230769230769231e-01 # 12/13\n a5 = 1.000000000000000e+00 # 1\n a6 = 5.000000000000000e-01 # 1/2\n\n # Coefficients used to compute the dependent variable argument of f\n\n b21 = 2.500000000000000e-01 # 1/4\n b31 = 9.375000000000000e-02 # 3/32\n b32 = 2.812500000000000e-01 # 9/32\n b41 = 8.793809740555303e-01 # 1932/2197\n b42 = -3.277196176604461e+00 # -7200/2197\n b43 = 3.320892125625853e+00 # 7296/2197\n b51 = 2.032407407407407e+00 # 439/216\n b52 = -8.000000000000000e+00 # -8\n b53 = 7.173489278752436e+00 # 3680/513\n b54 = -2.058966861598441e-01 # -845/4104\n b61 = -2.962962962962963e-01 # -8/27\n b62 = 2.000000000000000e+00 # 2\n b63 = -1.381676413255361e+00 # -3544/2565\n b64 = 4.529727095516569e-01 # 1859/4104\n b65 = -2.750000000000000e-01 # -11/40\n\n # Coefficients used to compute local truncation error estimate. These\n # come from subtracting a 4th order RK estimate from a 5th order RK\n # estimate.\n\n r1 = 2.777777777777778e-03 # 1/360\n r3 = -2.994152046783626e-02 # -128/4275\n r4 = -2.919989367357789e-02 # -2197/75240\n r5 = 2.000000000000000e-02 # 1/50\n r6 = 3.636363636363636e-02 # 2/55\n\n # Coefficients used to compute 4th order RK estimate\n\n c1 = 1.157407407407407e-01 # 25/216\n c3 = 5.489278752436647e-01 # 1408/2565\n c4 = 5.353313840155945e-01 # 2197/4104\n c5 = -2.000000000000000e-01 # -1/5\n\n # Set t and x according to initial condition and assume that h starts\n # with a value that is as large as possible.\n \n t = a\n x = numpy.array(x0)\n h = hmax\n\n # Initialize arrays that will be returned\n\n T = numpy.array( [t] )\n X = numpy.array( [x] )\n \n while t < b:\n\n # Adjust step size when we get to last interval\n\n if t + h > b:\n h = b - t;\n\n # Compute values needed to compute truncation error estimate and\n # the 4th order RK estimate.\n\n k1 = h * f( x, t )\n k2 = h * f( x + b21 * k1, t + a2 * h )\n k3 = h * f( x + b31 * k1 + b32 * k2, t + a3 * h )\n k4 = h * f( x + b41 * k1 + b42 * k2 + b43 * k3, t + a4 * h )\n k5 = h * f( x + b51 * k1 + b52 * k2 + b53 * k3 + b54 * k4, t + a5 * h )\n k6 = h * f( x + b61 * k1 + b62 * k2 + b63 * k3 + b64 * k4 + b65 * k5, \\\n t + a6 * h )\n\n # Compute the estimate of the local truncation error. If it's small\n # enough then we accept this step and save the 4th order estimate.\n \n r = abs( r1 * k1 + r3 * k3 + r4 * k4 + r5 * k5 + r6 * k6 ) / h\n if len( numpy.shape( r ) ) > 0:\n r = max( r )\n if r <= tol:\n t = t + h\n x = x + c1 * k1 + c3 * k3 + c4 * k4 + c5 * k5\n T = numpy.append( T, t )\n X = numpy.append( X, [x], 0 )\n\n # Now compute next step size, and make sure that it is not too big or\n # too small.\n\n h = h * min( max( 0.84 * ( tol / r )**0.25, 0.1 ), 4.0 )\n\n if h > hmax:\n h = hmax\n elif h < hmin:\n raise RuntimeError(\"Error: Could not converge to the required tolerance %e with minimum stepsize %e.\" % (tol,hmin))\n break\n # endwhile\n\n return ( T, X )",
"def root_1(a, b):\n return -b / a",
"def root_3(a, b, c, d):\n abc = a * b * c\n bbb = b * b * b\n aad = a * a * d\n\n dd = (18. * abc * d - 4. * bbb * d\n + b * b * c * c - 4. * a * c * c * c\n - 27. * aad * d)\n d0 = b * b - 3. * a * c\n\n # second and third cubic unity roots (first is just 1)\n cu2 = -0.5 + 0.86602540378443864676j\n cu3 = -0.5 - 0.86602540378443864676j\n\n if not dd and not d0: # all real roots\n x1 = x2 = x3 = -b / (3. * a)\n elif not dd and d0: # double root, simple root\n x1 = x2 = ((9. * a * d - b * c) / (2. * d0))\n x3 = (4. * abc - 9. * aad - bbb) / (a * d0)\n else:\n d1 = 2. * bbb - 9. * abc\n d1 = d1 + 27. * aad\n\n if not d0: cin = d1 + 0j # inner terms cancel\n else: cin = (d1 - sqrt(-27.0 * a * a * dd)) / 2.\n\n cc = cin ** (1. / 3.)\n p = (-1. / (3. * a))\n\n x1 = p * (b + cc + d0 / cc)\n x2 = p * (b + cu2 * cc + d0 / (cu2 * cc))\n x3 = p * (b + cu3 * cc + d0 / (cu3 * cc))\n\n return x1, x2, x3",
"def roots_cubic(f, trig=False):\n if trig:\n a, b, c, d = f.all_coeffs()\n p = (3*a*c - b**2)/(3*a**2)\n q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)\n D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2\n if (D > 0) == True:\n rv = []\n for k in range(3):\n rv.append(2*sqrt(-p/3)*cos(acos(q/p*sqrt(-3/p)*Rational(3, 2))/3 - k*pi*Rational(2, 3)))\n return [i - b/3/a for i in rv]\n\n # a*x**3 + b*x**2 + c*x + d -> x**3 + a*x**2 + b*x + c\n _, a, b, c = f.monic().all_coeffs()\n\n if c is S.Zero:\n x1, x2 = roots([1, a, b], multiple=True)\n return [x1, S.Zero, x2]\n\n # x**3 + a*x**2 + b*x + c -> u**3 + p*u + q\n p = b - a**2/3\n q = c - a*b/3 + 2*a**3/27\n\n pon3 = p/3\n aon3 = a/3\n\n u1 = None\n if p is S.Zero:\n if q is S.Zero:\n return [-aon3]*3\n u1 = -root(q, 3) if q.is_positive else root(-q, 3)\n elif q is S.Zero:\n y1, y2 = roots([1, 0, p], multiple=True)\n return [tmp - aon3 for tmp in [y1, S.Zero, y2]]\n elif q.is_real and q.is_negative:\n u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)\n\n coeff = I*sqrt(3)/2\n if u1 is None:\n u1 = S.One\n u2 = Rational(-1, 2) + coeff\n u3 = Rational(-1, 2) - coeff\n b, c, d = a, b, c # a, b, c, d = S.One, a, b, c\n D0 = b**2 - 3*c # b**2 - 3*a*c\n D1 = 2*b**3 - 9*b*c + 27*d # 2*b**3 - 9*a*b*c + 27*a**2*d\n C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)\n return [-(b + uk*C + D0/C/uk)/3 for uk in [u1, u2, u3]] # -(b + uk*C + D0/C/uk)/3/a\n\n u2 = u1*(Rational(-1, 2) + coeff)\n u3 = u1*(Rational(-1, 2) - coeff)\n\n if p is S.Zero:\n return [u1 - aon3, u2 - aon3, u3 - aon3]\n\n soln = [\n -u1 + pon3/u1 - aon3,\n -u2 + pon3/u2 - aon3,\n -u3 + pon3/u3 - aon3\n ]\n\n return soln",
"def cubic_roots(a0, a1, a2, a3=None):\n\n N = len(a0)\n out = np.full([N, 3], np.nan)\n\n # Calculate the normalized form x^3 + a2 * x^2 + a1 * x + a0 = 0\n b_a = a2 if a3 is None else a2 / a3\n b_a2 = b_a * b_a\n c_a = a1 if a3 is None else a1 / a3\n d_a = a0 if a3 is None else a0 / a3\n\n # Solve the cubic equation\n Q = (3 * c_a - b_a2) / 9\n R = (9 * b_a * c_a - 27 * d_a - 2 * b_a * b_a2) / 54\n Q3 = Q * Q * Q\n D = Q3 + R * R\n b_a_3 = (1.0 / 3.0) * b_a\n\n sel = Q == 0.0\n if np.any(sel):\n o = out[sel, 0]\n\n sel2 = R == 0.0\n if np.any(sel2):\n o[sel2] = -b_a_3[sel][sel2]\n\n if np.any(~sel2):\n o[~sel2] = np.pow(2 * R[sel][~sel2], 1 / 3.0) - b_a_3[sel][~sel2]\n\n out[sel, 0] = o\n\n sel = D <= 0.0\n if np.any(sel):\n # Three real roots\n theta = np.arccos(R[sel] / np.sqrt(-Q3[sel]))\n sqrt_Q = np.sqrt(-Q[sel])\n\n out[sel, 0] = 2 * sqrt_Q * np.cos(theta / 3.0) - b_a_3[sel]\n out[sel, 1] = 2 * sqrt_Q * np.cos((theta + 2 * np.pi) / 3.0) - b_a_3[sel]\n out[sel, 2] = 2 * sqrt_Q * np.cos((theta + 4 * np.pi) / 3.0) - b_a_3[sel]\n\n return out",
"def test_roots_slow():\n a, b, c, d, x = symbols(\"a,b,c,d,x\")\n\n f1 = x ** 2 * c + (a / b) + x * c * d - a\n f2 = x ** 2 * (a + b * (c - d) * a) + x * a * b * c / (b * d - d) + (a * d - c / d)\n\n assert list(roots(f1, x).values()) == [1, 1]\n assert list(roots(f2, x).values()) == [1, 1]\n\n (zz, yy, xx, zy, zx, yx, k) = symbols(\"zz,yy,xx,zy,zx,yx,k\")\n\n e1 = (zz - k) * (yy - k) * (xx - k) + zy * yx * zx + zx - zy - yx\n e2 = (zz - k) * yx * yx + zx * (yy - k) * zx + zy * zy * (xx - k)\n\n assert list(roots(e1 - e2, k).values()) == [1, 1, 1]\n\n f = x ** 3 + 2 * x ** 2 + 8\n R = list(roots(f).keys())\n\n assert not any(i for i in [f.subs(x, ri).n(chop=True) for ri in R])",
"def compute_root(poly, x_0, epsilon):\n guess = 0\n while True:\n poly_value = evaluate_poly(poly, x_0)\n if -epsilon < poly_value < epsilon:\n guess += 1\n return x_0, guess\n else:\n poly_derivative = compute_deriv(poly)\n deriv_value = evaluate_poly(poly_derivative, x_0)\n x_0 = x_0 - poly_value / deriv_value\n guess += 1",
"def find_root(function, point_a, point_b, step, tol, max_iterations, \n show_process = False):\n left_point , right_point = search_interval_3d(function, point_a, point_b, \n step, tol, max_iterations,\n show_process)\n \n point_where_zero = bisection_3d(function, left_point, right_point, tol, \n max_iterations, show_process)\n \n return point_where_zero",
"def alpha_exact(z, x, beta):\n f = lambda a: a - beta/2 * sqrt(x**2 + 4*(1+x) * sin(a)**2 ) - z\n \n res = scipy.optimize.root_scalar(f, bracket=(-1,1))\n \n return res.root",
"def _realroots_quartic(a3, a2, a1, a0):\n # see http://mathworld.wolfram.com/QuarticEquation.html for details\n ys = _realroots_cubic(-a2, a1*a3 - 4*a0, 4*a0*a2 - a1*a1 - a0*a3*a3)\n ys = [y for y in ys if a3*a3-4*a2+4*y >= 0 and y*y-4*a0 >= 0]\n if not ys:\n return []\n y1 = min(ys)\n if a3*y1-2*a1 < 0:\n return (_realroots_quadratic(0.5*(a3+math.sqrt(a3*a3-4*a2+4*y1)), 0.5*(y1-math.sqrt(y1*y1-4*a0))) +\n _realroots_quadratic(0.5*(a3-math.sqrt(a3*a3-4*a2+4*y1)), 0.5*(y1+math.sqrt(y1*y1-4*a0))))\n else:\n return (_realroots_quadratic(0.5*(a3+math.sqrt(a3*a3-4*a2+4*y1)), 0.5*(y1+math.sqrt(y1*y1-4*a0))) +\n _realroots_quadratic(0.5*(a3-math.sqrt(a3*a3-4*a2+4*y1)), 0.5*(y1-math.sqrt(y1*y1-4*a0))))",
"def rootfind_newton(func, x0, a, b, maxiter=50, tol=1.0e-11):\n\n for iter in xrange(maxiter):\n\n fval, fpval, args = func(x0)\n # print \"x0=%.4f fval=%.2e fpval=%.2e [%.4f, %.4f]\" % (x0, fval, fpval, a, b)\n\n if fval < 0:\n a = x0\n else:\n b = x0\n\n x = x0 - fval/fpval\n if not (a < x < b):\n # Once we have bracketed the root, we don't allow the\n # iterations to leave the bracket.\n x = 0.5*(a+b)\n\n if np.abs(x-x0) < tol or np.abs(fval) < tol:\n break\n\n x0 = x\n\n return x, fval, iter, args",
"def test_cubic_roots(roots, a0, a1, a2, a3=None, tol=1.0e-12):\n\n N = len(a0)\n for n in range(N):\n c0 = a0[n]\n c1 = a1[n]\n c2 = a2[n]\n c3 = a3[n]\n\n print(f\"Polynomial {n}: a = {(c0,c1,c2,c3)}\")\n\n rts = np.unique(roots[n])\n rts = rts[~np.isnan(rts)]\n\n for x in rts:\n f = c0 + c1 * x + c2 * x**2 + c3 * x**3\n ok = np.abs(f) <= tol\n\n print(f\" root x = {x}: f(x) = {f} {'OK' if ok else 'FAILED'}\")\n\n if not ok:\n raise Exception(\"NOT OK!\")\n\n if len(rts) == 0:\n print(\" no real roots.\")",
"def root1(self):\r\n if self.discriminant() < 0.0:\r\n return None\r\n return(-self.__b + math.sqrt(self.discriminant()))/(2*self.__a)",
"def iroots(a, b, c):\n discriminant = b*b-4*a*c\n if discriminant == 0:\n return -b/(2*a), None\n else:\n return (-b+isqrt(discriminant))/(2*a), (-b-isqrt(discriminant))/(2*a)",
"def compute_root(poly, x_0, epsilon):\n # TO DO ...\n diff = evaluate_poly(poly, x_0)\n count = 0\n\n if abs(diff) > epsilon:\n #Newton's Method Formula\n x_1 = x_0 - (evaluate_poly(poly, x_0) / evaluate_poly(compute_deriv(poly), x_0))\n #Recursion!\n x_0 , count = compute_root(poly, x_1, epsilon)\n else:\n pass\n\n return x_0, count + 1",
"def test_cases():\r\n quadratic_roots(1,3,-21)\r\n quadratic_roots(2,-4,-6)\r\n quadratic_roots(1,4,-12)\r\n quadratic_roots(4,12,9)\r\n quadratic_roots(-2,-11,-21)\r\n quadratic_roots(4,1,4)\r\n quadratic_roots(1,1,0)\r\n quadratic_roots(1,0,-16)\r\n quadratic_roots(1,-14,-49)\r\n quadratic_roots(1,10,25)",
"def root2(self):\r\n if self.discriminant() < 0.0:\r\n return None\r\n return(-self.__b - math.sqrt(self.discriminant()))/(2*self.__a)",
"def evaluate(x,a,b,c,k1,k2,f1,f2):\n return a * np.sin(k1 * x - f1) + b * np.cos(k2 * x - f2) + c",
"def solve(n=5000,C=-6*10**11,a=900,b=3):\n coeffs = np.zeros(n+2)\n coeffs[0] = a-b*n\n coeffs[1] = b*(n+1) - a\n coeffs[-3] = -C\n coeffs[-2] = 2*C - a\n coeffs[-1] = a+b-C\n mp.dps = 27\n roots = polyroots(coeffs)\n for root in roots:\n print root",
"def rewofzs1(x,y):\n z=x+y*(1j)\n a=1.0/(2.0*z*z)\n q=(1j)/(z*jnp.sqrt(jnp.pi))*(1.0 + a*(1.0 + a*3.0))\n return jnp.real(q)",
"def rootsearch(self, f, a, b, dx):\n x1 = a \n f1 = f(a)\n x2 = a + dx\n f2 = f(x2)\n while f1*f2 > 0.0:\n if x1 >= b: \n return None,None\n x1 = x2\n f1 = f2\n x2 = x1 + dx\n f2 = f(x2)\n else:\n return x1,x2",
"def sqrt(x):\n return 0.0",
"def alpha_exact_case_D(z, x, beta, lamb):\n #beta = np.sqrt(beta2)\n f = lambda a: a + 1/2 * (lamb - beta*sqrt(lamb**2 + x**2 + 4*(1+x)*sin(a)**2 + 2*lamb*sin(2*a))) - z\n \n res = scipy.optimize.root_scalar(f, bracket=(-1,1))\n \n return res.root"
] | [
"0.7072745",
"0.6673059",
"0.66104",
"0.65342593",
"0.6513323",
"0.6455991",
"0.64308345",
"0.6430545",
"0.6397649",
"0.63600725",
"0.6351306",
"0.6308762",
"0.6297681",
"0.62692684",
"0.62394047",
"0.6201631",
"0.61936194",
"0.61782265",
"0.6142859",
"0.6139535",
"0.6117559",
"0.60987926",
"0.6061084",
"0.6059221",
"0.6048745",
"0.6033193",
"0.60257673",
"0.6023998",
"0.60197824",
"0.6006587"
] | 0.7336006 | 0 |
In order to split, there must be a token that is two words. That means there is at least one duplicated word_end that is not a word. | def split_precondition(
tokens: Sequence[str], words: Sequence[str], word_ends: Sequence[str]
) -> bool:
duplicated_word_ends = []
for end1, end2 in zip(word_ends, word_ends[1:]):
if end1 == end2:
duplicated_word_ends.append(end1)
if not duplicated_word_ends:
return False
duplicate_not_word = False
for duplicate in duplicated_word_ends:
if duplicate not in words:
duplicate_not_word = True
break
if not duplicate_not_word:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subword_tokenize(self, word: str) -> List[str]:\r\n end_idx = min([len(word), self.ngram_max])\r\n sw_tokens = [self.SOW]\r\n start_idx = 0\r\n\r\n while start_idx < len(word):\r\n subword = word[start_idx:end_idx]\r\n if subword in self.bpe_vocab:\r\n sw_tokens.append(subword)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n elif len(subword) == 1:\r\n sw_tokens.append(self.UNK)\r\n start_idx = end_idx\r\n end_idx = min([len(word), start_idx + self.ngram_max])\r\n else:\r\n end_idx -= 1\r\n\r\n sw_tokens.append(self.EOW)\r\n return sw_tokens",
"def guess_splitwords():\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])",
"def test_wordMatch(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n self.failUnless(self.sampleSplitText == words)",
"def _split_to_wordpieces(self, tokens: List[str]) -> Tuple[List[str], List[int]]:\n bert_tokens = [] # Original tokens split into wordpieces.\n # Index of each wordpiece that starts a new token.\n token_start_indices = []\n for i, token in enumerate(tokens):\n # '+ 1' is because bert_tokens will be prepended by [CLS] token later.\n token_start_indices.append(len(bert_tokens) + 1)\n pieces = self._tokenizer.tokenize(token)\n bert_tokens.extend(pieces)\n return bert_tokens, token_start_indices",
"def return_split_word(word):\n\n res = []\n words = find_combination_of_words(word)\n for word in words:\n if len(word[1]) > 0:\n res.append((word[0], max(word[1], key=lambda w: len(w))))\n\n return res",
"def split_word(word):\n return [(word[:i], word[i:]) for i in range(len(word) + 1)]",
"def create_splitword_tags(wordpart_1, wordpart_2):\n\n # Create new <splitwords> tag\n if not sentence.sem.find('splitwords'):\n splitwords = chapter_input.new_tag('splitwords')\n sentence.sem.insert(2, splitwords)\n else:\n splitwords = sentence.sem.find('splitwords')\n\n # Create new <splitword> tag within <splitwords>\n splitword = chapter_input.new_tag('splitword', idref=t_id)\n splitwords.append(splitword)\n\n # Create sub tags <part> 1\n part1 = chapter_input.new_tag('part', word=wordpart_1, id=t_id+'_s0')\n splitword.insert(0, part1)\n\n # Create sub tags <part> 2\n part2 = chapter_input.new_tag('part', word=wordpart_2, id=t_id+'_s1')\n splitword.insert(1, part2)",
"def test_forward_end_word_start_of_word(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.310\", \"1.310\"),\n after_sel=(\"1.317\", \"1.317\"),\n command_name=\"forward-end-word\",\n )",
"def test_create_tokens_with_no_repeating_words():\n list_responses = ['I am testing', 'this is a test', 'make my tokens']\n check = edurate_gensim.create_tokens(list_responses)\n assert check == [['testing'], ['test'], ['make', 'tokens']]\n assert (\"am\" in check) is False\n assert (\"my\" in check) is False",
"def _split_message(self, msg, extra_prefix):\n words = msg.split(\" \")\n lastword = \"\"\n previous = \"\"\n msg += \" \" + self.stop_word\n\n for word in words:\n key = self._make_key(extra_prefix, self.separator.join([previous, lastword]))\n self.db.add(key, word)\n yield [previous, lastword, word]\n previous = lastword\n lastword = word",
"def splitWordList(self, text):\n result = list()\n if text is None:\n return result\n\n t = text + \"⁋\"\n t = t.replace('\\n', '⁋')\n t = re.sub(WordListProcessor.REFERENCE_PATTERN, \"\", t)\n t = re.sub(WordListProcessor.SUPERSCRIPT_PATTERN, \"\", t) # TODO: Extract sense!\n t = re.sub(WordListProcessor.HTML_REMOVER, \"\", t)\n t = t.replace(\""\", \"\\\"\")\n t = t.replace(\",\", \"⁋,\")\n t = t.replace(\";\", \"⁋\")\n # print(t)\n # t = re.sub(WordListProcessor.BRACKETED_DELIMITER, \"$1$2$3$4$5$6\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER1, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER2, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER3, \"$1$2\", t)\n t = self.escapeDelimiters(t)\n # print(t)\n t = t.replace(\"⁋;\", \"⁋\")\n t = t.replace(\"⁋,\", \"⁋\")\n t = t.replace(\"]] or [[\", \"]]⁋[[\")\n t = t.replace(\"]] and [[\", \"]]⁋[[\")\n t = t.replace(\" - \", \"⁋\")\n # t = t.replace(\" / \", \"⁋\")\n j = t.find(\" / \") # Use ' / ' only as a delimiter if there are at least two of them!\n if j >= 0:\n j = t.find(\" / \", j)\n if j >= 0:\n t = t.replace(\" / \", \"⁋\")\n # print(t)\n\n # print(t)\n while True:\n delim = t.find('⁋')\n if delim >= 0:\n word = t[0:delim]\n if word:\n # Normalize the word.\n word = word.strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n word = self.deWikify(word).strip()\n word = self.removeBrackets(word).strip()\n word = self.removeTemplates(word).strip()\n word = self.removeComments(word).strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n if word.endswith(\".\"):\n word = word[:-1].strip()\n if word.endswith(\",\"):\n word = word[:-1].strip()\n\n # Check for slashes.\n word = word.replace(\" / \", \"/\")\n word = word.replace(\"/ \", \"/\")\n i = word.find('/')\n if word:\n if i >= 0 and word.find(' ') < 0:\n while True:\n result.append(word[0:i])\n word = word[i + 1:]\n i = word.find('/')\n if i < 0:\n break\n result.append(word)\n else:\n result.append(word)\n\n t = t[delim + 1:]\n\n else:\n break\n\n return result",
"def _parse_word(self, token, ctxinfo) :\n ignore = False\n if token.startswith(\"|\") and token.endswith(\"|\") : # regular token\n token = token[1:-1]\n token_parts = token.rsplit( \"_\", 1 )\n if len(token_parts) == 2 :\n lemma_and_index, pos = token_parts\n lemma_parts = lemma_and_index.rsplit( \":\", 1 )\n if len(lemma_parts) == 2 : \n lemma, index = lemma_parts\n if lemma.endswith(\"\\\\\") :\n lemma = lemma[:-1] # separator was \\: \n else :\n ignore = True\n else :\n ignore = True\n if ignore :\n ctxinfo.warn(\"Ignoring bad token `{token}`\", token=token)\n return None\n else : \n return (lemma, index, pos)",
"def test_create_tokens_with_repeating_words():\n list_responses = ['I am testing', 'testing testing testing', 'make my tokens']\n check = edurate_gensim.create_tokens(list_responses)\n assert check == [['testing'], ['testing', 'testing', 'testing'], ['make', 'tokens']]\n assert (\"I\" in check) is False\n assert (\"am\" in check) is False\n assert (\"my\" in check) is False",
"async def _split_message(self, msg, extra_prefix):\n words = msg.split(\" \")\n lastword = \"\"\n previous = \"\"\n msg += \" \" + self.stop_word\n\n for word in words:\n key = self._make_key(extra_prefix, self.separator.join([previous, lastword]))\n await self.db.add(key, word)\n yield [previous, lastword, word]\n previous = lastword\n lastword = word",
"def valid(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(series_of_words.pop())\n for word in series_of_words:\n if word in words:\n return False\n words.append(word)\n return True",
"def create_splitword_negated(word_part):\n\n split_word = sentence.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create negated\n negated = chapter_input.new_tag('fe')\n negated['name'] = NEGATED_TAG_NAME\n negated['id'] = last_frame.get('id')+'_e2'\n last_frame.insert(2, negated)\n\n # Create negated <fenode>\n negated_fenode = chapter_input.new_tag('fenode')\n negated_fenode['idref'] = wordpart_idref.get('id')\n negated_fenode['is_split'] = 'yes'\n negated.insert(0, negated_fenode)",
"def tokenize(self, text):\n split = []\n terms = []\n for term in re.split('\\s', text):\n # If the term is empty, skip it, since we probably just have\n # multiple whitespace cahracters.\n if term == '':\n continue\n # Now, a word can be preceded or succeeded by symbols, so let's\n # split those out\n @timeout_decorator.timeout(1)\n def slow_match(term):\n return TERM_SPEC.search(term)\n try:\n match = slow_match(term)\n except TimeoutError:\n import sys\n print >> sys.stderr, \"TIMEOUT when running regex on %s (%s).\\nRe-running with re2 (sorry if you have Unicode, this will tokenize it wrong)\" % (term.encode(\"utf-8\"), repr(term))\n match = TERM_SPEC2.search(term)\n if match is None:\n terms.append(term)\n split.append(True)\n continue\n\n # In the new TERM_SPEC, skip the third regex group\n # -jpt\n# import sys\n# print >> sys.stderr, \"match groups =\", repr(match.groups()), term.encode(\"utf-8\")\n match_groups = match.groups()[0], match.groups()[1], match.groups()[3]\n for subTerm in match_groups:\n if subTerm != '':\n terms.append(subTerm)\n split.append(False)\n split[-1] = True\n return split, terms",
"def filter_tokens(tokens):\n token_list = []\n for token in tokens:\n if len(token) > 2 and token not in STOPWORDS:\n token_list.append(token)\n else:\n continue\n \n return token_list",
"def index_word(self, token):\n if token not in self.word_2_idx:\n idx = len(self.word_2_idx)\n self.word_2_idx[token] = idx\n self.idx_2_word[idx] = token",
"def sub_tokenize(self,\n token: str,\n ) -> List[str]:\n # Refuse to handle tokens with characters other than alnum & _'\n if not self.can_tokenize(token): return [token]\n\n sub_tokens: List[str] = None\n\n # First pass - snake_case, CamelCase, digits\n sub_tokens_first_pass: List[str] = self.RE_FIRST_PASS_SPLITTER.split(token)\n\n # Special case: pure CamelCase, skip second pass\n if not \"_\" in sub_tokens_first_pass and sub_tokens_first_pass[0][0].isupper():\n sub_tokens = sub_tokens_first_pass\n # end if\n\n # Second pass - utilizing context\n if sub_tokens is None:\n sub_tokens_second_pass = list()\n after_first_underscore = False\n after_second_underscore = False\n maybe_suffix = False\n for st in sub_tokens_first_pass:\n if st == \"_\":\n maybe_suffix = False\n if not after_first_underscore:\n after_first_underscore = True\n elif not after_second_underscore:\n after_second_underscore = True\n # end if\n sub_tokens_second_pass.append(st)\n continue\n # end if\n\n if after_second_underscore and not st[0].islower(): maybe_suffix = True\n\n if len(st) == 1:\n if not maybe_suffix: maybe_suffix = True\n sub_tokens_second_pass.append(st)\n continue\n # end if\n\n if not maybe_suffix:\n # Carefully identify additional suffixes, only if the new core part is more frequent than the current one\n fractions = list()\n core = st\n while len(core) > 0:\n # Keep atomic words\n if self.context[core] == np.PINF: break\n for suffix in self.SUFFIXES:\n if core.endswith(suffix):\n fractions.insert(0, suffix)\n core = core[:-len(suffix)]\n break\n # end if\n else:\n break\n # end for-else\n # end while\n if len(core) > 0: fractions.insert(0, core)\n\n while len(fractions) > 1:\n if self.context[fractions[0].lower()] >= self.context[st.lower()]:\n # Adopt the new split\n break\n else:\n fractions[0] = fractions[0]+fractions[1]\n del fractions[1]\n # end if\n # end while\n\n # Prefix checking (one character)\n if len(fractions[0]) > 1 and fractions[0][0] and self.context[fractions[0][1:].lower()] >= self.context[fractions[0].lower()]:\n # Take out the first char as prefix\n fractions.insert(0, fractions[0][0])\n fractions[1] = fractions[1][1:]\n # end if\n\n sub_tokens_second_pass.extend(fractions)\n\n maybe_suffix = True\n else:\n # Splits the suffix into small pieces, unless the word exists in context\n if self.context[st.lower()] >= self.CONTEXT_THRESHOLD:\n sub_tokens_second_pass.append(st)\n else:\n fractions = list()\n remain = st\n while len(remain) > 0:\n # Try full match in context\n if self.context[remain.lower()] >= self.CONTEXT_THRESHOLD:\n fractions.insert(0, remain)\n remain = \"\"\n break\n else:\n # Try to find a suffix\n for suffix in self.SUFFIXES:\n if remain.endswith(suffix):\n fractions.insert(0, suffix)\n remain = remain[:-len(suffix)]\n break\n # end if\n else:\n # Try to find a suffix match in context\n for length in range(1, len(remain) + 1):\n if self.context[remain[-length:].lower()] >= self.CONTEXT_THRESHOLD:\n fractions.insert(0, remain[-length:])\n remain = remain[:-length]\n break\n # end if\n else:\n # If this is a CamelCase, leave as is; else take the last char out\n if remain[0].isupper():\n fractions.insert(0, remain)\n break\n else:\n fractions.insert(0, remain[-1])\n remain = remain[:-1]\n # end if\n # end for-else\n # end for-else\n # end if\n # end while\n sub_tokens_second_pass.extend(fractions)\n # end if\n # end if\n # end for\n\n sub_tokens = sub_tokens_second_pass\n # end if\n\n return sub_tokens",
"def split_into_words(context_text):\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in context_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n return doc_tokens, char_to_word_offset",
"def get_mismatch_token_idx(self, words: List[str]):\n # todo(yuxian): this line is extremely slow, need optimization\n wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize(words)\n\n # For tokens that don't correspond to any word pieces, we put (-1, -1) into the offsets.\n # That results in the embedding for the token to be all zeros.\n offsets = [x if x is not None else (-1, -1) for x in offsets]\n\n output = {\n \"token_ids\": torch.LongTensor([t.text_id for t in wordpieces]),\n \"word_mask\": torch.BoolTensor([True] * len(words)), # for original tokens (i.e. word-level)\n \"offsets\": torch.LongTensor(offsets),\n \"wordpiece_mask\": torch.BoolTensor([True] * len(wordpieces)), # for wordpieces (i.e. subword-level)\n }\n return output",
"def pairwise_tokenize(sentence,w2v,remove_stopwords=True):\n\n ignore_words = stopwords.words('english')\n\n #Remove non-alphanumeric\n pattern = re.compile('[\\W_]+') \n sentence = pattern.sub(\" \",sentence) \n sentence = sentence.strip()\n words = sentence.split(\" \")\n\n compound_word_idx = []\n a_idx = 0\n for a,b in pairwise(words):\n combined = a +\"_\" + b\n try:\n w2v[combined]\n compound_word_idx.append(a_idx) #append the index of the 1st compound word\n a_idx += 1\n except KeyError:\n a_idx += 1\n\n for idx in compound_word_idx:\n words[idx] = words[idx] + \"_\" + words[idx + 1] #compound_word_idx stores index of 1st word, so combine with the next word\n\n #This cannot be combined into another loop to maintain where indices point\n for idx in reversed(compound_word_idx):\n words.pop(idx+1)\n\n if remove_stopwords == True:\n filtered = []\n for word in words:\n word = word.decode(\"utf-8\")\n if word not in ignore_words:\n filtered.append(word)\n\n words = filtered\n\n return words",
"def shared_words(text1, text2):\r\n\r\n list1 = tokenize(text1.strip(' '))\r\n list2 = tokenize(text2.strip(' '))\r\n\r\n list3 = set(list1) & set(list2)\r\n list3.remove(' ');\r\n\r\n return list3",
"def tokenize_words(line):\n return",
"def is_split_token(self):\n return '/' in self.token",
"def semantic_split(input_string):\n\n # if already a word, go back\n\n\n word_gain = False\n\n split_string = wordninja.split(input_string)\n\n # if at least one word is gained that is not 'a' or 'o'\n for string in split_string:\n if (has_vowels(string) and (string not in {\"a\", \"o\", \"A\", \"O\"})):\n word_gain = True\n\n # check if meaningful words gained; if not, return original\n if word_gain:\n return \" \".join(split_string)\n else:\n return input_string",
"def _split(self):\n \n self._words = []\n \n # (1) Expand contractions\n text = self._text.replace(\"'m \", \" am \")\n text = text.replace(\"'d \", \" would \")\n text = text.replace(\"'ll \", \" will \")\n text = text.replace(\"'ve \", \" have \")\n text = text.replace(\"'re \", \" are \")\n text = text.replace(\"can't \", \"can not \")\n text = text.replace(\"won't \", \"will not \")\n text = text.replace(\"n't \", \" not \")\n # Assume possesives are contractions of is\n text = text.replace(\"'s \", \" is \")\n text = text.replace(\"s' \", \"s \")\n \n # (2) Replace newlines, carriage returns, tabs, form feed with space.\n text = re.sub('[\\r\\n\\t\\f]', ' ', text)\n \n # (3) remove duplicate spaces\n text = re.sub(' +', ' ', text.strip())\n \n # Empty text\n if len(text) == 0:\n return \n \n # (4) Split text by whitespace (tokenize).\n words = text.split(' ')\n \n # (5) Separate out punctuation\n for word in words:\n length = len(word)\n \n begin = 0\n for i in range(0,length):\n if not word[i].isdigit() and not word[i].isalpha():\n # decimal, thousandths, fraction symbol\n if word[i] in ['.', ',', '/'] and i < length-1 and word[i+1].isdigit():\n continue\n # degree\n if word[i] in ['°'] and i < length-1 and word[i+1] in [ 'f', 'F', 'c', 'C']:\n continue\n # sign symbol\n if word[i] in ['-', '+'] and i < length-1 and (word[i+1].isdigit() or word[i+1] in ['.', ',']):\n # first char or exponent\n if begin == i or word[i-1] in ['e', 'E']:\n continue\n \n if begin != i:\n self._words.append( { 'word': word[begin:i], 'tag': Vocabulary.UNTAG } )\n if word[i] in [ '.', '?', '!', ',', ':', ';', '(', ')', '[', ']', '\"', '\\'', '¿', '¡']:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.PUNCT } )\n # non-printable ascii\n elif (ord(word[i]) >= 0 and ord(word[i]) <= 7) or (ord(word[i]) >= 14 and ord(word[i]) <= 31):\n pass\n else:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.SYMBOL } )\n begin = i + 1\n if begin < length:\n self._words.append( { 'word': word[begin:], 'tag': Vocabulary.UNTAG } )",
"def add2chain2nd(splitted_text):\n # Our key is the unique occurrence of a pair of words\n inputText = splitted_text\n if len(inputText) > 1:\n for i, word in enumerate(inputText):\n if i == 0: # Chaining the first and second word in tweet to start key\n if (None, startKey) not in chain:\n chain[(None, startKey)] = [word]\n else:\n chain[(None, startKey)].append(word)\n elif i == 1:\n if (startKey,inputText[i-1]) not in chain:\n chain[(startKey,inputText[i-1])] = [word]\n else:\n chain[(startKey,inputText[i-1])].append(word)\n else:\n if (inputText[i-2],inputText[i-1]) not in chain:\n chain[(inputText[i-2],inputText[i-1])] = [word]\n else:\n chain[(inputText[i-2],inputText[i-1])].append(word)\n if i == len(inputText)-1: # Use last two words as key to end\n if (inputText[i-1],word) not in chain:\n chain[(inputText[i-1],word)] = [endKey]\n else:\n chain[(inputText[i-1],word)].append(endKey)\n if (None,startKey) not in chain:\n chain[(None,startKey)] = [inputText[0]]\n else:\n chain[(None,startKey)].append(inputText[0])\n if (inputText[0],endKey) not in chain:\n chain[(inputText[0],endKey)] = [None]\n else:\n chain[(inputText[0],endKey)].append(None)",
"def _tokenize_less_start_end(self, corpus: str):\n corpus_tokens = self.tokenize(corpus, 0)\n return [x for x in corpus_tokens if x != self.TOKENS.END.value]"
] | [
"0.65557843",
"0.6295449",
"0.6196509",
"0.6070473",
"0.6030138",
"0.5977965",
"0.5975292",
"0.5914505",
"0.58049476",
"0.5770824",
"0.5767611",
"0.5764669",
"0.57122844",
"0.5690039",
"0.5681785",
"0.5662937",
"0.5659659",
"0.56561136",
"0.5654984",
"0.5654885",
"0.56434584",
"0.5612741",
"0.5609514",
"0.5605653",
"0.557594",
"0.55757034",
"0.5562001",
"0.5551707",
"0.55396116",
"0.5516026"
] | 0.7395432 | 0 |
Returns a pandas dataframe denoting the total number of NA values and the percentage of NA values in each column. The column names are shown in the index. | def assess_NA(data):
# pandas series denoting features and the sum of their null values
null_sum = data.isnull().sum() # instantiate columns for missing data
total = null_sum.sort_values(ascending=False)
percent = (((null_sum / len(data.index)) * 100).round(2)). \
sort_values(ascending=False)
# concatenate along the columns to create the complete dataframe
df_NA = pd.concat([total, percent], axis=1,
keys=['Number of NA', 'Percent NA'])
# drop rows that don't have any missing data; omit if you want to
# keep all rows
df_NA = df_NA[(df_NA.T != 0).any()]
return df_NA | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def na_count(df):\n print(\"Size of the current file is:\", df.shape)\n print(\"\")\n printmd(\"*__Percentage of Na per columns in the data:__*\")\n # Calculate the number of the NA values and its precentage\n df_temp = df.isnull().sum().reset_index()\n df_temp.columns = ['column_name', 'na_size']\n df_temp['na_size_percentage'] = round(df_temp.na_size*100.0/df.shape[0], 2)\n df_temp = df_temp.sort_values(by='na_size', ascending=False)\n print(df_temp.loc[df_temp['na_size'] > 0])\n # print((round(df.isnull().sum()/df.shape[0]*100)))\n print(\"\")",
"def missing_values_col(df):\n null_count = df.isnull().sum()\n null_percentage = (null_count / df.shape[0]) * 100\n empty_count = pd.Series(((df == ' ') | (df == '')).sum())\n empty_percentage = (empty_count / df.shape[0]) * 100\n nan_count = pd.Series(((df == 'nan') | (df == 'NaN')).sum())\n nan_percentage = (nan_count / df.shape[0]) * 100\n return pd.DataFrame({'num_missing': null_count, 'missing_percentage': null_percentage,\n 'num_empty': empty_count, 'empty_percentage': empty_percentage,\n 'nan_count': nan_count, 'nan_percentage': nan_percentage})",
"def nan_val_summary(df):\n nan_arr = np.count_nonzero(df.isnull(), axis=0)\n frac = nan_arr / df.shape[0]\n nan_df = pd.DataFrame(\n {'columns': df.columns,\n 'nan_count': nan_arr,\n 'fraction_missing': frac}\n )\n return nan_df",
"def missing_value_count_and_percent(df):\n df = pd.concat({'num_missing_values':df.isnull().sum(), 'pct_missing_values':df.isnull().mean().round(4)}, axis=1)",
"def sum_missing_values_cols(df):\n missing_values = df.isnull().sum(axis=1).value_counts().sort_index()\n cols_missing_values = {'num_cols_missing': missing_values.index.tolist(), 'num_rows': missing_values.values.tolist()}\n cols_missing_values = pd.DataFrame(cols_missing_values)\n total_rows = df.shape[0]\n cols_missing_values['pct_cols_missing'] = (cols_missing_values.num_rows/total_rows)*100\n return cols_missing_values",
"def sum_missing_values_attributes(df):\n missing_values = pd.DataFrame(df.isna().sum(axis=0), columns=['num_row_missing'])\n total_rows = df.shape[0]\n missing_values['pct_rows_missing'] = missing_values.num_row_missing/total_rows\n return missing_values",
"def show_column_nulls(df):\n\n df_null = df.isnull().sum() / len(df)\n return pd.DataFrame(df_null, columns=[\"Null Frequency\"])",
"def na_counter(df, display_plot=True, display_as_list=True):\n coln = list(df.columns)\n columns = ['NA Count', \"PERCENT\"]\n\n # if df.isnull().any().sum() == 0:\n # return pd.DataFrame(columns=columns)\n\n na_dict = {}\n for col in coln:\n if df[col].isnull().any():\n na_count = df[col].isnull().sum()\n na_percent = df[col].isnull().sum() / len(df)\n na_dict[col] = [na_count, na_percent]\n\n if len(na_dict) == 0:\n return pd.DataFrame()\n\n df_na = pd.DataFrame(na_dict).T\n df_na.columns = columns\n df_na.index.name = \"Column\"\n\n df_na = df_na.sort_values(by=\"PERCENT\", ascending=True)\n\n print(f\"No of columns in dataframe: {len(df.columns)}\")\n print(f\"No of columns with NA values: {len(df_na)}\")\n\n if display_plot:\n if df_na.shape[0] < 30:\n plt.rcParams['figure.figsize'] = (16.0, 8.0)\n df_na.PERCENT.plot(kind=\"bar\", title=\"% na values\")\n else:\n plt.rcParams['figure.figsize'] = (8,30)\n df_na.PERCENT.plot(kind=\"barh\",title=\"% na values\")\n plt.show()\n\n if display_as_list is True:\n display_all(df_na)\n\n return df_na",
"def nanPercentage(df):\n rows = df.shape[0]\n cols = df.shape[1]\n tot = rows*cols\n \n nanNum = 0\n for i in range(df.shape[0]):\n nanNum = nanNum + np.sum ( pd.isnull(df.iloc[i]) )\n logger.debug ('nan %d tot %d ' % (nanNum, tot) )\n perc = (100*nanNum) / (tot * 1.0)\n return perc",
"def null_cols(data):\n\n nulls = data.isna().sum()\n return nulls[nulls > 0] / len(data) * 100",
"def list_missing_pct(df):\n percent = (df.isnull().sum() / df.isnull().count()).sort_values(ascending=False)\n return percent",
"def information(self):\n \n \n x = list(zip(self.df.count(), self.df.dtypes, (self.df.isnull().sum() / self.df.shape[0])))\n y = dict(zip(self.df.columns, x))\n return pd.DataFrame(y, index=['Number of Values', 'Data Type', 'Percent Missing']).transpose()",
"def get_percentage_of_nulls_pd(X, columns_to_check='all', rows_to_scan='all', only_nulls=True, deci=None):\n per_null_dict = get_percentage_of_nulls(X, deci=deci, columns_to_check=columns_to_check, \n rows_to_scan=rows_to_scan, only_nulls=only_nulls)\n per_null_table = pd.DataFrame(per_null_dict.values(), index=per_null_dict.keys(), columns=['Percentage'])\n per_null_table.sort('Percentage', ascending=False, inplace=True)\n return per_null_table",
"def get_nan_counts(data, cols, null_col_suffix=''):\n nulls_df = pd.DataFrame(pd.isnull(data[cols]).sum())\n nulls_df.columns = ['null_counts'+null_col_suffix]\n nulls_df['feature'] = nulls_df.index\n nulls_df.reset_index(inplace=True, drop=True)\n return nulls_df",
"def check_missing_values(df):\r\n for i in range(len(df.columns)):\r\n n_missing_values = df[df.columns[i]].isnull().sum()\r\n if n_missing_values != 0:\r\n print('Column %s missing %s value(s) (%.2f%% of column values).'%(df.columns[i], n_missing_values, (n_missing_values/len(df[df.columns[i]].index))*100))\r\n c = 0\r\n for i in range(len(df.values)):\r\n n_missing_values = df.iloc[i].isnull().sum()\r\n if n_missing_values != 0:\r\n c += 1\r\n print('> Number of rows containing missing values %s (%.2f%% of whole dataset).'%(c, (c/len(df.values))*100))",
"def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)",
"def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)",
"def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num/den*100, 2)",
"def null_count(df):\n return df.isnull().sum().sum()",
"def na_ratio(ts: TimeSeries) -> float:\n\n return ts.pd_dataframe().isnull().sum().mean() / len(ts)",
"def computeRawPercNaNTable(df):\n species = df[\"PG.Organisms\"].unique()\n percentageList = []\n specieList = []\n for i in species:\n percentages = allSamplesPercNaN(df, specie = i)\n percentageList.append(percentages)\n specieList.append(i)\n \n valuesList = []\n samples = percentageList[0][\"sample\"]\n for i in range(len(percentageList)):\n valuesList.append(percentageList[i][\"percentage_NaN\"].values)\n percTable = pd.DataFrame(np.array([samples.values] + valuesList).T, columns = [\"sample\"] + list(species))\n return percTable",
"def missing_values_ratio(series: TimeSeries) -> float:\n\n return series.pd_dataframe().isnull().sum().mean() / len(series)",
"def check_nan(df, show_plot=False):\n void = pd.DataFrame(np.sum(df.isna()), columns=['absolute'])\n void['percentage'] = round((void.absolute / df.shape[0]) * 100, 2)\n\n if show_plot:\n print('\\n\\n')\n plt.figure(figsize=(12, 5))\n plt.plot(void.index.values, void.percentage.values, 'ro')\n plt.xlabel('Columns indexes')\n plt.ylabel('% of missing values')\n plt.title('Percentage of missing values per feature')\n plt.xticks(rotation=45)\n return void.T",
"def missing_analysis(df):\n df_isnull = (df.isnull().sum() / len(df))*100\n df_isnull = df_isnull.drop(df_isnull[df_isnull ==0].index).sort_values(ascending = False)\n missing_data = pd.DataFrame({'Percentual Missing': df_isnull})\n missing_data.plot.bar()",
"def reportnulls(self):\n self.null_counts = self.df.isnull().sum().sort_values(ascending=False)\n\n # return count of null values\n return self.null_counts",
"def get_missing_pct(self, df):\n # get the number of missing data points per column\n missing_values_count = df.isnull().sum()\n\n # how many total missing values do we have?\n total_cells = np.product(df.shape)\n total_missing = missing_values_count.sum()\n\n # percent of data that is missing\n percent_missing = (total_missing/total_cells) * 100\n \n return \"Total missing data percentage is %.4f\" % (percent_missing)",
"def summary(df):\n summary_list = []\n print 'SHAPE', df.shape\n \n for i in df.columns:\n vals = df[i] \n if df[i].dtype == 'O':\n try:\n most_frequent = Counter(df[i].tolist()).most_common(1)\n uniq = vals.nunique()\n except TypeError:\n most_frequent = 'NA'\n uniq = 'NA'\n summary_list.append([i,\n vals.dtype, \n 'NA', \n 'NA', \n most_frequent,\n uniq, \n sum(pd.isnull(vals)),\n sum(pd.isnull(vals))/(1.0*len(df))])\n elif df[i].dtype == '<M8[ns]':\n most_frequent = Counter(df[i].tolist()).most_common(1)\n summary_list.append([i,\n vals.dtype, \n vals.min(), \n vals.max(), \n most_frequent,\n vals.nunique(), \n sum(pd.isnull(vals)),\n sum(pd.isnull(vals))/(1.0*len(df))])\n else:\n summary_list.append([i,\n vals.dtype, \n vals.min(), \n vals.max(), \n vals.mean(),\n vals.nunique(), \n sum(pd.isnull(vals)),\n sum(pd.isnull(vals))/(1.0*len(df))])\n return pd.DataFrame(summary_list, columns=['col','datatype','min','max','mean_or_most_common','num_uniq','null_count','null_pct'])",
"def filling_nan_values(df: pd.DataFrame) -> pd.DataFrame: \n ratio = df.count()/len(df) \n cols = ratio[ratio < 1].index\n for col in cols: \n print(f\"Filling Column:{col}\")\n df[col] = df[col].fillna(df[col].mean())\n return df",
"def get_nan_columns(df):\n df = nan_val_summary(df)\n return df[df['fraction_missing'] > 0]['columns'].values",
"def summarize_dataframe(df,columns_to_check='all',show_progress=False,arity_thresh=20):\n if columns_to_check != 'all':\n df = df[columns_to_check]\n\n nrow = len(df)\n summary_df = pd.DataFrame(columns = ['feature','datatype','nmissing','arity','accepted values'])\n len_df = len(summary_df)\n for col in df.columns:\n nmiss = nrow - df[col].value_counts().sum()\n narity = len(df[col].unique())\n if show_progress:\n #print(col, df[col].dtype,nmiss, \"\\t\", narity,\":\\t\", df[col].ix[8320])\n #else:\n print(col, df[col].dtype,nmiss, \"\\t\", narity)\n accept_val = None\n if narity < arity_thresh:\n accept_val = df[col].unique()\n else:\n accept_val = 'Too many to show'\n summary_df.loc[len_df] = [col,df[col].dtype,nmiss,narity,accept_val]\n len_df+=1\n # assing fraction of missing\n summary_df['x_missing'] = summary_df['nmissing']/float(nrow)\n\n return summary_df"
] | [
"0.76396763",
"0.7637532",
"0.7542576",
"0.72621375",
"0.7246462",
"0.7233",
"0.7130642",
"0.71104866",
"0.70493215",
"0.6875617",
"0.684683",
"0.6755909",
"0.6543785",
"0.6444147",
"0.64351976",
"0.6404165",
"0.6404165",
"0.62979305",
"0.62778085",
"0.62736887",
"0.62029946",
"0.6152391",
"0.606094",
"0.60476303",
"0.5975499",
"0.59601456",
"0.5955058",
"0.59186214",
"0.58699596",
"0.5866814"
] | 0.7825975 | 0 |
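As a quick, self-contained illustration of the assess_NA document above (the toy dataframe, its column names and values are assumptions for demonstration only, not part of the stored record):

import numpy as np
import pandas as pd

def assess_NA(data):
    # Summarize NA counts and percentages per column; columns without NAs are dropped.
    null_sum = data.isnull().sum()
    total = null_sum.sort_values(ascending=False)
    percent = ((null_sum / len(data.index)) * 100).round(2).sort_values(ascending=False)
    df_NA = pd.concat([total, percent], axis=1, keys=['Number of NA', 'Percent NA'])
    return df_NA[(df_NA.T != 0).any()]

toy = pd.DataFrame({'a': [1, np.nan, 3, np.nan], 'b': [1, 2, 3, 4], 'c': [np.nan, 2, 3, 4]})
print(assess_NA(toy))
# 'a' -> 2 NAs (50.0%), 'c' -> 1 NA (25.0%); 'b' has no NAs and is dropped.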
Optimize gif, jpg and png images. If the FLAG setting is set to True, just rename the files already optimized. | def image_optimizer_finalized(pelican):
for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']):
for name in filenames:
if os.path.splitext(name)[1] in COMMANDS.keys():
if pelican.settings[FLAG]:
if '_optimized' in name:
filepath = os.path.join(dirpath, name)
newname = re.sub(OPTIMIZED + r'\.(png|jpg|jpeg|gif)',
r'.\1', name)
newfilepath = os.path.join(dirpath, newname)
shutil.move(filepath, newfilepath)
else:
optimize(pelican, dirpath, name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_gif():\n if MIGRATION:\n import imageio\n for n, JPG_DIR in enumerate(JPG_DIRS):\n images, image_file_names = [], []\n for file_name in os.listdir(JPG_DIR):\n if file_name.endswith('.jpg'):\n image_file_names.append(file_name) \n sorted_files = sorted(image_file_names, key=lambda y: int(y.split('_')[1]))\n for i in range(len(sorted_files)): \n file_path = os.path.join(JPG_DIR, sorted_files[i])\n images.append(imageio.imread(file_path))\n imageio.mimsave(FNAME.rsplit('.', 1)[0] + '_migration' + str(n) + '.gif', images, 'GIF', loop=1, fps=FPS)",
"def create_gif():\n anim_file = 'sample/training.gif'\n\n with imageio.get_writer(anim_file, mode='I') as writer:\n filenames = glob.glob('sample/*.jpg')\n filenames = sorted(filenames, key=lambda filename: int(filename[11:-4]))\n for filename in filenames:\n image = imageio.imread(filename)\n writer.append_data(image)\n image = imageio.imread(filename)\n writer.append_data(image)",
"def rename_outputs(size, i, format):\n for k in range(size):\n src_name = 'out' + str(k) + '.png'\n dst_name = 'objective' + str(i) + '_' + format + '.png'\n os.rename(src_name, dst_name)",
"def gif(self, num_games, slow_mult=2, delete_pics=True,\n kill_limit_per_game=1000):\n slow_mult = int(slow_mult)\n gif_name = \"gifs\\\\\"+self.name\n\n try:\n os.remove(gif_name+'.gif')\n except Exception:\n pass\n\n kill_limit = kill_limit_per_game * num_games\n\n c = 0\n e = 0\n while c < kill_limit and e < num_games:\n self.env.reset()\n game_over = False\n # get initial input\n input_t = self.env.observe()\n\n plt.imshow(self.env.draw_state(),\n interpolation='none', cmap='gray')\n plt.savefig(\"gifs\\\\%d.png\" % c)\n plt.close()\n c += 1\n while not game_over and c < kill_limit:\n input_tm1 = input_t\n\n # get next action\n q = self.model.predict(input_tm1)\n action = np.argmax(q[0])\n\n # apply action, get rewards and new state\n input_t, reward, game_over = self.env.act(action)\n\n plt.imshow(self.env.draw_state(),\n interpolation='none', cmap='gray')\n plt.savefig(\"gifs\\\\%d.png\" % c)\n plt.close()\n c += 1\n\n e += 1\n\n # Making a temporary gif and slowing it down seems to be the only way I\n # can make a slower gif. For some reason the command works in cmd but\n # not here so i guess I am stuck with fast gifs.\n \"\"\"\n call1 = ['ffmpeg', '-i', '%d.png', gif_name+'_temp.gif']\n subprocess.call(call1)\n call2 = ['ffmpeg', '-i', gif_name+'_temp.gif', '-filter:v',\n '\"setpts={}.0*PTS\"'.format(slow_mult), gif_name+'.gif']\n subprocess.call(call2, shell=True)\n # ffmpeg -i catch_small_model.gif -filter:v \"setpts=3.0*PTS\" catch_small_model_slow.gif\n print(call2)\n try:\n os.remove(gif_name+'_temp.gif')\n except Exception as e:\n print(e)\n \"\"\"\n subprocess.call(['ffmpeg', '-i', 'gifs\\\\%d.png', gif_name+'.gif'])\n\n if delete_pics:\n for i in range(c):\n try:\n os.remove(\"gifs\\\\%d.png\" % i)\n except Exception as e:\n print(e)",
"def make_gifs_test(title, sort, path):\n images = os.listdir(path)\n generated_images = []\n\n for i in range(len(images)):\n file = os.path.join(path, '%s_%s_Results_%03d.png' % (title, sort, i+1))\n generated_images.append(imageio.imread(file))\n\n imageio.mimsave(path + '{}_{}_Test_Results.gif'.format(sort, title), generated_images, fps=2)\n print(\"{} gif file is generated.\".format(title))",
"def gifsicle(fname1, /, *, chunksize = 1048576, debug = False, timeout = 60.0):\n\n # Import standard modules ...\n import os\n import shutil\n import subprocess\n import tempfile\n\n # Import sub-functions ...\n from ..sha512 import sha512\n\n # Check that \"gifsicle\" is installed ...\n if shutil.which(\"gifsicle\") is None:\n raise Exception(\"\\\"gifsicle\\\" is not installed\") from None\n\n # Check that the image exists ...\n if not os.path.exists(fname1):\n raise Exception(f\"\\\"{fname1}\\\" does not exist\") from None\n\n # Create temporary directory ...\n with tempfile.TemporaryDirectory(prefix = \"gifsicle.\") as tname:\n # Create temporary name ...\n fname2 = f\"{tname}/image.gif\"\n\n # Optimise GIF ...\n subprocess.run(\n [\n \"gifsicle\",\n \"--unoptimize\",\n \"--optimize=3\",\n \"--output\", fname2,\n fname1\n ],\n check = True,\n encoding = \"utf-8\",\n stderr = subprocess.DEVNULL,\n stdout = subprocess.DEVNULL,\n timeout = timeout,\n )\n\n # Find the two sizes and don't replace the original if the new one is\n # larger, or equal ...\n if os.path.getsize(fname2) >= os.path.getsize(fname1):\n if debug:\n print(f\"INFO: Skipping because \\\"{fname2}\\\" is larger than, or equal to, \\\"{fname1}\\\"\")\n return\n\n # Find the two hashes and don't replace the original if the new one is\n # the same ...\n if sha512(fname1, chunksize = chunksize) == sha512(fname2, chunksize = chunksize):\n if debug:\n print(f\"INFO: Skipping because \\\"{fname2}\\\" is the same as \\\"{fname1}\\\"\")\n return\n\n # Replace the original ...\n shutil.move(fname2, fname1)",
"def AnimFromPng(name, gif=True, fps=15):\n if(gif):\n imgconvert = \"convert \" + \"-delay \" + str(int(1000/fps))\n imgconvert += \" -dispose None \" + name + \"*.png -loop 0 \" + name + \".gif\"\n system(imgconvert)\n print imgconvert\n else:\n aviconvert = \"ffmpeg -i \" + name + \"%03d.png -b:v 2048k -r \" + str(fps) + \" \" + name + \".avi\"\n system(aviconvert)\n print aviconvert",
"def make_gif(image_list, gif_name):\n if not gif_name.endswith(\".gif\"):\n gif_name += \".gif\"\n imageio.mimsave(gif_name, [imageio.imread(x) for x in image_list])",
"def compose_in_gif(images, output_file, delay):\n images[0].save(\n output_file, \n format='GIF', append_images=images[1:], \n save_all=True, duration=delay, loop=0,\n )",
"def image_optimizer_initialized(pelican):\n if not pelican.settings[FLAG]:\n return\n\n for dirpath, _, filenames in os.walk(pelican.settings['PATH']):\n for name in filenames:\n if os.path.splitext(name)[1] in COMMANDS.keys():\n if not re.search(OPTIMIZED + r'\\.(png|jpg|jpeg|gif)', name):\n optimize(pelican, dirpath, name)",
"def animated_gif(folder_with_images, gif_filename, loop_duration, size):\r\n\r\n\tos.chdir(folder_with_images) # changes directory to the folder with the images\r\n\r\n\tpng_files = []\r\n\r\n\t# get list of png files in folder\r\n\tfor fn in os.listdir(folder_with_images):\r\n\t\tif fn.endswith('.png'):\r\n\t\t\tpng_files.append(fn)\r\n\r\n\tsort_nicely(png_files)\r\n\r\n\tprint(png_files)\r\n\r\n\t# number of png_files\r\n\tnum_pngs = len(png_files)\r\n\tpng_time = float(loop_duration)/ float(num_pngs)\r\n\r\n\timages = [Image.open(fn) for fn in png_files]\r\n\tdim = (size, size) # change sizes for the image file dimension\r\n\t#for im in images:\r\n\t#\tim.thumbnail(dim, Image.ANTIALIAS)\r\n\r\n\toutput_file = os.path.join(folder_with_images, gif_filename) # path for output file\r\n\twriteGif(output_file, images, png_time) # writes out GIF\r",
"def makeGif(imgPath):\r\n import imageio\r\n filenames = os.listdir(imgPath)\r\n filenames.sort()\r\n images = []\r\n for filename in filenames:\r\n images.append(imageio.imread(os.path.join(imgPath, filename)))\r\n imageio.mimsave(os.path.join(imgPath, \"sharpVid.gif\"), images, duration=0.2)",
"def gif(filename, array, fps=10, scale=1.0):\n # ensure that the file has the .gif extension\n filename = filename + '.gif'\n\n # copy into the color dimension if the images are black and white\n if array.ndim == 3:\n array = array[..., np.newaxis] * np.ones(3)\n\n # make the moviepy clip\n clip = ImageSequenceClip(list(array), fps=fps).resize(scale)\n clip.write_gif(filename, fps=fps)\n return True",
"def compress_img():\n in_path = 'output/templates/rgb/'\n out_path = 'output/templates/imgs/'\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(out_path + name, img)\n\n return",
"def create_gif(base_folder):\n img_list = []\n search_path = glob.glob(os.path.join(base_folder, '*.png'))\n search_path.sort()\n for f in search_path:\n im = Image.open(f)\n img_list.append(im)\n save_file = os.path.join(base_folder, 'animated_gif.gif')\n img_list[0].save(save_file,\n save_all=True, append_images=img_list[1:], optimize=False, duration=180, loop=0)",
"def main():\n print(\"For each image, type the new name of the file.\" +\n \" No extension necessary!\", end=\"\\n\\n\")\n file_list = input_path.glob(f\"*.{args.ext}\")\n plt.ion()\n\n for pic in file_list:\n img = io.imread(str(pic))\n img = rescale(img, 0.25)\n img = rotate(img, 90, resize = True)\n plt.draw()\n plt.pause(0.001)\n if args.vinyl:\n new_name = get_vinyl_name()\n else:\n print(\"\\n\")\n new_name = input(\n \"Please enter a new filename. Press [enter] to skip: \")\n if new_name:\n if not new_name.endswith(args.ext):\n new_name += \".\" + args.ext\n # io.imsave(output_path / new_name, img)\n shutil.copyfile(pic, output_path / new_name)\n if args.replace:\n os.remove(pic)",
"def make_GIF(image_path: Union[Path, str]) -> None:\n import imageio\n from pygifsicle import optimize\n\n if isinstance(image_path, str):\n image_path = Path(image_path)\n\n image_dir = image_path.parent\n image_file = image_path.stem\n gif_path = image_dir / f\"{image_file}.gif\"\n gif_path = Path(\"./xxxx.gif\")\n with imageio.get_writer(gif_path, mode='I') as writer:\n img_files = sorted((img_file for img_file in image_dir.glob('*.png')))\n for img_file in img_files:\n writer.append_data(imageio.imread(img_file))\n print(f\"{len(img_files)} images loaded from {image_path}\")\n try:\n optimize(gif_path)\n except Exception:\n print(\"gifsicle not installed\")",
"def rename_images(_input_image_paths : list[str], _output_image_dir : str) -> None:\n #Starts at 0 to account for incrementing when seeing a non-alternative\n # (the 1st image can never be an alternative take)\n new_index = 0\n\n #Get the \"base name\" for the images - e.g., PICT, DCIM, IMG_, etc.\n #Assumes that each image has the same base name as the first one.\n #Also gets the \"base index\", e.g., 1, 001, 00018, etc.\n base_name, base_index = get_image_base_name_and_index(_input_image_paths[0])\n debug(f\"Base name ({base_name}) and index({base_index})\")\n\n #Since the first image can't be an alternative take, this lets us\n # get the proper index length for all of the images\n index_length = len(base_index)\n\n #For each image, rename based on the new indices\n for image in _input_image_paths:\n flag = get_alternative_flag(image)\n\n #increment the index if this was not an alternative-take image\n if flag == '':\n new_index += 1\n\n #Create an index padded with a sufficient number of prefixing '0's\n formatted_index = \"0\" * (index_length - len(str(new_index))) + str(new_index)\n\n #Get the file name's extension\n extension = os.path.splitext(image)[1]\n\n #Create the new file name based off of the current index\n new_filepath = _output_image_dir + base_name + formatted_index + flag + extension\n debug(f\"Saving {image} to {new_filepath}\")\n\n #Save the image with the updated path name\n with Image.open(image) as image_object:\n image_object.save(new_filepath)",
"def flag(value):\n flag = value.lower().encode('ascii', 'ignore')\n if os.path.isfile(os.path.join(settings.DOCUMENT_ROOT, \"flags\", \"%s.png\" % flag)):\n return \"<img src='%sflags/%s.png' class='countryflag' alt='flag' title='%s' />\" % (settings.MEDIA_URL, flag, flag)\n \n # No flag image found, return the Necta flag hehe\n return \"<img src='%sflags/nectaflag.png' class='countryflag' alt='flag' />\" % (settings.MEDIA_URL)",
"def rename_images():\r\n grp_img_dir = os.listdir('Group_Training_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Training_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n image_path = 'Group_Training_Images'+'/'+grp_img_folder+'/'+img_label\r\n \r\n original_file_names = os.listdir(image_path)\r\n \r\n if len(original_file_names) > 1:\r\n for idx, img in enumerate(os.listdir(image_path)):\r\n assert '.jpeg' in img or '.jpg' in img, img +' incorrect format'\r\n new_name = img_label+'_'+grp_img_folder+'_'+str(idx+1)+'.jpeg'\r\n os.rename(image_path+'/'+img, image_path+'/'+ new_name)\r\n else:\r\n assert ('.jpeg' in original_file_names[0] or \r\n '.jpg' in original_file_names[0]), original_file_names[0] +' incorrect format'\r\n new_name = img_label+'_'+grp_img_folder+'.jpeg'\r\n os.rename(image_path+'/'+original_file_names[0], image_path+'/'+ new_name)",
"def change_image_name(self, name):\n self.image.name = name",
"def saveGIFBatch(directory, path, name=''):\n # for each frame in batch\n images = []\n for filename in directory:\n print(filename)\n images.append(imageio.imread(filename))\n\n name_gif = path + '/' + name + '.gif'\n imageio.mimsave(name_gif, images)",
"def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]",
"def image_name(name):\n \n # Gets the '.' position\n dot = name.find('.')\n # Slice the name from beginning and before '.'\n img = name[:dot]\n # return string with jpg format\n return \"{}.jpg\".format(img)",
"def _change_name(self, suff, info_extra):\n if 'cable-ring' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n f = i1 / i2\n suff = suff.replace('.png',\n f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png')\n elif 'cloth-flat' in self.path:\n i1 = info_extra['cloth_coverage']\n suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png')\n elif 'bag-alone' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png')\n else:\n pass\n return suff",
"def _rename_resize_image(self, instance=None, **kwargs):\n if getattr(instance, self.name):\n filename = getattr(instance, self.name).path\n ext = os.path.splitext(filename)[1].lower().replace('jpg', 'jpeg')\n dst = self.generate_filename(instance, '%s_%s%s' % (self.name,\n instance._get_pk_val(), ext))\n dst_fullpath = os.path.join(settings.MEDIA_ROOT, dst)\n if os.path.abspath(filename) != os.path.abspath(dst_fullpath):\n os.rename(filename, dst_fullpath)\n for variation in self.variations:\n variation_filename = self._get_variation_filename(variation, dst_fullpath)\n shutil.copyfile(dst_fullpath, variation_filename)\n self._resize_image(variation_filename, variation)\n setattr(instance, self.attname, dst)\n instance.save()",
"def compress_image(filename,k):",
"def gif_generation(orig_label_path, bound_data_path):\n for sample in os.listdir(bound_data_path):\n if not sample.startswith('.') and osp.isdir(osp.join(bound_data_path, sample)):\n sample_path = osp.join(bound_data_path, sample)\n for artery in os.listdir(sample_path):\n orig_label_pick_path = osp.join(orig_label_path, sample, artery, 'data.pkl')\n bound_pick_path = osp.join(bound_data_path, sample, artery, 'data.pkl')\n\n # function to save result of each artery into gif\n save_gif_artery(orig_label_pick_path, bound_pick_path)",
"def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)",
"def rename(img):\n ext = splitext(img)[1].lower()\n name = get_date(open(img))\n if name is not None:\n name = name + ext\n return copy(img, name)"
] | [
"0.64518267",
"0.60565406",
"0.6031277",
"0.6013981",
"0.58980536",
"0.5897385",
"0.5897077",
"0.5859662",
"0.57910424",
"0.57670325",
"0.57307136",
"0.5720459",
"0.56009597",
"0.5590175",
"0.5568311",
"0.55400527",
"0.55328435",
"0.55229425",
"0.54522663",
"0.5441755",
"0.54343426",
"0.53978854",
"0.5374758",
"0.53475577",
"0.5341275",
"0.5331061",
"0.5328753",
"0.53042525",
"0.53018975",
"0.5297938"
] | 0.6643921 | 0 |
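To isolate the rename step used by image_optimizer_finalized above, a minimal sketch (the helper name and the sample filenames are assumptions; the regex is the same one used in the document):

import re

OPTIMIZED = '_optimized'

def strip_optimized_suffix(name):
    # Drop the '_optimized' marker placed just before the image extension.
    return re.sub(OPTIMIZED + r'\.(png|jpg|jpeg|gif)', r'.\1', name)

print(strip_optimized_suffix('logo_optimized.png'))  # -> logo.png
print(strip_optimized_suffix('photo.jpg'))           # -> photo.jpg (no marker, unchanged)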
core function for parallel run_correlation | def run_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, job_id):
# selects the ith row in phenotype_df
np.random.seed(job_id)
phenotype_df = phenotype_df.iloc[[job_id], :]
spreadsheet_df_trimmed, phenotype_df_trimmed, err_msg = datacln.check_input_value_for_gene_prioritazion(
spreadsheet_df, phenotype_df)
pc_array = get_correlation(spreadsheet_df_trimmed.as_matrix(), phenotype_df_trimmed.values[0], run_parameters)
gene_name_list = spreadsheet_df_trimmed.index
phenotype_name = phenotype_df.index.values[0]
generate_correlation_output(pc_array, phenotype_name, gene_name_list, run_parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_correlation(data):\n pass",
"def auto_correlation(arr):\n return cross_correlation(arr, arr)",
"def run_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pc_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n pearson_array = pc_array.copy()\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator = pc_array.copy()\n restart_accumulator[restart_accumulator != 0] = 1\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n\n pc_array = pc_array - baseline_array\n quantitative_score = pc_array\n viz_score = (pc_array - min(pc_array)) / (max(pc_array) - min(pc_array))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)",
"def correlate_cpu(data, num_timeseries, size_timeseries,\n correlations_cpu, indices_step):\n num_timesteps = size_timeseries\n window_size = size_timeseries\n\n sums = [0.0] * num_timeseries\n sums_sq = [0.0] * num_timeseries\n sums_xy = [0.0] * calc_num_correlations(num_timeseries)\n\n for k in range(num_timesteps):\n index_correlation = 0\n\n for i in range(num_timeseries):\n old = 0.0 if k < window_size else data[i][k - window_size]\n new = data[i][k]\n sums[i] += new - old\n sums_sq[i] += new * new - old * old\n\n for i in range(num_timeseries):\n old_x = 0.0 if k < window_size else data[i][k - window_size]\n new_x = data[i][k]\n\n for j in range(i):\n old_y = 0.0 if k < window_size else data[j][k - window_size]\n new_y = data[j][k]\n sums_xy[index_correlation] += new_x * new_y - old_x * old_y\n numerator = (window_size * sums_xy[index_correlation] -\n sums[i] * sums[j])\n denominator = ((1 / math.sqrt(window_size * sums_sq[i] -\n sums[i] * sums[i])) *\n (1 / math.sqrt(window_size * sums_sq[j] -\n sums[j] * sums[j])))\n correlations_cpu[index_correlation] = numerator * denominator\n indices_step[2 * index_correlation] = j\n indices_step[2 * index_correlation + 1] = i\n index_correlation += 1",
"def get_correlation_array(self, src):\n self.correlation_array = parallel.call_and_bcast(self.get_array, src)",
"def run_bootstrap_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n phenotype_response_df = phenotype_response_df.T\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, n_bootstraps, jobs_id)\n dstutil.parallelize_processes_locally(run_bootstrap_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def _pearsonr(x: xr.DataArray, y: xr.DataArray, monitor: Monitor) -> xr.Dataset:\n with monitor.starting(\"Calculate Pearson correlation\", total_work=6):\n n = len(x['time'])\n\n xm, ym = x - x.mean(dim='time'), y - y.mean(dim='time')\n xm['time'] = [i for i in range(0, len(xm.time))]\n ym['time'] = [i for i in range(0, len(ym.time))]\n xm_ym = xm * ym\n r_num = xm_ym.sum(dim='time')\n xm_squared = np.square(xm)\n ym_squared = np.square(ym)\n r_den = np.sqrt(xm_squared.sum(dim='time') * ym_squared.sum(dim='time'))\n r_den = r_den.where(r_den != 0)\n r = r_num / r_den\n\n # Presumably, if abs(r) > 1, then it is only some small artifact of floating\n # point arithmetic.\n # At this point r should be a lon/lat dataArray, so it should be safe to\n # load it in memory explicitly. This may take time as it will kick-start\n # deferred processing.\n # Comparing with NaN produces warnings that can be safely ignored\n default_warning_settings = np.seterr(invalid='ignore')\n with monitor.child(1).observing(\"task 1\"):\n negativ_r = r.values < -1.0\n with monitor.child(1).observing(\"task 2\"):\n r.values[negativ_r] = -1.0\n with monitor.child(1).observing(\"task 3\"):\n positiv_r = r.values > 1.0\n with monitor.child(1).observing(\"task 4\"):\n r.values[positiv_r] = 1.0\n np.seterr(**default_warning_settings)\n r.attrs = {'description': 'Correlation coefficients between'\n ' {} and {}.'.format(x.name, y.name)}\n\n df = n - 2\n t_squared = np.square(r) * (df / ((1.0 - r.where(r != 1)) * (1.0 + r.where(r != -1))))\n\n prob = df / (df + t_squared)\n with monitor.child(1).observing(\"task 5\"):\n prob_values_in = prob.values\n with monitor.child(1).observing(\"task 6\"):\n prob.values = betainc(0.5 * df, 0.5, prob_values_in)\n prob.attrs = {'description': 'Rough indicator of probability of an'\n ' uncorrelated system producing datasets that have a Pearson'\n ' correlation at least as extreme as the one computed from'\n ' these datsets. Not entirely reliable, but reasonable for'\n ' datasets larger than 500 or so.'}\n\n retset = xr.Dataset({'corr_coef': r,\n 'p_value': prob})\n return retset",
"def run_bootstrap_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, n_bootstraps, job_id):\n\n np.random.seed(job_id)\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n pearson_array = get_correlation(spreadsheet_df_trimmed.as_matrix(), phenotype_df_trimmed.values[0], run_parameters)\n borda_count = np.zeros(spreadsheet_df.shape[0])\n gm_accumulator = np.ones(spreadsheet_df.shape[0])\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n spreadsheet_df_trimmed.as_matrix(), 1.0, run_parameters[\"cols_sampling_fraction\"])\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n borda_count = sum_array_ranking_to_borda_count(borda_count, np.abs(pc_array))\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n borda_count = borda_count / n_bootstraps\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n generate_bootstrap_correlation_output(borda_count, viz_score, pearson_array,\n phenotype_name, gene_name_list, run_parameters)",
"def _calculate_cc(self, array, corr_range, tau_max, lag_mode):\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus INCLUDING the last tau value\n for t in range(2*tau_max+1):\n\n # here the actual cross correlation is calculated\n crossij = (array[tau_max, i, :] * array[t, j, :]).mean()\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = crossij\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += numpy.abs(crossij)\n if t >= tau_max:\n corrmat[0, i, j] += numpy.abs(crossij)\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if numpy.abs(crossij) > maxcross:\n maxcross = numpy.abs(crossij)\n argmax = t\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax - tau_max\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n elif lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat",
"def run_bootstrap_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n restart_accumulator = np.zeros(network_mat.shape[0])\n gm_accumulator = np.ones(network_mat.shape[0])\n borda_count = np.zeros(network_mat.shape[0])\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pearson_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n sample_smooth, 1.0, run_parameters[\"cols_sampling_fraction\"])\n\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator[pc_array != 0] += 1.0\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n pc_array = pc_array - baseline_array\n\n borda_count = sum_array_ranking_to_borda_count(borda_count, pc_array)\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n\n restart_accumulator = restart_accumulator / n_bootstraps\n borda_count = borda_count / n_bootstraps\n # pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n quantitative_score = borda_count\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)",
"def corr():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n c = dagpype_c.Correlator()\n try:\n while True:\n x, y = (yield)\n c.push(float(x), float(y))\n except GeneratorExit:\n target.send(c.corr())\n target.close()\n return _dagpype_internal_fn_act",
"def compute_pol_correlation_single(x, y, nbin, rmax, lx, ly, natoms, mol, tx, ty):\n ### allocate array to store the results\n pol_correlation = np.zeros((nbin), dtype = np.float64)\n counter = np.zeros((nbin), dtype = np.float64)\n ### generate a linked list of the com\n nsegx, nsegy, head, llist = gen_linked_list(x,y,lx,ly,rmax, natoms)\n ### loop over the linked list\n etcorrelation = equal_time_correlationWrapper.Equal_time_correlation()\n etcorrelation.compute(nsegx, nsegy, natoms, head, llist, mol, x, y, pol_correlation, lx, ly, rmax, nbin, tx, ty, counter)\n return pol_correlation, counter",
"def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2",
"def test_run_grouped_correlation(self):\r\n # hand calculation of spearman and pearson for 01\r\n # md_g1 = array([6.1, 0.0, 14.2, 6.5, 21])\r\n # md_g2 = array([.3, 9.1, .8, 5.0, 11])\r\n # o1_g1 = array([22, 48, 34, 0, 0])\r\n # o1_g2 = array([0, 15, 0, 76, 74])\r\n # c1_g1 = -0.6155870112510925 #spearman(md_g1, o1_g1)\r\n # c2_g2 = 0.66688592885535025 #spearman(md_g2, o1_g2)\r\n # fisher_population_correlation([-0.6155870112510925,\r\n # 0.66688592885535025], [5,5])\r\n # fpc, h = (0.043595171909468329, 0.12776325359984511)\r\n g1_rhos = [corrcoef(self.otus1[0][i], self.mds1[0])[0][1]\r\n for i in range(10)]\r\n g2_rhos = [corrcoef(self.otus1[1][i], self.mds1[1])[0][1]\r\n for i in range(10)]\r\n exp_rhos = [g1_rhos, g2_rhos]\r\n g1_pvals = [assign_correlation_pval(g1_rhos[i], 5,\r\n 'parametric_t_distribution') for i in range(10)]\r\n g2_pvals = [assign_correlation_pval(g2_rhos[i], 5,\r\n 'parametric_t_distribution') for i in range(10)]\r\n exp_pvals = [g1_pvals, g2_pvals]\r\n exp_f_pvals = [fisher([g1_pvals[i], g2_pvals[i]]) for i in range(10)]\r\n\r\n tmp = [fisher_population_correlation([g1_rhos[i], g2_rhos[i]], [5, 5])\r\n for i in range(10)]\r\n exp_f_rhos = [x[0] for x in tmp]\r\n exp_f_hs = [x[1] for x in tmp]\r\n\r\n obs_rhos, obs_pvals, obs_f_pvals, obs_f_rhos, obs_f_hs = \\\r\n run_grouped_correlation(self.mds1, self.otus1, 'pearson',\r\n CORRELATION_TEST_CHOICES, 'parametric_t_distribution')\r\n\r\n assert_almost_equal(obs_rhos, exp_rhos)\r\n assert_almost_equal(obs_pvals, exp_pvals)\r\n assert_almost_equal(obs_f_pvals, exp_f_pvals)\r\n assert_almost_equal(obs_f_rhos, exp_f_rhos)\r\n assert_almost_equal(obs_f_hs, exp_f_hs)",
"def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue",
"def test(size_timeseries, num_timeseries):\n data = random_data(num_timeseries, size_timeseries)\n correlations_dfe = [0.0] * calc_num_correlations(num_timeseries)\n correlations_cpu = [0.0] * calc_num_correlations(num_timeseries)\n indices_step = [0.0] * calc_num_correlations(num_timeseries) * 2\n\n start_time = time.time()\n correlate_dfe(data, size_timeseries, num_timeseries, correlations_dfe)\n dfe_total = time.time() - start_time\n print 'DFE correlation total time:\\t%.5lfs' % dfe_total\n\n start_time = time.time()\n correlate_cpu(data, num_timeseries, size_timeseries,\n correlations_cpu, indices_step)\n cpu_total = time.time() - start_time\n print 'CPU correlation total time:\\t%.5lfs' % cpu_total\n\n check(correlations_dfe, correlations_cpu, num_timeseries, indices_step)",
"def pearson_correlation(sim, obs, dim=\"time\"):\n # wrap numpy function\n kwargs = dict(\n input_core_dims=[[dim], [dim]], dask=\"parallelized\", output_dtypes=[float]\n )\n pearsonr = xr.apply_ufunc(_pearson_correlation, sim, obs, **kwargs)\n pearsonr.name = \"pearson_coef\"\n return pearsonr",
"def _compute_correlations(self, data):\n mappings = self.mappings_\n n_channels, n_times = data.shape\n\n # get the predictions\n y_pred = data.T.dot(mappings.T)\n y_pred = y_pred.reshape((n_times, len(self.picks),\n self.n_resample), order='F')\n # pool them using median\n # XXX: weird that original implementation sorts and takes middle value.\n # Isn't really the median if n_resample even\n y_pred = np.median(y_pred, axis=-1)\n # compute correlation\n num = np.sum(data.T * y_pred, axis=0)\n denom = (np.sqrt(np.sum(data.T ** 2, axis=0)) *\n np.sqrt(np.sum(y_pred ** 2, axis=0)))\n\n corr = num / denom\n return corr",
"def corr(self):\n pass",
"def double_sum_covar(list_tuple_errs: list[float], corr_ranges: list[float], list_area_tot: list[float],\n list_lat: list[float], list_lon: list[float], nproc: int = 1) -> float:\n n = len(list_tuple_errs)\n\n if nproc == 1:\n print('Deriving double covariance sum with 1 core...')\n var_err = 0\n for i in range(n):\n for j in range(n):\n d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))\n for k in range(len(corr_ranges)):\n var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \\\n list_area_tot[i] * list_area_tot[j]\n else:\n print('Deriving double covariance sum with '+str(nproc)+' cores...')\n pack_size = int(np.ceil(n/nproc))\n argsin = [(list_tuple_errs, corr_ranges, list_area_tot, list_lon, list_lat, np.arange(\n i, min(i+pack_size, n))) for k, i in enumerate(np.arange(0, n, pack_size))]\n pool = mp.Pool(nproc, maxtasksperchild=1)\n outputs = pool.map(part_covar_sum, argsin, chunksize=1)\n pool.close()\n pool.join()\n\n var_err = np.sum(np.array(outputs))\n\n area_tot = 0\n for j in range(len(list_area_tot)):\n area_tot += list_area_tot[j]\n\n var_err /= np.nansum(area_tot) ** 2\n\n return np.sqrt(var_err)",
"def test_correlation(self):\r\n x = [1, 2, 3, 5]\r\n y = [0, 0, 0, 0]\r\n z = [1, 1, 1, 1]\r\n a = [2, 4, 6, 8]\r\n b = [1.5, 1.4, 1.2, 1.1]\r\n c = [15, 10, 5, 20]\r\n\r\n bad = [1, 2, 3] # originally gave r = 1.0000000002\r\n\r\n self.assertFloatEqual(correlation(x, x), (1, 0))\r\n self.assertFloatEqual(correlation(x, y), (0, 1))\r\n self.assertFloatEqual(correlation(y, z), (0, 1))\r\n self.assertFloatEqualAbs(correlation(x, a), (0.9827076, 0.01729), 1e-5)\r\n self.assertFloatEqualAbs(\r\n correlation(x, b), (-0.9621405, 0.03786), 1e-5)\r\n self.assertFloatEqualAbs(correlation(x, c), (0.3779645, 0.622), 1e-3)\r\n self.assertEqual(correlation(bad, bad), (1, 0))",
"def get_correlation(self,\n incl_chromosomes,\n fasta_file=None,\n annotation_files=None,\n weights_eval=False,\n *args,\n **kwargs):\n if self.generator_train.__class__.__name__ == 'MultiGenerator':\n assert fasta_file and annotation_files,\\\n \"\"\" To evaluate a MultiGenerator model, the fasta file and the\n annotation file need to be passed as inputs.\"\"\"\n command_dict = self.generator_train.command_dict[0]\n\n if 'keras_dna.sequence.SeqIntervalDl' in command_dict.get_details():\n one_hot_encoding = True\n else:\n one_hot_encoding = False\n batch_size = self.generator_train.command_dict[-1].as_input()['batch_size']\n output_shape = self.generator_train.command_dict[-1].as_input()['output_shape']\n else:\n command_dict = self.generator_train.command_dict\n one_hot_encoding = command_dict.as_input()['one_hot_encoding']\n batch_size = command_dict.as_input()['batch_size']\n output_shape = command_dict.as_input()['output_shape']\n\n assert 'keras_dna.sequence.ContinuousDataset' in command_dict.get_details(),\\\n \"\"\"Correlation score is only available for continuous dataset\"\"\"\n \n dico = command_dict.get_details()['keras_dna.sequence.ContinuousDataset']\n if dico['nb_annotation_type']:\n nb_annotation = dico['nb_annotation_type']\n else:\n nb_annotation = 1\n \n if annotation_files:\n if isinstance(dico['annotation_files'], list):\n assert len(annotation_files) == len(dico['annotation_files']),\\\n \"\"\"annotation_files must be a list with the name number of\n entries as annotation_files in the generator, complete with\n zeros if needed\"\"\"\n else:\n assert len(annotation_files) == 1,\\\n \"\"\"annotation_files must be a list with the same number of\n entries as annotation_files in the generator, complete with\n zeros if needed\"\"\"\n indexes = np.where(np.array(annotation_files) != '0')[0]\n \n else:\n if isinstance(dico['annotation_files'], list):\n indexes = range(len(dico['annotation_files']))\n else:\n indexes = [0]\n \n if isinstance(dico['annotation_files'], list):\n nb_types = len(dico['annotation_files']) // nb_annotation\n else:\n nb_types = 1\n\n eval_dict = deepcopy(command_dict.as_input())\n\n if fasta_file:\n eval_dict['fasta_file'] = fasta_file\n\n if annotation_files:\n annotation_files = np.array(annotation_files)\n annotation_files[annotation_files == '0'] = annotation_files[indexes[0]]\n eval_dict['annotation_files'] = list(annotation_files)\n\n eval_dict['incl_chromosomes'] = incl_chromosomes\n eval_dict['batch_size'] = batch_size\n eval_dict['one_hot_encoding'] = one_hot_encoding\n eval_dict['output_shape'] = output_shape\n eval_dict['overlapping'] = False\n\n if not weights_eval:\n eval_dict['weighting_mode'] = None\n\n generator_eval = Generator(**eval_dict)\n\n metrics = [partial(correlate,\n cell_idx=int(idx / nb_annotation),\n idx=int(idx % nb_annotation),\n nb_types=int(nb_types),\n nb_annotation=int(nb_annotation)) for idx in indexes]\n \n for idx, metric in zip(indexes, metrics):\n metric.__name__ = 'correlate_{}_{}'.format(int(idx / nb_annotation),\n int(idx % nb_annotation))\n\n model = clone_model(self.model)\n for i, layer in enumerate(self.model.layers):\n model.layers[i].set_weights(layer.get_weights())\n\n model.compile(optimizer=self.model.optimizer,\n loss=self.model.loss,\n metrics=metrics)\n evaluations = model.evaluate_generator(generator=generator_eval(),\n steps=len(generator_eval),\n *args,\n **kwargs)\n \n return {'correlate_{}_{}'.format(int(idx / nb_annotation),\\\n idx % nb_annotation) : evaluations[idx + 1] for idx in 
indexes}",
"def run_bootstrap_net_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n network_df = kn.get_network_df(run_parameters['gg_network_name_full_path'])\n node_1_names, node_2_names = kn.extract_network_node_names(network_df)\n unique_gene_names = kn.find_unique_node_names(node_1_names, node_2_names)\n unique_gene_names = sorted(unique_gene_names)\n genes_lookup_table = kn.create_node_names_dict(unique_gene_names)\n\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_1')\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_2')\n network_df = kn.symmetrize_df(network_df)\n network_mat_sparse = kn.convert_network_df_to_sparse(\n network_df, len(unique_gene_names), len(unique_gene_names))\n\n network_mat = normalize(network_mat_sparse, norm=\"l1\", axis=0)\n\n del network_df\n del network_mat_sparse\n del node_1_names\n del node_2_names\n del genes_lookup_table\n gc.collect()\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n spreadsheet_genes_as_input = spreadsheet_df.index.values\n phenotype_response_df = phenotype_response_df.T\n\n spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)\n spreadsheet_df = zscore_dataframe(spreadsheet_df)\n sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.as_matrix(), network_mat.T, run_parameters)\n spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)\n\n baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]\n baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, jobs_id)\n dstutil.parallelize_processes_locally(run_bootstrap_net_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def run_net_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n network_df = kn.get_network_df(run_parameters['gg_network_name_full_path'])\n\n node_1_names, node_2_names = kn.extract_network_node_names(network_df)\n unique_gene_names = kn.find_unique_node_names(node_1_names, node_2_names)\n\n unique_gene_names = sorted(unique_gene_names)\n\n genes_lookup_table = kn.create_node_names_dict(unique_gene_names)\n\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_1')\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_2')\n\n network_df = kn.symmetrize_df(network_df)\n network_mat_sparse = kn.convert_network_df_to_sparse(\n network_df, len(unique_gene_names), len(unique_gene_names))\n\n network_mat = normalize(network_mat_sparse, norm=\"l1\", axis=0)\n\n del network_df\n del network_mat_sparse\n del node_1_names\n del node_2_names\n del genes_lookup_table\n gc.collect()\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n spreadsheet_genes_as_input = spreadsheet_df.index.values\n phenotype_response_df = phenotype_response_df.T\n\n spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)\n spreadsheet_df = zscore_dataframe(spreadsheet_df)\n\n sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.as_matrix(), network_mat.T, run_parameters)\n spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)\n\n baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]\n baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, jobs_id)\n dstutil.parallelize_processes_locally(run_net_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def test_compute_correlation_paired(self):\r\n # Verified using R's cor.test function.\r\n exp = ((0.90243902439024193, 0.013812916237431808, 0.03,\r\n (0.33958335414859975, 0.98938788428012969)), None)\r\n \r\n np.random.seed(self.value_for_seed)\r\n obs = _compute_correlation(self.taxa_summary_paired1,\r\n self.taxa_summary_paired2, 'paired', 'pearson', 'two-sided',\r\n 999, 0.95)\r\n self.compare_multiple_level_array(obs, exp)",
"def _set_correlations(self) -> None:\n pass",
"def correlation(\n self,\n freq_1: float,\n time_1: float,\n freq_2: Optional[float] = None,\n time_2: Optional[float] = None,\n dw: Optional[tuple] = (1.0, 1.0),\n dagg: Optional[tuple] = (1, 0),\n interaction_picture: Optional[bool] = False,\n change_only: Optional[bool] = False,\n progress_type: Optional[Text] = None) -> complex:\n dt = self._process_tensor.dt\n if time_2 is None:\n time_2 = time_1\n if freq_2 is None:\n freq_2 = freq_1\n self.generate_system_correlations(time_2, progress_type)\n corr_mat_dim = int(np.round(time_2/dt))\n _sys_correlations = self._system_correlations[:corr_mat_dim,\n :corr_mat_dim]\n _sys_correlations = np.nan_to_num(_sys_correlations)\n re_kernel,im_kernel = self._calc_kernel(freq_1, time_1,\n freq_2, time_2, dagg)\n coup_1 = dw[0] * self._bath.correlations.spectral_density(freq_1)**0.5\n coup_2 = dw[1] * self._bath.correlations.spectral_density(freq_2)**0.5\n correlation = np.sum(_sys_correlations.real*re_kernel + \\\n 1j*_sys_correlations.imag*im_kernel) * \\\n coup_1 * coup_2\n if (not change_only) and (freq_1 == freq_2) \\\n and (dagg in ((1, 0), (0, 1))):\n if self._temp > 0:\n correlation += np.exp(-freq_1/self._temp) \\\n / (1 - np.exp(-freq_1/self._temp))\n if dagg == (0, 1):\n correlation += 1\n\n if not interaction_picture:\n correlation *= np.exp(1j * ((2*dagg[0] - 1) * freq_2 * time_2 + \\\n (2*dagg[1] - 1) * freq_1 * time_1))\n return correlation",
"def mimo_sync(self,re1,im1,re2,im2):\n wnd = np.int_(4*(self._GI + self._FFT))\n Nprep = np.int_(self._FFT/2)\n mavg = np.int_(self._FFT/4) # moving average period for power and corr12\n mavg3 = 2*self._FFT # average period for corr3\n if np.size(re1)!=np.size(im1) or np.size(re2)!=np.size(im2) or np.size(re1)!=np.size(re2):\n raise Exception(\"Vectors re1, im1, re2, im2 do not have the same length!!!\")\n if np.size(re1) < (wnd-mavg+mavg3+self._FFT/2):\n raise Exception(\"Vectors re1, im1, re2, im2 not long enough ({}) to run synchronization (required length={})!!!\".format(np.size(re1),wnd-mavg+mavg3+self._FFT/2))\n iqcpx = np.empty(re1.shape, dtype=complex)\n iqcpx.real = (re1+re2)/2\n iqcpx.imag = (im1+im2)/2\n iqdata = np.concatenate((np.zeros(Nprep,),iqcpx))\n power = np.zeros((wnd,1))\n corr12 = np.zeros((wnd,1), dtype=complex)\n corr3 = np.zeros((wnd,1), dtype=complex)\n # perform the autocorrelation on the STF symbols\n for n in range(0, wnd-mavg):\n power[n] = np.real(np.dot(iqdata[n:n+mavg].transpose(),\n iqdata[n:n+mavg].conjugate())/mavg)\n corr12[n+mavg] = np.sum(iqdata[n+self._FFT/4:n+self._FFT/4+mavg] *\n np.conj(iqdata[n+self._FFT/2:n+self._FFT/2+mavg]) -\n iqdata[n:n+mavg] *\n np.conj(iqdata[n+self._FFT/4:n+self._FFT/4+mavg]))\n corr3[n+mavg] = np.dot(np.transpose(iqdata[n+self._FFT/4:n+self._FFT/4+mavg3]),\n np.conj(iqdata[n+self._FFT/2:n+self._FFT/2+mavg3]))\n # get first index where power rises above threshold\n idx1 = np.flatnonzero((power>0.75*np.sum(power)/np.size(power)))[0]\n idx2 = np.argmax(np.abs(corr12[idx1:idx1+self._FFT/2]))\n idx = idx1+idx2-Nprep\n c3i = idx1+idx2-Nprep-1+mavg\n # get the phase at the start index and calculate the frequency offset\n fo_meas = -np.angle(np.mean(corr3[c3i:c3i+mavg]))/(np.pi/2*self._FFT)*self._FS\n return idx, fo_meas",
"def correlate(fft1,fft2, maxlag,dt, Nfft, method=\"cross-correlation\"):\n\n if fft1.ndim == 1:\n nwin=1\n elif fft1.ndim == 2:\n nwin= int(fft1.shape[0])\n\n t0=time.time()\n corr=np.zeros(shape=(nwin,Nfft),dtype=np.complex64)\n fft1_globe = cuda.to_device(fft1[:,:Nfft//2].reshape(fft1.size,))\n fft2_globe = cuda.to_device(fft2[:,:Nfft//2].reshape(fft2.size,))\n corr_globe = cuda.device_array(shape=(nwin*(Nfft//2),),dtype=np.complex64)\n \n threadsperblock = 2000\n blockspergrid = math.ceil(fft1_globe.size/threadsperblock)\n \n if method == 'deconv':\n decon_gpu[threadsperblock,blockspergrid](fft1_globe,fft2_globe,corr_globe)\n elif method =='coherence':\n coherence_gpu[threadsperblock,blockspergrid](fft1_globe,fft2_globe,corr_globe)\n\n tcorr = corr_globe.copy_to_host()\n corr = tcorr.reshape(nwin,Nfft//2)\n\n ncorr = np.zeros(shape=Nfft,dtype=np.complex64)\n ncorr[:Nfft//2] = np.mean(corr,axis=0)\n ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)\n ncorr[0]=complex(0,0)\n ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))\n\n t1=time.time()\n print('it takes '+str(t1-t0)+' s')\n\n tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt\n ind = np.where(np.abs(tcorr) <= maxlag)[0]\n ncorr = ncorr[ind]\n tcorr = tcorr[ind]\n\n return ncorr,tcorr",
"def corr(arr1, arr2):\n\n\n X = []\n Y = []\n for index in range(len(arr1)):\n if arr1[index] == None or arr2[index] == None:\n continue\n X.append(arr1[index])\n Y.append(arr2[index])\n\n\n r = np.corrcoef(X, Y)[0,1]\n f = 0.5*np.log((1+r)/(1-r))\n se = 1/np.sqrt(len(X)-3)\n ucl = f + 2*se\n lcl = f - 2*se\n\n lcl = (np.exp(2*lcl) - 1) / (np.exp(2*lcl) + 1)\n ucl = (np.exp(2*ucl) - 1) / (np.exp(2*ucl) + 1)\n\n return r,lcl,ucl"
] | [
"0.70680016",
"0.6528472",
"0.6504297",
"0.63749677",
"0.6360196",
"0.63229495",
"0.6294331",
"0.6224919",
"0.62199193",
"0.62124133",
"0.6204594",
"0.61613137",
"0.6061361",
"0.60537165",
"0.6026039",
"0.6024764",
"0.5963405",
"0.59373075",
"0.59083116",
"0.59070826",
"0.5844545",
"0.58440036",
"0.58289564",
"0.5821002",
"0.57943004",
"0.5790324",
"0.5788008",
"0.5774721",
"0.5774278",
"0.5750012"
] | 0.657478 | 1 |
core function for parallel run_bootstrap_correlation | def run_bootstrap_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, n_bootstraps, job_id):
np.random.seed(job_id)
phenotype_df = phenotype_df.iloc[[job_id], :]
spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(
spreadsheet_df, phenotype_df)
pearson_array = get_correlation(spreadsheet_df_trimmed.as_matrix(), phenotype_df_trimmed.values[0], run_parameters)
borda_count = np.zeros(spreadsheet_df.shape[0])
gm_accumulator = np.ones(spreadsheet_df.shape[0])
for bootstrap_number in range(0, n_bootstraps):
sample_random, sample_permutation = sample_a_matrix_pearson(
spreadsheet_df_trimmed.as_matrix(), 1.0, run_parameters["cols_sampling_fraction"])
phenotype_response = phenotype_df_trimmed.values[0, None]
phenotype_response = phenotype_response[0, sample_permutation]
pc_array = get_correlation(sample_random, phenotype_response, run_parameters)
borda_count = sum_array_ranking_to_borda_count(borda_count, np.abs(pc_array))
gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator
pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)
borda_count = borda_count / n_bootstraps
phenotype_name = phenotype_df_trimmed.index.values[0]
gene_name_list = spreadsheet_df_trimmed.index
viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))
generate_bootstrap_correlation_output(borda_count, viz_score, pearson_array,
phenotype_name, gene_name_list, run_parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_bootstrap_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n phenotype_response_df = phenotype_response_df.T\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, n_bootstraps, jobs_id)\n dstutil.parallelize_processes_locally(run_bootstrap_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def run_bootstrap_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n restart_accumulator = np.zeros(network_mat.shape[0])\n gm_accumulator = np.ones(network_mat.shape[0])\n borda_count = np.zeros(network_mat.shape[0])\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pearson_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n sample_smooth, 1.0, run_parameters[\"cols_sampling_fraction\"])\n\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator[pc_array != 0] += 1.0\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n pc_array = pc_array - baseline_array\n\n borda_count = sum_array_ranking_to_borda_count(borda_count, pc_array)\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n\n restart_accumulator = restart_accumulator / n_bootstraps\n borda_count = borda_count / n_bootstraps\n # pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n quantitative_score = borda_count\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)",
"def run_bootstrap_net_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n network_df = kn.get_network_df(run_parameters['gg_network_name_full_path'])\n node_1_names, node_2_names = kn.extract_network_node_names(network_df)\n unique_gene_names = kn.find_unique_node_names(node_1_names, node_2_names)\n unique_gene_names = sorted(unique_gene_names)\n genes_lookup_table = kn.create_node_names_dict(unique_gene_names)\n\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_1')\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_2')\n network_df = kn.symmetrize_df(network_df)\n network_mat_sparse = kn.convert_network_df_to_sparse(\n network_df, len(unique_gene_names), len(unique_gene_names))\n\n network_mat = normalize(network_mat_sparse, norm=\"l1\", axis=0)\n\n del network_df\n del network_mat_sparse\n del node_1_names\n del node_2_names\n del genes_lookup_table\n gc.collect()\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n spreadsheet_genes_as_input = spreadsheet_df.index.values\n phenotype_response_df = phenotype_response_df.T\n\n spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)\n spreadsheet_df = zscore_dataframe(spreadsheet_df)\n sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.as_matrix(), network_mat.T, run_parameters)\n spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)\n\n baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]\n baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, jobs_id)\n dstutil.parallelize_processes_locally(run_bootstrap_net_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def calculate_correlation(data):\n pass",
"def auto_correlation(arr):\n return cross_correlation(arr, arr)",
"def run_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pc_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n pearson_array = pc_array.copy()\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator = pc_array.copy()\n restart_accumulator[restart_accumulator != 0] = 1\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n\n pc_array = pc_array - baseline_array\n quantitative_score = pc_array\n viz_score = (pc_array - min(pc_array)) / (max(pc_array) - min(pc_array))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)",
"def run_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, job_id):\n # selects the ith row in phenotype_df\n\n np.random.seed(job_id)\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n\n spreadsheet_df_trimmed, phenotype_df_trimmed, err_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n pc_array = get_correlation(spreadsheet_df_trimmed.as_matrix(), phenotype_df_trimmed.values[0], run_parameters)\n\n gene_name_list = spreadsheet_df_trimmed.index\n phenotype_name = phenotype_df.index.values[0]\n generate_correlation_output(pc_array, phenotype_name, gene_name_list, run_parameters)",
"def correlate_cpu(data, num_timeseries, size_timeseries,\n correlations_cpu, indices_step):\n num_timesteps = size_timeseries\n window_size = size_timeseries\n\n sums = [0.0] * num_timeseries\n sums_sq = [0.0] * num_timeseries\n sums_xy = [0.0] * calc_num_correlations(num_timeseries)\n\n for k in range(num_timesteps):\n index_correlation = 0\n\n for i in range(num_timeseries):\n old = 0.0 if k < window_size else data[i][k - window_size]\n new = data[i][k]\n sums[i] += new - old\n sums_sq[i] += new * new - old * old\n\n for i in range(num_timeseries):\n old_x = 0.0 if k < window_size else data[i][k - window_size]\n new_x = data[i][k]\n\n for j in range(i):\n old_y = 0.0 if k < window_size else data[j][k - window_size]\n new_y = data[j][k]\n sums_xy[index_correlation] += new_x * new_y - old_x * old_y\n numerator = (window_size * sums_xy[index_correlation] -\n sums[i] * sums[j])\n denominator = ((1 / math.sqrt(window_size * sums_sq[i] -\n sums[i] * sums[i])) *\n (1 / math.sqrt(window_size * sums_sq[j] -\n sums[j] * sums[j])))\n correlations_cpu[index_correlation] = numerator * denominator\n indices_step[2 * index_correlation] = j\n indices_step[2 * index_correlation + 1] = i\n index_correlation += 1",
"def get_correlation_array(self, src):\n self.correlation_array = parallel.call_and_bcast(self.get_array, src)",
"def compute_theta():\n\n ### Compute consensus profile: ###\n # Compute corefficient vector (Probability Distribution):\n # th = 0.0001\n # ca = 0.5\n CV = find_CV(th=0.0001, ca=0.5, sd=1)\n\n list_orfs = list( scikit_data.keys() )\n mc_dict = {}\n theta_df = pd.DataFrame(columns=['ORF', 'p_5', 'p_10', 'p_20', 'p_80', 'p_90', 'p_95', 'p3_5', 'p3_10', 'p3_20', 'p3_80', 'p3_90', 'p3_95'])\n\n peaks = True # reshuffle previously identified peak positions instead of consernsus profile as here equivalent and faster\n\n counter = 0\n for ix, orf in enumerate(list_orfs):\n\n current_data = scikit_data[orf]\n\n current_mm = mm_consensus[orf] # boolean: True for good sequence, False for multi-mapping\n print(ix, orf, current_data.shape[1], len(current_mm))\n if current_data.shape[1] == len(current_mm):\n\n current_data_mm = current_data[:,current_mm] # for randomized consensus, chop \n current_data[:,~current_mm] = 0 # after, for false consensus (i.e. multimapping), set to 0\n mc_dict[orf], current_peaks = run_mc(current_data, CV)\n\n if peaks:\n max_iter = 100 \n pool = mp.Pool(processes=10)\n output = pool.map(rand_mc_frompeaks, [current_peaks for iteration in range(max_iter)])\n output = np.array(output)\n pool.close()\n\n else:\n max_iter = 100 \n pool = mp.Pool(processes=10)\n output = pool.map(get_rand_mc, [current_data_mm for iteration in range(max_iter)])\n output = np.array(output)\n pool.close()\n \n output3 = np.zeros(( output.shape[0], output.shape[1]-2 ))\n for rand_experiment in range(output3.shape[0]):\n for position in range(output3.shape[1]-2): #to get kmers of length 3\n output3[rand_experiment, position] = np.mean(output[rand_experiment, position:position+3])\n\n p_5 = np.around( np.percentile(output, 5), 5)\n p_10 = np.around( np.percentile(output, 10), 5)\n p_20 = np.around( np.percentile(output, 20), 5)\n p_80 = np.around( np.percentile(output, 80), 5)\n p_90 = np.around( np.percentile(output, 90), 5)\n p_95 = np.around( np.percentile(output, 95), 5)\n \n p3_5 = np.around( np.percentile(output3, 5), 5)\n p3_10 = np.around( np.percentile(output3, 10), 5)\n p3_20 = np.around( np.percentile(output3, 20), 5)\n p3_80 = np.around( np.percentile(output3, 80), 5)\n p3_90 = np.around( np.percentile(output3, 90), 5)\n p3_95 = np.around( np.percentile(output3, 95), 5)\n \n theta_df.loc[counter] = [orf, p_5, p_10, p_20, p_80, p_90, p_95, p3_5, p3_10, p3_20, p3_80, p3_90, p3_95]\n counter += 1\n\n theta_df.to_csv(\"../data/figures/figure3/theta.txt\", header=True, index=False, sep='\\t')",
"def test_run_grouped_correlation(self):\r\n # hand calculation of spearman and pearson for 01\r\n # md_g1 = array([6.1, 0.0, 14.2, 6.5, 21])\r\n # md_g2 = array([.3, 9.1, .8, 5.0, 11])\r\n # o1_g1 = array([22, 48, 34, 0, 0])\r\n # o1_g2 = array([0, 15, 0, 76, 74])\r\n # c1_g1 = -0.6155870112510925 #spearman(md_g1, o1_g1)\r\n # c2_g2 = 0.66688592885535025 #spearman(md_g2, o1_g2)\r\n # fisher_population_correlation([-0.6155870112510925,\r\n # 0.66688592885535025], [5,5])\r\n # fpc, h = (0.043595171909468329, 0.12776325359984511)\r\n g1_rhos = [corrcoef(self.otus1[0][i], self.mds1[0])[0][1]\r\n for i in range(10)]\r\n g2_rhos = [corrcoef(self.otus1[1][i], self.mds1[1])[0][1]\r\n for i in range(10)]\r\n exp_rhos = [g1_rhos, g2_rhos]\r\n g1_pvals = [assign_correlation_pval(g1_rhos[i], 5,\r\n 'parametric_t_distribution') for i in range(10)]\r\n g2_pvals = [assign_correlation_pval(g2_rhos[i], 5,\r\n 'parametric_t_distribution') for i in range(10)]\r\n exp_pvals = [g1_pvals, g2_pvals]\r\n exp_f_pvals = [fisher([g1_pvals[i], g2_pvals[i]]) for i in range(10)]\r\n\r\n tmp = [fisher_population_correlation([g1_rhos[i], g2_rhos[i]], [5, 5])\r\n for i in range(10)]\r\n exp_f_rhos = [x[0] for x in tmp]\r\n exp_f_hs = [x[1] for x in tmp]\r\n\r\n obs_rhos, obs_pvals, obs_f_pvals, obs_f_rhos, obs_f_hs = \\\r\n run_grouped_correlation(self.mds1, self.otus1, 'pearson',\r\n CORRELATION_TEST_CHOICES, 'parametric_t_distribution')\r\n\r\n assert_almost_equal(obs_rhos, exp_rhos)\r\n assert_almost_equal(obs_pvals, exp_pvals)\r\n assert_almost_equal(obs_f_pvals, exp_f_pvals)\r\n assert_almost_equal(obs_f_rhos, exp_f_rhos)\r\n assert_almost_equal(obs_f_hs, exp_f_hs)",
"def ParallelToserial(self):\n pass",
"def bootstrap_roc_(args: Tuple[ROC, int]) -> ROC:\n cur_roc, seed = args\n bs_roc = cur_roc.bootstrap(seed)\n bs_roc.roc()\n return bs_roc",
"def generate_bootstrap_correlation_output(borda_count, viz_score, pearson_array, \n phenotype_name, gene_name_list, run_parameters):\n phenotype_name_list = np.repeat(phenotype_name, len(gene_name_list))\n viz_score = np.round(viz_score, 8)\n borda_count = np.round(borda_count, 8)\n pearson_array = np.round(pearson_array, 8)\n\n output_val = np.column_stack(\n (phenotype_name_list, gene_name_list, borda_count, viz_score, pearson_array))\n\n df_header = ['Response', 'Gene_ENSEMBL_ID', 'quantitative_sorting_score', 'visualization_score', 'baseline_score']\n result_df = pd.DataFrame(output_val, columns=df_header).sort_values(\"visualization_score\", ascending=0)\n result_df.index = range(result_df.shape[0])\n\n write_one_phenotype(result_df, phenotype_name, gene_name_list, run_parameters)",
"def clusterBootstrap(options):\n\n log = open(options.log,\"a\",0)\n log.write(\"\\n %s: running in cluster-bootstrap mode with %i \" \\\n \"iterations\" % (timeStr(), int(options.nbootstrap)))\n log.write(\"\\n %s: writing bootstrap data pickle...\" % timeStr())\n\n # read existing restart information\n file = open(\"%s/restart.pkl\" % options.tmpdir,\"r\")\n bootstrap_restardic = cPickle.load(file)\n file.close()\n\n # modify bootstrap options; set bootstrap slave, --nocluster\n # and get the load distributions\n bootstrap_restardic[\"options\"].bootmaster = False\n bootstrap_restardic[\"options\"].nocluster = True\n bootstrap_restardic[\"options\"].restart = True\n\n # write new restart pkl\n boot_restart_file = \"%s/bootruns_restart.pkl\" % options.tmpdir\n file = open(boot_restart_file,\"w\")\n cPickle.dump(bootstrap_restardic, file)\n file.close()\n\n # use drmaa to spread multiple jobs across the cluster.\n c_session = drmaa.Session()\n c_session.initialize()\n\n jobloads = clusterDistribute(int(options.nbootstrap),\n int(options.njobs),\n options)\n\n # loop through the jobloads:\n i=0\n joblist = []\n boostrap_files = []\n boostrap_logs = []\n boostrap_dbs = []\n for jobload in jobloads:\n j=0\n while j<jobload[0]:\n results = runClusterBootstrap(c_session, i, int(jobload[1]),\n boot_restart_file, options)\n\n joblist.append(results[0])\n boostrap_files.append(results[1])\n boostrap_logs.append(results[2])\n boostrap_dbs.append(results[3])\n i+=1\n j+=1\n\n log.write(\"\\n %s: %s jobs launched...\" \\\n \"\" % (timeStr(), os.path.basename(sys.argv[0]))\n )\n\n prefix = \"%s/%s\" % (options.tmpdir, os.path.basename(sys.argv[0]))\n exit_status, sucess, fail, options = monitorDrmaaJobs(c_session, joblist,\n boostrap_dbs, prefix,\n options, delo=False,\n dele=False)\n\n # clearup job metadata and information\n for jobid in joblist:\n c_session.synchronize(jobid,\n drmaa.Session.TIMEOUT_WAIT_FOREVER,\n True)\n\n c_session.exit()\n\n log.close()\n return(boostrap_logs, boostrap_dbs)",
"def main():\n logfile = setup_log(os.path.join(os.environ['decor'], 'logs',\n 'transform_corr'))\n logfile.info('Started 9.transform_corr.py')\n\n subj_list = ['RSDE', 'VREA']\n for subject in subj_list:\n os.chdir(os.path.join(os.environ['decor'], subject, '6mmblur_results'))\n for m in ['AV', 'A', 'V', 'lowlev']:\n tcorr_suf = '6mmblur_tcorr_out_spearman'\n setnames_call_funcs(logfile, subject, m, tcorr_suf)",
"def corr():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n c = dagpype_c.Correlator()\n try:\n while True:\n x, y = (yield)\n c.push(float(x), float(y))\n except GeneratorExit:\n target.send(c.corr())\n target.close()\n return _dagpype_internal_fn_act",
"def _calculate_cc(self, array, corr_range, tau_max, lag_mode):\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus INCLUDING the last tau value\n for t in range(2*tau_max+1):\n\n # here the actual cross correlation is calculated\n crossij = (array[tau_max, i, :] * array[t, j, :]).mean()\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = crossij\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += numpy.abs(crossij)\n if t >= tau_max:\n corrmat[0, i, j] += numpy.abs(crossij)\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if numpy.abs(crossij) > maxcross:\n maxcross = numpy.abs(crossij)\n argmax = t\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax - tau_max\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n elif lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat",
"def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred",
"def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()",
"def calc_bootstrap(fcs,obs,ref,func, bootstrap_range, L, B):\n \n from sklearn.utils import resample\n \n idxs = np.arange(len(fcs))\n results = []\n \n random_state = 0\n for smp in range(B):\n block_sample = np.array([]).astype(int)\n while(len(block_sample) < len(fcs)):\n random_state += 1\n rolls = resample(idxs, n_samples=1, random_state=random_state)[0]\n block = np.roll(idxs, rolls)[0:L]\n block_sample = np.append(block_sample, block)\n\n block_sample = block_sample[0:len(idxs)]\n results.append(func(fcs[block_sample],obs[block_sample],ref[block_sample]))\n \n try:\n out = [ np.percentile(results, bootstrap_range[0]), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, bootstrap_range[1])]\n except:\n out = [ np.percentile(results, 2.5), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, 97.5)]\n\n # For indicating the statistical significance \n # of the lower boundary:\n if(out[0]>0): \n out.append('*')\n else:\n out.append('')\n \n return out",
"def test_PerfectModel_verify_bootstrap_deterministic(\n perfectModelEnsemble_initialized_control, comparison, metric, dim, reference\n):\n pm = perfectModelEnsemble_initialized_control.isel(lead=[0, 1, 2], init=range(6))\n if isinstance(reference, str):\n reference = [reference]\n if metric == \"contingency\":\n metric_kwargs = {\n \"forecast_category_edges\": category_edges,\n \"observation_category_edges\": category_edges,\n \"score\": \"accuracy\",\n }\n elif metric == \"roc\":\n metric_kwargs = {\"bin_edges\": category_edges}\n else:\n metric_kwargs = {}\n # acc on dim member only is ill defined\n if dim == \"member\" and metric in PEARSON_R_CONTAINING_METRICS:\n dim = [\"init\", \"member\"]\n\n actual = pm.verify(\n comparison=comparison,\n metric=metric,\n dim=dim,\n reference=reference,\n **metric_kwargs,\n ).tos\n if metric in [\"contingency\"] or metric in PEARSON_R_CONTAINING_METRICS:\n # less strict here with all NaNs, pearson_r yields NaNs for climatology\n if \"climatology\" in reference:\n actual = actual.drop_sel(skill=\"climatology\")\n assert not actual.isnull().all()\n else:\n assert not actual.isnull().any()\n\n # bootstrap()\n actual = pm.bootstrap(\n comparison=comparison,\n metric=metric,\n dim=dim,\n iterations=ITERATIONS,\n reference=reference,\n **metric_kwargs,\n ).tos\n if len(reference) > 0:\n actual = actual.drop_sel(results=\"p\")\n\n if metric in [\"contingency\"] or metric in PEARSON_R_CONTAINING_METRICS:\n # less strict here with all NaNs, pearson_r yields NaNs for climatology\n if \"climatology\" in reference:\n actual = actual.drop_sel(skill=\"climatology\")\n assert not actual.sel(results=\"verify skill\").isnull().all()\n else:\n assert not actual.sel(results=\"verify skill\").isnull().any()",
"def _set_correlations(self) -> None:\n pass",
"def test_parallel_pandas_grouped_apply(fun):\n\n df = pd.DataFrame(data={'m': [0, 0, 0, 1, 1, 1, 2, 2, 2],\n 'c': [0, 1, 2] * 3,\n 'log_prob': [1, 2, 3, 4, 5, 6, 7, 8, 9]})\n print('input data')\n print(df)\n\n reg = pandas_grouped_apply(\n coo=sp.coo_matrix((df['log_prob'], (df['m'], df['c'])), shape=[3, 3]),\n fun=fun,\n parallel=False,\n )\n print('normal application of groupby apply')\n print(reg)\n\n parallel = pandas_grouped_apply(\n coo=sp.coo_matrix((df['log_prob'], (df['m'], df['c'])), shape=[3, 3]),\n fun=fun,\n parallel=True,\n )\n print('parallel application of groupby apply')\n print(parallel)\n\n np.testing.assert_array_equal(reg['m'], parallel['m'])\n np.testing.assert_array_equal(reg['result'], parallel['result'])",
"def _pearsonr(x: xr.DataArray, y: xr.DataArray, monitor: Monitor) -> xr.Dataset:\n with monitor.starting(\"Calculate Pearson correlation\", total_work=6):\n n = len(x['time'])\n\n xm, ym = x - x.mean(dim='time'), y - y.mean(dim='time')\n xm['time'] = [i for i in range(0, len(xm.time))]\n ym['time'] = [i for i in range(0, len(ym.time))]\n xm_ym = xm * ym\n r_num = xm_ym.sum(dim='time')\n xm_squared = np.square(xm)\n ym_squared = np.square(ym)\n r_den = np.sqrt(xm_squared.sum(dim='time') * ym_squared.sum(dim='time'))\n r_den = r_den.where(r_den != 0)\n r = r_num / r_den\n\n # Presumably, if abs(r) > 1, then it is only some small artifact of floating\n # point arithmetic.\n # At this point r should be a lon/lat dataArray, so it should be safe to\n # load it in memory explicitly. This may take time as it will kick-start\n # deferred processing.\n # Comparing with NaN produces warnings that can be safely ignored\n default_warning_settings = np.seterr(invalid='ignore')\n with monitor.child(1).observing(\"task 1\"):\n negativ_r = r.values < -1.0\n with monitor.child(1).observing(\"task 2\"):\n r.values[negativ_r] = -1.0\n with monitor.child(1).observing(\"task 3\"):\n positiv_r = r.values > 1.0\n with monitor.child(1).observing(\"task 4\"):\n r.values[positiv_r] = 1.0\n np.seterr(**default_warning_settings)\n r.attrs = {'description': 'Correlation coefficients between'\n ' {} and {}.'.format(x.name, y.name)}\n\n df = n - 2\n t_squared = np.square(r) * (df / ((1.0 - r.where(r != 1)) * (1.0 + r.where(r != -1))))\n\n prob = df / (df + t_squared)\n with monitor.child(1).observing(\"task 5\"):\n prob_values_in = prob.values\n with monitor.child(1).observing(\"task 6\"):\n prob.values = betainc(0.5 * df, 0.5, prob_values_in)\n prob.attrs = {'description': 'Rough indicator of probability of an'\n ' uncorrelated system producing datasets that have a Pearson'\n ' correlation at least as extreme as the one computed from'\n ' these datsets. Not entirely reliable, but reasonable for'\n ' datasets larger than 500 or so.'}\n\n retset = xr.Dataset({'corr_coef': r,\n 'p_value': prob})\n return retset",
"def double_sum_covar(list_tuple_errs: list[float], corr_ranges: list[float], list_area_tot: list[float],\n list_lat: list[float], list_lon: list[float], nproc: int = 1) -> float:\n n = len(list_tuple_errs)\n\n if nproc == 1:\n print('Deriving double covariance sum with 1 core...')\n var_err = 0\n for i in range(n):\n for j in range(n):\n d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))\n for k in range(len(corr_ranges)):\n var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \\\n list_area_tot[i] * list_area_tot[j]\n else:\n print('Deriving double covariance sum with '+str(nproc)+' cores...')\n pack_size = int(np.ceil(n/nproc))\n argsin = [(list_tuple_errs, corr_ranges, list_area_tot, list_lon, list_lat, np.arange(\n i, min(i+pack_size, n))) for k, i in enumerate(np.arange(0, n, pack_size))]\n pool = mp.Pool(nproc, maxtasksperchild=1)\n outputs = pool.map(part_covar_sum, argsin, chunksize=1)\n pool.close()\n pool.join()\n\n var_err = np.sum(np.array(outputs))\n\n area_tot = 0\n for j in range(len(list_area_tot)):\n area_tot += list_area_tot[j]\n\n var_err /= np.nansum(area_tot) ** 2\n\n return np.sqrt(var_err)",
"def main() -> None:\n\n windows = [\"55\"] #[\"10\", \"25\", \"40\", \"55\"]\n dates: List[str] = [\"1990-01-01\", \"2020-12-31\"]\n\n normalized_returns_data(dates, \"1d\")\n\n for window in windows:\n epochs_rolling_avg_correlation_matrix_data(dates, \"1d\", window)",
"def parallel_calculation(self, serial_fun, init_config, **kwargs):\r\n burned_in_config = self.burn_in(init_config, **kwargs)\r\n num_processes = 1\r\n if platform in ('linux', 'linux2'):\r\n num_processes = kwargs.get('num_processes', mp.cpu_count())\r\n if num_processes > 1:\r\n output = mp.Queue()\r\n\r\n def fun(seed, output):\r\n output.put(\r\n serial_fun(\r\n burned_in_config,\r\n urng=np.random.RandomState(seed).random, **kwargs\r\n )\r\n )\r\n\r\n processes = [\r\n mp.Process(target=fun, args=(seed, output))\r\n for seed in np.random.randint(88, size=num_processes)\r\n ]\r\n for p in processes:\r\n p.start()\r\n for p in processes:\r\n p.join()\r\n process_results = [output.get() for p in processes]\r\n return np.mean(process_results)\r\n return serial_fun(burned_in_config, **kwargs)",
"def _compute_correlations(self, data):\n mappings = self.mappings_\n n_channels, n_times = data.shape\n\n # get the predictions\n y_pred = data.T.dot(mappings.T)\n y_pred = y_pred.reshape((n_times, len(self.picks),\n self.n_resample), order='F')\n # pool them using median\n # XXX: weird that original implementation sorts and takes middle value.\n # Isn't really the median if n_resample even\n y_pred = np.median(y_pred, axis=-1)\n # compute correlation\n num = np.sum(data.T * y_pred, axis=0)\n denom = (np.sqrt(np.sum(data.T ** 2, axis=0)) *\n np.sqrt(np.sum(y_pred ** 2, axis=0)))\n\n corr = num / denom\n return corr",
"def chipseq_cross_correlation():\n\n mkdir(CROSS_CORRELATION_DIR)\n \n template = \"\"\"Rscript {run_spp} -c={input_bam} -savp={srr}.pdf -out={srr}.txt\"\"\"\n\n printp(\"\"\"\\n#\\n# ChIP-seq QC\\n#\\n\"\"\")\n printp(\"\"\"# drmr:label cross-correlation\\n\"\"\")\n printp(\"\"\"\\n# drmr:job nodes=1 processors=1 memory=15g working_directory={} time_limit=4h\"\"\".format(CROSS_CORRELATION_DIR))\n\n run_spp = os.getenv(\"RUN_SPP_PATH\")\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n input_bam = get_pruned_bam(sample, control = False) if x == 'treatment' else get_pruned_bam(sample, control = True)\n srr = get_srr(sample) if x == 'treatment' else get_input_control_srr(sample)\n printp(template.format(**locals()))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")"
] | [
"0.7209196",
"0.68485564",
"0.62561953",
"0.5953953",
"0.593656",
"0.58523387",
"0.58196706",
"0.57341266",
"0.5679761",
"0.563205",
"0.56270427",
"0.5559051",
"0.54862607",
"0.54845196",
"0.5357379",
"0.53566194",
"0.5339225",
"0.5313855",
"0.5287374",
"0.52761865",
"0.5266091",
"0.52620703",
"0.5260764",
"0.525915",
"0.5247453",
"0.52463055",
"0.521256",
"0.52019405",
"0.51948607",
"0.51895404"
] | 0.70515513 | 1 |
core function for parallel run_net_correlation | def run_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,
spreadsheet_genes_as_input, baseline_array, job_id):
np.random.seed(job_id)
phenotype_df = phenotype_df.iloc[[job_id], :]
spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(
spreadsheet_df, phenotype_df)
sample_smooth = spreadsheet_df_trimmed.as_matrix()
pc_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)
pearson_array = pc_array.copy()
pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0
pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters["top_beta_of_sort"]))
restart_accumulator = pc_array.copy()
restart_accumulator[restart_accumulator != 0] = 1
pc_array = pc_array / max(sum(pc_array), EPSILON_0)
pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]
pc_array = pc_array - baseline_array
quantitative_score = pc_array
viz_score = (pc_array - min(pc_array)) / (max(pc_array) - min(pc_array))
phenotype_name = phenotype_df_trimmed.index.values[0]
gene_name_list = spreadsheet_df_trimmed.index
gene_orig_list = spreadsheet_genes_as_input
generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,
phenotype_name, gene_name_list, gene_orig_list, run_parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_net_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n network_df = kn.get_network_df(run_parameters['gg_network_name_full_path'])\n\n node_1_names, node_2_names = kn.extract_network_node_names(network_df)\n unique_gene_names = kn.find_unique_node_names(node_1_names, node_2_names)\n\n unique_gene_names = sorted(unique_gene_names)\n\n genes_lookup_table = kn.create_node_names_dict(unique_gene_names)\n\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_1')\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_2')\n\n network_df = kn.symmetrize_df(network_df)\n network_mat_sparse = kn.convert_network_df_to_sparse(\n network_df, len(unique_gene_names), len(unique_gene_names))\n\n network_mat = normalize(network_mat_sparse, norm=\"l1\", axis=0)\n\n del network_df\n del network_mat_sparse\n del node_1_names\n del node_2_names\n del genes_lookup_table\n gc.collect()\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n spreadsheet_genes_as_input = spreadsheet_df.index.values\n phenotype_response_df = phenotype_response_df.T\n\n spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)\n spreadsheet_df = zscore_dataframe(spreadsheet_df)\n\n sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.as_matrix(), network_mat.T, run_parameters)\n spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)\n\n baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]\n baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, jobs_id)\n dstutil.parallelize_processes_locally(run_net_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def run_bootstrap_net_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n network_df = kn.get_network_df(run_parameters['gg_network_name_full_path'])\n node_1_names, node_2_names = kn.extract_network_node_names(network_df)\n unique_gene_names = kn.find_unique_node_names(node_1_names, node_2_names)\n unique_gene_names = sorted(unique_gene_names)\n genes_lookup_table = kn.create_node_names_dict(unique_gene_names)\n\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_1')\n network_df = kn.map_node_names_to_index(network_df, genes_lookup_table, 'node_2')\n network_df = kn.symmetrize_df(network_df)\n network_mat_sparse = kn.convert_network_df_to_sparse(\n network_df, len(unique_gene_names), len(unique_gene_names))\n\n network_mat = normalize(network_mat_sparse, norm=\"l1\", axis=0)\n\n del network_df\n del network_mat_sparse\n del node_1_names\n del node_2_names\n del genes_lookup_table\n gc.collect()\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n spreadsheet_genes_as_input = spreadsheet_df.index.values\n phenotype_response_df = phenotype_response_df.T\n\n spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)\n spreadsheet_df = zscore_dataframe(spreadsheet_df)\n sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.as_matrix(), network_mat.T, run_parameters)\n spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)\n\n baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]\n baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, jobs_id)\n dstutil.parallelize_processes_locally(run_bootstrap_net_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def run_bootstrap_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n restart_accumulator = np.zeros(network_mat.shape[0])\n gm_accumulator = np.ones(network_mat.shape[0])\n borda_count = np.zeros(network_mat.shape[0])\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pearson_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n sample_smooth, 1.0, run_parameters[\"cols_sampling_fraction\"])\n\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator[pc_array != 0] += 1.0\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n pc_array = pc_array - baseline_array\n\n borda_count = sum_array_ranking_to_borda_count(borda_count, pc_array)\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n\n restart_accumulator = restart_accumulator / n_bootstraps\n borda_count = borda_count / n_bootstraps\n # pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n quantitative_score = borda_count\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)",
"def double_sum_covar(list_tuple_errs: list[float], corr_ranges: list[float], list_area_tot: list[float],\n list_lat: list[float], list_lon: list[float], nproc: int = 1) -> float:\n n = len(list_tuple_errs)\n\n if nproc == 1:\n print('Deriving double covariance sum with 1 core...')\n var_err = 0\n for i in range(n):\n for j in range(n):\n d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))\n for k in range(len(corr_ranges)):\n var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \\\n list_area_tot[i] * list_area_tot[j]\n else:\n print('Deriving double covariance sum with '+str(nproc)+' cores...')\n pack_size = int(np.ceil(n/nproc))\n argsin = [(list_tuple_errs, corr_ranges, list_area_tot, list_lon, list_lat, np.arange(\n i, min(i+pack_size, n))) for k, i in enumerate(np.arange(0, n, pack_size))]\n pool = mp.Pool(nproc, maxtasksperchild=1)\n outputs = pool.map(part_covar_sum, argsin, chunksize=1)\n pool.close()\n pool.join()\n\n var_err = np.sum(np.array(outputs))\n\n area_tot = 0\n for j in range(len(list_area_tot)):\n area_tot += list_area_tot[j]\n\n var_err /= np.nansum(area_tot) ** 2\n\n return np.sqrt(var_err)",
"def correlate_cpu(data, num_timeseries, size_timeseries,\n correlations_cpu, indices_step):\n num_timesteps = size_timeseries\n window_size = size_timeseries\n\n sums = [0.0] * num_timeseries\n sums_sq = [0.0] * num_timeseries\n sums_xy = [0.0] * calc_num_correlations(num_timeseries)\n\n for k in range(num_timesteps):\n index_correlation = 0\n\n for i in range(num_timeseries):\n old = 0.0 if k < window_size else data[i][k - window_size]\n new = data[i][k]\n sums[i] += new - old\n sums_sq[i] += new * new - old * old\n\n for i in range(num_timeseries):\n old_x = 0.0 if k < window_size else data[i][k - window_size]\n new_x = data[i][k]\n\n for j in range(i):\n old_y = 0.0 if k < window_size else data[j][k - window_size]\n new_y = data[j][k]\n sums_xy[index_correlation] += new_x * new_y - old_x * old_y\n numerator = (window_size * sums_xy[index_correlation] -\n sums[i] * sums[j])\n denominator = ((1 / math.sqrt(window_size * sums_sq[i] -\n sums[i] * sums[i])) *\n (1 / math.sqrt(window_size * sums_sq[j] -\n sums[j] * sums[j])))\n correlations_cpu[index_correlation] = numerator * denominator\n indices_step[2 * index_correlation] = j\n indices_step[2 * index_correlation + 1] = i\n index_correlation += 1",
"def calculate_correlation(data):\n pass",
"def run_parallel(heritability, x_start_i, x_stop_i, cluster='usc'):\n\trun_id = 'corr_trait_sim'\n\tjob_id = ' % s_ % d_ % d' % (run_id, x_start_i, x_stop_i)\n\tfile_prefix = env.env['results_dir'] + run_id + '_' + str(x_start_i) + '_' + str(x_stop_i)\n\n\t#Cluster specific parameters\t\n\tif cluster == 'gmi': #GMI cluster.\n\t\tshstr = '#!/bin/sh\\n'\n\t\tshstr += '#$ -N %s\\n' % job_id\n\t\tshstr += \"#$ -q q.norm@blade*\\n\"\n\t\tshstr += '#$ -o %s.log\\n' % job_id\n\t\t#shstr += '#$ -cwd /home/GMI/$HOME\\n'\n\t\t#shstr += '#$ -M [email protected]\\n\\n'\n\n\telif cluster == 'usc': #USC cluster.\n\t\tshstr = \"#!/bin/csh\\n\"\n\t\tshstr += \"#PBS -l walltime=%s \\n\" % '72:00:00'\n\t\tshstr += \"#PBS -l mem=%s \\n\" % '1950mb'\n\t\tshstr += \"#PBS -q cmb\\n\"\n\t\tshstr += \"#PBS -N p%s \\n\" % job_id\n\n\tshstr += \"(python %scorr_trait_sim.py %s %d %d \" % (env.env['script_dir'], heritability, x_start_i, x_stop_i)\n\n\tshstr += \"> \" + file_prefix + \"_job.out) >& \" + file_prefix + \"_job.err\\n\"\n\tprint '\\n', shstr, '\\n'\n\tscript_file_name = run_id + \".sh\"\n\tf = open(script_file_name, 'w')\n\tf.write(shstr)\n\tf.close()\n\n\t#Execute qsub script\n\tos.system(\"qsub \" + script_file_name)",
"def thread_Compute(self,X,layer):\n threads=[]\n #========get the corresponding weights length============\n #pooling n_c=input matirx channels\n #conv n_c =the weights channels\n if layer[\"l_type\"]==\"conv\":\n n_C=kernels[layer[\"kernel\"]].shape[3]\n else:\n n_C=X.shape[3]\n \n #======================\n pos=self.getPos(n_C,0,len(self.nodes))\n start,end=pos\n for node in self.nodes:\n start,end=self.getPos(n_C,end,len(self.nodes))\n a=(start,end)\n d=X[0,:,:,:]\n conv_dict = {\"data\":d,\"pos\":a,\"layer\":layer}\n threads.append(client(conv_dict,node[\"ip\"],node[\"port\"]))\n for t in threads:\n t.start()\n out= self.layerResult(layer,X,pos)\n for t in threads:\n t.join()\n out=np.concatenate((out,t.value()[\"data\"]), axis=2)\n self.count=0\n\n return out",
"def compute_autocorrelation_rlzn_ensemble(fopen_list, te):\n print 'Compute the autocorrelation'\n\n # initialize components of rho\n sumuu = 0.0\n sumvv = 0.0\n\n psiuu = 0.0\n psivv = 0.0\n\n sumup2 = 0.0\n sumvp2 = 0.0\n\n # get characteristics of mean velocity field\n fbs = netCDF4.Dataset('buoyancySurface.nc','r')\n lonCell = fbs.variables['lonCell']\n latCell = fbs.variables['latCell']\n lon = np.degrees(np.mod(lonCell[:]+np.pi,2*np.pi)-np.pi)\n lat = np.degrees(latCell[:])\n hull = spatial.ConvexHull(np.vstack((lon,lat)).T) \n triang = Triangulation(lon,lat)\n buoy_surf_zonal = fbs.variables['buoyancySurfaceVelocityZonal']\n buoy_surf_merid = fbs.variables['buoyancySurfaceVelocityMeridional']\n\n \n # build up layers for interpolation of particle layers\n interp_zonal = []\n interp_merid = []\n nlayers = len(fbs.dimensions['nBuoyancySurfaces'])\n for alayer in np.arange(nlayers):\n interp_zonal.append(LinearTriInterpolator(triang, buoy_surf_zonal[0,:,alayer]))\n interp_merid.append(LinearTriInterpolator(triang, buoy_surf_merid[0,:,alayer]))\n\n for num, afile in enumerate(fopen_list):\n print 'working on %d' % num\n # interpolate mean velocities onto points for the computation\n x = afile.variables['xParticle'][:te,:]\n y = afile.variables['yParticle'][:te,:]\n z = afile.variables['zParticle'][:te,:]\n latr, lonr = proj_lat_long(x,y,z)\n latr = np.degrees(latr)\n lonr = np.degrees(lonr)\n\n ubar = np.zeros(x.shape)\n vbar = np.zeros(x.shape)\n nparticle_layer = x.shape[1]/nlayers\n for alayer in np.arange(nlayers):\n ps = np.arange(alayer*nparticle_layer,(alayer+1)*nparticle_layer)\n ubar[:,ps] = interp_zonal[alayer](lonr[:,ps],latr[:,ps])\n vbar[:,ps] = interp_merid[alayer](lonr[:,ps],latr[:,ps])\n\n # compute portions of autocorrelation\n u = afile.variables['lonVel'][:te,:]\n up = u - ubar\n up0 = up[0,:]\n\n v = afile.variables['latVel'][:te,:]\n vp = v - vbar\n vp0 = vp[0,:]\n\n sumuu += up0*up\n sumvv += vp0*vp\n\n psiuu += up0*up0\n psivv += vp0*vp0\n \n sumup2 += np.nanmean(up**2.0, axis=0)\n sumvp2 += np.nanmean(vp**2.0, axis=0)\n \n\n fbs.close()\n\n # note division by psi removes need to divide the sums by the number of realizations\n sumuu /= psiuu \n sumvv /= psivv\n\n sumup2 /= len(fopen_list)\n sumvp2 /= len(fopen_list)\n\n print 'done'\n\n return sumuu, sumvv, sumup2, sumvp2, lonr[0,:], latr[0,:], lon, lat, hull",
"def test(size_timeseries, num_timeseries):\n data = random_data(num_timeseries, size_timeseries)\n correlations_dfe = [0.0] * calc_num_correlations(num_timeseries)\n correlations_cpu = [0.0] * calc_num_correlations(num_timeseries)\n indices_step = [0.0] * calc_num_correlations(num_timeseries) * 2\n\n start_time = time.time()\n correlate_dfe(data, size_timeseries, num_timeseries, correlations_dfe)\n dfe_total = time.time() - start_time\n print 'DFE correlation total time:\\t%.5lfs' % dfe_total\n\n start_time = time.time()\n correlate_cpu(data, num_timeseries, size_timeseries,\n correlations_cpu, indices_step)\n cpu_total = time.time() - start_time\n print 'CPU correlation total time:\\t%.5lfs' % cpu_total\n\n check(correlations_dfe, correlations_cpu, num_timeseries, indices_step)",
"def _calculate_cc(self, array, corr_range, tau_max, lag_mode):\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus INCLUDING the last tau value\n for t in range(2*tau_max+1):\n\n # here the actual cross correlation is calculated\n crossij = (array[tau_max, i, :] * array[t, j, :]).mean()\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = crossij\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += numpy.abs(crossij)\n if t >= tau_max:\n corrmat[0, i, j] += numpy.abs(crossij)\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if numpy.abs(crossij) > maxcross:\n maxcross = numpy.abs(crossij)\n argmax = t\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax - tau_max\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n elif lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat",
"def correlate(fft1,fft2, maxlag,dt, Nfft, method=\"cross-correlation\"):\n\n if fft1.ndim == 1:\n nwin=1\n elif fft1.ndim == 2:\n nwin= int(fft1.shape[0])\n\n t0=time.time()\n corr=np.zeros(shape=(nwin,Nfft),dtype=np.complex64)\n fft1_globe = cuda.to_device(fft1[:,:Nfft//2].reshape(fft1.size,))\n fft2_globe = cuda.to_device(fft2[:,:Nfft//2].reshape(fft2.size,))\n corr_globe = cuda.device_array(shape=(nwin*(Nfft//2),),dtype=np.complex64)\n \n threadsperblock = 2000\n blockspergrid = math.ceil(fft1_globe.size/threadsperblock)\n \n if method == 'deconv':\n decon_gpu[threadsperblock,blockspergrid](fft1_globe,fft2_globe,corr_globe)\n elif method =='coherence':\n coherence_gpu[threadsperblock,blockspergrid](fft1_globe,fft2_globe,corr_globe)\n\n tcorr = corr_globe.copy_to_host()\n corr = tcorr.reshape(nwin,Nfft//2)\n\n ncorr = np.zeros(shape=Nfft,dtype=np.complex64)\n ncorr[:Nfft//2] = np.mean(corr,axis=0)\n ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)\n ncorr[0]=complex(0,0)\n ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))\n\n t1=time.time()\n print('it takes '+str(t1-t0)+' s')\n\n tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt\n ind = np.where(np.abs(tcorr) <= maxlag)[0]\n ncorr = ncorr[ind]\n tcorr = tcorr[ind]\n\n return ncorr,tcorr",
"def test_euclidean_parallel_transport(self):\n \n self._test_parallel_transport(k=0)",
"def run_bootstrap_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, n_bootstraps, job_id):\n\n np.random.seed(job_id)\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n pearson_array = get_correlation(spreadsheet_df_trimmed.as_matrix(), phenotype_df_trimmed.values[0], run_parameters)\n borda_count = np.zeros(spreadsheet_df.shape[0])\n gm_accumulator = np.ones(spreadsheet_df.shape[0])\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n spreadsheet_df_trimmed.as_matrix(), 1.0, run_parameters[\"cols_sampling_fraction\"])\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n borda_count = sum_array_ranking_to_borda_count(borda_count, np.abs(pc_array))\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n borda_count = borda_count / n_bootstraps\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n generate_bootstrap_correlation_output(borda_count, viz_score, pearson_array,\n phenotype_name, gene_name_list, run_parameters)",
"def run_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, job_id):\n # selects the ith row in phenotype_df\n\n np.random.seed(job_id)\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n\n spreadsheet_df_trimmed, phenotype_df_trimmed, err_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n pc_array = get_correlation(spreadsheet_df_trimmed.as_matrix(), phenotype_df_trimmed.values[0], run_parameters)\n\n gene_name_list = spreadsheet_df_trimmed.index\n phenotype_name = phenotype_df.index.values[0]\n generate_correlation_output(pc_array, phenotype_name, gene_name_list, run_parameters)",
"def get_correlation_array(self, src):\n self.correlation_array = parallel.call_and_bcast(self.get_array, src)",
"def main_correlate(tel1, date1, tel2, date2, nchan, tstart, tend, dedisperse,\n do_foldspec, ntbin, ngate,\n do_waterfall, ntw_min,\n save_xcorr, verbose=0):\n comm = MPI.COMM_WORLD\n if comm.size > 1 and save_xcorr:\n if comm.rank == 0:\n\t print(\"Warning, h5py mpio is sometimes slow. Consider disabling save_xcorr\")\n\t# save_xcorr = False\n # observing parameters\n t0 = Time(tstart, scale='utc')\n t1 = Time(tend, scale='utc')\n\n Obs = obsdata()\n obskey1 = Obs[tel1].nearest_observation(date1)\n obskey2 = Obs[tel2].nearest_observation(date2)\n psr1 = Obs[tel1][obskey1]['src']\n psr2 = Obs[tel2][obskey2]['src']\n files1 = Obs[tel1].file_list(obskey1)\n files2 = Obs[tel2].file_list(obskey2)\n\n assert psr1 == psr2\n if comm.rank == 0:\n print(\"forming visibilities from (telescope, observation_key) = \\n\"\n \"\\t ({0}, {1}) and ({2}, {3}), source {4}\".format(tel1, obskey1, tel2, obskey2, psr1))\n dm = Obs['psrs'][psr1]['dm']\n with LOFARdata_Pcombined(*files1, comm=comm) as fh1,\\\n GMRTdata(*files2, comm=comm) as fh2:\n phasepol1 = Obs['lofar'][obskey1].get_phasepol(fh1.time0, rphase=None)\n phasepol2 = Obs['gmrt'][obskey2].get_phasepol(fh2.time0, rphase=None)\n nt = min(fh1.ntimebins(t0, t1), fh2.ntimebins(t0, t1))\n # out = (foldspec, icount, waterfall)\n out = correlate.correlate(fh1, fh2, dm=dm, nchan=nchan, ngate=ngate,\n ntbin=ntbin, nt=nt, ntw=ntw_min,\n t0=t0, t1=t1, dedisperse=dedisperse,\n phasepol=(phasepol1, phasepol2),\n do_waterfall=do_waterfall,\n do_foldspec=do_foldspec,\n save_xcorr=save_xcorr,\n comm=comm)\n myfoldspec = out[0]\n myicount = out[1]\n mywaterfall = out[2]\n\n savepref = \"{0}{1}_{2}chan{3}ntbin\".format(tel1[0], tel2[0], nchan, ntbin)\n dt = t1 - t0\n if do_waterfall:\n waterfall = np.zeros_like(mywaterfall)\n comm.Reduce(mywaterfall, waterfall, op=MPI.SUM, root=0)\n if comm.rank == 0:\n # waterfall = normalize_counts(waterfall)\n np.save(\"{0}waterfall_{1}+{2:08}sec.npy\"\n .format(savepref, t0, dt.sec), waterfall)\n\n if do_foldspec:\n foldspec = np.zeros_like(myfoldspec)\n icount = np.zeros_like(myicount)\n comm.Reduce(myfoldspec, foldspec, op=MPI.SUM, root=0)\n comm.Reduce(myicount, icount, op=MPI.SUM, root=0)\n if comm.rank == 0:\n fname = (\"{0}foldspec_{1}+{2:08}sec.npy\")\n iname = (\"{0}icount_{1}+{2:08}sec.npy\")\n np.save(fname.format(savepref, t0, dt.sec), foldspec)\n np.save(iname.format(savepref, t0, dt.sec), icount)\n\n # get normalized flux in each bin (where any were added)\n f2 = normalize_counts(foldspec, icount)\n foldspec1 = f2.sum(axis=2)\n fluxes = foldspec1.sum(axis=0)\n foldspec3 = f2.sum(axis=0)\n\n with open('{0}flux_{1}+{2:08}sec.dat'\n .format(savepref, t0, dt.sec), 'w') as f:\n for i, flux in enumerate(fluxes):\n f.write('{0:12d} {1:12.9g}\\n'.format(i + 1, flux))\n\n plots = True\n if plots and comm.rank == 0:\n if do_waterfall:\n w = waterfall.copy()\n try:\n pmap('{0}waterfall_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), w, 1, verbose=True)\n except:\n pass\n if do_foldspec:\n pmap('{0}folded_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), foldspec1, 0, verbose)\n # TODO: Note, I (aaron) don't think this works for LOFAR data\n # since nchan=20, but we concatenate several subband files\n # together, so f2.nchan = N_concat * nchan\n # It should work for my \"new\" LOFAR_Pconcate file class\n pmap('{0}foldedbin_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec),\n f2.transpose(0, 2, 1).reshape(nchan, -1), 1, verbose)\n pmap('{0}folded3_{1}+{2:08}sec.pgm'\n .format(savepref, t0, dt.sec), foldspec3, 0, verbose)",
"def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results",
"def _pearsonr(x: xr.DataArray, y: xr.DataArray, monitor: Monitor) -> xr.Dataset:\n with monitor.starting(\"Calculate Pearson correlation\", total_work=6):\n n = len(x['time'])\n\n xm, ym = x - x.mean(dim='time'), y - y.mean(dim='time')\n xm['time'] = [i for i in range(0, len(xm.time))]\n ym['time'] = [i for i in range(0, len(ym.time))]\n xm_ym = xm * ym\n r_num = xm_ym.sum(dim='time')\n xm_squared = np.square(xm)\n ym_squared = np.square(ym)\n r_den = np.sqrt(xm_squared.sum(dim='time') * ym_squared.sum(dim='time'))\n r_den = r_den.where(r_den != 0)\n r = r_num / r_den\n\n # Presumably, if abs(r) > 1, then it is only some small artifact of floating\n # point arithmetic.\n # At this point r should be a lon/lat dataArray, so it should be safe to\n # load it in memory explicitly. This may take time as it will kick-start\n # deferred processing.\n # Comparing with NaN produces warnings that can be safely ignored\n default_warning_settings = np.seterr(invalid='ignore')\n with monitor.child(1).observing(\"task 1\"):\n negativ_r = r.values < -1.0\n with monitor.child(1).observing(\"task 2\"):\n r.values[negativ_r] = -1.0\n with monitor.child(1).observing(\"task 3\"):\n positiv_r = r.values > 1.0\n with monitor.child(1).observing(\"task 4\"):\n r.values[positiv_r] = 1.0\n np.seterr(**default_warning_settings)\n r.attrs = {'description': 'Correlation coefficients between'\n ' {} and {}.'.format(x.name, y.name)}\n\n df = n - 2\n t_squared = np.square(r) * (df / ((1.0 - r.where(r != 1)) * (1.0 + r.where(r != -1))))\n\n prob = df / (df + t_squared)\n with monitor.child(1).observing(\"task 5\"):\n prob_values_in = prob.values\n with monitor.child(1).observing(\"task 6\"):\n prob.values = betainc(0.5 * df, 0.5, prob_values_in)\n prob.attrs = {'description': 'Rough indicator of probability of an'\n ' uncorrelated system producing datasets that have a Pearson'\n ' correlation at least as extreme as the one computed from'\n ' these datsets. Not entirely reliable, but reasonable for'\n ' datasets larger than 500 or so.'}\n\n retset = xr.Dataset({'corr_coef': r,\n 'p_value': prob})\n return retset",
"def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])",
"def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n \n # print(\"test_net_on_dataset\")\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n \n test_timer.tic()\n \n all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n test_timer.toc()\n\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n roidb = dataset.get_roidb()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES + 1\n final_boxes = empty_results(num_classes, num_images)\n test_corloc = 'train' in dataset_name\n \n\n all_cls_scores = {}\n\n for i, entry in enumerate(roidb):\n\n if early_stop and i > 10: break\n\n boxes = all_boxes[entry['image']]\n \n cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n # print(cls_key)\n\n if boxes['scores'] is not None:\n if test_corloc:\n # print(\"corlooking\")\n _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n else:\n _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n extend_results(i, final_boxes, cls_boxes_i)\n else:\n final_boxes = None\n \n results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n return results",
"def ParallelToserial(self):\n pass",
"def connector_mediation(task):\n\tatlas = 'power'\n\tproject='hcp'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tsubjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))\n\tstatic_results = graph_metrics(subjects,task,atlas,run_version='fz')\n\tmatrices = static_results['matrices']\n\tsubject_pcs = static_results['subject_pcs']\n\tsubject_mods = static_results['subject_mods']\n\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\tfor i in range(subject_pcs.shape[1]):\n\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\tmean_conn = np.nanmean(matrices,axis=0)\n\te_tresh = np.percentile(mean_conn,85)\n\tsubject_pcs[np.isnan(subject_pcs)] = 0.0\n\tm = np.zeros((264,264,264))\n\tpool = Pool(40)\n\tfor n in range(264):\n\t\tprint n\n\t\tsys.stdout.flush()\n\t\tvariables = []\n\t\tfor i,j in combinations(range(264),2):\n\t\t\tvariables.append(pd.DataFrame(data={'pc':subject_pcs[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))\n\t\tresults = pool.map(multi_med,variables)\n\t\tfor r,i in zip(results,combinations(range(264),2)):\n\t\t\tm[n,i[0],i[1]] = r\n\t\t\tm[n,i[1],i[0]] = r\n\t\tnp.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s.npy'%(task),m)",
"def test_3():\n \n\n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n t = Stream('t')\n filter_then_square(in_streams[0], t,\n filter_threshold=20)\n print_stream(t, name='p1')\n\n def sums(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=' p2')\n\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n }\n },\n 'process_1':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': g,\n 'sources': {}\n },\n 'process_2':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': sums,\n 'sources': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('process_1', 'in'), ('process_2', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'process_1':\n {\n },\n 'process_2':\n {\n }\n }\n\n multicore(processes, connections)",
"def run_bootstrap_correlation(run_parameters):\n run_parameters[\"results_tmp_directory\"] = kn.create_dir(run_parameters[\"results_directory\"], 'tmp')\n\n phenotype_response_df = kn.get_spreadsheet_df(run_parameters[\"phenotype_name_full_path\"])\n spreadsheet_df = kn.get_spreadsheet_df(run_parameters[\"spreadsheet_name_full_path\"])\n phenotype_response_df = phenotype_response_df.T\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n\n number_of_jobs = len(phenotype_response_df.index)\n jobs_id = range(0, number_of_jobs)\n zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_response_df, n_bootstraps, jobs_id)\n dstutil.parallelize_processes_locally(run_bootstrap_correlation_worker, zipped_arguments, number_of_jobs)\n\n write_phenotype_data_all(run_parameters)\n kn.remove_dir(run_parameters[\"results_tmp_directory\"])",
"def corr2(config, logger=None):\n # Setup logger based on config verbose value\n if logger is None:\n logger = treecorr.config.setup_logger(\n treecorr.config.get(config,'verbose',int,1),\n config.get('log_file',None))\n\n # Check that config doesn't have any extra parameters.\n # (Such values are probably typos.)\n # Also convert the given parameters to the correct type, etc.\n config = treecorr.config.check_config(config, corr2_valid_params, corr2_aliases, logger)\n\n import pprint\n logger.debug('Using configuration dict:\\n%s',pprint.pformat(config))\n\n if ( 'output_dots' not in config \n and config.get('log_file',None) is None \n and config['verbose'] >= 2 ):\n config['output_dots'] = True\n\n # Set the number of threads\n num_threads = config.get('num_threads',0)\n logger.debug('From config dict, num_threads = %d',num_threads)\n if num_threads <= 0:\n import multiprocessing\n num_threads = multiprocessing.cpu_count()\n logger.debug('multiprocessing.cpu_count() = %d',num_threads)\n if num_threads > 1:\n logger.debug('Telling OpenMP to use %d threads',num_threads)\n num_threads = treecorr.set_omp_threads(num_threads)\n logger.debug('OpenMP reports that it will use %d threads',num_threads)\n if num_threads > 1:\n logger.info('Using %d threads.',num_threads)\n elif 'num_threads' in config:\n # Only warn if the user specifically asked for num_threads != 1.\n logger.warn('Unable to use multiple threads, since OpenMP is not enabled.')\n\n # Read in the input files. Each of these is a list.\n cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)\n if len(cat1) == 0:\n raise AttributeError(\"Either file_name or file_list is required\")\n cat2 = treecorr.read_catalogs(config, 'file_name2', 'rand_file_list2', 1, logger)\n rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)\n rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)\n if len(cat2) == 0 and len(rand2) > 0:\n raise AttributeError(\"rand_file_name2 is invalid without file_name2\")\n logger.info(\"Done reading input catalogs\")\n\n # Do GG correlation function if necessary\n if 'gg_file_name' in config or 'm2_file_name' in config:\n logger.info(\"Start GG calculations...\")\n gg = treecorr.GGCorrelation(config,logger)\n gg.process(cat1,cat2)\n logger.info(\"Done GG calculations.\")\n if 'gg_file_name' in config:\n gg.write(config['gg_file_name'])\n if 'm2_file_name' in config:\n gg.writeMapSq(config['m2_file_name'])\n\n # Do NG correlation function if necessary\n if 'ng_file_name' in config or 'nm_file_name' in config or 'norm_file_name' in config:\n if len(cat2) == 0:\n raise AttributeError(\"file_name2 is required for ng correlation\")\n logger.info(\"Start NG calculations...\")\n ng = treecorr.NGCorrelation(config,logger)\n ng.process(cat1,cat2)\n logger.info(\"Done NG calculation.\")\n\n # The default ng_statistic is compensated _iff_ rand files are given.\n rg = None\n if len(rand1) == 0:\n if config.get('ng_statistic',None) == 'compensated':\n raise AttributeError(\"rand_files is required for ng_statistic = compensated\")\n elif config.get('ng_statistic','compensated') == 'compensated':\n rg = treecorr.NGCorrelation(config,logger)\n rg.process(rand1,cat2)\n logger.info(\"Done RG calculation.\")\n\n if 'ng_file_name' in config:\n ng.write(config['ng_file_name'], rg)\n if 'nm_file_name' in config:\n ng.writeNMap(config['nm_file_name'], rg)\n\n if 'norm_file_name' in config:\n gg = treecorr.GGCorrelation(config,logger)\n gg.process(cat2)\n 
logger.info(\"Done GG calculation for norm\")\n dd = treecorr.NNCorrelation(config,logger)\n dd.process(cat1)\n logger.info(\"Done DD calculation for norm\")\n rr = treecorr.NNCorrelation(config,logger)\n rr.process(rand1)\n logger.info(\"Done RR calculation for norm\")\n dr = None\n if config['nn_statistic'] == 'compensated':\n dr = treecorr.NNCorrelation(config,logger)\n dr.process(cat1,rand1)\n logger.info(\"Done DR calculation for norm\")\n ng.writeNorm(config['norm_file_name'],gg,dd,rr,dr,rg)\n\n # Do NN correlation function if necessary\n if 'nn_file_name' in config:\n if len(rand1) == 0:\n raise AttributeError(\"rand_file_name is required for NN correlation\")\n if len(cat2) > 0 and len(rand2) == 0:\n raise AttributeError(\"rand_file_name2 is required for NN cross-correlation\")\n logger.info(\"Start NN calculations...\")\n dd = treecorr.NNCorrelation(config,logger)\n dd.process(cat1,cat2)\n logger.info(\"Done NN calculations.\")\n\n dr = None\n rd = None\n if len(cat2) == 0:\n rr = treecorr.NNCorrelation(config,logger)\n rr.process(rand1)\n logger.info(\"Done RR calculations.\")\n\n if config['nn_statistic'] == 'compensated':\n dr = treecorr.NNCorrelation(config,logger)\n dr.process(cat1,rand1)\n logger.info(\"Done DR calculations.\")\n else:\n rr = treecorr.NNCorrelation(config,logger)\n rr.process(rand1,rand2)\n logger.info(\"Done RR calculations.\")\n\n if config['nn_statistic'] == 'compensated':\n dr = treecorr.NNCorrelation(config,logger)\n dr.process(cat1,rand2)\n logger.info(\"Done DR calculations.\")\n rd = treecorr.NNCorrelation(config,logger)\n rd.process(rand1,cat2)\n logger.info(\"Done RD calculations.\")\n dd.write(config['nn_file_name'],rr,dr,rd)\n\n # Do KK correlation function if necessary\n if 'kk_file_name' in config:\n logger.info(\"Start KK calculations...\")\n kk = treecorr.KKCorrelation(config,logger)\n kk.process(cat1,cat2)\n logger.info(\"Done KK calculations.\")\n kk.write(config['kk_file_name'])\n\n # Do NG correlation function if necessary\n if 'nk_file_name' in config:\n if len(cat2) == 0:\n raise AttributeError(\"file_name2 is required for nk correlation\")\n logger.info(\"Start NK calculations...\")\n nk = treecorr.NKCorrelation(config,logger)\n nk.process(cat1,cat2)\n logger.info(\"Done NK calculation.\")\n\n rk = None\n if len(rand1) == 0:\n if config.get('nk_statistic',None) == 'compensated':\n raise AttributeError(\"rand_files is required for nk_statistic = compensated\")\n elif config.get('nk_statistic','compensated') == 'compensated':\n rk = treecorr.NKCorrelation(config,logger)\n rk.process(rand1,cat2)\n logger.info(\"Done RK calculation.\")\n\n nk.write(config['nk_file_name'], rk)\n\n # Do KG correlation function if necessary\n if 'kg_file_name' in config:\n if len(cat2) == 0:\n raise AttributeError(\"file_name2 is required for kg correlation\")\n logger.info(\"Start KG calculations...\")\n kg = treecorr.KGCorrelation(config,logger)\n kg.process(cat1,cat2)\n logger.info(\"Done KG calculation.\")\n kg.write(config['kg_file_name'])",
"def optimize_correlation(self,dataloader,num_epochs=10, optimizer=None):\n print(\" Start Correlation optimizing...\")\n if optimizer is None:\n optimizer = optim.SGD(self.parameters(),lr=0.001,momentum=0.9)\n loss_sub_log = []\n for epoch in range(num_epochs):\n current_loss = float(0)\n # batch_num = 0\n for batch_idx, batch_data in enumerate(dataloader,start=0):\n data, labels = batch_data\n optimizer.zero_grad()\n forward_correlation_result = F.softmax(self.forward(data),dim=1)\n # labels_extended = labels.expand(forward_correlation_result.shape)\n labels_extended = torch.zeros(forward_correlation_result.shape)\n for idx, label in enumerate(labels):\n labels_extended[idx][label] = 1\n\n error = forward_correlation_result-labels_extended\n # print(forward_correlation_result[0])\n # print(labels_extended[0])\n # print(labels[0])\n loss = -correlation_loss(self.latest_hidden_out,error)\n loss.backward()\n optimizer.step()\n current_loss = loss.item()\n # batch_num+=1\n loss_sub_log.append(current_loss)\n print(f\" sub epoch {epoch} correlation loss: {-current_loss}\")\n\n return optimizer",
"def test_2():\n \n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n multiplicand=2, addend=1)\n\n def g(in_streams, out_streams):\n filter_then_square(in_streams[0], out_streams[0],\n filter_threshold=20)\n\n def h(in_streams, out_streams):\n s = Stream('s')\n sum_window(in_streams[0], s, window_size=3, step_size=3)\n print_stream(s, name=s.name)\n \n\n # Specify processes and connections.\n processes = \\\n {\n 'source_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('out', 'i')],\n 'compute_func': f,\n 'sources':\n {'acceleration':\n {'type': 'i',\n 'func': source_thread_target\n },\n },\n 'actuators': {}\n },\n 'filter_and_square_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [('filtered', 'i')],\n 'compute_func': g,\n 'sources': {},\n 'actuators': {}\n },\n 'aggregate_and_output_process':\n {'in_stream_names_types': [('in', 'i')],\n 'out_stream_names_types': [],\n 'compute_func': h,\n 'sources': {},\n 'actuators': {}\n }\n }\n \n connections = \\\n {\n 'source_process' :\n {\n 'out' : [('filter_and_square_process', 'in')],\n 'acceleration' : [('source_process', 'in')]\n },\n 'filter_and_square_process' :\n {\n 'filtered' : [('aggregate_and_output_process', 'in')],\n },\n 'aggregate_and_output_process':\n {}\n }\n\n multicore(processes, connections)",
"def compute_autocorrelation_and_timescale(rootdir, folder_prefix, cluster_path, te): #{{{\n\n print 'compute_autocorrelation_and_timescale'\n\n ####################################################################################################\n # set up paths and clusters\n ####################################################################################################\n\n rlzn_path_list = get_realization_paths(rootdir, folder_prefix)\n\n fopen_list = open_netcdf_files(rlzn_path_list,'output*nc')\n\n indicesToParticle, indicesOnCluster, maxIndices = get_clusters(cluster_path)\n\n # just eddy part\n rhouu, rhovv, up2, vp2, lonp, latp, lon, lat, hull = compute_autocorrelation_rlzn_ensemble(fopen_list, te)\n \n np.save('rhouu'+str(te),rhouu)\n np.save('rhovv'+str(te),rhovv)\n np.save('up'+str(te),np.sqrt(up2))\n np.save('vp'+str(te),np.sqrt(vp2))\n np.save('lonp'+str(te),lonp)\n np.save('latp'+str(te),latp)\n np.save('lon'+str(te),lon)\n np.save('lat'+str(te),lat)\n np.save('hullsimplicies'+str(te),hull.simplices)\n \n rhouu = compute_cluster_ensemble(rhouu, indicesOnCluster, maxIndices, indicesToParticle)\n rhovv = compute_cluster_ensemble(rhovv, indicesOnCluster, maxIndices, indicesToParticle)\n up2 = compute_cluster_ensemble(up2, indicesOnCluster, maxIndices, indicesToParticle)\n vp2 = compute_cluster_ensemble(vp2, indicesOnCluster, maxIndices, indicesToParticle)\n lonp = compute_cluster_ensemble(lonp, indicesOnCluster, maxIndices, indicesToParticle)\n latp = compute_cluster_ensemble(latp, indicesOnCluster, maxIndices, indicesToParticle)\n\n np.save('rhouu_cluster'+str(te),rhouu)\n np.save('rhovv_cluster'+str(te),rhovv)\n np.save('up_cluster'+str(te),np.sqrt(up2))\n np.save('vp_cluster'+str(te),np.sqrt(vp2))\n np.save('lonp_cluster'+str(te),lonp)\n np.save('latp_cluster'+str(te),latp)\n\n close_netcdf_files(fopen_list)\n\n print 'compute_autocorrelation_and_timescale done'\n return rhouu, rhovv, np.sqrt(up2), np.sqrt(up2), lonp, latp, lon, lat, hull.simplices #}}}",
"def plscorr_eval(train_fmri_ts, train_feat_ts, val_fmri_ts, val_feat_ts,\n out_dir, mask_file):\n train_feat_ts = train_feat_ts.reshape(-1, train_feat_ts.shape[3]).T\n val_feat_ts = val_feat_ts.reshape(-1, val_feat_ts.shape[3]).T\n train_fmri_ts = train_fmri_ts.T\n val_fmri_ts = val_fmri_ts.T\n\n # Iteration loop for different component number\n #for n in range(5, 19):\n # print '--- Components number %s ---' %(n)\n # plsca = PLSCanonical(n_components=n)\n # plsca.fit(train_feat_ts, train_fmri_ts)\n # pred_feat_c, pred_fmri_c = plsca.transform(val_feat_ts, val_fmri_ts)\n # pred_fmri_ts = plsca.predict(val_feat_ts) \n # # calculate correlation coefficient between truth and prediction\n # r = corr2_coef(val_fmri_ts.T, pred_fmri_ts.T, mode='pair')\n # # get top 20% corrcoef for model evaluation\n # vsample = int(np.rint(0.2*len(r)))\n # print 'Sample size for evaluation : %s' % (vsample)\n # r.sort()\n # meanr = np.mean(r[-1*vsample:])\n # print 'Mean prediction corrcoef : %s' %(meanr)\n \n # model generation based on optimized CC number\n cc_num = 10\n plsca = PLSCanonical(n_components=cc_num)\n plsca.fit(train_feat_ts, train_fmri_ts)\n from sklearn.externals import joblib\n joblib.dump(plsca, os.path.join(out_dir, 'plsca_model.pkl'))\n plsca = joblib.load(os.path.join(out_dir, 'plsca_model.pkl'))\n\n # calculate correlation coefficient between truth and prediction\n pred_fmri_ts = plsca.predict(val_feat_ts)\n fmri_pred_r = corr2_coef(val_fmri_ts.T, pred_fmri_ts.T, mode='pair')\n mask = vutil.data_swap(mask_file)\n vxl_idx = np.nonzero(mask.flatten()==1)[0]\n tmp = np.zeros_like(mask.flatten(), dtype=np.float64)\n tmp[vxl_idx] = fmri_pred_r\n tmp = tmp.reshape(mask.shape)\n vutil.save2nifti(tmp, os.path.join(out_dir, 'pred_fmri_r.nii.gz'))\n pred_feat_ts = pls_y_pred_x(plsca, val_fmri_ts)\n pred_feat_ts = pred_feat_ts.T.reshape(96, 14, 14, 540)\n np.save(os.path.join(out_dir, 'pred_feat.npy'), pred_feat_ts)\n\n # get PLS-CCA weights\n feat_cc, fmri_cc = plsca.transform(train_feat_ts, train_fmri_ts)\n np.save(os.path.join(out_dir, 'feat_cc.npy'), feat_cc)\n np.save(os.path.join(out_dir, 'fmri_cc.npy'), fmri_cc)\n feat_weight = plsca.x_weights_.reshape(96, 14, 14, cc_num)\n #feat_weight = plsca.x_weights_.reshape(96, 11, 11, cc_num)\n fmri_weight = plsca.y_weights_\n np.save(os.path.join(out_dir, 'feat_weights.npy'), feat_weight)\n np.save(os.path.join(out_dir, 'fmri_weights.npy'), fmri_weight)\n fmri_orig_ccs = get_pls_components(plsca.y_scores_, plsca.y_loadings_)\n np.save(os.path.join(out_dir, 'fmri_orig_ccs.npy'), fmri_orig_ccs)"
] | [
"0.6919708",
"0.6784384",
"0.6579483",
"0.61940783",
"0.6094425",
"0.586848",
"0.58443725",
"0.5801506",
"0.5733648",
"0.5731114",
"0.5730464",
"0.57225734",
"0.5692849",
"0.5674661",
"0.5672113",
"0.5648199",
"0.5641616",
"0.564092",
"0.5613428",
"0.5611891",
"0.55905354",
"0.5578036",
"0.55563205",
"0.55499",
"0.55285853",
"0.55034155",
"0.54760134",
"0.54377705",
"0.5429239",
"0.54246014"
] | 0.7026125 | 0 |
sum to borda count with a contiguous array added to borda count | def sum_array_ranking_to_borda_count(borda_count, corr_array):
num_elem = borda_count.size
# either assign (no duplicate case) or enumerate the correlation array
if num_elem == (np.unique(corr_array)).size:
borda_count[np.argsort(corr_array)] += np.int_(sorted(np.arange(0, corr_array.size) + 1))
return borda_count
# enumerate the borda vote
borda_add = np.zeros(num_elem)
enum_value = 1
sort_order = np.argsort(corr_array)
current_value = corr_array[sort_order[0]]
for k in range(0, num_elem):
if corr_array[sort_order[k]] != current_value:
enum_value += 1
current_value = corr_array[sort_order[k]]
borda_add[sort_order[k]] = enum_value
    # scale to the number of elements in the array -- philosophical choice here --
borda_add = borda_add + (num_elem - enum_value)
return borda_count + borda_add | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def counts_scan_binned_add(counts,val_addr,val_timestamps,pixels,dwell_time,bin_time,x,y):\n\n counts1 = np.zeros(1,(np.ceil(dwell_time/bin_time),x,y,23))\n for i in range(x):\n for j in range(y):\n delta= 0\n while val_timestamps[pixels[i,j]+delta]- val_timestamps[pixels[i,j]]<dwell_time:\n bin_number = np.floor(val_timestamps[pixels[i,j]+delta]- val_timestamps[pixels[i,j]])\n counts1[0,bin_number,i,j,val_addr[pixels[i,j]+delta]] += 1\n delta += 1\n counts = np.concatenate((counts,counts),axis = 0)\n return(counts)",
"def sum_reduce_nb(col, a, *args):\n return np.nansum(a)",
"def count():",
"def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1",
"def sum1d_pos(summand):\n total = 0\n for i in range(summand.size):\n total += summand[i]\n return total",
"def group_count(counts, comp_ids):\n # binning\n for i in range(comp_ids.size):\n val = comp_ids[i]\n counts[val] += 1\n # inclusive scan\n total = 0\n for i in range(counts.size):\n ct = counts[i]\n counts[i] = ct + total\n total += ct",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def test_expand_counts(self):\n c = array([2,0,1,2])\n self.assertEqual(expand_counts(c), array([0,0,2,3,3]))",
"def countArrary(input_a):\n if len(input_a) == 1:\n return 0\n else:\n # split the input array\n split_a = [input_a]\n while len(split_a) != len(input_a):\n new_split_a = []\n for sub_a in split_a:\n if len(sub_a) > 1:\n b, c = split_array(sub_a)\n new_split_a.append(b)\n new_split_a.append(c)\n else:\n new_split_a.append(sub_a)\n split_a = deepcopy(new_split_a)\n\n # merge and count\n merge_a = deque(split_a)\n count = 0\n while len(merge_a[0]) < len(input_a):\n new_merge_a = []\n while merge_a:\n a = merge_a.popleft()\n if merge_a:\n b = merge_a.popleft()\n c, c_inv = merge_and_count(a, b)\n count += c_inv\n new_merge_a.append(c)\n else:\n new_merge_a.append(a)\n\n merge_a = deque(deepcopy(new_merge_a))\n\n # print(merge_a)\n return count",
"def sum_elements(arr):\n return sum(arr)",
"def calculate(self, b):\n self.n_steps = self.n_steps + 1\n self.length = b.length\n self.natoms = b.natoms\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (b.atoms[i].xyz - b.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n bin_no = int(round(mag_rij/self.dr))\n if bin_no <= self.n_max:\n self.gr[bin_no] = self.gr[bin_no] + 1",
"def _gu_sum(a, **kwds):\n return np.sum(np.ascontiguousarray(a), axis=-1, **kwds)",
"def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)",
"def amine(listAmine, count):\n \n for type in listAmine.keys():\n for nitrogen in listAmine[type]:\n nbNeighbor = numberNeigthbor(nitrogen[\"neighbors\"])\n for neighbor in nitrogen[\"neighbors\"]:\n if not nbNeighbor in count[type].keys():\n count[type][nbNeighbor] = structure.countElements()\n if not nbNeighbor in count[\"GlobalAmine\"].keys():\n count[\"GlobalAmine\"][nbNeighbor] = structure.countElements()\n\n\n if neighbor[\"element\"] in count[type][nbNeighbor].keys():\n count[type][nbNeighbor][neighbor[\"element\"]] = count[type][nbNeighbor][neighbor[\"element\"]] + 1\n count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] = count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] + 1\n\n else:\n count[type][nbNeighbor][\"others\"] = count[type][nbNeighbor][\"others\"] + 1\n count[\"GlobalAmine\"][nbNeighbor][\"others\"] = count[\"GlobalAmine\"][nbNeighbor][\"others\"] + 1",
"def part_two(rucksacks: list) -> int:\n summ = 0\n for i in range(0, len(rucksacks), 3):\n first_group = set(rucksacks[i])\n second_group = set(rucksacks[i + 1])\n third_group = set(rucksacks[i + 2])\n badge = first_group.intersection(second_group).intersection(third_group)\n badge = list(badge)[0] # extract item id from set\n summ += PRIORITY.get(badge, 0)\n return summ",
"def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2",
"def totalhashes(self):\n return np.sum(self.counts)",
"def test_sum_counts_by_consensus(self):\r\n #otu_table = parse_otu_table(self.otu_table)\r\n #otu_table = parse_biom_table(self.otu_table)\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 3)\r\n exp_result = {(\r\n 'Root', 'Bacteria', 'Actinobacteria'): array([1, 0, 2, 4]),\r\n ('Root', 'Bacteria', 'Firmicutes'): array([1, 3, 1, 1]),\r\n ('Root', 'Bacteria', 'Other'): array([1, 2, 1, 0])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)\r\n\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 2)\r\n exp_result = {('Root', 'Bacteria'): array([3, 5, 4, 5])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)\r\n\r\n obs_result, obs_mapping = sum_counts_by_consensus(self.otu_table, 4)\r\n exp_result = {('Root', 'Bacteria', 'Actinobacteria', 'Actinobacteria'):\r\n array([1, 0, 2, 4]),\r\n ('Root', 'Bacteria', 'Firmicutes', '\"Clostridia\"'):\r\n array([1, 3, 1, 1]),\r\n ('Root', 'Bacteria', 'Other', 'Other'): array([1, 2, 1, 0])}\r\n exp_mapping = {'s1': 0, 's2': 1, 's3': 2, 's4': 3}\r\n self.assertItemsEqual(obs_result, exp_result)\r\n self.assertEqual(obs_mapping, exp_mapping)",
"def sum_numba(A):\n N = A.shape\n B = np.zeros((N[0], N[2]))\n for i in range(N[0]):\n for j in range(N[2]):\n for k in range(N[1]):\n B[i, j] += A[i, k, j]\n return B",
"def total2d(arr: List[List[int]]) -> int: # _8 [✅]\n # ** try to solve this in one line using a list comprehension\n return sum( [sum(sub_arr) for sub_arr in arr ] )",
"def SumaryGastos(vj):\n\n vj.GastosCUC = 0.0\n for row in vj.tbGastos.rows.values():\n vj.GastosCUC += row.valCuc\n\n UpdateRecupIdx(vj)",
"def total(h):\r\n\treturn sum(i.points() for i in h)",
"def countTriplets(arr, r):\n c_2, c_3 = Counter(), Counter()\n n_triplets = 0\n for e in arr:\n # print(f'arr: {arr}, e: {e}, c_3: {c_3}, c_2: {c_2}, n_triplets: {n_triplets}')\n if e in c_3:\n n_triplets += c_3[e]\n if e in c_2:\n c_3[e*r] += c_2[e]\n c_2[e*r] += 1\n return n_triplets",
"def countTriplets1(arr, r):\n from collections import Counter\n arr_dict = Counter()\n ratio_range = []\n triplets = 0\n\n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n\n # Build a list for easier iteration\n for key, value in arr_dict.items():\n ratio_range.append(tuple([key,value]))\n ratio_range.sort()\n \n for y in range(len(ratio_range)-2):\n firstvalue = ratio_range[y][1]\n secondvalue = ratio_range[y+1][1]\n thirdvalue = ratio_range[y+2][1]\n print(ratio_range, firstvalue, secondvalue,thirdvalue)\n\n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n print(summedvalue, triplet_count)\n triplets += triplet_count\n\n return triplets, arr_dict, ratio_range",
"def acumsum (a,dimension=None):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if type(dimension) in [ListType, TupleType, N.ndarray]:\r\n dimension = list(dimension)\r\n dimension.sort()\r\n dimension.reverse()\r\n for d in dimension:\r\n a = N.add.accumulate(a,d)\r\n return a\r\n else:\r\n return N.add.accumulate(a,dimension)",
"def sum_array(arr):\n sum = 0\n for num in arr:\n sum += num\n return sum",
"def fast_hist(a, b, n):\n # print(n)\n # print(b.max())\n k = (a >= 0) & (a < n)\n\n\n # a = np.floor(a)\n # a = a.astype(np.int)\n # print(a.max())\n # print(a.dtype)\n # print(a.shape)\n # print(type(a))\n\n return np.bincount((n * a[k].astype(int) + b[k]).astype(int), minlength=n ** 2).reshape(n, n)",
"def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)",
"def test01(self):\n a = np.arange(1e5)\n sa = a.sum(dtype='i8')\n ac = bcolz.carray(a)\n sac = ac.sum(dtype='i8')\n # print \"numpy sum-->\", sa\n # print \"carray sum-->\", sac\n self.assertTrue(sa.dtype == sac.dtype,\n \"sum() is not working correctly.\")\n self.assertTrue(sa == sac, \"sum() is not working correctly.\")",
"def total_baryon_number(particles: list[Particle]) -> int:\n return sum(particle.baryon_number for particle in particles)"
] | [
"0.6125926",
"0.6062502",
"0.6015958",
"0.59297216",
"0.5901682",
"0.5888271",
"0.58856165",
"0.58840656",
"0.5867621",
"0.5845964",
"0.58243126",
"0.58193743",
"0.57942975",
"0.57864875",
"0.57764876",
"0.576572",
"0.5749702",
"0.56927705",
"0.56662244",
"0.565349",
"0.56369776",
"0.5633893",
"0.5625445",
"0.56249726",
"0.56077963",
"0.560529",
"0.560488",
"0.56014025",
"0.55941814",
"0.5587706"
] | 0.68930274 | 0 |
percent_sample x percent_sample random sample, from spreadsheet_mat. | def sample_a_matrix_pearson(spreadsheet_mat, rows_fraction, cols_fraction):
features_size = int(np.round(spreadsheet_mat.shape[0] * (1 - rows_fraction)))
features_permutation = np.random.permutation(spreadsheet_mat.shape[0])
features_permutation = features_permutation[0:features_size].T
patients_size = int(np.round(spreadsheet_mat.shape[1] * cols_fraction))
sample_permutation = np.random.permutation(spreadsheet_mat.shape[1])
sample_permutation = sample_permutation[0:patients_size]
sample_random = spreadsheet_mat[:, sample_permutation]
sample_random[features_permutation[:, None], :] = 0
return sample_random, sample_permutation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_percent(self, percentage):\n count = int(len(self.features) * (percentage / 100))\n indices = np.random.randint(0, high=len(self.features), size=count)\n return ProcessedImageData(self.features[indices], self.labels[indices], indices)",
"def sample(self, percentage):\n if not (0.0 < percentage < 1.0):\n raise ValueError(\"Parameter percentage has to be in (0.0, 1.0).\")\n\n cls = self.__class__\n value_count = int(len(self) * percentage)\n values = random.sample(self, value_count)\n\n sample = cls.from_twodim_list(values)\n rest_values = self._timeseriesData[:]\n\n for value in values:\n rest_values.remove(value)\n\n rest = cls.from_twodim_list(rest_values)\n\n return sample, rest",
"def sample_fitness(individual):\n\n return individual.dataframe.sample(frac=0.1, random_state=0).mean().mean()",
"def test_generate_sample_sheet(self):\n pass",
"def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount):\n samples = np.zeros((amount, 2))\n n_mix = len(pred_weights[0])\n to_choose_from = np.arange(n_mix)\n for j, (weights, means, std_devs) in enumerate(\n zip(pred_weights, pred_means, pred_std)):\n index = np.random.choice(to_choose_from, p=weights)\n samples[j, 1] = np.random.normal(means[index], std_devs[index], size=1)\n samples[j, 0] = x[j]\n\n if j == amount - 1:\n break\n return samples",
"def __GenerateVariantsDistribution(self):\n np.random.seed(self.random_seed_parametr)\n try:\n Students = pd.read_excel(self.student_path)\n print('Load {}'.format(self.student_path))\n students_number = len(Students)\n\n self.__create_ALL_LR()\n Course_structure, variants_numbers = self.__generate_stracture()\n print('Generate stracture')\n Number_of_weaks = len(Course_structure)\n\n number_of_distribution = 0\n for WeakNumber in range(Number_of_weaks):\n for TaskNumber in range(Course_structure[WeakNumber]):\n Students['Week {0} Task {1}'.format(WeakNumber + 1, TaskNumber + 1)] = np.random.randint(\n variants_numbers[number_of_distribution], size=students_number)\n number_of_distribution += 1\n\n writer = pd.ExcelWriter(self.students_with_variants_path)\n print('Save {}'.format(self.students_with_variants_path))\n Students.to_excel(writer)\n writer.save()\n except:\n print('File with students doesnot exist')",
"def train_test_samples(df):\n\n from math import floor\n\n shuffled_df = df.reindex(np.random.permutation(df.index))\n\n seventy_five_percent = int(floor(len(shuffled_df) * 0.75))\n train_df = shuffled_df.iloc[:seventy_five_percent, ]\n test_df = shuffled_df.iloc[seventy_five_percent:, ]\n\n return train_df, test_df",
"def summarize_sample_props(psd_list, sample_list):\n prop_list = [psd.sample_props for psd in psd_list]\n cols = ['amplicon median', 'mean size', 'lower size', 'upper size']\n\n return pd.DataFrame(prop_list, columns=cols, index=sample_list)",
"def random_percentage(m):\n pages = []\n for i in range(m):\n p = 'https://en.wikipedia.org/wiki/Special:Random'\n pages.append(p)\n return find_percentage(pages)",
"def _get_random_sample(self):\n p=np.zeros(len(self.dim_ranges))\n for i in range(len(self.dim_ranges)):\n temp=np.linspace(self.dim_ranges[i][0],self.dim_ranges[i][1],1000)\n p[i]=np.random.choice(temp,1,True,None)\n\n return p",
"def _fract_whole_data(self) :\n if self._fract_data == -1 :\n pass\n else :\n rows = self._df.shape[0]\n fract_rows = int(rows*self._fract_data)\n self._df = self._df.sample(fract_rows).copy()",
"def downsample(data, percent):\n n_genes = data.shape[0]\n n_cells = data.shape[1]\n new_data = data.copy()\n total_count = float(data.sum())\n to_remove = total_count*percent\n # sum of read counts per cell\n cell_sums = data.sum(0).astype(float)\n # probability of selecting genes per cell\n cell_gene_probs = data/cell_sums\n # probability of selecting cells\n cell_probs = np.array(cell_sums/total_count).flatten()\n cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)\n for i, num_selected in enumerate(cells_selected):\n cell_gene = np.array(cell_gene_probs[:,i]).flatten()\n genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)\n if sparse.issparse(data):\n genes_selected = sparse.csc_matrix(genes_selected).T\n new_data[:,i] -= genes_selected\n new_data[new_data < 0] = 0\n return new_data",
"def gen_samp(self, N, M):\n Sm = np.empty((M, 2))\n count = 0\n for i in range(1, M+1, 1):\n Sm[count, 0] = (i-1)/M\n Sm[count, 1] = i/M\n count = count + 1\n \n Ns = int(N/M)\n \n uni_rand = np.empty((M, Ns))\n for i in range(0, M):\n uni_rand[i,:] = np.random.uniform(Sm[i,0], Sm[i,1], Ns)\n \n uni_rand = uni_rand.reshape((1, -1))\n \n sample = self.dist.ppf(uni_rand)\n \n return(sample)",
"def __call__(self, sample):\n img, landmarks = sample['image'], sample['landmarks']\n p = random.random()\n if p <= 1:\n h, w, c = img.shape\n for i in range(w//2):\n img[:, i, :], img[:, w-1-i, :] = img[:, w-1-i, :], img[:, i, :]\n for i in range(0, len(landmarks[0]), 2):\n x = landmarks[0][i]\n landmarks[0][i] = w-1-x\n return {'image': img,\n 'landmarks': landmarks}",
"def df_sample_concepts(self):\n return self.abundance_mat_mult(False)",
"def pwmScoresSample(pwm=\"\",sampleSize=1000):\n scoresList = []\n motifsize = len(pwm[pwm.keys()[0]])\n \n for _ in xrange(sampleSize):\n random_seq = randomSequence(motifsize)\n scoresList.append(pwm_score(random_seq, pwm))\n \n return scoresList",
"def probability(self, samples):\n pass",
"def IBP_sampler(mat):\n mat.val, mat.siblings[0].val = numba_mu.sample_2d_IBP(\n mat(),\n mat.siblings[0](),\n mat.layer.child().transpose(transpose_order),\n mat.layer.lbda(),\n mat.siblings[0].bernoulli_prior,\n mat.layer.alpha)",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def random_50_percent(mean=10, sigma=None):\n if sigma is None:\n sigma = mean / 4.0\n for duration in gaussian(mean, sigma):\n yield int(abs(duration)), int(abs(duration))",
"def random_pmf(nb_labels):\n random_numbers = np.random.random(nb_labels)\n return random_numbers / np.sum(random_numbers)",
"def load_data(sample_frac):\n # load snhunters data set\n data = sio.loadmat('../../../../data/3pi_20x20_skew2_signPreserveNorm.mat')\n #x = np.concatenate((data['X'], data['testX']))\n #y = np.squeeze(np.concatenate((data['y'], data['testy'])))\n x = data['X']\n y = data['y']\n # split the data into training, validation and test sets similar to MNIST\n m = data['X'].shape[0]\n m = m - int(.25*m)\n split = int(sample_frac*m)\n print(m,split)\n x_train = data['X'][:split]\n y_train = np.squeeze(data['y'])[:split]\n x_valid = data['X'][split:]\n y_valid = np.squeeze(data['y'])[split:]\n x_test = data['testX']\n y_test = np.squeeze(data['testy'])\n\n return x, y, m, split, x_train, y_train, x_valid, y_valid, x_test, y_test",
"def sample_gaussian(self, probabilities):\n return tf.add(probabilities, tf.random.normal(probabilities.shape, mean=0.0, stddev=1.0))",
"def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n choices = ((0, 1, 2), (0, 2, 1), (1, 0, 2),\n (1, 2, 0), (2, 1, 0), (2, 0, 1))\n p = random.random()\n if p <= 0.5:\n idx = random.randint(0, 5)\n swap = choices[idx]\n image = image[:, :, swap]\n return {'image': image,\n 'landmarks': landmarks}",
"def trans_specprof(m):\n m = asmatrix(m)\n row_sums = sum(m, axis=1)\n result = m / row_sums\n return result",
"def cell_permutation(self):\n\n self.log.info(\"Begin Sample Permutation Analysis.\")\n\n # Initialize some variables.\n self.seg_analyzer.break_points(permutation=True)\n permutation_list = self.seg_analyzer.sample_names\n # cell_permutation_data_dict = defaultdict(lambda: defaultdict(list))\n odds_string = \"\"\n unique_targeted_odds_ratio_list = []\n total_targeted_odds_ratio_list = []\n total_targeted_del_odds_ratio_list = []\n total_targeted_ins_odds_ratio_list = []\n unique_targeted_ins_odds_ratio_list = []\n unique_targeted_del_odds_ratio_list = []\n\n # Run a loop for the iterations. Shuffle the list and make a copy for each loop.\n\n for i in range(int(self.args.Iteration_Count)):\n numpy.random.shuffle(permutation_list)\n shuffled_permutation_list = permutation_list\n sub_list = []\n count = 0\n\n if i % int(self.args.Prog_Check) == 0:\n self.log.info(\"Iteration {0} of {1} for Sample Permutation Analysis.\"\n .format(i, self.args.Iteration_Count))\n\n # Pybedtools keeps all temporary files until Python exits. This helps keep the disk clean.\n pybedtools.cleanup()\n\n # Create a list with two unique, random lists of indices.\n while count < 2:\n n = (numpy.random.choice(shuffled_permutation_list, int(self.args.Sample_Group_Size), replace=False))\n\n # Remove the first set from the list\n shuffled_permutation_list = list(set(shuffled_permutation_list).difference(n))\n sub_list.append(n)\n count += 1\n\n # Retrieve a namedtuple of the permuted samples\n d0 = self.seg_analyzer.target_intersection(sub_list[0])\n d1 = self.seg_analyzer.target_intersection(sub_list[1])\n\n # cell_permutation_data_dict[0]['del'].append([d0.total_del, d0.total_targeted_del_breakpoints,\n # d0.total_unique_del, d0.unique_targeted_del_breakpoints])\n # cell_permutation_data_dict[1]['del'].append([d1.total_del, d1.total_targeted_del_breakpoints,\n # d1.total_unique_del, d1.unique_targeted_del_breakpoints])\n # cell_permutation_data_dict[0]['ins'].append([d0.total_ins, d0.total_targeted_ins_breakpoints,\n # d0.total_unique_ins, d0.unique_targeted_ins_breakpoints])\n #\n # cell_permutation_data_dict[1]['ins'].append([d1.total_ins, d1.total_targeted_ins_breakpoints,\n # d1.total_unique_ins, d1.unique_targeted_ins_breakpoints])\n\n total_breakpoint0 = d0.total_del+d0.total_ins\n total_targeted0 = d0.total_targeted_del_breakpoints+d0.total_targeted_ins_breakpoints\n total_unique_breakpoint0 = d0.total_unique_del+d0.total_unique_ins\n total_unique_targeted0 = d0.unique_targeted_del_breakpoints+d0.unique_targeted_ins_breakpoints\n\n total_breakpoint1 = d1.total_del+d1.total_ins\n total_targeted1 = d1.total_targeted_del_breakpoints+d1.total_targeted_ins_breakpoints\n total_unique_breakpoint1 = d1.total_unique_del+d1.total_unique_ins\n total_unique_targeted1 = d1.unique_targeted_del_breakpoints+d1.unique_targeted_ins_breakpoints\n\n total_target_ratio0 = total_targeted0/total_breakpoint0\n total_target_ratio1 = total_targeted1/total_breakpoint1\n\n total_target_odds = total_target_ratio0/total_target_ratio1\n\n unique_target0 = total_unique_targeted0/total_unique_breakpoint0\n unique_target1 = total_unique_targeted1/total_unique_breakpoint1\n\n unique_target_odds = unique_target0/unique_target1\n\n try:\n del_target_odds = \\\n (d0.total_del/d0.total_targeted_del_breakpoints)/(d1.total_del/d1.total_targeted_del_breakpoints)\n except ZeroDivisionError:\n del_target_odds = 0\n try:\n udel_target_odds = \\\n (d0.unique_targeted_del_breakpoints / d0.total_unique_del) / (d1.unique_targeted_del_breakpoints /\n 
d1.total_unique_del)\n except ZeroDivisionError:\n udel_target_odds = 0\n try:\n ins_target_odds = \\\n (d0.total_targeted_ins_breakpoints/d0.total_ins)/(d1.total_targeted_ins_breakpoints/d1.total_ins)\n except ZeroDivisionError:\n ins_target_odds = 0\n try:\n uins_target_odds = \\\n (d0.unique_targeted_ins_breakpoints / d0.total_unique_ins) / (d1.unique_targeted_ins_breakpoints /\n d1.total_unique_ins)\n except ZeroDivisionError:\n uins_target_odds = 0\n\n total_targeted_odds_ratio_list.append(total_target_odds)\n unique_targeted_odds_ratio_list.append(unique_target_odds)\n total_targeted_del_odds_ratio_list.append(del_target_odds)\n total_targeted_ins_odds_ratio_list.append(ins_target_odds)\n unique_targeted_del_odds_ratio_list.append(udel_target_odds)\n unique_targeted_ins_odds_ratio_list.append(uins_target_odds)\n\n odds_string += \\\n \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\" \\\n \"\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\\\n .format(total_target_odds, unique_target_odds, del_target_odds, udel_target_odds, ins_target_odds,\n uins_target_odds, total_breakpoint0, d0.total_del, d0.total_ins, total_targeted0,\n d0.total_targeted_del_breakpoints, d0.total_targeted_ins_breakpoints, total_unique_breakpoint0,\n d0.total_unique_del, d0.total_unique_ins, total_unique_targeted0,\n d0.unique_targeted_del_breakpoints, d0.unique_targeted_ins_breakpoints, total_breakpoint1,\n d1.total_del, d1.total_ins, total_targeted1, d1.total_targeted_del_breakpoints,\n d1.total_targeted_ins_breakpoints, total_unique_breakpoint1, d1.total_unique_del,\n d1.total_unique_ins, total_unique_targeted1, d1.unique_targeted_del_breakpoints,\n d1.unique_targeted_ins_breakpoints)\n\n odds_labels = \"Total Targeted\\tUnique Targeted\\tDel Targeted\\tUnique Del Targeted\\tIns Targeted\\t\" \\\n \"Unique Ins Targeted\\tSample_0 Total\\tSample_0 tDel\\tSample_0 tIns\\tSample_0 Targeted\\t\" \\\n \"Sample_0 tDel Targeted\\tSample_0 tIns Targeted\\tSample_0 Unique\\tSample_0 uDel\\tSample_0 uIns\\t\"\\\n \"Sample_0 uTargeted\\tSample_0 uDel Targeted\\tSample_0 uIns Targeted\\tSample_1 Total\\t\" \\\n \"Sample_1 tDel\\tSample_1 tIns\\tSample_1 Targeted\\tSample_1 tDel Targeted\\t\" \\\n \"Sample_1 tIns Targeted\\tSample_1 Unique\\tSample_1 uDel Targeted\\tSample_1 uIns Targeted\\n\"\n\n total_odds_mean = round(scipy.mean(total_targeted_odds_ratio_list), 2)\n del_odds_mean = round(scipy.mean(total_targeted_del_odds_ratio_list), 2)\n ins_odds_mean = round(scipy.mean(total_targeted_ins_odds_ratio_list), 2)\n\n unique_odds_mean = round(scipy.mean(unique_targeted_odds_ratio_list), 2)\n unique_del_odds_mean = round(scipy.mean(unique_targeted_del_odds_ratio_list), 2)\n unique_ins_odds_mean = round(scipy.mean(unique_targeted_ins_odds_ratio_list), 2)\n\n total975 = numpy.percentile(total_targeted_odds_ratio_list, 97.5, interpolation='linear')\n total25 = numpy.percentile(total_targeted_odds_ratio_list, 2.5, interpolation='linear')\n\n del975 = numpy.percentile(total_targeted_del_odds_ratio_list, 97.5, interpolation='linear')\n del25 = numpy.percentile(total_targeted_del_odds_ratio_list, 2.5, interpolation='linear')\n\n ins975 = numpy.percentile(total_targeted_ins_odds_ratio_list, 97.5, interpolation='linear')\n ins25 = numpy.percentile(total_targeted_ins_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_total975 = numpy.percentile(unique_targeted_odds_ratio_list, 97.5, interpolation='linear')\n unique_total25 = 
numpy.percentile(unique_targeted_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_del975 = numpy.percentile(unique_targeted_del_odds_ratio_list, 97.5, interpolation='linear')\n unique_del25 = numpy.percentile(unique_targeted_del_odds_ratio_list, 2.5, interpolation='linear')\n\n unique_ins975 = numpy.percentile(unique_targeted_ins_odds_ratio_list, 97.5, interpolation='linear')\n unique_ins25 = numpy.percentile(unique_targeted_ins_odds_ratio_list, 2.5, interpolation='linear')\n\n outstring = \"Permutation Analysis Module v{}; {} Type Permutations run {}\\n\" \\\n \"Target File:\\t{}\\nSegCopy File:\\t{}\\n\\n\" \\\n \"\\tTotalOddsMean\\tUniqueOddsMean\\tTotal 97.5\\tTotal 2.5\\tUnique 97.5\\tUnique 2.5\\n\" \\\n \"Total\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\nDel\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\nIns\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\" \\\n \"\\n\\n{}\\n{}\" \\\n .format(__version__, self.args.Permutation_Type, date.today().strftime(\"%Y-%m-%d\"), self.args.Target_File,\n self.args.Segment_File, total_odds_mean, unique_odds_mean, total975, total25, unique_total975,\n unique_total25, del_odds_mean, unique_del_odds_mean, del975, del25, unique_del975, unique_del25,\n ins_odds_mean, unique_ins_odds_mean, ins975, ins25, unique_ins975, unique_ins25, odds_labels,\n odds_string)\n\n outfile = open(\"{0}{1}_odds_ratios.txt\".format(self.args.Working_Folder, self.args.Job_Name), 'w')\n outfile.write(outstring)\n outfile.close()\n self.log.info(\"Sample Permutation Complete\")\n\n return\n #\n # ratio_mean_list = []\n # ratio_std_list = []\n # ratio_list = []\n # odds_ratio_list = []\n # outstring = \"\"\n #\n # # Format data for output file.\n # for sub_group in natsort.natsorted(cell_permutation_data_dict):\n # for key, values in cell_permutation_data_dict[sub_group].items():\n # if key == \"bp\":\n # break_point_mean = int(round(scipy.mean(values)))\n # break_point_std = round(scipy.std(values), 2)\n # break_point_median = int(round(scipy.median(values)))\n # elif key == \"intsect\":\n # intersect_mean = int(round(scipy.mean(values)))\n # intersect_std = round(scipy.std(values), 2)\n # intersect_median = int(round(scipy.median(values)))\n # elif key == \"bp/intsect\":\n # ratio_mean = scipy.mean(values)\n # ratio_std = scipy.std(values)\n # ratio_list.append(values)\n #\n # ratio_mean_list.append(ratio_mean)\n # ratio_std_list.append(ratio_std)\n #\n # outstring += \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\"\\\n # .format(break_point_mean, break_point_median, break_point_std, intersect_mean, intersect_median,\n # intersect_std)\n # outstring += \"\\t\"\n #\n # for l1, l2 in zip(ratio_list[0], ratio_list[1]):\n # odds_ratio_list.append(l1/l2)\n #\n # t = stats.t.interval(0.95, df=self.freq_calc_iterations-1, loc=scipy.mean(odds_ratio_list),\n # scale=scipy.std(odds_ratio_list) / numpy.sqrt(self.freq_calc_iterations))\n #\n # pval = stats.ttest_1samp(odds_ratio_list, 1)\n #\n # outstring += \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\n\"\\\n # .format(round(scipy.mean(odds_ratio_list), 2), round(scipy.std(odds_ratio_list), 2), round(t[0], 2),\n # round(t[1], 2), pval[1])\n #\n # for v in odds_ratio_list:\n # outstring += \"{0}\\n\".format(v)\n #\n # outfile.write(outstring)\n # outfile.close()\n #\n # print(\"Permutation Analysis of Samples Complete.\")\n #\n # return",
"def sample_row(row, columns):\n\tsampled_row = pd.Series( index = columns)\n\n\t# Sampleo cada feature segun la distribucion de la fila\n\tfor c in columns:\n\t\tc = c.rstrip('.mean')\n\t\tsampled_row[c + '.mean'] = random.normal(row[c + '.mean'], row[c + '.std'])\n\n\t# Agrego la columna clase\n\tsampled_row['class'] = row['class']\n\n\treturn sampled_row",
"def _sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)",
"def sample_X(self, m, n):\n return np.random.permutation(m)[:n]",
"def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p"
] | [
"0.6161979",
"0.54894763",
"0.54406565",
"0.52323145",
"0.52166736",
"0.517619",
"0.51314974",
"0.51023704",
"0.50680196",
"0.5064273",
"0.5061932",
"0.5058372",
"0.5058371",
"0.5053124",
"0.5050355",
"0.50431114",
"0.5041083",
"0.5039392",
"0.5032886",
"0.50317216",
"0.5027512",
"0.5027043",
"0.5008644",
"0.49912277",
"0.49805728",
"0.49790588",
"0.4975038",
"0.49713835",
"0.49696046",
"0.49334672"
] | 0.6506959 | 0 |
zscore by rows for genes x samples dataframe | def zscore_dataframe(genes_by_sample_df):
zscore_df = (genes_by_sample_df.sub(genes_by_sample_df.mean(axis=1), axis=0)).truediv(
np.maximum(genes_by_sample_df.std(axis=1), 1e-12), axis=0)
return zscore_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def zscore(vals):",
"def compute_z_score(stats, columns, col_name):\n if stats[col_name]['data_type'] != DATA_TYPES.NUMERIC:\n return {}\n\n z_scores = list(map(abs,(st.zscore(columns[col_name]))))\n threshold = 3\n z_score_outlier_indexes = [i for i in range(len(z_scores)) if z_scores[i] > threshold]\n data = {\n 'z_score_outliers': z_score_outlier_indexes\n ,'mean_z_score': round(10 * (1 - np.mean(z_scores)))\n ,'z_test_based_outlier_score': round(10 * (1 - len(z_score_outlier_indexes)/len(columns[col_name])))\n ,'z_test_based_outlier_score_description':\"\"\"\n This score indicates the amount of data that are 3 STDs or more away from the mean. That is to say, the amount of data that we consider to be an outlir. A hgih z socre means your data contains a large amount of outliers.\n \"\"\"\n }\n return data",
"def make_zscores(relfreqs, means, stdevs):\n normalized = relfreqs.sub(means, axis=\"index\")\n save_dataframe(normalized, \"2-normalized.csv\")\n #print(normalized.head())\n zscores = normalized.div(stdevs, axis=\"index\")\n save_dataframe(zscores, \"3-zscores.csv\")\n #print(zscores.head())\n return zscores",
"def z_score(x: np.ndarray) -> np.ndarray:\n return (x - np.mean(x)) / np.std(x)",
"def get_zscore_data(self):\n self.update_filter_inds()\n return _z_score(self)",
"def z_score(raw_score):\n array = pd.Series(raw_score)\n\n mean = array.mean()\n sd = array.std(ddof=0)\n\n Z = (array-mean)/sd\n\n return(list(Z))",
"def zscore_by_group(X, labels, group):\n assert(X.shape[0] == len(labels))\n idx = np.where(labels == group)[0]\n X_group_mean = np.mean(X.loc[idx], axis=0)\n X_group_std = np.std(X.loc[idx], axis=0)\n return((X - X_group_mean) / X_group_std)",
"def Test():\n x=np.array([[4,-100],[1,50],[4,50]])\n x_norm=z_score(x)\n print(x_norm)\n return",
"def z_score_std(train, test):\n scalers = {}\n for i, sample in enumerate(train):\n scalers[i] = StandardScaler()\n train[i] = scalers[i].fit_transform(sample)\n\n for i, sample in enumerate(test):\n test[i] = scalers[i].transform(sample)\n\n return train, test",
"def modified_z_score(x: np.ndarray) -> np.ndarray:\n return 0.6745 * (x - np.median(x)) / median_absolute_deviation(x)",
"def z_score_transformation(data, numeric_list):\n\n transformed_data = data[numeric_list].apply(stats.zscore())\n\n return transformed_data",
"def z_score(self, x):\n\n mean = self.mean\n stddev = self.stddev\n\n z = (x - mean) / stddev\n\n return z",
"def z_score(num, mean, std_dev):\n\treturn (num - mean) / std_dev",
"def zakharovfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n comp1 = np.sum(x**2, axis=1)\n comp2 = np.sum(0.5 * np.arange(1, n + 1) * x, axis=1)\n\n scores = comp1 + comp2**2 + comp2**4\n\n return scores",
"def zscore(time_series, axis=-1):\r\n time_series = np.asarray(time_series)\r\n et = time_series.mean(axis=axis)\r\n st = time_series.std(axis=axis)\r\n sl = [slice(None)] * len(time_series.shape)\r\n sl[axis] = np.newaxis\r\n zt = time_series - et[sl]\r\n zt /= st[sl]\r\n return zt",
"def score_samples(self, X):\n ...",
"def normalize(row):\n study = row['study']\n val = row[key]\n group_mean = df.groupby('study').mean().loc[study,key]\n group_std = df.groupby('study').std().loc[study,key]\n zval = (val - group_mean) / group_std\n return zval",
"def lz (inlist, score):\r\n z = (score-mean(inlist))/samplestdev(inlist)\r\n return z",
"def z_score(self) -> float:\n return float((self.tsdf.pct_change().iloc[-1] - self.tsdf.pct_change().mean()) / self.tsdf.pct_change().std())",
"def combine_z_scores(df, stat):\n guide1 = df.columns[0]\n guide2 = df.columns[1]\n combo_stats = (df.groupby(['condition', 'gene_a', 'gene_b'])\n .agg(sum_stat=(stat, 'sum'))\n .reset_index())\n guide_pair_df = (df.groupby(['condition', 'gene_a', 'gene_b'])\n .apply(lambda d: len({frozenset(x) for x in zip(d[guide1], d[guide2])})) # use sets to count\n # # unique guide pairs\n .reset_index(name='guide_pairs'))\n combo_stats = combo_stats.merge(guide_pair_df, how='inner', on=['condition', 'gene_a', 'gene_b'])\n combo_stats['pair_z_score'] = (combo_stats['sum_stat'] / np.sqrt(combo_stats['guide_pairs']))\n return combo_stats[['condition', 'gene_a', 'gene_b', 'guide_pairs', 'pair_z_score']]",
"def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped",
"def compute_ave_score_w_sample(genes, samples):\n\n scores = np.zeros(len(genes), dtype=np.uint32)\n\n for i, v in enumerate(genes):\n for j in samples:\n score, _ = run_duel(v, j)\n scores[i] += score\n continue\n continue\n\n return scores / len(samples)",
"def z_score(self, x):\n return (x - self.n) / self.p",
"def modified_zscore(col):\n col = col.dropna()\n med_col = col.median()\n med_abs_dev = MAD(col)\n mod_z = 0.6745*((col- med_col)/med_abs_dev)\n return np.abs(mod_z)",
"def features_sparseness(dat,sort=0): \n \n lblst=dat.columns.tolist()\n ll=len(dat)\n res=pd.Series(index=lblst,name='sparseness')\n \n for lb in lblst:\n ct=dat[lb].value_counts()\n res[lb]= ct.iloc[0]/ll\n \n if sort==1:\n res.sort_values(ascending=True,inplace=True)\n elif sort==-1:\n res.sort_values(ascending=False,inplace=True)\n else:\n pass\n \n return res",
"def zscore_pupil(self, dtype = 'bp_filt_pupil'):\r\n\r\n exec('self.' + str(dtype) + '_zscore = (self.' + str(dtype) + ' - np.mean(self.' + str(dtype) + ')) / np.std(self.' + str(dtype) + ')')",
"def drop_outliers_z_score(df, z=3):\n n_initial_rows = df.shape[0]\n drop_list = set()\n\n print('-' * 25)\n print('OUTLIERS DELETION: Z-SCORE METHOD\\n')\n\n for el in df.columns.values:\n drop_list = drop_list | \\\n set(df[el][np.abs(df[el]-df[el].mean()) >=\n (z*df[el].std())].index.values)\n\n drop_list = list(set(drop_list))\n count = len(drop_list)\n df.drop(drop_list, inplace=True)\n\n print('N of deleted rows: {} | % of deleted rows: {}% | '\n 'z-score: {}'.format(count, round(100 * (count / n_initial_rows), 3),\n z))\n return df",
"def super_complexs_score(df):\r\n return df['volume'] * 2",
"def azmap (scores, compare, dimension=0):\r\n mns = amean(compare,dimension)\r\n sstd = asamplestdev(compare,0)\r\n return (scores - mns) / sstd",
"def weighted_zscore(df: pd.DataFrame, lookback: int) -> pd.Series:\n wmean = rolling_weighted_mean(df[\"close\"], df[\"volume\"], lookback)\n wstd = rolling_weighted_std(df[\"close\"], df[\"volume\"], lookback, wmean)\n return ((df[\"close\"] - wmean) / wstd).dropna()"
] | [
"0.66935366",
"0.6103009",
"0.6100811",
"0.5995226",
"0.5923462",
"0.5915038",
"0.5738564",
"0.5714042",
"0.5642529",
"0.56247556",
"0.5544849",
"0.55446357",
"0.5512652",
"0.5510265",
"0.5501829",
"0.5499937",
"0.548653",
"0.54845107",
"0.54658073",
"0.5465162",
"0.54583454",
"0.5447478",
"0.5415131",
"0.540311",
"0.53719324",
"0.5370687",
"0.5349243",
"0.53239644",
"0.53049225",
"0.5287323"
] | 0.8123862 | 0 |
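A minimal usage sketch for the zscore_dataframe document above — illustrative only, not part of the dataset row. It assumes pandas and numpy are imported and that zscore_dataframe, exactly as shown in the document column, is in scope; the gene and sample labels are hypothetical.

import numpy as np
import pandas as pd

# Hypothetical genes x samples matrix: rows are genes, columns are samples.
genes_by_sample_df = pd.DataFrame(
    [[1.0, 2.0, 3.0],
     [10.0, 10.0, 10.0]],
    index=['geneA', 'geneB'],
    columns=['s1', 's2', 's3'])

# Each row is centered on its own mean and divided by its own std,
# floored at 1e-12 so constant rows yield 0 instead of dividing by zero.
zscores = zscore_dataframe(genes_by_sample_df)
# geneA row -> [-1.0, 0.0, 1.0] (pandas std uses ddof=1); geneB row -> all 0.0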
Returns an object from a dot path. The path can either be a full dotted path, in which case `get_object` will try to import the module and follow the remaining attributes, or a path relative to the object passed in as the second argument. | def get_object(path='', obj=None):
if not path:
return obj
path = path.split('.')
if obj is None:
obj = importlib.import_module(path[0])
path = path[1:]
for item in path:
if isinstance(obj, types.ModuleType):
submodule = '{}.{}'.format(_package(obj), item)
try:
obj = importlib.import_module(submodule)
except Exception as import_error:
try:
obj = getattr(obj, item)
except:
# FIXME: I know I should probably merge the errors, but
# it's easier just to throw the import error since
# it's most probably the one user wants to see.
# Create a new LoadingError and throw a combination
# of the import error and attribute error.
raise import_error
else:
obj = getattr(obj, item)
return obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_object(path):\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj",
"def load_object(path):\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj",
"def load_object(path):\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError, \"Error loading object '%s': not a full path\" % path\n\n module, name = path[:dot], path[dot+1:]\n try:\n mod = __import__(module, {}, {}, [''])\n except ImportError, e:\n raise ImportError, \"Error loading object '%s': %s\" % (path, e)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError, \"Module '%s' doesn't define any object named '%s'\" % (module, name)\n\n return obj",
"def import_object(path):\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot+1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj",
"def get_obj_from_path(path):\n (username, namespace, reponame, objtype, objid) = pagure.utils.parse_path(\n path\n )\n session = _get_session()\n repo = pagure.lib.query.get_authorized_project(\n session, reponame, user=username, namespace=namespace\n )\n\n if repo is None:\n raise PagureEvException(\"Project '%s' not found\" % reponame)\n\n # find the appropriate object getter function from OBJECTS\n try:\n getfunc = OBJECTS[objtype]\n except KeyError:\n raise PagureEvException(\"Invalid object provided: '%s'\" % objtype)\n\n return getfunc(repo, objid)",
"def import_object(dottedpath):\n splitted_path = dottedpath.split('.')\n module = '.'.join(splitted_path[:-1])\n obj = splitted_path[-1]\n \n module = import_module(module)\n \n return getattr(module, obj)",
"def _getattr_path(obj: Any, path: str) -> Any:\n if not path:\n return None\n\n for attr in path.split('.'):\n obj = getattr(obj, attr, None)\n return obj",
"def import_from_path(path_to_module, obj_name = None):\n module_name = path_to_module.replace(\"/\",\".\").strip(\".py\")\n module = import_module(module_name)\n if obj_name == None:\n return module\n obj = getattr(module, obj_name)\n return obj",
"def _get_object(self, path):\n if path == \"/\":\n return self.target\n\n parts = path[1:].split(\"/\")\n last = self.target\n for part in parts:\n if type(last) == dict:\n last = last[part]\n else:\n last = getattr(last, \"get_\" + part)()\n return last",
"def getObjByPath(self, path):\n return getObjByPath(self, path)",
"def load_object(imp_path):\n module_name, obj_name = imp_path.split(\".\", 1)\n module = __import__(module_name)\n obj = attrgetter(obj_name)(module)\n\n return obj",
"def get_object(self, path: str) -> Object:\n objects_found = [item for item in self._objects.values() if item.path == path]\n if len(objects_found) == 0:\n raise ClientError(\n \"ObjectNotFoundException\", f\"Object with id={path} not found\"\n )\n return objects_found[0]",
"def import_object(name):\n parts = name.split('.')\n obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)\n return getattr(obj, parts[-1])",
"def find_object_by_path(cls, object_path):\n # XXX: ideally this would be per-connection method.\n with cls._object_path_map_lock:\n return cls._object_path_to_object_map[object_path]",
"def load_object(path):\r\n with open(path,\"rb\") as f:\r\n object = pickle.load(f) \r\n return object",
"def getZopeObj(self, path):\n return self.getObjByPath(path)",
"def __get__(self, obj, objtype):\n raw_path = super(Path,self).__get__(obj,objtype)\n return self._resolve(raw_path)",
"def _locate(path: str) -> Any:\n if path == \"\":\n raise ImportError(\"Empty path\")\n from importlib import import_module\n from types import ModuleType\n\n parts = [part for part in path.split(\".\")]\n for part in parts:\n if not len(part):\n raise ValueError(\n f\"Error loading '{path}': invalid dotstring.\"\n + \"\\nRelative imports are not supported.\"\n )\n assert len(parts) > 0\n part0 = parts[0]\n try:\n obj = import_module(part0)\n except Exception as exc_import:\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_import)}\"\n + f\"\\nAre you sure that module '{part0}' is installed?\"\n ) from exc_import\n for m in range(1, len(parts)):\n part = parts[m]\n try:\n obj = getattr(obj, part)\n except AttributeError as exc_attr:\n parent_dotpath = \".\".join(parts[:m])\n if isinstance(obj, ModuleType):\n mod = \".\".join(parts[: m + 1])\n try:\n obj = import_module(mod)\n continue\n except ModuleNotFoundError as exc_import:\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_import)}\"\n + f\"\\nAre you sure that '{part}' is importable from module '{parent_dotpath}'?\"\n ) from exc_import\n except Exception as exc_import:\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_import)}\"\n ) from exc_import\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_attr)}\"\n + f\"\\nAre you sure that '{part}' is an attribute of '{parent_dotpath}'?\"\n ) from exc_attr\n return obj",
"def load_obj(path: str):\n with open(path, 'rb') as h:\n return pickle.load(h)",
"def resolve_object(object_string):\n (module_name, object_name) = object_string.rsplit(\".\", 1)\n \n try:\n module = import_module(module_name)\n except ImportError, exc:\n raise ValueError(\"Could not import module %s: %s\" % (module_name, exc))\n \n if not hasattr(module, object_name):\n raise ValueError(\"Module %s does not have object %s\" %\n (module_name, object_name))\n \n return getattr(module, object_name)",
"def load_obj(path):\n with open(path, 'rb') as f:\n return pickle.load(f)",
"def import_python_obj(path: str) -> RAW_CFG:\n mod_name, obj_name = path.rsplit('.', 1)\n try:\n mod = import_module(mod_name)\n obj = getattr(mod, obj_name)\n except (ImportError, ValueError, ModuleNotFoundError, AttributeError):\n raise ConfigException(f\"Could not import python object: {path}\")\n return cast(RAW_CFG, obj)",
"def open_pbobject(path, pb_class):\n assert path.endswith(\".json\"), 'File extension for {} needs to be json.'.format(path)\n if path.startswith('s3://'):\n return open_remote_pb_object(path, pb_class)\n assert os.path.exists(path), f'Path not found: {path}'\n with open(path, 'r', encoding='UTF-8') as json_file:\n pb_object = Parse(json_file.read(), pb_class())\n return pb_object",
"def resolve_path(self, path):\n if path:\n if path[0] == '/':\n #zope objects case\n try: return self.unrestrictedTraverse(path)\n except: pass\n else:\n #aliss (python) objects case\n try: return self.get_aliss_object(path)\n except: pass\n #case of no path\n pass",
"def import_object(name: str) -> Any:\n if name.count(\".\") == 0:\n return __import__(name)\n\n parts = name.split(\".\")\n obj = __import__(\".\".join(parts[:-1]), fromlist=[parts[-1]])\n try:\n return getattr(obj, parts[-1])\n except AttributeError:\n raise ImportError(\"No module named %s\" % parts[-1])",
"def getDmdObj(self, path):\n if path.startswith(\"/\"): path = path[1:]\n return self.getDmd().getObjByPath(path)",
"def load_from_path(path):\n module, attr = path.rsplit('.', 1)\n mod = importlib.import_module(module)\n return getattr(mod, attr)",
"def from_path(cls, path: str) -> Any:",
"def import_local(path_to_object):\n path, name = path_to_object.rsplit('.', 1)\n\n app = path.split('.')[0]\n\n if app not in settings.INSTALLED_APPS:\n raise AssertionError(\n \"Cannot import from outside installed apps\"\n )\n\n return getattr(importlib.import_module(path), name)",
"def load_dotted(name):\n components = name.split('.')\n path = [components.pop(0)]\n obj = __import__(path[0])\n while components:\n comp = components.pop(0)\n path.append(comp)\n try:\n obj = getattr(obj, comp)\n except AttributeError:\n __import__('.'.join(path))\n try:\n obj = getattr(obj, comp)\n except AttributeError:\n raise ImportError('.'.join(path))\n\n return obj"
] | [
"0.80234915",
"0.7997897",
"0.79645133",
"0.7956834",
"0.7715109",
"0.76952195",
"0.6850307",
"0.6848769",
"0.680444",
"0.6723199",
"0.66649795",
"0.66210204",
"0.6550689",
"0.6484591",
"0.6483146",
"0.6427085",
"0.641364",
"0.64006466",
"0.63594925",
"0.6324595",
"0.63090205",
"0.6230912",
"0.6181057",
"0.61792636",
"0.61532116",
"0.60831964",
"0.60710377",
"0.6063444",
"0.6047197",
"0.6047105"
] | 0.80496776 | 0 |
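A brief usage sketch for the get_object helper documented above — illustrative only, assuming the function (and the module that defines it, including any internal helpers it references) is importable. Standard-library names are used so the lookups resolve without extra dependencies.

import os

# Full dotted path: the leading segment is imported as a module, the rest is
# resolved by importing submodules or falling back to attribute access.
join_fn = get_object('os.path.join')
assert join_fn is os.path.join

# Path relative to an existing object passed as the second argument.
assert get_object('path.join', obj=os) is os.path.join

# An empty path simply returns the object that was passed in.
assert get_object('', obj=os) is os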
Recursively load all submodules of the given modules and return all subclasses of the provided class | def load_subclasses(klass, modules=None):
if modules:
if isinstance(modules, six.string_types):
modules = [modules]
loader = Loader()
loader.load(*modules)
return klass.__subclasses__() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parent_class_modules(cls):\n if not issubclass(cls, spack.package_base.PackageBase) or issubclass(\n spack.package_base.PackageBase, cls\n ):\n return []\n result = []\n module = sys.modules.get(cls.__module__)\n if module:\n result = [module]\n for c in cls.__bases__:\n result.extend(parent_class_modules(c))\n return result",
"def get_subclasses(module, clazz):\n for subclazz_name, subclazz in inspect.getmembers(module):\n if hasattr(subclazz, '__bases__') and clazz in subclazz.__bases__:\n yield (subclazz_name, subclazz)",
"def find_all_classes(module_path: Union[str, ModuleType], cls: type) -> List[type]:\n if isinstance(module_path, ModuleType):\n mod = module_path\n else:\n mod = importlib.import_module(module_path)\n\n cls_list = []\n\n def _append_cls(obj):\n # Leverage the closure trick to reuse code\n if isinstance(obj, type) and issubclass(obj, cls) and cls not in cls_list:\n cls_list.append(obj)\n\n for attr in dir(mod):\n _append_cls(getattr(mod, attr))\n\n if hasattr(mod, \"__path__\"):\n # if the model is a package\n for _, modname, _ in pkgutil.iter_modules(mod.__path__):\n sub_mod = importlib.import_module(f\"{mod.__package__}.{modname}\")\n for m_cls in find_all_classes(sub_mod, cls):\n _append_cls(m_cls)\n return cls_list",
"def childs(cls, forceLoad: bool = True) -> list:\n if forceLoad:\n ModuleLoader.loadModules(cls.__module__)\n\n return type.__subclasses__(cls)",
"def class_hierarchy(clslist):\n for cls in clslist:\n subclass_list = cls.__subclasses__()\n if subclass_list:\n for subcls in class_hierarchy(subclass_list):\n yield subcls\n else:\n yield cls",
"def get_subclasses(mod, cls):\n for name, obj in inspect.getmembers(mod):\n if hasattr(obj, \"__bases__\") and cls in obj.__bases__:\n yield obj",
"def _get_classes(package_name, base_class):\n classes = {}\n\n base_dir = os.getcwd()\n root_module_name = base_dir.split('/')[-1]\n package_dir = base_dir + '/%s' % package_name\n if os.path.isdir(package_dir):\n for module_path in os.listdir(package_dir):\n if not module_path.endswith('.py'):\n continue\n\n module_name = os.path.splitext(module_path)[0]\n module_full_name = '%s.%s.%s' % (root_module_name, package_name, module_name)\n __import__(module_full_name)\n work_module = sys.modules[module_full_name]\n for module_item in work_module.__dict__.values():\n if type(module_item) is type \\\n and issubclass(module_item, base_class) \\\n and module_item is not base_class\\\n and hasattr(module_item, 'name') and module_item.name:\n classes.setdefault(module_item.name, []).append(module_item)\n\n # check no duplicated names\n for work_name, work_modules in classes.items():\n if len(work_modules) > 1:\n raise DuplicatedNameException('Modules %s have same name \"%s\"' % (\n ' and '.join(map(str, work_modules)),\n work_name\n ))\n\n # create immutable list of modules\n return tuple([(work_name, work_modules[0]) for work_name, work_modules in classes.items()])",
"def load(self) -> t.Iterable[docspec.Module]:\n # Load all haystack modules\n temp_loader = PythonLoader(search_path=[\"../../../haystack\"])\n temp_loader.init(Context(directory=\".\"))\n all_modules = list(temp_loader.load())\n\n # Collect all classes\n classes = {}\n for module in all_modules:\n for member in module.members:\n if isinstance(member, docspec.Class):\n classes[member.name] = member\n\n # Load the modules specified in the search path\n modules = super().load()\n\n # Add inherited methods to the classes\n modules = self.include_inherited_methods(modules, classes)\n\n return modules",
"def get_subclasses(self, class_name):\n return class_name.__subclasses__()",
"def _get_all_loaded_classes(self):\n classes = {}\n for module in self.modules.values():\n for k,v in module.__dict__.items():\n # skip anything that's not a game class\n if not type(v) is type:\n continue\n base_classes = (game_object.GameObject, game_hud.GameHUD, game_room.GameRoom)\n # TODO: find out why above works but below doesn't!! O___O\n #base_classes = self.builtin_base_classes\n if issubclass(v, base_classes):\n classes[k] = v\n return classes",
"def get_all_classes_defined_in_module(module):\n for _cls in inspect.getmembers(module, inspect.isclass):\n if module.__name__ == _cls[1].__module__:\n yield _cls",
"def all(cls, package=None):\n # Determine modules that may contain extensions.\n packages = get_packages()\n if package is None:\n modules = packages.modules\n elif isinstance(package, Package):\n modules = package.modules\n else:\n modules = packages[package].modules\n # Find all subclasses of `cls`.\n subclasses = [cls]\n # Used to weed out duplicates (due to diamond inheritance).\n seen = set([cls])\n idx = 0\n while idx < len(subclasses):\n base = subclasses[idx]\n # Allow subclasses to override `all()`.\n for subclass in (base.__subclasses__()\n if base.all.__func__ is cls.all.__func__\n else base.all(package)):\n if subclass not in seen:\n subclasses.append(subclass)\n seen.add(subclass)\n idx += 1\n # Find disabled implementations.\n disabled = set()\n for key in cls.disable_map:\n interface, module = key\n if module in packages.modules and issubclass(interface, cls):\n disabled.update(cls.disable_map[key])\n # Filter out abstract classes, disabled implementations and\n # implementations not included with the active application.\n implementations = []\n for subclass in subclasses:\n if subclass.__module__ not in modules:\n continue\n if disabled:\n matches = [subclass]\n matches.append(subclass.__name__)\n matches.append(\n \"%s.%s\"\n % (subclass.__module__, subclass.__class__.__name__))\n if isinstance(subclass.priority, str):\n matches.append(subclass.priority)\n if isinstance(subclass.priority, list):\n for priority in subclass.priority:\n if isinstance(priority, str):\n matches.append(priority)\n if subclass.signature.__func__ is not \\\n Extension.signature.__func__:\n matches.append(subclass.signature())\n if any([match in matches for match in disabled]):\n continue\n implementations.append(subclass)\n return [implementation\n for implementation in implementations\n if implementation.enabled()]",
"def iter_classes(base_class, *modules, class_filter=None):\n for root_module in modules:\n try:\n module_repo = walk_modules(root_module)\n except:\n continue\n for module in module_repo:\n for obj in vars(module).values():\n if inspect.isclass(obj) and issubclass(obj, base_class) and obj.__module__ == module.__name__:\n if not class_filter or class_filter(obj):\n yield obj",
"def expand_classes_glob(classes, salt_data):\n all_classes = []\n expanded_classes = []\n saltclass_path = salt_data[\"path\"]\n\n for _class in classes:\n all_classes.extend(match_class_glob(_class, saltclass_path))\n\n for _class in all_classes:\n if _class not in expanded_classes:\n expanded_classes.append(_class)\n\n return expanded_classes",
"def load_all_submodules():\n # Load all modules in the current directory.\n pattern_list = _load_all_modules(__file__, __name__)\n return pattern_list",
"def discover_classes(\n package,\n cls_match_func=trivial,\n module_match_func=trivial,\n):\n for module in discover_modules(package, module_match_func):\n # Check all the classes in that module\n for _, imported_class in inspect.getmembers(module, inspect.isclass):\n # Don't include things that are only there due to a side-effect of\n # importing\n if imported_class.__module__ != module.__name__:\n continue\n\n if cls_match_func(imported_class):\n yield imported_class",
"def get_classes_from_package(package, import_sub_packages=False):\n\n if (import_sub_packages):\n import os\n import importlib\n\n # First, find all __init__.py files in subdirectories of this package\n root_dir = os.path.dirname(package.__file__)\n\n root_relative = os.path.dirname(root_dir)\n\n # Now loop\n for root, _, files in os.walk(root_dir):\n\n if \"__init__.py\" in files:\n\n module_name = os.path.relpath(root, root_relative).replace(\n os.sep, \".\")\n\n importlib.import_module(module_name)\n\n return ClassEnumerator(module=package, recursive=True).get_models()",
"def load_sub_modules(module):\n for loader, name, is_pkg in pkgutil.walk_packages(module.__path__):\n if '.' in name:\n continue\n\n import_module(f'{module.__name__}.{name}')",
"def all_subclasses(cls):\r\n for s in cls.__subclasses__():\r\n yield s\r\n for c in s.all_subclasses():\r\n yield c",
"def get_module_plugins(module, classobj):\n try:\n names = module.__all__\n except AttributeError:\n names = [x for x in vars(module) if not x.startswith('_')]\n for name in names:\n try:\n obj = getattr(module, name)\n except AttributeError:\n continue\n try:\n if issubclass(obj, classobj):\n yield obj\n except TypeError:\n continue",
"def scan_morepath_modules(cls: type[morepath.App]) -> None:\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))",
"def Subclass_finder(cls):\n\n subclasses = [] # Create a list to deposit subclasses\n\n for subclass in cls.__subclasses__():\n subclasses.append(subclass) # Add founded subclass\n subclasses.extend(Subclass_finder(subclass)) # Check if there is a subclass\n # of a subclass.\n\n Output_types = [] # Create a list to deposit final strings\n for i in range(len(subclasses)): \n instance = subclasses[i]() # Create an instance for the \n Output_types.append(instance.kind) # Add them to the output list\n \n return Output_types",
"def get_all_subclasses(python_class):\n python_class.__subclasses__()\n\n subclasses = set()\n check_these = [python_class]\n\n while check_these:\n parent = check_these.pop()\n for child in parent.__subclasses__():\n if child not in subclasses:\n subclasses.add(child)\n check_these.append(child)\n\n return sorted(subclasses, key=lambda x: x.__name__)",
"def walktree(classes, children, parent):\r\n results = []\r\n classes.sort(key=attrgetter('__module__', '__name__'))\r\n for c in classes:\r\n results.append((c, c.__bases__))\r\n if c in children:\r\n results.append(walktree(children[c], children, c))\r\n return results",
"def all_subclasses(cls):\n for subclass in cls.__subclasses__():\n yield subclass\n for subc in all_subclasses(subclass):\n yield subc",
"def _get_all_bases(class_or_name: Union[str, Type]) -> List[str]:\n if isinstance(class_or_name, str):\n return [class_or_name]\n\n classes = [class_or_name.__name__]\n for base in class_or_name.__bases__:\n classes.extend(_get_all_bases(base))\n\n return deduplicate(classes)",
"def get_classes_from_module(self, module):\n classes = dict([(name, cls)\n for name, cls in module.__dict__.items()\n if isinstance(cls, type)])\n self.set_latest_classes(classes)\n return self.get_latest_classes()",
"def _import_all_modules():\n import inspect\n import os\n\n all_objects = []\n globals_, locals_ = globals(), locals()\n\n # dynamically import all the package modules\n modules = set()\n json_files = set()\n for filename in os.listdir(os.path.dirname(__file__)):\n # process all python files in directory that don't start with underscore\n # (which also keeps this module from importing itself)\n modulename, ext = os.path.splitext(filename)\n if filename[0] != \"_\":\n if ext == \".py\":\n modules.add(modulename)\n elif ext == \".json\":\n json_files.add(filename)\n\n old_length = len(modules) + 1\n errors = {}\n while len(modules) and old_length > len(modules):\n old_length = len(modules)\n for modulename in modules.copy():\n package_module = \".\".join([__name__, modulename])\n try:\n module = __import__(package_module, globals_, locals_, [modulename])\n except ModuleNotFoundError as err:\n raise err\n except ImportError as err:\n errors[modulename] = repr(err)\n continue\n\n # Only the class with the same name as the file will be imported\n found_class = False\n for obj_name in filter(lambda name: name[0] != \"_\", module.__dict__):\n found_class = modulename.lower() == obj_name.lower()\n obj = module.__dict__[obj_name]\n if found_class and inspect.isclass(\n obj\n ): # Check that the object found is a class\n globals_[obj_name] = module.__dict__[obj_name]\n all_objects.append(obj_name)\n break\n\n if not found_class:\n logger.warning(\n \"File {}.py does not contain a class named {}. The file will be ignored.\"\n \"\".format(package_module, modulename)\n )\n\n modules.discard(modulename) # Remove module from the available list\n\n if modules:\n logger.warning(\"Failed to import from {} modules {}.\".format(__name__, modules))\n for modulename in modules:\n logger.debug(\"{}: {}\".format(modulename, errors[modulename]))\n\n from cosapp.systems import System\n from jsonschema import ValidationError\n\n def systemFactory(name: str, filename: str) -> System:\n obj = System.load(filename)\n obj.name = name\n return obj\n\n for json_file in json_files: # Fake class behavior for System JSON file\n try:\n tmp_system = System.load(json_file)\n except (TypeError, AttributeError, ValidationError):\n logger.warning(\n 'JSON file \"{}\" does not defined a CoSApp System.'.format(json_file)\n )\n else:\n obj_name = tmp_system.name.capitalize()\n globals_[obj_name] = lambda name: systemFactory(name, json_file)\n all_objects.append(obj_name)\n\n return all_objects",
"def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls",
"def _inspect_module(module):\n module_list = getmembers(module, predicate=ismodule)\n classes = getmembers(module, predicate=isclass)\n for (name, cls) in classes:\n if issubclass(cls, db.Model) and not issubclass(cls, Taxonomy):\n if cls is not db.Model:\n _data_classes[name] = cls\n return [mod[1] for mod in module_list]"
] | [
"0.7165705",
"0.69359386",
"0.68156147",
"0.68128246",
"0.6774304",
"0.6754992",
"0.6701584",
"0.65855294",
"0.656538",
"0.655214",
"0.65020764",
"0.64865786",
"0.64505965",
"0.6402055",
"0.63457954",
"0.63250345",
"0.6317348",
"0.6310051",
"0.63073546",
"0.62523115",
"0.62041616",
"0.6192695",
"0.6191184",
"0.61563486",
"0.6150664",
"0.6142029",
"0.61075866",
"0.60892725",
"0.6083977",
"0.6072277"
] | 0.72843313 | 0 |
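A hypothetical usage sketch for load_subclasses from the document above. The plugin package name 'myapp.plugins' and the BasePlugin class are invented for illustration, and the helper (together with the Loader it relies on) is assumed to be importable and working as shown.

class BasePlugin:
    """Hypothetical base class that plugin modules subclass."""

# Recursively import every module under the (hypothetical) 'myapp.plugins'
# package so its BasePlugin subclasses get defined, then collect them.
plugins = load_subclasses(BasePlugin, modules='myapp.plugins')

for plugin_cls in plugins:
    print(plugin_cls.__name__)  # direct subclasses of BasePlugin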