Columns: text (string, lengths 78 to 104k characters), score (float64, range 0 to 0.18)
def has(self, id, domain):
    """
    Checks if a message has a translation.

    @rtype: bool
    @return: true if the message has a translation, false otherwise
    """
    assert isinstance(id, (str, unicode))
    assert isinstance(domain, (str, unicode))

    if self.defines(id, domain):
        return True

    if self.fallback_catalogue is not None:
        return self.fallback_catalogue.has(id, domain)

    return False
0.004175
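A minimal, self-contained sketch of the fallback-lookup pattern used by the `has` snippet above. The class and attribute names here are hypothetical stand-ins, not the library's real API:

class Catalogue:
    """Toy message catalogue with an optional fallback (illustrative only)."""

    def __init__(self, messages, fallback=None):
        self.messages = messages          # {(id, domain): translation}
        self.fallback_catalogue = fallback

    def defines(self, id, domain):
        return (id, domain) in self.messages

    def has(self, id, domain):
        if self.defines(id, domain):
            return True
        if self.fallback_catalogue is not None:
            return self.fallback_catalogue.has(id, domain)
        return False


base = Catalogue({("hello", "messages"): "Hello"})
child = Catalogue({}, fallback=base)
print(child.has("hello", "messages"))  # True, resolved via the fallback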
def to_representation(self, instance):
    """
    Return the updated course data dictionary.

    Arguments:
        instance (dict): The course data.

    Returns:
        dict: The updated course data.
    """
    updated_course = copy.deepcopy(instance)
    enterprise_customer_catalog = self.context['enterprise_customer_catalog']
    updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(
        updated_course['key']
    )
    for course_run in updated_course['course_runs']:
        course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(
            course_run['key']
        )
    return updated_course
0.006667
def _update(self, response_headers):
    """
    Update the state of the rate limiter based on the response headers:

        X-Ratelimit-Used: Approximate number of requests used this period
        X-Ratelimit-Remaining: Approximate number of requests left to use
        X-Ratelimit-Reset: Approximate number of seconds to end of period

    PRAW 5's rate limiting logic is structured for making hundreds of
    evenly-spaced API requests, which makes sense for running something like
    a bot or crawler. This handler's logic, on the other hand, is geared more
    towards interactive usage. It allows for short, sporadic bursts of
    requests. The assumption is that actual users browsing reddit shouldn't
    ever be in danger of hitting the rate limit. If they do hit the limit,
    they will be cutoff until the period resets.
    """
    if 'x-ratelimit-remaining' not in response_headers:
        # This could be because the API returned an error response, or it
        # could be because we're using something like read-only credentials
        # which Reddit doesn't appear to care about rate limiting.
        return

    self.used = float(response_headers['x-ratelimit-used'])
    self.remaining = float(response_headers['x-ratelimit-remaining'])
    self.seconds_to_reset = int(response_headers['x-ratelimit-reset'])
    _logger.debug('Rate limit: %s used, %s remaining, %s reset',
                  self.used, self.remaining, self.seconds_to_reset)

    if self.remaining <= 0:
        self.next_request_timestamp = time.time() + self.seconds_to_reset
    else:
        self.next_request_timestamp = None
0.001149
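A standalone sketch of the same header-driven bookkeeping in the rate-limiter snippet above, run against a fabricated header dict (the values are made up; attribute storage on a class is omitted):

import time

headers = {                       # fabricated example values
    'x-ratelimit-used': '598',
    'x-ratelimit-remaining': '2',
    'x-ratelimit-reset': '45',
}

used = float(headers['x-ratelimit-used'])
remaining = float(headers['x-ratelimit-remaining'])
seconds_to_reset = int(headers['x-ratelimit-reset'])

# Only back off once the remaining budget is exhausted.
next_request_timestamp = time.time() + seconds_to_reset if remaining <= 0 else None
print(used, remaining, seconds_to_reset, next_request_timestamp)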
def get_resources(self, types=None, names=None, languages=None):
    """
    Get resources.

    types = a list of resource types to search for (None = all)
    names = a list of resource names to search for (None = all)
    languages = a list of resource languages to search for (None = all)

    Return a dict of the form {type_: {name: {language: data}}} which
    might also be empty if no matching resources were found.
    """
    return GetResources(self.filename, types, names, languages)
0.009107
def process_arguments(self, func, args): """Process arguments from the command line into positional and kw args. Arguments are consumed until the argument spec for the function is filled or a -- is found or there are no more arguments. Keyword arguments can be specified using --field=value, -f value or --field value. Positional arguments are specified just on the command line itself. If a keyword argument (`field`) is a boolean, it can be set to True by just passing --field or -f without needing to explicitly pass True unless this would cause ambiguity in parsing since the next expected positional argument is also a boolean or a string. Args: func (callable): A function previously annotated with type information args (list): A list of all of the potential arguments to this function. Returns: (args, kw_args, unused args): A tuple with a list of args, a dict of keyword args and a list of any unused args that were not processed. """ pos_args = [] kw_args = {} while len(args) > 0: if func.metadata.spec_filled(pos_args, kw_args) and not self._is_flag(args[0]): break arg = args.pop(0) if arg == '--': break elif self._is_flag(arg): arg_value = None arg_name = None if len(arg) == 2: arg_name = func.metadata.match_shortname(arg[1:], filled_args=pos_args) else: if not arg.startswith('--'): raise ArgumentError("Invalid method of specifying keyword argument that did not start with --", argument=arg) # Skip the -- arg = arg[2:] # Check if the value is embedded in the parameter # Make sure we allow the value after the equals sign to also # contain an equals sign. if '=' in arg: arg, arg_value = arg.split('=', 1) arg_name = func.metadata.match_shortname(arg, filled_args=pos_args) arg_type = func.metadata.param_type(arg_name) if arg_type is None: raise ArgumentError("Attempting to set a parameter from command line that does not have type information", argument=arg_name) # If we don't have a value yet, attempt to get one from the next parameter on the command line if arg_value is None: arg_value = self._extract_arg_value(arg_name, arg_type, args) kw_args[arg_name] = arg_value else: pos_args.append(arg) # Always check if there is a trailing '--' and chomp so that we always # start on a function name. This can happen if there is a gratuitous # -- for a 0 arg function or after an implicit boolean flag like -f -- if len(args) > 0 and args[0] == '--': args.pop(0) return pos_args, kw_args, args
0.005979
def noise3d(self, x, y, z): """ Generate 3D OpenSimplex noise from X,Y,Z coordinates. """ # Place input coordinates on simplectic honeycomb. stretch_offset = (x + y + z) * STRETCH_CONSTANT_3D xs = x + stretch_offset ys = y + stretch_offset zs = z + stretch_offset # Floor to get simplectic honeycomb coordinates of rhombohedron (stretched cube) super-cell origin. xsb = floor(xs) ysb = floor(ys) zsb = floor(zs) # Skew out to get actual coordinates of rhombohedron origin. We'll need these later. squish_offset = (xsb + ysb + zsb) * SQUISH_CONSTANT_3D xb = xsb + squish_offset yb = ysb + squish_offset zb = zsb + squish_offset # Compute simplectic honeycomb coordinates relative to rhombohedral origin. xins = xs - xsb yins = ys - ysb zins = zs - zsb # Sum those together to get a value that determines which region we're in. in_sum = xins + yins + zins # Positions relative to origin point. dx0 = x - xb dy0 = y - yb dz0 = z - zb value = 0 extrapolate = self._extrapolate3d if in_sum <= 1: # We're inside the tetrahedron (3-Simplex) at (0,0,0) # Determine which two of (0,0,1), (0,1,0), (1,0,0) are closest. a_point = 0x01 a_score = xins b_point = 0x02 b_score = yins if a_score >= b_score and zins > b_score: b_score = zins b_point = 0x04 elif a_score < b_score and zins > a_score: a_score = zins a_point = 0x04 # Now we determine the two lattice points not part of the tetrahedron that may contribute. # This depends on the closest two tetrahedral vertices, including (0,0,0) wins = 1 - in_sum if wins > a_score or wins > b_score: # (0,0,0) is one of the closest two tetrahedral vertices. c = b_point if (b_score > a_score) else a_point # Our other closest vertex is the closest out of a and b. if (c & 0x01) == 0: xsv_ext0 = xsb - 1 xsv_ext1 = xsb dx_ext0 = dx0 + 1 dx_ext1 = dx0 else: xsv_ext0 = xsv_ext1 = xsb + 1 dx_ext0 = dx_ext1 = dx0 - 1 if (c & 0x02) == 0: ysv_ext0 = ysv_ext1 = ysb dy_ext0 = dy_ext1 = dy0 if (c & 0x01) == 0: ysv_ext1 -= 1 dy_ext1 += 1 else: ysv_ext0 -= 1 dy_ext0 += 1 else: ysv_ext0 = ysv_ext1 = ysb + 1 dy_ext0 = dy_ext1 = dy0 - 1 if (c & 0x04) == 0: zsv_ext0 = zsb zsv_ext1 = zsb - 1 dz_ext0 = dz0 dz_ext1 = dz0 + 1 else: zsv_ext0 = zsv_ext1 = zsb + 1 dz_ext0 = dz_ext1 = dz0 - 1 else: # (0,0,0) is not one of the closest two tetrahedral vertices. c = (a_point | b_point) # Our two extra vertices are determined by the closest two. 
if (c & 0x01) == 0: xsv_ext0 = xsb xsv_ext1 = xsb - 1 dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_3D dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_3D else: xsv_ext0 = xsv_ext1 = xsb + 1 dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D if (c & 0x02) == 0: ysv_ext0 = ysb ysv_ext1 = ysb - 1 dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_3D dy_ext1 = dy0 + 1 - SQUISH_CONSTANT_3D else: ysv_ext0 = ysv_ext1 = ysb + 1 dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D if (c & 0x04) == 0: zsv_ext0 = zsb zsv_ext1 = zsb - 1 dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_3D dz_ext1 = dz0 + 1 - SQUISH_CONSTANT_3D else: zsv_ext0 = zsv_ext1 = zsb + 1 dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D # Contribution (0,0,0) attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 if attn0 > 0: attn0 *= attn0 value += attn0 * attn0 * extrapolate(xsb + 0, ysb + 0, zsb + 0, dx0, dy0, dz0) # Contribution (1,0,0) dx1 = dx0 - 1 - SQUISH_CONSTANT_3D dy1 = dy0 - 0 - SQUISH_CONSTANT_3D dz1 = dz0 - 0 - SQUISH_CONSTANT_3D attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 if attn1 > 0: attn1 *= attn1 value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, dx1, dy1, dz1) # Contribution (0,1,0) dx2 = dx0 - 0 - SQUISH_CONSTANT_3D dy2 = dy0 - 1 - SQUISH_CONSTANT_3D dz2 = dz1 attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 if attn2 > 0: attn2 *= attn2 value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, dx2, dy2, dz2) # Contribution (0,0,1) dx3 = dx2 dy3 = dy1 dz3 = dz0 - 1 - SQUISH_CONSTANT_3D attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 if attn3 > 0: attn3 *= attn3 value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, dx3, dy3, dz3) elif in_sum >= 2: # We're inside the tetrahedron (3-Simplex) at (1,1,1) # Determine which two tetrahedral vertices are the closest, out of (1,1,0), (1,0,1), (0,1,1) but not (1,1,1). a_point = 0x06 a_score = xins b_point = 0x05 b_score = yins if a_score <= b_score and zins < b_score: b_score = zins b_point = 0x03 elif a_score > b_score and zins < a_score: a_score = zins a_point = 0x03 # Now we determine the two lattice points not part of the tetrahedron that may contribute. # This depends on the closest two tetrahedral vertices, including (1,1,1) wins = 3 - in_sum if wins < a_score or wins < b_score: # (1,1,1) is one of the closest two tetrahedral vertices. c = b_point if (b_score < a_score) else a_point # Our other closest vertex is the closest out of a and b. if (c & 0x01) != 0: xsv_ext0 = xsb + 2 xsv_ext1 = xsb + 1 dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_3D dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D else: xsv_ext0 = xsv_ext1 = xsb dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_3D if (c & 0x02) != 0: ysv_ext0 = ysv_ext1 = ysb + 1 dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D if (c & 0x01) != 0: ysv_ext1 += 1 dy_ext1 -= 1 else: ysv_ext0 += 1 dy_ext0 -= 1 else: ysv_ext0 = ysv_ext1 = ysb dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_3D if (c & 0x04) != 0: zsv_ext0 = zsb + 1 zsv_ext1 = zsb + 2 dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 - 3 * SQUISH_CONSTANT_3D else: zsv_ext0 = zsv_ext1 = zsb dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_3D else: # (1,1,1) is not one of the closest two tetrahedral vertices. c = (a_point & b_point) # Our two extra vertices are determined by the closest two. 
if (c & 0x01) != 0: xsv_ext0 = xsb + 1 xsv_ext1 = xsb + 2 dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D dx_ext1 = dx0 - 2 - 2 * SQUISH_CONSTANT_3D else: xsv_ext0 = xsv_ext1 = xsb dx_ext0 = dx0 - SQUISH_CONSTANT_3D dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D if (c & 0x02) != 0: ysv_ext0 = ysb + 1 ysv_ext1 = ysb + 2 dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D dy_ext1 = dy0 - 2 - 2 * SQUISH_CONSTANT_3D else: ysv_ext0 = ysv_ext1 = ysb dy_ext0 = dy0 - SQUISH_CONSTANT_3D dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D if (c & 0x04) != 0: zsv_ext0 = zsb + 1 zsv_ext1 = zsb + 2 dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 - 2 * SQUISH_CONSTANT_3D else: zsv_ext0 = zsv_ext1 = zsb dz_ext0 = dz0 - SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D # Contribution (1,1,0) dx3 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D dy3 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D dz3 = dz0 - 0 - 2 * SQUISH_CONSTANT_3D attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 if attn3 > 0: attn3 *= attn3 value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, dx3, dy3, dz3) # Contribution (1,0,1) dx2 = dx3 dy2 = dy0 - 0 - 2 * SQUISH_CONSTANT_3D dz2 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 if attn2 > 0: attn2 *= attn2 value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, dx2, dy2, dz2) # Contribution (0,1,1) dx1 = dx0 - 0 - 2 * SQUISH_CONSTANT_3D dy1 = dy3 dz1 = dz2 attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 if attn1 > 0: attn1 *= attn1 value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, dx1, dy1, dz1) # Contribution (1,1,1) dx0 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D dy0 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D dz0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 if attn0 > 0: attn0 *= attn0 value += attn0 * attn0 * extrapolate(xsb + 1, ysb + 1, zsb + 1, dx0, dy0, dz0) else: # We're inside the octahedron (Rectified 3-Simplex) in between. # Decide between point (0,0,1) and (1,1,0) as closest p1 = xins + yins if p1 > 1: a_score = p1 - 1 a_point = 0x03 a_is_further_side = True else: a_score = 1 - p1 a_point = 0x04 a_is_further_side = False # Decide between point (0,1,0) and (1,0,1) as closest p2 = xins + zins if p2 > 1: b_score = p2 - 1 b_point = 0x05 b_is_further_side = True else: b_score = 1 - p2 b_point = 0x02 b_is_further_side = False # The closest out of the two (1,0,0) and (0,1,1) will replace the furthest out of the two decided above, if closer. p3 = yins + zins if p3 > 1: score = p3 - 1 if a_score <= b_score and a_score < score: a_point = 0x06 a_is_further_side = True elif a_score > b_score and b_score < score: b_point = 0x06 b_is_further_side = True else: score = 1 - p3 if a_score <= b_score and a_score < score: a_point = 0x01 a_is_further_side = False elif a_score > b_score and b_score < score: b_point = 0x01 b_is_further_side = False # Where each of the two closest points are determines how the extra two vertices are calculated. if a_is_further_side == b_is_further_side: if a_is_further_side: # Both closest points on (1,1,1) side # One of the two extra points is (1,1,1) dx_ext0 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D dy_ext0 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D xsv_ext0 = xsb + 1 ysv_ext0 = ysb + 1 zsv_ext0 = zsb + 1 # Other extra point is based on the shared axis. 
c = (a_point & b_point) if (c & 0x01) != 0: dx_ext1 = dx0 - 2 - 2 * SQUISH_CONSTANT_3D dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D xsv_ext1 = xsb + 2 ysv_ext1 = ysb zsv_ext1 = zsb elif (c & 0x02) != 0: dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D dy_ext1 = dy0 - 2 - 2 * SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D xsv_ext1 = xsb ysv_ext1 = ysb + 2 zsv_ext1 = zsb else: dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 - 2 * SQUISH_CONSTANT_3D xsv_ext1 = xsb ysv_ext1 = ysb zsv_ext1 = zsb + 2 else:# Both closest points on (0,0,0) side # One of the two extra points is (0,0,0) dx_ext0 = dx0 dy_ext0 = dy0 dz_ext0 = dz0 xsv_ext0 = xsb ysv_ext0 = ysb zsv_ext0 = zsb # Other extra point is based on the omitted axis. c = (a_point | b_point) if (c & 0x01) == 0: dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_3D dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D xsv_ext1 = xsb - 1 ysv_ext1 = ysb + 1 zsv_ext1 = zsb + 1 elif (c & 0x02) == 0: dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D dy_ext1 = dy0 + 1 - SQUISH_CONSTANT_3D dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D xsv_ext1 = xsb + 1 ysv_ext1 = ysb - 1 zsv_ext1 = zsb + 1 else: dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D dz_ext1 = dz0 + 1 - SQUISH_CONSTANT_3D xsv_ext1 = xsb + 1 ysv_ext1 = ysb + 1 zsv_ext1 = zsb - 1 else: # One point on (0,0,0) side, one point on (1,1,1) side if a_is_further_side: c1 = a_point c2 = b_point else: c1 = b_point c2 = a_point # One contribution is a _permutation of (1,1,-1) if (c1 & 0x01) == 0: dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_3D dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D xsv_ext0 = xsb - 1 ysv_ext0 = ysb + 1 zsv_ext0 = zsb + 1 elif (c1 & 0x02) == 0: dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D dy_ext0 = dy0 + 1 - SQUISH_CONSTANT_3D dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D xsv_ext0 = xsb + 1 ysv_ext0 = ysb - 1 zsv_ext0 = zsb + 1 else: dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D dz_ext0 = dz0 + 1 - SQUISH_CONSTANT_3D xsv_ext0 = xsb + 1 ysv_ext0 = ysb + 1 zsv_ext0 = zsb - 1 # One contribution is a _permutation of (0,0,2) dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D xsv_ext1 = xsb ysv_ext1 = ysb zsv_ext1 = zsb if (c2 & 0x01) != 0: dx_ext1 -= 2 xsv_ext1 += 2 elif (c2 & 0x02) != 0: dy_ext1 -= 2 ysv_ext1 += 2 else: dz_ext1 -= 2 zsv_ext1 += 2 # Contribution (1,0,0) dx1 = dx0 - 1 - SQUISH_CONSTANT_3D dy1 = dy0 - 0 - SQUISH_CONSTANT_3D dz1 = dz0 - 0 - SQUISH_CONSTANT_3D attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 if attn1 > 0: attn1 *= attn1 value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, dx1, dy1, dz1) # Contribution (0,1,0) dx2 = dx0 - 0 - SQUISH_CONSTANT_3D dy2 = dy0 - 1 - SQUISH_CONSTANT_3D dz2 = dz1 attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 if attn2 > 0: attn2 *= attn2 value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, dx2, dy2, dz2) # Contribution (0,0,1) dx3 = dx2 dy3 = dy1 dz3 = dz0 - 1 - SQUISH_CONSTANT_3D attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 if attn3 > 0: attn3 *= attn3 value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, dx3, dy3, dz3) # Contribution (1,1,0) dx4 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D dy4 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D dz4 = dz0 - 0 - 2 * SQUISH_CONSTANT_3D attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 if attn4 > 0: attn4 *= attn4 value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 0, dx4, 
dy4, dz4) # Contribution (1,0,1) dx5 = dx4 dy5 = dy0 - 0 - 2 * SQUISH_CONSTANT_3D dz5 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 if attn5 > 0: attn5 *= attn5 value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 0, zsb + 1, dx5, dy5, dz5) # Contribution (0,1,1) dx6 = dx0 - 0 - 2 * SQUISH_CONSTANT_3D dy6 = dy4 dz6 = dz5 attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 if attn6 > 0: attn6 *= attn6 value += attn6 * attn6 * extrapolate(xsb + 0, ysb + 1, zsb + 1, dx6, dy6, dz6) # First extra vertex attn_ext0 = 2 - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0 if attn_ext0 > 0: attn_ext0 *= attn_ext0 value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, dx_ext0, dy_ext0, dz_ext0) # Second extra vertex attn_ext1 = 2 - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1 if attn_ext1 > 0: attn_ext1 *= attn_ext1 value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, dx_ext1, dy_ext1, dz_ext1) return value / NORM_CONSTANT_3D
0.002448
def list_all_customers(cls, **kwargs):
    """List Customers

    Return a list of Customers

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.list_all_customers(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Customer]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_customers_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_customers_with_http_info(**kwargs)
        return data
0.002323
def resnet_main(seed, flags, model_function, input_function, shape=None): """Shared main loop for ResNet Models. Args: flags: FLAGS object that contains the params for running. See ResnetArgParser for created flags. model_function: the function that instantiates the Model and builds the ops for train/eval. This will be passed directly into the estimator. input_function: the function that processes the dataset and returns a dataset that the estimator can train on. This will be wrapped with all the relevant flags for running and passed to estimator. shape: list of ints representing the shape of the images used for training. This is only used if flags.export_dir is passed. """ mlperf_log.resnet_print(key=mlperf_log.RUN_START) # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # Create session config based on values of inter_op_parallelism_threads and # intra_op_parallelism_threads. Note that we default to having # allow_soft_placement = True, which is required for multi-GPU and not # harmful for other modes. session_config = tf.ConfigProto( inter_op_parallelism_threads=flags.inter_op_parallelism_threads, intra_op_parallelism_threads=flags.intra_op_parallelism_threads, allow_soft_placement=True) if flags.num_gpus == 0: distribution = tf.contrib.distribute.OneDeviceStrategy('device:CPU:0') elif flags.num_gpus == 1: distribution = tf.contrib.distribute.OneDeviceStrategy('device:GPU:0') else: distribution = tf.contrib.distribute.MirroredStrategy( num_gpus=flags.num_gpus ) mlperf_log.resnet_print(key=mlperf_log.RUN_SET_RANDOM_SEED, value=seed) run_config = tf.estimator.RunConfig(train_distribute=distribution, session_config=session_config, tf_random_seed=seed) mlperf_log.resnet_print(key=mlperf_log.INPUT_BATCH_SIZE, value=flags.batch_size) classifier = tf.estimator.Estimator( model_fn=model_function, model_dir=flags.model_dir, config=run_config, params={ 'resnet_size': flags.resnet_size, 'data_format': flags.data_format, 'batch_size': flags.batch_size, 'version': flags.version, 'loss_scale': flags.loss_scale, 'dtype': flags.dtype, 'label_smoothing': flags.label_smoothing, 'enable_lars': flags.enable_lars, 'weight_decay': flags.weight_decay, 'fine_tune': flags.fine_tune }) if flags.benchmark_log_dir is not None: benchmark_logger = logger.BenchmarkLogger(flags.benchmark_log_dir) benchmark_logger.log_run_info('resnet') else: benchmark_logger = None mlperf_log.resnet_print(key=mlperf_log.TRAIN_LOOP) # The reference performs the first evaluation on the fourth epoch. (offset # eval by 3 epochs) mlperf_log.resnet_print(key=mlperf_log.EVAL_EPOCH_OFFSET, value=3) success = False for i in range(flags.train_epochs // flags.epochs_between_evals): # Data for epochs_between_evals (i.e. 4 epochs between evals) worth of # epochs is concatenated and run as a single block inside a session. For # this reason we declare all of the epochs that will be run at the start. # Submitters may report in a way which is reasonable for their control flow. 
for j in range(flags.epochs_between_evals): mlperf_log.resnet_print(key=mlperf_log.TRAIN_EPOCH, value=i * flags.epochs_between_evals + j) train_hooks = hooks_helper.get_train_hooks( flags.hooks, batch_size=flags.batch_size, benchmark_log_dir=flags.benchmark_log_dir) _log_cache = [] def formatter(x): """Abuse side effects to get tensors out of the model_fn.""" if _log_cache: _log_cache.pop() _log_cache.append(x.copy()) return str(x) compliance_hook = tf.train.LoggingTensorHook( tensors={_NUM_EXAMPLES_NAME: _NUM_EXAMPLES_NAME}, every_n_iter=int(1e10), at_end=True, formatter=formatter) print('Starting a training cycle.') def input_fn_train(): return input_function( is_training=True, data_dir=flags.data_dir, batch_size=per_device_batch_size(flags.batch_size, flags.num_gpus), num_epochs=flags.epochs_between_evals, num_gpus=flags.num_gpus, dtype=flags.dtype ) classifier.train(input_fn=input_fn_train, hooks=train_hooks + [compliance_hook], max_steps=flags.max_train_steps) train_examples = int(_log_cache.pop()[_NUM_EXAMPLES_NAME]) mlperf_log.resnet_print(key=mlperf_log.INPUT_SIZE, value=train_examples) print('Starting to evaluate.') # Evaluate the model and print results def input_fn_eval(): return input_function( is_training=False, data_dir=flags.data_dir, batch_size=per_device_batch_size(flags.batch_size, flags.num_gpus), num_epochs=1, dtype=flags.dtype ) mlperf_log.resnet_print(key=mlperf_log.EVAL_START) # flags.max_train_steps is generally associated with testing and profiling. # As a result it is frequently called with synthetic data, which will # iterate forever. Passing steps=flags.max_train_steps allows the eval # (which is generally unimportant in those circumstances) to terminate. # Note that eval will run for max_train_steps each loop, regardless of the # global_step count. eval_results = classifier.evaluate(input_fn=input_fn_eval, steps=flags.max_train_steps) mlperf_log.resnet_print(key=mlperf_log.EVAL_STOP) mlperf_log.resnet_print(key=mlperf_log.EVAL_SIZE, value=int(eval_results[_NUM_EXAMPLES_NAME])) mlperf_log.resnet_print(key=mlperf_log.EVAL_ACCURACY, value=float(eval_results['accuracy'])) mlperf_log.resnet_print(key=mlperf_log.EVAL_TARGET, value=flags.stop_threshold) print(eval_results) if benchmark_logger: benchmark_logger.log_estimator_evaluation_result(eval_results) if model_helpers.past_stop_threshold( flags.stop_threshold, eval_results['accuracy']): success = True break mlperf_log.resnet_print(key=mlperf_log.RUN_STOP, value={"success": success}) mlperf_log.resnet_print(key=mlperf_log.RUN_FINAL)
0.007122
def status(self, obj):
    """Get the wifi interface status."""
    data_size = DWORD()
    data = PDWORD()
    opcode_value_type = DWORD()
    self._wlan_query_interface(self._handle, obj['guid'], 6,
                               byref(data_size), byref(data),
                               byref(opcode_value_type))
    return status_dict[data.contents.value]
0.005038
def _filter_contains(self, term, field_name, field_type, is_not):
    """
    Splits the sentence in terms and join them with OR,
    using stemmed and un-stemmed.

    Assumes term is not a list.
    """
    if field_type == 'text':
        term_list = term.split()
    else:
        term_list = [term]

    query = self._or_query(term_list, field_name, field_type)
    if is_not:
        return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
    else:
        return query
0.005484
def p_action(p):
    """
    action : CLIENT_KWD KEY DOT KEY LPAREN args RPAREN
    action : SERVER_KWD KEY DOT KEY LPAREN args RPAREN
    action : CLIENT_KWD KEY DOT KEY LPAREN args RPAREN IF_KWD REGEX_MATCH_INCOMING_KWD LPAREN p_string_arg RPAREN
    action : SERVER_KWD KEY DOT KEY LPAREN args RPAREN IF_KWD REGEX_MATCH_INCOMING_KWD LPAREN p_string_arg RPAREN
    """
    if len(p) == 8:
        p[0] = [p[1], p[2], p[4], p[6], None]
    elif len(p) == 13:
        p[0] = [p[1], p[2], p[4], p[6], p[11]]
0.00996
def _recv_sf(self, data):
    """Process a received 'Single Frame' frame"""
    self.rx_timer.cancel()
    if self.rx_state != ISOTP_IDLE:
        warning("RX state was reset because single frame was received")
        self.rx_state = ISOTP_IDLE

    length = six.indexbytes(data, 0) & 0xf
    if len(data) - 1 < length:
        return 1

    msg = data[1:1 + length]
    self.rx_queue.put(msg)
    for cb in self.rx_callbacks:
        cb(msg)
    self.call_release()
    return 0
0.003731
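In an ISO-TP Single Frame, the low nibble of the first (PCI) byte carries the payload length, which is exactly what `six.indexbytes(data, 0) & 0xf` extracts in the snippet above. A standalone illustration with a made-up frame:

data = bytes([0x03, 0x11, 0x22, 0x33, 0x00, 0x00, 0x00, 0x00])  # fabricated SF frame
length = data[0] & 0xF          # low nibble of the PCI byte -> payload length
payload = data[1:1 + length]
print(length, payload)          # -> 3, bytes 0x11 0x22 0x33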
def commands(self):
    """
    Returns a list of commands that are supported by the motor
    controller. Possible values are `run-forever`, `run-to-abs-pos`,
    `run-to-rel-pos`, `run-timed`, `run-direct`, `stop` and `reset`.
    Not all commands may be supported.

    - `run-forever` will cause the motor to run until another command is sent.
    - `run-to-abs-pos` will run to an absolute position specified by
      `position_sp` and then stop using the action specified in `stop_action`.
    - `run-to-rel-pos` will run to a position relative to the current
      `position` value. The new position will be current `position` +
      `position_sp`. When the new position is reached, the motor will stop
      using the action specified by `stop_action`.
    - `run-timed` will run the motor for the amount of time specified in
      `time_sp` and then stop the motor using the action specified by
      `stop_action`.
    - `run-direct` will run the motor at the duty cycle specified by
      `duty_cycle_sp`. Unlike other run commands, changing `duty_cycle_sp`
      while running *will* take effect immediately.
    - `stop` will stop any of the run commands before they are complete using
      the action specified by `stop_action`.
    - `reset` will reset all of the motor parameter attributes to their
      default value. This will also have the effect of stopping the motor.
    """
    (self._commands, value) = self.get_cached_attr_set(self._commands, 'commands')
    return value
0.009548
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
                    quoting=None, **kwargs):
    """ convert to our native types format, slicing if desired """
    values = self.values
    i8values = self.values.view('i8')

    if slicer is not None:
        values = values[..., slicer]
        i8values = i8values[..., slicer]

    from pandas.io.formats.format import _get_format_datetime64_from_values
    fmt = _get_format_datetime64_from_values(values, date_format)

    result = tslib.format_array_from_datetime(
        i8values.ravel(), tz=getattr(self.values, 'tz', None),
        format=fmt, na_rep=na_rep).reshape(i8values.shape)
    return np.atleast_2d(result)
0.004005
def _rpc(http, project, method, base_url, request_pb, response_pb_cls):
    """Make a protobuf RPC request.

    :type http: :class:`requests.Session`
    :param http: HTTP object to make requests.

    :type project: str
    :param project: The project to connect to. This is
                    usually your project name in the cloud console.

    :type method: str
    :param method: The name of the method to invoke.

    :type base_url: str
    :param base_url: The base URL where the API lives.

    :type request_pb: :class:`google.protobuf.message.Message` instance
    :param request_pb: the protobuf instance representing the request.

    :type response_pb_cls: A :class:`google.protobuf.message.Message`
                           subclass.
    :param response_pb_cls: The class used to unmarshall the response
                            protobuf.

    :rtype: :class:`google.protobuf.message.Message`
    :returns: The RPC message parsed from the response.
    """
    req_data = request_pb.SerializeToString()
    response = _request(http, project, method, req_data, base_url)
    return response_pb_cls.FromString(response)
0.000878
def clean(self, *args, **kwargs):
    """Clean all loaded values to reload when switching envs"""
    for key in list(self.store.keys()):
        self.unset(key)
0.011561
def connect(self): ''' instantiate objects / parse config file ''' # open config file for parsing try: settings = configparser.ConfigParser() settings._interpolation = configparser.ExtendedInterpolation() except Exception as err: self.logger.error("Failed to instantiate config parser exception: %s" % err) raise try: settings.read(self.__config__) except Exception as err: self.logger.error("Failed to read config file exception: %s" % err) raise # Connect to Symphony symphony_p12 = settings.get('symphony', 'symphony_p12') symphony_pwd = settings.get('symphony', 'symphony_pwd') symphony_pod_uri = settings.get('symphony', 'symphony_pod_uri') symphony_keymanager_uri = settings.get('symphony', 'symphony_keymanager_uri') symphony_agent_uri = settings.get('symphony', 'symphony_agent_uri') symphony_sessionauth_uri = settings.get('symphony', 'symphony_sessionauth_uri') symphony_sid = settings.get('symphony', 'symphony_sid') crypt = symphony.Crypt(symphony_p12, symphony_pwd) symphony_crt, symphony_key = crypt.p12parse() try: # instantiate auth methods auth = symphony.Auth(symphony_sessionauth_uri, symphony_keymanager_uri, symphony_crt, symphony_key) # get session token session_token = auth.get_session_token() self.logger.info("AUTH ( session token ): %s" % session_token) # get keymanager token keymngr_token = auth.get_keymanager_token() self.logger.info("AUTH ( key manager token ): %s" % keymngr_token) # instantiate agent methods agent = symphony.Agent(symphony_agent_uri, session_token, keymngr_token) # instantiate pod methods pod = symphony.Pod(symphony_pod_uri, session_token, keymngr_token) self.logger.info("INSTANTIATION ( all objects successful)") except Exception as err: self.logger.error("Failed to authenticate and initialize: %s" % err) raise # return references and such return agent, pod, symphony_sid
0.003546
def merge(profile, branch, merge_into):
    """Merge a branch into another branch.

    Args:

        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.

        branch
            The name of the branch to merge.

        merge_into
            The name of the branch you want to merge into.

    Returns:
        A dict with data about the merge.
    """
    data = merges.merge(profile, branch, merge_into)
    return data
0.001698
def symlink_list(self, load):
    '''
    Return a dict of all symlinks based on a given path in the repo
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    if not salt.utils.stringutils.is_hex(load['saltenv']) \
            and load['saltenv'] not in self.envs():
        return {}
    if 'prefix' in load:
        prefix = load['prefix'].strip('/')
    else:
        prefix = ''
    symlinks = self._file_lists(load, 'symlinks')
    return dict([(key, val) for key, val in six.iteritems(symlinks)
                 if key.startswith(prefix)])
0.00292
def Zuo_Stenby(T, Tc, Pc, omega): r'''Calculates air-water surface tension using the reference fluids methods of [1]_. .. math:: \sigma^{(1)} = 40.520(1-T_r)^{1.287} \sigma^{(2)} = 52.095(1-T_r)^{1.21548} \sigma_r = \sigma_r^{(1)}+ \frac{\omega - \omega^{(1)}} {\omega^{(2)}-\omega^{(1)}} (\sigma_r^{(2)}-\sigma_r^{(1)}) \sigma = T_c^{1/3}P_c^{2/3}[\exp{(\sigma_r)} -1] Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of fluid [Pa] omega : float Acentric factor for fluid, [-] Returns ------- sigma : float Liquid surface tension, N/m Notes ----- Presently untested. Have not personally checked the sources. I strongly believe it is broken. The reference values for methane and n-octane are from the DIPPR database. Examples -------- Chlorobenzene >>> Zuo_Stenby(293., 633.0, 4530000.0, 0.249) 0.03345569011871088 References ---------- .. [1] Zuo, You-Xiang, and Erling H. Stenby. "Corresponding-States and Parachor Models for the Calculation of Interfacial Tensions." The Canadian Journal of Chemical Engineering 75, no. 6 (December 1, 1997): 1130-37. doi:10.1002/cjce.5450750617 ''' Tc_1, Pc_1, omega_1 = 190.56, 4599000.0/1E5, 0.012 Tc_2, Pc_2, omega_2 = 568.7, 2490000.0/1E5, 0.4 Pc = Pc/1E5 def ST_r(ST, Tc, Pc): return log(1 + ST/(Tc**(1/3.0)*Pc**(2/3.0))) ST_1 = 40.520*(1 - T/Tc)**1.287 # Methane ST_2 = 52.095*(1 - T/Tc)**1.21548 # n-octane ST_r_1, ST_r_2 = ST_r(ST_1, Tc_1, Pc_1), ST_r(ST_2, Tc_2, Pc_2) sigma_r = ST_r_1 + (omega-omega_1)/(omega_2 - omega_1)*(ST_r_2-ST_r_1) sigma = Tc**(1/3.0)*Pc**(2/3.0)*(exp(sigma_r)-1) sigma = sigma/1000 # N/m, please return sigma
0.000517
def _normalize_check_url(self, check_url):
    """
    Normalizes check_url by:

    * Adding the `http` scheme if missing
    * Adding or replacing port with `self.port`
    """
    # TODO: Write tests for this method
    split_url = urlsplit(check_url)
    host = splitport(split_url.path or split_url.netloc)[0]
    return '{0}://{1}:{2}'.format(self.scheme, host, self.port)
0.004819
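A standalone sketch of the same normalization idea using only `urllib.parse`; the scheme and port values here are made up for the example, and scheme-less host:port inputs are left aside because `urlsplit` handles them differently across Python versions:

from urllib.parse import urlsplit

def normalize(check_url, scheme='http', port=8080):
    """Force a fixed scheme and port onto a URL (illustrative only)."""
    split = urlsplit(check_url)
    # A scheme-less input such as 'example.com' parses entirely into .path
    host = (split.netloc or split.path).split(':')[0]
    return '{0}://{1}:{2}'.format(scheme, host, port)

print(normalize('https://example.com:9000/health'))  # http://example.com:8080
print(normalize('example.com'))                      # http://example.com:8080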
def _increase_gc_sockets():
    """Increase default sockets for zmq gc. Avoids scaling issues.

    https://github.com/zeromq/pyzmq/issues/471
    """
    ctx = zmq.Context()
    ctx.max_sockets = MAX_SOCKETS
    gc.context = ctx
0.004386
def service_list(profile=None, **connection_args):
    '''
    Return a list of available services (keystone services-list)

    CLI Example:

    .. code-block:: bash

        salt '*' keystone.service_list
    '''
    kstone = auth(profile, **connection_args)
    ret = {}
    for service in kstone.services.list():
        ret[service.name] = dict(
            (value, getattr(service, value)) for value in dir(service)
            if not value.startswith('_')
            and isinstance(getattr(service, value), (six.string_types, dict, bool)))
    return ret
0.005068
def configure_once(config=None, bind_in_runtime=True):
    """Create an injector with a callable config if not present, otherwise, do nothing."""
    with _INJECTOR_LOCK:
        if _INJECTOR:
            return _INJECTOR

        return configure(config, bind_in_runtime=bind_in_runtime)
0.006944
def cltext(fname):
    """
    Internal undocumented command for closing a text file opened by RDTEXT.

    No URL available; relevant lines from SPICE source:

    FORTRAN SPICE, rdtext.f::

        C$Procedure  CLTEXT ( Close a text file opened by RDTEXT)
              ENTRY  CLTEXT ( FILE )
              CHARACTER*(*)       FILE
        C     VARIABLE  I/O  DESCRIPTION
        C     --------  ---  --------------------------------------------------
        C     FILE       I   Text file to be closed.

    CSPICE, rdtext.c::

        /* $Procedure  CLTEXT ( Close a text file opened by RDTEXT) */
        /* Subroutine */ int cltext_(char *file, ftnlen file_len)

    :param fname: Text file to be closed.
    :type fname: str
    """
    fnameP = stypes.stringToCharP(fname)
    fname_len = ctypes.c_int(len(fname))
    libspice.cltext_(fnameP, fname_len)
0.002307
def from_api_repr(cls, resource):
    """Factory: construct a model reference given its API representation

    Args:
        resource (Dict[str, object]):
            Model reference representation returned from the API

    Returns:
        google.cloud.bigquery.model.ModelReference:
            Model reference parsed from ``resource``.
    """
    ref = cls()
    ref._proto = json_format.ParseDict(resource, types.ModelReference())
    return ref
0.004024
def _GetFormatErrorLocation(
        self, yaml_definition, last_definition_object):
    """Retrieves a format error location.

    Args:
        yaml_definition (dict[str, object]): current YAML definition.
        last_definition_object (DataTypeDefinition): previous data type
            definition.

    Returns:
        str: format error location.
    """
    name = yaml_definition.get('name', None)
    if name:
        error_location = 'in: {0:s}'.format(name or '<NAMELESS>')
    elif last_definition_object:
        error_location = 'after: {0:s}'.format(last_definition_object.name)
    else:
        error_location = 'at start'

    return error_location
0.006144
def sport_update(self, sport_id, names=[], account=None, **kwargs):
    """ Update a sport. This needs to be **proposed**.

        :param str sport_id: The id of the sport to update
        :param list names: Internationalized names, e.g. ``[['de', 'Foo'],
            ['en', 'bar']]``
        :param str account: (optional) the account to allow access
            to (defaults to ``default_account``)
    """
    assert isinstance(names, list)
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    sport = Sport(sport_id)
    op = operations.Sport_update(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "sport_id": sport["id"],
            "new_name": names,
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active", **kwargs)
0.001843
def kill_job(self, job_id):
    """ Kills a running job """
    self._loop.call_soon_threadsafe(asyncio.ensure_future,
                                    self._simple_send(ClientKillJob(job_id)))
0.016304
def transform(self, X):
    """
    Args:
        X: DataFrame with NaN's

    Returns:
        Dictionary with one key - 'X' corresponding to given DataFrame
        but without nan's
    """
    if self.fill_missing:
        X = self.filler.complete(X)
    return {'X': X}
0.009934
def solve_limited(self, assumptions=[]):
    """
    Solve internal formula using given budgets for conflicts and propagations.
    """
    if self.minisat:
        if self.use_timer:
            start_time = time.clock()

        # saving default SIGINT handler
        def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)

        self.status = pysolvers.minisatgh_solve_lim(self.minisat, assumptions)

        # recovering default SIGINT handler
        def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)

        if self.use_timer:
            self.call_time = time.clock() - start_time
            self.accu_time += self.call_time

        return self.status
0.007762
def tzinfo_eq(tzinfo1, tzinfo2, startYear=2000, endYear=2020):
    """
    Compare offsets and DST transitions from startYear to endYear.
    """
    if tzinfo1 == tzinfo2:
        return True
    elif tzinfo1 is None or tzinfo2 is None:
        return False

    def dt_test(dt):
        if dt is None:
            return True
        return tzinfo1.utcoffset(dt) == tzinfo2.utcoffset(dt)

    if not dt_test(datetime.datetime(startYear, 1, 1)):
        return False
    for year in range(startYear, endYear):
        for transitionTo in 'daylight', 'standard':
            t1 = getTransition(transitionTo, year, tzinfo1)
            t2 = getTransition(transitionTo, year, tzinfo2)
            if t1 != t2 or not dt_test(t1):
                return False
    return True
0.006502
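The comparison above ultimately boils down to checking `utcoffset()` at sample datetimes (the DST transition lookup comes from the library's own `getTransition` helper, not shown here). A self-contained, simplified sketch using fixed-offset zones from the standard library:

import datetime

tz_a = datetime.timezone(datetime.timedelta(hours=2))
tz_b = datetime.timezone(datetime.timedelta(hours=2), 'CEST')
tz_c = datetime.timezone(datetime.timedelta(hours=1))

def same_offsets(tz1, tz2, years=range(2000, 2020)):
    """Rough equivalence test: equal UTC offsets at the start of each year."""
    return all(
        tz1.utcoffset(datetime.datetime(year, 1, 1)) ==
        tz2.utcoffset(datetime.datetime(year, 1, 1))
        for year in years
    )

print(same_offsets(tz_a, tz_b))  # True  (same offset, different name)
print(same_offsets(tz_a, tz_c))  # False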
def weighted_mean(X, embedding, neighbors, distances):
    """Initialize points onto an existing embedding by placing them in the
    weighted mean position of their nearest neighbors on the reference
    embedding.

    Parameters
    ----------
    X: np.ndarray
    embedding: TSNEEmbedding
    neighbors: np.ndarray
    distances: np.ndarray

    Returns
    -------
    np.ndarray
    """
    n_samples = X.shape[0]
    n_components = embedding.shape[1]

    partial_embedding = np.zeros((n_samples, n_components))
    for i in range(n_samples):
        partial_embedding[i] = np.average(
            embedding[neighbors[i]], axis=0, weights=distances[i],
        )

    return partial_embedding
0.002869
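The placement is just a per-point weighted average of neighbor coordinates; `np.average(..., weights=...)` does the work. A tiny standalone illustration with made-up numbers used as weights:

import numpy as np

embedding = np.array([[0.0, 0.0],
                      [2.0, 0.0],
                      [0.0, 2.0]])
neighbors = np.array([[0, 1, 2]])       # one new point, three reference neighbors
weights = np.array([[1.0, 1.0, 2.0]])   # e.g. affinities used as weights

new_point = np.average(embedding[neighbors[0]], axis=0, weights=weights[0])
print(new_point)  # [0.5 1. ]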
def prompt_choice_list(msg, a_list, default=1, help_string=None):
    """Prompt user to select from a list of possible choices.

    :param msg: A message displayed to the user before the choice list
    :type msg: str
    :param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
    :type a_list: list
    :param default: The default option that should be chosen if user doesn't enter a choice
    :type default: int
    :returns: The list index of the item chosen.
    """
    verify_is_a_tty()
    options = '\n'.join([' [{}] {}{}'
                         .format(i + 1,
                                 x['name'] if isinstance(x, dict) and 'name' in x else x,
                                 ' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
                         for i, x in enumerate(a_list)])
    allowed_vals = list(range(1, len(a_list) + 1))
    while True:
        val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
        if val == '?' and help_string is not None:
            print(help_string)
            continue
        if not val:
            val = '{}'.format(default)
        try:
            ans = int(val)
            if ans in allowed_vals:
                # array index is 0-based, user input is 1-based
                return ans - 1
            raise ValueError
        except ValueError:
            logger.warning('Valid values are %s', allowed_vals)
0.00406
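The menu text above is built from either plain strings or `{'name': ..., 'desc': ...}` dicts. A quick non-interactive look at just that formatting step, on a made-up choice list:

a_list = ['eastus', {'name': 'westus', 'desc': 'West US region'}]

options = '\n'.join(
    ' [{}] {}{}'.format(
        i + 1,
        x['name'] if isinstance(x, dict) and 'name' in x else x,
        ' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
    for i, x in enumerate(a_list))

print(options)
#  [1] eastus
#  [2] westus - West US region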
def ifftr(wave, npoints=None, indep_min=None, indep_max=None): r""" Return the real part of the inverse Fast Fourier Transform of a waveform. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param npoints: Number of points to use in the transform. If **npoints** is less than the size of the independent variable vector the waveform is truncated; if **npoints** is greater than the size of the independent variable vector, the waveform is zero-padded :type npoints: positive integer :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.ifftr :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`npoints\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) * RuntimeError (Non-uniform frequency spacing) .. [[[end]]] """ return real(ifft(wave, npoints, indep_min, indep_max))
0.000675
def assert_equivalent(o1, o2):
    '''Asserts that o1 and o2 are distinct, yet equivalent objects
    '''
    if not (isinstance(o1, type) and isinstance(o2, type)):
        assert o1 is not o2
    assert o1 == o2
    assert o2 == o1
0.004292
def get_short_lambda_body_text(lambda_func): """Return the source of a (short) lambda function. If it's impossible to obtain, returns None. """ try: source_lines, _ = inspect.getsourcelines(lambda_func) except (IOError, TypeError): return None # skip `def`-ed functions and long lambdas if len(source_lines) != 1: return None source_text = os.linesep.join(source_lines).strip() # find the AST node of a lambda definition # so we can locate it in the source code source_ast = ast.parse(source_text) lambda_node = next((node for node in ast.walk(source_ast) if isinstance(node, ast.Lambda)), None) if lambda_node is None: # could be a single line `def fn(x): ...` return None # HACK: Since we can (and most likely will) get source lines # where lambdas are just a part of bigger expressions, they will have # some trailing junk after their definition. # # Unfortunately, AST nodes only keep their _starting_ offsets # from the original source, so we have to determine the end ourselves. # We do that by gradually shaving extra junk from after the definition. lambda_text = source_text[lambda_node.col_offset:] lambda_body_text = source_text[lambda_node.body.col_offset:] min_length = len('lambda:_') # shortest possible lambda expression while len(lambda_text) > min_length: try: # What's annoying is that sometimes the junk even parses, # but results in a *different* lambda. You'd probably have to # be deliberately malicious to exploit it but here's one way: # # bloop = lambda x: False, lambda x: True # get_short_lamnda_source(bloop[0]) # # Ideally, we'd just keep shaving until we get the same code, # but that most likely won't happen because we can't replicate # the exact closure environment. code = compile(lambda_body_text, '<unused filename>', 'eval') # Thus the next best thing is to assume some divergence due # to e.g. LOAD_GLOBAL in original code being LOAD_FAST in # the one compiled above, or vice versa. # But the resulting code should at least be the same *length* # if otherwise the same operations are performed in it. if len(code.co_code) == len(lambda_func.__code__.co_code): # return lambda_text return lambda_body_text except SyntaxError: pass lambda_text = lambda_text[:-1] lambda_body_text = lambda_body_text[:-1] return None
0.00037
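The key trick in the snippet above is that `ast` records only the *starting* column of a lambda, so the code slices from `col_offset` and then trims trailing junk until the remainder compiles. A compact demonstration of just the offset part, on a made-up source line:

import ast

source_text = "handlers = {'inc': lambda x: x + 1}  # lambda inside a bigger expression"
lambda_node = next(node for node in ast.walk(ast.parse(source_text))
                   if isinstance(node, ast.Lambda))

# Both slices still carry trailing junk ("}  # ..."), which is what the
# shaving loop in the snippet above is there to remove.
print(source_text[lambda_node.col_offset:])
print(source_text[lambda_node.body.col_offset:])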
def _iter_ns_range(self): """Iterates over self._ns_range, delegating to self._iter_key_range().""" while True: if self._current_key_range is None: query = self._ns_range.make_datastore_query() namespace_result = query.Get(1) if not namespace_result: break namespace = namespace_result[0].name() or "" self._current_key_range = key_range.KeyRange( namespace=namespace, _app=self._ns_range.app) yield ALLOW_CHECKPOINT for key, o in self._iter_key_range( copy.deepcopy(self._current_key_range)): # The caller must consume yielded values so advancing the KeyRange # before yielding is safe. self._current_key_range.advance(key) yield o if (self._ns_range.is_single_namespace or self._current_key_range.namespace == self._ns_range.namespace_end): break self._ns_range = self._ns_range.with_start_after( self._current_key_range.namespace) self._current_key_range = None
0.008612
def make_interval(long_name, short_name):
    """ Create an interval segment """
    return Group(
        Regex("(-+)?[0-9]+") +
        (
            upkey(long_name + "s")
            | Regex(long_name + "s").setParseAction(upcaseTokens)
            | upkey(long_name)
            | Regex(long_name).setParseAction(upcaseTokens)
            | upkey(short_name)
            | Regex(short_name).setParseAction(upcaseTokens)
        )
    ).setResultsName(long_name)
0.002146
def generate_bqm(graph, table, decision_variables, linear_energy_ranges=None, quadratic_energy_ranges=None, min_classical_gap=2): """ Args: graph: A networkx.Graph table: An iterable of valid spin configurations. Each configuration is a tuple of variable assignments ordered by `decision`. decision_variables: An ordered iterable of the variables in the binary quadratic model. linear_energy_ranges: Dictionary of the form {v: (min, max), ...} where min and max are the range of values allowed to v. The default range is [-2, 2]. quadratic_energy_ranges: Dict of the form {(u, v): (min, max), ...} where min and max are the range of values allowed to (u, v). The default range is [-1, 1]. min_classical_gap: A float. The minimum energy gap between the highest feasible state and the lowest infeasible state. """ # Check for auxiliary variables in the graph if len(graph) != len(decision_variables): raise ValueError('Penaltymodel-lp does not handle problems with auxiliary variables') if not linear_energy_ranges: linear_energy_ranges = {} if not quadratic_energy_ranges: quadratic_energy_ranges = {} # Simplify graph naming # Note: nodes' and edges' order determine the column order of the LP nodes = decision_variables edges = graph.edges # Set variable names for lengths m_linear = len(nodes) # Number of linear biases m_quadratic = len(edges) # Number of quadratic biases n_noted = len(table) # Number of spin combinations specified in the table n_unnoted = 2**m_linear - n_noted # Number of spin combinations that were not specified # Linear programming matrix for spin states specified by 'table' noted_states = table.keys() if isinstance(table, dict) else table noted_states = list(noted_states) noted_matrix = _get_lp_matrix(np.asarray(noted_states), nodes, edges, 1, 0) # Linear programming matrix for spins states that were not specified by 'table' spin_states = product([-1, 1], repeat=m_linear) if m_linear > 1 else [-1, 1] unnoted_states = [state for state in spin_states if state not in noted_states] unnoted_matrix = _get_lp_matrix(np.asarray(unnoted_states), nodes, edges, 1, -1) if unnoted_matrix is not None: unnoted_matrix *= -1 # Taking negative in order to flip the inequality # Constraints if isinstance(table, dict): noted_bound = np.asarray([table[state] for state in noted_states]) unnoted_bound = np.full((n_unnoted, 1), -1 * max(table.values())) # -1 for flipped inequality else: noted_bound = np.zeros((n_noted, 1)) unnoted_bound = np.zeros((n_unnoted, 1)) # Bounds linear_range = (MIN_LINEAR_BIAS, MAX_LINEAR_BIAS) quadratic_range = (MIN_QUADRATIC_BIAS, MAX_QUADRATIC_BIAS) bounds = [linear_energy_ranges.get(node, linear_range) for node in nodes] bounds += [get_item(quadratic_energy_ranges, edge, quadratic_range) for edge in edges] # Note: Since ising has {-1, 1}, the largest possible gap is [-largest_bias, largest_bias], # hence that 2 * sum(largest_biases) max_gap = 2 * sum(max(abs(lbound), abs(ubound)) for lbound, ubound in bounds) bounds.append((None, None)) # Bound for offset bounds.append((min_classical_gap, max_gap)) # Bound for gap. # Cost function cost_weights = np.zeros((1, m_linear + m_quadratic + 2)) cost_weights[0, -1] = -1 # Only interested in maximizing the gap # Returns a Scipy OptimizeResult result = linprog(cost_weights.flatten(), A_eq=noted_matrix, b_eq=noted_bound, A_ub=unnoted_matrix, b_ub=unnoted_bound, bounds=bounds) #TODO: propagate scipy.optimize.linprog's error message? 
if not result.success: raise ValueError('Penaltymodel-lp is unable to find a solution.') # Split result x = result.x h = x[:m_linear] j = x[m_linear:-2] offset = x[-2] gap = x[-1] if gap <= 0: raise ValueError('Penaltymodel-lp is unable to find a solution.') # Create BQM bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) bqm.add_variables_from((v, bias) for v, bias in zip(nodes, h)) bqm.add_interactions_from((u, v, bias) for (u, v), bias in zip(edges, j)) bqm.add_offset(offset) return bqm, gap
0.005137
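The LP above maximizes the classical gap by putting `-1` on the gap variable in the cost vector, since SciPy's `linprog` minimizes. A toy instance with the same shape, one bias-like variable `x` and one gap variable `g`, just to show the mechanics (not the penalty-model matrices themselves):

import numpy as np
from scipy.optimize import linprog

# minimize -g  (i.e. maximize g)  subject to  x + g <= 1,  0 <= x <= 1
cost = np.array([0.0, -1.0])        # no preference on x, maximize g
A_ub = np.array([[1.0, 1.0]])
b_ub = np.array([1.0])
bounds = [(0, 1), (None, None)]     # x bounded, g limited only by the constraint

result = linprog(cost, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
print(result.success, result.x)     # True [0. 1.]  -> largest achievable "gap" is 1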
def get_descriptor_for_layer(self, layer):
    """
    Returns the standard JSON descriptor for the layer. There is a lot of
    useful information in there.
    """
    if layer not in self._layer_descriptor_cache:
        params = {'f': 'pjson'}
        if self.token:
            params['token'] = self.token
        response = requests.get(self._build_request(layer), params=params)
        self._layer_descriptor_cache[layer] = response.json()
    return self._layer_descriptor_cache[layer]
0.005556
def from_scaledproto(cls, scaledproto_mesh,
                     pos, vel, euler, euler_vel,
                     rotation_vel=(0, 0, 0),
                     component_com_x=None):
    """
    TODO: add documentation
    """
    mesh = cls(**scaledproto_mesh.items())

    # roche coordinates have already been copied
    # so do NOT call mesh._copy_roche_values() here

    mesh._place_in_orbit(pos, vel, euler, euler_vel,
                         rotation_vel, component_com_x)
    return mesh
0.015564
def url(self, filetype, base_dir=None, sasdir='sas', **kwargs):
    """Return the url of a given type of file.

    Parameters
    ----------
    filetype : str
        File type parameter.

    Returns
    -------
    full : str
        The sas url to the file.
    """
    location = self.location(filetype, **kwargs)
    return join(self.remote_base, sasdir, location) if self.remote_base and location else None
0.006479
def clip_geometry_to_srs_bounds(geometry, pyramid, multipart=False):
    """
    Clip input geometry to SRS bounds of given TilePyramid.

    If geometry passes the antimeridian, it will be split up in a multipart
    geometry and shifted to within the SRS boundaries.
    Note: geometry SRS must be the TilePyramid SRS!

    - geometry: any shapely geometry
    - pyramid: a TilePyramid object
    - multipart: return list of geometries instead of a GeometryCollection
    """
    if not geometry.is_valid:
        raise ValueError("invalid geometry given")
    pyramid_bbox = box(*pyramid.bounds)

    # Special case for global tile pyramids if geometry extends over tile
    # pyramid boundaries (such as the antimeridian).
    if pyramid.is_global and not geometry.within(pyramid_bbox):
        inside_geom = geometry.intersection(pyramid_bbox)
        outside_geom = geometry.difference(pyramid_bbox)
        # shift outside geometry so it lies within SRS bounds
        if isinstance(outside_geom, Polygon):
            outside_geom = [outside_geom]
        all_geoms = [inside_geom]
        for geom in outside_geom:
            geom_bounds = Bounds(*geom.bounds)
            if geom_bounds.left < pyramid.left:
                geom = translate(geom, xoff=2 * pyramid.right)
            elif geom_bounds.right > pyramid.right:
                geom = translate(geom, xoff=-2 * pyramid.right)
            all_geoms.append(geom)
        if multipart:
            return all_geoms
        else:
            return GeometryCollection(all_geoms)
    else:
        if multipart:
            return [geometry]
        else:
            return geometry
0.000607
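The antimeridian handling above splits the geometry at the pyramid bounds and translates the outside part by twice the right bound. A small standalone sketch of that shift with plain lon/lat boxes (WGS84-style bounds assumed, no TilePyramid involved):

from shapely.geometry import box
from shapely.affinity import translate

srs_bbox = box(-180, -90, 180, 90)
geom = box(175, -10, 185, 10)                # crosses the 180-degree antimeridian

inside = geom.intersection(srs_bbox)         # 175..180
outside = geom.difference(srs_bbox)          # 180..185, outside the SRS bounds
shifted = translate(outside, xoff=-2 * 180)  # wrap it back to -180..-175

print(inside.bounds)   # (175.0, -10.0, 180.0, 10.0)
print(shifted.bounds)  # (-180.0, -10.0, -175.0, 10.0)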
def is_readable(path):
    '''
    Returns True if the provided file or directory exists and can be read by
    the current user. Returns False otherwise.
    '''
    return os.access(os.path.abspath(path), os.R_OK)
0.00939
def base10_integer_to_basek_string(k, x):
    """Convert an integer into a base k string."""
    if not (2 <= k <= max_k_labeled):
        raise ValueError("k must be in range [2, %d]: %s" % (max_k_labeled, k))
    return ((x == 0) and numerals[0]) or \
           (base10_integer_to_basek_string(k, x // k).lstrip(numerals[0]) +
            numerals[x % k])
0.005277
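A self-contained version of the same recursion, with a hypothetical `numerals` alphabet standing in for the module-level `numerals` and `max_k_labeled` used above:

import string

numerals = string.digits + string.ascii_lowercase   # hypothetical stand-in, supports k <= 36

def to_base_k(k, x):
    """Recursive base-k conversion mirroring the snippet above."""
    if x == 0:
        return numerals[0]
    return to_base_k(k, x // k).lstrip(numerals[0]) + numerals[x % k]

print(to_base_k(2, 10))    # 1010
print(to_base_k(16, 255))  # ff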
def OS_filter(x,h,N,mode=0): """ Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1) """ P = len(h) # zero pad start of x so first frame can recover first true samples of x x = np.hstack((np.zeros(P-1),x)) L = N - P + 1 Nx = len(x) Nframe = int(np.ceil(Nx/float(L))) # zero pad end of x to full number of frames needed x = np.hstack((x,np.zeros(Nframe*L-Nx))) y = np.zeros(int(Nframe*N)) # create an instrumentation matrix to observe the overlap and save behavior y_mat = np.zeros((Nframe,int(Nframe*N))) H = fft.fft(h,N) # begin the filtering operation for k in range(Nframe): xk = x[k*L:k*L+N] Xk = fft.fft(xk,N) Yk = H*Xk yk = np.real(fft.ifft(Yk)) # imag part should be zero y[k*L+P-1:k*L+N] = yk[P-1:] y_mat[k,k*L:k*L+N] = yk if mode == 1: return y[P-1:Nx], y_mat[:,P-1:Nx] else: return y[P-1:Nx]
0.010204
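The frame bookkeeping in the overlap-save snippet above: each FFT frame of length N yields L = N - P + 1 new output samples, and the input is zero-padded by P - 1 samples at the start. Plugging in the docstring example's sizes (filter of length 10, 100-sample signal, and an assumed FFT size of N = 64):

import numpy as np

P, N, Nx = 10, 64, 100                   # filter length, FFT size, signal length
L = N - P + 1                            # valid output samples per frame
Nframe = int(np.ceil((Nx + P - 1) / L))  # frames needed after the initial P-1 zero-pad

print(L, Nframe)                         # 55 2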
def get_port_channel_detail_output_lacp_ready_agg(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_port_channel_detail = ET.Element("get_port_channel_detail")
    config = get_port_channel_detail
    output = ET.SubElement(get_port_channel_detail, "output")
    lacp = ET.SubElement(output, "lacp")
    ready_agg = ET.SubElement(lacp, "ready-agg")
    ready_agg.text = kwargs.pop('ready_agg')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003546
def send_email(to, subject, html_content,
               files=None, dryrun=False, cc=None, bcc=None,
               mime_subtype='mixed', mime_charset='utf-8', **kwargs):
    """
    Send email using backend specified in EMAIL_BACKEND.
    """
    path, attr = configuration.conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1)
    module = importlib.import_module(path)
    backend = getattr(module, attr)
    to = get_email_address_list(to)
    to = ", ".join(to)

    return backend(to, subject, html_content, files=files,
                   dryrun=dryrun, cc=cc, bcc=bcc,
                   mime_subtype=mime_subtype, mime_charset=mime_charset,
                   **kwargs)
0.004573
def _updateModelDBResults(self): """ Retrieves the current results and updates the model's record in the Model database. """ # ----------------------------------------------------------------------- # Get metrics metrics = self._getMetrics() # ----------------------------------------------------------------------- # Extract report metrics that match the requested report REs reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels]) # ----------------------------------------------------------------------- # Extract the report item that matches the optimize key RE # TODO cache optimizedMetricLabel sooner metrics = self._getMetrics() optimizeDict = dict() if self._optimizeKeyPattern is not None: optimizeDict[self._optimizedMetricLabel] = \ metrics[self._optimizedMetricLabel] # ----------------------------------------------------------------------- # Update model results results = json.dumps((metrics , optimizeDict)) self._jobsDAO.modelUpdateResults(self._modelID, results=results, metricValue=optimizeDict.values()[0], numRecords=(self._currentRecordIndex + 1)) self._logger.debug( "Model Results: modelID=%s; numRecords=%s; results=%s" % \ (self._modelID, self._currentRecordIndex + 1, results)) return
0.005579
def dn_cn_of_certificate_with_san(domain):
    '''Return the Common Name (cn) from the Distinguished Name (dn) of the
    certificate which contains the `domain` in its Subject Alternative Name
    (san) list.

    Needs repo ~/.fabsetup-custom.

    Return None if no certificate is configured with `domain` in SAN.
    '''
    cn_dn = None
    from config import domain_groups
    cns = [domains[0]
           for domains
           in domain_groups
           if domain in domains]
    if cns:
        if len(cns) > 1:
            print_msg(yellow(flo('Several certificates are configured to '
                                 'contain {domain} '
                                 '(You should clean up your config.py)\n')))
        cn_dn = cns[0]
    return cn_dn
0.002614
def authenticate_direct_bind(self, username, password): """ Performs a direct bind. We can do this since the RDN is the same as the login attribute. Hence we just string together a dn to find this user with. Args: username (str): Username of the user to bind (the field specified as LDAP_BIND_RDN_ATTR) password (str): User's password to bind with. Returns: AuthenticationResponse """ bind_user = '{rdn}={username},{user_search_dn}'.format( rdn=self.config.get('LDAP_USER_RDN_ATTR'), username=username, user_search_dn=self.full_user_search_dn, ) connection = self._make_connection( bind_user=bind_user, bind_password=password, ) response = AuthenticationResponse() try: connection.bind() log.debug( "Authentication was successful for user '{0}'".format(username)) response.status = AuthenticationResponseStatus.success # Get user info here. user_info = self.get_user_info( dn=bind_user, _connection=connection) response.user_dn = bind_user response.user_id = username response.user_info = user_info if self.config.get('LDAP_SEARCH_FOR_GROUPS'): response.user_groups = self.get_user_groups( dn=bind_user, _connection=connection) except ldap3.core.exceptions.LDAPInvalidCredentialsResult: log.debug( "Authentication was not successful for user '{0}'".format(username)) response.status = AuthenticationResponseStatus.fail except Exception as e: log.error(e) response.status = AuthenticationResponseStatus.fail self.destroy_connection(connection) return response
0.002046
def _mkstemp_copy(path, preserve_inode=True): ''' Create a temp file and move/copy the contents of ``path`` to the temp file. Return the path to the temp file. path The full path to the file whose contents will be moved/copied to a temp file. Whether it's moved or copied depends on the value of ``preserve_inode``. preserve_inode Preserve the inode of the file, so that any hard links continue to share the inode with the original filename. This works by *copying* the file, reading from the copy, and writing to the file at the original inode. If ``False``, the file will be *moved* rather than copied, and a new file will be written to a new inode, but using the original filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). Default is ``True``. ''' temp_file = None # Create the temp file try: temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to create temp file. " "Exception: {0}".format(exc) ) # use `copy` to preserve the inode of the # original file, and thus preserve hardlinks # to the inode. otherwise, use `move` to # preserve prior behavior, which results in # writing the file to a new inode. if preserve_inode: try: shutil.copy2(path, temp_file) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to copy file '{0}' to the " "temp file '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) else: try: shutil.move(path, temp_file) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to move file '{0}' to the " "temp file '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) return temp_file
0.004204
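A standalone sketch of why the copy path keeps hard links intact while the move path does not; it assumes a POSIX filesystem where os.link is available, and all file names are throwaway examples.

import os
import shutil
import tempfile

workdir = tempfile.mkdtemp()
original = os.path.join(workdir, 'config')
hardlink = os.path.join(workdir, 'config.link')
with open(original, 'w') as fh:
    fh.write('old contents\n')
os.link(original, hardlink)

# preserve_inode=True style: work on a copy, then write back into the same inode
copied = os.path.join(workdir, 'config.copy')
shutil.copy2(original, copied)
with open(original, 'w') as fh:            # truncating in place keeps the inode
    fh.write('new contents\n')
assert os.stat(original).st_ino == os.stat(hardlink).st_ino
with open(hardlink) as fh:
    assert fh.read() == 'new contents\n'   # the hard link sees the update

# preserve_inode=False style: move the file aside and write a brand new one
moved = os.path.join(workdir, 'config.moved')
shutil.move(original, moved)
with open(original, 'w') as fh:
    fh.write('newer contents\n')
assert os.stat(original).st_ino != os.stat(hardlink).st_ino
with open(hardlink) as fh:
    assert fh.read() == 'new contents\n'   # the hard link no longer follows the name

shutil.rmtree(workdir)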
def create(self, size, number, meta, name=None, image=None, attempts=3): """Create container VM nodes. Uses a container declaration which is undocumented. :param size: The machine type to use. :type size: ``str`` or :class:`GCENodeSize` :param number: Amount of nodes to be spawned. :type number: ``int`` :param meta: Metadata dictionary for the nodes. :type meta: ``dict`` or ``None`` :param name: The name of the node to create. :type name: ``str`` :param image: The image used to create the disk - optional for multiple nodes. :type image: ``str`` or :class:`GCENodeImage` or ``None`` :param attempts: The amount of tries to perform in case nodes fail to create. :type attempts: ``int`` :return: A list of newly created Node objects for the new nodes. :rtype: ``list`` of :class:`Node` """ if name is None: name = Common.get_random_hostname() if image is None and number == 1: raise ComputeEngineManagerException("Base image not provided.") successful = 0 nodes = [] while number - successful > 0 and attempts > 0: if number == 1: # Used because of suffix naming scheme in ex_create_multiple_nodes() for a single node. nodes = [self.gce.create_node(name, size, image, **meta)] else: nodes = self.gce.ex_create_multiple_nodes(name, size, None, number - successful, ignore_errors=False, poll_interval=1, **meta) for node in nodes: if isinstance(node, GCEFailedNode): self.logger.error("Node failed to create, code %s error: %s", node.code, node.error) continue successful += 1 self.nodes.append(node) attempts -= 1 if number != successful: self.logger.error("We tried but %d nodes failed to create.", number - successful) return nodes
0.003988
def validate_json_request(required_fields):
    """
    Return a decorator that ensures that the request passed to the view
    function/method has a valid JSON request body with the given required
    fields. The dict parsed from the JSON is then passed as the second
    argument to the decorated function/method. For example:

    @validate_json_request({'name', 'date'})
    def view_func(request, request_dict): ...
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            try:
                request_dict = json.loads(request.raw_post_data)
            except ValueError as e:
                return JsonResponseBadRequest('invalid POST JSON: %s' % e)
            for k in required_fields:
                if k not in request_dict:
                    return JsonResponseBadRequest(
                        'POST JSON must contain property \'%s\'' % k)
            return func(request, request_dict, *args, **kwargs)
        return wrapped_func
    return decorator
0.000965
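A standalone sketch of the same required-fields pattern, using plain strings and dicts in place of Django's request/response objects; the names require_json_fields and create_event are illustrative only.

import json
from functools import wraps

def require_json_fields(required):
    def decorator(func):
        @wraps(func)
        def wrapper(raw_body, *args, **kwargs):
            try:
                data = json.loads(raw_body)
            except ValueError as exc:
                return {'error': 'invalid JSON: %s' % exc}
            missing = [k for k in required if k not in data]
            if missing:
                return {'error': 'missing fields: %s' % ', '.join(missing)}
            return func(data, *args, **kwargs)
        return wrapper
    return decorator

@require_json_fields({'name', 'date'})
def create_event(payload):
    return {'created': payload['name']}

print(create_event('{"name": "demo", "date": "2020-01-01"}'))  # {'created': 'demo'}
print(create_event('{"name": "demo"}'))                        # missing-fields error dict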
def add_verified_read(self): """Add ``read`` perm for all verified subj. Public ``read`` is removed if present. """ self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read') self.add_perm(d1_common.const.SUBJECT_VERIFIED, 'read')
0.007407
def _get_heading_level(self, element): """ Returns the level of heading. :param element: The heading. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The level of heading. :rtype: int """ # pylint: disable=no-self-use tag = element.get_tag_name() if tag == 'H1': return 1 elif tag == 'H2': return 2 elif tag == 'H3': return 3 elif tag == 'H4': return 4 elif tag == 'H5': return 5 elif tag == 'H6': return 6 return -1
0.00311
def clean_json(resource_json, resources_map):
    """
    Clean up a resource dict. For now, this just means replacing any Ref node
    with the corresponding physical_resource_id.

    Eventually, this is where we would add things like function parsing (fn::)
    """
    if isinstance(resource_json, dict):
        if 'Ref' in resource_json:
            # Parse resource reference
            resource = resources_map[resource_json['Ref']]
            if hasattr(resource, 'physical_resource_id'):
                return resource.physical_resource_id
            else:
                return resource

        if "Fn::FindInMap" in resource_json:
            map_name = resource_json["Fn::FindInMap"][0]
            map_path = resource_json["Fn::FindInMap"][1:]
            result = resources_map[map_name]
            for path in map_path:
                result = result[clean_json(path, resources_map)]
            return result

        if 'Fn::GetAtt' in resource_json:
            resource = resources_map.get(resource_json['Fn::GetAtt'][0])
            if resource is None:
                return resource_json
            try:
                return resource.get_cfn_attribute(resource_json['Fn::GetAtt'][1])
            except NotImplementedError as n:
                logger.warning(str(n).format(
                    resource_json['Fn::GetAtt'][0]))
            except UnformattedGetAttTemplateException:
                raise ValidationError(
                    'Bad Request',
                    UnformattedGetAttTemplateException.description.format(
                        resource_json['Fn::GetAtt'][0], resource_json['Fn::GetAtt'][1]))

        if 'Fn::If' in resource_json:
            condition_name, true_value, false_value = resource_json['Fn::If']
            if resources_map.lazy_condition_map[condition_name]:
                return clean_json(true_value, resources_map)
            else:
                return clean_json(false_value, resources_map)

        if 'Fn::Join' in resource_json:
            join_list = clean_json(resource_json['Fn::Join'][1], resources_map)
            return resource_json['Fn::Join'][0].join([str(x) for x in join_list])

        if 'Fn::Split' in resource_json:
            to_split = clean_json(resource_json['Fn::Split'][1], resources_map)
            return to_split.split(resource_json['Fn::Split'][0])

        if 'Fn::Select' in resource_json:
            select_index = int(resource_json['Fn::Select'][0])
            select_list = clean_json(resource_json['Fn::Select'][1], resources_map)
            return select_list[select_index]

        if 'Fn::Sub' in resource_json:
            if isinstance(resource_json['Fn::Sub'], list):
                warnings.warn(
                    "Tried to parse Fn::Sub with variable mapping but it's not supported by moto's CloudFormation implementation")
            else:
                fn_sub_value = clean_json(resource_json['Fn::Sub'], resources_map)
                to_sub = re.findall('(?=\${)[^!^"]*?}', fn_sub_value)
                literals = re.findall('(?=\${!)[^"]*?}', fn_sub_value)
                for sub in to_sub:
                    if '.' in sub:
                        cleaned_ref = clean_json({'Fn::GetAtt': re.findall('(?<=\${)[^"]*?(?=})', sub)[0].split('.')}, resources_map)
                    else:
                        cleaned_ref = clean_json({'Ref': re.findall('(?<=\${)[^"]*?(?=})', sub)[0]}, resources_map)
                    fn_sub_value = fn_sub_value.replace(sub, cleaned_ref)
                for literal in literals:
                    fn_sub_value = fn_sub_value.replace(literal, literal.replace('!', ''))
                return fn_sub_value
            pass

        if 'Fn::ImportValue' in resource_json:
            cleaned_val = clean_json(resource_json['Fn::ImportValue'], resources_map)
            values = [x.value for x in resources_map.cross_stack_resources.values() if x.name == cleaned_val]
            if any(values):
                return values[0]
            else:
                raise ExportNotFound(cleaned_val)

        if 'Fn::GetAZs' in resource_json:
            region = resource_json.get('Fn::GetAZs') or DEFAULT_REGION
            result = []
            # TODO: make this configurable, to reflect the real AWS AZs
            for az in ('a', 'b', 'c', 'd'):
                result.append('%s%s' % (region, az))
            return result

        cleaned_json = {}
        for key, value in resource_json.items():
            cleaned_val = clean_json(value, resources_map)
            if cleaned_val is None:
                # If we didn't find anything, don't add this attribute
                continue
            cleaned_json[key] = cleaned_val
        return cleaned_json
    elif isinstance(resource_json, list):
        return [clean_json(val, resources_map) for val in resource_json]
    else:
        return resource_json
0.00346
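In moto, resources_map is a ResourceMap object, but for the simplest intrinsics a plain dict is enough to see the resolution order, assuming the function above is importable:

>>> clean_json({'Ref': 'Env'}, {'Env': 'prod'})
'prod'
>>> clean_json({'Fn::Join': ['-', ['web', {'Ref': 'Env'}]]}, {'Env': 'prod'})
'web-prod'
>>> clean_json({'Fn::Select': ['1', ['a', 'b', 'c']]}, {})
'b'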
def _CopyToDateTimeString(self): """Copies the POSIX timestamp to a date and time string. Returns: str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or None if the timestamp is missing or invalid. """ if self._timestamp is None: return None timestamp, nanoseconds = divmod( self._timestamp, definitions.NANOSECONDS_PER_SECOND) number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp) year, month, day_of_month = self._GetDateValuesWithEpoch( number_of_days, self._EPOCH) return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:09d}'.format( year, month, day_of_month, hours, minutes, seconds, nanoseconds)
0.002751
def _new_report(self, type, start_date, end_date, product_id='BTC-USD', account_id=None, format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" data = { 'type':type, 'start_date':self._format_iso_time(start_date), 'end_date':self._format_iso_time(end_date), 'product_id':product_id, 'account_id':account_id, 'format':format, 'email':email } return self._post('reports', data=data)
0.024917
def get_lib(self, arch='x86'): """ Get lib directories of Visual C++. """ if arch == 'x86': arch = '' if arch == 'x64': arch = 'amd64' lib = os.path.join(self.vc_dir, 'lib', arch) if os.path.isdir(lib): logging.info(_('using lib: %s'), lib) return [lib] logging.debug(_('lib not found: %s'), lib) return []
0.004695
def sd2df_CSV(self, table: str, libref: str = '', dsopts: dict = None, tempfile: str = None, tempkeep: bool = False, **kwargs) -> 'pd.DataFrame': """ This is an alias for 'sasdata2dataframe' specifying method='CSV'. Why type all that? SASdata object that refers to the Sas Data Set you want to export to a Pandas Data Frame :param table: the name of the SAS Data Set you want to export to a Pandas Data Frame :param libref: the libref for the SAS Data Set. :param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs): - where is a string - keep are strings or list of strings. - drop are strings or list of strings. - obs is a numbers - either string or int - first obs is a numbers - either string or int - format is a string or dictionary { var: format } .. code-block:: python {'where' : 'msrp < 20000 and make = "Ford"' 'keep' : 'msrp enginesize Cylinders Horsepower Weight' 'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'] 'obs' : 10 'firstobs' : '12' 'format' : {'money': 'dollar10', 'time': 'tod5.'} } :param tempfile: [optional] an OS path for a file to use for the local CSV file; default it a temporary file that's cleaned up :param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it :param kwargs: dictionary :return: Pandas data frame """ dsopts = dsopts if dsopts is not None else {} return self.sasdata2dataframe(table, libref, dsopts, method='CSV', tempfile=tempfile, tempkeep=tempkeep, **kwargs)
0.006917
def counter(self, ch, part=None): """Return a counter on the channel ch. ch: string or integer. The channel index number or channel name. part: int or None The 0-based enumeration of a True part to return. This has an effect whether or not the mask or filter is turned on. Raise IndexError if the part does not exist. See `Counter <https://docs.python.org/2.7/library/collections.html#counter-objects>`_ for the counter object returned. """ return Counter(self(self._key(ch), part=part))
0.003317
def write_config_json(config_file, data):
    """Serializes an object to disk.

    Args:
        config_file (str): The path on disk to save the file.
        data (object): The object to serialize.

    """
    outfile = None
    try:
        with open(config_file, 'w') as outfile:
            json.dump(data, outfile)
    except Exception:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "write_config_json",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        }
        )
    finally:
        outfile = None
        del outfile

        gc.collect()
0.004071
def validate_properties(self, model, context=None): """ Validate simple properties Performs validation on simple properties to return a result object. :param model: object or dict :param context: object, dict or None :return: shiftschema.result.Result """ result = Result() for property_name in self.properties: prop = self.properties[property_name] value = self.get(model, property_name) errors = prop.validate( value=value, model=model, context=context ) if errors: result.add_errors( errors=errors, property_name=property_name ) return result
0.002475
def response(self, in_thread: Optional[bool] = None) -> "Message": """ Create a response message. Depending on the incoming message the response can be in a thread. By default the response follow where the incoming message was posted. Args: in_thread (boolean): Overwrite the `threading` behaviour Returns: a new :class:`slack.event.Message` """ data = {"channel": self["channel"]} if in_thread: if "message" in self: data["thread_ts"] = ( self["message"].get("thread_ts") or self["message"]["ts"] ) else: data["thread_ts"] = self.get("thread_ts") or self["ts"] elif in_thread is None: if "message" in self and "thread_ts" in self["message"]: data["thread_ts"] = self["message"]["thread_ts"] elif "thread_ts" in self: data["thread_ts"] = self["thread_ts"] return Message(data)
0.002879
def check_bcr_catchup(self): """we're exceeding data request speed vs receive + process""" logger.debug(f"Checking if BlockRequests has caught up {len(BC.Default().BlockRequests)}") # test, perhaps there's some race condition between slow startup and throttle sync, otherwise blocks will never go down for peer in self.Peers: # type: NeoNode peer.stop_block_loop(cancel=False) peer.stop_peerinfo_loop(cancel=False) peer.stop_header_loop(cancel=False) if len(BC.Default().BlockRequests) > 0: for peer in self.Peers: peer.keep_alive() peer.health_check(HEARTBEAT_BLOCKS) peer_bcr_len = len(peer.myblockrequests) # if a peer has cleared its queue then reset heartbeat status to avoid timing out when resuming from "check_bcr" if there's 1 or more really slow peer(s) if peer_bcr_len == 0: peer.start_outstanding_data_request[HEARTBEAT_BLOCKS] = 0 print(f"{peer.prefix} request count: {peer_bcr_len}") if peer_bcr_len == 1: next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1) print(f"{peer.prefix} {peer.myblockrequests} {next_hash}") else: # we're done catching up. Stop own loop and restart peers self.stop_check_bcr_loop() self.check_bcr_loop = None logger.debug("BlockRequests have caught up...resuming sync") for peer in self.Peers: peer.ProtocolReady() # this starts all loops again # give a little bit of time between startup of peers time.sleep(2)
0.003432
def plugin_request(plugin_str): ''' Extract plugin name and version specifiers from plugin descriptor string. .. versionchanged:: 0.25.2 Import from `pip_helpers` locally to avoid error `sci-bots/mpm#5`_. .. _sci-bots/mpm#5: https://github.com/sci-bots/mpm/issues/5 ''' from pip_helpers import CRE_PACKAGE match = CRE_PACKAGE.match(plugin_str) if not match: raise ValueError('Invalid plugin descriptor. Must be like "foo", ' '"foo==1.0", "foo>=1.0", etc.') return match.groupdict()
0.00177
async def refresh_token(self, refresh_token): """ :param refresh_token: an openid refresh-token from a previous token request """ async with self._client_session() as client: well_known = await self._get_well_known(client) try: return await self._post( client, well_known['token_endpoint'], data={ 'grant_type': GRANT_TYPE_REFRESH_TOKEN, 'refresh_token': refresh_token, } ) except aiohttp.ClientResponseError as e: raise ConfigException('oidc: failed to refresh access token')
0.004161
def parse_requirement(self, node):
    """
    Parses <Requirement>

    @param node: Node containing the <Requirement> element
    @type node: xml.etree.Element
    """

    if 'name' in node.lattrib:
        name = node.lattrib['name']
    else:
        self.raise_error('<Requirement> must specify a name')

    if 'dimension' in node.lattrib:
        dimension = node.lattrib['dimension']
    else:
        self.raise_error("Requirement '{0}' must specify a dimension.", name)

    self.current_component_type.add_requirement(Requirement(name, dimension))
0.00813
def rl_set_prompt(prompt: str) -> None: # pragma: no cover """ Sets readline's prompt :param prompt: the new prompt value """ safe_prompt = rl_make_safe_prompt(prompt) if rl_type == RlType.GNU: encoded_prompt = bytes(safe_prompt, encoding='utf-8') readline_lib.rl_set_prompt(encoded_prompt) elif rl_type == RlType.PYREADLINE: readline.rl._set_prompt(safe_prompt)
0.002398
def get_default_locale_callable():
    """
    Wrapper function so that the default mapping is only built when needed
    """
    exec_dir = os.path.dirname(os.path.realpath(__file__))
    xml_path = os.path.join(exec_dir, 'data', 'FacebookLocales.xml')

    fb_locales = _build_locale_table(xml_path)

    def default_locale(request):
        """
        Guess an appropriate FB locale based on the active Django locale.
        If the active locale is available, it is returned. Otherwise,
        it tries to return another locale with the same language. If there
        isn't one available, 'en_US' is returned.
        """
        chosen = 'en_US'
        language = get_language()
        if language:
            locale = to_locale(language)
            lang, _, reg = locale.partition('_')

            lang_map = fb_locales.get(lang)
            if lang_map is not None:
                if reg in lang_map['regs']:
                    chosen = lang + '_' + reg
                else:
                    chosen = lang + '_' + lang_map['default']

        return chosen

    return default_locale
0.000909
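A standalone sketch of the fallback rule, with a small hand-written table standing in for the parsed FacebookLocales.xml data (the entries are illustrative, not the real supported-region lists):

fb_locales = {
    'es': {'regs': {'ES', 'LA'}, 'default': 'LA'},
    'pt': {'regs': {'BR', 'PT'}, 'default': 'BR'},
}

def pick_fb_locale(django_locale):
    lang, _, reg = django_locale.partition('_')
    entry = fb_locales.get(lang)
    if entry is None:
        return 'en_US'                         # unknown language -> hard default
    if reg in entry['regs']:
        return '%s_%s' % (lang, reg)           # exact region is supported
    return '%s_%s' % (lang, entry['default'])  # fall back to the language default

assert pick_fb_locale('es_ES') == 'es_ES'
assert pick_fb_locale('es_MX') == 'es_LA'
assert pick_fb_locale('fr_FR') == 'en_US'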
def find_build_map(stack_builders): """ Find the BUILD_MAP instruction for which the last element of ``stack_builders`` is a store. """ assert isinstance(stack_builders[-1], instrs.STORE_MAP) to_consume = 0 for instr in reversed(stack_builders): if isinstance(instr, instrs.STORE_MAP): # NOTE: This branch should always be hit on the first iteration. to_consume += 1 elif isinstance(instr, instrs.BUILD_MAP): to_consume -= instr.arg if to_consume <= 0: return instr else: raise DecompilationError( "Couldn't find BUILD_MAP for last element of %s." % stack_builders )
0.001414
def deliver_hook(instance, target, payload_override=None): """ Deliver the payload to the target URL. By default it serializes to JSON and POSTs. """ payload = payload_override or serialize_hook(instance) if hasattr(settings, 'HOOK_DELIVERER'): deliverer = get_module(settings.HOOK_DELIVERER) deliverer(target, payload, instance=instance) else: client.post( url=target, data=json.dumps(payload, cls=serializers.json.DjangoJSONEncoder), headers={'Content-Type': 'application/json'} ) return None
0.001678
def __calculate_current_value(self, asset_class: AssetClass):
    """ Calculate the total value of an asset class by adding up the values of all its children """
    # Is this the final asset class, the one with stocks?
    if asset_class.stocks:
        # add all the stocks
        stocks_sum = Decimal(0)
        for stock in asset_class.stocks:
            # recalculate into base currency!
            stocks_sum += stock.value_in_base_currency

        asset_class.curr_value = stocks_sum

    if asset_class.classes:
        # load totals for child classes
        for child in asset_class.classes:
            self.__calculate_current_value(child)
            asset_class.curr_value += child.curr_value
0.004027
def result(retn): ''' Return a value or raise an exception from a retn tuple. ''' ok, valu = retn if ok: return valu name, info = valu ctor = getattr(s_exc, name, None) if ctor is not None: raise ctor(**info) info['errx'] = name raise s_exc.SynErr(**info)
0.003175
def _read_attr(attr_name): """ Parse attribute from file 'pefile.py' and avoid importing this module directly. __version__, __author__, __contact__, """ regex = attr_name + r"\s+=\s+'(.+)'" if sys.version_info.major == 2: with open('pefile.py', 'r') as f: match = re.search(regex, f.read()) else: with open('pefile.py', 'r', encoding='utf-8') as f: match = re.search(regex, f.read()) # Second item in the group is the value of attribute. return match.group(1)
0.001852
def getRect(self): """ Returns the window bounds as a tuple of (x,y,w,h) """ return (self.x, self.y, self.w, self.h)
0.048387
def parse_options(arguments): """Parse command line arguments. The parsing logic is fairly simple. It can only parse long-style parameters of the form:: --key value Several parameters can be defined in the environment and will be used unless explicitly overridden with command-line arguments. The access key, secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID}, C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables. @param arguments: A list of command-line arguments. The first item is expected to be the name of the program being run. @raises OptionError: Raised if incorrectly formed command-line arguments are specified, or if required command-line arguments are not present. @raises UsageError: Raised if C{--help} is present in command-line arguments. @return: A C{dict} with key/value pairs extracted from the argument list. """ arguments = arguments[1:] options = {} while arguments: key = arguments.pop(0) if key in ("-h", "--help"): raise UsageError("Help requested.") if key.startswith("--"): key = key[2:] try: value = arguments.pop(0) except IndexError: raise OptionError("'--%s' is missing a value." % key) options[key] = value else: raise OptionError("Encountered unexpected value '%s'." % key) default_key = os.environ.get("AWS_ACCESS_KEY_ID") if "key" not in options and default_key: options["key"] = default_key default_secret = os.environ.get("AWS_SECRET_ACCESS_KEY") if "secret" not in options and default_secret: options["secret"] = default_secret default_endpoint = os.environ.get("AWS_ENDPOINT") if "endpoint" not in options and default_endpoint: options["endpoint"] = default_endpoint for name in ("key", "secret", "endpoint", "action"): if name not in options: raise OptionError( "The '--%s' command-line argument is required." % name) return options
0.000469
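A usage sketch, assuming the function and its OptionError/UsageError exceptions are importable and that none of the AWS_* environment variables are set (they only fill in missing options); the program name and credential values are placeholders.

>>> parse_options(['txaws-script', '--key', 'AKID', '--secret', 's3cr3t',
...                '--endpoint', 'https://ec2.example.com/', '--action', 'DescribeInstances'])
{'key': 'AKID', 'secret': 's3cr3t', 'endpoint': 'https://ec2.example.com/', 'action': 'DescribeInstances'}

With any of the four required options missing and no environment fallback, an OptionError is raised with a message such as "The '--secret' command-line argument is required."; passing -h or --help raises UsageError instead.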
def _load(self, file_parser, section_name): """The current element is loaded from the configuration file, all constraints and requirements are checked. """ # pylint: disable-msg=W0621 log = logging.getLogger('argtoolbox') try: log.debug("looking for field (section=" + section_name + ") : " + self._name) data = None try: if self.e_type == int: data = file_parser.getint(section_name, self._name) elif self.e_type == float: data = file_parser.getfloat(section_name, self._name) elif self.e_type == bool: data = file_parser.getboolean(section_name, self._name) elif self.e_type == list: data = file_parser.get(section_name, self._name) data = data.strip() data = data.decode(locale.getpreferredencoding()) data = data.split() if not data: msg = "The optional field '%(name)s' was present, \ type is list, but the current value is an empty \ list." % {"name": self._name} log.error(msg) raise ValueError(msg) elif self.e_type == str: data = file_parser.get(section_name, self._name) # happens only when the current field is present, # type is string, but value is '' if not data: msg = "The optional field '%(name)s' was present, \ type is string, but the current value is an empty \ string." % {"name": self._name} log.error(msg) raise ValueError(msg) data = data.decode(locale.getpreferredencoding()) else: msg = "Data type not supported : %(type)s " % { "type": self.e_type} log.error(msg) raise TypeError(msg) except ValueError as ex: msg = "The current field '%(name)s' was present, but the \ required type is : %(e_type)s." % { "name": self._name, "e_type": self.e_type } log.error(msg) log.error(str(ex)) raise ValueError(str(ex)) log_data = {"name": self._name, "data": data, "e_type": self.e_type} if self.hidden: log_data['data'] = "xxxxxxxx" log.debug("field found : '%(name)s', value : '%(data)s', \ type : '%(e_type)s'", log_data) self.value = data except ConfigParser.NoOptionError: if self.conf_required: msg = "The required field '%(name)s' was missing from the \ config file." % {"name": self._name} log.error(msg) raise ValueError(msg) if self.default is not None: self.value = self.default log_data = {"name": self._name, "data": self.default, "e_type": self.e_type} if self.hidden: log_data['data'] = "xxxxxxxx" log.debug("Field not found : '%(name)s', default value : \ '%(data)s', type : '%(e_type)s'", log_data) else: log.debug("Field not found : '" + self._name + "'")
0.001403
def load(name): """ Loads the font specified by name and returns it as an instance of `PIL.ImageFont <http://pillow.readthedocs.io/en/latest/reference/ImageFont.html>`_ class. """ try: font_dir = os.path.dirname(__file__) pil_file = os.path.join(font_dir, '{}.pil'.format(name)) pbm_file = os.path.join(font_dir, '{}.pbm'.format(name)) return ImageFont.load(pil_file) except FileNotFoundError: raise Exception('Failed to load font "{}". '.format(name) + 'Check ev3dev.fonts.available() for the list of available fonts')
0.005042
def get_redis_info(): """Check Redis connection.""" from kombu.utils.url import _parse_url as parse_redis_url from redis import ( StrictRedis, ConnectionError as RedisConnectionError, ResponseError as RedisResponseError, ) for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'): if hasattr(settings, conf_name): url = getattr(settings, conf_name) if url.startswith('redis://'): break else: log.error("No redis connection info found in settings.") return {"status": NO_CONFIG} _, host, port, _, password, database, _ = parse_redis_url(url) start = datetime.now() try: rdb = StrictRedis( host=host, port=port, db=database, password=password, socket_timeout=TIMEOUT_SECONDS, ) info = rdb.info() except (RedisConnectionError, TypeError) as ex: log.error("Error making Redis connection: %s", ex.args) return {"status": DOWN} except RedisResponseError as ex: log.error("Bad Redis response: %s", ex.args) return {"status": DOWN, "message": "auth error"} micro = (datetime.now() - start).microseconds del rdb # the redis package does not support Redis's QUIT. ret = { "status": UP, "response_microseconds": micro, } fields = ("uptime_in_seconds", "used_memory", "used_memory_peak") ret.update({x: info[x] for x in fields}) return ret
0.000673
def _make_request(self, method, path, data=None, **kwargs): """Make a request. Use the `requests` module to actually perform the request. Args: `method`: The method to use. `path`: The path to the resource. `data`: Any data to send (for POST and PUT requests). `kwargs`: Other parameters for `requests`. Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response. """ _logger.debug("Method for request is %s" % method) url = self._construct_full_url(path) _logger.debug("URL for request is %s" % url) self._auth_info.populate_request_data(kwargs) _logger.debug("The arguments are %s" % kwargs) # Add custom headers for the request if self._auth_info._headers: kwargs.setdefault('headers', {}).update(self._auth_info._headers) res = requests.request(method, url, data=data, **kwargs) if res.ok: _logger.debug("Request was successful.") return res.content.decode('utf-8') if hasattr(res, 'content'): _logger.debug("Response was %s:%s", res.status_code, res.content) raise self._exception_for(res.status_code)( res.content, http_code=res.status_code ) else: msg = "No response from URL: %s" % res.request.url _logger.error(msg) raise NoResponseError(msg)
0.0013
def _socketpair_compat(): """TCP/IP socketpair including Windows support""" listensock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP) listensock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listensock.bind(("127.0.0.1", 0)) listensock.listen(1) iface, port = listensock.getsockname() sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP) sock1.setblocking(0) try: sock1.connect(("127.0.0.1", port)) except socket.error as err: if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN: raise sock2, address = listensock.accept() sock2.setblocking(0) listensock.close() return (sock1, sock2)
0.005222
def result(self, result): """Sets the result of this ResponseStatus. :param result: The result of this ResponseStatus. # noqa: E501 :type: str """ if result is None: raise ValueError("Invalid value for `result`, must not be `None`") # noqa: E501 allowed_values = ["OK", "ERROR"] # noqa: E501 if result not in allowed_values: raise ValueError( "Invalid value for `result` ({0}), must be one of {1}" # noqa: E501 .format(result, allowed_values) ) self._result = result
0.003306
def proximal_l1_l2(space, lam=1, g=None): r"""Proximal operator factory of the group-L1-L2 norm/distance. Implements the proximal operator of the functional :: F(x) = lam || |x - g|_2 ||_1 with ``x`` and ``g`` elements in ``space``, and scaling factor ``lam``. Here, ``|.|_2`` is the pointwise Euclidean norm of a vector-valued function. Parameters ---------- space : `LinearSpace` or `ProductSpace` Domain of the functional. lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional Element to which the L1-L2 distance is taken. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- For the functional .. math:: F(x) = \lambda \| |x - g|_2 \|_1, and a step size :math:`\sigma`, the proximal operator of :math:`\sigma F` is given as the "soft-shrinkage" operator .. math:: \mathrm{prox}_{\sigma F}(x) = \begin{cases} g, & \text{where } |x - g|_2 \leq \sigma\lambda, \\ x - \sigma\lambda \frac{x - g}{|x - g|_2}, & \text{elsewhere.} \end{cases} Here, all operations are to be read pointwise. See Also -------- proximal_l1 : Scalar or non-isotropic vectorial variant """ lam = float(lam) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalL1L2(Operator): """Proximal operator of the group-L1-L2 norm/distance.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter. """ super(ProximalL1L2, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # diff = x - g if g is not None: diff = x - g else: if x is out: # Handle aliased `x` and `out` (original `x` needed later) diff = x.copy() else: diff = x # We write the operator as # x - (x - g) / max(|x - g|_2 / sig*lam, 1) pwnorm = PointwiseNorm(self.domain, exponent=2) denom = pwnorm(diff) denom /= self.sigma * lam denom.ufuncs.maximum(1, out=denom) # out = (x - g) / denom for out_i, diff_i in zip(out, diff): diff_i.divide(denom, out=out_i) # out = x - ... out.lincomb(1, x, -1, out) return ProximalL1L2
0.000347
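A standalone NumPy check of the soft-shrinkage formula from the notes, applied to a small vector field; no ODL objects are involved and the shapes and numbers are purely illustrative.

import numpy as np

def prox_l1_l2(x, g, sigma, lam):
    # x, g: arrays of shape (d, n): d vector components sampled at n points
    diff = x - g
    norm = np.linalg.norm(diff, axis=0)            # pointwise |x - g|_2
    scale = np.maximum(norm / (sigma * lam), 1.0)  # shrink factor, clipped at 1
    return x - diff / scale

x = np.array([[3.0, 0.1],
              [4.0, 0.2]])    # two 2-vectors: (3, 4) and (0.1, 0.2)
g = np.zeros_like(x)
out = prox_l1_l2(x, g, sigma=1.0, lam=1.0)
# |(3, 4)| = 5 > 1, so it is shrunk by 1 along its direction -> (2.4, 3.2)
# |(0.1, 0.2)| < 1, so that point collapses onto g = 0
print(out)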
def graph_to_gluon(self, graph, ctx): """Construct SymbolBlock from onnx graph. Parameters ---------- graph : onnx protobuf object The loaded onnx graph ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block :gluon.nn.SymbolBlock The returned gluon SymbolBlock """ sym, arg_params, aux_params = self.from_onnx(graph) metadata = self.get_graph_metadata(graph) data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']] data_inputs = [symbol.var(data_name) for data_name in data_names] from ....gluon import SymbolBlock net = SymbolBlock(outputs=sym, inputs=data_inputs) net_params = net.collect_params() for param in arg_params: if param in net_params: net_params[param].shape = arg_params[param].shape net_params[param]._load_init(arg_params[param], ctx=ctx) for param in aux_params: if param in net_params: net_params[param].shape = aux_params[param].shape net_params[param]._load_init(aux_params[param], ctx=ctx) return net
0.002353
def write_json_flag(flag, fobj, **kwargs): """Write a `DataQualityFlag` to a JSON file Parameters ---------- flag : `DataQualityFlag` data to write fobj : `str`, `file` target file (or filename) to write **kwargs other keyword arguments to pass to :func:`json.dump` See also -------- json.dump for details on acceptable keyword arguments """ # write to filename if isinstance(fobj, string_types): with open(fobj, 'w') as fobj2: return write_json_flag(flag, fobj2, **kwargs) # build json packet data = {} data['ifo'] = flag.ifo data['name'] = flag.tag data['version'] = flag.version data['active'] = flag.active data['known'] = flag.known data['metadata'] = {} data['metadata']['active_indicates_ifo_badness'] = not flag.isgood data['metadata']['flag_description'] = flag.description # write json.dump(data, fobj, **kwargs)
0.001025
def line_to_offset(self, line, column): """ Converts 1-based line number and 0-based column to 0-based character offset into text. """ line -= 1 if line >= len(self._line_offsets): return self._text_len elif line < 0: return 0 else: return min(self._line_offsets[line] + max(0, column), self._text_len)
0.014368
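A standalone sketch of the offset table this method indexes into, using the same convention of 1-based lines and 0-based columns:

text = "alpha\nbeta\ngamma"

# _line_offsets[i] is the character offset at which 0-based line i starts
line_offsets = [0]
for i, ch in enumerate(text):
    if ch == '\n':
        line_offsets.append(i + 1)

line, column = 2, 3                        # 1-based line, 0-based column
offset = line_offsets[line - 1] + column
assert line_offsets == [0, 6, 11]
assert text[offset] == 'a'                 # fourth character of "beta"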
def get_vhost(self, vname): """ Returns the attributes of a single named vhost in a dict. :param string vname: Name of the vhost to get. :returns dict vhost: Attribute dict for the named vhost """ vname = quote(vname, '') path = Client.urls['vhosts_by_name'] % vname vhost = self._call(path, 'GET', headers=Client.json_headers) return vhost
0.004819
def create(self, ignore_warnings=None):
    """Create this AppProfile.

    .. note::

        Uses the ``instance`` and ``app_profile_id`` on the current
        :class:`AppProfile` in addition to the ``routing_policy_type``,
        ``description``, ``cluster_id`` and ``allow_transactional_writes``.
        To change them before creating, reset the values via

        .. code:: python

            app_profile.app_profile_id = 'i-changed-my-mind'
            app_profile.routing_policy_type = (
                google.cloud.bigtable.enums.RoutingPolicyType.SINGLE
            )
            app_profile.description = 'new-description'
            app_profile.cluster_id = 'other-cluster-id'
            app_profile.allow_transactional_writes = True

        before calling :meth:`create`.

    :type ignore_warnings: bool
    :param ignore_warnings: (Optional) If true, ignore safety checks when
                            creating the AppProfile.
    """
    return self.from_pb(
        self.instance_admin_client.create_app_profile(
            parent=self._instance.name,
            app_profile_id=self.app_profile_id,
            app_profile=self._to_pb(),
            ignore_warnings=ignore_warnings,
        ),
        self._instance,
    )
0.001462
def post_calendar_events(self, calendar_id, body, params=None): """ `<>`_ :arg calendar_id: The ID of the calendar to modify :arg body: A list of events """ for param in (calendar_id, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "POST", _make_path("_ml", "calendars", calendar_id, "events"), params=params, body=body, )
0.00363
def read_csv_lightcurve(lcfile):
    '''
    This reads in a K2 lightcurve in CSV format. Transparently reads gzipped
    files.

    Parameters
    ----------

    lcfile : str
        The light curve file to read.

    Returns
    -------

    dict
        Returns an lcdict.

    '''

    # read in the file first
    if '.gz' in os.path.basename(lcfile):
        LOGINFO('reading gzipped K2 LC: %s' % lcfile)
        infd = gzip.open(lcfile,'rb')
    else:
        LOGINFO('reading K2 LC: %s' % lcfile)
        infd = open(lcfile,'rb')

    lctext = infd.read().decode()
    infd.close()

    # figure out the header and get the LC columns
    lcstart = lctext.index('# LIGHTCURVE\n')
    lcheader = lctext[:lcstart+12]
    lccolumns = lctext[lcstart+13:].split('\n')
    lccolumns = [x.split(',') for x in lccolumns if len(x) > 0]

    # initialize the lcdict and parse the CSV header
    lcdict = _parse_csv_header(lcheader)

    # transpose the LC rows into columns
    lccolumns = list(zip(*lccolumns))

    # write the columns to the dict
    for colind, col in enumerate(lcdict['columns']):

        # this picks out the caster to use when reading each column using the
        # definitions in the lcutils.COLUMNDEFS dictionary
        lcdict[col.lower()] = np.array([COLUMNDEFS[col][2](x)
                                        for x in lccolumns[colind]])

    lcdict['columns'] = [x.lower() for x in lcdict['columns']]

    return lcdict
0.002068
def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
    """
    Main entry point for searching: return up to limit_results strings from
    list_of_strings whose words match every word of query.
    """
    matching = []
    query_words = query.split(' ')
    # sort by longest word (highest probability of not finding a match)
    query_words.sort(key=len, reverse=True)
    counter = 0
    for s in list_of_strings:
        target_words = s.split(' ')
        # the anyword searching function is separate
        if(anyword_substring_search(target_words, query_words)):
            matching.append(s)
            # limit results
            counter += 1
            if(counter == limit_results):
                break
    return matching
0.00149
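A standalone sketch of the word-wise matching idea; the real anyword_substring_search() helper may apply slightly different matching rules, so this is only an approximation of the behaviour.

def every_query_word_matches(target_words, query_words):
    # each query word must match (here: prefix-match) at least one target word
    return all(any(t.startswith(q) for t in target_words) for q in query_words)

names = ['John Smith', 'Jane Smithers', 'Bob Jones']
query_words = sorted('smi j'.split(), key=len, reverse=True)   # longest word first
print([n for n in names
       if every_query_word_matches(n.lower().split(), query_words)])
# ['John Smith', 'Jane Smithers']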
def delete(self, timeout=-1, custom_headers=None, force=False): """Deletes current resource. Args: timeout: Timeout in seconds. custom_headers: Allows to set custom http headers. force: Flag to force the operation. """ uri = self.data['uri'] logger.debug("Delete resource (uri = %s)" % (str(uri))) return self._helper.delete(uri, timeout=timeout, custom_headers=custom_headers, force=force)
0.003906
def analyze(self): """Run analysis.""" bench = self.kernel.build_executable(verbose=self.verbose > 1, openmp=self._args.cores > 1) element_size = self.kernel.datatypes_size[self.kernel.datatype] # Build arguments to pass to command: args = [str(s) for s in list(self.kernel.constants.values())] # Determine base runtime with 10 iterations runtime = 0.0 time_per_repetition = 2.0 / 10.0 repetitions = self.iterations // 10 mem_results = {} # TODO if cores > 1, results are for openmp run. Things might need to be changed here! while runtime < 1.5: # Interpolate to a 2.0s run if time_per_repetition != 0.0: repetitions = 2.0 // time_per_repetition else: repetitions = int(repetitions * 10) mem_results = self.perfctr([bench] + [str(repetitions)] + args, group="MEM") runtime = mem_results['Runtime (RDTSC) [s]'] time_per_repetition = runtime / float(repetitions) raw_results = [mem_results] # Base metrics for further metric computations: # An iteration is equal to one high-level code inner-most-loop iteration iterations_per_repetition = reduce( operator.mul, [self.kernel.subs_consts(max_ - min_) / self.kernel.subs_consts(step) for idx, min_, max_, step in self.kernel._loop_stack], 1) iterations_per_cacheline = (float(self.machine['cacheline size']) / self.kernel.bytes_per_iteration) cys_per_repetition = time_per_repetition * float(self.machine['clock']) # Gather remaining counters if not self.no_phenoecm: # Build events and sympy expressions for all model metrics T_OL, event_counters = self.machine.parse_perfmetric( self.machine['overlapping model']['performance counter metric']) T_data, event_dict = self.machine.parse_perfmetric( self.machine['non-overlapping model']['performance counter metric']) event_counters.update(event_dict) cache_metrics = defaultdict(dict) for i in range(len(self.machine['memory hierarchy']) - 1): cache_info = self.machine['memory hierarchy'][i] name = cache_info['level'] for k, v in cache_info['performance counter metrics'].items(): cache_metrics[name][k], event_dict = self.machine.parse_perfmetric(v) event_counters.update(event_dict) # Compile minimal runs to gather all required events minimal_runs = build_minimal_runs(list(event_counters.values())) measured_ctrs = {} for run in minimal_runs: ctrs = ','.join([eventstr(e) for e in run]) r = self.perfctr([bench] + [str(repetitions)] + args, group=ctrs) raw_results.append(r) measured_ctrs.update(r) # Match measured counters to symbols event_counter_results = {} for sym, ctr in event_counters.items(): event, regs, parameter = ctr[0], register_options(ctr[1]), ctr[2] for r in regs: if r in measured_ctrs[event]: event_counter_results[sym] = measured_ctrs[event][r] # Analytical metrics needed for futher calculation cl_size = float(self.machine['cacheline size']) total_iterations = iterations_per_repetition * repetitions total_cachelines = total_iterations / iterations_per_cacheline T_OL_result = T_OL.subs(event_counter_results) / total_cachelines cache_metric_results = defaultdict(dict) for cache, mtrcs in cache_metrics.items(): for m, e in mtrcs.items(): cache_metric_results[cache][m] = e.subs(event_counter_results) # Inter-cache transfers per CL cache_transfers_per_cl = {cache: {k: PrefixedUnit(v / total_cachelines, 'CL/CL') for k, v in d.items()} for cache, d in cache_metric_results.items()} cache_transfers_per_cl['L1']['accesses'].unit = 'LOAD/CL' # Select appropriate bandwidth mem_bw, mem_bw_kernel = self.machine.get_bandwidth( -1, # mem cache_metric_results['L3']['misses'], # load_streams cache_metric_results['L3']['evicts'], 
# store_streams 1) data_transfers = { # Assuming 0.5 cy / LOAD (SSE on SNB or IVB; AVX on HSW, BDW, SKL or SKX) 'T_nOL': (cache_metric_results['L1']['accesses'] / total_cachelines * 0.5), 'T_L1L2': ((cache_metric_results['L1']['misses'] + cache_metric_results['L1']['evicts']) / total_cachelines * cl_size / self.machine['memory hierarchy'][1]['non-overlap upstream throughput'][0]), 'T_L2L3': ((cache_metric_results['L2']['misses'] + cache_metric_results['L2']['evicts']) / total_cachelines * cl_size / self.machine['memory hierarchy'][2]['non-overlap upstream throughput'][0]), 'T_L3MEM': ((cache_metric_results['L3']['misses'] + cache_metric_results['L3']['evicts']) * float(self.machine['cacheline size']) / total_cachelines / mem_bw * float(self.machine['clock'])) } # Build phenomenological ECM model: ecm_model = {'T_OL': T_OL_result} ecm_model.update(data_transfers) else: event_counters = {} ecm_model = None cache_transfers_per_cl = None self.results = {'raw output': raw_results, 'ECM': ecm_model, 'data transfers': cache_transfers_per_cl, 'Runtime (per repetition) [s]': time_per_repetition, 'event counters': event_counters, 'Iterations per repetition': iterations_per_repetition, 'Iterations per cacheline': iterations_per_cacheline} # TODO make more generic to support other (and multiple) constant names self.results['Runtime (per cacheline update) [cy/CL]'] = \ (cys_per_repetition / iterations_per_repetition) * iterations_per_cacheline self.results['MEM volume (per repetition) [B]'] = \ mem_results['Memory data volume [GBytes]'] * 1e9 / repetitions self.results['Performance [MFLOP/s]'] = \ sum(self.kernel._flops.values()) / ( time_per_repetition / iterations_per_repetition) / 1e6 if 'Memory bandwidth [MBytes/s]' in mem_results: self.results['MEM BW [MByte/s]'] = mem_results['Memory bandwidth [MBytes/s]'] else: self.results['MEM BW [MByte/s]'] = mem_results['Memory BW [MBytes/s]'] self.results['Performance [MLUP/s]'] = ( iterations_per_repetition / time_per_repetition) / 1e6 self.results['Performance [MIt/s]'] = ( iterations_per_repetition / time_per_repetition) / 1e6
0.003165
def _MergeTaskStorage(self, storage_writer): """Merges a task storage with the session storage. This function checks all task stores that are ready to merge and updates the scheduled tasks. Note that to prevent this function holding up the task scheduling loop only the first available task storage is merged. Args: storage_writer (StorageWriter): storage writer for a session storage used to merge task storage. """ if self._processing_profiler: self._processing_profiler.StartTiming('merge_check') for task_identifier in storage_writer.GetProcessedTaskIdentifiers(): try: task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier) self._task_manager.SampleTaskStatus(task, 'processed') to_merge = self._task_manager.CheckTaskToMerge(task) if not to_merge: storage_writer.RemoveProcessedTaskStorage(task) self._task_manager.RemoveTask(task) self._task_manager.SampleTaskStatus(task, 'removed_processed') else: storage_writer.PrepareMergeTaskStorage(task) self._task_manager.UpdateTaskAsPendingMerge(task) except KeyError: logger.error( 'Unable to retrieve task: {0:s} to prepare it to be merged.'.format( task_identifier)) continue if self._processing_profiler: self._processing_profiler.StopTiming('merge_check') task = None if not self._storage_merge_reader_on_hold: task = self._task_manager.GetTaskPendingMerge(self._merge_task) # Limit the number of attribute containers from a single task-based # storage file that are merged per loop to keep tasks flowing. if task or self._storage_merge_reader: self._status = definitions.STATUS_INDICATOR_MERGING if self._processing_profiler: self._processing_profiler.StartTiming('merge') if task: if self._storage_merge_reader: self._merge_task_on_hold = self._merge_task self._storage_merge_reader_on_hold = self._storage_merge_reader self._task_manager.SampleTaskStatus( self._merge_task_on_hold, 'merge_on_hold') self._merge_task = task try: self._storage_merge_reader = storage_writer.StartMergeTaskStorage( task) self._task_manager.SampleTaskStatus(task, 'merge_started') except IOError as exception: logger.error(( 'Unable to merge results of task: {0:s} ' 'with error: {1!s}').format(task.identifier, exception)) self._storage_merge_reader = None if self._storage_merge_reader: fully_merged = self._storage_merge_reader.MergeAttributeContainers( maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS) else: # TODO: Do something more sensible when this happens, perhaps # retrying the task once that is implemented. For now, we mark the task # as fully merged because we can't continue with it. fully_merged = True if self._processing_profiler: self._processing_profiler.StopTiming('merge') if fully_merged: try: self._task_manager.CompleteTask(self._merge_task) except KeyError as exception: logger.error( 'Unable to complete task: {0:s} with error: {1!s}'.format( self._merge_task.identifier, exception)) if not self._storage_merge_reader_on_hold: self._merge_task = None self._storage_merge_reader = None else: self._merge_task = self._merge_task_on_hold self._storage_merge_reader = self._storage_merge_reader_on_hold self._merge_task_on_hold = None self._storage_merge_reader_on_hold = None self._task_manager.SampleTaskStatus( self._merge_task, 'merge_resumed') self._status = definitions.STATUS_INDICATOR_RUNNING self._number_of_produced_events = storage_writer.number_of_events self._number_of_produced_sources = storage_writer.number_of_event_sources self._number_of_produced_warnings = storage_writer.number_of_warnings
0.009233
def address_checksum_and_decode(addr: str) -> Address:
    """ Accepts a string address and turns it into binary.

        Makes sure that the string address provided is 0x prefixed and
        checksummed according to the EIP55 specification
    """
    if not is_0x_prefixed(addr):
        raise InvalidAddress('Address must be 0x prefixed')

    if not is_checksum_address(addr):
        raise InvalidAddress('Address must be EIP55 checksummed')

    addr_bytes = decode_hex(addr)
    assert len(addr_bytes) in (20, 0)
    return Address(addr_bytes)
0.001802
def organize_models(self, outdir, force_rerun=False): """Organize and rename SWISS-MODEL models to a single folder with a name containing template information. Args: outdir (str): New directory to copy renamed models to force_rerun (bool): If models should be copied again even if they already exist Returns: dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values """ uniprot_to_swissmodel = defaultdict(list) for u, models in self.all_models.items(): for m in models: original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id']) file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:], 'swissmodel', '{}.pdb'.format(original_filename)) if op.exists(file_path): new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4]) shutil.copy(file_path, op.join(outdir, new_filename)) uniprot_to_swissmodel[u].append(new_filename) else: log.warning('{}: no file {} found for model'.format(u, file_path)) return uniprot_to_swissmodel
0.005984
def _compute_bits(self):
    """
    m._compute_bits() -- [utility] Set m.totalbits to the total information content
    (in bits) of the motif and m.bits to a list with the information content at each position
    """
    bits = []
    totbits = 0
    bgbits = 0
    bg = self.background
    UNCERT = lambda x: x*math.log(x)/math.log(2.0)
    for letter in ACGT:
        bgbits = bgbits + UNCERT(bg[letter])
    for i in range(self.width):
        tot = 0
        for letter in ACGT:
            Pij = pow(2.0, self.logP[i][letter])
            tot = tot + UNCERT(Pij)
            #bit = Pij * self.ll[i][letter]
            #if bit > 0:
            #    tot = tot + bit
        #print tot, bgbits, tot-bgbits
        bits.append(max(0,tot-bgbits))
        totbits = totbits + max(0,tot-bgbits)
    self.bits = bits
    self.totalbits = totbits
0.01359
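A standalone sketch of the quantity being accumulated per motif column: the information content relative to the background, clipped at zero (the column probabilities are illustrative only).

import math

background = {'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25}
column = {'A': 0.85, 'C': 0.05, 'G': 0.05, 'T': 0.05}   # one motif column

uncert = lambda p: p * math.log(p, 2)
bg_term = sum(uncert(background[b]) for b in 'ACGT')    # -2.0 for a uniform background
col_term = sum(uncert(column[b]) for b in 'ACGT')
bits = max(0.0, col_term - bg_term)
print(round(bits, 3))   # 1.152, about 1.15 bits for this strongly A-biased column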