text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def on_trial_result(self, trial_runner, trial, result):
    """Callback for early stopping.

    This stopping rule stops a running trial if the trial's best objective
    value by step `t` is strictly worse than the median of the running
    averages of all completed trials' objectives reported up to step `t`.
    """
    if trial in self._stopped_trials:
        assert not self._hard_stop
        return TrialScheduler.CONTINUE  # fall back to FIFO

    time = result[self._time_attr]
    self._results[trial].append(result)
    median_result = self._get_median_result(time)
    best_result = self._best_result(trial)
    if self._verbose:
        logger.info("Trial {} best res={} vs median res={} at t={}".format(
            trial, best_result, median_result, time))

    if best_result < median_result and time > self._grace_period:
        if self._verbose:
            logger.info("MedianStoppingRule: "
                        "early stopping {}".format(trial))
        self._stopped_trials.add(trial)
        if self._hard_stop:
            return TrialScheduler.STOP
        else:
            return TrialScheduler.PAUSE
    else:
        return TrialScheduler.CONTINUE
0.001554
def bin2real(binary_string, conv, endianness="@"):
    """
    Converts a binary string representing a number to its fixed-point
    arithmetic representation.

    @param binary_string: binary number in simulink representation
    @param conv: conv structure containing conversion specs
    @param endianness: optionally specify bytes endianness for unpacking
    @return: the real number represented
    @attention: The length of the binary string must match the data type
        defined in the conv structure, otherwise proper errors will be
        thrown by the B{struct} module.
    """
    data = struct.unpack(endianness + conv["fmt"], binary_string)[0]
    return fix2real(data, conv)
0.007153
def log_message(self, format, *args):
    """Log an arbitrary message.

    This is used by all other logging functions.  Override it if you have
    specific logging wishes.

    The first argument, FORMAT, is a format string for the message to be
    logged.  If the format string contains any % escapes requiring
    parameters, they should be specified as subsequent arguments (it's
    just like printf!).

    The client ip and current date/time are prefixed to every message.
    """
    sys.stderr.write("%s - - [%s] %s\n" %
                     (self.address_string(),
                      self.log_date_time_string(),
                      format % args))
0.004087
def texture_from_image(renderer, image_name):
    """Create an SDL2 Texture from an image file"""
    soft_surface = ext.load_image(image_name)
    texture = SDL_CreateTextureFromSurface(renderer.renderer, soft_surface)
    SDL_FreeSurface(soft_surface)
    return texture
0.003676
def fetch(self, refspec=None, progress=None, **kwargs): """Fetch the latest changes for this remote :param refspec: A "refspec" is used by fetch and push to describe the mapping between remote ref and local ref. They are combined with a colon in the format <src>:<dst>, preceded by an optional plus sign, +. For example: git fetch $URL refs/heads/master:refs/heads/origin means "grab the master branch head from the $URL and store it as my origin branch head". And git push $URL refs/heads/master:refs/heads/to-upstream means "publish my master branch head as to-upstream branch at $URL". See also git-push(1). Taken from the git manual Fetch supports multiple refspecs (as the underlying git-fetch does) - supplying a list rather than a string for 'refspec' will make use of this facility. :param progress: See 'push' method :param kwargs: Additional arguments to be passed to git-fetch :return: IterableList(FetchInfo, ...) list of FetchInfo instances providing detailed information about the fetch results :note: As fetch does not provide progress information to non-ttys, we cannot make it available here unfortunately as in the 'push' method.""" if refspec is None: # No argument refspec, then ensure the repo's config has a fetch refspec. self._assert_refspec() kwargs = add_progress(kwargs, self.repo.git, progress) if isinstance(refspec, list): args = refspec else: args = [refspec] proc = self.repo.git.fetch(self, *args, as_process=True, with_stdout=False, universal_newlines=True, v=True, **kwargs) res = self._get_fetch_info_from_stderr(proc, progress) if hasattr(self.repo.odb, 'update_cache'): self.repo.odb.update_cache() return res
0.004876
def check_w_normalized(W, N_k, tolerance=1.0e-4):
    """Check the weight matrix W is properly normalized. The sum over N
    should be 1, and the sum over k by N_k should also be 1.

    Parameters
    ----------
    W : np.ndarray, shape=(N, K), dtype='float'
        The normalized weight matrix for snapshots and states.
        W[n, k] is the weight of snapshot n in state k.
    N_k : np.ndarray, shape=(K), dtype='int'
        N_k[k] is the number of samples from state k.
    tolerance : float, optional, default=1.0e-4
        Tolerance for checking equality of sums

    Returns
    -------
    None : NoneType
        Returns a None object if test passes, otherwise raises a
        ParameterError with appropriate message if W is not normalized
        within tolerance.
    """
    [N, K] = W.shape

    column_sums = np.sum(W, axis=0)
    badcolumns = (np.abs(column_sums - 1) > tolerance)
    if np.any(badcolumns):
        which_badcolumns = np.arange(K)[badcolumns]
        firstbad = which_badcolumns[0]
        raise ParameterError(
            'Warning: Should have \\sum_n W_nk = 1. Actual column sum for state %d was %f. %d other columns have similar problems' %
            (firstbad, column_sums[firstbad], np.sum(badcolumns)))

    row_sums = np.sum(W * N_k, axis=1)
    badrows = (np.abs(row_sums - 1) > tolerance)
    if np.any(badrows):
        which_badrows = np.arange(N)[badrows]
        firstbad = which_badrows[0]
        raise ParameterError(
            'Warning: Should have \\sum_k N_k W_nk = 1. Actual row sum for sample %d was %f. %d other rows have similar problems' %
            (firstbad, row_sums[firstbad], np.sum(badrows)))

    return
0.005412
def update_flag_record(self, state: str, feature_key: str) -> None:
    """Update redis record with new state.

    :param state: state for feature flag.
    :param feature_key: key for feature flag.
    """
    key_name = self._format_key_name()
    try:
        parsed_flag = json.loads(self.get_flag_record(feature_key).decode('utf-8'))
        parsed_flag['on'] = state
        parsed_flag['version'] += 1
        updated_flag = json.dumps(parsed_flag).encode('utf-8')
    except KeyError as ex:
        LOG.error(ex)
        sys.exit(1)
    LOG.info('updating %s to %s', feature_key, state)
    self.redis.hset(key_name, feature_key, updated_flag)
0.004202
def projection_pp(site, normal, dist_to_plane, reference): ''' This method finds the projection of the site onto the plane containing the slipped area, defined as the Pp(i.e. 'perpendicular projection of site location onto the fault plane' Spudich et al. (2013) - page 88) given a site. :param site: Location of the site, [lon, lat, dep] :param normal: Normal to the plane including the fault patch, describe by a normal vector[a, b, c] :param dist_to_plane: D in the plane equation, ax + by + cz = d :param reference: :class:`~openquake.hazardlib.geo.point.Point` object representing the location of project reference point :returns: pp, the projection point, [ppx, ppy, ppz], in xyz domain , a numpy array. ''' # Transform to xyz coordinate [site_x, site_y, site_z] = get_xyz_from_ll(site, reference) a = np.array([(1, 0, 0, -normal[0]), (0, 1, 0, -normal[1]), (0, 0, 1, -normal[2]), (normal[0], normal[1], normal[2], 0)]) b = np.array([site_x, site_y, site_z, dist_to_plane]) x = np.linalg.solve(a, b) pp = np.array([x[0], x[1], x[2]]) return pp
0.000805
def euclidean3d(v1, v2):
    """Faster implementation of euclidean distance for the 3D case."""
    if not (len(v1) == 3 and len(v2) == 3):
        print("Vectors are not in 3D space. Returning None.")
        return None
    return np.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2 + (v1[2] - v2[2]) ** 2)
0.006536
def sort_by_successors(l, succsOf):
    """ Sorts a list, such that if l[b] in succsOf(l[a]) then a < b """
    rlut = dict()
    nret = 0
    todo = list()
    for i in l:
        rlut[i] = set()
    for i in l:
        for j in succsOf(i):
            rlut[j].add(i)
    for i in l:
        if len(rlut[i]) == 0:
            todo.append(i)
    while len(todo) > 0:
        i = todo.pop()
        nret += 1
        yield i
        for j in succsOf(i):
            rlut[j].remove(i)
            if len(rlut[j]) == 0:
                todo.append(j)
    if nret != len(l):
        raise ValueError("Cycle detected")
0.003263
def readCol(self, col, startRow=0, endRow=-1):
    ''' read col '''
    return self.__operation.readCol(col, startRow, endRow)
0.014706
def build_edit_form(title, id, cols, return_page):
    """ returns the html for a simple edit form """
    txt = '<H3>' + title + '</H3>'
    txt += '<form action="' + return_page + '" method="POST">\n'   # return_page = /agents
    txt += ' updating id:' + str(id) + '\n<BR>'
    txt += ' <input type="hidden" name="rec_id" readonly value="' + str(id) + '"> '
    txt += ' <TABLE width=80% valign=top border=1>'
    for col_num, col in enumerate(cols):
        txt += '  <TR>\n'
        txt += '    <TD><div id="form_label">' + col + '</div></TD>\n'
        txt += '    <TD><div id="form_input"><input type="text" name="col_' + str(col_num) + '"></div></TD>\n'
        txt += '  </TR>\n'
    txt += '  <TR><TD></TD>\n'
    txt += '  <TD>\n'
    txt += '    <input type="submit" name="update-form" value="Save Changes">\n'
    txt += '    <input type="submit" name="delete-form" value="Delete">\n'
    txt += '    <input type="submit" name="add-form" value="Add">\n'
    txt += '  </TD></TR></TABLE>'
    txt += '</form>\n'
    return txt
0.008547
def set_circuit_breakers(mv_grid, mode='load', debug=False): """ Calculates the optimal position of a circuit breaker on all routes of mv_grid, adds and connects them to graph. Args ---- mv_grid: MVGridDing0 Description#TODO debug: bool, defaults to False If True, information is printed during process Notes ----- According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a circuit breaker which is open at normal operation [#]_, [#]_. Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on the route. Instead of the peak current, the peak load is used here (assuming a constant voltage). If a ring is dominated by loads (peak load > peak capacity of generators), only loads are used for determining the location of circuit breaker. If generators are prevailing (peak load < peak capacity of generators), only generator capacities are considered for relocation. The core of this function (calculation of the optimal circuit breaker position) is the same as in ding0.grid.mv_grid.models.Route.calc_circuit_breaker_position but here it is 1. applied to a different data type (NetworkX Graph) and it 2. adds circuit breakers to all rings. The re-location of circuit breakers is necessary because the original position (calculated during routing with method mentioned above) shifts during the connection of satellites and therefore it is no longer valid. References ---------- .. [#] X. Tao, "Automatisierte Grundsatzplanung von Mittelspannungsnetzen", Dissertation, 2006 .. [#] FGH e.V.: "Technischer Bericht 302: Ein Werkzeug zur Optimierung der Störungsbeseitigung für Planung und Betrieb von Mittelspannungsnetzen", Tech. rep., 2008 """ # get power factor for loads and generators cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load') cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen') # iterate over all rings and circuit breakers for ring, circ_breaker in zip(mv_grid.rings_nodes(include_root_node=False), mv_grid.circuit_breakers()): nodes_peak_load = [] nodes_peak_generation = [] # iterate over all nodes of ring for node in ring: # node is LV station -> get peak load and peak generation if isinstance(node, LVStationDing0): nodes_peak_load.append(node.peak_load / cos_phi_load) nodes_peak_generation.append(node.peak_generation / cos_phi_feedin) # node is cable distributor -> get all connected nodes of subtree using graph_nodes_from_subtree() elif isinstance(node, CableDistributorDing0): nodes_subtree = mv_grid.graph_nodes_from_subtree(node) nodes_subtree_peak_load = 0 nodes_subtree_peak_generation = 0 for node_subtree in nodes_subtree: # node is LV station -> get peak load and peak generation if isinstance(node_subtree, LVStationDing0): nodes_subtree_peak_load += node_subtree.peak_load / \ cos_phi_load nodes_subtree_peak_generation += node_subtree.peak_generation / \ cos_phi_feedin # node is LV station -> get peak load and peak generation if isinstance(node_subtree, GeneratorDing0): nodes_subtree_peak_generation += node_subtree.capacity / \ cos_phi_feedin nodes_peak_load.append(nodes_subtree_peak_load) nodes_peak_generation.append(nodes_subtree_peak_generation) else: raise ValueError('Ring node has got invalid type.') if mode == 'load': node_peak_data = nodes_peak_load elif mode == 'loadgen': # is ring dominated by load or generation? 
# (check if there's more load than generation in ring or vice versa) if sum(nodes_peak_load) > sum(nodes_peak_generation): node_peak_data = nodes_peak_load else: node_peak_data = nodes_peak_generation else: raise ValueError('parameter \'mode\' is invalid!') # calc optimal circuit breaker position # set init value diff_min = 10e6 # check where difference of demand/generation in two half-rings is minimal for ctr in range(len(node_peak_data)): # split route and calc demand difference route_data_part1 = sum(node_peak_data[0:ctr]) route_data_part2 = sum(node_peak_data[ctr:len(node_peak_data)]) diff = abs(route_data_part1 - route_data_part2) # equality has to be respected, otherwise comparison stops when demand/generation=0 if diff <= diff_min: diff_min = diff position = ctr else: break # relocate circuit breaker node1 = ring[position-1] node2 = ring[position] circ_breaker.branch = mv_grid._graph.adj[node1][node2]['branch'] circ_breaker.branch_nodes = (node1, node2) circ_breaker.branch.circuit_breaker = circ_breaker circ_breaker.geo_data = calc_geo_centre_point(node1, node2) if debug: logger.debug('Ring: {}'.format(ring)) logger.debug('Circuit breaker {0} was relocated to edge {1}-{2} ' '(position on route={3})'.format( circ_breaker, node1, node2, position) ) logger.debug('Peak load sum: {}'.format(sum(nodes_peak_load))) logger.debug('Peak loads: {}'.format(nodes_peak_load))
0.0046
def cmd(self, fun, *args, **kwargs): ''' Call an execution module with the given arguments and keyword arguments .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True) ''' func = self.sminion.functions[fun] data = { 'arg': args, 'fun': fun } data.update(kwargs) executors = getattr(self.sminion, 'module_executors', []) or \ self.opts.get('module_executors', ['direct_call']) if isinstance(executors, six.string_types): executors = [executors] for name in executors: fname = '{0}.execute'.format(name) if fname not in self.sminion.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = self.sminion.executors[fname](self.opts, data, func, args, kwargs) if return_data is not None: break return return_data
0.004344
def get_frames(root_path):
    """Get path to all the frames in view SAX that contain complete frames"""
    ret = []
    for root, _, files in os.walk(root_path):
        root = root.replace('\\', '/')
        files = [s for s in files if ".dcm" in s]
        if len(files) == 0 or not files[0].endswith(".dcm") or root.find("sax") == -1:
            continue
        prefix = files[0].rsplit('-', 1)[0]
        fileset = set(files)
        expected = ["%s-%04d.dcm" % (prefix, i + 1) for i in range(30)]
        if all(x in fileset for x in expected):
            ret.append([root + "/" + x for x in expected])
    # sort for reproducibility
    return sorted(ret, key=lambda x: x[0])
0.031532
def _shutdown(self):
    """Private method. Reset to non-piped spawn"""
    global sconf_global, _ac_config_hs

    if not self.active:
        raise SCons.Errors.UserError("Finish may be called only once!")
    if self.logstream is not None and not dryrun:
        self.logstream.write("\n")
        self.logstream.close()
        self.logstream = None
    # remove the SConfSourceBuilder from the environment
    blds = self.env['BUILDERS']
    del blds['SConfSourceBuilder']
    self.env.Replace(BUILDERS=blds)
    self.active = 0
    sconf_global = None
    if self.config_h is not None:
        _ac_config_hs[self.config_h] = self.config_h_text
    self.env.fs = self.lastEnvFs
0.006649
def get_field_descriptor(self, ref_or_index):
    """
    Parameters
    ----------
    ref_or_index: str or int
        field lowercase name, or field position

    Returns
    -------
    Field descriptor (info contained in Idd)
    """
    if isinstance(ref_or_index, int):
        index = ref_or_index
    else:
        index = self._table._dev_descriptor.get_field_index(ref_or_index)
    return self._table._dev_descriptor.get_field_descriptor(index)
0.003906
def read_string(self, lpBaseAddress, nChars, fUnicode = False): """ Reads an ASCII or Unicode string from the address space of the process. @see: L{peek_string} @type lpBaseAddress: int @param lpBaseAddress: Memory address to begin reading. @type nChars: int @param nChars: String length to read, in characters. Remember that Unicode strings have two byte characters. @type fUnicode: bool @param fUnicode: C{True} is the string is expected to be Unicode, C{False} if it's expected to be ANSI. @rtype: str, compat.unicode @return: String read from the process memory space. @raise WindowsError: On error an exception is raised. """ if fUnicode: nChars = nChars * 2 szString = self.read(lpBaseAddress, nChars) if fUnicode: szString = compat.unicode(szString, 'U16', 'ignore') return szString
0.004036
def annotate_diamond(records, diamond_path): ''' Retrieve scientific names and lineages for taxon IDs in Diamond output Returns taxonomically annotated SeqRecords with modified description attributes ''' contigs_metadata = {} with open(diamond_path) as diamond_tax_fh: for line in diamond_tax_fh: contig, taxid, evalue = line.strip().split('\t') contigs_metadata[contig] = dict(taxid=int(taxid), evalue=float(evalue)) ncbi = ete3.NCBITaxa() taxids = {m['taxid'] for m in contigs_metadata.values()} # set of taxids taxids_lineages = ncbi.get_lineage_translator(taxids) # dict of taxid lists taxids_with_children = {x for v in taxids_lineages.values() for x in v} # flatten to set taxids_names = ncbi.get_taxid_translator(taxids_with_children) taxids_ranks = ncbi.get_rank(taxids_with_children) for contig, md in contigs_metadata.items(): md['sciname'] = taxids_names.get(md['taxid']) md['rank'] = taxids_ranks.get(md['taxid'], '') md['lineage_fmt'] = (':'.join([taxids_names.get(t, '') for t in taxids_lineages.get(md['taxid'], [])]) if md['taxid'] else None) for r in records: md = contigs_metadata[r.id] r.description = f"{md['taxid']}|{md['rank']}|{md['sciname']}|{md['lineage_fmt']}" return records
0.006356
def array2d(X, dtype=None, order=None, copy=False, force_all_finite=True):
    """Returns at least 2-d array with data from X"""
    X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
    if force_all_finite:
        _assert_all_finite(X_2d)
    if X is X_2d and copy:
        X_2d = _safe_copy(X_2d)
    return X_2d
0.003058
def ucast_ip_mask(ip_addr_and_mask, return_tuple=True):
    """
    Function to check if an address is unicast and that the CIDR mask is good

    Args:
        ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24
        return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False

    Returns: see return_tuple for return options

    """
    regex_ucast_ip_and_mask = __re.compile(r"^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$")
    if return_tuple:
        while not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
            print("Not a good unicast IP and CIDR mask combo.")
            print("Please try again.")
            ip_addr_and_mask = input("Please enter a unicast IP address and mask in the following format x.x.x.x/x: ")
        ip_cidr_split = ip_addr_and_mask.split("/")
        ip_addr = ip_cidr_split[0]
        cidr = ip_cidr_split[1]
        return ip_addr, cidr
    elif not return_tuple:
        if not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
            return False
        else:
            return True
0.006102
def add_options(self): """ Add program options. """ super(RtorrentQueueManager, self).add_options() self.jobs = None self.httpd = None # basic options self.add_bool_option("-n", "--dry-run", help="advise jobs not to do any real work, just tell what would happen") self.add_bool_option("--no-fork", "--fg", help="Don't fork into background (stay in foreground and log to console)") self.add_bool_option("--stop", help="Stop running daemon") self.add_bool_option("--restart", help="Stop running daemon, then fork into background") self.add_bool_option("-?", "--status", help="Check daemon status") self.add_value_option("--pid-file", "PATH", help="file holding the process ID of the daemon, when running in background") self.add_value_option("--guard-file", "PATH", help="guard file for the process watchdog")
0.009494
def chunks(dictionary, chunk_size):
    """
    Yield successive n-sized chunks from dictionary.
    """
    iterable = iter(dictionary)
    for __ in range(0, len(dictionary), chunk_size):
        yield {key: dictionary[key] for key in islice(iterable, chunk_size)}
0.003759
def pad(cls, data):
    """ Pads data to match AES block size """
    if sys.version_info > (3, 0):
        try:
            data = data.encode("utf-8")
        except AttributeError:
            pass
        length = AES.block_size - (len(data) % AES.block_size)
        data += bytes([length]) * length
        return data
    else:
        return data + (AES.block_size - len(data) % AES.block_size) * \
            chr(AES.block_size - len(data) % AES.block_size)
0.005837
def region_est_hull(self, level=0.95, modelparam_slice=None): """ Estimates a credible region over models by taking the convex hull of a credible subset of particles. :param float level: The desired crediblity level (see :meth:`SMCUpdater.est_credible_region`). :param slice modelparam_slice: Slice over which model parameters to consider. :return: The tuple ``(faces, vertices)`` where ``faces`` describes all the vertices of all of the faces on the exterior of the convex hull, and ``vertices`` is a list of all vertices on the exterior of the convex hull. :rtype: ``faces`` is a ``numpy.ndarray`` with shape ``(n_face, n_mps, n_mps)`` and indeces ``(idx_face, idx_vertex, idx_mps)`` where ``n_mps`` corresponds to the size of ``modelparam_slice``. ``vertices`` is an ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``. """ points = self.est_credible_region( level=level, modelparam_slice=modelparam_slice ) hull = ConvexHull(points) return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())]
0.005691
def all(klass, client, **kwargs):
    """Returns a Cursor instance for a given resource."""
    resource = klass.RESOURCE_COLLECTION
    request = Request(client, 'get', resource, params=kwargs)
    return Cursor(klass, request, init_with=[client])
0.007576
def pep517_subprocess_runner(cmd, cwd=None, extra_environ=None):
    # type: (List[AnyStr], Optional[AnyStr], Optional[Mapping[S, S]]) -> None
    """The default method of calling the wrapper subprocess."""
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    run(
        cmd,
        cwd=cwd,
        env=env,
        block=True,
        combine_stderr=True,
        return_object=False,
        write_to_stdout=False,
        nospin=True,
    )
0.00207
def styleMapFamilyNameFallback(info): """ Fallback to *openTypeNamePreferredFamilyName* if *styleMapStyleName* or, if *styleMapStyleName* isn't defined, *openTypeNamePreferredSubfamilyName* is *regular*, *bold*, *italic* or *bold italic*, otherwise fallback to *openTypeNamePreferredFamilyName openTypeNamePreferredFamilyName*. """ familyName = getAttrWithFallback(info, "openTypeNamePreferredFamilyName") styleName = info.styleMapStyleName if not styleName: styleName = getAttrWithFallback(info, "openTypeNamePreferredSubfamilyName") if styleName is None: styleName = "" elif styleName.lower() in _styleMapStyleNames: styleName = "" return (familyName + " " + styleName).strip()
0.003974
def sheetNames(book=None):
    '''return sheet names of a book.

    Args:
        book (str, optional): If a book is given, pull names from
            that book. Otherwise, try the active one

    Returns:
        list of sheet names (typical case).
        None if book has no sheets.
        False if book doesn't exist.
    '''
    if book:
        if not book.lower() in [x.lower() for x in bookNames()]:
            return False
    else:
        book = activeBook()
    if not book:
        return False
    poBook = PyOrigin.WorksheetPages(book)
    if not len(poBook):
        return None
    return [x.GetName() for x in poBook.Layers()]
0.004644
def support_event_list_simple(fn): """ enable __call__ to accept event_list. :param fn: :return: """ @wraps(fn) def _wrapped(self, event, *args, **kwargs): if _is_event_list(event): result = [] for e in event: ret = fn(self, e, *args, **kwargs) if ret is None: continue if isinstance(ret, (tuple, list)): result.extend(ret) else: result.append(ret) if result: if len(result) == 1: return result[0] return result return None else: return fn(self, event, *args, **kwargs) return _wrapped
0.001294
def from_dict(name, values): ''' Convert a dictionary of configuration values into a sequence of BlockadeContainerConfig instances ''' # determine the number of instances of this container count = 1 count_value = values.get('count', 1) if isinstance(count_value, int): count = max(count_value, 1) def with_index(name, idx): if name and idx: return '%s_%d' % (name, idx) return name def get_instance(n, idx=None): return BlockadeContainerConfig( with_index(n, idx), values['image'], command=values.get('command'), links=values.get('links'), volumes=values.get('volumes'), publish_ports=values.get('ports'), expose_ports=values.get('expose'), environment=values.get('environment'), hostname=values.get('hostname'), dns=values.get('dns'), start_delay=values.get('start_delay', 0), neutral=values.get('neutral', False), holy=values.get('holy', False), container_name=with_index(values.get('container_name'), idx), cap_add=values.get('cap_add')) if count == 1: yield get_instance(name) else: for idx in range(1, count+1): # TODO: configurable name/index format yield get_instance(name, idx)
0.001294
def __publish(self, port, db, queue, queue_length):
    """
    :param port: Redis port
    :param db: Redis db index to report
    :param queue: Queue name to report
    :param queue_length: Queue length to report
    :return:
    """
    metric_name_segments = ['queue']
    cluster = self.config['cluster_prefix']
    if cluster:
        metric_name_segments.append(cluster)
    metric_name_segments.append(port)
    metric_name_segments.append(str(db))
    metric_name_segments.append(queue)
    self.publish_gauge(
        name='.'.join(metric_name_segments),
        value=queue_length
    )
0.003017
def create_entity(self, name, gl_structure, description=None):
    """
    Create an entity and add it to the model.

    :param name: The entity name.
    :param gl_structure: The entity's general ledger structure.
    :param description: The entity description.

    :returns: The created entity.
    """
    new_entity = Entity(name, gl_structure, description=description)
    self.entities.append(new_entity)
    return new_entity
0.004211
def clear(self) -> None: """ Clear all cache entries for directory and, if it is a 'pure' directory, remove the directory itself """ if self._cache_directory is not None: # Safety - if there isn't a cache directory file, this probably isn't a valid cache assert os.path.exists(self._cache_directory_index), "Attempt to clear a non-existent cache" self._load() # Shouldn't have any impact but... for e in self._cache.values(): if os.path.exists(e.loc): os.remove(e.loc) self._cache.clear() self._update() self._cache = {}
0.007364
def itemlist(item, sep, suppress_trailing=True):
    """Create a list of items separated by seps."""
    return condense(item + ZeroOrMore(addspace(sep + item)) +
                    Optional(sep.suppress() if suppress_trailing else sep))
0.009174
def get_job(self, cloud_service_id, job_collection_id, job_id): ''' The Get Job operation gets the details (including the current job status) of the specified job from the specified job collection. The return type is cloud_service_id: The cloud service id job_collection_id: Name of the hosted service. job_id: The job id you wish to create. ''' _validate_not_none('cloud_service_id', cloud_service_id) _validate_not_none('job_collection_id', job_collection_id) _validate_not_none('job_id', job_id) path = self._get_job_collection_path( cloud_service_id, job_collection_id, job_id) self.content_type = "application/json" payload = self._perform_get(path).body.decode() return json.loads(payload)
0.00346
def remove(self):
    """ Remove file from device. """
    lib.gp_camera_file_delete(self._cam._cam, self.directory.path.encode(),
                              self.name.encode(), self._cam._ctx)
0.009615
def visualize_dim_red(r, labels, filename=None, figsize=(18,10), title='', legend=True, label_map=None, label_scale=False, label_color_map=None, **scatter_options): """ Saves a scatter plot of a (2,n) matrix r, where each column is a cell. Args: r (array): (2,n) matrix labels (array): (n,) array of ints/strings or floats. Can be None. filename (string): string to save the output graph. If None, then this just displays the plot. figsize (tuple): Default: (18, 10) title (string): graph title legend (bool): Default: True label_map (dict): map of labels to label names. Default: None label_scale (bool): True if labels is should be treated as floats. Default: False label_color_map (array): (n,) array or list of colors for each label. """ fig = plt.figure(figsize=figsize) plt.cla() if not label_scale: for i in set(labels): label = i if label_map is not None: label = label_map[i] if label_color_map is not None: c = label_color_map[i] plt.scatter(r[0, labels==i], r[1, labels==i], label=label, c=c, **scatter_options) else: plt.scatter(r[0, labels==i], r[1, labels==i], label=label, **scatter_options) else: if labels is None: plt.scatter(r[0,:], r[1,:], **scatter_options) else: plt.scatter(r[0,:], r[1,:], c=labels/labels.max(), **scatter_options) plt.title(title) if legend: plt.legend() if filename is not None: plt.savefig(filename, dpi=100) plt.close() return fig
0.009529
def to_dict(self): '''Save this data port connector into a dictionary.''' d = {'connectorId': self.connector_id, 'name': self.name, 'dataType': self.data_type, 'interfaceType': self.interface_type, 'dataflowType': self.data_flow_type, 'sourceDataPort': self.source_data_port.to_dict(), 'targetDataPort': self.target_data_port.to_dict()} if self.visible != True: d[RTS_EXT_NS_YAML + 'visible'] = self.visible if self.subscription_type: d['subscriptionType'] = self.subscription_type if self.push_interval: d['pushInterval'] = self.push_interval if self.comment: d[RTS_EXT_NS_YAML + 'comment'] = self.comment props = [] for name in self.properties: p = {'name': name} if self.properties[name]: p['value'] = str(self.properties[name]) props.append(p) if props: d[RTS_EXT_NS_YAML + 'properties'] = props return d
0.00365
def quote(self, data):
    """Quote any parameters that contain spaces or special characters.

    Returns:
        (string): String containing parameters wrapped in quotes
    """
    # default quote character; also covers languages other than python/java,
    # which previously left quote_char undefined
    quote_char = "'"
    if self.lang == 'python':
        quote_char = "'"
    elif self.lang == 'java':
        quote_char = "'"
    if re.findall(r'[!\-\=\s\$\&]{1,}', str(data)):
        data = '{0}{1}{0}'.format(quote_char, data)
    return data
0.004329
def series_table_row_offset(self, series):
    """
    Return the number of rows preceding the data table for *series* in
    the Excel worksheet.
    """
    title_and_spacer_rows = series.index * 2
    data_point_rows = series.data_point_offset
    return title_and_spacer_rows + data_point_rows
0.006154
def class_dict_to_specs(mcs, class_dict):
    """Takes a class `__dict__` and returns `HeronComponentSpec` entries"""
    specs = {}
    for name, spec in class_dict.items():
        if isinstance(spec, HeronComponentSpec):
            # Use the variable name as the specification name.
            if spec.name is None:
                spec.name = name
            if spec.name in specs:
                raise ValueError("Duplicate component name: %s" % spec.name)
            else:
                specs[spec.name] = spec
    return specs
0.009901
def julian_day(year, month=1, day=1):
    """Given a proleptic Gregorian calendar date, return a Julian day int."""
    janfeb = month < 3
    return (day
            + 1461 * (year + 4800 - janfeb) // 4
            + 367 * (month - 2 + janfeb * 12) // 12
            - 3 * ((year + 4900 - janfeb) // 100) // 4
            - 32075)
0.003021
def _compute_validation_outputs(self, actions: List[List[ProductionRule]], best_final_states: Mapping[int, Sequence[GrammarBasedState]], world: List[WikiTablesWorld], example_lisp_string: List[str], metadata: List[Dict[str, Any]], outputs: Dict[str, Any]) -> None: """ Does common things for validation time: computing logical form accuracy (which is expensive and unnecessary during training), adding visualization info to the output dictionary, etc. This doesn't return anything; instead it `modifies` the given ``outputs`` dictionary, and calls metrics on ``self``. """ batch_size = len(actions) action_mapping = {} for batch_index, batch_actions in enumerate(actions): for action_index, action in enumerate(batch_actions): action_mapping[(batch_index, action_index)] = action[0] outputs['action_mapping'] = action_mapping outputs['best_action_sequence'] = [] outputs['debug_info'] = [] outputs['entities'] = [] outputs['logical_form'] = [] for i in range(batch_size): # Decoding may not have terminated with any completed logical forms, if `num_steps` # isn't long enough (or if the model is not trained enough and gets into an # infinite action loop). if i in best_final_states: best_action_indices = best_final_states[i][0].action_history[0] action_strings = [action_mapping[(i, action_index)] for action_index in best_action_indices] try: logical_form = world[i].get_logical_form(action_strings, add_var_function=False) self._has_logical_form(1.0) except ParsingError: self._has_logical_form(0.0) logical_form = 'Error producing logical form' if example_lisp_string: denotation_correct = self._executor.evaluate_logical_form(logical_form, example_lisp_string[i]) self._denotation_accuracy(1.0 if denotation_correct else 0.0) outputs['best_action_sequence'].append(action_strings) outputs['logical_form'].append(logical_form) outputs['debug_info'].append(best_final_states[i][0].debug_info[0]) # type: ignore outputs['entities'].append(world[i].table_graph.entities) else: outputs['logical_form'].append('') self._has_logical_form(0.0) self._denotation_accuracy(0.0) if metadata is not None: outputs["question_tokens"] = [x["question_tokens"] for x in metadata] outputs["original_table"] = [x["original_table"] for x in metadata]
0.006812
def getFaxStatsCounters(self):
    """Query Asterisk Manager Interface for Fax Stats.

    CLI Command - fax show stats

    @return: Dictionary of fax stats.
    """
    if not self.hasFax():
        return None
    info_dict = {}
    cmdresp = self.executeCommand('fax show stats')
    ctxt = 'general'
    for section in cmdresp.strip().split('\n\n')[1:]:
        i = 0
        for line in section.splitlines():
            mobj = re.match(r'(\S.*\S)\s*:\s*(\d+)\s*$', line)
            if mobj:
                if ctxt not in info_dict:
                    info_dict[ctxt] = {}
                info_dict[ctxt][mobj.group(1).lower()] = int(mobj.group(2).lower())
            elif i == 0:
                ctxt = line.strip().lower()
            i += 1
    return info_dict
0.015695
def init_app(self, app, **kwargs):
    """kwargs holds initial dynaconf configuration"""
    self.kwargs.update(kwargs)
    self.settings = self.dynaconf_instance or LazySettings(**self.kwargs)
    app.config = self.make_config(app)
    app.dynaconf = self.settings
0.007018
def add_error(self, error):
    """
    In the case where a list/tuple is passed in this just extends the list
    rather than having nested lists. Otherwise, the value is appended.
    """
    if is_non_string_iterable(error) and not isinstance(error, collections.Mapping):
        for value in error:
            self.add_error(value)
    else:
        self.append(error)
0.007177
def get_usedby_and_readonly(self, id):
    """
    Gets the build plan details of the selected plan script as per the selected attributes.

    Args:
        id: ID of the Plan Script.

    Returns: array of build plans
    """
    uri = self.URI + "/" + id + "/usedby/readonly"
    return self._client.get(uri)
0.008427
def _current_size(self):
    """
    Returns the current count of all documents, including the changes from
    the current changeMap.
    """
    deletes, adds, _ = Watch._extract_changes(self.doc_map, self.change_map, None)
    return len(self.doc_map) + len(adds) - len(deletes)
0.009836
def images(self): ''' a method to list the local docker images :return: list of dictionaries with available image fields [ { 'CREATED': '7 days ago', 'TAG': 'latest', 'IMAGE ID': '2298fbaac143', 'VIRTUAL SIZE': '302.7 MB', 'REPOSITORY': 'test1' } ] ''' sys_command = 'docker images' sys_output = self.command(sys_command) image_list = self._images(sys_output) return image_list
0.005376
def dict_flat_generator(value, attname=None, splitter=JSPLITTER, dumps=None, prefix=None, error=ValueError, recursive=True): '''Convert a nested dictionary into a flat dictionary representation''' if not isinstance(value, dict) or not recursive: if not prefix: raise error('Cannot assign a non dictionary to a JSON field') else: name = '%s%s%s' % (attname, splitter, prefix) if attname else prefix yield name, dumps(value) if dumps else value else: # loop over dictionary for field in value: val = value[field] key = prefix if field: key = '%s%s%s' % (prefix, splitter, field) if prefix else field for k, v2 in dict_flat_generator(val, attname, splitter, dumps, key, error, field): yield k, v2
0.000989
def get_challenge_for_url(url):
    """ Gets the challenge for the cached URL.

    :param url: the URL the challenge is cached for.
    :rtype: HttpBearerChallenge
    """
    if not url:
        raise ValueError('URL cannot be None')
    url = parse.urlparse(url)
    _lock.acquire()
    val = _cache.get(url.netloc)
    _lock.release()
    return val
0.002833
def run(line):
    """
    Run a shell line: run('ls /tmp') will execv('/usr/bin/ls', ['ls', '/tmp'])
    """
    arguments = shlex.split(line)
    path = lookup(arguments[0])  # Lookup the first argument in PATH
    execute(path, arguments)
0.004132
def run_game_of_life(years, width, height, time_delay, silent="N"): """ run a single game of life for 'years' and log start and end living cells to aikif """ lfe = mod_grid.GameOfLife(width, height, ['.', 'x'], 1) set_random_starting_grid(lfe) lg.record_source(lfe, 'game_of_life_console.py') print(lfe) start_cells = lfe.count_filled_positions() for ndx, dummy_idx in enumerate(range(years)): lfe.update_gol() if silent == "N": print_there(1,1, "Game of Life - Iteration # " + str(ndx)) print_there(1, 2, lfe) time.sleep(time_delay) end_cells = lfe.count_filled_positions() return start_cells, end_cells
0.004255
def print_results(self): """Print results of the package command.""" # Updates if self.package_data.get('updates'): print('\n{}{}Updates:'.format(c.Style.BRIGHT, c.Fore.BLUE)) for p in self.package_data['updates']: print( '{!s:<20}{}{} {!s:<50}'.format( p.get('action'), c.Style.BRIGHT, c.Fore.CYAN, p.get('output') ) ) # Packaging print('\n{}{}Package:'.format(c.Style.BRIGHT, c.Fore.BLUE)) for p in self.package_data['package']: if isinstance(p.get('output'), list): n = 5 list_data = p.get('output') print( '{!s:<20}{}{} {!s:<50}'.format( p.get('action'), c.Style.BRIGHT, c.Fore.CYAN, ', '.join(p.get('output')[:n]) ) ) del list_data[:n] for data in [ list_data[i : i + n] for i in range(0, len(list_data), n) # noqa: E203 ]: print( '{!s:<20}{}{} {!s:<50}'.format( '', c.Style.BRIGHT, c.Fore.CYAN, ', '.join(data) ) ) else: print( '{!s:<20}{}{} {!s:<50}'.format( p.get('action'), c.Style.BRIGHT, c.Fore.CYAN, p.get('output') ) ) # Bundle if self.package_data.get('bundle'): print('\n{}{}Bundle:'.format(c.Style.BRIGHT, c.Fore.BLUE)) for p in self.package_data['bundle']: print( '{!s:<20}{}{} {!s:<50}'.format( p.get('action'), c.Style.BRIGHT, c.Fore.CYAN, p.get('output') ) ) # ignore exit code if not self.args.ignore_validation: print('\n') # separate errors from normal output # print all errors for error in self.package_data.get('errors'): print('{}{}'.format(c.Fore.RED, error)) self.exit_code = 1
0.003136
def start(self): """ TODO: docstring """ logger.info("Starting interchange") # last = time.time() while True: # active_flag = False socks = dict(self.poller.poll(1)) if socks.get(self.task_incoming) == zmq.POLLIN: message = self.task_incoming.recv_multipart() logger.debug("Got new task from client") self.worker_messages.send_multipart(message) logger.debug("Sent task to worker") # active_flag = True # last = time.time() if socks.get(self.worker_messages) == zmq.POLLIN: message = self.worker_messages.recv_multipart() logger.debug("Got new result from worker") # self.result_outgoing.send_multipart(message) self.result_outgoing.send_multipart(message[1:]) logger.debug("Sent result to client")
0.002088
def lookup_signame(num):
    """Find the corresponding signal name for 'num'. Return None
    if 'num' is invalid."""
    signames = signal.__dict__
    num = abs(num)
    for signame in list(signames.keys()):
        if signame.startswith('SIG') and signames[signame] == num:
            return signame
        pass
    # Something went wrong. Should have returned above
    return None
0.002584
async def open_websocket(url: str, headers: Optional[list] = None,
                         subprotocols: Optional[list] = None):
    """
    Opens a websocket.
    """
    ws = await create_websocket(
        url, headers=headers, subprotocols=subprotocols)
    try:
        yield ws
    finally:
        await ws.close()
0.002882
def get_by(self, field, value): """ Gets all Users that match the filter. The search is case-insensitive. Args: field: Field name to filter. Accepted values: 'name', 'userName', 'role' value: Value to filter. Returns: list: A list of Users. """ if field == 'userName' or field == 'name': return self._client.get(self.URI + '/' + value) elif field == 'role': value = value.replace(" ", "%20") return self._client.get(self.URI + '/roles/users/' + value)['members'] else: raise HPOneViewException('Only userName, name and role can be queried for this resource.')
0.006964
async def get_pinstate_report(self, command): """ This method retrieves a Firmata pin_state report for a pin.. See: http://firmata.org/wiki/Protocol#Pin_State_Query :param command: {"method": "get_pin_state", "params": [PIN]} :returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]} """ pin = int(command[0]) value = await self.core.get_pin_state(pin) if value: reply = json.dumps({"method": "pin_state_reply", "params": value}) else: reply = json.dumps({"method": "pin_state_reply", "params": "Unknown"}) await self.websocket.send(reply)
0.005848
def update_readme(self, template_readme: Template): """Generate the new README file locally.""" readme = os.path.join(self.cached_repo, "README.md") if os.path.exists(readme): os.remove(readme) links = {model_type: {} for model_type in self.models.keys()} for model_type, model_uuids in self.models.items(): for model_uuid in model_uuids: links[model_type][model_uuid] = os.path.join("/", model_type, "%s.md" % model_uuid) with open(readme, "w") as fout: fout.write(template_readme.render(models=self.models, meta=self.meta, links=links)) git.add(self.cached_repo, [readme]) self._log.info("Updated %s", readme)
0.005502
def run(self, *args): """Autocomplete gender information.""" params = self.parser.parse_args(args) api_token = params.api_token genderize_all = params.genderize_all code = self.autogender(api_token=api_token, genderize_all=genderize_all) return code
0.006061
def process_text(self, array=True): """ Construct the text based on the entered content in the widget. """ if array: prefix = 'np.array([[' else: prefix = 'np.matrix([[' suffix = ']])' values = self._widget.text().strip() if values != '': # cleans repeated spaces exp = r'(\s*)' + ROW_SEPARATOR + r'(\s*)' values = re.sub(exp, ROW_SEPARATOR, values) values = re.sub(r"\s+", " ", values) values = re.sub(r"]$", "", values) values = re.sub(r"^\[", "", values) values = re.sub(ROW_SEPARATOR + r'*$', '', values) # replaces spaces by commas values = values.replace(' ', ELEMENT_SEPARATOR) # iterate to find number of rows and columns new_values = [] rows = values.split(ROW_SEPARATOR) nrows = len(rows) ncols = [] for row in rows: new_row = [] elements = row.split(ELEMENT_SEPARATOR) ncols.append(len(elements)) for e in elements: num = e # replaces not defined values if num in NAN_VALUES: num = 'np.nan' # Convert numbers to floating point if self._force_float: try: num = str(float(e)) except: pass new_row.append(num) new_values.append(ELEMENT_SEPARATOR.join(new_row)) new_values = ROW_SEPARATOR.join(new_values) values = new_values # Check validity if len(set(ncols)) == 1: self._valid = True else: self._valid = False # Single rows are parsed as 1D arrays/matrices if nrows == 1: prefix = prefix[:-1] suffix = suffix.replace("]])", "])") # Fix offset offset = self._offset braces = BRACES.replace(' ', '\n' + ' '*(offset + len(prefix) - 1)) values = values.replace(ROW_SEPARATOR, braces) text = "{0}{1}{2}".format(prefix, values, suffix) self._text = text else: self._text = '' self.update_warning()
0.001186
def process_phosphorylation_statements(self): """Looks for Phosphorylation events in the graph and extracts them into INDRA statements. In particular, looks for a Positive_regulation event node with a child Phosphorylation event node. If Positive_regulation has an outgoing Cause edge, that's the subject If Phosphorylation has an outgoing Theme edge, that's the object If Phosphorylation has an outgoing Site edge, that's the site """ G = self.G statements = [] pwcs = self.find_event_parent_with_event_child('Positive_regulation', 'Phosphorylation') for pair in pwcs: (pos_reg, phos) = pair cause = self.get_entity_text_for_relation(pos_reg, 'Cause') theme = self.get_entity_text_for_relation(phos, 'Theme') print('Cause:', cause, 'Theme:', theme) # If the trigger word is dephosphorylate or similar, then we # extract a dephosphorylation statement trigger_word = self.get_entity_text_for_relation(phos, 'Phosphorylation') if 'dephos' in trigger_word: deph = True else: deph = False site = self.get_entity_text_for_relation(phos, 'Site') theme_node = self.get_related_node(phos, 'Theme') assert(theme_node is not None) evidence = self.node_to_evidence(theme_node, is_direct=False) if theme is not None: if deph: statements.append(Dephosphorylation(s2a(cause), s2a(theme), site, evidence=evidence)) else: statements.append(Phosphorylation(s2a(cause), s2a(theme), site, evidence=evidence)) return statements
0.001009
def sort_timeseries(self, ascending=True): """Sorts the data points within the TimeSeries according to their occurrence inline. :param boolean ascending: Determines if the TimeSeries will be ordered ascending or descending. If this is set to descending once, the ordered parameter defined in :py:meth:`TimeSeries.__init__` will be set to False FOREVER. :return: Returns :py:obj:`self` for convenience. :rtype: TimeSeries """ # the time series is sorted by default if ascending and self._sorted: return sortorder = 1 if not ascending: sortorder = -1 self._predefinedSorted = False self._timeseriesData.sort(key=lambda i: sortorder * i[0]) self._sorted = ascending return self
0.005931
def get_log_stream(logger):
    """
    Returns a stream to the root log file.
    If there is no logfile return the stderr log stream

    Returns:
        A stream to the root log file or stderr stream.
    """
    file_stream = None
    log_stream = None
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            file_stream = handler.stream
        else:
            log_stream = handler.stream
    if file_stream:
        return file_stream
    return log_stream
0.001949
def purcell_bidirectional(target, r_toroid, num_points=1e2, surface_tension='pore.surface_tension', contact_angle='pore.contact_angle', throat_diameter='throat.diameter', pore_diameter='pore.diameter'): r""" Computes the throat capillary entry pressure assuming the throat is a toroid. Makes use of the toroidal meniscus model with mode touch. This model accounts for mensicus protrusion into adjacent pores and touching solid features. It is bidirectional becauase the connected pores generally have different sizes and this determines how far the meniscus can protrude. Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. r_toroid : float or array_like The radius of the toroid surrounding the pore num_points : float (Default 100) The number of divisions to make along the profile length to assess the meniscus properties in order to find the touch length. surface_tension : dict key (string) The dictionary key containing the surface tension values to be used. If a pore property is given, it is interpolated to a throat list. contact_angle : dict key (string) The dictionary key containing the contact angle values to be used. If a pore property is given, it is interpolated to a throat list. throat_diameter : dict key (string) The dictionary key containing the throat diameter values to be used. pore_diameter : dict key (string) The dictionary key containing the pore diameter values to be used. Notes """ network = target.project.network conns = network['throat.conns'] values = {} for p in range(2): network['throat.temp_diameter'] = network[pore_diameter][conns[:, p]] key = 'throat.touch_pore_'+str(p) target.add_model(propname=key, model=pm.meniscus.toroidal, mode='touch', r_toroid=r_toroid, num_points=num_points, throat_diameter=throat_diameter, surface_tension=surface_tension, contact_angle=contact_angle, touch_length='throat.temp_diameter') values[p] = target[key] target.remove_model(key) del network['throat.temp_diameter'] return np.vstack((values[0], values[1])).T
0.000367
def start(self): """Initiate the download.""" log.info("Sending tftp download request to %s" % self.host) log.info(" filename -> %s" % self.file_to_transfer) log.info(" options -> %s" % self.options) self.metrics.start_time = time.time() log.debug("Set metrics.start_time to %s" % self.metrics.start_time) # FIXME: put this in a sendRRQ method? pkt = TftpPacketRRQ() pkt.filename = self.file_to_transfer pkt.mode = "octet" # FIXME - shouldn't hardcode this pkt.options = self.options self.sock.sendto(pkt.encode().buffer, (self.host, self.port)) self.next_block = 1 self.last_pkt = pkt self.state = TftpStateSentRRQ(self) while self.state: try: log.debug("State is %s" % self.state) self.cycle() except TftpTimeout as err: log.error(str(err)) self.retry_count += 1 if self.retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries, giving up") raise else: log.warning("resending last packet") self.state.resendLast() except TftpFileNotFoundError as err: # If we received file not found, then we should not save the open # output file or we'll be left with a size zero file. Delete it, # if it exists. log.error("Received File not found error") if self.fileobj is not None and not self.filelike_fileobj: if os.path.exists(self.fileobj.name): log.debug("unlinking output file of %s", self.fileobj.name) os.unlink(self.fileobj.name) raise
0.003241
def AddTableColumn(self, table, column):
    """Add column to table if it is not already there."""
    if column not in self._table_columns[table]:
        self._table_columns[table].append(column)
0.010256
def get_windows_tz(iana_tz):
    """ Returns a valid windows TimeZone from a given pytz TimeZone
    (Iana/Olson Timezones)
    Note: Windows Timezones are SHIT!... no ... really THEY ARE HOLY FUCKING SHIT!.
    """
    timezone = IANA_TO_WIN.get(
        iana_tz.zone if isinstance(iana_tz, tzinfo) else iana_tz)
    if timezone is None:
        raise pytz.UnknownTimeZoneError(
            "Can't find Iana TimeZone " + iana_tz.zone)
    return timezone
0.002174
def _create_archive(self): '''This will create a tar.gz compressed archive of the scrubbed directory''' try: self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session) self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path) t = tarfile.open(self.archive_path, 'w:gz') for dirpath, dirnames, filenames in os.walk(self.dir_path): for f in filenames: f_full = os.path.join(dirpath, f) f_archive = f_full.replace(self.report_dir,'') self.logger.debug('adding %s to %s archive', f_archive, self.archive_path) t.add(f_full, arcname=f_archive) except Exception as e: #pragma: no cover self.logger.exception(e) raise Exception('CreateArchiveError: Unable to create Archive') self._clean_up() self.logger.info('Archiving Complete') self.logger.con_out('SOSCleaner Complete') if not self.quiet: # pragma: no cover t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,'')) t.close()
0.009426
def hpo_genes(context, hpo_term): """Export a list of genes based on hpo terms""" LOG.info("Running scout export hpo_genes") adapter = context.obj['adapter'] header = ["#Gene_id\tCount"] if not hpo_term: LOG.warning("Please use at least one hpo term") context.abort() for line in header: click.echo(line) for term in adapter.generate_hpo_gene_list(*hpo_term): click.echo("{0}\t{1}".format(term[0], term[1]))
0.004211
def validate_config(raise_=True): """ Verifies that all configuration values have a valid setting """ ELIBConfig.check() known_paths = set() duplicate_values = set() missing_values = set() for config_value in ConfigValue.config_values: if config_value.path not in known_paths: known_paths.add(config_value.path) else: duplicate_values.add(config_value.name) try: config_value() except MissingValueError: missing_values.add(config_value.name) if raise_ and duplicate_values: raise DuplicateConfigValueError(str(duplicate_values)) if raise_ and missing_values: raise MissingValueError(str(missing_values), 'missing config value(s)') return duplicate_values, missing_values
0.001233
def merge_all(lst, strategy='smart', renderer='yaml', merge_lists=False): ''' .. versionadded:: 2019.2.0 Merge a list of objects into each other in order :type lst: Iterable :param lst: List of objects to be merged. :type strategy: String :param strategy: Merge strategy. See utils.dictupdate. :type renderer: String :param renderer: Renderer type. Used to determine strategy when strategy is 'smart'. :type merge_lists: Bool :param merge_lists: Defines whether to merge embedded object lists. CLI Example: .. code-block:: shell $ salt-call --output=txt slsutil.merge_all '[{foo: Foo}, {foo: Bar}]' local: {u'foo': u'Bar'} ''' ret = {} for obj in lst: ret = salt.utils.dictupdate.merge( ret, obj, strategy, renderer, merge_lists ) return ret
0.001148
def get_my_learning_path_session(self, proxy): """Gets the ``OsidSession`` associated with the my learning path service. :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: a ``MyLearningPathSession`` :rtype: ``osid.learning.MyLearningPathSession`` :raise: ``NullArgument`` -- ``proxy`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_my_learning_path()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_my_learning_path()`` is ``true``.* """ if not self.supports_my_learning_path(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.MyLearningPathSession(proxy=proxy, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
0.005629
def default_value(self):
    """
    Generate a default value

    :return: the value
    """
    if self.has_default:
        if hasattr(self.default, '__call__'):
            return self.default()
        else:
            return self.default
    else:
        raise Exception("No default value specified")
0.005682
def write_basic_mesh(Verts, E2V=None, mesh_type='tri', pdata=None, pvdata=None, cdata=None, cvdata=None, fname='output.vtk'): """Write mesh file for basic types of elements. Parameters ---------- fname : {string} file to be written, e.g. 'mymesh.vtu' Verts : {array} coordinate array (N x D) E2V : {array} element index array (Nel x Nelnodes) mesh_type : {string} type of elements: tri, quad, tet, hex (all 3d) pdata : {array} scalar data on vertices (N x Nfields) pvdata : {array} vector data on vertices (3*Nfields x N) cdata : {array} scalar data on cells (Nfields x Nel) cvdata : {array} vector data on cells (3*Nfields x Nel) Returns ------- writes a .vtu file for use in Paraview Notes ----- The difference between write_basic_mesh and write_vtu is that write_vtu is more general and requires dictionaries of cell information. write_basic_mesh calls write_vtu Examples -------- >>> import numpy as np >>> from pyamg.vis import write_basic_mesh >>> Verts = np.array([[0.0,0.0], ... [1.0,0.0], ... [2.0,0.0], ... [0.0,1.0], ... [1.0,1.0], ... [2.0,1.0], ... [0.0,2.0], ... [1.0,2.0], ... [2.0,2.0], ... [0.0,3.0], ... [1.0,3.0], ... [2.0,3.0]]) >>> E2V = np.array([[0,4,3], ... [0,1,4], ... [1,5,4], ... [1,2,5], ... [3,7,6], ... [3,4,7], ... [4,8,7], ... [4,5,8], ... [6,10,9], ... [6,7,10], ... [7,11,10], ... [7,8,11]]) >>> pdata=np.ones((12,2)) >>> pvdata=np.ones((12*3,2)) >>> cdata=np.ones((12,2)) >>> cvdata=np.ones((3*12,2)) >>> write_basic_mesh(Verts, E2V=E2V, mesh_type='tri',pdata=pdata, pvdata=pvdata, cdata=cdata, cvdata=cvdata, fname='test.vtu') See Also -------- write_vtu """ if E2V is None: mesh_type = 'vertex' map_type_to_key = {'vertex': 1, 'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12} if mesh_type not in map_type_to_key: raise ValueError('unknown mesh_type=%s' % mesh_type) key = map_type_to_key[mesh_type] if mesh_type == 'vertex': uidx = np.arange(0, Verts.shape[0]).reshape((Verts.shape[0], 1)) E2V = {key: uidx} else: E2V = {key: E2V} if cdata is not None: cdata = {key: cdata} if cvdata is not None: cvdata = {key: cvdata} write_vtu(Verts=Verts, Cells=E2V, pdata=pdata, pvdata=pvdata, cdata=cdata, cvdata=cvdata, fname=fname)
0.00033
def find_tag(__matcher: str = 'v[0-9]*', *, strict: bool = True,
             git_dir: str = '.') -> str:
    """Find closest tag for a git repository.

    Note:
        This defaults to `Semantic Version`_ tag matching.

    Args:
        __matcher: Glob-style tag pattern to match
        strict: Raise if no matching tag is found, instead of falling back to a commit-ish
        git_dir: Repository to search

    Returns:
        Matching tag name

    .. _Semantic Version: http://semver.org/

    """
    command = 'git describe --abbrev=12 --dirty'.split()
    with chdir(git_dir):
        try:
            stdout = check_output(command + ['--match={}'.format(__matcher), ])
        except CalledProcessError:
            if strict:
                raise
            stdout = check_output(command + ['--always', ])
    stdout = stdout.decode('ascii', 'replace')
    return stdout.strip()
0.001167
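# Hedged illustration of the --match pattern: git uses glob-style matching, which
# fnmatch approximates closely enough for this example. The tag names are invented.
from fnmatch import fnmatch

tags = ['v0.9.0', 'v1.0.0', 'v1.1.0-rc1', 'nightly-2019-01-01']
assert [t for t in tags if fnmatch(t, 'v[0-9]*')] == ['v0.9.0', 'v1.0.0', 'v1.1.0-rc1']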
def _getTarball(url, into_directory, cache_key, origin_info=None):
    '''unpack the specified tarball url into the specified directory'''
    try:
        access_common.unpackFromCache(cache_key, into_directory)
    except KeyError as e:
        tok = settings.getProperty('github', 'authtoken')
        headers = {}
        if tok is not None:
            headers['Authorization'] = 'token ' + str(tok)
        logger.debug('GET %s', url)
        response = requests.get(url, allow_redirects=True, stream=True, headers=headers)
        response.raise_for_status()
        logger.debug('getting file: %s', url)
        logger.debug('headers: %s', response.headers)
        # github doesn't expose hashes of the archives being downloaded as far
        # as I can tell :(
        access_common.unpackTarballStream(
            stream = response,
            into_directory = into_directory,
            hash = {},
            cache_key = cache_key,
            origin_info = origin_info
        )
0.015052
def parse_image_name(name):
    """
    Parse the image name into a three-element tuple, like below:
        (repository, name, version)
    :param str name: the image name
    :return: (repository, name, version)
    """
    name = name or ""
    if '/' in name:
        # split only on the first separator so extra '/' or ':' do not break unpacking
        repository, other = name.split('/', 1)
    else:
        repository, other = None, name
    if ':' in other:
        name, version = other.split(':', 1)
    else:
        name, version = other, 'latest'
    return repository, name, version
0.001923
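# Hedged usage sketch, assuming the parse_image_name function above is in scope;
# the image names are invented examples.
assert parse_image_name('ubuntu') == (None, 'ubuntu', 'latest')
assert parse_image_name('library/redis:5.0') == ('library', 'redis', '5.0')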
def child_get_property(self, child, property_name, value=None):
        """child_get_property(child, property_name, value=None)
        :param child: a widget which is a child of `self`
        :type child: :obj:`Gtk.Widget`
        :param property_name: the name of the property to get
        :type property_name: :obj:`str`
        :param value: Either :obj:`None` or a correctly initialized :obj:`GObject.Value`
        :type value: :obj:`GObject.Value` or :obj:`None`
        :returns: The Python value of the child property
        """
        if value is None:
            prop = self.find_child_property(property_name)
            if prop is None:
                raise ValueError('Class "%s" does not contain child property "%s"' % (self, property_name))
            value = GObject.Value(prop.value_type)
        Gtk.Container.child_get_property(self, child, property_name, value)
        return value.get_value()
0.002947
def mail_sent_count(self, count): """ Test that `count` mails have been sent. Syntax: I have sent `count` emails Example: .. code-block:: gherkin Then I have sent 2 emails """ expected = int(count) actual = len(mail.outbox) assert expected == actual, \ "Expected to send {0} email(s), got {1}.".format(expected, actual)
0.002604
def cmd_crack_luhn(number): """Having known values for a Luhn validated number, obtain the possible unknown numbers. Numbers that use the Luhn algorithm for validation are Credit Cards, IMEI, National Provider Identifier in the United States, Canadian Social Insurance Numbers, Israel ID Numbers and Greek Social Security Numbers (ΑΜΚΑ). The '-' characters are ignored. Define the missing numbers with the 'x' character. Reference: https://en.wikipedia.org/wiki/Luhn_algorithm Example: \b $ habu.crack.luhn 4509-xxxx-3160-6445 """ number = number.replace('-', '') unknown_count = number.count('x') if not number.replace('x', '').isdigit(): print('Invalid format. Please, read the documentation.', file=sys.stderr) sys.exit(1) for n in range(10 ** unknown_count): candidate = number for item in '{:0{count}}'.format(n, count=unknown_count): candidate = candidate.replace('x', item, 1) if luhn_validate(candidate): print(candidate)
0.003766
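# Hedged sketch of the luhn_validate helper the command relies on (not habu's actual
# implementation): double every second digit from the right, subtract 9 when a doubled
# digit exceeds 9, and require the total to be divisible by 10.
def luhn_validate(number):
    total = 0
    for i, ch in enumerate(reversed(str(number))):
        d = int(ch)
        if i % 2 == 1:      # every second digit from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

assert luhn_validate('79927398713')        # textbook valid Luhn number
assert not luhn_validate('79927398714')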
def wait_for_ready(self, instance_id, limit=14400, delay=10, pending=False):
        """Determine if a Server is ready.

        A server is ready when no transactions are running on it.

        :param int instance_id: The instance ID with the pending transaction
        :param int limit: The maximum amount of seconds to wait.
        :param int delay: The number of seconds to sleep before checks. Defaults to 10.
        :param bool pending: If True, also wait for any active transaction to finish,
            not just provisioning and OS reloads.
        """
        now = time.time()
        until = now + limit
        mask = "mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]"
        instance = self.get_hardware(instance_id, mask=mask)
        while now <= until:
            if utils.is_ready(instance, pending):
                return True
            transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName')
            snooze = min(delay, until - now)
            LOGGER.info("%s - %d not ready. Auto retry in %ds", transaction, instance_id, snooze)
            time.sleep(snooze)
            instance = self.get_hardware(instance_id, mask=mask)
            now = time.time()
        LOGGER.info("Waiting for %d expired.", instance_id)
        return False
0.005004
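# The same poll-with-deadline pattern in isolation, as a hedged generic sketch:
# re-check a condition, never sleep past the remaining budget, give up at the limit.
# The check callable is a stand-in, not a SoftLayer API call.
import time

def wait_until(check, limit=30, delay=5):
    deadline = time.time() + limit
    while time.time() <= deadline:
        if check():
            return True
        time.sleep(min(delay, max(0, deadline - time.time())))
    return False

# e.g. wait_until(lambda: job_is_done(), limit=600, delay=10)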
async def build_payment_req(wallet_handle: int, submitter_did: str, inputs_json: str, outputs_json: str, extra: Optional[str]) -> (str, str): """ Builds Indy request for doing payment according to this payment method. This method consumes set of inputs and outputs. Format of inputs is specific for payment method. Usually it should reference payment transaction with at least one output that corresponds to payment address that user owns. :param wallet_handle: wallet handle (created by open_wallet). :param submitter_did : (Option) DID of request sender :param inputs_json: The list of payment sources as json array: ["source1", ...] Note that each source should reference payment address :param outputs_json: The list of outputs as json array: [{ recipient: <str>, // payment address of recipient amount: <int>, // amount }] :param extra: // optional information for payment operation :return: payment_req_json: Indy request for doing payment payment_method: used payment method """ logger = logging.getLogger(__name__) logger.debug("build_payment_req: >>> wallet_handle: %r, submitter_did: %r, inputs_json: %r, outputs_json: %r," " extra: %r", wallet_handle, submitter_did, inputs_json, outputs_json, extra) if not hasattr(build_payment_req, "cb"): logger.debug("build_payment_req: Creating callback") build_payment_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) c_wallet_handle = c_int32(wallet_handle) c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None c_inputs_json = c_char_p(inputs_json.encode('utf-8')) c_outputs_json = c_char_p(outputs_json.encode('utf-8')) c_extra = c_char_p(extra.encode('utf-8')) if extra is not None else None (payment_req_json, payment_method) = await do_call('indy_build_payment_req', c_wallet_handle, c_submitter_did, c_inputs_json, c_outputs_json, c_extra, build_payment_req.cb) res = (payment_req_json.decode(), payment_method.decode()) logger.debug("build_payment_req: <<< res: %r", res) return res
0.003648
def _run_active_state_machine(self):
        """Store running state machine and observe its status
        """
        self.__running_state_machine = self.state_machine_manager.get_active_state_machine()

        if self.__running_state_machine:
            # Create new concurrency queue for root state to be able to synchronize with the execution
            self.__running_state_machine.root_state.concurrency_queue = queue.Queue(maxsize=0)
            self.__running_state_machine.start()
            self.__wait_for_finishing_thread = threading.Thread(target=self._wait_for_finishing)
            self.__wait_for_finishing_thread.start()
        else:
            logger.warning("Currently no active state machine! Please create a new state machine.")
            self.set_execution_mode(StateMachineExecutionStatus.STOPPED)
0.007455
def int_to_hex(i): """Create a hex-representation of the given serial. >>> int_to_hex(12345678) 'BC:61:4E' """ s = hex(i)[2:].upper() if six.PY2 is True and isinstance(i, long): # pragma: only py2 # NOQA # Strip the "L" suffix, since hex(1L) -> 0x1L. # NOTE: Do not convert to int earlier. int(<very-large-long>) is still long s = s[:-1] return add_colons(s)
0.004843
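# Hedged sketch of an add_colons helper like the one used above (the real one may
# differ): pad to an even length and join the hex digits in pairs.
def add_colons(s):
    if len(s) % 2:
        s = '0' + s
    return ':'.join(s[i:i + 2] for i in range(0, len(s), 2))

assert add_colons(hex(12345678)[2:].upper()) == 'BC:61:4E'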
def _validate_hue(df, hue):
    """
    The top-level ``hue`` parameter present in most plot types accepts a variety of input types. This method
    condenses this variety into a single preferred format---an iterable---which is expected by all submethods
    working with the data downstream of it.

    Parameters
    ----------
    df : GeoDataFrame
        The full data input, from which standardized ``hue`` information may need to be extracted.
    hue : Series, GeoSeries, iterable, str
        The data column whose entries are being discretely colorized, as (loosely) passed by the top-level
        ``hue`` variable.

    Returns
    -------
    hue : iterable
        The ``hue`` parameter input as an iterable.
    """
    if hue is None:
        return None
    elif isinstance(hue, str):
        hue = df[hue]
        return hue
    else:
        return gpd.GeoSeries(hue)
0.005076
def attach_video(self, video: String, caption: String = None, width: Integer = None, height: Integer = None, duration: Integer = None): """ Attach video :param video: :param caption: :param width: :param height: :param duration: :return: self """ self.media.attach_video(video, caption, width=width, height=height, duration=duration) return self
0.010989
def _delete(self, *args, **kwargs): """ A wrapper for deleting things :returns: The response of your delete :rtype: dict :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>` if there is an error from New Relic """ response = requests.delete(*args, **kwargs) if not response.ok: raise NewRelicAPIServerException('{}: {}'.format(response.status_code, response.text)) if response.text: return response.json() return {}
0.004926
def drop(self): """ Drop the table and all tables that reference it, recursively. User is prompted for confirmation if config['safemode'] is set to True. """ if self.restriction: raise DataJointError('A relation with an applied restriction condition cannot be dropped.' ' Call drop() on the unrestricted Table.') self.connection.dependencies.load() do_drop = True tables = [table for table in self.connection.dependencies.descendants(self.full_table_name) if not table.isdigit()] if config['safemode']: for table in tables: print(table, '(%d tuples)' % len(FreeTable(self.connection, table))) do_drop = user_choice("Proceed?", default='no') == 'yes' if do_drop: for table in reversed(tables): FreeTable(self.connection, table).drop_quick() print('Tables dropped. Restart kernel.')
0.00498
async def request(self, method, url, **kwargs): """Handles requests to the API""" rate_limiter = RateLimiter(max_calls=59, period=60, callback=limited) # handles ratelimits. max_calls is set to 59 because current implementation will retry in 60s after 60 calls is reached. DBL has a 1h block so obviously this doesn't work well, as it will get a 429 when 60 is reached. async with rate_limiter: # this works but doesn't 'save' over restart. need a better implementation. if not self.token: raise UnauthorizedDetected('UnauthorizedDetected (status code: 401): No TOKEN provided') headers = { 'User-Agent': self.user_agent, 'Content-Type': 'application/json' } if 'json' in kwargs: kwargs['data'] = to_json(kwargs.pop('json')) kwargs['headers'] = headers headers['Authorization'] = self.token for tries in range(5): async with self.session.request(method, url, **kwargs) as resp: log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), resp.status) data = await json_or_text(resp) if 300 > resp.status >= 200: return data if resp.status == 429: # we are being ratelimited fmt = 'We are being rate limited. Retrying in %.2f seconds (%.3f minutes).' # sleep a bit retry_after = json.loads(resp.headers.get('Retry-After')) mins = retry_after / 60 log.warning(fmt, retry_after, mins) # check if it's a global rate limit (True as only 1 ratelimit atm - /api/bots) is_global = True # is_global = data.get('global', False) if is_global: self._global_over.clear() await asyncio.sleep(retry_after, loop=self.loop) log.debug('Done sleeping for the rate limit. Retrying...') # release the global lock now that the # global rate limit has passed if is_global: self._global_over.set() log.debug('Global rate limit is now over.') continue if resp.status == 400: raise HTTPException(resp, data) elif resp.status == 401: raise Unauthorized(resp, data) elif resp.status == 403: raise Forbidden(resp, data) elif resp.status == 404: raise NotFound(resp, data) else: raise HTTPException(resp, data) # We've run out of retries, raise. raise HTTPException(resp, data)
0.004547
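# Hedged sketch of an async rate limiter with the same shape as the one used above
# (at most max_calls per period); this is an illustration, not the RateLimiter class
# the client actually imports, and it is not safe for many concurrent tasks.
import asyncio
import time
from collections import deque

class SimpleRateLimiter:
    def __init__(self, max_calls, period):
        self.max_calls = max_calls
        self.period = period
        self.calls = deque()

    async def __aenter__(self):
        while len(self.calls) >= self.max_calls:
            wait = self.calls[0] + self.period - time.monotonic()
            if wait > 0:
                await asyncio.sleep(wait)
            else:
                self.calls.popleft()
        self.calls.append(time.monotonic())
        return self

    async def __aexit__(self, *exc):
        return False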
def negociate_content(default='json-ld'):
    '''Perform content negotiation on the response format, based on the Accept header'''
    mimetype = request.accept_mimetypes.best_match(ACCEPTED_MIME_TYPES.keys())
    return ACCEPTED_MIME_TYPES.get(mimetype, default)
0.003968
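# Hedged, framework-free sketch of the same negotiation idea: map the first acceptable
# mimetype to a renderer name, falling back to a default. The mimetype table is invented
# sample data and q-values are ignored for brevity; the real ACCEPTED_MIME_TYPES and
# Flask's best_match handle this more carefully.
ACCEPTED = {
    'application/ld+json': 'json-ld',
    'text/turtle': 'turtle',
    'application/n-triples': 'n-triples',
}

def negotiate(accept_header, default='json-ld'):
    offered = [m.split(';')[0].strip() for m in accept_header.split(',')]
    best = next((m for m in offered if m in ACCEPTED), None)
    return ACCEPTED.get(best, default)

assert negotiate('text/turtle, */*;q=0.1') == 'turtle'
assert negotiate('text/html') == 'json-ld'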
def accept(self):
        """Handler for when OK is clicked."""
        input_path = self.input_path.text()
        input_title = self.line_edit_title.text()
        input_source = self.line_edit_source.text()
        output_path = self.output_path.text()
        if not output_path.endswith('.tif'):
            # noinspection PyArgumentList,PyCallByClass,PyTypeChecker
            QMessageBox.warning(
                self,
                tr('InaSAFE'),
                tr('Output file name must be tif file'))
            return
        if not os.path.exists(input_path):
            # noinspection PyArgumentList,PyCallByClass,PyTypeChecker
            QMessageBox.warning(
                self,
                tr('InaSAFE'),
                tr('Input file does not exist'))
            return
        algorithm = 'nearest'
        if self.nearest_mode.isChecked():
            algorithm = 'nearest'
        elif self.inverse_distance_mode.isChecked():
            algorithm = 'invdist'
        elif self.use_ascii_mode.isChecked():
            algorithm = 'use_ascii'
        # Smoothing
        smoothing_method = NONE_SMOOTHING
        if self.numpy_smoothing.isChecked():
            smoothing_method = NUMPY_SMOOTHING
        if self.scipy_smoothing.isChecked():
            smoothing_method = SCIPY_SMOOTHING
        # noinspection PyUnresolvedReferences
        QgsApplication.instance().setOverrideCursor(
            QtGui.QCursor(QtCore.Qt.WaitCursor)
        )
        extra_keywords = {}
        if self.check_box_custom_shakemap_id.isChecked():
            event_id = self.line_edit_shakemap_id.text()
            extra_keywords[extra_keyword_earthquake_event_id['key']] = event_id
        current_index = self.combo_box_source_type.currentIndex()
        source_type = self.combo_box_source_type.itemData(current_index)
        if source_type:
            extra_keywords[
                extra_keyword_earthquake_source['key']] = source_type
        file_name = convert_mmi_data(
            input_path,
            input_title,
            input_source,
            output_path,
            algorithm=algorithm,
            algorithm_filename_flag=True,
            smoothing_method=smoothing_method,
            extra_keywords=extra_keywords
        )
        file_info = QFileInfo(file_name)
        base_name = file_info.baseName()
        self.output_layer = QgsRasterLayer(file_name, base_name)
        # noinspection PyUnresolvedReferences
        QgsApplication.instance().restoreOverrideCursor()
        if self.load_result.isChecked():
            # noinspection PyTypeChecker
            mmi_ramp_roman(self.output_layer)
            self.output_layer.saveDefaultStyle()
            if not self.output_layer.isValid():
                LOGGER.debug("Failed to load")
            else:
                # noinspection PyArgumentList
                QgsProject.instance().addMapLayer(self.output_layer)
                iface.zoomToActiveLayer()
        if (self.keyword_wizard_checkbox.isChecked()
                and self.keyword_wizard_checkbox.isEnabled()):
            self.launch_keyword_wizard()
        self.done(self.Accepted)
0.000634
def handle_msg(self, c, e): """Handles all messages. - If a exception is thrown, catch it and display a nice traceback instead of crashing. - Do the appropriate processing for each event type. """ try: self.handler.handle_msg(c, e) except Exception as ex: backtrace.handle_traceback(ex, c, self.get_target(e), self.config)
0.007576
def stream_execute(self, code: str = '', *, mode: str = 'query', opts: dict = None) -> WebSocketResponse: ''' Executes a code snippet in the streaming mode. Since the returned websocket represents a run loop, there is no need to specify *run_id* explicitly. ''' params = {} if self.owner_access_key: params['owner_access_key'] = self.owner_access_key opts = {} if opts is None else opts if mode == 'query': opts = {} elif mode == 'batch': opts = { 'clean': opts.get('clean', None), 'build': opts.get('build', None), 'buildLog': bool(opts.get('buildLog', False)), 'exec': opts.get('exec', None), } else: msg = 'Invalid stream-execution mode: {0}'.format(mode) raise BackendClientError(msg) request = Request(self.session, 'GET', '/stream/kernel/{}/execute'.format(self.kernel_id), params=params) async def send_code(ws): await ws.send_json({ 'code': code, 'mode': mode, 'options': opts, }) return request.connect_websocket(on_enter=send_code)
0.003674
def _merge_sorted_items(self, index): """ load a partition from disk, then sort and group by key """ def load_partition(j): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) with open(p, 'rb', 65536) as f: for v in self.serializer.load_stream(f): yield v disk_items = [load_partition(j) for j in range(self.spills)] if self._sorted: # all the partitions are already sorted sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0)) else: # Flatten the combined values, so it will not consume huge # memory during merging sort. ser = self.flattened_serializer() sorter = ExternalSorter(self.memory_limit, ser) sorted_items = sorter.sorted(itertools.chain(*disk_items), key=operator.itemgetter(0)) return ((k, vs) for k, vs in GroupByKey(sorted_items))
0.001959
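# Hedged illustration of the k-way merge step: heapq.merge lazily combines already-sorted
# iterables, each passed as its own argument, into one sorted stream of (key, value)
# pairs without materialising them in memory. The runs below are invented sample data.
import heapq
import operator

run_a = [(1, 'a'), (3, 'c')]
run_b = [(2, 'b'), (4, 'd')]
merged = list(heapq.merge(run_a, run_b, key=operator.itemgetter(0)))
assert merged == [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]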
def chromiumContext(self, url, extra_tid=None):
        '''
        Return an active chromium context, usable for manual operations directly against
        chromium.

        The WebRequest user agent and other context are synchronized into the chromium instance
        at startup, and changes are flushed back to the WebRequest instance from chromium
        at completion.
        '''

        assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!"

        if extra_tid is True:
            extra_tid = threading.get_ident()

        return self._chrome_context(url, extra_tid=extra_tid)
0.027682
def _pos(self, idx): """Convert an index into a pair (alpha, beta) that can be used to access the corresponding _lists[alpha][beta] position. Most queries require the index be built. Details of the index are described in self._build_index. Indexing requires traversing the tree to a leaf node. Each node has two children which are easily computable. Given an index, pos, the left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2. When the index is less than the left-child, traversal moves to the left sub-tree. Otherwise, the index is decremented by the left-child and traversal moves to the right sub-tree. At a child node, the indexing pair is computed from the relative position of the child node as compared with the offset and the remaining index. For example, using the index from self._build_index: _index = 14 5 9 3 2 4 5 _offset = 3 Tree: 14 5 9 3 2 4 5 Indexing position 8 involves iterating like so: 1. Starting at the root, position 0, 8 is compared with the left-child node (5) which it is greater than. When greater the index is decremented and the position is updated to the right child node. 2. At node 9 with index 3, we again compare the index to the left-child node with value 4. Because the index is the less than the left-child node, we simply traverse to the left. 3. At node 4 with index 3, we recognize that we are at a leaf node and stop iterating. 4. To compute the sublist index, we subtract the offset from the index of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we simply use the index remaining from iteration. In this case, 3. The final index pair from our example is (2, 3) which corresponds to index 8 in the sorted list. """ if idx < 0: last_len = len(self._lists[-1]) if (-idx) <= last_len: return len(self._lists) - 1, last_len + idx idx += self._len if idx < 0: raise IndexError('list index out of range') elif idx >= self._len: raise IndexError('list index out of range') if idx < len(self._lists[0]): return 0, idx _index = self._index if not _index: self._build_index() pos = 0 child = 1 len_index = len(_index) while child < len_index: index_child = _index[child] if idx < index_child: pos = child else: idx -= index_child pos = child + 1 child = (pos << 1) + 1 return (pos - self._offset, idx)
0.001372
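# Standalone walk-through of the traversal described in the docstring, using the sample
# index tree (_offset = 3). Illustrative only; the real method also handles negative
# indices and builds the index lazily.
_index = [14, 5, 9, 3, 2, 4, 5]
_offset = 3

def pos(idx):
    node, child = 0, 1
    while child < len(_index):
        if idx < _index[child]:
            node = child
        else:
            idx -= _index[child]
            node = child + 1
        child = (node << 1) + 1
    return node - _offset, idx

assert pos(8) == (2, 3)    # sublist 2, position 3, as in the docstring example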
def reference_to_greatcircle(reference_frame, greatcircle_frame):
    """Convert a reference coordinate to a great circle frame."""

    # Define rotation matrices along the position angle vector, and
    # relative to the origin.
    pole = greatcircle_frame.pole.transform_to(coord.ICRS)
    ra0 = greatcircle_frame.ra0
    center = greatcircle_frame.center

    R_rot = rotation_matrix(greatcircle_frame.rotation, 'z')

    if not np.isnan(ra0):
        xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.])
        zaxis = pole.cartesian.xyz.value
        if np.abs(zaxis[2]) >= 1e-15:
            # choose xaxis[2] so that xaxis is orthogonal to the pole (zaxis)
            xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2]
        else:
            xaxis[2] = 0.

        xaxis = xaxis / np.sqrt(np.sum(xaxis**2))
        yaxis = np.cross(zaxis, xaxis)
        R = np.stack((xaxis, yaxis, zaxis))

    elif center is not None:
        R1 = rotation_matrix(pole.ra, 'z')
        R2 = rotation_matrix(90*u.deg - pole.dec, 'y')
        Rtmp = matrix_product(R2, R1)
        rot = center.cartesian.transform(Rtmp)
        rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon
        R3 = rotation_matrix(rot_lon, 'z')
        R = matrix_product(R3, R2, R1)

    else:
        R1 = rotation_matrix(pole.ra, 'z')
        R2 = rotation_matrix(pole.dec, 'y')
        R = matrix_product(R2, R1)

    return matrix_product(R_rot, R)
0.002175
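# Hedged numpy-only illustration of the core geometry: a frame rotation Rz(ra) followed
# by Ry(90 deg - dec) sends the chosen pole to the +z axis of the new frame. The sign
# conventions below are my own passive-rotation matrices and may differ from astropy's
# rotation_matrix; the pole coordinates are arbitrary sample values.
import numpy as np

def rot_z(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, s, 0.], [-s, c, 0.], [0., 0., 1.]])

def rot_y(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, 0., -s], [0., 1., 0.], [s, 0., c]])

ra, dec = np.radians(128.0), np.radians(54.0)
pole = np.array([np.cos(dec) * np.cos(ra), np.cos(dec) * np.sin(ra), np.sin(dec)])
R = rot_y(np.pi / 2 - dec) @ rot_z(ra)
assert np.allclose(R @ pole, [0., 0., 1.])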