Columns: text (string, lengths 78 to 104k characters) and score (float64, range 0 to 0.18). Each record below pairs a code sample (text) with its score on the following line.
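As a minimal sketch of how such records could be consumed programmatically — assuming they are exported as a JSON Lines file named samples.jsonl with "text" and "score" keys, which this dump does not specify — one might iterate them as follows:

import json

# Hypothetical export path; the dump itself does not name a file.
PATH = "samples.jsonl"

def iter_samples(path):
    """Yield (text, score) pairs from a JSON Lines export with 'text' and 'score' keys."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            record = json.loads(line)
            yield record["text"], float(record["score"])

if __name__ == "__main__":
    for text, score in iter_samples(PATH):
        # Show each sample's score next to the first 60 characters of its code.
        print("{:.6f}\t{}".format(score, text.splitlines()[0][:60]))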
def run_spp(self, input_bam, output, plot, cpus):
    """
    Run the SPP read peak analysis tool.

    :param str input_bam: Path to reads file
    :param str output: Path to output file
    :param str plot: Path to plot file
    :param int cpus: Number of processors to use
    :return str: Command with which to run SPP
    """
    base = "{} {} -rf -savp".format(self.tools.Rscript, self.tools.spp)
    cmd = base + " -savp={} -s=0:5:500 -c={} -out={} -p={}".format(
        plot, input_bam, output, cpus)
    return cmd
0.003497
def from_path(path):
    ''' Create from path. Return `None` if the path does not exist. '''
    if os.path.isdir(path):
        return DirectoryInfo(path)
    if os.path.isfile(path):
        return FileInfo(path)
    return None
0.007246
def export(self, location):
    """Export the svn repository at the url to the destination location"""
    url, rev = self.get_url_rev()
    logger.notify('Exporting svn repository %s to %s' % (url, location))
    logger.indent += 2
    try:
        if os.path.exists(location):
            # Subversion doesn't like to check out over an existing directory
            # --force fixes this, but was only added in svn 1.5
            rmtree(location)
        call_subprocess(
            [self.cmd, 'export', url, location],
            filter_stdout=self._filter, show_stdout=False)
    finally:
        logger.indent -= 2
0.004425
def _setup_positions(self, positions): """Processes positions to account for ranges Arguments: positions - list of positions and/or ranges to process """ updated_positions = [] for i, position in enumerate(positions): ranger = re.search(r'(?P<start>-?\d*):(?P<end>\d*)', position) if ranger: if i > 0: updated_positions.append(self.separator) start = group_val(ranger.group('start')) end = group_val(ranger.group('end')) if start and end: updated_positions.extend(self._extendrange(start, end + 1)) # Since the number of positions on a line is unknown, # send input to cause exception that can be caught and call # _cut_range helper function elif ranger.group('start'): updated_positions.append([start]) else: updated_positions.extend(self._extendrange(1, end + 1)) else: updated_positions.append(positions[i]) try: if int(position) and int(positions[i+1]): updated_positions.append(self.separator) except (ValueError, IndexError): pass return updated_positions
0.001427
def get_pool(cls) -> Pool:
    """
    Yields:
        existing db connection pool
    """
    if len(cls._connection_params) < 5:
        raise ConnectionError('Please call SQLStore.connect before calling this method')
    if not cls._pool:
        cls._pool = yield from create_pool(**cls._connection_params)
    return cls._pool
0.008174
def namespace(self):
    """
    Return the Namespace URI (if any) as a String for the current tag
    """
    if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
        return u''

    # No Namespace
    if self.m_namespaceUri == 0xFFFFFFFF:
        return u''

    return self.sb[self.m_namespaceUri]
0.007895
def _func_addrs_from_prologues(self): """ Scan the entire program image for function prologues, and start code scanning at those positions :return: A list of possible function addresses """ # Pre-compile all regexes regexes = list() for ins_regex in self.project.arch.function_prologs: r = re.compile(ins_regex) regexes.append(r) # EDG says: I challenge anyone bothering to read this to come up with a better # way to handle CPU modes that affect instruction decoding. # Since the only one we care about is ARM/Thumb right now # we have this gross hack. Sorry about that. thumb_regexes = list() if hasattr(self.project.arch, 'thumb_prologs'): for ins_regex in self.project.arch.thumb_prologs: # Thumb prologues are found at even addrs, but their actual addr is odd! # Isn't that great? r = re.compile(ins_regex) thumb_regexes.append(r) # Construct the binary blob first unassured_functions = [ ] for start_, bytes_ in self._binary.memory.backers(): for regex in regexes: # Match them! for mo in regex.finditer(bytes_): position = mo.start() + start_ if position % self.project.arch.instruction_alignment == 0: mapped_position = AT.from_rva(position, self._binary).to_mva() if self._addr_in_exec_memory_regions(mapped_position): unassured_functions.append(mapped_position) # HACK part 2: Yes, i really have to do this for regex in thumb_regexes: # Match them! for mo in regex.finditer(bytes_): position = mo.start() + start_ if position % self.project.arch.instruction_alignment == 0: mapped_position = AT.from_rva(position, self._binary).to_mva() if self._addr_in_exec_memory_regions(mapped_position): unassured_functions.append(mapped_position+1) l.info("Found %d functions with prologue scanning.", len(unassured_functions)) return unassured_functions
0.003856
def _makedos(self, ax, dos_plotter, dos_options, dos_label=None): """This is basically the same as the SDOSPlotter get_plot function.""" # don't use first 4 colours; these are the band structure line colours cycle = cycler( 'color', rcParams['axes.prop_cycle'].by_key()['color'][4:]) with context({'axes.prop_cycle': cycle}): plot_data = dos_plotter.dos_plot_data(**dos_options) mask = plot_data['mask'] energies = plot_data['energies'][mask] lines = plot_data['lines'] spins = [Spin.up] if len(lines[0][0]['dens']) == 1 else \ [Spin.up, Spin.down] for line_set in plot_data['lines']: for line, spin in it.product(line_set, spins): if spin == Spin.up: label = line['label'] densities = line['dens'][spin][mask] else: label = "" densities = -line['dens'][spin][mask] ax.fill_betweenx(energies, densities, 0, lw=0, facecolor=line['colour'], alpha=line['alpha']) ax.plot(densities, energies, label=label, color=line['colour']) # x and y axis reversed versus normal dos plotting ax.set_ylim(dos_options['xmin'], dos_options['xmax']) ax.set_xlim(plot_data['ymin'], plot_data['ymax']) if dos_label is not None: ax.set_xlabel(dos_label) ax.set_xticklabels([]) ax.legend(loc=2, frameon=False, ncol=1, bbox_to_anchor=(1., 1.))
0.001207
def _ensure_frames(cls, documents):
    """
    Ensure all items in a list are frames by converting those that aren't.
    """
    frames = []
    for document in documents:
        if not isinstance(document, Frame):
            frames.append(cls(document))
        else:
            frames.append(document)
    return frames
0.005464
def dispatch(self, frame):
    '''
    Override the default dispatch since we don't need the rest of the stack.
    '''
    if frame.type() == HeartbeatFrame.type():
        self.send_heartbeat()

    elif frame.type() == MethodFrame.type():
        if frame.class_id == 10:
            cb = self._method_map.get(frame.method_id)
            if cb:
                method = self.clear_synchronous_cb(cb)
                method(frame)
            else:
                raise Channel.InvalidMethod(
                    "unsupported method %d on channel %d",
                    frame.method_id, self.channel_id)
        else:
            raise Channel.InvalidClass(
                "class %d is not supported on channel %d",
                frame.class_id, self.channel_id)
    else:
        raise Frame.InvalidFrameType(
            "frame type %d is not supported on channel %d",
            frame.type(), self.channel_id)
0.001955
def run(self, generations, p_mutate, p_crossover, elitist=True, two_point_crossover=False, refresh_after=None, quit_after=None): """ Run a standard genetic algorithm simulation for a set number of generations (iterations), each consisting of the following ordered steps: 1. competition/survival of the fittest (``compete`` method) 2. reproduction (``reproduce`` method) 3. mutation (``mutate`` method) 4. check if the new population's fittest is fitter than the overall fittest 4a. if not and the ``elitist`` option is active, replace the weakest solution with the overall fittest generations: how many generations to run p_mutate: probability of mutation in [0, 1] p_crossover: probability in [0, 1] that a crossover event will occur for each offspring elitist (default=True): option to replace the weakest solution with the strongest if a new one is not found each generation two_point_crossover (default=False): whether 2-point crossover is used refresh_after: number of generations since the last upset after which to randomly generate a new population quit_after: number of generations since the last upset after which to stop the run, possibly before reaching ``generations`` iterations return: the overall fittest solution (chromosome) """ start_time = time.time() assert 0 <= p_mutate <= 1 assert 0 <= p_crossover <= 1 # these values guaranteed to be replaced in first generation self.min_fit_ever = 1e999999999 self.max_fit_ever = -1e999999999 self.generation_fittest.clear() self.generation_fittest_fit.clear() self.overall_fittest_fit.clear() self.new_fittest_generations.clear() overall_fittest = self.get_fittest() overall_fittest_fit = self.get_fitness(overall_fittest) gens_since_upset = 0 for gen in range(1, generations + 1): survivors = self.compete(self.chromosomes) self.chromosomes = self.reproduce(survivors, p_crossover, two_point_crossover=two_point_crossover) self.mutate(self.chromosomes, p_mutate) # check for new fittest gen_fittest = self.get_fittest().copy() gen_fittest_fit = self.get_fitness(gen_fittest) if gen_fittest_fit > overall_fittest_fit: overall_fittest = gen_fittest overall_fittest_fit = gen_fittest_fit self.new_fittest_generations.append(gen) gens_since_upset = 0 else: gens_since_upset += 1 if elitist: # no new fittest found, replace least fit with overall fittest self.sort(self.chromosomes) self.chromosomes[0].dna = overall_fittest.dna if quit_after and gens_since_upset >= quit_after: print("quitting on generation", gen, "after", quit_after, "generations with no upset") break if refresh_after and gens_since_upset >= refresh_after: # been a very long time since a new best solution -- mix things up print("refreshing on generation", gen) self.mutate(self.chromosomes, 0.5) gens_since_upset = 0 self.generation_fittest[gen] = gen_fittest self.generation_fittest_fit[gen] = gen_fittest_fit self.overall_fittest_fit[gen] = overall_fittest_fit if self.should_terminate(overall_fittest): break self.fitness_cache.clear() self.run_time_s = time.time() - start_time return overall_fittest
0.008331
def connect(self, address='session'): """Connect to *address* and wait until the connection is established. The *address* argument must be a D-BUS server address, in the format described in the D-BUS specification. It may also be one of the special addresses ``'session'`` or ``'system'``, to connect to the D-BUS session and system bus, respectively. """ if isinstance(address, six.string_types): addresses = parse_dbus_address(address) else: addresses = [address] for addr in addresses: try: super(DbusClient, self).connect(addr) except pyuv.error.UVError: continue break else: raise DbusError('could not connect to any address') # Wait for authentication to complete self.get_unique_name()
0.00224
def get_select_sql(self):
    """
    Gets the SELECT field portion for the field without the alias. If the field
    has a table, it will be included here like AggregateFunction(table.field)

    :return: Gets the SELECT field portion for the field without the alias
    :rtype: str
    """
    return '{0}({1}{2}){3}'.format(
        self.name.upper(),
        self.get_distinct(),
        self.get_field_identifier(),
        self.get_over(),
    )
0.008016
def get_assessment_offered_lookup_session_for_bank(self, bank_id): """Gets the ``OsidSession`` associated with the assessment offered lookup service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank return: (osid.assessment.AssessmentOfferedLookupSession) - an ``AssessmentOfferedLookupSession`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_offered_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_offered_lookup()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_assessment_offered_lookup(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AssessmentOfferedLookupSession(bank_id, runtime=self._runtime)
0.004177
async def process_ltd_doc(session, github_api_token, ltd_product_url, mongo_collection=None): """Ingest any kind of LSST document hosted on LSST the Docs from its source. Parameters ---------- session : `aiohttp.ClientSession` Your application's aiohttp client session. See http://aiohttp.readthedocs.io/en/stable/client.html. github_api_token : `str` A GitHub personal API token. See the `GitHub personal access token guide`_. ltd_product_url : `str` URL of the technote's product resource in the LTD Keeper API. mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional MongoDB collection. This should be the common MongoDB collection for LSST projectmeta JSON-LD records. If provided, ths JSON-LD is upserted into the MongoDB collection. Returns ------- metadata : `dict` JSON-LD-formatted dictionary. .. `GitHub personal access token guide`: https://ls.st/41d """ logger = logging.getLogger(__name__) ltd_product_data = await get_ltd_product(session, url=ltd_product_url) # Ensure the LTD product is a document product_name = ltd_product_data['slug'] doc_handle_match = DOCUMENT_HANDLE_PATTERN.match(product_name) if doc_handle_match is None: logger.debug('%s is not a document repo', product_name) return # Figure out the format of the document by probing for metadata files. # reStructuredText-based Sphinx documents have metadata.yaml file. try: return await process_sphinx_technote(session, github_api_token, ltd_product_data, mongo_collection=mongo_collection) except NotSphinxTechnoteError: # Catch error so we can try the next format logger.debug('%s is not a Sphinx-based technote.', product_name) except Exception: # Something bad happened trying to process the technote. # Log and just move on. logger.exception('Unexpected error trying to process %s', product_name) return # Try interpreting it as a Lander page with a /metadata.jsonld document try: return await process_lander_page(session, github_api_token, ltd_product_data, mongo_collection=mongo_collection) except NotLanderPageError: # Catch error so we can try the next format logger.debug('%s is not a Lander page with a metadata.jsonld file.', product_name) except Exception: # Something bad happened; log and move on logger.exception('Unexpected error trying to process %s', product_name) return
0.000345
def deaccent(text):
    """
    Remove accentuation from the given string. Input text is either a unicode
    string or utf8 encoded bytestring.

    Return input string with accents removed, as unicode.

    >>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
    u'Sef chomutovskych komunistu dostal postou bily prasek'

    """
    if not isinstance(text, unicode):
        # assume utf8 for byte strings, use default (strict) error handling
        text = text.decode('utf8')
    norm = unicodedata.normalize("NFD", text)
    result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
    return unicodedata.normalize("NFC", result)
0.003003
def _format_option_value(optdict, value):
    """return the user input's value from a 'compiled' value"""
    if isinstance(value, (list, tuple)):
        value = ",".join(_format_option_value(optdict, item) for item in value)
    elif isinstance(value, dict):
        value = ",".join("%s:%s" % (k, v) for k, v in value.items())
    elif hasattr(value, "match"):  # optdict.get('type') == 'regexp'
        # compiled regexp
        value = value.pattern
    elif optdict.get("type") == "yn":
        value = "yes" if value else "no"
    elif isinstance(value, str) and value.isspace():
        value = "'%s'" % value
    return value
0.001577
def comment(self, s, **args):
    """Write GML comment."""
    self.writeln(s=u'comment "%s"' % s, **args)
0.026087
def reserve_ipblock(self, ipblock):
    """
    Reserves an IP block within your account.
    """
    properties = {
        "name": ipblock.name
    }

    if ipblock.location:
        properties['location'] = ipblock.location

    if ipblock.size:
        properties['size'] = str(ipblock.size)

    raw = {
        "properties": properties,
    }

    response = self._perform_request(
        url='/ipblocks', method='POST', data=json.dumps(raw))

    return response
0.003731
def stream_download_async(self, response, user_callback):
    """Async Generator for streaming request body data.

    :param response: The initial response
    :param user_callback: Custom callback for monitoring progress.
    """
    block = self.config.connection.data_block_size
    return StreamDownloadGenerator(response, user_callback, block)
0.005376
def __process_sentence(sentence_tuple, counts): """pull the actual sentence from the tuple (tuple contains additional data such as ID) :param _sentence_tuple: :param counts: """ sentence = sentence_tuple[2] # now we start replacing words one type at a time... sentence = __replace_verbs(sentence, counts) sentence = __replace_nouns(sentence, counts) sentence = ___replace_adjective_maybe(sentence, counts) sentence = __replace_adjective(sentence, counts) sentence = __replace_names(sentence, counts) # here we perform a check to see if we need to use A or AN depending on the # first letter of the following word... sentence = __replace_an(sentence) # replace the new repeating segments sentence = __replace_repeat(sentence) # now we will read, choose and substitute each of the RANDOM sentence tuples sentence = __replace_random(sentence) # now we are going to choose whether to capitalize words/sentences or not sentence = __replace_capitalise(sentence) # here we will choose whether to capitalize all words in the sentence sentence = __replace_capall(sentence) # check for appropriate spaces in the correct places. sentence = __check_spaces(sentence) return sentence
0.003118
def serialize(self, value, **kwargs):
    """Return a serialized copy of the tuple"""
    kwargs.update({'include_class': kwargs.get('include_class', True)})
    if self.serializer is not None:
        return self.serializer(value, **kwargs)
    if value is None:
        return None
    serial_list = [self.prop.serialize(val, **kwargs) for val in value]
    return serial_list
0.004619
def make_link_node(rawtext, app, name, options):
    """
    Create a link to the TL reference.

    :param rawtext: Text being replaced with link node.
    :param app: Sphinx application context
    :param name: Name of the object to link to
    :param options: Options dictionary passed to role func.
    """
    try:
        base = app.config.tl_ref_url
        if not base:
            raise AttributeError
    except AttributeError as e:
        raise ValueError('tl_ref_url config value is not set') from e

    if base[-1] != '/':
        base += '/'

    set_classes(options)
    node = nodes.reference(rawtext, utils.unescape(name),
                           refuri='{}?q={}'.format(base, name),
                           **options)
    return node
0.001318
def send_sticker(self, sticker: str, reply: Message=None, on_success: callable=None,
                 reply_markup: botapi.ReplyMarkup=None):
    """
    Send sticker to this peer.

    :param sticker: File path to sticker to send.
    :param reply: Message object.
    :param on_success: Callback to call when call is complete.

    :type reply: int or Message
    """
    self.twx.send_sticker(peer=self, sticker=sticker, reply_to_message_id=reply,
                          on_success=on_success, reply_markup=reply_markup)
0.019435
def predicted_obs_vec(self):
    '''The predicted observation vector

    The observation vector for the next step in the filter.
    '''
    if not self.has_cached_obs_vec:
        self.obs_vec = dot_n(
            self.observation_matrix,
            self.predicted_state_vec[:,:,np.newaxis])[:,:,0]
    return self.obs_vec
0.016807
def decode(s):
    """
    Decode a string using the system encoding if needed (ie byte strings)
    """
    if isinstance(s, bytes):
        return s.decode(sys.getdefaultencoding())
    else:
        return s
0.004608
def generate_histograms_table(samples_table, samples, max_bins=1024): """ Generate a table of histograms as a DataFrame. Parameters ---------- samples_table : DataFrame Table specifying samples to analyze. For more information about the fields required in this table, please consult the module's documentation. samples : list FCSData objects from which to calculate histograms. ``samples[i]`` should correspond to ``samples_table.iloc[i]`` max_bins : int, optional Maximum number of bins to use. Returns ------- hist_table : DataFrame A multi-indexed DataFrame. Rows cotain the histogram bins and counts for every sample and channel specified in samples_table. `hist_table` is indexed by the sample's ID, the channel name, and whether the row corresponds to bins or counts. """ # Extract channels that require stats histograms headers = list(samples_table.columns) hist_headers = [h for h in headers if re_units.match(h)] hist_channels = [re_units.match(h).group(1) for h in hist_headers] # The number of columns in the DataFrame has to be set to the maximum # number of bins of any of the histograms about to be generated. # The following iterates through these histograms and finds the # largest. n_columns = 0 for sample_id, sample in zip(samples_table.index, samples): if isinstance(sample, ExcelUIException): continue for header, channel in zip(hist_headers, hist_channels): if pd.notnull(samples_table[header][sample_id]): if n_columns < sample.resolution(channel): n_columns = sample.resolution(channel) # Saturate at max_bins if n_columns > max_bins: n_columns = max_bins # Declare multi-indexed DataFrame index = pd.MultiIndex.from_arrays([[],[],[]], names = ['Sample ID', 'Channel', '']) columns = ['Bin {}'.format(i + 1) for i in range(n_columns)] hist_table = pd.DataFrame([], index=index, columns=columns) # Generate histograms for sample_id, sample in zip(samples_table.index, samples): if isinstance(sample, ExcelUIException): continue for header, channel in zip(hist_headers, hist_channels): if pd.notnull(samples_table[header][sample_id]): # Get units in which bins are being reported unit = samples_table[header][sample_id] # Decide which scale to use # Channel units result in linear scale. Otherwise, use logicle. if unit == 'Channel': scale = 'linear' else: scale = 'logicle' # Define number of bins nbins = min(sample.resolution(channel), max_bins) # Calculate bin edges and centers # We generate twice the necessary number of bins. We then take # every other value as the proper bin edges, and the remaining # values as the bin centers. bins_extended = sample.hist_bins(channel, 2*nbins, scale) bin_edges = bins_extended[::2] bin_centers = bins_extended[1::2] # Store bin centers hist_table.loc[(sample_id, channel, 'Bin Centers ({})'.format(unit)), columns[0:len(bin_centers)]] = bin_centers # Calculate and store histogram counts hist, __ = np.histogram(sample[:,channel], bins=bin_edges) hist_table.loc[(sample_id, channel, 'Counts'), columns[0:len(bin_centers)]] = hist return hist_table
0.001809
def omap(m):
    '''
    Create a nested mapping from origin to property to values/attributes
    covering an entire Versa model
    '''
    om = {}
    for s, p, o, a in m.match():
        om.setdefault(s, {})
        om[s].setdefault(p, []).append((o, a))
    return om
0.007491
def make_ner_file(self, clean_visible_path, ner_xml_path): '''run tagger a child process to get XML output''' if self.template is None: raise exceptions.NotImplementedError(''' Subclasses must specify a class property "template" that provides command string format for running a tagger. It should take %(tagger_root_path)s as the path from the config file, %(clean_visible_path)s as the input XML file, and %(ner_xml_path)s as the output path to create. ''') tagger_config = dict( tagger_root_path=self.config['tagger_root_path'], clean_visible_path=clean_visible_path, ner_xml_path=ner_xml_path) ## get a java_heap_size or default to 1GB tagger_config['java_heap_size'] = self.config.get('java_heap_size', '') cmd = self.template % tagger_config start_time = time.time() ## make sure we are using as little memory as possible gc.collect() try: self._child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True) except OSError, exc: msg = traceback.format_exc(exc) msg += make_memory_info_msg(clean_visible_path, ner_xml_path) raise PipelineOutOfMemory(msg) s_out, errors = self._child.communicate() if not self._child.returncode == 0: if 'java.lang.OutOfMemoryError' in errors: msg = errors + make_memory_info_msg(clean_visible_path, ner_xml_path) raise PipelineOutOfMemory(msg) elif self._child.returncode == 137: msg = 'tagger returncode = 137\n' + errors msg += make_memory_info_msg(clean_visible_path, ner_xml_path) # maybe get a tail of /var/log/messages raise PipelineOutOfMemory(msg) elif 'Exception' in errors: raise PipelineBaseException(errors) else: raise PipelineBaseException('tagger exited with %r' % self._child.returncode) elapsed = time.time() - start_time logger.info('finished tagging in %.1f seconds' % elapsed) return elapsed
0.003247
def _get_system_python_executable(): """ Returns the path the system-wide python binary. (In case we're running in a virtualenv or venv) """ # This function is required by get_package_as_folder() to work # inside a virtualenv, since venv creation will fail with # the virtualenv's local python binary. # (venv/virtualenv incompatibility) # Abort if not in virtualenv or venv: if not hasattr(sys, "real_prefix") and ( not hasattr(sys, "base_prefix") or os.path.normpath(sys.base_prefix) == os.path.normpath(sys.prefix)): return sys.executable # Extract prefix we need to look in: if hasattr(sys, "real_prefix"): search_prefix = sys.real_prefix # virtualenv else: search_prefix = sys.base_prefix # venv def python_binary_from_folder(path): def binary_is_usable(python_bin): try: filenotfounderror = FileNotFoundError except NameError: # Python 2 filenotfounderror = OSError try: subprocess.check_output([ os.path.join(path, python_bin), "--version" ], stderr=subprocess.STDOUT) return True except (subprocess.CalledProcessError, filenotfounderror): return False python_name = "python" + sys.version while (not binary_is_usable(python_name) and python_name.find(".") > 0): # Try less specific binary name: python_name = python_name.rpartition(".")[0] if binary_is_usable(python_name): return os.path.join(path, python_name) return None # Return from sys.real_prefix if present: result = python_binary_from_folder(search_prefix) if result is not None: return result # Check out all paths in $PATH: bad_candidates = [] good_candidates = [] ever_had_nonvenv_path = False for p in os.environ.get("PATH", "").split(":"): # Skip if not possibly the real system python: if not os.path.normpath(p).startswith( os.path.normpath(search_prefix) ): continue # First folders might be virtualenv/venv we want to avoid: if not ever_had_nonvenv_path: sep = os.path.sep if ("system32" not in p.lower() and "usr" not in p) or \ {"home", ".tox"}.intersection(set(p.split(sep))) or \ "users" in p.lower(): # Doesn't look like bog-standard system path. if (p.endswith(os.path.sep + "bin") or p.endswith(os.path.sep + "bin" + os.path.sep)): # Also ends in "bin" -> likely virtualenv/venv. # Add as unfavorable / end of candidates: bad_candidates.append(p) continue ever_had_nonvenv_path = True good_candidates.append(p) # See if we can now actually find the system python: for p in good_candidates + bad_candidates: result = python_binary_from_folder(p) if result is not None: return result raise RuntimeError("failed to locate system python in: " + sys.real_prefix)
0.000301
def spawn(func, *args, **kwargs):
    """Spawn a new fiber.

    A new :class:`Fiber` is created with main function *func* and positional
    arguments *args*. The keyword arguments are passed to the :class:`Fiber`
    constructor, not to the main function. The fiber is then scheduled to
    start by calling its :meth:`~Fiber.start` method.

    The fiber instance is returned.
    """
    fiber = Fiber(func, args, **kwargs)
    fiber.start()
    return fiber
0.002165
def downsample(self, factor): """ Compute a downsampled version of the skeleton by striding while preserving endpoints. factor: stride length for downsampling the saved skeleton paths. Returns: downsampled PrecomputedSkeleton """ if int(factor) != factor or factor < 1: raise ValueError("Argument `factor` must be a positive integer greater than or equal to 1. Got: <{}>({})", type(factor), factor) paths = self.interjoint_paths() for i, path in enumerate(paths): paths[i] = np.concatenate( (path[0::factor, :], path[-1:, :]) # preserve endpoints ) ds_skel = PrecomputedSkeleton.simple_merge( [ PrecomputedSkeleton.from_path(path) for path in paths ] ).consolidate() ds_skel.id = self.id # TODO: I'm sure this could be sped up if need be. index = {} for i, vert in enumerate(self.vertices): vert = tuple(vert) index[vert] = i for i, vert in enumerate(ds_skel.vertices): vert = tuple(vert) ds_skel.radii[i] = self.radii[index[vert]] ds_skel.vertex_types[i] = self.vertex_types[index[vert]] return ds_skel
0.011384
def setSceneRect(self, *args):
    """
    Overloads the set scene rect to handle rebuild information.
    """
    super(XChartScene, self).setSceneRect(*args)
    self._dirty = True
0.019139
def get_properties(self, packet, bt_addr):
    """Get properties of beacon depending on type."""
    if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \
                          EddystoneEncryptedTLMFrame, EddystoneEIDFrame]):
        # here we retrieve the namespace and instance which corresponds to the
        # eddystone beacon with this bt address
        return self.properties_from_mapping(bt_addr)
    else:
        return packet.properties
0.00813
def get(model, *args, **kwargs):
    '''
    Get an autofixture instance for the passed in *model*, using either an
    appropriate autofixture that was :ref:`registered <registry>` or falling
    back to the default :class:`AutoFixture` class. *model* can be a model
    class or its string representation (e.g. ``"app.ModelClass"``).

    All positional and keyword arguments are passed to the autofixture
    constructor.
    '''
    from .compat import get_model

    if isinstance(model, string_types):
        model = get_model(*model.split('.', 1))
    if model in REGISTRY:
        return REGISTRY[model](model, *args, **kwargs)
    else:
        return AutoFixture(model, *args, **kwargs)
0.001441
def _sendMouseEvent(ev, x, y, dwData=0): """The helper function that actually makes the call to the mouse_event() win32 function. Args: ev (int): The win32 code for the mouse event. Use one of the MOUSEEVENTF_* constants for this argument. x (int): The x position of the mouse event. y (int): The y position of the mouse event. dwData (int): The argument for mouse_event()'s dwData parameter. So far this is only used by mouse scrolling. Returns: None """ assert x != None and y != None, 'x and y cannot be set to None' # TODO: ARG! For some reason, SendInput isn't working for mouse events. I'm switching to using the older mouse_event win32 function. #mouseStruct = MOUSEINPUT() #mouseStruct.dx = x #mouseStruct.dy = y #mouseStruct.mouseData = ev #mouseStruct.time = 0 #mouseStruct.dwExtraInfo = ctypes.pointer(ctypes.c_ulong(0)) # according to https://stackoverflow.com/questions/13564851/generate-keyboard-events I can just set this. I don't really care about this value. #inputStruct = INPUT() #inputStruct.mi = mouseStruct #inputStruct.type = INPUT_MOUSE #ctypes.windll.user32.SendInput(1, ctypes.pointer(inputStruct), ctypes.sizeof(inputStruct)) width, height = _size() convertedX = 65536 * x // width + 1 convertedY = 65536 * y // height + 1 ctypes.windll.user32.mouse_event(ev, ctypes.c_long(convertedX), ctypes.c_long(convertedY), dwData, 0)
0.012146
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path): """ Starts up a VM, builds an docker image and gets it to the VM, runs the script over SSH, returns result. Stops the VM if ``keep_vm_running`` is not set. """ from fabric import api from fabric.exceptions import CommandTimeout # start up or get running VM vm_location = self.get_vm_location() self.ensure_vm_running(vm_location) logger.info("Running with VM located at %s", vm_location) # pushes the image to the registry so it can be pulled in the VM self.check_docker_access() # init client self.get_image_for_repo(repo, branch, git_repo, repo_path) requirements_option, requirements_hash = self.get_requirements_information(repo_path) # getting things needed for execution over SSH image_tag = self.get_image_tag(requirements_option, requirements_hash, self.get_dependencies()) image_name = self.use_registry_name task_filename, task_json = self.serialized_task(task) (vm_location / task_filename).write_text(task_json) container_name = self.get_container_name(repo, branch, git_repo) # setting up Fabric api.env.hosts = [self.vagrant.user_hostname_port()] api.env.key_filename = self.vagrant.keyfile() api.env.disable_known_hosts = True # useful for when the vagrant box ip changes. api.env.abort_exception = BuildError # raises SystemExit otherwise api.env.shell = "/bin/sh -l -c" if self.quiet: api.output.everything = False else: api.output.everything = True # executes the task try: res = api.execute(self.fabric_task, container_name=container_name, definition_filename=task_filename, image_name=image_name, image_tag=image_tag, repository=str(repo_path.relative_to(Path(self._arca.base_dir).resolve() / 'repos')), timeout=task.timeout) return Result(res[self.vagrant.user_hostname_port()].stdout) except CommandTimeout: raise BuildTimeoutError(f"The task timeouted after {task.timeout} seconds.") except BuildError: # can be raised by :meth:`Result.__init__` raise except Exception as e: logger.exception(e) raise BuildError("The build failed", extra_info={ "exception": e }) finally: # stops or destroys the VM if it should not be kept running if not self.keep_vm_running: if self.destroy: self.vagrant.destroy() shutil.rmtree(self.vagrant.root, ignore_errors=True) self.vagrant = None else: self.vagrant.halt()
0.002972
def add_resource_attrs_from_type(type_id, resource_type, resource_id,**kwargs): """ adds all the attributes defined by a type to a node. """ type_i = _get_templatetype(type_id) resource_i = _get_resource(resource_type, resource_id) resourceattr_qry = db.DBSession.query(ResourceAttr).filter(ResourceAttr.ref_key==resource_type) if resource_type == 'NETWORK': resourceattr_qry.filter(ResourceAttr.network_id==resource_id) elif resource_type == 'NODE': resourceattr_qry.filter(ResourceAttr.node_id==resource_id) elif resource_type == 'LINK': resourceattr_qry.filter(ResourceAttr.link_id==resource_id) elif resource_type == 'GROUP': resourceattr_qry.filter(ResourceAttr.group_id==resource_id) elif resource_type == 'PROJECT': resourceattr_qry.filter(ResourceAttr.project_id==resource_id) resource_attrs = resourceattr_qry.all() attrs = {} for res_attr in resource_attrs: attrs[res_attr.attr_id] = res_attr new_resource_attrs = [] for item in type_i.typeattrs: if attrs.get(item.attr_id) is None: ra = resource_i.add_attribute(item.attr_id) new_resource_attrs.append(ra) db.DBSession.flush() return new_resource_attrs
0.007048
def save_list(self, list_name, emails):
    """
    Upload a list. The list import job is queued and will happen shortly after the API request.
    http://docs.sailthru.com/api/list

    @param list: list name
    @param emails: List of email values or comma separated string
    """
    data = {'list': list_name,
            'emails': ','.join(emails) if isinstance(emails, list) else emails}
    return self.api_post('list', data)
0.008547
def getPattern(self, word):
    """
    Returns the pattern with key word. Example: net.getPattern("tom") => [0, 0, 0, 1]
    """
    if word in self.patterns:
        return self.patterns[word]
    else:
        raise ValueError('Unknown pattern in getPattern().', word)
0.009464
def _run_on_chrom(chrom, work_bams, names, work_dir, items): """Run cn.mops on work BAMs for a specific chromosome. """ local_sitelib = utils.R_sitelib() batch = sshared.get_cur_batch(items) ext = "-%s-cnv" % batch if batch else "-cnv" out_file = os.path.join(work_dir, "%s%s-%s.bed" % (os.path.splitext(os.path.basename(work_bams[0]))[0], ext, chrom if chrom else "all")) if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: rcode = "%s-run.R" % os.path.splitext(out_file)[0] with open(rcode, "w") as out_handle: out_handle.write(_script.format(prep_str=_prep_load_script(work_bams, names, chrom, items), out_file=tx_out_file, local_sitelib=local_sitelib)) rscript = utils.Rscript_cmd() try: do.run([rscript, "--no-environ", rcode], "cn.mops CNV detection", items[0], log_error=False) except subprocess.CalledProcessError as msg: # cn.mops errors out if no CNVs found. Just write an empty file. if _allowed_cnmops_errorstates(str(msg)): with open(tx_out_file, "w") as out_handle: out_handle.write('track name=empty description="No CNVs found"\n') else: logger.exception() raise return [out_file]
0.00451
def load_labels(path: Union[str, Path]) -> List[SingleConditionSpec]:
    """Load labels files.

    Parameters
    ----------
    path
        Path of labels file.

    Returns
    -------
    List[SingleConditionSpec]
        List of SingleConditionSpec stored in labels file.
    """
    condition_specs = np.load(str(path))
    return [c.view(SingleConditionSpec) for c in condition_specs]
0.002545
def lchisquare(f_obs, f_exp=None):
    """
    Calculates a one-way chi square for list of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.

    Usage:   lchisquare(f_obs, f_exp=None)   f_obs = list of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)  # number of groups
    if f_exp == None:
        f_exp = [sum(f_obs)/float(k)] * len(f_obs)  # create k bins with = freq.
    chisq = 0
    for i in range(len(f_obs)):
        chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
    return chisq, chisqprob(chisq, k-1)
0.005926
def divide(self, phi1, inplace=True): """ DiscreteFactor division by `phi1`. Parameters ---------- phi1 : `DiscreteFactor` instance The denominator for division. inplace: boolean If inplace=True it will modify the factor itself, else would return a new factor. Returns ------- DiscreteFactor or None: if inplace=True (default) returns None if inplace=False returns a new `DiscreteFactor` instance. Examples -------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5)]) >>> phi1.divide(phi2) >>> phi1.variables ['x1', 'x2', 'x3'] >>> phi1.cardinality array([2, 3, 2]) >>> phi1.values array([[[ 0. , 0.33333333], [ 2. , 1. ], [ 4. , 1.66666667]], [[ 3. , 1.75 ], [ 4. , 2.25 ], [ 5. , 2.75 ]]]) """ phi = self if inplace else self.copy() phi1 = phi1.copy() if set(phi1.variables) - set(phi.variables): raise ValueError("Scope of divisor should be a subset of dividend") # Adding extra variables in phi1. extra_vars = set(phi.variables) - set(phi1.variables) if extra_vars: slice_ = [slice(None)] * len(phi1.variables) slice_.extend([np.newaxis] * len(extra_vars)) phi1.values = phi1.values[tuple(slice_)] phi1.variables.extend(extra_vars) # Rearranging the axes of phi1 to match phi for axis in range(phi.values.ndim): exchange_index = phi1.variables.index(phi.variables[axis]) phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis] phi1.values = phi1.values.swapaxes(axis, exchange_index) phi.values = phi.values / phi1.values # If factor division 0/0 = 0 but is undefined for x/0. In pgmpy we are using # np.inf to represent x/0 cases. phi.values[np.isnan(phi.values)] = 0 if not inplace: return phi
0.00209
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # Fire off the query.
    return self.client.service.createPickup(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        OriginDetail=self.OriginDetail,
        PickupServiceCategory=self.PickupServiceCategory,
        PackageCount=self.PackageCount,
        TotalWeight=self.TotalWeight,
        CarrierCode=self.CarrierCode,
        OversizePackageCount=self.OversizePackageCount,
        Remarks=self.Remarks,
        CommodityDescription=self.CommodityDescription,
        CountryRelationship=self.CountryRelationship)
0.002062
def _get_rows(self, options):
    """Return only those data rows that should be printed, based on slicing and sorting.

    Arguments:

    options - dictionary of option settings."""
    if options["oldsortslice"]:
        rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
    else:
        rows = copy.deepcopy(self._rows)

    # Sort
    if options["sortby"]:
        sortindex = self._field_names.index(options["sortby"])
        # Decorate
        rows = [[row[sortindex]] + row for row in rows]
        # Sort
        rows.sort(reverse=options["reversesort"], key=options["sort_key"])
        # Undecorate
        rows = [row[1:] for row in rows]

    # Slice if necessary
    if not options["oldsortslice"]:
        rows = rows[options["start"]:options["end"]]

    return rows
0.003405
def start(ctx, debug, version, config):
    """Commands for devops operations"""
    ctx.obj = {}
    ctx.DEBUG = debug
    if os.path.isfile(config):
        with open(config) as fp:
            agile = json.load(fp)
    else:
        agile = {}
    ctx.obj['agile'] = agile
    if version:
        click.echo(__version__)
        ctx.exit(0)
    if not ctx.invoked_subcommand:
        click.echo(ctx.get_help())
0.002421
async def retry_request(*args, retry_exceptions=(asyncio.TimeoutError,
                                                 ScriptWorkerRetryException),
                        retry_async_kwargs=None, **kwargs):
    """Retry the ``request`` function.

    Args:
        *args: the args to send to request() through retry_async().
        retry_exceptions (list, optional): the exceptions to retry on.
            Defaults to (ScriptWorkerRetryException, ).
        retry_async_kwargs (dict, optional): the kwargs for retry_async.
            If None, use {}.  Defaults to None.
        **kwargs: the kwargs to send to request() through retry_async().

    Returns:
        object: the value from request().

    """
    retry_async_kwargs = retry_async_kwargs or {}
    return await retry_async(request, retry_exceptions=retry_exceptions,
                             args=args, kwargs=kwargs, **retry_async_kwargs)
0.001096
def hasannotationlayer(self, annotationtype=None, set=None):
    """Does the specified annotation layer exist?"""
    l = self.layers(annotationtype, set)
    return (len(l) > 0)
0.021164
def _fullsize_link_tag(self, kwargs, title):
    """ Render a <a href> that points to the fullsize rendition specified """
    return utils.make_tag('a', {
        'href': self.get_fullsize(kwargs),
        'data-lightbox': kwargs['gallery_id'],
        'title': title
    })
0.01
def document(cls):
    """ Decorator to document a class """
    if cls.__doc__ is None:
        return cls
    baseclass_name = cls.mro()[-2].__name__
    try:
        return DOC_FUNCTIONS[baseclass_name](cls)
    except KeyError:
        return cls
0.003846
def determine_collections(self): """Try to determine which collections this record should belong to.""" for value in record_get_field_values(self.record, '980', code='a'): if 'NOTE' in value.upper(): self.collections.add('NOTE') if 'THESIS' in value.upper(): self.collections.add('THESIS') if 'CONFERENCEPAPER' in value.upper(): self.collections.add('ConferencePaper') if "HIDDEN" in value.upper(): self.hidden = True if self.is_published(): self.collections.add("PUBLISHED") self.collections.add("CITEABLE") if 'NOTE' not in self.collections: from itertools import product # TODO: Move this to a KB kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-', 'ALICE-INT-', 'LHCb-PUB-'] values = record_get_field_values(self.record, "088", code='a') for val, rep in product(values, kb): if val.startswith(rep): self.collections.add('NOTE') break # 980 Arxiv tag if record_get_field_values(self.record, '035', filter_subfield_code="a", filter_subfield_value="arXiv"): self.collections.add("arXiv") # 980 HEP && CORE self.collections.add('HEP') self.collections.add('CORE') # 980 Conference Note if 'ConferencePaper' not in self.collections: for value in record_get_field_values(self.record, tag='962', code='n'): if value[-2:].isdigit(): self.collections.add('ConferencePaper') break # Clear out any existing ones. record_delete_fields(self.record, "980")
0.001014
def network_security_group_delete(name, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Delete a network security group within a resource group. :param name: The name of the network security group to delete. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.network_security_group_delete testnsg testgroup ''' result = False netconn = __utils__['azurearm.get_client']('network', **kwargs) try: secgroup = netconn.network_security_groups.delete( resource_group_name=resource_group, network_security_group_name=name ) secgroup.wait() result = True except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) return result
0.002217
def _request(self, url, api_call, request_args, method='GET'): """Function to request and returning JSON data. Parameters: url (str): Base url call. api_call (str): API function to be called. request_args (dict): All requests parameters. method (str): (Defauld: GET) HTTP method 'GET' or 'POST' Raises: PybooruHTTPError: HTTP Error. requests.exceptions.Timeout: When HTTP Timeout. ValueError: When can't decode JSON response. """ try: if method != 'GET': # Reset content-type for data encoded as a multipart form self.client.headers.update({'content-type': None}) response = self.client.request(method, url, **request_args) self.last_call.update({ 'API': api_call, 'url': response.url, 'status_code': response.status_code, 'status': self._get_status(response.status_code), 'headers': response.headers }) if response.status_code in (200, 201, 202, 204): return response.json() raise PybooruHTTPError("In _request", response.status_code, response.url) except requests.exceptions.Timeout: raise PybooruError("Timeout! url: {0}".format(response.url)) except ValueError as e: raise PybooruError("JSON Error: {0} in line {1} column {2}".format( e.msg, e.lineno, e.colno))
0.001259
def get_OHLCV(self, ticker, date=None, date_from=None, date_to=None): """Get Open, High, Low, Close prices and daily Volume for a given ticker. ticker - ticker or symbol date - date for a single-date query date_from, date_to - date range (used only if "date" is not specified) Returns pandas.Dataframe with data. If error occurs, then it is printed as a warning. """ data, meta = self.fetch(ticker + "~OHLCV", None, date, date_from, date_to, 'D', only_data=False) return data
0.008333
def getEdges(self, fromVol):
    """ Return the edges available from fromVol. """
    return [
        self.toObj.diff(diff)
        for diff in self._client.getEdges(self.toArg.vol(fromVol))
    ]
0.009217
def upcaseTokens(s, l, t):
    """Helper parse action to convert tokens to upper case."""
    return [tt.upper() for tt in map(_ustr, t)]
0.051471
def data_from_stream(self, stream):
    """
    Creates a data element reading a representation from the given stream.

    :returns: object implementing
        :class:`everest.representers.interfaces.IExplicitDataElement`
    """
    parser = self._make_representation_parser(stream, self.resource_class,
                                              self._mapping)
    return parser.run()
0.004728
def genl_family_add_grp(family, id_, name):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/family.c#L366.

    Positional arguments:
    family -- Generic Netlink family object (genl_family class instance).
    id_ -- new numeric identifier (integer).
    name -- new human readable name (string).

    Returns:
    0
    """
    grp = genl_family_grp(id_=id_, name=name)
    nl_list_add_tail(grp.list_, family.gf_mc_grps)
    return 0
0.002203
def bbox(self):
    "Return the envelope as a Bound Box string compatible with (bb) params"
    return ",".join(str(attr) for attr in (self.xmin, self.ymin, self.xmax, self.ymax))
0.018519
def get_asset_query_session_for_repository(self, repository_id): """Gets an asset query session for the given repository. arg: repository_id (osid.id.Id): the ``Id`` of the repository return: (osid.repository.AssetQuerySession) - an ``AssetQuerySession`` raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_asset_query()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_query()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_asset_query(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AssetQuerySession(repository_id, runtime=self._runtime)
0.00273
def does_table_exist(self, model_class):
    '''
    Checks whether a table for the given model class already exists.
    Note that this only checks for existence of a table with the expected name.
    '''
    sql = "SELECT count() FROM system.tables WHERE database = '%s' AND name = '%s'"
    r = self._send(sql % (self.db_name, model_class.table_name()))
    return r.text.strip() == '1'
0.009592
def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None): """ Reads JSON from stdin and second CLI argument: ``sys.argv[1]``. Dumps JSON doc to the stdout. :param filters: Union[List[str], None] if None then read from metadata :param search_dirs: Union[List[str], None] if None then read from metadata :param data_dir: bool :param sys_path: bool :param panfl_: bool :param input_stream: io.StringIO or None for debug purpose :param output_stream: io.StringIO or None for debug purpose :return: None """ doc = load(input_stream) # meta = doc.metadata # Local variable 'meta' value is not used verbose = doc.get_metadata('panflute-verbose', False) if search_dirs is None: # metadata 'panflute-path' can be a list, a string, or missing # `search_dirs` should be a list of str search_dirs = doc.get_metadata('panflute-path', []) if type(search_dirs) != list: search_dirs = [search_dirs] if '--data-dir' in search_dirs: data_dir = True if '--no-sys-path' in search_dirs: sys_path = False search_dirs = [dir_ for dir_ in search_dirs if dir_ not in ('--data-dir', '--no-sys-path')] if verbose: debug('panflute: data_dir={} sys_path={}'.format(data_dir, sys_path)) search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs] if not panfl_: # default panflute behaviour: search_dirs.append('.') if data_dir: search_dirs.append(get_filter_dir()) if sys_path: search_dirs += sys.path else: # panfl/pandoctools behaviour: if data_dir: search_dirs.append(get_filter_dir()) if sys_path: search_dirs += reduced_sys_path # Display message (tests that everything is working ok) msg = doc.get_metadata('panflute-echo', False) if msg: debug(msg) if filters is None: # metadata 'panflute-filters' can be a list, a string, or missing # `filters` should be a list of str filters = doc.get_metadata('panflute-filters', []) if type(filters) != list: filters = [filters] if filters: if verbose: msg = "panflute: will run the following filters:" debug(msg, ' '.join(filters)) doc = autorun_filters(filters, doc, search_dirs, verbose) elif verbose: debug("panflute: no filters were provided") dump(doc, output_stream)
0.001128
def register_plugins(cls, plugins):
    '''
    Register plugins. The plugins parameter should be a dict mapping model to plugin.
    Just calls register_plugin for every such pair.
    '''
    for model in plugins:
        cls.register_plugin(model, plugins[model])
0.010239
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'): """Start the Xenon GRPC server on the specified port, or, if a service is already running on that port, connect to that. If no port is given, a random port is selected. This means that, by default, every python instance will start its own instance of a xenon-grpc process. :param port: the port number :param do_not_exit: by default the GRPC server is shut down after Python exits (through the `atexit` module), setting this value to `True` will prevent that from happening.""" logger = logging.getLogger('xenon') logger.setLevel(logging.INFO) logger_handler = logging.StreamHandler() logger_handler.setFormatter(logging.Formatter(style='{')) logger_handler.setLevel(getattr(logging, log_level)) logger.addHandler(logger_handler) if port is None: port = find_free_port() if __server__.process is not None: logger.warning( "You tried to run init(), but the server is already running.") return __server__ __server__.port = port __server__.disable_tls = disable_tls __server__.__enter__() if not do_not_exit: atexit.register(__server__.__exit__, None, None, None) return __server__
0.000766
def make_str(value):
    """Converts a value into a valid string."""
    if isinstance(value, bytes):
        try:
            return value.decode(sys.getfilesystemencoding())
        except UnicodeError:
            return value.decode('utf-8', 'replace')
    return text_type(value)
0.003521
def subpackets(*items):
    """Serialize several GPG subpackets."""
    prefixed = [subpacket_prefix_len(item) for item in items]
    return util.prefix_len('>H', b''.join(prefixed))
0.005495
def visit_NameConstant(self, node):
    """
    Python 3
    """
    nnode = self.dnode(node)
    copy_from_lineno_col_offset(
        nnode, str(node.value), self.bytes_pos_to_utf8, to=node)
0.01005
def generate_results_subparser(subparsers): """Adds a sub-command parser to `subparsers` to manipulate CSV results data.""" parser = subparsers.add_parser( 'results', description=constants.RESULTS_DESCRIPTION, epilog=constants.RESULTS_EPILOG, formatter_class=ParagraphFormatter, help=constants.RESULTS_HELP) utils.add_common_arguments(parser) parser.set_defaults(func=results) be_group = parser.add_argument_group('bifurcated extend') be_group.add_argument('-b', '--bifurcated-extend', dest='bifurcated_extend', metavar='CORPUS', help=constants.RESULTS_BIFURCATED_EXTEND_HELP) be_group.add_argument('--max-be-count', dest='bifurcated_extend_size', help=constants.RESULTS_BIFURCATED_EXTEND_MAX_HELP, metavar='COUNT', type=int) parser.add_argument('-e', '--extend', dest='extend', help=constants.RESULTS_EXTEND_HELP, metavar='CORPUS') parser.add_argument('--excise', help=constants.RESULTS_EXCISE_HELP, metavar='NGRAM', type=str) parser.add_argument('-l', '--label', dest='label', help=constants.RESULTS_LABEL_HELP, metavar='LABEL') parser.add_argument('--min-count', dest='min_count', help=constants.RESULTS_MINIMUM_COUNT_HELP, metavar='COUNT', type=int) parser.add_argument('--max-count', dest='max_count', help=constants.RESULTS_MAXIMUM_COUNT_HELP, metavar='COUNT', type=int) parser.add_argument('--min-count-work', dest='min_count_work', help=constants.RESULTS_MINIMUM_COUNT_WORK_HELP, metavar='COUNT', type=int) parser.add_argument('--max-count-work', dest='max_count_work', help=constants.RESULTS_MAXIMUM_COUNT_WORK_HELP, metavar='COUNT', type=int) parser.add_argument('--min-size', dest='min_size', help=constants.RESULTS_MINIMUM_SIZE_HELP, metavar='SIZE', type=int) parser.add_argument('--max-size', dest='max_size', help=constants.RESULTS_MAXIMUM_SIZE_HELP, metavar='SIZE', type=int) parser.add_argument('--min-works', dest='min_works', help=constants.RESULTS_MINIMUM_WORK_HELP, metavar='COUNT', type=int) parser.add_argument('--max-works', dest='max_works', help=constants.RESULTS_MAXIMUM_WORK_HELP, metavar='COUNT', type=int) parser.add_argument('--ngrams', dest='ngrams', help=constants.RESULTS_NGRAMS_HELP, metavar='NGRAMS') parser.add_argument('--reciprocal', action='store_true', help=constants.RESULTS_RECIPROCAL_HELP) parser.add_argument('--reduce', action='store_true', help=constants.RESULTS_REDUCE_HELP) parser.add_argument('--relabel', help=constants.RESULTS_RELABEL_HELP, metavar='CATALOGUE') parser.add_argument('--remove', help=constants.RESULTS_REMOVE_HELP, metavar='LABEL', type=str) parser.add_argument('--sort', action='store_true', help=constants.RESULTS_SORT_HELP) utils.add_tokenizer_argument(parser) parser.add_argument('-z', '--zero-fill', dest='zero_fill', help=constants.RESULTS_ZERO_FILL_HELP, metavar='CORPUS') parser.add_argument('results', help=constants.RESULTS_RESULTS_HELP, metavar='RESULTS') unsafe_group = parser.add_argument_group( constants.RESULTS_UNSAFE_GROUP_TITLE, constants.RESULTS_UNSAFE_GROUP_DESCRIPTION) unsafe_group.add_argument('--add-label-count', action='store_true', help=constants.RESULTS_ADD_LABEL_COUNT_HELP) unsafe_group.add_argument('--add-label-work-count', action='store_true', help=constants.RESULTS_ADD_LABEL_WORK_COUNT_HELP) unsafe_group.add_argument('--collapse-witnesses', action='store_true', help=constants.RESULTS_COLLAPSE_WITNESSES_HELP) unsafe_group.add_argument('--group-by-ngram', dest='group_by_ngram', help=constants.RESULTS_GROUP_BY_NGRAM_HELP, metavar='CATALOGUE') unsafe_group.add_argument('--group-by-witness', action='store_true', 
help=constants.RESULTS_GROUP_BY_WITNESS_HELP)
0.000213
def save_config_value(request, response, key, value):
    """Sets value of key `key` to `value` in both session and cookies."""
    request.session[key] = value
    response.set_cookie(key, value, expires=one_year_from_now())
    return response
0.004082
def _connect(self):
    """
    Establish a connection to the master process's UNIX listener socket,
    constructing a mitogen.master.Router to communicate with the master,
    and a mitogen.parent.Context to represent it.

    Depending on the original transport we should emulate, trigger one of
    the _connect_*() service calls defined above to cause the master
    process to establish the real connection on our behalf, or return a
    reference to the existing one.
    """
    if self.connected:
        return

    self._connect_broker()
    stack = self._build_stack()
    self._connect_stack(stack)
0.002999
def get_tax_class_by_id(cls, tax_class_id, **kwargs):
    """Find TaxClass

    Return single instance of TaxClass by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_tax_class_by_id(tax_class_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str tax_class_id: ID of taxClass to return (required)
    :return: TaxClass
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
    else:
        (data) = cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
        return data
0.004474
def write_nodes(gtfs, output, fields=None):
    """
    Parameters
    ----------
    gtfs: gtfspy.GTFS
    output: str
        Path to the output file
    fields: list, optional
        which pieces of information to provide
    """
    nodes = gtfs.get_table("stops")
    if fields is not None:
        nodes = nodes[fields]
    with util.create_file(output, tmpdir=True, keepext=True) as tmpfile:
        nodes.to_csv(tmpfile, encoding='utf-8', index=False, sep=";")
0.002132
def unpack_auth_b64(self, docker_registry):
    """Decode and unpack base64 'auth' credentials from config file.

    :param docker_registry: str, registry reference in config file

    :return: namedtuple, UnpackedAuth (or None if no 'auth' is available)
    """
    UnpackedAuth = namedtuple('UnpackedAuth', ['raw_str', 'username', 'password'])
    credentials = self.get_credentials(docker_registry)
    auth_b64 = credentials.get('auth')
    if auth_b64:
        raw_str = b64decode(auth_b64).decode('utf-8')
        unpacked_credentials = raw_str.split(':', 1)
        if len(unpacked_credentials) == 2:
            return UnpackedAuth(raw_str, *unpacked_credentials)
        else:
            raise ValueError("Failed to parse 'auth' in '%s'" % self.json_secret_path)
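# Illustrative sketch (not part of the original sample) of the 'auth' value this
# method unpacks: a dockercfg-style entry stores base64("username:password").
#
#     from base64 import b64encode
#     auth_b64 = b64encode(b"alice:s3cret").decode("utf-8")  # 'YWxpY2U6czNjcmV0'
#     # unpack_auth_b64 would then yield UnpackedAuth('alice:s3cret', 'alice', 's3cret')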
0.004825
def timeline(self, request, drip_id, into_past, into_future):
    """
    Return a list of people who should get emails.
    """
    from django.shortcuts import render, get_object_or_404

    drip = get_object_or_404(Drip, id=drip_id)

    shifted_drips = []
    seen_users = set()
    for shifted_drip in drip.drip.walk(into_past=int(into_past),
                                       into_future=int(into_future)+1):
        shifted_drip.prune()
        shifted_drips.append({
            'drip': shifted_drip,
            'qs': shifted_drip.get_queryset().exclude(id__in=seen_users)
        })
        seen_users.update(shifted_drip.get_queryset().values_list('id', flat=True))

    return render(request, 'drip/timeline.html', locals())
0.005249
def bind(self):
    """ Resets the Response object to its factory defaults. """
    self._COOKIES = None
    self.status = 200
    self.headers = HeaderDict()
    self.content_type = 'text/html; charset=UTF-8'
0.008734
def _extract_germline(in_file, data):
    """Extract germline calls non-somatic, non-filtered calls.
    """
    out_file = "%s-germline.vcf" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file) and not utils.file_uptodate(out_file + ".gz", in_file):
        with file_transaction(data, out_file) as tx_out_file:
            reader = cyvcf2.VCF(str(in_file))
            reader.add_filter_to_header({'ID': 'Somatic', 'Description': 'Variant called as Somatic'})
            #with contextlib.closing(cyvcf2.Writer(tx_out_file, reader)) as writer:
            with open(tx_out_file, "w") as out_handle:
                out_handle.write(reader.raw_header)
                for rec in reader:
                    rec = _update_germline_filters(rec)
                    out_handle.write(str(rec))
                    #writer.write_record(rec)
    return out_file
0.006764
def _batch_insert(self, inserts, intervals, **kwargs):
    '''
    Support for batch insert. Default implementation is non-optimized and
    is a simple loop over values.
    '''
    for timestamp, names in inserts.iteritems():
        for name, values in names.iteritems():
            for value in values:
                self._insert(name, value, timestamp, intervals, **kwargs)
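# Illustrative sketch (not part of the original sample) of the expected `inserts`
# shape: a mapping of timestamp -> {series name -> list of values}. The
# .iteritems() calls above indicate this implementation targets Python 2.
#
#     inserts = {
#         1500000000: {'cpu.load': [0.4, 0.7], 'mem.used': [2048]},
#         1500000060: {'cpu.load': [0.5]},
#     }
#     store._batch_insert(inserts, intervals)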
0.01897
def _task_instances_for_dag_run(self, dag_run, session=None):
    """
    Returns a map of task instance key to task instance object for the tasks to
    run in the given dag run.

    :param dag_run: the dag run to get the tasks from
    :type dag_run: airflow.models.DagRun
    :param session: the database session object
    :type session: sqlalchemy.orm.session.Session
    """
    tasks_to_run = {}

    if dag_run is None:
        return tasks_to_run

    # check if we have orphaned tasks
    self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)

    # for some reason if we don't refresh the reference to run is lost
    dag_run.refresh_from_db()
    make_transient(dag_run)

    # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
    for ti in dag_run.get_task_instances():
        # all tasks part of the backfill are scheduled to run
        if ti.state == State.NONE:
            ti.set_state(State.SCHEDULED, session=session)
        if ti.state != State.REMOVED:
            tasks_to_run[ti.key] = ti

    return tasks_to_run
0.003393
def order_sections(self, key, reverse=True):
    """Sort sections according to the value of key."""
    fsort = lambda s: s.__dict__[key]
    return sorted(self.sections, key=fsort, reverse=reverse)
0.014286
def _load_sequences_to_reference_gene(self, g_id, force_rerun=False):
    """Load orthologous strain sequences to reference Protein object, save as new pickle"""
    protein_seqs_pickle_path = op.join(self.sequences_by_gene_dir, '{}_protein_withseqs.pckl'.format(g_id))

    if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_pickle_path):
        protein_pickle_path = self.gene_protein_pickles[g_id]
        protein_pickle = ssbio.io.load_pickle(protein_pickle_path)

        for strain, info in self.strain_infodict.items():
            strain_sequences = SeqIO.index(info['genome_path'], 'fasta')
            strain_gene_functional = info['functional_genes'][g_id]
            if strain_gene_functional:
                # Pull the gene ID of the strain from the orthology matrix
                strain_gene_key = self.df_orthology_matrix.at[g_id, strain]
                new_id = '{}_{}'.format(g_id, strain)
                if protein_pickle.sequences.has_id(new_id):
                    continue
                protein_pickle.load_manual_sequence(seq=strain_sequences[strain_gene_key],
                                                    ident=new_id,
                                                    set_as_representative=False)
        protein_pickle.save_pickle(outfile=protein_seqs_pickle_path)

    return g_id, protein_seqs_pickle_path
0.004854
def IntraField(config={}):
    """Intra field interlace to sequential converter.

    This uses a vertical filter with an aperture of 8 lines, generated by
    :py:class:`~pyctools.components.interp.filtergenerator.FilterGenerator`.
    The aperture (and other parameters) can be adjusted after the
    :py:class:`IntraField` component is created.
    """
    return Compound(
        config = config,
        deint = SimpleDeinterlace(),
        interp = Resize(),
        filgen = FilterGenerator(yaperture=8, ycut=50),
        gain = Arithmetic(func='data * pt_float(2)'),
        linkages = {
            ('self', 'input')   : [('deint', 'input')],
            ('deint', 'output') : [('interp', 'input')],
            ('interp', 'output'): [('self', 'output')],
            ('filgen', 'output'): [('gain', 'input')],
            ('gain', 'output')  : [('interp', 'filter')],
            }
        )
0.019608
def get_object_or_child_by_type(self, *types):
    """ Get object if child already been read or get child.

    Use this method for fast access to objects in case of static configurations.

    :param types: requested object types.
    :return: all children of the specified types.
    """

    objects = self.get_objects_or_children_by_type(*types)
    return objects[0] if any(objects) else None
0.007059
async def evaluate_trained_model(state):
    """Evaluate the most recently trained model against the current best model.

    Args:
        state: the RL loop State instance.
    """
    return await evaluate_model(
        state.train_model_path, state.best_model_path,
        os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)
0.009091
def get_image(self, digest, blob, mime_type, index, size=500):
    """Return an image for the given content, only if it already exists in
    the image cache."""
    # Special case, for now (XXX).
    if mime_type.startswith("image/"):
        return ""

    cache_key = f"img:{index}:{size}:{digest}"
    return self.cache.get(cache_key)
0.005464
def merged():
    # type: () -> None
    """ Cleanup a remotely merged branch. """
    develop = conf.get('git.devel_branch', 'develop')
    branch = git.current_branch(refresh=True)

    common.assert_branch_type('feature')

    # Pull develop with the merged feature
    common.git_checkout(develop)
    common.git_pull(develop)

    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()

    common.git_checkout(develop)
0.002252
def unpack_archive(*components, **kwargs) -> str:
    """
    Unpack a compressed archive.

    Arguments:
        *components (str[]): Absolute path.
        **kwargs (dict, optional): Set "compression" to compression type.
            Default: bz2. Set "dir" to destination directory. Defaults to
            the directory of the archive.

    Returns:
        str: Path to directory.
    """
    path = fs.abspath(*components)
    compression = kwargs.get("compression", "bz2")
    dir = kwargs.get("dir", fs.dirname(path))

    fs.cd(dir)
    tar = tarfile.open(path, "r:" + compression)
    tar.extractall()
    tar.close()
    fs.cdpop()

    return dir
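# Illustrative usage sketch (not part of the original sample), assuming the `fs`
# path helper imported by the surrounding package and a bz2-compressed tarball:
#
#     out_dir = unpack_archive("/data/corpus.tar.bz2")  # extracts beside the archive
#     out_dir = unpack_archive("/data", "corpus.tar.bz2",
#                              compression="bz2", dir="/tmp/corpus")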
0.001517
def write(self, symbol, data, initial_image=None, metadata=None):
    """
    Writes a list of market data events.

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    data : list of dicts or a pandas.DataFrame
        List of ticks to store to the tick-store.
        if a list of dicts, each dict must contain a 'index' datetime
        if a pandas.DataFrame the index must be a Timestamp that can be
        converted to a datetime.  Index names will not be preserved.
    initial_image : dict
        Dict of the initial image at the start of the document. If this
        contains a 'index' entry it is assumed to be the time of the
        timestamp of the index
    metadata: dict
        optional user defined metadata - one per symbol
    """
    pandas = False
    # Check for overlapping data
    if isinstance(data, list):
        start = data[0]['index']
        end = data[-1]['index']
    elif isinstance(data, pd.DataFrame):
        start = data.index[0].to_pydatetime()
        end = data.index[-1].to_pydatetime()
        pandas = True
    else:
        raise UnhandledDtypeException("Can't persist type %s to tickstore" % type(data))
    self._assert_nonoverlapping_data(symbol, to_dt(start), to_dt(end))

    if pandas:
        buckets = self._pandas_to_buckets(data, symbol, initial_image)
    else:
        buckets = self._to_buckets(data, symbol, initial_image)
    self._write(buckets)

    if metadata:
        self._metadata.replace_one({SYMBOL: symbol},
                                   {SYMBOL: symbol, META: metadata},
                                   upsert=True)
0.002818
def _observe_timeseries_fn(timeseries):
    """Build an observation_noise_fn that observes a Tensor timeseries."""
    def observation_noise_fn(t):
        current_slice = timeseries[..., t, :]
        return tfd.MultivariateNormalDiag(
            loc=current_slice,
            scale_diag=tf.zeros_like(current_slice))
    return observation_noise_fn
0.012121
def extend_from_instances(self,
                          params: Params,
                          instances: Iterable['adi.Instance'] = ()) -> None:
    """
    Extends an already generated vocabulary using a collection of instances.
    """
    min_count = params.pop("min_count", None)
    max_vocab_size = pop_max_vocab_size(params)
    non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
    pretrained_files = params.pop("pretrained_files", {})
    min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
    only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
    tokens_to_add = params.pop("tokens_to_add", None)
    params.assert_empty("Vocabulary - from dataset")

    logger.info("Fitting token dictionary from dataset.")
    namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
    for instance in Tqdm.tqdm(instances):
        instance.count_vocab_items(namespace_token_counts)
    self._extend(counter=namespace_token_counts,
                 min_count=min_count,
                 max_vocab_size=max_vocab_size,
                 non_padded_namespaces=non_padded_namespaces,
                 pretrained_files=pretrained_files,
                 only_include_pretrained_words=only_include_pretrained_words,
                 tokens_to_add=tokens_to_add,
                 min_pretrained_embeddings=min_pretrained_embeddings)
0.007033
def _handle_output(self, channel, output):
    """
    Given an initial channel and a sequence of messages or sentinels,
    output the messages.
    """
    augmented = Sentinel.augment_items(output, channel=channel, secret=False)
    for message in augmented:
        self.out(message.channel, message, not message.secret)
0.029316
def check_installed_files(self):
    """
    Checks that the hashes and sizes of the files in ``RECORD`` are
    matched by the files themselves. Returns a (possibly empty) list of
    mismatches. Each entry in the mismatch list will be a tuple consisting
    of the path, 'exists', 'size' or 'hash' according to what didn't match
    (existence is checked first, then size, then hash), the expected
    value and the actual value.
    """
    mismatches = []
    base = os.path.dirname(self.path)
    record_path = self.get_distinfo_file('RECORD')
    for path, hash_value, size in self.list_installed_files():
        if not os.path.isabs(path):
            path = os.path.join(base, path)
        if path == record_path:
            continue
        if not os.path.exists(path):
            mismatches.append((path, 'exists', True, False))
        elif os.path.isfile(path):
            actual_size = str(os.path.getsize(path))
            if size and actual_size != size:
                mismatches.append((path, 'size', size, actual_size))
            elif hash_value:
                if '=' in hash_value:
                    hasher = hash_value.split('=', 1)[0]
                else:
                    hasher = None
                with open(path, 'rb') as f:
                    actual_hash = self.get_hash(f.read(), hasher)
                    if actual_hash != hash_value:
                        mismatches.append((path, 'hash', hash_value, actual_hash))
    return mismatches
0.001854
def cancelTask(self, *args, **kwargs):
    """
    Cancel Task

    This method will cancel a task that is either `unscheduled`, `pending` or
    `running`. It will resolve the current run as `exception` with
    `reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie.
    it doesn't have any runs, an initial run will be added and resolved as
    described above. Hence, after canceling a task, it cannot be scheduled
    with `queue.scheduleTask`, but a new run can be created with
    `queue.rerun`. These semantics is equivalent to calling
    `queue.scheduleTask` immediately followed by `queue.cancelTask`.

    **Remark** this operation is idempotent, if you try to cancel a task
    that isn't `unscheduled`, `pending` or `running`, this operation will
    just return the current task status.

    This method gives output: ``v1/task-status-response.json#``

    This method is ``stable``
    """

    return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
0.00469
def start(self):
    ''' doesn't work'''
    thread = threading.Thread(target=reactor.run)
    thread.start()
0.016529
def plot(feature, mp=None, style_function=None, **map_kwargs):
    """Plots a GeoVector in an ipyleaflet map.

    Parameters
    ----------
    feature : telluric.vectors.GeoVector, telluric.features.GeoFeature, telluric.collections.BaseCollection
        Data to plot.
    mp : ipyleaflet.Map, optional
        Map in which to plot, default to None (creates a new one).
    style_function : func
        Function that returns an style dictionary for
    map_kwargs : kwargs, optional
        Extra parameters to send to ipyleaflet.Map.
    """
    map_kwargs.setdefault('basemap', basemaps.Stamen.Terrain)
    if feature.is_empty:
        warnings.warn("The geometry is empty.")
        mp = Map(**map_kwargs) if mp is None else mp

    else:
        if mp is None:
            center = feature.envelope.centroid.reproject(WGS84_CRS)
            zoom = zoom_level_from_geometry(feature.envelope)
            mp = Map(center=(center.y, center.x), zoom=zoom, **map_kwargs)

        mp.add_layer(layer_from_element(feature, style_function))

    return mp
0.001894
def walk_interface(self, name, data, in_location):
    interface = Interface()
    location = "{}.{}".format(in_location, name)
    if isinstance(data, dict):
        interface.name = de_identifier(self.require_field(location, "name", data, "", str))
        location = "{}.{}".format(in_location, clean_identifier(interface.name))
        interface.returns = de_identifier(self.require_field(location, "returns", data, "", str))
        interface.description = self.recommend_field(location, "description", data, "", str)
        # Implementations
        if "production" in data:
            interface.production = self.walk_implementation("production", data["production"], location)
        else:
            self.not_found_error("{}.{}".format(location, "production"))
        if "test" in data:
            interface.test = self.walk_implementation("test", data["test"], location)
        else:
            interface.test = None
        # Arguments
        interface.args = list(self.walk_list("{}.args".format(location), "args", data, self.walk_interface_arg))
        interface.cache = self.typecheck_field(location, "cache", data, [], list)
    else:
        self.type_error(location, dict, type(data))
    return interface
    '''
    self.require_field("url", "{}.url".format(location), data, str)
    self.require_field("verb", "{}.verb".format(location), data,
                       set(("get", "post", "delete", "put")))
    self.recommend_field("format", "{}.format".format(location), data,
                         set(("json", "xml", "html", "csv", "text")),
                         not_found="Assuming json.")
    self.require_field("output", "{}.output".format(location), data, str)
    self.recommend_field(
        "description", "{}.description".format(name), data, str,
        not_found="There will be no documentation for {}!".format(name))
    self.typecheck_field("comment", "{}.comment".format(location), data, str)
    if "inputs" in data:
        self.walk_list("{}.inputs".format(location), "inputs", data, self.walk_input)
        # Ensure that every url has the requsite paths!
        if "url" in data:
            url_input_names = set(map(str, re.findall("<(.*?)>", data["url"])))
            given_input_names = set([input['path'] for input in data["inputs"].values() if 'path' in input])
            if not url_input_names.issubset(given_input_names):
                self.error("Expected full list of url parameters {} for {}, given only {}.".format(list(url_input_names), location, list(given_input_names)))
    else:
        self.type_error(location, dict, type(data))
    return interface
    '''
0.007674
def totals(self, start=None, end=None):
    """Returns a Totals object containing the sum of all debits, credits
    and net change over the period of time from start to end.

    'start' is inclusive, 'end' is exclusive
    """
    qs = self._entries_range(start=start, end=end)
    qs_positive = qs.filter(amount__gt=Decimal("0.00")).all().aggregate(Sum('amount'))
    qs_negative = qs.filter(amount__lt=Decimal("0.00")).all().aggregate(Sum('amount'))

    #Is there a cleaner way of saying this? Should the sum of 0 things be None?
    positives = qs_positive['amount__sum'] if qs_positive['amount__sum'] is not None else 0
    negatives = -qs_negative['amount__sum'] if qs_negative['amount__sum'] is not None else 0

    if self._DEBIT_IN_DB() > 0:
        debits = positives
        credits = negatives
    else:
        debits = negatives
        credits = positives

    net = debits - credits
    if self._positive_credit():
        net = -net

    return self.Totals(credits, debits, net)
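# Illustrative worked example (not part of the original sample) of the sign
# handling above, assuming debits are stored as positive amounts
# (_DEBIT_IN_DB() > 0) and the account is not credit-positive: entries of
# +100, +25 and -40 give debits = 125, credits = 40, net = 85.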
0.007428
def update(self):
    '''Update definitions.'''
    # Download http://rebase.neb.com/rebase/link_withref to tmp
    self._tmpdir = tempfile.mkdtemp()
    try:
        self._rebase_file = self._tmpdir + '/rebase_file'
        print 'Downloading latest enzyme definitions'
        url = 'http://rebase.neb.com/rebase/link_withref'
        header = {'User-Agent': 'Mozilla/5.0'}
        req = urllib2.Request(url, headers=header)
        con = urllib2.urlopen(req)
        with open(self._rebase_file, 'wb') as rebase_file:
            rebase_file.write(con.read())
        # Process into self._enzyme_dict
        self._process_file()
    except urllib2.HTTPError, e:
        print 'HTTP Error: {} {}'.format(e.code, url)
        print 'Falling back on default enzyme list'
        self._enzyme_dict = coral.constants.fallback_enzymes
    except urllib2.URLError, e:
        print 'URL Error: {} {}'.format(e.reason, url)
        print 'Falling back on default enzyme list'
        self._enzyme_dict = coral.constants.fallback_enzymes
    # Process into RestrictionSite objects? (depends on speed)
    print 'Processing into RestrictionSite instances.'
    self.restriction_sites = {}
    # TODO: make sure all names are unique
    for key, (site, cuts) in self._enzyme_dict.iteritems():
        # Make a site
        try:
            r = coral.RestrictionSite(coral.DNA(site), cuts, name=key)
            # Add it to dict with name as key
            self.restriction_sites[key] = r
        except ValueError:
            # Encountered ambiguous sequence, have to ignore it until
            # coral.DNA can handle ambiguous DNA
            pass
0.001119
def calculate_sleep_time(attempt, delay_factor=5.0, randomization_factor=.5, max_delay=120):
    """Calculate the sleep time between retries, in seconds.

    Based off of `taskcluster.utils.calculateSleepTime`, but with kwargs instead
    of constant `delay_factor`/`randomization_factor`/`max_delay`.  The
    taskcluster function generally slept for less than a second, which didn't
    always get past server issues.

    Args:
        attempt (int): the retry attempt number
        delay_factor (float, optional): a multiplier for the delay time.  Defaults to 5.
        randomization_factor (float, optional): a randomization multiplier for the
            delay time.  Defaults to .5.
        max_delay (float, optional): the max delay to sleep.  Defaults to 120 (seconds).

    Returns:
        float: the time to sleep, in seconds.
    """
    if attempt <= 0:
        return 0

    # We subtract one to get exponents: 1, 2, 3, 4, 5, ..
    delay = float(2 ** (attempt - 1)) * float(delay_factor)
    # Apply randomization factor.  Only increase the delay here.
    delay = delay * (randomization_factor * random.random() + 1)
    # Always limit with a maximum delay
    return min(delay, max_delay)
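# Illustrative worked example (not part of the original sample): with the
# defaults (delay_factor=5, randomization_factor=.5, max_delay=120), the base
# delay is 5 * 2**(attempt - 1) seconds, scaled by a random factor in [1.0, 1.5):
#
#     attempt 1 -> 5..7.5s, attempt 2 -> 10..15s, attempt 3 -> 20..30s,
#     attempt 5 -> 80..120s (capped at max_delay)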
0.005785
def _ctypes_ex_compatvars(executable):
    """Returns a list of code lines for signature matching variables, and
    pointer saving variables.
    """
    result = []
    for p in executable.ordered_parameters:
        _ctypes_code_parameter(result, p, "regular")
        _ctypes_code_parameter(result, p, "saved")

    if type(executable).__name__ == "Function":
        _ctypes_code_parameter(result, executable, "regular")
        _ctypes_code_parameter(result, executable, "saved")

    return result
0.00396