Columns: text (string, lengths 78 to 104k characters), score (float64, range 0 to 0.18)
def generate_security_data(self):
    """Generate a dict of security data for "initial" data."""
    timestamp = int(time.time())
    security_dict = {
        'content_type': str(self.target_object._meta),
        'object_pk': str(self.target_object._get_pk_val()),
        'timestamp': str(timestamp),
        'security_hash': self.initial_security_hash(timestamp),
    }
    return security_dict
0.004608
def clear_cache(self):
    """
    Clears any cache associated with the serial model and the engines
    seen by the direct view.
    """
    self.underlying_model.clear_cache()
    try:
        logger.info('DirectView results has {} items. Clearing.'.format(
            len(self._dv.results)
        ))
        self._dv.purge_results('all')
        if self._purge_client:
            self._dv.client.purge_everything()
    except:
        pass
0.005988
def remove_core_element(self, model):
    """Remove respective core element of handed scoped variable model

    :param ScopedVariableModel model: Scoped variable model which core element should be removed
    :return:
    """
    assert model.scoped_variable.parent is self.model.state
    gui_helper_state_machine.delete_core_element_of_model(model)
0.008
def load(self, **kwargs):
    """Custom load method to address issue in 11.6.0 Final,
    where non existing objects would be True.
    """
    if LooseVersion(self.tmos_ver) == LooseVersion('11.6.0'):
        return self._load_11_6(**kwargs)
    else:
        return super(Rule, self)._load(**kwargs)
0.006061
def sort_values(self, by, ascending=True): """Sort the DataFrame based on a column. Unlike Pandas, one can sort by data from both index and regular columns. Currently possible to sort only on a single column since Weld is missing multiple-column sort. Note this is an expensive operation (brings all data to Weld). Parameters ---------- by : str or list of str Column names to sort. ascending : bool, optional Returns ------- DataFrame DataFrame sorted according to the column. """ check_type(ascending, bool) check_str_or_list_str(by) by = as_list(by) if len(by) > 1: raise NotImplementedError('Weld does not yet support sorting on multiple columns') all_data = self.reset_index() by_data = all_data[by] sorted_indices = weld_sort(by_data._gather_data_for_weld(), by_data._gather_weld_types(), 'sort_index', ascending=ascending) new_index = self.index._iloc_indices(sorted_indices) new_columns = list(self._iter()) new_column_names = [column.name for column in new_columns] new_columns = [_series_iloc(column, sorted_indices, new_index) for column in new_columns] new_data = OrderedDict(zip(new_column_names, new_columns)) return DataFrame(new_data, new_index)
0.003968
def sync(self):
    """Sync a bucket.

    Force all API calls to S3 and populate the database with the current state of S3.
    """
    for key in mimicdb.backend.smembers(tpl.bucket % self.name):
        mimicdb.backend.delete(tpl.key % (self.name, key))

    mimicdb.backend.delete(tpl.bucket % self.name)
    mimicdb.backend.sadd(tpl.connection, self.name)

    for key in self.list(force=True):
        mimicdb.backend.sadd(tpl.bucket % self.name, key.name)
        mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"')))
0.006547
def getStreamURL(self, **params):
    """ Returns a stream url that may be used by external applications such as VLC.

        Parameters:
            **params (dict): optional parameters to manipulate the playback when accessing
                the stream. A few known parameters include: maxVideoBitrate, videoResolution
                offset, copyts, protocol, mediaIndex, platform.

        Raises:
            :class:`plexapi.exceptions.Unsupported`: When the item doesn't support fetching a stream URL.
    """
    if self.TYPE not in ('movie', 'episode', 'track'):
        raise Unsupported('Fetching stream URL for %s is unsupported.' % self.TYPE)
    mvb = params.get('maxVideoBitrate')
    vr = params.get('videoResolution', '')
    params = {
        'path': self.key,
        'offset': params.get('offset', 0),
        'copyts': params.get('copyts', 1),
        'protocol': params.get('protocol'),
        'mediaIndex': params.get('mediaIndex', 0),
        'X-Plex-Platform': params.get('platform', 'Chrome'),
        'maxVideoBitrate': max(mvb, 64) if mvb else None,
        'videoResolution': vr if re.match('^\d+x\d+$', vr) else None
    }
    # remove None values
    params = {k: v for k, v in params.items() if v is not None}
    streamtype = 'audio' if self.TYPE in ('track', 'album') else 'video'
    # sort the keys since the randomness fucks with my tests..
    sorted_params = sorted(params.items(), key=lambda val: val[0])
    return self._server.url('/%s/:/transcode/universal/start.m3u8?%s' %
                            (streamtype, urlencode(sorted_params)), includeToken=True)
0.005875
def reads_overlapping_variant( samfile, variant, chromosome=None, use_duplicate_reads=USE_DUPLICATE_READS, use_secondary_alignments=USE_SECONDARY_ALIGNMENTS, min_mapping_quality=MIN_READ_MAPPING_QUALITY): """ Find reads in the given SAM/BAM file which overlap the given variant and return them as a list of AlleleRead objects. Parameters ---------- samfile : pysam.AlignmentFile variant : varcode.Variant chromosome : str use_duplicate_reads : bool Should we use reads that have been marked as PCR duplicates use_secondary_alignments : bool Should we use reads at locations other than their best alignment min_mapping_quality : int Drop reads below this mapping quality only_alt_allele : bool Filter reads to only include those that support the alt allele of the variant. Returns sequence of AlleleRead objects. """ logger.info("Gathering reads for %s", variant) if chromosome is None: chromosome = variant.contig logger.info( "Gathering variant reads for variant %s (chromosome = %s, gene names = %s)", variant, chromosome, variant.gene_names) base1_position, ref, alt = trim_variant(variant) if len(ref) == 0: # if the variant is an insertion base1_position_before_variant = base1_position base1_position_after_variant = base1_position + 1 else: base1_position_before_variant = base1_position - 1 base1_position_after_variant = base1_position + len(ref) locus_reads = locus_read_generator( samfile=samfile, chromosome=chromosome, base1_position_before_variant=base1_position_before_variant, base1_position_after_variant=base1_position_after_variant, use_duplicate_reads=use_duplicate_reads, use_secondary_alignments=use_secondary_alignments, min_mapping_quality=min_mapping_quality) allele_reads = allele_reads_from_locus_reads( locus_reads=locus_reads, n_ref=len(ref)) return allele_reads
0.000936
def video_category(self):
    """doc: http://open.youku.com/docs/doc?id=90
    """
    url = 'https://openapi.youku.com/v2/schemas/video/category.json'
    r = requests.get(url)
    check_error(r)
    return r.json()
0.008333
def add(self, component: Union[Component, Sequence[Component]]) -> None:
    """Add a widget to the grid in the next available cell.

    Searches over columns then rows for available cells.

    Parameters
    ----------
    components : bowtie._Component
        A Bowtie widget instance.

    """
    try:
        self[Span(*self._available_cell())] = component
    except NoUnusedCellsError:
        span = list(self._spans.keys())[-1]
        self._spans[span] += component
0.003802
def recv(self, timeout=None):
    """Receive an ISOTP frame, blocking if none is available in the buffer
    for at most 'timeout' seconds."""
    try:
        return self.rx_queue.get(timeout is None or timeout > 0, timeout)
    except queue.Empty:
        return None
0.006803
def get_or_default_template_file_name(ctx, param, provided_value, include_build):
    """
    Default value for the template file name option is more complex than what Click can handle.
    This method either returns user provided file name or one of the two default options (template.yaml/template.yml)
    depending on the file that exists

    :param ctx: Click Context
    :param param: Param name
    :param provided_value: Value provided by Click. It could either be the default value or provided by user.
    :return: Actual value to be used in the CLI
    """

    search_paths = [
        "template.yaml",
        "template.yml",
    ]

    if include_build:
        search_paths.insert(0, os.path.join(".aws-sam", "build", "template.yaml"))

    if provided_value == _TEMPLATE_OPTION_DEFAULT_VALUE:
        # Default value was used. Value can either be template.yaml or template.yml. Decide based on which file exists
        # .yml is the default, even if it does not exist.
        provided_value = "template.yml"

        for option in search_paths:
            if os.path.exists(option):
                provided_value = option
                break

    result = os.path.abspath(provided_value)
    LOG.debug("Using SAM Template at %s", result)
    return result
0.005482
def tx_context_for_idx(self, tx_in_idx):
    """
    solution_script: alleged solution to the puzzle_script
    puzzle_script: the script protecting the coins
    """
    tx_in = self.tx.txs_in[tx_in_idx]

    tx_context = TxContext()
    tx_context.lock_time = self.tx.lock_time
    tx_context.version = self.tx.version
    tx_context.puzzle_script = b'' if self.tx.missing_unspent(tx_in_idx) else self.tx.unspents[tx_in_idx].script
    tx_context.solution_script = tx_in.script
    tx_context.witness_solution_stack = tx_in.witness
    tx_context.sequence = tx_in.sequence
    tx_context.tx_in_idx = tx_in_idx
    return tx_context
0.004354
def close(self):
    """Close the queue, signalling that no more data can be put into the queue."""
    self.read_queue.put(QueueClosed)
    self.write_queue.put(QueueClosed)
0.016129
def get_ad_info(self):
    """
    Polls for basic AD information (needed for determine password usage characteristics!)
    """
    logger.debug('Polling AD for basic info')
    ldap_filter = r'(distinguishedName=%s)' % self._tree
    attributes = MSADInfo.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        self._ldapinfo = MSADInfo.from_ldap(entry)
        return self._ldapinfo

    logger.debug('Poll finished!')
0.031401
def add(lhs, rhs): """Returns element-wise sum of the input arrays with broadcasting. Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and ``mx.nd.broadcast_plus(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape Parameters ---------- lhs : scalar or mxnet.ndarray.array First array to be added. rhs : scalar or mxnet.ndarray.array Second array to be added. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise sum of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(2).reshape((1,2)) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([[ 0., 1.]], dtype=float32) >>> (x+2).asnumpy() array([[ 3., 3., 3.], [ 3., 3., 3.]], dtype=float32) >>> (x+y).asnumpy() array([[ 1., 1., 1.], [ 2., 2., 2.]], dtype=float32) >>> mx.nd.add(x,y).asnumpy() array([[ 1., 1., 1.], [ 2., 2., 2.]], dtype=float32) >>> (z + y).asnumpy() array([[ 0., 1.], [ 1., 2.]], dtype=float32) """ # pylint: disable= no-member, protected-access return _ufunc_helper( lhs, rhs, op.broadcast_add, operator.add, _internal._plus_scalar, None)
0.001183
def render(self, flags: Flags) -> List[Text]:
    """
    Returns a list of randomly chosen outcomes for each sentence of the list.
    """
    return [x.render(flags) for x in self.sentences]
0.009259
def _get_char(self):
    """Read a character from input.
    @rtype: string
    """

    if self.ungotten_char is None:
        if self.eof:
            c = ''
        else:
            c = self.file.read(1)
            if c == '':
                self.eof = True
            elif c == '\n':
                self.line_number += 1
    else:
        c = self.ungotten_char
        self.ungotten_char = None
    return c
0.004158
def _get_offset_front_id_after_onset_sample_idx(onset_sample_idx, offset_fronts):
    """
    Returns the offset_front_id which corresponds to the offset front which occurs
    first entirely after the given onset sample_idx.
    """
    # get all the offset_front_ids
    offset_front_ids = [i for i in np.unique(offset_fronts) if i != 0]

    best_id_so_far = -1
    closest_offset_sample_idx = sys.maxsize
    for offset_front_id in offset_front_ids:
        # get all that offset front's indexes
        offset_front_idxs = _get_front_idxs_from_id(offset_fronts, offset_front_id)

        # get the sample indexes
        offset_front_sample_idxs = [s for _f, s in offset_front_idxs]

        # if each sample index is greater than onset_sample_idx, keep this offset front if it is the best one so far
        min_sample_idx = min(offset_front_sample_idxs)
        if min_sample_idx > onset_sample_idx and min_sample_idx < closest_offset_sample_idx:
            closest_offset_sample_idx = min_sample_idx
            best_id_so_far = offset_front_id

    assert best_id_so_far > 1 or best_id_so_far == -1
    return best_id_so_far
0.005291
def list(context, sort, limit, where, verbose):
    """list(context, sort, limit, where, verbose)

    List all products.

    >>> dcictl product list

    :param string sort: Field to apply sort
    :param integer limit: Max number of rows to return
    :param string where: An optional filter criteria
    :param boolean verbose: Display verbose output
    """
    result = product.list(context, sort=sort, limit=limit, where=where)
    utils.format_output(result, context.format, verbose=verbose)
0.002004
def _ram_buffer(self):
    """Setup the RAM buffer from the C++ code."""
    # get the address of the RAM
    address = _LIB.Memory(self._env)
    # create a buffer from the contents of the address location
    buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
    # create a NumPy array from the buffer
    return np.frombuffer(buffer_, dtype='uint8')
0.005025
def sanitize_for_archive(url, headers, payload):
    """Sanitize URL of a HTTP request by removing the token information
    before storing/retrieving archived items

    :param: url: HTTP url request
    :param: headers: HTTP headers request
    :param: payload: HTTP payload request

    :returns the sanitized url, plus the headers and payload
    """
    url = re.sub('bot.*/', 'botXXXXX/', url)
    return url, headers, payload
0.004264
def t_BIN_STRING(self, t):
    r'\'[01]*\'[bB]'
    value = t.value[1:-2]
    while value and value[0] == '0' and len(value) % 8:
        value = value[1:]
    # XXX raise in strict mode
    # if len(value) % 8:
    #     raise error.PySmiLexerError("Number of 0s and 1s have to divide by 8 in binary string %s" % t.value, lineno=t.lineno)
    return t
0.007732
def accepts(self, tp, converter):
    ''' Declare that other types may be converted to this property type.

    Args:
        tp (Property) :
            A type that may be converted automatically to this property type.

        converter (callable) :
            A function accepting ``value`` to perform conversion of the value
            to this property type.

    Returns:
        self

    '''
    tp = ParameterizedProperty._validate_type_param(tp)
    self.alternatives.append((tp, converter))
    return self
0.003407
def _latex_circuit_drawer(circuit, scale=0.7, filename=None, style=None, plot_barriers=True, reverse_bits=False, justify=None): """Draw a quantum circuit based on latex (Qcircuit package) Requires version >=2.6.0 of the qcircuit LaTeX package. Args: circuit (QuantumCircuit): a quantum circuit scale (float): scaling factor filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how the circuit should be justified. Returns: PIL.Image: an in-memory representation of the circuit diagram Raises: OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is missing. CalledProcessError: usually points errors during diagram creation. """ tmpfilename = 'circuit' with tempfile.TemporaryDirectory() as tmpdirname: tmppath = os.path.join(tmpdirname, tmpfilename + '.tex') _generate_latex_source(circuit, filename=tmppath, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify) image = None try: subprocess.run(["pdflatex", "-halt-on-error", "-output-directory={}".format(tmpdirname), "{}".format(tmpfilename + '.tex')], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True) except OSError as ex: if ex.errno == errno.ENOENT: logger.warning('WARNING: Unable to compile latex. ' 'Is `pdflatex` installed? ' 'Skipping latex circuit drawing...') raise except subprocess.CalledProcessError as ex: with open('latex_error.log', 'wb') as error_file: error_file.write(ex.stdout) logger.warning('WARNING Unable to compile latex. ' 'The output from the pdflatex command can ' 'be found in latex_error.log') raise else: try: base = os.path.join(tmpdirname, tmpfilename) subprocess.run(["pdftocairo", "-singlefile", "-png", "-q", base + '.pdf', base]) image = Image.open(base + '.png') image = utils._trim(image) os.remove(base + '.png') if filename: image.save(filename, 'PNG') except OSError as ex: if ex.errno == errno.ENOENT: logger.warning('WARNING: Unable to convert pdf to image. ' 'Is `poppler` installed? ' 'Skipping circuit drawing...') raise return image
0.00029
def _frame_received(self, frame):
    """
    Put the frame into the _rx_frames dict with a key of the frame_id.
    """
    try:
        self._rx_frames[frame["frame_id"]] = frame
    except KeyError:
        # Has no frame_id, ignore?
        pass
    _LOGGER.debug("Frame received: %s", frame)
    # Give the frame to any interested functions
    for handler in self._rx_handlers:
        handler(frame)
0.004405
def get_plugin_actions(self): """Return a list of actions related to plugin""" self.new_project_action = create_action(self, _("New Project..."), triggered=self.create_new_project) self.open_project_action = create_action(self, _("Open Project..."), triggered=lambda v: self.open_project()) self.close_project_action = create_action(self, _("Close Project"), triggered=self.close_project) self.delete_project_action = create_action(self, _("Delete Project"), triggered=self.delete_project) self.clear_recent_projects_action =\ create_action(self, _("Clear this list"), triggered=self.clear_recent_projects) self.edit_project_preferences_action =\ create_action(self, _("Project Preferences"), triggered=self.edit_project_preferences) self.recent_project_menu = QMenu(_("Recent Projects"), self) if self.main is not None: self.main.projects_menu_actions += [self.new_project_action, MENU_SEPARATOR, self.open_project_action, self.close_project_action, self.delete_project_action, MENU_SEPARATOR, self.recent_project_menu, self.toggle_view_action] self.setup_menu_actions() return []
0.003132
def __callbacks(self, msg):
    '''this method exists only to make profiling results easier to read'''
    if self.callback:
        self.callback(msg, *self.callback_args, **self.callback_kwargs)
0.013636
def manage(commands, argv=None, delim=':'): ''' Parses argv and runs neccessary command. Is to be used in manage.py file. Accept a dict with digest name as keys and instances of :class:`Cli<iktomi.management.commands.Cli>` objects as values. The format of command is the following:: ./manage.py digest_name:command_name[ arg1[ arg2[...]]][ --key1=kwarg1[...]] where command_name is a part of digest instance method name, args and kwargs are passed to the method. For details, see :class:`Cli<iktomi.management.commands.Cli>` docs. ''' commands = {(k.decode('utf-8') if isinstance(k, six.binary_type) else k): v for k, v in commands.items()} # Default django autocompletion script is registered to manage.py # We use the same name for this script and it seems to be ok # to implement the same interface def perform_auto_complete(commands): from .lazy import LazyCli cwords = os.environ['COMP_WORDS'].split()[1:] cword = int(os.environ['COMP_CWORD']) try: curr = cwords[cword - 1] except IndexError: curr = '' suggest = [] if len(cwords) > 1 and cwords[0] in commands.keys(): value = commands[cwords[0]] if isinstance(value, LazyCli): value = value.get_digest() for cmd_name, _ in value.get_funcs(): cmd_name = cmd_name[8:] suggest.append(cmd_name) if curr == ":": curr = '' else: suggest += list(commands.keys()) + [x+":" for x in commands.keys()] suggest.sort() output = u" ".join(filter(lambda x: x.startswith(curr), suggest)) sys.stdout.write(output) auto_complete = 'IKTOMI_AUTO_COMPLETE' in os.environ or \ 'DJANGO_AUTO_COMPLETE' in os.environ if auto_complete: perform_auto_complete(commands) sys.exit(0) argv = sys.argv if argv is None else argv if len(argv) > 1: cmd_name = argv[1] raw_args = argv[2:] args, kwargs = [], {} # parsing params for item in raw_args: if item.startswith('--'): splited = item[2:].split('=', 1) if len(splited) == 2: k,v = splited elif len(splited) == 1: k,v = splited[0], True kwargs[k] = v else: args.append(item) # trying to get command instance if delim in cmd_name: digest_name, command = cmd_name.split(delim) else: digest_name = cmd_name command = None try: digest = commands[digest_name] except KeyError: _command_list(commands) sys.exit('ERROR: Command "{}" not found'.format(digest_name)) try: if command is None: if isinstance(digest, Cli): help_ = digest.description(argv[0], digest_name) sys.stdout.write(help_) sys.exit('ERROR: "{}" command digest requires command name'\ .format(digest_name)) digest(*args, **kwargs) else: digest(command, *args, **kwargs) except CommandNotFound: help_ = digest.description(argv[0], digest_name) sys.stdout.write(help_) sys.exit('ERROR: Command "{}:{}" not found'.format(digest_name, command)) else: _command_list(commands) sys.exit('Please provide any command')
0.002459
def _create_socket(self):
    """
    Creates a new SSL enabled socket and sets its timeout.
    """
    log.warning('No certificate check is performed for SSL connections')
    s = super(SSL, self)._create_socket()
    return wrap_socket(s)
0.007547
def get_tags(name=None, instance_id=None, call=None, location=None, kwargs=None, resource_id=None): # pylint: disable=W0613 ''' Retrieve tags for a resource. Normally a VM name or instance_id is passed in, but a resource_id may be passed instead. If both are passed in, the instance_id will be used. CLI Examples: .. code-block:: bash salt-cloud -a get_tags mymachine salt-cloud -a get_tags resource_id=vol-3267ab32 ''' if location is None: location = get_location() if instance_id is None: if resource_id is None: if name: instance_id = _get_node(name)['instanceId'] elif 'instance_id' in kwargs: instance_id = kwargs['instance_id'] elif 'resource_id' in kwargs: instance_id = kwargs['resource_id'] else: instance_id = resource_id params = {'Action': 'DescribeTags', 'Filter.1.Name': 'resource-id', 'Filter.1.Value': instance_id} return aws.query(params, setname='tagSet', location=location, provider=get_provider(), opts=__opts__, sigver='4')
0.000752
def get_edges(self):
    """Get the directed edges from GO term to GO term."""
    edge_from_to = []
    for parent, children in self.p_from_cs.items():
        for child in children:
            edge_from_to.append((child, parent))
    for parent, children in self.c_from_ps.items():
        for child in children:
            edge_from_to.append((child, parent))
    return edge_from_to
0.004717
def getString(self, config, relation=0):
    """
    Return a representation of a Radix according to config.

    :param DisplayConfig config: configuration
    :param int relation: the relation of this value to actual value
    """
    return String(config, self.base).xform(self, relation)
0.006349
def _is_valid_part(self):
    """
    Return True if the value of component in attribute "part" is valid,
    and otherwise False.

    :returns: True if value of component is valid, False otherwise
    :rtype: boolean
    """

    comp_str = self._encoded_value

    # Check if value of component do not have wildcard
    if ((comp_str.find(self.WILDCARD_ONE) == -1) and
            (comp_str.find(self.WILDCARD_MULTI) == -1)):
        return super(CPEComponent2_3, self)._is_valid_part()

    # Compilation of regular expression associated with value of part
    part_pattern = "^(\{0}|\{1})$".format(self.WILDCARD_ONE,
                                          self.WILDCARD_MULTI)
    part_rxc = re.compile(part_pattern)

    return part_rxc.match(comp_str) is not None
0.004802
def start(self):
    """Start the component's event loop (thread-safe).

    After the event loop is started the Qt thread calls the component's
    :py:meth:`~Component.start_event` method, then calls its
    :py:meth:`~Component.new_frame_event` and
    :py:meth:`~Component.new_config_event` methods as required until
    :py:meth:`~Component.stop` is called. Finally the component's
    :py:meth:`~Component.stop_event` method is called before the event
    loop terminates.

    """
    if self._running:
        raise RuntimeError('Component {} is already running'.format(
            self._owner.__class__.__name__))
    self._running = True
    self.queue_command(self._owner.start_event)
    # process any events that arrived before we started
    while self._incoming:
        self.queue_command(self._incoming.popleft())
0.002225
def run_out_of_sample_mds(boot_collection, ref_collection, ref_distance_matrix, index, dimensions,
                          task=_fast_geo, rooted=False, **kwargs):
    """
    index = index of the locus the bootstrap sample corresponds to - only important if
    using recalc=True in kwargs
    """
    fit = np.empty((len(boot_collection), dimensions))
    if ISPY3:
        query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees]
        ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees]
    else:
        query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees]
        ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees]
    for i, tree in enumerate(query_trees):
        distvec = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees])
        oos = OutOfSampleMDS(ref_distance_matrix)
        fit[i] = oos.fit(index, distvec, dimensions=dimensions, **kwargs)
    return fit
0.007239
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    '''

    import os, re
    import matplotlib.font_manager

    name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]

    fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    matches = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    for match in matches:
        for font in fonts:
            if re.match(match, font):
                return name(font)

    return None
0.025597
def traverse_preorder(self, leaves=True, internal=True):
    '''Perform a preorder traversal starting at this ``Node`` object

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``

        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
    '''
    s = deque(); s.append(self)
    while len(s) != 0:
        n = s.pop()
        if (leaves and n.is_leaf()) or (internal and not n.is_leaf()):
            yield n
        s.extend(n.children)
0.009058
def _de_casteljau_one_round(nodes, degree, lambda1, lambda2, lambda3): r"""Performs one "round" of the de Casteljau algorithm for surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. .. note:: This is a helper function, used by :func:`make_transform` and :func:`_specialize_surface` (and :func:`make_transform` is **only** used by :func:`_specialize_surface`). Converts the ``nodes`` into a basis for a surface one degree smaller by using the barycentric weights: .. math:: q_{i, j, k} = \lambda_1 \cdot p_{i + 1, j, k} + \lambda_2 \cdot p_{i, j + 1, k} + \lambda_2 \cdot p_{i, j, k + 1} .. note: For degree :math:`d`, the number of nodes should be :math:`(d + 1)(d + 2)/2`, but we don't verify this. Args: nodes (numpy.ndarray): The nodes to reduce. degree (int): The degree of the surface. lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. Returns: numpy.ndarray: The converted nodes. """ dimension, num_nodes = nodes.shape num_new_nodes = num_nodes - degree - 1 new_nodes = np.empty((dimension, num_new_nodes), order="F") index = 0 # parent_i1 = index + k # parent_i2 = index + k + 1 # parent_i3 = index + degree + 1 parent_i1 = 0 parent_i2 = 1 parent_i3 = degree + 1 for k in six.moves.xrange(degree): for unused_j in six.moves.xrange(degree - k): # NOTE: i = (degree - 1) - j - k new_nodes[:, index] = ( lambda1 * nodes[:, parent_i1] + lambda2 * nodes[:, parent_i2] + lambda3 * nodes[:, parent_i3] ) # Update all the indices. parent_i1 += 1 parent_i2 += 1 parent_i3 += 1 index += 1 # Update the indices that depend on k. parent_i1 += 1 parent_i2 += 1 return new_nodes
0.000466
def cli(variant_file, vep, split):
    """Parses a vcf file.\n
        \n
        Usage:\n
            parser infile.vcf\n
        If pipe:\n
            parser -
    """
    from datetime import datetime
    from pprint import pprint as pp

    if variant_file == '-':
        my_parser = VCFParser(fsock=sys.stdin, split_variants=split)
    else:
        my_parser = VCFParser(infile = variant_file, split_variants=split)

    start = datetime.now()
    nr_of_variants = 0
    for line in my_parser.metadata.print_header():
        print(line)
    for variant in my_parser:
        pp(variant)
        nr_of_variants += 1
    print('Number of variants: %s' % nr_of_variants)
0.005926
def update(self, values):
    """ Updates this row """
    response = self.session.patch(self.build_url(''), data={'values': values})
    if not response:
        return False

    data = response.json()

    self.values = data.get('values', self.values)

    return True
0.010135
def _report_net_metrics(self, container, tags): """Find container network metrics by looking at /proc/$PID/net/dev of the container process.""" if self._disable_net_metrics: self.log.debug("Network metrics are disabled. Skipping") return proc_net_file = os.path.join(container['_proc_root'], 'net/dev') try: if container['Id'] in self.network_mappings: networks = self.network_mappings[container['Id']] else: networks = self.docker_util.get_container_network_mapping(container) if not networks: networks = {'eth0': 'bridge'} self.network_mappings[container['Id']] = networks except Exception as e: # Revert to previous behaviour if the method is missing or failing # Debug message will only appear once per container, then the cache is used self.log.debug("Failed to build docker network mapping, using failsafe. Exception: {0}".format(e)) networks = {'eth0': 'bridge'} self.network_mappings[container['Id']] = networks try: with open(proc_net_file, 'r') as fp: lines = fp.readlines() """Two first lines are headers: Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed """ for l in lines[2:]: cols = l.split(':', 1) interface_name = str(cols[0]).strip() if interface_name in networks: net_tags = tags + ['docker_network:'+networks[interface_name]] x = cols[1].split() m_func = FUNC_MAP[RATE][self.use_histogram] m_func(self, "docker.net.bytes_rcvd", long(x[0]), net_tags) m_func(self, "docker.net.bytes_sent", long(x[8]), net_tags) except IOError as e: # It is possible that the container got stopped between the API call and now self.log.debug("Cannot read network interface file, container likely raced to finish : {0}".format(e))
0.005973
def saveDirectory(alias):
    """save a directory to a certain alias/nickname"""
    if not settings.platformCompatible():
        return False
    dataFile = open(settings.getDataFile(), "wb")
    currentDirectory = os.path.abspath(".")
    directory = {alias : currentDirectory}
    pickle.dump(directory, dataFile)
    speech.success(alias + " will now link to " + currentDirectory + ".")
    speech.success("Tip: use 'hallie go to " + alias + "' to change to this directory.")
0.026667
def set_meta_all(self, props):
    """Set metadata values for collection.

    ``props`` a dict with values for properties.

    """
    delta_props = self.get_meta()
    for key in delta_props.keys():
        if key not in props:
            delta_props[key] = None
    delta_props.update(props)
    self.set_meta(delta_props)
0.00551
def deltas(predicted_values, rewards, mask, gamma=0.99):
    r"""Computes TD-residuals from V(s) and rewards.

    Where a `delta`, i.e. a td-residual is defined as:

    delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.

    Args:
        predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was
          squeezed. These represent V(s_bt) for b < B and t < T+1
        rewards: ndarray of shape (B, T) of rewards.
        mask: ndarray of shape (B, T) of mask for rewards.
        gamma: float, discount factor.

    Returns:
        ndarray of shape (B, T) of one-step TD-residuals.
    """

    # `d`s are basically one-step TD residuals.
    d = []
    _, T = rewards.shape  # pylint: disable=invalid-name
    for t in range(T):
        d.append(rewards[:, t] + (gamma * predicted_values[:, t + 1]) -
                 predicted_values[:, t])

    return np.array(d).T * mask
0.008314
def select_as_dict(self, table_name, columns=None, where=None, extra=None):
    """
    Get data in the database and return fetched data as a
    |OrderedDict| list.

    :param str table_name: |arg_select_table_name|
    :param list columns: |arg_select_as_xx_columns|
    :param where: |arg_select_where|
    :type where: |arg_where_type|
    :param str extra: |arg_select_extra|
    :return: Table data as |OrderedDict| instances.
    :rtype: |list| of |OrderedDict|
    :raises simplesqlite.NullDatabaseConnectionError:
        |raises_check_connection|
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|
    :raises simplesqlite.OperationalError: |raises_operational_error|

    :Example:
        :ref:`example-select-as-dict`
    """

    return self.select_as_tabledata(table_name, columns, where, extra).as_dict().get(table_name)
0.003168
def en_dis_able_interrupts(self, mask):
    """
    This callback might be used by a Register to enable/disable Interrupts.

    ``mask`` is an ``int``, the Interrupts are bits in this mask, the first
    registered interrupt has the bit ``(1 << 0)``, the n-th Interrupt the bit
    ``(1 << (n - 1))``. If the bit is cleared (``0``) the Interrupt will be
    disabled.
    """
    for shift, interrupt in enumerate(self.interrupts):
        if(mask & (1 << shift)):
            interrupt.enable = True
        else:
            interrupt.enable = False
0.027559
def attach_rconfiguration(context, id, name, topic_id, component_types, data):
    """attach_rconfiguration(context, name, topic_id, component_types, data):

    Attach an rconfiguration to a Remote CI

    >>> dcictl remoteci-attach-rconfiguration ID [OPTIONS]

    :param string id: id of the remoteci
    :param string name: name of the rconfiguration [required]
    :param string topic_id: ID of the topic to associate this rconfiguration
        with [required]
    :param string component_types: list data to represent the overriden
        component_types
    :param string data: JSON data of the rconfiguration
    """
    result = remoteci.add_rconfiguration(context, id, name, topic_id,
                                         component_types, data)
    utils.format_output(result, context.format)
0.001236
def tplot_restore(filename): """ This function will restore tplot variables that have been saved with the "tplot_save" command. .. note:: This function is compatible with the IDL tplot_save routine. If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file. Not all plot options will transfer over at this time. Parameters: filename : str The file name and full path generated by the "tplot_save" command. Returns: None Examples: >>> # Restore the saved data from the tplot_save example >>> import pytplot >>> pytplot.restore('C:/temp/variable1.pytplot') """ #Error check if not (os.path.isfile(filename)): print("Not a valid file name") return #Check if the restored file was an IDL file if filename.endswith('.tplot'): temp_tplot = readsav(filename) for i in range(len(temp_tplot['dq'])): data_name = temp_tplot['dq'][i][0].decode("utf-8") temp_x_data = temp_tplot['dq'][i][1][0][0] #Pandas reads in data the other way I guess if len(temp_tplot['dq'][i][1][0][2].shape) == 2: temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2]) else: temp_y_data = temp_tplot['dq'][i][1][0][2] #If there are more than 4 fields, that means it is a spectrogram if len(temp_tplot['dq'][i][1][0]) > 4: temp_v_data = temp_tplot['dq'][i][1][0][4] #Change from little endian to big endian, since pandas apparently hates little endian #We might want to move this into the store_data procedure eventually if (temp_x_data.dtype.byteorder == '>'): temp_x_data = temp_x_data.byteswap().newbyteorder() if (temp_y_data.dtype.byteorder == '>'): temp_y_data = temp_y_data.byteswap().newbyteorder() if (temp_v_data.dtype.byteorder == '>'): temp_v_data = temp_v_data.byteswap().newbyteorder() store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data}) else: #Change from little endian to big endian, since pandas apparently hates little endian #We might want to move this into the store_data procedure eventually if (temp_x_data.dtype.byteorder == '>'): temp_x_data = temp_x_data.byteswap().newbyteorder() if (temp_y_data.dtype.byteorder == '>'): temp_y_data = temp_y_data.byteswap().newbyteorder() store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data}) if temp_tplot['dq'][i][3].dtype.names is not None: for option_name in temp_tplot['dq'][i][3].dtype.names: options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0]) data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist() data_quants[data_name].dtype = temp_tplot['dq'][i][5] data_quants[data_name].create_time = temp_tplot['dq'][i][6] for option_name in temp_tplot['tv'][0][0].dtype.names: if option_name == 'TRANGE': tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0]) if option_name == 'WSIZE': tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0]) if option_name == 'VAR_LABEL': tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0]) if 'P' in temp_tplot['tv'][0][1].tolist(): for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names: if option_name == 'TITLE': tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0]) #temp_tplot['tv'][0][1] is all of the "settings" variables #temp_tplot['tv'][0][1]['D'][0] is "device" options #temp_tplot['tv'][0][1]['P'][0] is "plot" options #temp_tplot['tv'][0][1]['X'][0] is x axis options #temp_tplot['tv'][0][1]['Y'][0] is y axis options #################################################################### else: temp = pickle.load(open(filename,"rb")) num_data_quants = temp[0] for i in range(0, num_data_quants): 
data_quants[temp[i+1].name] = temp[i+1] tplot_opt_glob = temp[num_data_quants+1] return
0.011762
def get_staking_leaderboard(self, round_num=0, tournament=1): """Retrieves the leaderboard of the staking competition for the given round. Args: round_num (int, optional): The round you are interested in, defaults to current round. tournament (int, optional): ID of the tournament, defaults to 1 Returns: list: list of stakers (`dict`) Each stake in the list as the following structure: * username (`str`) * consistency (`float`) * liveLogloss (`float` or `None`) * liveAuroc (`float` or `None`) * validationLogloss (`float`) * validationAuroc (`float` or `None`) * stake (`dict`) * confidence (`decimal.Decimal`) * insertedAt (`datetime`) * soc (`decimal.Decimal`) * txHash (`str`) * value (`decimal.Decimal`) Example: >>> NumerAPI().get_staking_leaderboard(99) [{'consistency': 83.33333333333334, 'liveLogloss': 0.6941153941722517, 'liveAuroc': 0.5241153941722517, 'stake': {'confidence': Decimal('0.055'), 'insertedAt': datetime.datetime(2018, 3, 18, 0, 20, 31, 724728, tzinfo=tzutc()), 'soc': Decimal('18.18'), 'txHash': '0xf1460c7fe08e7920d3e61492501337db5c89bff22af9fd88b9ff1ad604939f61', 'value': Decimal('1.00')}, 'username': 'ci_wp', 'validationLogloss': 0.692269984475575}, 'validationAuroc': 0.512269984475575}, .. ] """ msg = "getting stakes for tournament {} round {}" self.logger.info(msg.format(tournament, round_num)) query = ''' query($number: Int! $tournament: Int!) { rounds(number: $number tournament: $tournament) { leaderboard { consistency liveLogloss liveAuroc username validationLogloss validationAuroc stake { insertedAt soc confidence value txHash } } } } ''' arguments = {'number': round_num, 'tournament': tournament} result = self.raw_query(query, arguments)['data']['rounds'][0] if result is None: return None stakes = result['leaderboard'] # filter those with actual stakes stakes = [item for item in stakes if item["stake"] is not None] # convert strings to python objects for s in stakes: utils.replace(s["stake"], "insertedAt", utils.parse_datetime_string) utils.replace(s["stake"], "confidence", utils.parse_float_string) utils.replace(s["stake"], "soc", utils.parse_float_string) utils.replace(s["stake"], "value", utils.parse_float_string) return stakes
0.001229
def withdraw(self, account_id, **params):
    """https://developers.coinbase.com/api/v2#withdraw-funds"""
    for required in ['payment_method', 'amount', 'currency']:
        if required not in params:
            raise ValueError("Missing required parameter: %s" % required)
    response = self._post('v2', 'accounts', account_id, 'withdrawals', data=params)
    return self._make_api_object(response, Withdrawal)
0.006834
def mutate(self, mutation, timeout=None, metadata=None, credentials=None):
    """Runs mutate operation."""
    return self.stub.Mutate(mutation,
                            timeout=timeout,
                            metadata=metadata,
                            credentials=credentials)
0.00813
def set(self, item, value): """ Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical # TODO(EA): Remove an is_extension_ when all extension types satisfy # the interface value_is_extension_type = (is_extension_type(value) or is_extension_array_dtype(value)) # categorical/spares/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: if value.ndim == self.ndim - 1: value = _safe_reshape(value, (1,) + value.shape) def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible ' 'with manager shape') try: loc = self.items.get_loc(item) except KeyError: # This item wasn't present, just insert at end self.insert(len(self.items), item, value) return if isinstance(loc, int): loc = [loc] blknos = self._blknos[loc] blklocs = self._blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno, val_locs in libinternals.get_blkno_placements(blknos, self.nblocks, group=True): blk = self.blocks[blkno] blk_locs = blklocs[val_locs.indexer] if blk.should_store(value): blk.set(blk_locs, value_getitem(val_locs)) else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno) else: self._blklocs[blk.mgr_locs.indexer] = -1 blk.delete(blk_locs) self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.int64) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, allow_fill=False) self.blocks = tuple(blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)) if unfit_val_locs: unfit_mgr_locs = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_mgr_locs) new_blocks = [] if value_is_extension_type: # This code (ab-)uses the fact that sparse blocks contain only # one item. new_blocks.extend( make_block(values=value.copy(), ndim=self.ndim, placement=slice(mgr_loc, mgr_loc + 1)) for mgr_loc in unfit_mgr_locs) self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + len(self.blocks)) self._blklocs[unfit_mgr_locs] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( make_block(values=value_getitem(unfit_val_items), ndim=self.ndim, placement=unfit_mgr_locs)) self._blknos[unfit_mgr_locs] = len(self.blocks) self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False
0.000429
def mixins(self, name):
    """ Search mixins for name.
    Allow '>' to be ignored. '.a .b()' == '.a > .b()'
    Args:
        name (string): Search term
    Returns:
        Mixin object list OR False
    """
    m = self._smixins(name)
    if m:
        return m
    return self._smixins(name.replace('?>?', ' '))
0.005556
def get_text(self):
    """Return extended progress bar text"""
    done_units = to_reasonable_unit(self.done, self.units)
    current = round(self.current / done_units['multiplier'], 2)
    percent = int(self.current * 100 / self.done)
    return '{0:.2f} of {1:.2f} {2} ({3}%)'.format(current, done_units['val'], done_units['label'], percent)
0.003802
def xmlresponse(py_data): """ Generates an XML formatted method response for the given python data. :param py_data | <variant> """ xroot = ElementTree.Element('methodResponse') xparams = ElementTree.SubElement(xroot, 'params') xparam = ElementTree.SubElement(xparams, 'param') type_map = {'bool': 'boolean', 'float': 'double', 'str': 'string', 'unicode': 'string', 'datetime': 'dateTime.iso8601', 'date': 'date.iso8601', 'time': 'time.iso8601'} def xobj(xparent, py_obj): # convert a list of information if type(py_obj) in (tuple, list): xarr = ElementTree.SubElement(xparent, 'array') xdata = ElementTree.SubElement(xarr, 'data') for val in py_obj: xval = ElementTree.SubElement(xdata, 'value') xobj(xval, val) # convert a dictionary of information elif type(py_obj) == dict: xstruct = ElementTree.SubElement(xparent, 'struct') for key, val in py_obj.items(): xmember = ElementTree.SubElement(xstruct, 'member') xname = ElementTree.SubElement(xmember, 'name') xname.text = key xval = ElementTree.SubElement(xmember, 'value') xobj(xval, val) # convert a None value elif py_obj is None: ElementTree.SubElement(xparent, 'nil') # convert a basic value else: typ = type(py_obj).__name__ typ = type_map.get(typ, typ) xitem = ElementTree.SubElement(xparent, typ) # convert a datetime/date/time if isinstance(py_obj, datetime.date) or \ isinstance(py_obj, datetime.time) or \ isinstance(py_obj, datetime.datetime): if py_obj.tzinfo and pytz: data = py_obj.astimezone(pytz.utc).replace(tzinfo=None) xitem.text = data.isoformat() else: xitem.text = py_obj.isoformat() # convert a boolean elif type(py_obj) == bool: xitem.text = nstr(int(py_obj)) # convert a non-string object elif not type(py_obj) in (str, unicode): xitem.text = nstr(py_obj) # convert a string object else: xitem.text = py_obj xobj(xparam, py_data) projex.text.xmlindent(xroot) return ElementTree.tostring(xroot)
0.000769
def __dispatch_msg(self, message):
    """Verify the signature and update RequestEvents / perform callbacks

    Note messages with an invalid wrapper, invalid hash, invalid sequence number or unexpected
    clientRef will be sent to debug_bad callback.
    """
    msg = self.__validate_decode_msg(message)
    if msg:
        msg, seqnum = msg
    else:
        self.__fire_callback(_CB_DEBUG_BAD, message.body, message.content_type)
        return

    if DEBUG_ENABLED:
        logger.debug(decode_rcvd_msg('decode_rcvd_msg', msg, seqnum))
    self.__fire_callback(_CB_DEBUG_RCVD, msg)

    # no reference, or set by client (not container)
    if msg[M_TYPE] not in _RSP_CONTAINER_REF:
        # solicitied
        if msg[M_CLIENTREF]:
            if not self.__handle_known_solicited(msg):
                logger.debug('Ignoring response for unknown request %s of type %s',
                             msg[M_CLIENTREF], msg[M_TYPE])
        # unsolicitied
        else:
            self.__perform_unsolicited_callbacks(msg)
    # unsolicited but can have reference set by container
    elif msg[M_TYPE] == E_CONTROLREQ:
        self.__handle_controlreq(msg[M_PAYLOAD], msg[M_CLIENTREF])
    else:
        logger.error('Unhandled unsolicited message of type %s', msg[M_TYPE])
0.004409
def query(input, representation, resolvers=None, **kwargs):
    """ Get all results for resolving input to the specified output representation """
    apiurl = API_BASE+'/%s/%s/xml' % (urlquote(input), representation)
    if resolvers:
        kwargs['resolver'] = ",".join(resolvers)
    if kwargs:
        apiurl += '?%s' % urlencode(kwargs)
    result = []
    try:
        tree = ET.parse(urlopen(apiurl))
        for data in tree.findall(".//data"):
            datadict = {'resolver': data.attrib['resolver'],
                        'notation': data.attrib['notation'],
                        'value': []}
            for item in data.findall("item"):
                datadict['value'].append(item.text)
            if len(datadict['value']) == 1:
                datadict['value'] = datadict['value'][0]
            result.append(datadict)
    except HTTPError:
        # TODO: Proper handling of 404, for now just returns None
        pass
    return result if result else None
0.006104
def format_to_json(data):
    """Converts `data` into json
    If stdout is a tty it performs a pretty print.
    """
    if sys.stdout.isatty():
        return json.dumps(data, indent=4, separators=(',', ': '))
    else:
        return json.dumps(data)
0.003953
def remove(name_or_path):
    '''Remove an environment'''

    click.echo()
    try:
        r = cpenv.resolve(name_or_path)
    except cpenv.ResolveError as e:
        click.echo(e)
        return

    obj = r.resolved[0]
    if not isinstance(obj, cpenv.VirtualEnvironment):
        click.echo('{} is a module. Use `cpenv module remove` instead.')
        return

    click.echo(format_objects([obj]))
    click.echo()

    user_confirmed = click.confirm(
        red('Are you sure you want to remove this environment?')
    )
    if user_confirmed:
        click.echo('Attempting to remove...', nl=False)

        try:
            obj.remove()
        except Exception as e:
            click.echo(bold_red('FAIL'))
            click.echo(e)
        else:
            click.echo(bold_green('OK!'))
0.00125
def _do_east_asian(self):
    """Fetch and update east-asian tables."""
    self._do_retrieve(self.EAW_URL, self.EAW_IN)
    (version, date, values) = self._parse_east_asian(
        fname=self.EAW_IN,
        properties=(u'W', u'F',)
    )
    table = self._make_table(values)
    self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table)
0.005236
def cint8_array_to_numpy(cptr, length):
    """Convert a ctypes int pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_int8)):
        return np.fromiter(cptr, dtype=np.int8, count=length)
    else:
        raise RuntimeError('Expected int pointer')
0.003559
def _software_params_to_argparse(parameters):
    """
    Converts a SoftwareParameterCollection into an ArgumentParser object.

    Parameters
    ----------
    parameters: SoftwareParameterCollection
        The software parameters

    Returns
    -------
    argparse: ArgumentParser
        An initialized argument parser
    """
    # Check software parameters
    argparse = ArgumentParser()
    boolean_defaults = {}
    for parameter in parameters:
        arg_desc = {"dest": parameter.name, "required": parameter.required, "help": ""}  # TODO add help
        if parameter.type == "Boolean":
            default = _to_bool(parameter.defaultParamValue)
            arg_desc["action"] = "store_true" if not default else "store_false"
            boolean_defaults[parameter.name] = default
        else:
            python_type = _convert_type(parameter.type)
            arg_desc["type"] = python_type
            arg_desc["default"] = None if parameter.defaultParamValue is None else python_type(parameter.defaultParamValue)
        argparse.add_argument(*_cytomine_parameter_name_synonyms(parameter.name), **arg_desc)
    argparse.set_defaults(**boolean_defaults)
    return argparse
0.00335
def iter_options(grouped_choices, cutoff=None, cutoff_text=None): """ Helper function for options and option groups in templates. """ class StartOptionGroup(object): start_option_group = True end_option_group = False def __init__(self, label): self.label = label class EndOptionGroup(object): start_option_group = False end_option_group = True class Option(object): start_option_group = False end_option_group = False def __init__(self, value, display_text, disabled=False): self.value = value self.display_text = display_text self.disabled = disabled count = 0 for key, value in grouped_choices.items(): if cutoff and count >= cutoff: break if isinstance(value, dict): yield StartOptionGroup(label=key) for sub_key, sub_value in value.items(): if cutoff and count >= cutoff: break yield Option(value=sub_key, display_text=sub_value) count += 1 yield EndOptionGroup() else: yield Option(value=key, display_text=value) count += 1 if cutoff and count >= cutoff and cutoff_text: cutoff_text = cutoff_text.format(count=cutoff) yield Option(value='n/a', display_text=cutoff_text, disabled=True)
0.000702
def cif(self):
    """
    https://es.wikipedia.org/wiki/C%C3%B3digo_de_identificaci%C3%B3n_fiscal
    :return: a random Spanish CIF
    """
    first_chr = random.choice('ABCDEFGHJNPQRSUVW')
    doi_body = str(random.randrange(0, 10000000)).zfill(7)
    cif = first_chr + doi_body
    return cif + self._calculate_control_cif(cif)
0.005495
def fastaSubtract(fastaFiles):
    """
    Given a list of open file descriptors, each with FASTA content,
    remove the reads found in the 2nd, 3rd, etc files from the first file
    in the list.

    @param fastaFiles: a C{list} of FASTA filenames.
    @raises IndexError: if passed an empty list.
    @return: An iterator producing C{Bio.SeqRecord} instances suitable for
        writing to a file using C{Bio.SeqIO.write}.

    """
    reads = {}
    firstFile = fastaFiles.pop(0)
    for seq in SeqIO.parse(firstFile, 'fasta'):
        reads[seq.id] = seq

    for fastaFile in fastaFiles:
        for seq in SeqIO.parse(fastaFile, 'fasta'):
            # Make sure that reads with the same id have the same sequence.
            if seq.id in reads:
                assert str(seq.seq) == str(reads[seq.id].seq)
            reads.pop(seq.id, None)

    return iter(reads.values())
0.001129
def calc_synch_snu_ujy(b, ne, delta, sinth, width, elongation, dist, ghz, E0=1.): """Calculate a flux density from pure gyrosynchrotron emission. This combines Dulk (1985) equations 40 and 41, which are fitting functions assuming a power-law electron population, with standard radiative transfer through a uniform medium. Arguments are: b Magnetic field strength in Gauss ne The density of electrons per cubic centimeter with energies greater than 10 keV. delta The power-law index defining the energy distribution of the electron population, with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``. sinth The sine of the angle between the line of sight and the magnetic field direction. It's not specified for what range of values the expressions work well. width The characteristic cross-sectional width of the emitting region, in cm. elongation The the elongation of the emitting region; ``depth = width * elongation``. dist The distance to the emitting region, in cm. ghz The frequencies at which to evaluate the spectrum, **in GHz**. E0 The minimum energy of electrons to consider, in MeV. Defaults to 1 so that these functions can be called identically to the gyrosynchrotron functions. The return value is the flux density **in μJy**. The arguments can be Numpy arrays. No complaints are raised if you attempt to use the equations outside of their range of validity. """ hz = ghz * 1e9 eta = calc_synch_eta(b, ne, delta, sinth, hz, E0=E0) kappa = calc_synch_kappa(b, ne, delta, sinth, hz, E0=E0) snu = calc_snu(eta, kappa, width, elongation, dist) ujy = snu * cgs.jypercgs * 1e6 return ujy
0.005036
def mirror(self, axes='x', inplace=False):
    """
    Generates a symmetry of the Space respect global axes.

    :param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
    :type axes: str
    :param inplace: If True, the new ``pyny.Space`` is copied and added
        to the current ``pyny.Space``. If False, it returns the new
        ``pyny.Space``.
    :type inplace: bool
    :returns: None, ``pyny.Space``
    """
    state = Polygon.verify
    Polygon.verify = False

    mirror = np.ones(3)
    if 'x' in axes:
        mirror *= np.array([-1, 1, 1])
    if 'y' in axes:
        mirror *= np.array([1, -1, 1])
    if 'z' in axes:
        mirror *= np.array([1, 1, -1])

    map_ = self.get_map()[1] * mirror
    space = self.map2pyny(map_)
    Polygon.verify = state

    if inplace:
        self.add_spaces(space)
        return None
    else:
        return space
0.007715
def __do_parse(self, pattern_str): """ Parses the given pattern and returns the antlr parse tree. :param pattern_str: The STIX pattern :return: The parse tree :raises ParseException: If there is a parse error """ in_ = antlr4.InputStream(pattern_str) lexer = STIXPatternLexer(in_) lexer.removeErrorListeners() # remove the default "console" listener token_stream = antlr4.CommonTokenStream(lexer) parser = STIXPatternParser(token_stream) parser.removeErrorListeners() # remove the default "console" listener error_listener = ParserErrorListener() parser.addErrorListener(error_listener) # I found no public API for this... # The default error handler tries to keep parsing, and I don't # think that's appropriate here. (These error handlers are only for # handling the built-in RecognitionException errors.) parser._errHandler = antlr4.BailErrorStrategy() # To improve error messages, replace "<INVALID>" in the literal # names with symbolic names. This is a hack, but seemed like # the simplest workaround. for i, lit_name in enumerate(parser.literalNames): if lit_name == u"<INVALID>": parser.literalNames[i] = parser.symbolicNames[i] # parser.setTrace(True) try: tree = parser.pattern() # print(tree.toStringTree(recog=parser)) return tree except antlr4.error.Errors.ParseCancellationException as e: # The cancellation exception wraps the real RecognitionException # which caused the parser to bail. real_exc = e.args[0] # I want to bail when the first error is hit. But I also want # a decent error message. When an error is encountered in # Parser.match(), the BailErrorStrategy produces the # ParseCancellationException. It is not a subclass of # RecognitionException, so none of the 'except' clauses which would # normally report an error are invoked. # # Error message creation is buried in the ErrorStrategy, and I can # (ab)use the API to get a message: register an error listener with # the parser, force an error report, then get the message out of the # listener. Error listener registration is above; now we force its # invocation. Wish this could be cleaner... parser._errHandler.reportError(parser, real_exc) # should probably chain exceptions if we can... # Should I report the cancellation or recognition exception as the # cause...? six.raise_from(ParseException(error_listener.error_message), real_exc)
0.001042
def put(self, filename, data): """Create or update the specified file with the provided data. """ # Open the file for writing on the board and write chunks of data. self._pyboard.enter_raw_repl() self._pyboard.exec_("f = open('{0}', 'wb')".format(filename)) size = len(data) # Loop through and write a buffer size chunk of data at a time. for i in range(0, size, BUFFER_SIZE): chunk_size = min(BUFFER_SIZE, size - i) chunk = repr(data[i : i + chunk_size]) # Make sure to send explicit byte strings (handles python 2 compatibility). if not chunk.startswith("b"): chunk = "b" + chunk self._pyboard.exec_("f.write({0})".format(chunk)) self._pyboard.exec_("f.close()") self._pyboard.exit_raw_repl()
0.004706
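A short, hedged usage sketch for the put() method above, assuming it belongs to a file-transfer helper class (called Files here) that is constructed around a connected pyboard object; the class and constructor names are illustrative and may differ in the actual package.

# hypothetical wiring; adjust names to the real package layout
board = Files(Pyboard('/dev/ttyUSB0'))
with open('main.py', 'rb') as handle:
    board.put('main.py', handle.read())    # copy a local script onto the board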
def _zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5): "Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom." s = 1-1/scale col_c = s * (2*col_pct - 1) row_c = s * (2*row_pct - 1) return _get_zoom_mat(1/scale, 1/scale, col_c, row_c)
0.034843
def integer_decimation(data, decimation_factor): """ Downsampling by applying a simple integer decimation. Make sure that no signal is present in frequency bands above the new Nyquist frequency (samp_rate/2/decimation_factor), e.g. by applying a lowpass filter beforehand! New sampling rate is old sampling rate divided by decimation_factor. :type data: numpy.ndarray :param data: Data to filter. :param decimation_factor: Integer decimation factor :return: Downsampled data (array length: old length / decimation_factor) """ if not isinstance(decimation_factor, int): msg = "Decimation_factor must be an integer!" raise TypeError(msg) # reshape and only use every decimation_factor-th sample data = np.array(data[::decimation_factor]) return data
0.001211
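A minimal self-contained example of the decimation helper above: a 5 Hz tone sampled at 1000 Hz is decimated by a factor of 5, i.e. down to 200 Hz (new Nyquist 100 Hz, safely above the tone).

import numpy as np

t = np.arange(0, 1, 1.0 / 1000)            # 1 s at 1000 Hz
signal = np.sin(2 * np.pi * 5 * t)         # 5 Hz tone, no energy above the new Nyquist
decimated = integer_decimation(signal, 5)  # keep every 5th sample
print(len(signal), len(decimated))         # 1000 200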
def som_get_capture_objects(som_pointer): """! @brief Returns list of indexes of captured objects by each neuron. @param[in] som_pointer (c_pointer): pointer to object of self-organized map. """ ccore = ccore_library.get() ccore.som_get_capture_objects.restype = POINTER(pyclustering_package) package = ccore.som_get_capture_objects(som_pointer) result = package_extractor(package).extract() return result
0.014583
def bytes2human(n, format="%(value).1f%(symbol)s"):
    """
    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return format % locals()
    return format % dict(symbol=symbols[0], value=n)
0.001972
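Since the format string is a parameter, the helper above can also produce custom layouts; the outputs below follow directly from the code as written.

print(bytes2human(10000))                                 # '9.8K'
print(bytes2human(10000, "%(value).2f %(symbol)s/sec"))   # '9.77 K/sec'
print(bytes2human(1))                                     # '1.0B'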
def _clean_data(api_response):
    '''
    Returns the DATA response from a Linode API query as a single pre-formatted dictionary

    api_response
        The query to be cleaned.
    '''
    data = {}
    data.update(api_response['DATA'])
    return data
0.005634
def tf_demo_loss(self, states, actions, terminal, reward, internals, update, reference=None): """ Extends the q-model loss via the dqfd large-margin loss. """ embedding = self.network.apply(x=states, internals=internals, update=update) deltas = list() for name in sorted(actions): action = actions[name] distr_params = self.distributions[name].parameterize(x=embedding) state_action_value = self.distributions[name].state_action_value(distr_params=distr_params, action=action) # Create the supervised margin loss # Zero for the action taken, one for all other actions, now multiply by expert margin if self.actions_spec[name]['type'] == 'bool': num_actions = 2 action = tf.cast(x=action, dtype=util.tf_dtype('int')) else: num_actions = self.actions_spec[name]['num_actions'] one_hot = tf.one_hot(indices=action, depth=num_actions) ones = tf.ones_like(tensor=one_hot, dtype=tf.float32) inverted_one_hot = ones - one_hot # max_a([Q(s,a) + l(s,a_E,a)], l(s,a_E, a) is 0 for expert action and margin value for others state_action_values = self.distributions[name].state_action_value(distr_params=distr_params) state_action_values = state_action_values + inverted_one_hot * self.expert_margin supervised_selector = tf.reduce_max(input_tensor=state_action_values, axis=-1) # J_E(Q) = max_a([Q(s,a) + l(s,a_E,a)] - Q(s,a_E) delta = supervised_selector - state_action_value action_size = util.prod(self.actions_spec[name]['shape']) delta = tf.reshape(tensor=delta, shape=(-1, action_size)) deltas.append(delta) loss_per_instance = tf.reduce_mean(input_tensor=tf.concat(values=deltas, axis=1), axis=1) loss_per_instance = tf.square(x=loss_per_instance) return tf.reduce_mean(input_tensor=loss_per_instance, axis=0)
0.005337
def _check_section_underline(cls, section_name, context, indentation): """D4{07,08,09,12}, D215: Section underline checks. Check for correct formatting for docstring sections. Checks that: * The line that follows the section name contains dashes (D40{7,8}). * The amount of dashes is equal to the length of the section name (D409). * The section's content does not begin in the line that follows the section header (D412). * The indentation of the dashed line is equal to the docstring's indentation (D215). """ blank_lines_after_header = 0 for line in context.following_lines: if not is_blank(line): break blank_lines_after_header += 1 else: # There are only blank lines after the header. yield violations.D407(section_name) return non_empty_line = context.following_lines[blank_lines_after_header] dash_line_found = ''.join(set(non_empty_line.strip())) == '-' if not dash_line_found: yield violations.D407(section_name) if blank_lines_after_header > 0: yield violations.D412(section_name) else: if blank_lines_after_header > 0: yield violations.D408(section_name) if non_empty_line.strip() != "-" * len(section_name): yield violations.D409(len(section_name), section_name, len(non_empty_line.strip())) if leading_space(non_empty_line) > indentation: yield violations.D215(section_name) line_after_dashes_index = blank_lines_after_header + 1 # If the line index after the dashes is in range (perhaps we have # a header + underline followed by another section header). if line_after_dashes_index < len(context.following_lines): line_after_dashes = \ context.following_lines[line_after_dashes_index] if is_blank(line_after_dashes): rest_of_lines = \ context.following_lines[line_after_dashes_index:] if not is_blank(''.join(rest_of_lines)): yield violations.D412(section_name) else: yield violations.D414(section_name) else: yield violations.D414(section_name)
0.000773
def map(self, func, iterable, callback=None):
    """A wrapper around the built-in ``map()`` function to provide a
    consistent interface with the other ``Pool`` classes.

    Parameters
    ----------
    func : callable
        A function or callable object that is executed on each element of
        the specified ``iterable``. This object must be picklable (i.e. it
        can't be a function scoped within a function or a ``lambda``
        function). This should accept a single positional argument and
        return a single object.
    iterable : iterable
        A list or iterable of tasks. Each task can be itself an iterable
        (e.g., tuple) of values or data to pass in to ``func``.
    callback : callable, optional
        An optional callback function (or callable) that is called with the
        result from each worker run and is executed on the master process.
        This is useful for, e.g., saving results to a file, since the
        callback is only called on the master thread.

    Returns
    -------
    results : generator
    """
    return self._call_callback(callback, map(func, iterable))
0.00161
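A hedged sketch of how this map() is intended to be called, assuming it lives on a serial pool class (named SerialPool here purely for illustration) and that _call_callback invokes the callback on each result as it is consumed:

def square(x):
    return x * x

seen = []
pool = SerialPool()                                       # hypothetical pool exposing the map() above
results = pool.map(square, [1, 2, 3, 4], callback=seen.append)
print(list(results))                                      # [1, 4, 9, 16]
print(seen)                                               # callback ran on the master process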
def executeTask(self,
                inputs,
                outSR=None,
                processSR=None,
                returnZ=False,
                returnM=False,
                f="json",
                method="POST"):
    """ performs the execute task method """
    url = self._url + "/execute"
    params = {
        "f" : f
    }
    if outSR is not None:
        params['env:outSR'] = outSR
    if processSR is not None:
        params['env:processSR'] = processSR
    params['returnZ'] = returnZ
    params['returnM'] = returnM
    for p in inputs:
        if isinstance(p, BaseGPObject):
            params[p.paramName] = p.value
    if method.lower() == "post":
        return self._post(url=url, param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    else:
        return self._get(url=url, param_dict=params,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
0.016713
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ if self.body: return '\n'.join(stmt.pretty_str(indent) for stmt in self.body) else: return (' ' * indent) + '[empty]'
0.00554
def translate(srcCol, matching, replace):
    """Translate any character in `srcCol` that appears in `matching` to the
    corresponding character in `replace`. The characters in `replace` correspond
    positionally to the characters in `matching`; a translation happens whenever
    a character in the string matches a character in `matching`.

    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r=u'1a2s3ae')]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
0.009631
def lognormal(mu, sigma, random_state): ''' mu: float or array_like of floats sigma: float or array_like of floats random_state: an object of numpy.random.RandomState ''' return np.exp(normal(mu, sigma, random_state))
0.004149
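A small usage sketch, assuming the sibling helper normal(mu, sigma, random_state) called above is a thin wrapper around random_state.normal:

import numpy as np

rng = np.random.RandomState(42)
draw = lognormal(mu=0.0, sigma=0.5, random_state=rng)   # exp of a N(0, 0.5) draw
print(draw)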
def discharge_coefficient_to_K(D, Do, C): r'''Converts a discharge coefficient to a standard loss coefficient, for use in computation of the actual pressure drop of an orifice or other device. .. math:: K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2 Parameters ---------- D : float Upstream internal pipe diameter, [m] Do : float Diameter of orifice at flow conditions, [m] C : float Coefficient of discharge of the orifice, [-] Returns ------- K : float Loss coefficient with respect to the velocity and density of the fluid just upstream of the orifice, [-] Notes ----- If expansibility is used in the orifice calculation, the result will not match with the specified pressure drop formula in [1]_; it can almost be matched by dividing the calculated mass flow by the expansibility factor and using that mass flow with the loss coefficient. Examples -------- >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512) 5.2314291729754 References ---------- .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001. .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure Differential Devices Inserted in Circular Cross-Section Conduits Running Full -- Part 2: Orifice Plates. ''' beta = Do/D beta2 = beta*beta beta4 = beta2*beta2 return ((1.0 - beta4*(1.0 - C*C))**0.5/(C*beta2) - 1.0)**2
0.004297
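The loss coefficient produced above plugs into the standard dP = K * rho * v^2 / 2 relation, with v and rho taken just upstream of the orifice. A worked sketch with illustrative water-flow numbers:

from math import pi

D, Do, C = 0.07366, 0.05, 0.61512
K = discharge_coefficient_to_K(D, Do, C)    # ~5.2314, matching the docstring example

rho = 998.0                                 # kg/m^3, water (illustrative)
Q = 0.01                                    # m^3/s volumetric flow (illustrative)
v = Q / (pi / 4 * D ** 2)                   # upstream velocity, m/s
dP = K * rho * v ** 2 / 2.0                 # orifice pressure drop, Pa
print(round(K, 4), round(v, 3), round(dP, 1))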
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):
    """Calculate pct_change of each value to previous entry in group"""
    # TODO: Remove this conditional when #23918 is fixed
    if freq:
        return self.apply(lambda x: x.pct_change(periods=periods,
                                                 fill_method=fill_method,
                                                 limit=limit, freq=freq))
    filled = getattr(self, fill_method)(limit=limit)
    fill_grp = filled.groupby(self.grouper.labels)
    shifted = fill_grp.shift(periods=periods, freq=freq)
    return (filled / shifted) - 1
0.003008
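A minimal pandas illustration of what the grouped pct_change above computes — each value's change relative to the previous entry within its own group:

import pandas as pd

df = pd.DataFrame({'grp': ['a', 'a', 'a', 'b', 'b'],
                   'val': [10.0, 12.0, 9.0, 100.0, 110.0]})
print(df.groupby('grp')['val'].pct_change())
# index 0 and 3 are NaN (first entry of each group),
# then 0.20 and -0.25 for group 'a' and 0.10 for group 'b'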
def ufloatDict_nominal(self, ufloat_dict): 'This gives us a dictionary of nominal values from a dictionary of uncertainties' return OrderedDict(izip(ufloat_dict.keys(), map(lambda x: x.nominal_value, ufloat_dict.values())))
0.016736
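The snippet above targets Python 2 (itertools.izip); a self-contained Python 3 sketch of the same idea using the uncertainties package:

from collections import OrderedDict
from uncertainties import ufloat

measurements = OrderedDict([('mass', ufloat(1.23, 0.05)),
                            ('length', ufloat(4.56, 0.10))])
# keep only the nominal values, preserving key order
nominals = OrderedDict((key, value.nominal_value) for key, value in measurements.items())
print(nominals)   # OrderedDict([('mass', 1.23), ('length', 4.56)])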
def download(self, attr): """ Download an attribute attachment (if type is malware-sample or attachment only) :param attr: attribute (should be MispAttribute instance) :returns: value of the attachment """ if attr.type not in ['malware-sample', 'attachment']: raise ValueError('Only malware-sample and attachment can be downloaded') return self.GET('/attributes/downloadAttachment/download/%i' % attr.id)
0.006237
def bartlett(timeseries, segmentlength, noverlap=None, window=None, plan=None):
    # pylint: disable=unused-argument
    """Calculate a PSD of this `TimeSeries` using Bartlett's method

    Parameters
    ----------
    timeseries : `~gwpy.timeseries.TimeSeries`
        input `TimeSeries` data.

    segmentlength : `int`
        number of samples in single average.

    noverlap : `int`
        number of samples to overlap between segments; ignored here, since
        Bartlett's method always uses non-overlapping segments.

    window : `tuple`, `str`, optional
        window parameters to apply to timeseries prior to FFT

    plan : `REAL8FFTPlan`, optional
        LAL FFT plan to use when generating average spectrum

    Returns
    -------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        average power `FrequencySeries`

    See also
    --------
    lal.REAL8AverageSpectrumWelch
    """
    return _lal_spectrum(timeseries, segmentlength, noverlap=0,
                         method='welch', window=window, plan=plan)
0.001018
def clone_network(network_id, recipient_user_id=None, new_network_name=None, project_id=None, project_name=None, new_project=True, **kwargs):
    """
    Create an exact clone of the specified network for the specified user.

    If project_id is specified, put the new network in there.

    Otherwise create a new project with the specified name and put it in there.
    """
    user_id = kwargs['user_id']

    ex_net = db.DBSession.query(Network).filter(Network.id==network_id).one()

    ex_net.check_read_permission(user_id)

    if project_id is None and new_project == True:
        log.info("Creating a new project for cloned network")

        ex_proj = db.DBSession.query(Project).filter(Project.id==ex_net.project_id).one()

        user = db.DBSession.query(User).filter(User.id==user_id).one()

        project = Project()
        if project_name is None or project_name=="":
            project_name=ex_proj.name + " (Cloned by %s)" % user.display_name

        #check a project with this name doesn't already exist:
        ex_project = db.DBSession.query(Project).filter(Project.name==project_name,
                                                        Project.created_by==user_id).all()

        #If it exists, use it.
        if len(ex_project) > 0:
            project=ex_project[0]
        else:
            project.name = project_name
            project.created_by = user_id

            project.set_owner(user_id)
            if recipient_user_id!=None:
                project.set_owner(recipient_user_id)

            db.DBSession.add(project)
            db.DBSession.flush()

        project_id=project.id

    elif project_id is None:
        log.info("Using current project for cloned network")
        project_id=ex_net.project_id

    if new_network_name is None or new_network_name == "":
        new_network_name=ex_net.name

    log.info('Cloning Network...')

    #Find if there are any networks with this name in the project already
    ex_network = db.DBSession.query(Network).filter(Network.project_id==project_id,
                                                    Network.name.like("{0}%".format(new_network_name))).all()
    if len(ex_network) > 0:
        new_network_name = new_network_name + " " + str(len(ex_network))

    newnet = Network()

    newnet.project_id = project_id
    newnet.name = new_network_name
    newnet.description = ex_net.description
    newnet.layout = ex_net.layout
    newnet.status = ex_net.status
    newnet.projection = ex_net.projection
    newnet.created_by = user_id
    newnet.set_owner(user_id)
    if recipient_user_id is not None:
        newnet.set_owner(recipient_user_id)

    db.DBSession.add(newnet)
    db.DBSession.flush()

    newnetworkid = newnet.id

    log.info('Cloning Nodes')
    node_id_map = _clone_nodes(network_id, newnetworkid)

    log.info('Cloning Links')
    link_id_map = _clone_links(network_id, newnetworkid, node_id_map)

    log.info('Cloning Groups')
    group_id_map = _clone_groups(network_id,
                                 newnetworkid,
                                 node_id_map,
                                 link_id_map)

    log.info("Cloning Resource Attributes")
    ra_id_map = _clone_resourceattrs(network_id, newnetworkid, node_id_map, link_id_map, group_id_map)

    log.info("Cloning Resource Types")
    _clone_resourcetypes(network_id, newnetworkid, node_id_map, link_id_map, group_id_map)

    log.info('Cloning Scenarios')
    _clone_scenarios(network_id, newnetworkid, ra_id_map, node_id_map, link_id_map, group_id_map, user_id)

    db.DBSession.flush()

    return newnetworkid
0.00933
def log_pdf(self, y, mu, weights=None): """ computes the log of the pdf or pmf of the values under the current distribution Parameters ---------- y : array-like of length n target values mu : array-like of length n expected values weights : array-like shape (n,) or None, default: None containing sample weights if None, defaults to array of ones Returns ------- pdf/pmf : np.array of length n """ if weights is None: weights = np.ones_like(mu) nu = weights / self.scale return sp.stats.gamma.logpdf(x=y, a=nu, scale=mu / nu)
0.004292
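The computation above can be checked directly against scipy for a single observation with unit weight; the scale value below is illustrative only:

import scipy.stats

y, mu, scale = 2.0, 1.5, 0.8
nu = 1.0 / scale                                     # weight of 1 divided by the dispersion
print(scipy.stats.gamma.logpdf(x=y, a=nu, scale=mu / nu))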
def delete_job(job_id, connection=None): """Deletes a job. :param job_id: unique identifier for this job >>> delete_job('http://example.com/test') """ if connection is None: connection = r with connection.pipeline() as pipe: pipe.delete(job_key(job_id)) pipe.zrem(REDIS_KEY, job_id) pipe.execute()
0.002817
def rasterize(vectorobject, reference, outname=None, burn_values=1, expressions=None, nodata=0, append=False):
    """
    rasterize a vector object

    Parameters
    ----------
    vectorobject: Vector
        the vector object to be rasterized
    reference: Raster
        a reference Raster object to retrieve geo information and extent from
    outname: str or None
        the name of the GeoTiff output file; if None, an in-memory object of type :class:`Raster` is returned and
        parameter outname is ignored
    burn_values: int or list
        the values to be written to the raster file
    expressions: list
        SQL expressions to filter the vector object by attributes
    nodata: int
        the nodata value of the target raster file
    append: bool
        if the output file already exists, update this file with new rasterized values?
        If True and the output file exists, parameters `reference` and `nodata` are ignored.

    Returns
    -------
    Raster or None
        if outname is `None`, a raster object pointing to an in-memory dataset else `None`

    Example
    -------
    >>> from spatialist import Vector, Raster, rasterize
    >>> vec = Vector('source.shp')
    >>> ref = Raster('reference.tif')
    >>> outname = 'target.tif'
    >>> expressions = ['ATTRIBUTE=1', 'ATTRIBUTE=2']
    >>> burn_values = [1, 2]
    >>> rasterize(vec, ref, outname, burn_values, expressions)
    """
    if expressions is None:
        expressions = ['']
    if isinstance(burn_values, (int, float)):
        burn_values = [burn_values]
    if len(expressions) != len(burn_values):
        raise RuntimeError('expressions and burn_values of different length')

    failed = []
    for exp in expressions:
        try:
            vectorobject.layer.SetAttributeFilter(exp)
        except RuntimeError:
            failed.append(exp)
    if len(failed) > 0:
        raise RuntimeError('failed to set the following attribute filter(s): ["{}"]'.format('", "'.join(failed)))

    if append and outname is not None and os.path.isfile(outname):
        target_ds = gdal.Open(outname, GA_Update)
    else:
        if not isinstance(reference, Raster):
            raise RuntimeError("parameter 'reference' must be of type Raster")
        if outname is not None:
            target_ds = gdal.GetDriverByName('GTiff').Create(outname, reference.cols, reference.rows, 1, gdal.GDT_Byte)
        else:
            target_ds = gdal.GetDriverByName('MEM').Create('', reference.cols, reference.rows, 1, gdal.GDT_Byte)
        target_ds.SetGeoTransform(reference.raster.GetGeoTransform())
        target_ds.SetProjection(reference.raster.GetProjection())
        band = target_ds.GetRasterBand(1)
        band.SetNoDataValue(nodata)
        band.FlushCache()
        band = None
    for expression, value in zip(expressions, burn_values):
        vectorobject.layer.SetAttributeFilter(expression)
        gdal.RasterizeLayer(target_ds, [1], vectorobject.layer, burn_values=[value])
    vectorobject.layer.SetAttributeFilter('')
    if outname is None:
        return Raster(target_ds)
    else:
        target_ds = None
0.003813
def username_enable(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa") name_key = ET.SubElement(username, "name") name_key.text = kwargs.pop('name') enable = ET.SubElement(username, "enable") enable.text = kwargs.pop('enable') callback = kwargs.pop('callback', self._callback) return callback(config)
0.00611
def run_MDR(n,stack_float,labels=None): """run utility function for MDR nodes.""" # need to check that tmp is categorical x1 = stack_float.pop() x2 = stack_float.pop() # check data is categorical if len(np.unique(x1))<=3 and len(np.unique(x2))<=3: tmp = np.vstack((x1,x2)).transpose() if labels is None: # prediction return n.model.transform(tmp)[:,0] else: # training out = n.model.fit_transform(tmp,labels)[:,0] return out else: return np.zeros(x1.shape[0])
0.021467
def connect(self):
    """Connect to host
    """
    try:
        self.client.connect(self.host, username=self.username,
                            password=self.password, port=self.port,
                            pkey=self.pkey, timeout=self.timeout)
    except sock_gaierror as ex:
        raise Exception("Unknown host '%s'" % self.host)
    except sock_error as ex:
        raise Exception("Error connecting to host '%s:%s'\n%s" % (self.host, self.port, ex))
    except paramiko.AuthenticationException as ex:
        raise Exception("Authentication error to host '%s:%s'" % (self.host, self.port))
    except paramiko.SSHException as ex:
        raise Exception("General SSH error - %s" % ex)
0.006402
def _parallel_compare_helper(class_obj, pairs, x, x_link=None): """Internal function to overcome pickling problem in python2.""" return class_obj._compute(pairs, x, x_link)
0.005556
def _needs_evaluation(self) -> bool:
    """
    Returns True when:
        1. A WHERE clause is not specified, or
        2. A WHERE clause is specified and it evaluates to True
    Returns False if a WHERE clause is specified and it evaluates to False
    """
    return self._schema.when is None or self._schema.when.evaluate(self._evaluation_context)
0.007895
def get_modifications(self): """Extract Modification INDRA Statements.""" # Find all event frames that are a type of protein modification qstr = "$.events.frames[(@.type is 'protein-modification')]" res = self.tree.execute(qstr) if res is None: return # Extract each of the results when possible for r in res: # The subtype of the modification modification_type = r.get('subtype') # Skip negated events (i.e. something doesn't happen) epistemics = self._get_epistemics(r) if epistemics.get('negated'): continue annotations, context = self._get_annot_context(r) frame_id = r['frame_id'] args = r['arguments'] site = None theme = None # Find the substrate (the "theme" agent here) and the # site and position it is modified on for a in args: if self._get_arg_type(a) == 'theme': theme = a['arg'] elif self._get_arg_type(a) == 'site': site = a['text'] theme_agent, theme_coords = self._get_agent_from_entity(theme) if site is not None: mods = self._parse_site_text(site) else: mods = [(None, None)] for mod in mods: # Add up to one statement for each site residue, pos = mod # Now we need to look for all regulation event to get to the # enzymes (the "controller" here) qstr = "$.events.frames[(@.type is 'regulation') and " + \ "(@.arguments[0].arg is '%s')]" % frame_id reg_res = self.tree.execute(qstr) reg_res = list(reg_res) for reg in reg_res: controller_agent, controller_coords = None, None for a in reg['arguments']: if self._get_arg_type(a) == 'controller': controller = a.get('arg') if controller is not None: controller_agent, controller_coords = \ self._get_agent_from_entity(controller) break # Check the polarity of the regulation and if negative, # flip the modification type. # For instance, negative-regulation of a phosphorylation # will become an (indirect) dephosphorylation reg_subtype = reg.get('subtype') if reg_subtype == 'negative-regulation': modification_type = \ modtype_to_inverse.get(modification_type) if not modification_type: logger.warning('Unhandled modification type: %s' % modification_type) continue sentence = reg['verbose-text'] annotations['agents']['coords'] = [controller_coords, theme_coords] ev = Evidence(source_api='reach', text=sentence, annotations=annotations, pmid=self.citation, context=context, epistemics=epistemics) args = [controller_agent, theme_agent, residue, pos, ev] # Here ModStmt is a sub-class of Modification ModStmt = modtype_to_modclass.get(modification_type) if ModStmt is None: logger.warning('Unhandled modification type: %s' % modification_type) else: # Handle this special case here because only # enzyme argument is needed if modification_type == 'autophosphorylation': args = [theme_agent, residue, pos, ev] self.statements.append(ModStmt(*args))
0.000705
def is_valid_uid(uid): """ :return: True if it is a valid DHIS2 UID, False if not """ pattern = r'^[A-Za-z][A-Za-z0-9]{10}$' if not isinstance(uid, string_types): return False return bool(re.compile(pattern).match(uid))
0.003984
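A few spot checks of the validator above, following directly from the regular expression it compiles:

print(is_valid_uid('a1B2c3D4e5F'))   # True: 11 characters, starts with a letter
print(is_valid_uid('11B2c3D4e5F'))   # False: must start with a letter
print(is_valid_uid('a1B2c3D4e5'))    # False: only 10 characters
print(is_valid_uid(12345678901))     # False: not a string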
def listAttachments(self, oid):
    """ list attachments for a given OBJECT ID """
    url = self._url + "/%s/attachments" % oid
    params = {
        "f":"json"
    }
    return self._get(url, params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
0.014778
def status(name, sig=None): ''' Return the status for a service. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> ''' contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: cmd = '/usr/bin/svcs -H -o STATE {0}'.format(service) line = __salt__['cmd.run'](cmd, python_shell=False) results[service] = line == 'online' if contains_globbing: return results return results[name]
0.000946