text (string, lengths 78 to 104k)
score (float64, 0 to 0.18)
def add_cluster_field(self, description): ''' Adds a field or a list of fields to the cluster result array. Has to be defined as a numpy dtype entry, e.g.: ('parameter', '<i4') ''' if isinstance(description, list): for item in description: if len(item) != 2: raise TypeError("Description needs to be a list of 2-tuples of a string and a dtype.") self._cluster_descr.append(item) else: if len(description) != 2: raise TypeError("Description needs to be a 2-tuple of a string and a dtype.") self._cluster_descr.append(description) self._init_arrays(size=0)
0.007246
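As a side note on the ('name', dtype) entries the docstring above describes, here is a minimal standalone numpy sketch (the field names are illustrative, not from the original class):

import numpy as np

# A structured dtype is a list of (name, format) 2-tuples -- exactly the
# shape of entry that add_cluster_field validates above.
descr = [('parameter', '<i4'), ('charge', '<f8')]  # hypothetical fields
clusters = np.zeros(3, dtype=np.dtype(descr))
clusters['parameter'] = [1, 2, 3]
print(clusters.dtype.names)  # ('parameter', 'charge')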
def called_with(self, *args, **kwargs): """ Before evaluating subsequent predicates, calls :attr:`subject` with given arguments (but unlike a direct call, catches and transforms any exceptions that arise during the call). """ self._args = args self._kwargs = kwargs self._call_subject = True return CallableInspector(self)
0.007772
def tokenize(self, untokenized_string: str, model=None): """Alias for tokenize_sentences()—NLTK's PlaintextCorpusReader needs a function called tokenize in functions used as a parameter for sentence tokenization. :type untokenized_string: str :param untokenized_string: A string containing one or more sentences. """ if self.language in INDIAN_LANGUAGES: return self.indian_punctuation_tokenize_regex(untokenized_string) else: return self.tokenize_sentences(untokenized_string)
0.003534
def uncomplete(self): """Mark the task uncomplete. >>> from pytodoist import todoist >>> user = todoist.login('[email protected]', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.uncomplete() """ args = { 'project_id': self.project.id, 'ids': [self.id] } owner = self.project.owner _perform_command(owner, 'item_uncomplete', args)
0.003922
def typechecked(memb): """Decorator applicable to functions, methods, properties, classes or modules (by explicit call). If applied on a module, memb must be a module or a module name contained in sys.modules. See pytypes.set_global_typechecked_decorator to apply this on all modules. Asserts compatibility of runtime argument and return values of all targeted functions and methods w.r.t. PEP 484-style type annotations of these functions and methods. """ if not pytypes.checking_enabled: return memb if is_no_type_check(memb): return memb if type_util._check_as_func(memb): return typechecked_func(memb) if isclass(memb): return typechecked_class(memb) if ismodule(memb): return typechecked_module(memb, True) if memb in sys.modules or memb in _pending_modules: return typechecked_module(memb, True) return memb
0.004362
def name2unicode(name): """Converts Adobe glyph names to Unicode characters.""" if name in glyphname2unicode: return glyphname2unicode[name] m = STRIP_NAME.search(name) if not m: raise KeyError(name) return unichr(int(m.group(0)))
0.003802
def sortInternals(self): ''' Sort all internal lists (``class_like``, ``namespaces``, ``variables``, etc) mostly how doxygen would, alphabetical but also hierarchical (e.g. structs appear before classes in listings). Some internal lists are just sorted, and some are deep sorted (:func:`~exhale.graph.ExhaleRoot.deepSortList`). ''' # some of the lists only need to be sorted, some of them need to be sorted and # have each node sort its children # leaf-like lists: no child sort self.defines.sort() self.enums.sort() self.enum_values.sort() self.functions.sort() self.groups.sort() self.typedefs.sort() self.variables.sort() # hierarchical lists: sort children self.deepSortList(self.class_like) self.deepSortList(self.namespaces) self.deepSortList(self.unions) self.deepSortList(self.files) self.deepSortList(self.dirs)
0.006012
def _buildArgs(f, self=None, kwargs={}): """ Get the default arguments from the function and assign as instance vars. Return a list of 3-tuples with (name, description, defaultValue) for each argument to the function. Assigns all arguments to the function as instance variables of TMRegion. If the argument was not provided, uses the default value. Pops any values from kwargs that go to the function. """ # Get the name, description, and default value for each argument argTuples = getArgumentDescriptions(f) argTuples = argTuples[1:] # Remove 'self' # Get the names of the parameters to our own constructor and remove them # Check for _original_init first, because if LockAttributesMixin is used, # __init__'s signature will be just (self, *args, **kw), but # _original_init is created with the original signature #init = getattr(self, '_original_init', self.__init__) init = TMRegion.__init__ ourArgNames = [t[0] for t in getArgumentDescriptions(init)] # Also remove a few other names that aren't in our constructor but are # computed automatically (e.g. numberOfCols for the TM) ourArgNames += [ 'numberOfCols', # TM ] for argTuple in argTuples[:]: if argTuple[0] in ourArgNames: argTuples.remove(argTuple) # Build the dictionary of arguments if self: for argTuple in argTuples: argName = argTuple[0] if argName in kwargs: # Argument was provided argValue = kwargs.pop(argName) else: # Argument was not provided; use the default value if there is one, and # raise an exception otherwise if len(argTuple) == 2: # No default value raise TypeError("Must provide '%s'" % argName) argValue = argTuple[2] # Set as an instance variable if 'self' was passed in setattr(self, argName, argValue) return argTuples
0.014807
def calculate_activation(self, datapoint): """ Only for a single datapoint """ activations = datapoint * self.dendrites activations = self.nonlinearity(activations) return activations.sum()
0.004673
def db_wb020(self, value=None): """ Corresponds to IDD Field `db_wb020` mean coincident dry-bulb temperature to Wet-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_wb020` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} needs to be of type float ' 'for field `db_wb020`'.format(value)) self._db_wb020 = value
0.003623
def remember_order(self): """Verify that subsequent :func:`fudge.Fake.expects` are called in the right order. For example:: >>> import fudge >>> db = fudge.Fake('db').remember_order().expects('insert').expects('update') >>> db.update() Traceback (most recent call last): ... AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end >>> fudge.clear_expectations() When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added to the expected order of calls :: >>> import fudge >>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1) >>> sess = sess.expects("set_id").with_args(5) >>> sess = sess.next_call(for_method="get_id").returns(5) Multiple calls to ``get_id()`` are now expected :: >>> sess.get_id() 1 >>> sess.set_id(5) >>> sess.get_id() 5 >>> fudge.verify() >>> fudge.clear_expectations() """ if self._callable: raise FakeDeclarationError( "remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)") self._expected_call_order = ExpectedCallOrder(self) registry.remember_expected_call_order(self._expected_call_order) return self
0.005323
def get_cursor(cls): """Return a message list cursor that returns sqlite3.Row objects""" db = SqliteConnection.get() db.row_factory = sqlite3.Row return db.cursor()
0.010204
def decrypt_ascii(self, ascii_string, key=None, digest="hex"): """ Receive ascii string and return decrypted data. Available digests: hex: Hexadecimal base64: Base 64 hqx: hexbin4 """ digests = {"hex": binascii.a2b_hex, "base64": binascii.a2b_base64, "hqx": binascii.a2b_hqx} digestor = digests.get(digest) if not digestor: raise TripleSecError(u"Digestor not supported.") binary_string = digestor(ascii_string) result = self.decrypt(binary_string, key) return result
0.003185
def get_innerclasses(self): """ sequence of JavaInnerClassInfo instances describing the inner classes of this class definition reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.6 """ # noqa buff = self.get_attribute("InnerClasses") if buff is None: return tuple() with unpack(buff) as up: return tuple(up.unpack_objects(JavaInnerClassInfo, self.cpool))
0.004202
def scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None): """Scatter of group using representation of data Y. """ mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values color = adata.uns[key + '_colors'][imask] if not isinstance(color[0], str): from matplotlib.colors import rgb2hex color = rgb2hex(adata.uns[key + '_colors'][imask]) if not is_color_like(color): raise ValueError('"{}" is not a valid matplotlib color.'.format(color)) data = [Y[mask, 0], Y[mask, 1]] if projection == '3d': data.append(Y[mask, 2]) ax.scatter(*data, marker='.', alpha=alpha, c=color, edgecolors='none', s=size, label=adata.obs[key].cat.categories[imask], rasterized=settings._vector_friendly) return mask
0.003359
def download_experiment(self, experiment_id): """ download_experiment: """ req = self.query_records_no_auth('experiment', query='download/'+experiment_id) if req.status_code == 404: print "Sorry, no experiment matching id # " + experiment_id print "Please double check the code you obtained on http://psiturk.org/ee" else: # Check if folder with same name already exists. expinfo = req.json() gitr = requests.get(expinfo['git_url']).json() if os.path.exists('./'+gitr['name']): print "*"*20 print "Sorry, you already have a file or folder named " + gitr['name'] + ". Please rename or delete it before trying to download this experiment. You can do this by typing `rm -rf " + gitr['name'] + "`" print "*"*20 return if "clone_url" in gitr: git.Git().clone(gitr["clone_url"]) print "="*20 print "Downloading..." print "Name: " + expinfo['name'] print "Downloads: " + str(expinfo['downloads']) print "Keywords: " + expinfo['keywords'] print "psiTurk Version: " + str(expinfo['psiturk_version_string']) print "URL: http://psiturk.org/ee/"+experiment_id print "\n" print "Experiment downloaded into the `" + gitr['name'] + "` folder of the current directory" print "Type `cd " + gitr['name'] + "` then run the `psiturk` command." print "="*20 else: print "Sorry, experiment not located on github. You might contact the author of this experiment. Experiment NOT downloaded." return
0.001474
def serialize(self, o): ''' Returns a safe serializable object that can be serialized into JSON. @param o Python object to serialize ''' if isinstance(o, (list, tuple)): return [self.serialize(i) for i in o] if isinstance(o, dict): return {k: self.serialize(v) for k, v in o.items()} if isinstance(o, datetime): return o.isoformat() if isinstance(o, Result): return self.serialize(o.serialize()) return o
0.00381
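A quick usage sketch of the recursive serializer above; 'Serializer' is an assumed name for the enclosing class, and the expected output is shown as a comment:

from datetime import datetime

s = Serializer()  # hypothetical enclosing class of serialize()
print(s.serialize({'when': datetime(2020, 1, 1), 'vals': (1, 2)}))
# {'when': '2020-01-01T00:00:00', 'vals': [1, 2]}  -- tuples become lists, datetimes become ISO strings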
def _rdf2dot_reduced(g, stream): """ A reduced dot graph. Adapted from original source: https://rdflib.readthedocs.io/en/stable/_modules/rdflib/tools/rdf2dot.html """ import cgi import collections import rdflib from rdflib.tools.rdf2dot import LABEL_PROPERTIES, NODECOLOR types = collections.defaultdict(set) fields = collections.defaultdict(set) nodes = {} def node(x): """Return a name of the given node.""" return nodes.setdefault(x, 'node{0}'.format(len(nodes))) def label(x, g): """Generate a label for the node.""" for labelProp in LABEL_PROPERTIES: label_ = g.value(x, labelProp) if label_: return label_ try: return g.namespace_manager.compute_qname(x)[2] except Exception: return x def formatliteral(l, g): """Format and escape literal.""" v = cgi.escape(l) if l.datatype: return '&quot;%s&quot;^^%s' % (v, qname(l.datatype, g)) elif l.language: return '&quot;%s&quot;@%s' % (v, l.language) return '&quot;%s&quot;' % v def qname(x, g): """Compute qname.""" try: q = g.compute_qname(x) return q[0] + ':' + q[2] except Exception: return x def color(p): """Choose node color.""" return 'BLACK' for s, p, o in g: sn = node(s) if p == rdflib.RDFS.label: continue # inject the type predicate into the node itself if p == rdflib.RDF.type: types[sn].add((qname(p, g), cgi.escape(o))) continue if p == rdflib.term.URIRef('http://purl.org/dc/terms/isPartOf'): fields[sn].add((qname(p, g), cgi.escape(o))) continue if p == rdflib.term.URIRef('http://www.w3.org/ns/prov#wasInformedBy'): continue if isinstance(o, (rdflib.URIRef, rdflib.BNode)): on = node(o) opstr = ( '\t%s -> %s [ color=%s, label=< <font point-size="12" ' 'color="#336633">%s</font> > ] ;\n' ) stream.write(opstr % (sn, on, color(p), qname(p, g))) else: fields[sn].add((qname(p, g), formatliteral(o, g))) for u, n in nodes.items(): stream.write(u"# %s %s\n" % (u, n)) f = [ '<tr><td align="left"><b>%s</b></td><td align="left">' '<b>%s</b></td></tr>' % x for x in sorted(types[n]) ] f += [ '<tr><td align="left">%s</td><td align="left">%s</td></tr>' % x for x in sorted(fields[n]) ] opstr = ( '%s [ shape=none, color=%s label=< <table color="#666666"' ' cellborder="0" cellspacing="0" border="1"><tr>' '<td colspan="2" bgcolor="grey"><B>%s</B></td></tr><tr>' '<td href="%s" bgcolor="#eeeeee" colspan="2">' '<font point-size="12" color="#6666ff">%s</font></td>' '</tr>%s</table> > ] \n' ) stream.write(opstr % (n, NODECOLOR, label(u, g), u, u, ''.join(f))) stream.write('}\n')
0.000625
def _create_set_property_msg(self, prop, cmd, val): """Create an extended message to set a property. Create an extended message with: cmd1: 0x2e cmd2: 0x00 flags: Direct Extended d1: group d2: cmd d3: val d4 - d14: 0x00 Parameters: prop: Property name to update cmd: Command value 0x02: on mask 0x03: off mask 0x04: x10 house code 0x05: ramp rate 0x06: on level 0x07: LED brightness 0x08: Non-Toggle mask 0x09: LED bit mask (Do not use in this class. Use LED class) 0x0a: X10 All bit mask 0x0c: Trigger group bit mask val: New property value """ user_data = Userdata({'d1': self.group, 'd2': cmd, 'd3': val}) msg = ExtendedSend(self._address, COMMAND_EXTENDED_GET_SET_0X2E_0X00, user_data) msg.set_checksum() self._set_sent_property(prop, val) return msg
0.001635
def _join_url(self, base, url, admin_forwarder=False): """ overrides `urlparse.urljoin` since it removes base url path https://docs.python.org/2/library/urlparse.html#urlparse.urljoin """ if admin_forwarder: return base + url else: return urljoin(base, url)
0.006079
def query(self, dataset_key, query, query_type="sql", parameters=None): """Query an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param query: SQL or SPARQL query :type query: str :param query_type: The type of the query. Must be either 'sql' or 'sparql'. (Default value = "sql") :type query_type: {'sql', 'sparql'}, optional :param parameters: parameters to the query - if SPARQL query, this should be a dict containing named parameters; if SQL query, then this should be a list containing positional parameters. Boolean values will be converted to xsd:boolean, Integer values to xsd:integer, and other Numeric values to xsd:decimal. Anything else is treated as a String literal (Default value = None) :type parameters: query parameters, optional :returns: Object containing the results of the query :rtype: Results :raises RuntimeError: If a server error occurs """ # TODO Move network request to RestApiClient owner_id, dataset_id = parse_dataset_key(dataset_key) params = { "query": query } if parameters and query_type == "sparql": # if SPARQL, then the parameters should be a Mapping containing # named parameters params["parameters"] = ",".join( ["{}={}".format(k, convert_to_sparql_literal(parameters[k])) for k in parameters.keys()]) elif parameters and query_type == "sql": # if SQL, then the parameters should be an array with positional # parameters, need to unwind them to $data_world_paramN for each # 0-indexed position N parameters = {"$data_world_param{}".format(i): x for i, x in enumerate(parameters)} params["parameters"] = ",".join(["{}={}".format( k, convert_to_sparql_literal(parameters[k])) for k in parameters.keys()]) url = "{0}://{1}/{2}/{3}/{4}".format(self._protocol, self._query_host, query_type, owner_id, dataset_id) headers = { 'User-Agent': _user_agent(), 'Accept': 'application/sparql-results+json', 'Authorization': 'Bearer {0}'.format(self._config.auth_token) } response = requests.get(url, params=params, headers=headers) if response.status_code == 200: return QueryResults(response.json()) raise RuntimeError( 'Error executing query: {}'.format(response.content))
0.000724
def do(cmdline, runas=None, env=None): ''' Execute a ruby command with rbenv's shims from the user or the system CLI Example: .. code-block:: bash salt '*' rbenv.do 'gem list bundler' salt '*' rbenv.do 'gem list bundler' deploy ''' if not cmdline: # This is a positional argument so this should never happen, but this # will handle cases where someone explicitly passes a false value for # cmdline. raise SaltInvocationError('Command must be specified') path = _rbenv_path(runas) if not env: env = {} # NOTE: Env vars (and their values) need to be str type on both Python 2 # and 3. The code below first normalizes all path components to unicode to # stitch them together, and then converts the result back to a str type. env[str('PATH')] = salt.utils.stringutils.to_str( # future lint: disable=blacklisted-function os.pathsep.join(( salt.utils.path.join(path, 'shims'), salt.utils.stringutils.to_unicode(os.environ['PATH']) )) ) try: cmdline = salt.utils.args.shlex_split(cmdline) except AttributeError: cmdline = salt.utils.args.shlex_split(six.text_type(cmdline)) result = __salt__['cmd.run_all']( cmdline, runas=runas, env=env, python_shell=False ) if result['retcode'] == 0: rehash(runas=runas) return result['stdout'] else: return False
0.001337
def check_publication_state(publication_id): """Check the publication's current state.""" with db_connect() as db_conn: with db_conn.cursor() as cursor: cursor.execute("""\ SELECT "state", "state_messages" FROM publications WHERE id = %s""", (publication_id,)) publication_state, publication_messages = cursor.fetchone() return publication_state, publication_messages
0.002433
def add_scripts_to_package(): """ Update the "scripts" parameter of the setup_arguments with any scripts found in the "scripts" directory. :return: """ global setup_arguments if os.path.isdir('scripts'): setup_arguments['scripts'] = [ os.path.join('scripts', f) for f in os.listdir('scripts') ]
0.002849
def set(self, attribute, specification, exact=False): """Set the named attribute from the specification given by the user. The value actually set may be different.""" assert isinstance(attribute, basestring) assert isinstance(exact, (int, bool)) if __debug__ and not exact: if attribute == 'requirements': assert (isinstance(specification, property_set.PropertySet) or all(isinstance(s, basestring) for s in specification)) elif attribute in ( 'usage-requirements', 'default-build', 'source-location', 'build-dir', 'id'): assert is_iterable_typed(specification, basestring) elif __debug__: assert ( isinstance(specification, (property_set.PropertySet, type(None), basestring)) or all(isinstance(s, basestring) for s in specification) ) if exact: self.__dict__[attribute] = specification elif attribute == "requirements": self.requirements = property_set.refine_from_user_input( self.requirements, specification, self.project_module, self.location) elif attribute == "usage-requirements": unconditional = [] for p in specification: split = property.split_conditional(p) if split: unconditional.append(split[1]) else: unconditional.append(p) non_free = property.remove("free", unconditional) if non_free: get_manager().errors()("usage-requirements %s have non-free properties %s" \ % (specification, non_free)) t = property.translate_paths( property.create_from_strings(specification, allow_condition=True), self.location) existing = self.__dict__.get("usage-requirements") if existing: new = property_set.create(existing.all() + t) else: new = property_set.create(t) self.__dict__["usage-requirements"] = new elif attribute == "default-build": self.__dict__["default-build"] = property_set.create(specification) elif attribute == "source-location": source_location = [] for path in specification: source_location.append(os.path.join(self.location, path)) self.__dict__["source-location"] = source_location elif attribute == "build-dir": self.__dict__["build-dir"] = os.path.join(self.location, specification[0]) elif attribute == "id": id = specification[0] if id[0] != '/': id = "/" + id self.manager.projects().register_id(id, self.project_module) self.__dict__["id"] = id elif not attribute in ["default-build", "location", "source-location", "parent", "projects-to-build", "project-root"]: self.manager.errors()( """Invalid project attribute '%s' specified for project at '%s'""" % (attribute, self.location)) else: self.__dict__[attribute] = specification
0.00448
def split_args(line): """Version of shlex.split that silently accepts incomplete strings. Parameters ---------- line : str The string to split Returns ------- [str] The line split in separated arguments """ lex = shlex.shlex(line, posix=True) lex.whitespace_split = True lex.commenters = '' res = [] try: while True: res.append(next(lex)) except ValueError: # No closing quotation pass except StopIteration: # End of loop pass if lex.token: res.append(lex.token) return res
0.001656
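A usage sketch of the tolerant behaviour described above, assuming the split_args definition is in scope; the missing closing quote is swallowed rather than raising, and the partial token is recovered from lex.token (expected output shown as a comment):

print(split_args('run --name "my job'))
# ['run', '--name', 'my job']  -- no ValueError despite the unclosed quote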
def shutit_method_scope(func): """Notifies the ShutIt object whenever we call a shutit module method. This allows setting values for the 'scope' of a function. """ def wrapper(self, shutit): """Wrapper to call a shutit module method, notifying the ShutIt object. """ ret = func(self, shutit) return ret return wrapper
0.030303
def _execFilters(self, type, msg): """ Execute Registered Filters """ for filter in self.FILTERS: msg = filter(type, msg) return msg
0.011905
def _parse_date_default_value(property_name, default_value_string): """Parse and return the default value for a date property.""" # OrientDB doesn't use ISO-8601 datetime format, so we have to parse it manually # and then turn it into a python datetime object. strptime() will raise an exception # if the provided value cannot be parsed correctly. parsed_value = time.strptime(default_value_string, ORIENTDB_DATE_FORMAT) return datetime.date(parsed_value.tm_year, parsed_value.tm_mon, parsed_value.tm_mday)
0.007547
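The parse-then-convert pattern above, sketched standalone; ORIENTDB_DATE_FORMAT is not shown in this snippet, so '%Y-%m-%d' is an assumed stand-in:

import time
import datetime

ORIENTDB_DATE_FORMAT = '%Y-%m-%d'  # assumed value, for illustration only
parsed = time.strptime('2019-05-01', ORIENTDB_DATE_FORMAT)
print(datetime.date(parsed.tm_year, parsed.tm_mon, parsed.tm_mday))  # 2019-05-01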
def remove_line(self, section): """Base implementation just pops the item from the collection. Reimplement to add global behaviour. """ self.beginResetModel() self.collection.pop(section) self.endResetModel()
0.007905
def run(self, N=1): '''Run the chain and store the history of visited points into the member variable ``self.samples``. Returns the number of accepted points during the run. .. seealso:: :py:class:`pypmc.tools.History` :param N: An int which defines the number of steps to run the chain. ''' if N == 0: return 0 # set the accept function if self.proposal.symmetric: get_log_rho = self._get_log_rho_metropolis else: get_log_rho = self._get_log_rho_metropolis_hastings # allocate an empty numpy array to store the run if self.target_values is not None: this_target_values = self.target_values.append(N) this_run = self.samples.append(N) accept_count = 0 for i_N in range(N): # propose new point proposed_point = self.proposal.propose(self.current_point, self.rng) proposed_eval = self.target(proposed_point) # log_rho := log(probability to accept point), where log_rho > 0 is meant to imply rho = 1 log_rho = get_log_rho(proposed_point, proposed_eval) # check for NaN if _np.isnan(log_rho): raise ValueError('encountered NaN') # accept if rho = 1 if log_rho >=0: accept_count += 1 this_run[i_N] = proposed_point self.current_point = proposed_point self.current_target_eval = proposed_eval # accept with probability rho elif log_rho >= _np.log(self.rng.rand()): accept_count += 1 this_run[i_N] = proposed_point self.current_point = proposed_point self.current_target_eval = proposed_eval # reject if not accepted else: this_run[i_N] = self.current_point #do not need to update self.current #self.current = self.current # save target value if desired if self.target_values is not None: this_target_values[i_N] = self.current_target_eval # ---------------------- end for -------------------------------- return accept_count
0.005952
def enable(self): """ Enables WinQuad setting """ nquad = self.nquad.value() for label, xsll, xsul, xslr, xsur, ys, nx, ny in \ zip(self.label[:nquad], self.xsll[:nquad], self.xsul[:nquad], self.xslr[:nquad], self.xsur[:nquad], self.ys[:nquad], self.nx[:nquad], self.ny[:nquad]): label.config(state='normal') for thing in (xsll, xsul, xslr, xsur, ys, nx, ny): thing.enable() for label, xsll, xsul, xslr, xsur, ys, nx, ny in \ zip(self.label[nquad:], self.xsll[nquad:], self.xsul[nquad:], self.xslr[nquad:], self.xsur[nquad:], self.ys[nquad:], self.nx[nquad:], self.ny[nquad:]): label.config(state='disable') for thing in (xsll, xsul, xslr, xsur, ys, nx, ny): thing.disable() self.nquad.enable() self.xbin.enable() self.ybin.enable() self.sbutt.enable()
0.001949
def get_body(self, environ=None): """Get the request body.""" body = dict( status=self.code, message=self.get_description(environ), ) if self.errors: body['errors'] = self.errors return json.dumps(body)
0.007143
def gen_experiment_id(n=10): """Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters. """ chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np.random.randint(0,len(chrs), size=n) return ''.join([chrs[i] for i in inds])
0.007407
def format(self, fmt): """Returns string representing the items specified in the format string The format string can contain: .. code:: d - drive letter p - path n - name x - extension z - file size t - file time in seconds And, you can string them together, e.g. `dpnx` returns the fully qualified name. On platforms like Unix, where drive letter doesn't make sense, it's simply ignored when used in a format string, making it easy to construct fully qualified path names in an os independent manner. Parameters ---------- fmt : str A string representing the elements you want returned. Returns ------- str A string containing the elements of the path requested in `fmt` """ val = '' for x in fmt: if x == 'd': val += self._driv elif x == 'p': val += self._path elif x == 'n': val += self._name elif x == 'x': val += self._ext elif x == 'z': if self._size != None: val += str(self._size) elif x == 't': if self._time != None: val += str(self._time) return val
0.005718
def validate_backup(configuration, backup_data): """Celery task. It will extract the backup archive into a unique folder in the temporary directory specified in the configuration. Once extracted, a Docker container will be started and will start a restoration procedure. The worker will wait for the container to exit and retrieve its return code. A notification is sent if the return code is != 0. If the return code == 0, the container will be removed. Lastly, it will remove the temporary workdir. """ extract_archive(backup_data['archive_path'], backup_data['workdir']) docker_client = Client(configuration['docker']['url']) container = run_container(docker_client, backup_data) return_code = docker_client.wait(container) print('Container return code: {}'.format(return_code)) if return_code != 0: notifier = MailNotifier(configuration['mail']) report = {'archive': backup_data['archive_path'], 'image': backup_data['image'], 'container_id': container.get('Id')} notifier.send_report(report) else: docker_client.remove_container(container) remove_file(backup_data['workdir'])
0.000805
def add_name_variant(self, name): """Add name variant. Args: :param name: name variant for the current author. :type name: string """ self._ensure_field('name', {}) self.obj['name'].setdefault('name_variants', []).append(name)
0.006873
def __eq__(self, other): """ Compare the current place object to another passed to the comparison method. The two place objects must have the same identification, even if some of their attributes might be different. @param other: a ``Place`` instance to compare with the current place object. @return: ``True`` if the given place corresponds to the current place; ``False`` otherwise. """ return self.place_id and other.place_id \ and self.place_id == other.place_id
0.003521
def get_relationship(self, attribute): """ Returns the domain relationship object for the given resource attribute. """ rel = self.__relationships.get(attribute.entity_attr) if rel is None: rel = LazyDomainRelationship(self, attribute, direction= self.relationship_direction) self.__relationships[attribute.entity_attr] = rel return rel
0.007984
def _find_package(c): """ Try to find 'the' One True Package for this project. Mostly for obtaining the ``_version`` file within it. Uses the ``packaging.package`` config setting if defined. If not defined, fallback is to look for a single top-level Python package (directory containing ``__init__.py``). (This search ignores a small blacklist of directories like ``tests/``, ``vendor/`` etc.) """ # TODO: is there a way to get this from the same place setup.py does w/o # setup.py barfing (since setup() runs at import time and assumes CLI use)? configured_value = c.get("packaging", {}).get("package", None) if configured_value: return configured_value # TODO: tests covering this stuff here (most logic tests simply supply # config above) packages = [ path for path in os.listdir(".") if ( os.path.isdir(path) and os.path.exists(os.path.join(path, "__init__.py")) and path not in ("tests", "integration", "sites", "vendor") ) ] if not packages: sys.exit("Unable to find a local Python package!") if len(packages) > 1: sys.exit("Found multiple Python packages: {0!r}".format(packages)) return packages[0]
0.000784
def update_release(self, release, project, release_id): """UpdateRelease. [Preview API] Update a complete release object. :param :class:`<Release> <azure.devops.v5_1.release.models.Release>` release: Release object for update. :param str project: Project ID or project name :param int release_id: Id of the release to update. :rtype: :class:`<Release> <azure.devops.v5_1.release.models.Release>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') content = self._serialize.body(release, 'Release') response = self._send(http_method='PUT', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='5.1-preview.8', route_values=route_values, content=content) return self._deserialize('Release', response)
0.005314
def density(G, t=None): r"""Return the density of a graph at timestamp t. The density for undirected graphs is .. math:: d = \frac{2m}{n(n-1)}, and for directed graphs is .. math:: d = \frac{m}{n(n-1)}, where `n` is the number of nodes and `m` is the number of edges in `G`. Parameters ---------- G : Graph object DyNetx graph object t : snapshot id (default=None) If None the density will be computed on the flattened graph. Notes ----- The density is 0 for a graph without edges and 1 for a complete graph. Self loops are counted in the total number of edges so graphs with self loops can have density higher than 1. """ n = number_of_nodes(G, t) m = number_of_interactions(G, t) if m == 0 or m is None or n <= 1: return 0 d = m / (n * (n - 1)) if not G.is_directed(): d *= 2 return d
0.001988
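As a quick check of the formulas in the docstring above: an undirected graph with n = 4 nodes and m = 3 interactions at snapshot t gives d = 2 * 3 / (4 * 3) = 0.5, while the same counts in a directed graph give d = 3 / 12 = 0.25.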
def _disallow_state(self, state): """Disallow states that are not useful to continue simulating.""" disallow_methods = (self._is_duplicate_board, self._is_impossible_by_count) for disallow_method in disallow_methods: if disallow_method(state): return True return False
0.005618
def flushall(self, asynchronous=False): """ Delete all keys in all databases on the current host. ``asynchronous`` indicates whether the operation is executed asynchronously by the server. """ args = [] if asynchronous: args.append(Token.get_token('ASYNC')) return self.execute_command('FLUSHALL', *args)
0.005249
def new_device(device_json, abode): """Create new device object for the given type.""" type_tag = device_json.get('type_tag') if not type_tag: raise AbodeException((ERROR.UNABLE_TO_MAP_DEVICE)) generic_type = CONST.get_generic_type(type_tag.lower()) device_json['generic_type'] = generic_type if generic_type == CONST.TYPE_CONNECTIVITY or \ generic_type == CONST.TYPE_MOISTURE or \ generic_type == CONST.TYPE_OPENING: return AbodeBinarySensor(device_json, abode) elif generic_type == CONST.TYPE_CAMERA: return AbodeCamera(device_json, abode) elif generic_type == CONST.TYPE_COVER: return AbodeCover(device_json, abode) elif generic_type == CONST.TYPE_LIGHT: return AbodeLight(device_json, abode) elif generic_type == CONST.TYPE_LOCK: return AbodeLock(device_json, abode) elif generic_type == CONST.TYPE_SWITCH: return AbodeSwitch(device_json, abode) elif generic_type == CONST.TYPE_VALVE: return AbodeValve(device_json, abode) elif generic_type == CONST.TYPE_UNKNOWN_SENSOR: return _new_sensor(device_json, abode) return None
0.000854
def analyze(self, scratch, **kwargs): """Run and return the results from the Animation plugin.""" results = Counter() for script in self.iter_scripts(scratch): gen = self.iter_blocks(script.blocks) name = 'start' level = None while name != '': if name in self.ANIMATION: gen, count = self._check_animation(name, level, gen) results.update(count) name, level, _ = next(gen, ('', 0, '')) return {'animation': results}
0.003527
def libvlc_audio_output_list_get(p_instance): '''Gets the list of available audio output modules. @param p_instance: libvlc instance. @return: list of available audio outputs. It must be freed with libvlc_audio_output_list_release(). In case of error, NULL is returned. ''' f = _Cfunctions.get('libvlc_audio_output_list_get', None) or \ _Cfunction('libvlc_audio_output_list_get', ((1,),), None, ctypes.POINTER(AudioOutput), Instance) return f(p_instance)
0.006342
def parse(readDataInstance): """ Returns a new L{TLSDirectory64} object. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory64} object. @rtype: L{TLSDirectory64} @return: A new L{TLSDirectory64} object. """ tlsDir = TLSDirectory64() tlsDir.startAddressOfRawData.value = readDataInstance.readQword() tlsDir.endAddressOfRawData.value = readDataInstance.readQword() tlsDir.addressOfIndex.value = readDataInstance.readQword() tlsDir.addressOfCallbacks.value = readDataInstance.readQword() tlsDir.sizeOfZeroFill.value = readDataInstance.readDword() tlsDir.characteristics.value = readDataInstance.readDword() return tlsDir
0.007134
def record(self): # type: () -> bytes ''' A method to generate the string representing this Directory Record. Parameters: None. Returns: String representing this Directory Record. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized') # Ecma-119 9.1.5 says the date should reflect the time when the # record was written, so we make a new date now and use that to # write out the record. self.date = dates.DirectoryRecordDate() self.date.new() padlen = struct.calcsize(self.FMT) + self.len_fi padstr = b'\x00' * (padlen % 2) extent_loc = self._extent_location() xa_rec = b'' if self.xa_record is not None: xa_rec = b'\x00' * self.xa_pad_size + self.xa_record.record() rr_rec = b'' if self.rock_ridge is not None: rr_rec = self.rock_ridge.record_dr_entries() outlist = [struct.pack(self.FMT, self.dr_len, self.xattr_len, extent_loc, utils.swab_32bit(extent_loc), self.data_length, utils.swab_32bit(self.data_length), self.date.record(), self.file_flags, self.file_unit_size, self.interleave_gap_size, self.seqnum, utils.swab_16bit(self.seqnum), self.len_fi) + self.file_ident + padstr + xa_rec + rr_rec] outlist.append(b'\x00' * (len(outlist[0]) % 2)) return b''.join(outlist)
0.003621
def _keynat(string): """A natural sort helper function for sort() and sorted() without using regular expression. """ r = [] for c in string: if c.isdigit(): if r and isinstance(r[-1], int): r[-1] = r[-1] * 10 + int(c) else: r.append(int(c)) else: r.append(9 + ord(c)) return r
0.002604
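A usage sketch of the natural-sort key above (assuming _keynat is in scope), with the expected ordering in the comment:

names = ['img10.png', 'img2.png', 'img1.png']
print(sorted(names, key=_keynat))
# ['img1.png', 'img2.png', 'img10.png']  -- digit runs compare numerically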
def _decode_subelements(self): """Decode the stanza subelements.""" for child in self._element: if child.tag == self._subject_tag: self._subject = child.text elif child.tag == self._body_tag: self._body = child.text elif child.tag == self._thread_tag: self._thread = child.text
0.005305
def loads(self, src): """ Compile css from scss string. """ assert isinstance(src, (unicode_, bytes_)) nodes = self.scan(src.strip()) self.parse(nodes) return ''.join(map(str, nodes))
0.008658
def _pad(string, size): """ 'Pad' a string with leading zeroes to fit the given size, truncating if necessary. """ strlen = len(string) if strlen == size: return string if strlen < size: return _padding[0:size-strlen] + string return string[-size:]
0.003378
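A call sketch for _pad above; _padding is defined elsewhere in its module, so a zero-string stand-in is assumed here:

_padding = '0' * 64  # assumed stand-in; the real module defines its own buffer
print(_pad('42', 5))      # '00042' -- left-padded up to size
print(_pad('123456', 4))  # '3456'  -- truncated to the last `size` characters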
def to_raw_xml(source): """ Convert various representations of an XML structure to a normal XML string. Args: source -- The source object to be converted - ET.Element, dict or string. Returns: A raw xml string matching the source object. >>> to_raw_xml("<content/>") '<content/>' >>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}}) '<document><list><li>1</li><li>2</li></list><title>foo</title></document>' >>> to_raw_xml(ET.Element('root')) '<root/>' """ if isinstance(source, basestring): return source elif hasattr(source, 'getiterator'): # Element or ElementTree. return ET.tostring(source, encoding="utf-8") elif hasattr(source, 'keys'): # Dict. xml_root = dict_to_etree(source) return ET.tostring(xml_root, encoding="utf-8") else: raise TypeError("Accepted representations of a document are string, dict and etree")
0.005081
def generated_key(key): """Create the proper generated key value""" key_name = key['name'] if key['method'] == 'uuid': LOG.debug("Setting %s to a uuid", key_name) return str(uuid4()) elif key['method'] == 'words': LOG.debug("Setting %s to random words", key_name) return random_word() elif key['method'] == 'static': if 'value' not in key.keys(): raise aomi.exceptions.AomiData("Missing static value") LOG.debug("Setting %s to a static value", key_name) return key['value'] else: raise aomi.exceptions.AomiData("Unexpected generated secret method %s" % key['method'])
0.001414
def windowed_tajima_d(pos, ac, size=None, start=None, stop=None, step=None, windows=None, min_sites=3): """Calculate the value of Tajima's D in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- D : ndarray, float, shape (n_windows,) Tajima's D. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> pos = [2, 4, 7, 14, 15, 20, 22, 25, 27] >>> D, windows, counts = allel.windowed_tajima_d(pos, ac, size=20, step=10, start=1, stop=31) >>> D array([1.36521524, 4.22566622]) >>> windows array([[ 1, 20], [11, 31]]) >>> counts array([6, 6]) """ # check inputs if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) if not hasattr(ac, 'count_segregating'): ac = AlleleCountsArray(ac, copy=False) # assume number of chromosomes sampled is constant for all variants n = ac.sum(axis=1).max() # calculate constants a1 = np.sum(1 / np.arange(1, n)) a2 = np.sum(1 / (np.arange(1, n)**2)) b1 = (n + 1) / (3 * (n - 1)) b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1)) c1 = b1 - (1 / a1) c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2)) e1 = c1 / a1 e2 = c2 / (a1**2 + a2) # locate segregating variants is_seg = ac.is_segregating() # calculate mean pairwise difference mpd = mean_pairwise_difference(ac, fill=0) # define statistic to compute for each window # noinspection PyPep8Naming def statistic(w_is_seg, w_mpd): S = np.count_nonzero(w_is_seg) if S < min_sites: return np.nan pi = np.sum(w_mpd) d = pi - (S / a1) d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1))) wD = d / d_stdev return wD D, windows, counts = windowed_statistic(pos, values=(is_seg, mpd), statistic=statistic, size=size, start=start, stop=stop, step=step, windows=windows, fill=np.nan) return D, windows, counts
0.000515
def is_virtual_environment(path): """Check if a given path is a virtual environment's root. This is done by checking if the directory contains a Python executable in its bin/Scripts directory. Not technically correct, but good enough for general usage. """ if not path.is_dir(): return False for bindir_name in ('bin', 'Scripts'): for python in path.joinpath(bindir_name).glob('python*'): try: exeness = python.is_file() and os.access(str(python), os.X_OK) except OSError: exeness = False if exeness: return True return False
0.00152
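The function above expects a pathlib-style path (it calls .is_dir(), .joinpath() and .glob()); a minimal call sketch with a hypothetical location:

import pathlib

venv = pathlib.Path('.venv')  # hypothetical virtualenv location
if is_virtual_environment(venv):
    print('found a virtual environment at', venv)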
def ensure_connectable(self, nailgun): """Ensures that a nailgun client is connectable or raises NailgunError.""" attempt_count = 1 while 1: try: with closing(nailgun.try_connect()) as sock: logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername())) return except nailgun.NailgunConnectionError: if attempt_count >= self._connect_attempts: logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts)) raise # Re-raise the NailgunConnectionError which provides more context to the user. attempt_count += 1 time.sleep(self.WAIT_INTERVAL_SEC)
0.017341
def _set_dot1x(self, v, load=False): """ Setter method for dot1x, mapped from YANG variable /interface/tengigabitethernet/dot1x (container) If this variable is read-only (config: false) in the source YANG file, then _set_dot1x is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dot1x() directly. YANG Description: This provides grouping of all the dot1x configuration elements. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=dot1x.dot1x, is_container='container', presence=False, yang_name="dot1x", rest_name="dot1x", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IEEE 802.1X Port-Based Access Control', u'cli-incomplete-no': None, u'callpoint': u'dot1x_callpoint', u'sort-priority': u'105'}}, namespace='urn:brocade.com:mgmt:brocade-dot1x', defining_module='brocade-dot1x', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """dot1x must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=dot1x.dot1x, is_container='container', presence=False, yang_name="dot1x", rest_name="dot1x", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IEEE 802.1X Port-Based Access Control', u'cli-incomplete-no': None, u'callpoint': u'dot1x_callpoint', u'sort-priority': u'105'}}, namespace='urn:brocade.com:mgmt:brocade-dot1x', defining_module='brocade-dot1x', yang_type='container', is_config=True)""", }) self.__dot1x = t if hasattr(self, '_set'): self._set()
0.005429
def union(self, *dstreams): """ Create a unified DStream from multiple DStreams of the same type and same slide duration. """ if not dstreams: raise ValueError("should have at least one DStream to union") if len(dstreams) == 1: return dstreams[0] if len(set(s._jrdd_deserializer for s in dstreams)) > 1: raise ValueError("All DStreams should have same serializer") if len(set(s._slideDuration for s in dstreams)) > 1: raise ValueError("All DStreams should have same slide duration") cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream jdstreams = SparkContext._gateway.new_array(cls, len(dstreams)) for i in range(0, len(dstreams)): jdstreams[i] = dstreams[i]._jdstream return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
0.003236
def _set_auditpol_data(option, value): ''' Helper function that updates the current applied settings to match what has just been set in the audit.csv files. We're doing it this way instead of running `gpupdate` Args: option (str): The name of the option to set value (str): The value to set. ['None', '0', '1', '2', '3'] Returns: bool: ``True`` if successful, otherwise ``False`` ''' auditpol_values = {'None': 'No Auditing', '0': 'No Auditing', '1': 'Success', '2': 'Failure', '3': 'Success and Failure'} defaults = _get_audit_defaults(option) return __utils__['auditpol.set_setting']( name=defaults['Auditpol Name'], value=auditpol_values[value])
0.001217
def run_list_error_summary(run_list, estimator_list, estimator_names, n_simulate, **kwargs): """Wrapper which runs run_list_error_values then applies error_values summary to the resulting dataframe. See the docstrings for those two functions for more details and for descriptions of parameters and output. """ true_values = kwargs.pop('true_values', None) include_true_values = kwargs.pop('include_true_values', False) include_rmse = kwargs.pop('include_rmse', False) error_values = run_list_error_values(run_list, estimator_list, estimator_names, n_simulate, **kwargs) return error_values_summary(error_values, true_values=true_values, include_true_values=include_true_values, include_rmse=include_rmse)
0.001148
def get_composition_admin_session_for_repository(self, repository_id=None): """Gets a composition administrative session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository return: (osid.repository.CompositionAdminSession) - a CompositionAdminSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_admin() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_admin() and supports_visible_federation() are true. """ if repository_id is None: raise NullArgument() if not self.supports_composition_admin() or not self.supports_visible_federation(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.CompositionAdminSession(repository_id, proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
0.00207
def _wrap_results(result, dtype, fill_value=None): """ wrap our results if needed """ if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): tz = getattr(dtype, 'tz', None) assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan result = tslibs.Timestamp(result, tz=tz) else: result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value: result = np.nan # raise if we have a timedelta64[ns] which is too large if np.fabs(result) > _int64_max: raise ValueError("overflow in timedelta operation") result = tslibs.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) return result
0.000943
def create(self, name, description, **attrs): """ Create a new :class:`Project` :param name: name of the :class:`Project` :param description: description of the :class:`Project` :param attrs: optional attributes for :class:`Project` """ attrs.update({'name': name, 'description': description}) return self._new_resource(payload=attrs)
0.005013
def _maximization(X, posterior, force_weights=None): """Estimate new centers, weights, and concentrations from the data and posterior. Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] """ n_examples, n_features = X.shape n_clusters, n_examples = posterior.shape concentrations = np.zeros((n_clusters,)) centers = np.zeros((n_clusters, n_features)) if force_weights is None: weights = np.zeros((n_clusters,)) for cc in range(n_clusters): # update weights (alpha) if force_weights is None: weights[cc] = np.mean(posterior[cc, :]) else: weights = force_weights # update centers (mu) X_scaled = X.copy() if sp.issparse(X): X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr)) else: for ee in range(n_examples): X_scaled[ee, :] *= posterior[cc, ee] centers[cc, :] = X_scaled.sum(axis=0) # normalize centers center_norm = np.linalg.norm(centers[cc, :]) if center_norm > 1e-8: centers[cc, :] = centers[cc, :] / center_norm # update concentration (kappa) [TODO: add other kappa approximations] rbar = center_norm / (n_examples * weights[cc]) concentrations[cc] = rbar * n_features - np.power(rbar, 3.) if np.abs(rbar - 1.0) < 1e-10: concentrations[cc] = MAX_CONTENTRATION else: concentrations[cc] /= 1. - np.power(rbar, 2.) # let python know we can free this (good for large dense X) del X_scaled return centers, weights, concentrations
0.00049
def _find_name_version_sep(egg_info, canonical_name): # type: (str, str) -> int """Find the separator's index based on the package's canonical name. `egg_info` must be an egg info string for the given package, and `canonical_name` must be the package's canonical name. This function is needed since the canonicalized name does not necessarily have the same length as the egg info's name part. An example:: >>> egg_info = 'foo__bar-1.0' >>> canonical_name = 'foo-bar' >>> _find_name_version_sep(egg_info, canonical_name) 8 """ # Project name and version must be separated by one single dash. Find all # occurrences of dashes; if the string in front of it matches the canonical # name, this is the one separating the name and version parts. for i, c in enumerate(egg_info): if c != "-": continue if canonicalize_name(egg_info[:i]) == canonical_name: return i raise ValueError("{} does not match {}".format(egg_info, canonical_name))
0.000964
def basename(self): '''Return the name of the file with the '.fasta' or 'fq.gz' etc removed''' return os.path.basename(self.read_file)[:-len(self._get_extension(self.read_file))]
0.014851
def split_into_segments(data): """Slices JPEG meta data into a list from JPEG binary data. """ if data[0:2] != b"\xff\xd8": raise InvalidImageDataError("Given data isn't JPEG.") head = 2 segments = [b"\xff\xd8"] while 1: if data[head: head + 2] == b"\xff\xda": segments.append(data[head:]) break else: length = struct.unpack(">H", data[head + 2: head + 4])[0] endPoint = head + length + 2 seg = data[head: endPoint] segments.append(seg) head = endPoint if (head >= len(data)): raise InvalidImageDataError("Wrong JPEG data.") return segments
0.001433
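A call sketch for the segment splitter above; 'photo.jpg' is a placeholder path, and per the code the first returned item is always the SOI marker while the SOS segment comes last:

with open('photo.jpg', 'rb') as f:  # placeholder file
    data = f.read()
segments = split_into_segments(data)
print(len(segments), segments[0])  # segments[0] == b'\xff\xd8'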
def p_Notifications(self, p): """Notifications : Notifications ',' Notification | Notification""" n = len(p) if n == 4: p[0] = ('Notifications', p[1][1] + [p[3]]) elif n == 2: p[0] = ('Notifications', [p[1]])
0.00692
def isscan(self, key, *, match=None, count=None): """Incrementally iterate set elements using async for. Usage example: >>> async for val in redis.isscan(key, match='something*'): ... print('Matched:', val) """ return _ScanIter(lambda cur: self.sscan(key, cur, match=match, count=count))
0.004566
def redo(self, *args): """Generate listing of images that user can save.""" if not self.gui_up: return mod_only = self.w.modified_only.get_state() treedict = Bunch.caselessDict() self.treeview.clear() self.w.status.set_text('') channel = self.fv.get_channel(self.chname) if channel is None: return # Only list modified images for saving. Scanning Datasrc is enough. if mod_only: all_keys = channel.datasrc.keys(sort='alpha') # List all images in the channel. else: all_keys = channel.get_image_names() # Extract info for listing and saving for key in all_keys: iminfo = channel.get_image_info(key) path = iminfo.get('path') idx = iminfo.get('idx') t = iminfo.get('time_modified') if path is None: # Special handling for generated buffer, eg mosaic infile = key is_fits = True else: infile = os.path.basename(path) infile_ext = os.path.splitext(path)[1] infile_ext = infile_ext.lower() is_fits = False if 'fit' in infile_ext: is_fits = True # Only list FITS files unless it is Ginga generated buffer if not is_fits: continue # Only list modified buffers if mod_only and t is None: continue # More than one ext modified, append to existing entry if infile in treedict: if t is not None: treedict[infile].extlist.add(idx) elist = sorted(treedict[infile].extlist) treedict[infile].MODEXT = ';'.join( map(self._format_extname, elist)) # Add new entry else: if t is None: s = '' extlist = set() else: s = self._format_extname(idx) extlist = set([idx]) treedict[infile] = Bunch.Bunch( IMAGE=infile, MODEXT=s, extlist=extlist, path=path) self.treeview.set_tree(treedict) # Resize column widths n_rows = len(treedict) if n_rows == 0: self.w.status.set_text('Nothing available for saving') elif n_rows < self.settings.get('max_rows_for_col_resize', 5000): self.treeview.set_optimal_column_widths() self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
0.001118
def get_vectors_loss(ops, docs, prediction, objective="L2"): """Compute a mean-squared error loss between the documents' vectors and the prediction. Note that this is ripe for customization! We could compute the vectors in some other way, e.g. with an LSTM language model, or use some other type of objective. """ # The simplest way to implement this would be to vstack the # token.vector values, but that's a bit inefficient, especially on GPU. # Instead we fetch the index into the vectors table for each of our tokens, # and look them up all at once. This prevents data copying. ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs]) target = docs[0].vocab.vectors.data[ids] if objective == "L2": d_target = prediction - target loss = (d_target ** 2).sum() elif objective == "cosine": loss, d_target = get_cossim_loss(prediction, target) return loss, d_target
0.001047
def get_wsgi_filter(self, name=None, defaults=None): """Reads the configuration source and finds and loads a WSGI filter defined by the filter entry with the name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI filter to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadfilter`. :param defaults: The ``global_conf`` that will be used during filter instantiation. :return: A callable that can filter a WSGI application. """ name = self._maybe_get_default_name(name) defaults = self._get_defaults(defaults) return loadfilter( self.pastedeploy_spec, name=name, relative_to=self.relative_to, global_conf=defaults, )
0.002257
def get_log_form(self, *args, **kwargs): """Pass through to provider LogAdminSession.get_log_form_for_update""" # Implemented from kitosid template for - # osid.resource.BinAdminSession.get_bin_form_for_update_template # This method might be a bit sketchy. Time will tell. if isinstance(args[-1], list) or 'log_record_types' in kwargs: return self.get_log_form_for_create(*args, **kwargs) else: return self.get_log_form_for_update(*args, **kwargs)
0.003854
def _temporary_filenames(total): """Context manager to create temporary files and remove them after use.""" temp_files = [_get_temporary_filename('optimage-') for i in range(total)] yield temp_files for temp_file in temp_files: try: os.remove(temp_file) except OSError: # Continue in case we could not remove the file. One reason is that # the file was never created. pass
0.002212
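A short usage sketch for the context manager above, assuming `contextlib` and `os` are imported in the module and `_get_temporary_filename` returns a fresh path:

with _temporary_filenames(2) as paths:
    for path in paths:
        print(path)  # e.g. /tmp/optimage-xxxxxxxx
# On exit, each file is removed if it was ever created.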
def append(self, data, **keys):
    """
    Append new rows to a table HDU

    parameters
    ----------
    data: ndarray or list of arrays
        A numerical python array with fields (recarray) or a list of
        arrays.  Should have the same fields as the existing table.  If
        only a subset of the table columns are present, the other columns
        are filled with zeros.

    columns: list, optional
        If a list of arrays is sent, also send the list of column names or
        column numbers.
    """

    firstrow = self._info['nrows']

    keys['firstrow'] = firstrow
    self.write(data, **keys)
0.002941
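A hedged usage sketch in the fitsio style (the file name and record dtype are assumptions for illustration):

import numpy as np
import fitsio

rows = np.zeros(3, dtype=[('x', 'f8'), ('name', 'S10')])
with fitsio.FITS('catalog.fits', 'rw') as fits:
    # New rows are written starting at the table's current nrows.
    fits[-1].append(rows)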
def new(self, filename=None):
    """Start a session in an independent process."""
    path = (self.exec_path,)
    if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
        # get the absolute path to the python executable
        p = find_executable("python")
        path = (p, 'python') + path
    else:
        # if run in a frozen env (.exe):
        # the first arg is the exec path of the next session:
        path += (self.exec_path,)
    if filename:
        path += ('-o', filename)
    os.spawnl(os.P_NOWAIT, *path)
0.003373
def _flow_check_handler_internal(self):
    """Periodic handler to check if installed flows are present.

    This handler runs periodically to check if installed flows are
    present.  It cannot detect or delete stale flows, if any are present;
    doing so would require considerably more complexity, and stale flows
    generally do not occur, so that logic is omitted here.
    """
    integ_flow = self.integ_br_obj.dump_flows_for(
        in_port=self.int_peer_port_num)
    ext_flow = self.ext_br_obj.dump_flows_for(
        in_port=self.phy_peer_port_num)
    for net_uuid, lvm in six.iteritems(self.local_vlan_map):
        vdp_vlan = lvm.any_consistent_vlan()
        flow_required = False
        if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
            return
        if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on Integ bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on External bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if flow_required:
            LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
                     " %(vdp)s",
                     {'lvid': lvm.lvid, 'vdp': vdp_vlan})
            self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan)
0.001138
def sanitize_for_archive(url, headers, payload):
    """Sanitize the payload of an HTTP request by removing the token
    information before storing/retrieving archived items

    :param url: HTTP url request
    :param headers: HTTP headers request
    :param payload: HTTP payload request

    :returns: url, headers and the sanitized payload
    """
    if MeetupClient.PKEY in payload:
        payload.pop(MeetupClient.PKEY)
    if MeetupClient.PSIGN in payload:
        payload.pop(MeetupClient.PSIGN)

    return url, headers, payload
0.003413
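The same idea in isolation; the literal key names below are fabricated stand-ins for MeetupClient.PKEY and MeetupClient.PSIGN:

payload = {'key': 'secret-token', 'sig': 'abc123', 'page': 2}
for secret in ('key', 'sig'):  # stand-ins for PKEY / PSIGN
    payload.pop(secret, None)
print(payload)  # {'page': 2}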
def filter_somaticsniper(job, tumor_bam, somaticsniper_output, tumor_pileup, univ_options, somaticsniper_options): """ Filter SomaticSniper calls. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param toil.fileStore.FileID somaticsniper_output: SomaticSniper output vcf :param toil.fileStore.FileID tumor_pileup: Pileup generated for the tumor bam :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :returns: fsID for the filtered genome-level vcf :rtype: toil.fileStore.FileID """ work_dir = os.getcwd() input_files = { 'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'], 'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'], 'input.vcf': somaticsniper_output, 'pileup.txt': tumor_pileup, 'genome.fa.tar.gz': somaticsniper_options['genome_fasta'], 'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']} input_files = get_files_from_filestore(job, input_files, work_dir, docker=False) for key in ('genome.fa', 'genome.fa.fai'): input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir) input_files = {key: docker_path(path) for key, path in input_files.items()} # Run snpfilter.pl parameters = ['snpfilter.pl', '--snp-file', input_files['input.vcf'], '--indel-file', input_files['pileup.txt']] # Creates /data/input.vcf.SNPfilter docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) # Run prepare_for_readcount.pl parameters = ['prepare_for_readcount.pl', '--snp-file', input_files['input.vcf'] + '.SNPfilter'] # Creates /data/input.vcf.SNPfilter.pos docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) # Run bam-readcount parameters = ['-b', '15', '-f', input_files['genome.fa'], '-l', input_files['input.vcf'] + '.SNPfilter.pos', '-w', '1', input_files['tumor.bam']] # Creates the read counts file with open(os.path.join(work_dir, 'readcounts.txt'), 'w') as readcounts_file: docker_call(tool='bam-readcount', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], outfile=readcounts_file, tool_version=somaticsniper_options['bam_readcount']['version']) # Run fpfilter.pl parameters = ['fpfilter.pl', '--snp-file', input_files['input.vcf'] + '.SNPfilter', '--readcount-file', docker_path(readcounts_file.name)] # Creates input.vcf.SNPfilter.fp_pass and input.vcf.SNPfilter.fp_fail docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) # Run highconfidence.pl parameters = ['highconfidence.pl', '--snp-file', input_files['input.vcf'] + '.SNPfilter.fp_pass'] # Creates input.vcf.SNPfilter.fp_pass.hc docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=somaticsniper_options['version']) outfile = job.fileStore.writeGlobalFile(os.path.join(os.getcwd(), 'input.vcf.SNPfilter.fp_pass.hc')) job.fileStore.logToMaster('Filtered SomaticSniper for %s successfully' % univ_options['patient']) return outfile
0.004836
def __normalize_grades(self):
    """
    Adjust the grades mapping.  If any individual grade has been set,
    set 'All' to False.
    """
    if 'grades' in self and self['grades']['All'] is True:
        for grade in self['grades']:
            if grade != 'All' and self['grades'][grade] is True:
                self['grades']['All'] = False
                break
0.005195
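The normalization rule in isolation, as a plain-dict sketch of what the method does to its dict-like self:

grades = {'All': True, 'Grade 1': True, 'Grade 2': False}
if grades['All'] and any(v for k, v in grades.items() if k != 'All'):
    grades['All'] = False
print(grades)  # {'All': False, 'Grade 1': True, 'Grade 2': False}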
def _on_rpc_done(self, future): """Triggered whenever the underlying RPC terminates without recovery. This is typically triggered from one of two threads: the background consumer thread (when calling ``recv()`` produces a non-recoverable error) or the grpc management thread (when cancelling the RPC). This method is *non-blocking*. It will start another thread to deal with shutting everything down. This is to prevent blocking in the background consumer and preventing it from being ``joined()``. """ _LOGGER.info("RPC termination has signaled manager shutdown.") future = _maybe_wrap_exception(future) thread = threading.Thread( name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future} ) thread.daemon = True thread.start()
0.003452
def extract(args):
    """
    %prog extract gffile

    --contigs: Extract particular contig(s) from the gff file. If multiple contigs are
    involved, use "," to separate, e.g. "contig_12,contig_150"; or provide a file
    with multiple contig IDs, one per line
    --names: Process particular ID(s) from the gff file. If multiple IDs are
    involved, use "," to separate; or provide a file with multiple IDs, one per line
    """
    p = OptionParser(extract.__doc__)
    p.add_option("--contigs",
                 help="Extract features from certain contigs [default: %default]")
    p.add_option("--names",
                 help="Extract features with certain names [default: %default]")
    p.add_option("--types", type="str", default=None,
                 help="Extract features of certain feature types [default: %default]")
    p.add_option("--children", default=0, choices=["1", "2"],
                 help="Specify number of iterations: `1` grabs children, " + \
                      "`2` grabs grand-children [default: %default]")
    p.add_option("--tag", default="ID",
                 help="Scan the tags for the names [default: %default]")
    p.add_option("--fasta", default=False, action="store_true",
                 help="Write FASTA if available [default: %default]")
    p.set_outfile()

    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    gffile, = args
    contigfile = opts.contigs
    namesfile = opts.names
    typesfile = opts.types
    nametag = opts.tag

    contigID = parse_multi_values(contigfile)
    names = parse_multi_values(namesfile)
    types = parse_multi_values(typesfile)
    outfile = opts.outfile

    if opts.children:
        assert types is not None or names is not None, "Must set --names or --types"
        if names is None:
            names = []
        populate_children(outfile, names, gffile, iter=opts.children, types=types)
        return

    fp = must_open(gffile)
    fw = must_open(opts.outfile, "w")
    for row in fp:
        atoms = row.split()
        if len(atoms) == 0:
            continue
        tag = atoms[0]
        if row[0] == "#":
            if row.strip() == "###":
                continue
            if not (tag == RegionTag and contigID and atoms[1] not in contigID):
                print(row.rstrip(), file=fw)
            if tag == FastaTag:
                break
            continue

        b = GffLine(row)
        attrib = b.attributes
        if contigID and tag not in contigID:
            continue
        if types and b.type in types:
            _id = b.accn
            if _id not in names:
                names.append(_id)
        if names is not None:
            if nametag not in attrib:
                continue
            if attrib[nametag][0] not in names:
                continue

        print(row.rstrip(), file=fw)

    if not opts.fasta:
        return

    f = Fasta(gffile)
    for s in contigID:
        if s in f:
            SeqIO.write([f[s]], fw, "fasta")
0.00597
def abort(self, err): """Abort all of the control blocks in the queue.""" if _debug: IOQueue._debug("abort %r", err) # send aborts to all of the members try: for iocb in self.queue: iocb.ioQueue = None iocb.abort(err) # flush the queue self.queue = [] # the queue is now empty, clear the event self.notempty.clear() except ValueError: pass
0.006173
def __insert_branch(u, v, dfs_data): """Embeds a branch Bu(v) (as described on page 22 of the paper). Returns whether the embedding was successful.""" w = L1(v, dfs_data) d_u = D(u, dfs_data) d_w = D(w, dfs_data) # Embed uw successful = __embed_frond(u, w, dfs_data) if not successful: return False # Embed a branch marker uu on the side opposite to uw, in the same frond block #successful = __embed_frond(u, v, dfs_data, as_branch_marker=True) successful = __embed_frond(u, u, dfs_data, as_branch_marker=True) if not successful: return False return True
0.006441
def update_probes(self, progress): """ update the probe tree """ new_values = self.read_probes.probes_values probe_count = len(self.read_probes.probes) if probe_count > self.tree_probes.topLevelItemCount(): # when run for the first time, there are no probes in the tree, so we have to fill it first self.fill_treewidget(self.tree_probes, new_values) else: for x in range(probe_count): topLvlItem = self.tree_probes.topLevelItem(x) for child_id in range(topLvlItem.childCount()): child = topLvlItem.child(child_id) child.value = new_values[topLvlItem.name][child.name] child.setText(1, str(child.value)) if self.probe_to_plot is not None: self.probe_to_plot.plot(self.matplotlibwidget_1.axes) self.matplotlibwidget_1.draw() if self.chk_probe_log.isChecked(): data = ','.join(list(np.array([[str(p) for p in list(p_dict.values())] for instr, p_dict in new_values.items()]).flatten())) self.probe_file.write('{:s}\n'.format(data))
0.004241
def check_success(self, device_id, sent_cmd1, sent_cmd2): """Check if last command succeeded by checking buffer""" device_id = device_id.upper() self.logger.info('check_success: for device %s cmd1 %s cmd2 %s', device_id, sent_cmd1, sent_cmd2) sleep(2) status = self.get_buffer_status(device_id) check_id = status.get('id_from', '') cmd1 = status.get('cmd1', '') cmd2 = status.get('cmd2', '') if (check_id == device_id) and (cmd1 == sent_cmd1) and (cmd2 == sent_cmd2): self.logger.info("check_success: Response device %s cmd %s cmd2 %s SUCCESS", check_id, cmd1, cmd2) return True self.logger.info("check_success: No valid response found for device %s cmd %s cmd2 %s", device_id, sent_cmd1, sent_cmd2) return False
0.005519
def volume(self): """The volume of the unit cell The actual definition of the volume depends on the number of active directions: * num_active == 0 -- always -1 * num_active == 1 -- length of the cell vector * num_active == 2 -- surface of the parallelogram * num_active == 3 -- volume of the parallelepiped """ active = self.active_inactive[0] if len(active) == 0: return -1 elif len(active) == 1: return np.linalg.norm(self.matrix[:, active[0]]) elif len(active) == 2: return np.linalg.norm(np.cross(self.matrix[:, active[0]], self.matrix[:, active[1]])) elif len(active) == 3: return abs(np.linalg.det(self.matrix))
0.003764
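A quick numeric check of the three-active-direction case (hypothetical cell matrix whose columns are the cell vectors):

import numpy as np

matrix = np.array([[2.0, 0.0, 0.0],
                   [0.0, 3.0, 0.0],
                   [0.0, 0.0, 4.0]])
# Volume of the parallelepiped spanned by the three cell vectors.
print(round(abs(np.linalg.det(matrix)), 6))  # 24.0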
def _write_avg_gradient(self) -> None:
    "Writes the average of the gradients to Tensorboard."
    avg_gradient = sum(x.data.mean() for x in self.gradients) / len(self.gradients)
    self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient)
0.015326
def _isCompatible(self, other, reporter): """ This is the environment implementation of :meth:`BaseGlyph.isCompatible`. Subclasses may override this method. """ glyph1 = self glyph2 = other # contour count if len(self.contours) != len(glyph2.contours): reporter.fatal = True reporter.contourCountDifference = True # contour pairs for i in range(min(len(glyph1), len(glyph2))): contour1 = glyph1[i] contour2 = glyph2[i] self._checkPairs(contour1, contour2, reporter, reporter.contours) # component count if len(glyph1.components) != len(glyph2.components): reporter.fatal = True reporter.componentCountDifference = True # component check component_diff = [] selfComponents = [component.baseGlyph for component in glyph1.components] otherComponents = [component.baseGlyph for component in glyph2.components] for index, (left, right) in enumerate( zip_longest(selfComponents, otherComponents) ): if left != right: component_diff.append((index, left, right)) if component_diff: reporter.warning = True reporter.componentDifferences = component_diff if not reporter.componentCountDifference and set(selfComponents) == set( otherComponents ): reporter.componentOrderDifference = True selfComponents_counted_set = collections.Counter(selfComponents) otherComponents_counted_set = collections.Counter(otherComponents) missing_from_glyph1 = ( otherComponents_counted_set - selfComponents_counted_set ) if missing_from_glyph1: reporter.fatal = True reporter.componentsMissingFromGlyph1 = sorted( missing_from_glyph1.elements() ) missing_from_glyph2 = ( selfComponents_counted_set - otherComponents_counted_set ) if missing_from_glyph2: reporter.fatal = True reporter.componentsMissingFromGlyph2 = sorted( missing_from_glyph2.elements() ) # guideline count if len(self.guidelines) != len(glyph2.guidelines): reporter.warning = True reporter.guidelineCountDifference = True # guideline check selfGuidelines = [] otherGuidelines = [] for source, names in ((self, selfGuidelines), (other, otherGuidelines)): for i, guideline in enumerate(source.guidelines): names.append((guideline.name, i)) guidelines1 = set(selfGuidelines) guidelines2 = set(otherGuidelines) if len(guidelines1.difference(guidelines2)) != 0: reporter.warning = True reporter.guidelinesMissingFromGlyph2 = list( guidelines1.difference(guidelines2)) if len(guidelines2.difference(guidelines1)) != 0: reporter.warning = True reporter.guidelinesMissingFromGlyph1 = list( guidelines2.difference(guidelines1)) # anchor count if len(self.anchors) != len(glyph2.anchors): reporter.warning = True reporter.anchorCountDifference = True # anchor check anchor_diff = [] selfAnchors = [anchor.name for anchor in glyph1.anchors] otherAnchors = [anchor.name for anchor in glyph2.anchors] for index, (left, right) in enumerate(zip_longest(selfAnchors, otherAnchors)): if left != right: anchor_diff.append((index, left, right)) if anchor_diff: reporter.warning = True reporter.anchorDifferences = anchor_diff if not reporter.anchorCountDifference and set(selfAnchors) == set( otherAnchors ): reporter.anchorOrderDifference = True selfAnchors_counted_set = collections.Counter(selfAnchors) otherAnchors_counted_set = collections.Counter(otherAnchors) missing_from_glyph1 = otherAnchors_counted_set - selfAnchors_counted_set if missing_from_glyph1: reporter.anchorsMissingFromGlyph1 = sorted( missing_from_glyph1.elements() ) missing_from_glyph2 = selfAnchors_counted_set - otherAnchors_counted_set if missing_from_glyph2: reporter.anchorsMissingFromGlyph2 = sorted( missing_from_glyph2.elements() )
0.001673
def set_community_names(communities):
    '''
    Manage the SNMP accepted community names and their permissions.

    .. note::
        Settings managed by Group Policy will always take precedence over
        those set using the SNMP interface. Therefore if this function finds
        Group Policy settings it will raise a CommandExecutionError

    Args:
        communities (dict): A dictionary of SNMP community names and
            permissions. The possible permissions can be found via
            ``win_snmp.get_permission_types``.

    Returns:
        bool: True if successful, otherwise False

    Raises:
        CommandExecutionError:
            If SNMP settings are being managed by Group Policy

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.set_community_names communities="{'TestCommunity': 'Read Only'}"
    '''
    values = dict()

    if __utils__['reg.key_exists'](_HKEY, _COMMUNITIES_GPO_KEY):
        _LOG.debug('Communities on this system are managed by Group Policy')
        raise CommandExecutionError(
            'Communities on this system are managed by Group Policy')

    current_communities = get_community_names()

    if communities == current_communities:
        _LOG.debug('Communities already contain the provided values.')
        return True

    for vname in communities:
        if not communities[vname]:
            communities[vname] = 'None'
        try:
            vdata = _PERMISSION_TYPES[communities[vname]]
        except KeyError:
            message = (
                "Invalid permission '{0}' specified. Valid permissions: "
                "{1}").format(communities[vname], _PERMISSION_TYPES.keys())
            raise SaltInvocationError(message)
        values[vname] = vdata

    # Check current communities.
    for current_vname in current_communities:
        if current_vname in values:
            # Modify existing communities that have a different permission value.
            if current_communities[current_vname] != values[current_vname]:
                __utils__['reg.set_value'](
                    _HKEY, _COMMUNITIES_KEY, current_vname,
                    values[current_vname], 'REG_DWORD')
        else:
            # Remove current communities that weren't provided.
            __utils__['reg.delete_value'](
                _HKEY, _COMMUNITIES_KEY, current_vname)

    # Create any new communities.
    for vname in values:
        if vname not in current_communities:
            __utils__['reg.set_value'](
                _HKEY, _COMMUNITIES_KEY, vname, values[vname], 'REG_DWORD')

    # Get the fields post-change so that we can verify that all values
    # were modified successfully. Track the ones that weren't.
    new_communities = get_community_names()

    failed_communities = dict()

    for new_vname in new_communities:
        if new_vname not in communities:
            failed_communities[new_vname] = None

    for vname in communities:
        if communities[vname] != new_communities[vname]:
            failed_communities[vname] = communities[vname]

    if failed_communities:
        _LOG.error('Unable to configure communities: %s', failed_communities)
        return False

    _LOG.debug('Communities configured successfully: %s', communities.keys())
    return True
0.000909
def report_by_type_stats(sect, stats, _): """make a report of * percentage of different types documented * percentage of different types with a bad name """ # percentage of different types documented and/or with a bad name nice_stats = {} for node_type in ("module", "class", "method", "function"): try: total = stats[node_type] except KeyError: raise exceptions.EmptyReportError() nice_stats[node_type] = {} if total != 0: try: documented = total - stats["undocumented_" + node_type] percent = (documented * 100.0) / total nice_stats[node_type]["percent_documented"] = "%.2f" % percent except KeyError: nice_stats[node_type]["percent_documented"] = "NC" try: percent = (stats["badname_" + node_type] * 100.0) / total nice_stats[node_type]["percent_badname"] = "%.2f" % percent except KeyError: nice_stats[node_type]["percent_badname"] = "NC" lines = ("type", "number", "old number", "difference", "%documented", "%badname") for node_type in ("module", "class", "method", "function"): new = stats[node_type] lines += ( node_type, str(new), "NC", "NC", nice_stats[node_type].get("percent_documented", "0"), nice_stats[node_type].get("percent_badname", "0"), ) sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
0.001265
def get_verse(self, v=1):
    """Get a specific verse, or None if it does not exist."""
    verse_count = len(self.verses)
    if v - 1 < verse_count:
        return self.verses[v - 1]
0.011765
def match(string, patterns): """Given a string return true if it matches the supplied list of patterns. Parameters ---------- string : str The string to be matched. patterns : None or [pattern, ...] The series of regular expressions to attempt to match. """ if patterns is None: return True else: return any(re.match(pattern, string) for pattern in patterns)
0.002242
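Usage is straightforward (assuming `re` is imported in the module, as the function requires); note that `re.match` anchors at the start of the string only:

print(match('report_2024.csv', [r'report_\d+', r'.*\.json$']))  # True
print(match('notes.txt', None))  # True -- no patterns means accept all
print(match('notes.txt', []))    # False -- an empty list matches nothing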
def inHouseJoy(self):
    """ Returns True if the object is in its house of joy. """
    house = self.house()
    return props.object.houseJoy[self.obj.id] == house.id
0.011494
def load_and_validate_keys(authorized_keys):
    """
    Loads authorized_keys as taken by :any:`instance_create`,
    :any:`disk_create` or :any:`rebuild`, and loads in any keys from any
    files provided.

    :param authorized_keys: A list of keys or paths to keys, or a single key

    :returns: A list of raw keys
    :raises: ValueError if keys in authorized_keys don't appear to be a raw
             key and can't be opened.
    """
    if not authorized_keys:
        return None

    if not isinstance(authorized_keys, list):
        authorized_keys = [authorized_keys]

    ret = []
    for k in authorized_keys:
        accepted_types = ('ssh-dss', 'ssh-rsa', 'ecdsa-sha2-nistp',
                          'ssh-ed25519')
        if any(k.startswith(t) for t in accepted_types):
            # this looks like a key, cool
            ret.append(k)
        else:
            # it doesn't appear to be a key.. is it a path to the key?
            k = os.path.expanduser(k)
            if os.path.isfile(k):
                with open(k) as f:
                    ret.append(f.read().rstrip())
            else:
                raise ValueError("authorized_keys must either be paths "
                                 "to the key files or a list of raw "
                                 "public keys of one of these types: "
                                 "{}".format(accepted_types))

    return ret
0.003687
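A usage sketch; the key text is fabricated and the file path is an assumption:

raw_key = 'ssh-ed25519 AAAAC3Nza... user@host'  # passed through unchanged
print(load_and_validate_keys(raw_key))
# Raw keys and paths to key files can be mixed in one list:
# load_and_validate_keys([raw_key, '~/.ssh/id_rsa.pub'])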
def parse(self, data: RawMessage) -> Message: """\ Parses a binary protobuf message into a Message object. """ try: return self.receiver.parse(data) except KeyError as err: raise UnknownCommandError from err except DecodeError as err: raise UnknownCommandError(f"{err}") from err
0.00551
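Callers can treat both failure modes uniformly, since KeyError and DecodeError both surface as UnknownCommandError (sketch; `parser`, `raw_bytes` and `log` are assumptions):

try:
    message = parser.parse(raw_bytes)
except UnknownCommandError as err:
    # Covers both unknown command ids (KeyError) and malformed
    # protobuf payloads (DecodeError).
    log.warning('dropping unparseable message: %s', err)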
def token_load(self, line_number, tokens):
    self.line_number = line_number
    assert tokens[-1] == 0x00, "line code %s doesn't end with \\x00: %s" % (
        repr(tokens), repr(tokens[-1])
    )

    """
    NOTE: The BASIC interpreter changed the REM shortcut and ELSE internally:
         "'" <-> ":'"
      "ELSE" <-> ":ELSE"

    See also:
    http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4310&p=11632#p11630
    """
    for src, dst in self.tokens_replace_rules:
        log.info("Replace tokens %s with $%02x",
            pformat_byte_hex_list(src), dst
        )
        log.debug("Before..: %s", pformat_byte_hex_list(tokens))
        tokens = list_replace(tokens, src, dst)
        log.debug("After...: %s", pformat_byte_hex_list(tokens))

    self.line_code = tokens[:-1]
0.005701
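The token substitution relies on replacing a subsequence within a byte list; a minimal sketch of what a helper like `list_replace` presumably does (an assumption for illustration, not the dragonlib original):

def list_replace(values, src, dst):
    # Replace every occurrence of the subsequence `src` with the single byte `dst`.
    result = []
    i = 0
    while i < len(values):
        if values[i:i + len(src)] == src:
            result.append(dst)
            i += len(src)
        else:
            result.append(values[i])
            i += 1
    return result

# ":ELSE"-style two-byte sequences collapse to the bare token:
assert list_replace([0x3A, 0x84, 0x01], [0x3A, 0x84], 0x84) == [0x84, 0x01]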
def tupled_argmax(a): """ Argmax that returns an index tuple. Note that `numpy.argmax` will return a scalar index as if you had flattened the array. Parameters ---------- a : array_like Input array. Returns ------- index : tuple Tuple of index, even if `a` is one-dimensional. Note that this can immediately be used to index `a` as in ``a[index]``. Examples -------- >>> import numpy as np >>> import deepdish as dd >>> a = np.arange(6).reshape(2,3) >>> a array([[0, 1, 2], [3, 4, 5]]) >>> dd.tupled_argmax(a) (1, 2) """ return np.unravel_index(np.argmax(a), np.shape(a))
0.001451