Columns:
    text  (string, lengths 78 to 104k)
    score (float64, range 0 to 0.18)
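The two columns summarized above pair a flattened Python code string ("text") with a small floating-point score. A minimal, hypothetical sketch of how such a listing might be consumed, assuming it is exposed as a Hugging Face-style dataset (the repository name "example/code-with-scores" and the 0.01 threshold are illustrative, not taken from this document):

    # Sketch only: assumes the rows below are published as a datasets-compatible
    # dataset with "text" and "score" columns; names and threshold are placeholders.
    from datasets import load_dataset

    ds = load_dataset("example/code-with-scores", split="train")

    # Keep rows in the upper part of the observed 0-0.18 score range.
    high_scoring = ds.filter(lambda row: row["score"] > 0.01)

    for row in high_scoring.select(range(3)):
        print(round(row["score"], 5), row["text"][:60], "...")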
def add_to(self, parent, additions):
    "Modify parent to include all elements in additions"
    for x in additions:
        if x not in parent:
            parent.append(x)
    self.changed()
0.00905
def split_tracks(lat,lon,*args): '''assumes eastward motion''' tracks = [] lt,ln = [lat[0]],[lon[0]] zz = [[z[0]] for z in args] for i in range(1,len(lon)): lt.append(lat[i]) for z,a in zip(zz,args): z.append(a[i]) d1 = abs(lon[i] - lon[i-1]) d2 = abs((lon[i-1] + 360) - lon[i]) d3 = abs(lon[i-1] - (lon[i] + 360)) if d2 < d1: ln.append(lon[i]-360) tracks.append([np.array(lt),np.array(ln)] \ + [np.array(z) for z in zz]) lt = [lat[i-1],lat[i]] ln = [lon[i-1]+360,lon[i]] zz = [[z[i-1]] for z in args] elif d3 < d1: ln.append(lon[i]+360) tracks.append([np.array(lt),np.array(ln)] \ + [np.array(z) for z in zz]) lt = [lat[i-1],lat[i]] ln = [lon[i-1]-360,lon[i]] zz = [[z[i-1],z[i]] for z in args] else: ln.append(lon[i]) if len(lt): tracks.append([np.array(lt),np.array(ln)] \ + [np.array(z) for z in zz]) return tracks
0.018699
def get_validation_errors(data, schema=None): """Validation errors for a given record. Args: data (dict): record to validate. schema (Union[dict, str]): schema to validate against. If it is a string, it is intepreted as the name of the schema to load (e.g. ``authors`` or ``jobs``). If it is ``None``, the schema is taken from ``data['$schema']``. If it is a dictionary, it is used directly. Yields: jsonschema.exceptions.ValidationError: validation errors. Raises: SchemaNotFound: if the given schema was not found. SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was found in ``data``. jsonschema.SchemaError: if the schema is invalid. """ schema = _load_schema_for_record(data, schema) errors = Draft4Validator( schema, resolver=LocalRefResolver.from_schema(schema), format_checker=inspire_format_checker ) return errors.iter_errors(data)
0.000969
def _call(self, x, out=None):
    """Return ``self(x[, out])``."""
    # TODO: pass reasonable options on to the interpolator
    def nearest(arg, out=None):
        """Interpolating function with vectorization."""
        if is_valid_input_meshgrid(arg, self.grid.ndim):
            input_type = 'meshgrid'
        else:
            input_type = 'array'

        interpolator = _NearestInterpolator(
            self.grid.coord_vectors, x, variant=self.variant,
            input_type=input_type)

        return interpolator(arg, out=out)

    return self.range.element(nearest, vectorized=True)
0.003082
def time_correlation_direct_by_mtx_vec_prod(P, mu, obs1, obs2=None, time=1, start_values=None, return_P_k_obs=False): r"""Compute time-correlation of obs1, or time-cross-correlation with obs2. The time-correlation at time=k is computed by the matrix-vector expression: cor(k) = obs1' diag(pi) P^k obs2 Parameters ---------- P : ndarray, shape=(n, n) or scipy.sparse matrix Transition matrix obs1 : ndarray, shape=(n) Vector representing observable 1 on discrete states obs2 : ndarray, shape=(n) Vector representing observable 2 on discrete states. If not given, the autocorrelation of obs1 will be computed mu : ndarray, shape=(n) stationary distribution vector. time : int time point at which the (auto)correlation will be evaluated. start_values : (time, ndarray <P, <P, obs2>>_t) start iteration of calculation of matrix power product, with this values. only useful when calling this function out of a loop over times. return_P_k_obs : bool if True, the dot product <P^time, obs2> will be returned for further calculations. Returns ------- cor(k) : float correlation between observations """ # input checks if not (type(time) == int): if not (type(time) == np.int64): raise TypeError("given time (%s) is not an integer, but has type: %s" % (str(time), type(time))) if obs1.shape[0] != P.shape[0]: raise ValueError("observable shape not compatible with given matrix") if obs2 is None: obs2 = obs1 # multiply element-wise obs1 and pi. this is obs1' diag(pi) l = np.multiply(obs1, mu) # raise transition matrix to power of time by substituting dot product # <Pk, obs2> with something like <P, <P, obs2>>. # This saves a lot of matrix matrix multiplications. if start_values: # begin with a previous calculated val P_i_obs = start_values[1] # calculate difference properly! time_prev = start_values[0] t_diff = time - time_prev r = range(t_diff) else: if time >= 2: P_i_obs = np.dot(P, np.dot(P, obs2)) # vector <P, <P, obs2> := P^2 * obs r = range(time - 2) elif time == 1: P_i_obs = np.dot(P, obs2) # P^1 = P*obs r = range(0) elif time == 0: # P^0 = I => I*obs2 = obs2 P_i_obs = obs2 r = range(0) for k in r: # since we already substituted started with 0 P_i_obs = np.dot(P, P_i_obs) corr = np.dot(l, P_i_obs) if return_P_k_obs: return corr, (time, P_i_obs) else: return corr
0.002202
def draw(args): """ %prog draw --input newicktrees [options] Draw phylogenetic trees into single or combined plots. Input trees should be one of the following: 1. single Newick format tree file 2. a dir containing *ONLY* the tree files to be drawn Newick format: http://evolution.genetics.washington.edu/phylip/newicktree.html This function wraps on jcvi.graphics.tree This function is better used for trees generated by jcvi.apps.phylo (rooted if possible). For drawing general Newick trees from external sources invoke jcvi.graphics.tree directly, which also gives more drawing options. """ trunc_name_options = ['headn', 'oheadn', 'tailn', 'otailn'] p = OptionParser(draw.__doc__) p.add_option("--input", help="path to single input tree file or a dir "\ "containing ONLY the input tree files") p.add_option("--combine", type="string", default="1x1", \ help="combine multiple trees into one plot in nrowxncol") p.add_option("--trunc_name", default=None, help="Options are: {0}. " \ "truncate first n chars, retains only first n chars, " \ "truncate last n chars, retain only last chars. " \ "n=1~99. [default: %default]".format(trunc_name_options)) p.add_option("--SH", default=None, help="path to a file containing SH test p-values in format:" \ "tree_file_name<tab>p-values " \ "This file can be generated with jcvi.apps.phylo build [default: %default]") p.add_option("--scutoff", default=50, type="int", help="cutoff for displaying node support, 0-100 [default: %default]") p.add_option("--barcode", default=None, help="path to seq/taxon name barcode mapping file: " \ "barcode<tab>new_name " \ "This option is downstream of `--trunc_name` [default: %default]") p.add_option("--leafcolorfile", default=None, help="path to a mapping file containing font colors " \ "for the OTUs: leafname<tab>color [default: %default]") p.set_outdir() opts, args, iopts = p.set_image_options(figsize="8x6") input = opts.input outdir = opts.outdir combine = opts.combine.split("x") trunc_name = opts.trunc_name SH = opts.SH mkdir(outdir) if not input: sys.exit(not p.print_help()) elif op.isfile(input): trees_file = input treenames = [op.basename(input)] elif op.isdir(input): trees_file = op.join(outdir, "alltrees.dnd") treenames = [] for f in sorted(os.listdir(input)): sh("cat {0}/{1} >> {2}".format(input, f, trees_file), log=False) treenames.append(f) else: sys.exit(not p.print_help()) trees = OrderedDict() tree = "" i = 0 for row in LineFile(trees_file, comment="#", load=True).lines: if i == len(treenames): break if not len(row): continue if ";" in row: # sanity check if row.index(";") != len(row)-1: ts = row.split(";") for ii in xrange(len(ts)-1): ts[ii] += ";" else: ts = [row] for t in ts: if ";" in t: tree += t if tree: trees[treenames[i]] = tree tree = "" i+=1 else: tree += t else: tree += row logging.debug("A total of {0} trees imported.".format(len(trees))) sh("rm {0}".format(op.join(outdir, "alltrees.dnd"))) _draw_trees(trees, nrow=int(combine[0]), ncol=int(combine[1]), rmargin=.3,\ iopts=iopts, outdir=outdir, shfile=SH, trunc_name=trunc_name, \ scutoff=opts.scutoff, barcodefile = opts.barcode, leafcolorfile=opts.leafcolorfile)
0.005484
def _getEventsOnDay(self, request, day):
    """Return all the events in this site for a given day."""
    home = request.site.root_page
    return getAllEventsByDay(request, day, day, home=home)[0]
0.009524
def distances_within(coords_a, coords_b, cutoff, periodic=False, method="simple"): """Calculate distances between the array of coordinates *coord_a* and *coord_b* within a certain cutoff. This function is a wrapper around different routines and data structures for distance searches. It return a np.ndarray containing the distances. **Parameters** coords_a: np.ndarray((N, 3), dtype=float) First coordinate array coords_b: np.ndarray((N, 3), dtype=float) Second coordinate array cutoff: float Maximum distance to search for periodic: False or np.ndarray((3,), dtype=float) If False, don't consider periodic images. Otherwise periodic is an array containing the periodicity in the 3 dimensions. method: "simple" | "cell-lists" The method to use. *simple* is a brute-force distance search, *kdtree* uses scipy ``ckdtree`` module (periodic not available) and *cell-lists* uses the cell linked list method. """ mat = distance_matrix(coords_a, coords_b, cutoff, periodic, method) return mat[mat.nonzero()]
0.003463
def color(cls, value):
    """task value/score color"""
    index = bisect(cls.breakpoints, value)
    return colors.fg(cls.colors_[index])
0.013245
def simplex_select_entering_arc(self, t, pivot): ''' API: simplex_select_entering_arc(self, t, pivot) Description: Decides and returns entering arc using pivot rule. Input: t: current spanning tree solution pivot: May be one of the following; 'first_eligible' or 'dantzig'. 'dantzig' is the default value. Return: Returns entering arc tuple (k,l) ''' if pivot=='dantzig': # pick the maximum violation candidate = {} for e in self.edge_attr: if e in t.edge_attr: continue flow_ij = self.edge_attr[e]['flow'] potential_i = self.get_node(e[0]).get_attr('potential') potential_j = self.get_node(e[1]).get_attr('potential') capacity_ij = self.edge_attr[e]['capacity'] c_ij = self.edge_attr[e]['cost'] cpi_ij = c_ij - potential_i + potential_j if flow_ij==0: if cpi_ij < 0: candidate[e] = cpi_ij elif flow_ij==capacity_ij: if cpi_ij > 0: candidate[e] = cpi_ij for e in candidate: max_c = e max_v = abs(candidate[e]) break for e in candidate: if max_v < abs(candidate[e]): max_c = e max_v = abs(candidate[e]) elif pivot=='first_eligible': # pick the first eligible for e in self.edge_attr: if e in t.edge_attr: continue flow_ij = self.edge_attr[e]['flow'] potential_i = self.get_node(e[0]).get_attr('potential') potential_j = self.get_node(e[1]).get_attr('potential') capacity_ij = self.edge_attr[e]['capacity'] c_ij = self.edge_attr[e]['cost'] cpi_ij = c_ij - potential_i + potential_j if flow_ij==0: if cpi_ij < 0: max_c = e max_v = abs(cpi_ij) elif flow_ij==capacity_ij: if cpi_ij > 0: max_c = e max_v = cpi_ij else: raise Exception("Unknown pivot rule.") return max_c
0.003241
def create(domain_name, years, **kwargs): ''' Try to register the specified domain name domain_name The domain name to be registered years Number of years to register Returns the following information: - Whether or not the domain was renewed successfully - Whether or not WhoisGuard is enabled - Whether or not registration is instant - The amount charged for registration - The domain ID - The order ID - The transaction ID CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains.create my-domain-name 2 ''' idn_codes = ('afr', 'alb', 'ara', 'arg', 'arm', 'asm', 'ast', 'ave', 'awa', 'aze', 'bak', 'bal', 'ban', 'baq', 'bas', 'bel', 'ben', 'bho', 'bos', 'bul', 'bur', 'car', 'cat', 'che', 'chi', 'chv', 'cop', 'cos', 'cze', 'dan', 'div', 'doi', 'dut', 'eng', 'est', 'fao', 'fij', 'fin', 'fre', 'fry', 'geo', 'ger', 'gla', 'gle', 'gon', 'gre', 'guj', 'heb', 'hin', 'hun', 'inc', 'ind', 'inh', 'isl', 'ita', 'jav', 'jpn', 'kas', 'kaz', 'khm', 'kir', 'kor', 'kur', 'lao', 'lav', 'lit', 'ltz', 'mal', 'mkd', 'mlt', 'mol', 'mon', 'mri', 'msa', 'nep', 'nor', 'ori', 'oss', 'pan', 'per', 'pol', 'por', 'pus', 'raj', 'rum', 'rus', 'san', 'scr', 'sin', 'slo', 'slv', 'smo', 'snd', 'som', 'spa', 'srd', 'srp', 'swa', 'swe', 'syr', 'tam', 'tel', 'tgk', 'tha', 'tib', 'tur', 'ukr', 'urd', 'uzb', 'vie', 'wel', 'yid') require_opts = ['AdminAddress1', 'AdminCity', 'AdminCountry', 'AdminEmailAddress', 'AdminFirstName', 'AdminLastName', 'AdminPhone', 'AdminPostalCode', 'AdminStateProvince', 'AuxBillingAddress1', 'AuxBillingCity', 'AuxBillingCountry', 'AuxBillingEmailAddress', 'AuxBillingFirstName', 'AuxBillingLastName', 'AuxBillingPhone', 'AuxBillingPostalCode', 'AuxBillingStateProvince', 'RegistrantAddress1', 'RegistrantCity', 'RegistrantCountry', 'RegistrantEmailAddress', 'RegistrantFirstName', 'RegistrantLastName', 'RegistrantPhone', 'RegistrantPostalCode', 'RegistrantStateProvince', 'TechAddress1', 'TechCity', 'TechCountry', 'TechEmailAddress', 'TechFirstName', 'TechLastName', 'TechPhone', 'TechPostalCode', 'TechStateProvince', 'Years'] opts = salt.utils.namecheap.get_opts('namecheap.domains.create') opts['DomainName'] = domain_name opts['Years'] = six.text_type(years) def add_to_opts(opts_dict, kwargs, value, suffix, prefices): for prefix in prefices: nextkey = prefix + suffix if nextkey not in kwargs: opts_dict[nextkey] = value for key, value in six.iteritems(kwargs): if key.startswith('Registrant'): add_to_opts(opts, kwargs, value, key[10:], ['Tech', 'Admin', 'AuxBilling', 'Billing']) if key.startswith('Tech'): add_to_opts(opts, kwargs, value, key[4:], ['Registrant', 'Admin', 'AuxBilling', 'Billing']) if key.startswith('Admin'): add_to_opts(opts, kwargs, value, key[5:], ['Registrant', 'Tech', 'AuxBilling', 'Billing']) if key.startswith('AuxBilling'): add_to_opts(opts, kwargs, value, key[10:], ['Registrant', 'Tech', 'Admin', 'Billing']) if key.startswith('Billing'): add_to_opts(opts, kwargs, value, key[7:], ['Registrant', 'Tech', 'Admin', 'AuxBilling']) if key == 'IdnCode' and key not in idn_codes: log.error('Invalid IdnCode') raise Exception('Invalid IdnCode') opts[key] = value for requiredkey in require_opts: if requiredkey not in opts: log.error('Missing required parameter \'%s\'', requiredkey) raise Exception('Missing required parameter \'{0}\''.format(requiredkey)) response_xml = salt.utils.namecheap.post_request(opts) if response_xml is None: return {} domainresult = response_xml.getElementsByTagName("DomainCreateResult")[0] return salt.utils.namecheap.atts_to_dict(domainresult)
0.005516
def run_hooks(self, name, event=None, context=None):
    """Runs plugin hooks for each registered plugin."""
    hooks = {
        "pre:setup": lambda p: p.pre_setup(self),
        "post:setup": lambda p: p.post_setup(self),
        "pre:invoke": lambda p: p.pre_invoke(event, context),
        "post:invoke": lambda p: p.post_invoke(event, context),
        "pre:report": lambda p: p.pre_report(self.report),
        "post:report": lambda p: p.post_report(self.report),
    }
    if name in hooks:
        for p in self.plugins:
            if p.enabled:
                try:
                    hooks[name](p)
                except Exception as e:
                    # Report both the failing hook name and the error.
                    logger.error(
                        "IOpipe plugin %s hook raised error: %s" % (name, str(e))
                    )
                    logger.exception(e)
0.003264
def draw_imf_samples(**kwargs): ''' Draw samples for power-law model Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass ''' alpha_salpeter = kwargs.get('alpha', -2.35) nsamples = kwargs.get('nsamples', 1) min_mass = kwargs.get('min_mass', 5.) max_mass = kwargs.get('max_mass', 95.) max_mtotal = min_mass + max_mass a = (max_mass/min_mass)**(alpha_salpeter + 1.0) - 1.0 beta = 1.0 / (alpha_salpeter + 1.0) k = nsamples * int(1.5 + log(1 + 100./nsamples)) aa = min_mass * (1.0 + a * np.random.random(k))**beta bb = np.random.uniform(min_mass, aa, k) idx = np.where(aa + bb < max_mtotal) m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx] return np.resize(m1, nsamples), np.resize(m2, nsamples)
0.001036
def incr(self, name, amount=1): """ Increase the value at key ``name`` by ``amount``. If no key exists, the value will be initialized as ``amount`` . Like **Redis.INCR** :param string name: the key name :param int amount: increments :return: the integer value at key ``name`` :rtype: int >>> ssdb.incr('set_count', 3) 13 >>> ssdb.incr('set_count', 1) 14 >>> ssdb.incr('set_count', -2) 12 >>> ssdb.incr('temp_count', 42) 42 """ amount = get_integer('amount', amount) return self.execute_command('incr', name, amount)
0.007205
def ssh_invite(ctx, code_length, user, **kwargs):
    """
    Add a public-key to a ~/.ssh/authorized_keys file
    """
    for name, value in kwargs.items():
        setattr(ctx.obj, name, value)
    from . import cmd_ssh
    ctx.obj.code_length = code_length
    ctx.obj.ssh_user = user
    return go(cmd_ssh.invite, ctx.obj)
0.003058
def add_path(self, w, h):
    """Reference to `a:custGeom` descendant or |None| if not present."""
    custGeom = self.spPr.custGeom
    if custGeom is None:
        raise ValueError('shape must be freeform')
    pathLst = custGeom.get_or_add_pathLst()
    return pathLst.add_path(w=w, h=h)
0.006369
def _embed(x, order=3, delay=1):
    """Time-delay embedding.

    Parameters
    ----------
    x : 1d-array, shape (n_times)
        Time series
    order : int
        Embedding dimension (order)
    delay : int
        Delay.

    Returns
    -------
    embedded : ndarray, shape (n_times - (order - 1) * delay, order)
        Embedded time-series.
    """
    N = len(x)
    Y = np.empty((order, N - (order - 1) * delay))
    for i in range(order):
        Y[i] = x[i * delay:i * delay + Y.shape[1]]
    return Y.T
0.001923
def text_filter_changed(self, text):
    """
    Called to handle changes to the text filter.

    :param text: The text for the filter.
    """
    text = text.strip() if text else None
    if text is not None:
        self.__text_filter = ListModel.TextFilter("text_for_filter", text)
    else:
        self.__text_filter = None
    self.__update_filter()
0.004914
def __ds(self):
    """
    Get the I{default} service if defined in the I{options}.

    @return: A L{PortSelector} for the I{default} service.
    @rtype: L{PortSelector}.
    """
    ds = self.__client.options.service
    if ds is not None:
        return self.__find(ds)
0.006515
def update_if_absent(self, **kwargs):
    """Update the settings when the target fields are None.

    Args:
        kwargs: The keyword arguments to set corresponding fields.
    """
    for arg in kwargs:
        if hasattr(self, arg):
            if getattr(self, arg) is None:
                setattr(self, arg, kwargs[arg])
        else:
            raise ValueError("Invalid RayParams parameter in"
                             " update_if_absent: %s" % arg)
    self._check_usage()
0.003724
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12): '''Given a collection of pitches, estimate its tuning offset (in fractions of a bin) relative to A440=440.0Hz. Parameters ---------- frequencies : array-like, float A collection of frequencies detected in the signal. See `piptrack` resolution : float in `(0, 1)` Resolution of the tuning as a fraction of a bin. 0.01 corresponds to cents. bins_per_octave : int > 0 [scalar] How many frequency bins per octave Returns ------- tuning: float in `[-0.5, 0.5)` estimated tuning deviation (fractions of a bin) See Also -------- estimate_tuning Estimating tuning from time-series or spectrogram input Examples -------- >>> # Generate notes at +25 cents >>> freqs = librosa.cqt_frequencies(24, 55, tuning=0.25) >>> librosa.pitch_tuning(freqs) 0.25 >>> # Track frequencies from a real spectrogram >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> pitches, magnitudes, stft = librosa.ifptrack(y, sr) >>> # Select out pitches with high energy >>> pitches = pitches[magnitudes > np.median(magnitudes)] >>> librosa.pitch_tuning(pitches) 0.089999999999999969 ''' frequencies = np.atleast_1d(frequencies) # Trim out any DC components frequencies = frequencies[frequencies > 0] if not np.any(frequencies): warnings.warn('Trying to estimate tuning from empty frequency set.') return 0.0 # Compute the residual relative to the number of bins residual = np.mod(bins_per_octave * time_frequency.hz_to_octs(frequencies), 1.0) # Are we on the wrong side of the semitone? # A residual of 0.95 is more likely to be a deviation of -0.05 # from the next tone up. residual[residual >= 0.5] -= 1.0 bins = np.linspace(-0.5, 0.5, int(np.ceil(1. / resolution)) + 1) counts, tuning = np.histogram(residual, bins) # return the histogram peak return tuning[np.argmax(counts)]
0.000477
def open(self): """Open the subtitle file into an Aeidon project.""" try: self.project.open_main(self.filename) except UnicodeDecodeError: with open(self.filename, 'rb') as openfile: encoding = get_encoding(openfile.read()) try: self.project.open_main(self.filename, encoding) except UnicodeDecodeError: LOGGER.error("'%s' encountered a fatal encoding error", self.filename) sys.exit(1) except: # pylint: disable=W0702 open_error(self.filename) except: # pylint: disable=W0702 open_error(self.filename)
0.005563
def _process_group(input_group, required_group, groupname, append_subgroups=None): """ Process one group from the input yaml. Ensure it has the required entries. If there is a subgroup that should be processed and then appended to the rest of the subgroups in that group, handle it accordingly. :param dict input_group: The dict of values of the input group :param dict required_group: The dict of required values for the input group :param str groupname: The name of the group being processed :param list append_subgroups: list of subgroups to append to each, other subgroup in this group :return: processed dict of entries for the group :rtype: dict """ if append_subgroups is None: append_subgroups = [] tool_options = {} for key in input_group: _ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + '::' + key) if key in append_subgroups: continue else: tool_options[key] = input_group[key] for key in input_group: if key in append_subgroups: continue else: for yek in append_subgroups: tool_options[key].update(input_group[yek]) return tool_options
0.004769
def service_info(self, name): """Pull descriptive info of a service by name. Information returned includes the service's user friendly name and whether it was preregistered or added dynamically. Returns: dict: A dictionary of service information with the following keys set: long_name (string): The user friendly name of the service preregistered (bool): Whether the service was explicitly called out as a preregistered service. """ return self._loop.run_coroutine(self._client.service_info(name))
0.003185
def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={}, blankIfNoMatch=False): """ Return a new field value based on match (of another field) against regex queried from MongoDB :param string fieldName: Field name to query against :param MongoClient db: MongoClient instance connected to MongoDB :param dict deriveInput: Values to perform lookup against: {"lookupField1": "lookupVal1"} :param bool overwrite: Should an existing field value be replaced :param string fieldVal: Current field value :param dict histObj: History object to which changes should be appended :param bool blankIfNoMatch: Should field value be set to blank if no match is found """ if len(deriveInput) > 1: raise Exception("more than one value in deriveInput") field_val_new = fieldVal check_match = False # derive_using = deriveInput row = list(deriveInput.keys())[0] pattern = '' if deriveInput[row] != '' and (overwrite or (fieldVal == '')): lookup_dict = { 'deriveFieldName': row, 'fieldName': fieldName } coll = db['deriveRegex'] re_val = coll.find(lookup_dict, ['pattern', 'replace']) for l_val in re_val: try: match = re.match(l_val['pattern'], _DataClean_(deriveInput[row]), flags=re.IGNORECASE) if match: field_val_new = re.sub(l_val['pattern'], l_val['replace'], _DataClean_(deriveInput[row]), flags=re.IGNORECASE) pattern = l_val['pattern'] check_match = True break except KeyError as key_error_obj: warnings.warn('schema error', key_error_obj) if re_val: re_val.close() if field_val_new == fieldVal and blankIfNoMatch: field_val_new = '' pattern = 'no matching pattern' # derive_using = {"blankIfNoMatch": "no match found"} change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal, toVal=field_val_new, using=deriveInput, pattern=pattern) hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change, fieldName=fieldName) return field_val_new, hist_obj_upd, check_match
0.001157
def get_reference_fields(self, exclude_models=None): """ Get all Django model fields which reference the Item model. """ if exclude_models is None: exclude_models = [] result = [] for django_model in django.apps.apps.get_models(): if any([issubclass(django_model, m) for m in exclude_models]): continue for django_field in django_model._meta.fields: if isinstance(django_field, models.ForeignKey) and django_field.related.to == Item: result = [(m, f) for (m, f) in result if not issubclass(django_model, m)] result.append((django_model, django_field)) return result
0.005487
def upload(name):
    '''Handle upload on POST if authorized.'''
    storage = fs.by_name(name)
    return jsonify(success=True, **handle_upload(storage))
0.006494
def move(self, path_list, dest, **kwargs):
    """
    Move files or directories.

    :param path_list: source file paths on Baidu Pan to move
    :type path_list: list

    :param dest: destination directory
    :type dest: str
    """
    def __path(path):
        if path.endswith('/'):
            return path.split('/')[-2]
        else:
            return os.path.basename(path)

    params = {
        'opera': 'move'
    }
    data = {
        'filelist': json.dumps([{
            "path": path,
            "dest": dest,
            "newname": __path(path)} for path in path_list]),
    }
    url = 'http://{0}/api/filemanager'.format(BAIDUPAN_SERVER)
    return self._request('filemanager', 'move', url=url, data=data,
                         extra_params=params, **kwargs)
0.004587
def json_2_nic(json_obj):
    """
    transform JSON obj coming from Ariane to ariane_clip3 object
    :param json_obj: the JSON obj coming from Ariane
    :return: ariane_clip3 NIC object
    """
    LOGGER.debug("NIC.json_2_nic")
    return NIC(nic_id=json_obj['nicID'],
               mac_address=json_obj['nicMacAddress'],
               name=json_obj['nicName'],
               speed=json_obj['nicSpeed'],
               duplex=json_obj['nicDuplex'],
               mtu=json_obj['nicMtu'],
               nic_osi_id=json_obj['nicOSInstanceID'],
               nic_ipa_id=json_obj['nicIPAddressID'])
0.003035
def obtain_token(self):
    """
    Try to obtain token from all end-points that were ever used to serve the
    token. If the request returns 404 NOT FOUND, retry with older version of
    the URL.
    """
    token_end_points = ('token/obtain',
                        'obtain-token',
                        'obtain_token')
    for end_point in token_end_points:
        try:
            return self.auth[end_point]._(page_size=None)['token']
        except BeanBagException as e:
            if e.response.status_code != 404:
                raise
    raise Exception('Could not obtain token from any known URL.')
0.005908
def rate_limits(self):
    """Returns a list of rate limit details."""
    if not self._rate_limits:
        self._rate_limits = utilities.get_rate_limits(self.response)
    return self._rate_limits
0.009302
def plateid6(self, filetype, **kwargs):
    """Print plate ID, accounting for 5-6 digit plate IDs.

    Parameters
    ----------
    filetype : str
        File type parameter.
    plateid : int or str
        Plate ID number. Will be converted to int internally.

    Returns
    -------
    plateid6 : str
        Plate ID formatted to a string of 6 characters.
    """
    plateid = int(kwargs['plateid'])
    if plateid < 10000:
        return "{:0>6d}".format(plateid)
    else:
        return "{:d}".format(plateid)
0.003384
def jp_compose(s, base=None):
    """ append/encode a string to json-pointer """
    if s is None:
        return base

    ss = [s] if isinstance(s, six.string_types) else s
    ss = [s.replace('~', '~0').replace('/', '~1') for s in ss]
    if base:
        ss.insert(0, base)
    return '/'.join(ss)
0.006557
def is_lop(ch, block_op_pairs_dict=get_block_op_pairs('{}[]()')):
    '''
        # is_lop('{',block_op_pairs_dict)
        # is_lop('[',block_op_pairs_dict)
        # is_lop('}',block_op_pairs_dict)
        # is_lop(']',block_op_pairs_dict)
        # is_lop('a',block_op_pairs_dict)
    '''
    for i in range(1, block_op_pairs_dict.__len__() + 1):
        if ch == block_op_pairs_dict[i][0]:
            return True
        else:
            pass
    return False
0.006757
def set_modulations(self, dimension, phonon_modes, delta_q=None, derivative_order=None, nac_q_direction=None): """Generate atomic displacements of phonon modes. The design of this feature is not very satisfactory, and thus API. Therefore it should be reconsidered someday in the fugure. Parameters ---------- dimension : array_like Supercell dimension with respect to the primitive cell. dtype='intc', shape=(3, ), (3, 3), (9, ) phonon_modes : list of phonon mode settings Each element of the outer list gives one phonon mode information: [q-point, band index (int), amplitude (float), phase (float)] In each list of the phonon mode information, the first element is a list that represents q-point in reduced coordinates. The second, third, and fourth elements show the band index starting with 0, amplitude, and phase factor, respectively. """ if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) self._modulation = Modulation(self._dynamical_matrix, dimension, phonon_modes, delta_q=delta_q, derivative_order=derivative_order, nac_q_direction=nac_q_direction, factor=self._factor) self._modulation.run()
0.004082
def version_msg():
    """Return the Cookiecutter version, location and Python powering it."""
    python_version = sys.version[:3]
    location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    message = u'Cookiecutter %(version)s from {} (Python {})'
    return message.format(location, python_version)
0.003125
def to_unit_cell(self, in_place=False):
    """
    Move frac coords to within the unit cell.
    """
    frac_coords = np.mod(self.frac_coords, 1)
    if in_place:
        self.frac_coords = frac_coords
    else:
        return PeriodicSite(self.species, frac_coords, self.lattice,
                            properties=self.properties)
0.005277
def add_resource_factory(self, factory_callback: factory_callback_type, types: Union[type, Sequence[Type]], name: str = 'default', context_attr: str = None) -> None: """ Add a resource factory to this context. This will cause a ``resource_added`` event to be dispatched. A resource factory is a callable that generates a "contextual" resource when it is requested by either using any of the methods :meth:`get_resource`, :meth:`require_resource` or :meth:`request_resource` or its context attribute is accessed. When a new resource is created in this manner, it is always bound to the context through it was requested, regardless of where in the chain the factory itself was added to. :param factory_callback: a (non-coroutine) callable that takes a context instance as argument and returns the created resource object :param types: one or more types to register the generated resource as on the target context :param name: name of the resource that will be created in the target context :param context_attr: name of the context attribute the created resource will be accessible as :raises asphalt.core.context.ResourceConflict: if there is an existing resource factory for the given type/name combinations or the given context variable """ assert check_argument_types() self._check_closed() if not resource_name_re.fullmatch(name): raise ValueError('"name" must be a nonempty string consisting only of alphanumeric ' 'characters and underscores') if iscoroutinefunction(factory_callback): raise TypeError('"factory_callback" must not be a coroutine function') if not types: raise ValueError('"types" must not be empty') if isinstance(types, type): resource_types = (types,) # type: Tuple[type, ...] else: resource_types = tuple(types) # Check for a conflicting context attribute if context_attr in self._resource_factories_by_context_attr: raise ResourceConflict( 'this context already contains a resource factory for the context attribute {!r}'. format(context_attr)) # Check for conflicts with existing resource factories for type_ in resource_types: if (type_, name) in self._resource_factories: raise ResourceConflict('this context already contains a resource factory for the ' 'type {}'.format(qualified_name(type_))) # Add the resource factory to the appropriate lookup tables resource = ResourceContainer(factory_callback, resource_types, name, context_attr, True) for type_ in resource_types: self._resource_factories[(type_, name)] = resource if context_attr: self._resource_factories_by_context_attr[context_attr] = resource # Notify listeners that a new resource has been made available self.resource_added.dispatch(resource_types, name, True)
0.005877
def _load(self): """ Execute the logic behind the merging. """ if "PYFUNCEBLE_AUTO_CONFIGURATION" not in PyFunceble.environ: # The auto configuration environment variable is not set. while True: # We infinitly loop until we get a reponse which is `y|Y` or `n|N`. # We ask the user if we should install and load the default configuration. response = input( PyFunceble.Style.BRIGHT + PyFunceble.Fore.RED + "A configuration key is missing.\n" + PyFunceble.Fore.RESET + "Try to merge upstream configuration file into %s ? [y/n] " % ( PyFunceble.Style.BRIGHT + self.path_to_config + PyFunceble.Style.RESET_ALL ) ) if isinstance(response, str): # The response is a string if response.lower() == "y": # The response is a `y` or `Y`. # We merge the old values inside the new one. self._merge_values() # And we save. self._save() print( PyFunceble.Style.BRIGHT + PyFunceble.Fore.GREEN + "Done!\n" "Please try again, if it happens again," " please fill a new issue." ) # And we break the loop as we got a satisfied response. break elif response.lower() == "n": # The response is a `n` or `N`. # We inform the user that something went wrong. raise Exception("Configuration key still missing.") else: # The auto configuration environment variable is set. # We merge the old values inside the new one. self._merge_values() # And we save. self._save()
0.00273
def _download_rtd_zip(rtd_version=None, **kwargs): """ Download and extract HTML ZIP from RTD to installed doc data path. Download is skipped if content already exists. Parameters ---------- rtd_version : str or `None` RTD version to download; e.g., "latest", "stable", or "v2.6.0". If not given, download closest match to software version. kwargs : dict Keywords for ``urlretrieve()``. Returns ------- index_html : str Path to local "index.html". """ # https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134 if not toolkit.family.startswith('qt'): raise ValueError('Downloaded documentation not compatible with {} ' 'UI toolkit browser'.format(toolkit.family)) if rtd_version is None: rtd_version = _find_rtd_version() data_path = os.path.dirname( _find_pkg_data_path('help.html', package='ginga.doc')) index_html = os.path.join(data_path, 'index.html') # There is a previous download of documentation; Do nothing. # There is no check if downloaded version is outdated; The idea is that # this folder would be empty again when installing new version. if os.path.isfile(index_html): return index_html url = ('https://readthedocs.org/projects/ginga/downloads/htmlzip/' '{}/'.format(rtd_version)) local_path = urllib.request.urlretrieve(url, **kwargs)[0] with zipfile.ZipFile(local_path, 'r') as zf: zf.extractall(data_path) # RTD makes an undesirable sub-directory, so move everything there # up one level and delete it. subdir = os.path.join(data_path, 'ginga-{}'.format(rtd_version)) for s in os.listdir(subdir): src = os.path.join(subdir, s) if os.path.isfile(src): shutil.copy(src, data_path) else: # directory shutil.copytree(src, os.path.join(data_path, s)) shutil.rmtree(subdir) if not os.path.isfile(index_html): raise OSError( '{} is missing; Ginga doc download failed'.format(index_html)) return index_html
0.000468
def switch(self, time=None):
    """Obtain switch parameter, ie number of times the stage shifts."""
    stag_to_int = {'NREM1': 1, 'NREM2': 2, 'NREM3': 3, 'REM': 5, 'Wake': 0}
    hypno = [stag_to_int[x['stage']] for x in self.get_epochs(time=time)
             if x['stage'] in stag_to_int.keys()]
    return sum(asarray(diff(hypno), dtype=bool))
0.010554
def load_file(self, filename): """Load file into treeview""" self.counter.clear() # python2 issues try: etree = ET.parse(filename) except ET.ParseError: parser = ET.XMLParser(encoding='UTF-8') etree = ET.parse(filename, parser) eroot = etree.getroot() self.remove_all() self.previewer.remove_all() self.widget_editor.hide_all() self.previewer.resource_paths.append(os.path.dirname(filename)) for element in eroot: self.populate_tree('', eroot, element,from_file=True) children = self.treeview.get_children('') for child in children: self.draw_widget(child) self.previewer.show_selected(None, None)
0.003886
def cmd_slow_requests(self):
    """List all requests that took a certain amount of time to be
    processed.

    .. warning::
       By now hardcoded to 1 second (1000 milliseconds), improve the
       command line interface to allow to send parameters to each command
       or globally.
    """
    slow_requests = [
        line.time_wait_response
        for line in self._valid_lines
        if line.time_wait_response > 1000
    ]
    return slow_requests
0.003883
def tf_initialize(self, x_init, b): """ Initialization step preparing the arguments for the first iteration of the loop body: $x_0, 0, p_0, r_0, r_0^2$. Args: x_init: Initial solution guess $x_0$, zero vector if None. b: The right-hand side $b$ of the system of linear equations. Returns: Initial arguments for tf_step. """ if x_init is None: # Initial guess is zero vector if not given. x_init = [tf.zeros(shape=util.shape(t)) for t in b] initial_args = super(ConjugateGradient, self).tf_initialize(x_init) # r_0 := b - A * x_0 # c_0 := r_0 conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))] # r_0^2 := r^T * r squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual]) return initial_args + (conjugate, residual, squared_residual)
0.005149
def fmt_sia(sia, ces=True): """Format a |SystemIrreducibilityAnalysis|.""" if ces: body = ( '{ces}' '{partitioned_ces}'.format( ces=fmt_ces( sia.ces, 'Cause-effect structure'), partitioned_ces=fmt_ces( sia.partitioned_ces, 'Partitioned cause-effect structure'))) center_header = True else: body = '' center_header = False title = 'System irreducibility analysis: {BIG_PHI} = {phi}'.format( BIG_PHI=BIG_PHI, phi=fmt_number(sia.phi)) body = header(str(sia.subsystem), body, center=center_header) body = header(str(sia.cut), body, center=center_header) return box(header(title, body, center=center_header))
0.001236
def from_file(cls, address_book, filename, supported_private_objects,
              localize_dates):
    """
    Use this if you want to create a new contact from an existing .vcf file.
    """
    return cls(address_book, filename, supported_private_objects, None,
               localize_dates)
0.012539
def lock_input_target_config_target_running_running(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    lock = ET.Element("lock")
    config = lock
    input = ET.SubElement(lock, "input")
    target = ET.SubElement(input, "target")
    config_target = ET.SubElement(target, "config-target")
    running = ET.SubElement(config_target, "running")
    running = ET.SubElement(running, "running")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003559
def genderize(name, api_token=None): """Fetch gender from genderize.io""" GENDERIZE_API_URL = "https://api.genderize.io/" TOTAL_RETRIES = 10 MAX_RETRIES = 5 SLEEP_TIME = 0.25 STATUS_FORCELIST = [502] params = { 'name': name } if api_token: params['apikey'] = api_token session = requests.Session() retries = urllib3.util.Retry(total=TOTAL_RETRIES, connect=MAX_RETRIES, status=MAX_RETRIES, status_forcelist=STATUS_FORCELIST, backoff_factor=SLEEP_TIME, raise_on_status=True) session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries)) session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries)) r = session.get(GENDERIZE_API_URL, params=params) r.raise_for_status() result = r.json() gender = result['gender'] prob = result.get('probability', None) acc = int(prob * 100) if prob else None return gender, acc
0.0027
def update_certificate(self, certificate_id, **kwargs): """Update a certificate. :param str certificate_id: The certificate id (Required) :param str certificate_data: X509.v3 trusted certificate in PEM format. :param str signature: This parameter has been DEPRECATED in the API and does not need to be provided. :param str type: type of the certificate. Values: lwm2m or bootstrap. :param str status: Status of the certificate. Allowed values: "ACTIVE" | "INACTIVE". :param str description: Human readable description of this certificate, not longer than 500 characters. :returns: Certificate object :rtype: Certificate """ api = self._get_api(iam.DeveloperApi) cert = Certificate._create_request_map(kwargs) body = iam.TrustedCertificateReq(**cert) certificate = Certificate(api.update_certificate(certificate_id, body)) return self.get_certificate(certificate.id)
0.002935
def list_assigned_licenses(entity, entity_display_name, license_keys=None, service_instance=None): ''' Lists the licenses assigned to an entity entity Dictionary representation of an entity. See ``_get_entity`` docstrings for format. entity_display_name Entity name used in logging license_keys: List of license keys to be retrieved. Default is None. service_instance Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Default is None. .. code-block:: bash salt '*' vsphere.list_assigned_licenses entity={type:cluster,datacenter:dc,cluster:cl} entiy_display_name=cl ''' log.trace('Listing assigned licenses of entity %s', entity) _validate_entity(entity) assigned_licenses = salt.utils.vmware.get_assigned_licenses( service_instance, entity_ref=_get_entity(service_instance, entity), entity_name=entity_display_name) return [{'key': l.licenseKey, 'name': l.name, 'description': l.labels[0].value if l.labels else None, # VMware handles unlimited capacity as 0 'capacity': l.total if l.total > 0 else sys.maxsize} for l in assigned_licenses if (license_keys is None) or (l.licenseKey in license_keys)]
0.002172
def raw_read(self):
    """https://github.com/frictionlessdata/datapackage-py#resource
    """
    contents = b''
    with self.raw_iter() as filelike:
        for chunk in filelike:
            contents += chunk
    return contents
0.007692
def start_task_type(self, task_type_str, total_task_count): """Call when about to start processing a new type of task, typically just before entering a loop that processes many task of the given type. Args: task_type_str (str): The name of the task, used as a dict key and printed in the progress updates. total_task_count (int): The total number of the new type of task that will be processed. This starts the timer that is used for providing an ETA for completing all tasks of the given type. The task type is included in progress updates until end_task_type() is called. """ assert ( task_type_str not in self._task_dict ), "Task type has already been started" self._task_dict[task_type_str] = { "start_time": time.time(), "total_task_count": total_task_count, "task_idx": 0, }
0.007049
def convert_ints_to_bytes(in_ints, num):
    """Convert an integer array into a byte array. The number of bytes forming
    an integer is defined by num

    :param in_ints: the input integers
    :param num: the number of bytes per int
    :return: the byte array"""
    out_bytes = b""
    for val in in_ints:
        out_bytes += struct.pack(mmtf.utils.constants.NUM_DICT[num], val)
    return out_bytes
0.009852
def scaffold():
    """Start a new site."""
    click.echo("A whole new site? Awesome.")
    title = click.prompt("What's the title?")
    url = click.prompt("Great. What's url? http://")

    # Make sure that title doesn't exist.
    click.echo("Got it. Creating %s..." % url)
0.00361
def plot (data, headers=None, pconfig=None): """ Return HTML for a MultiQC table. :param data: 2D dict, first keys as sample names, then x:y data pairs :param headers: list of optional dicts with column config in key:value pairs. :return: HTML ready to be inserted into the page """ if headers is None: headers = [] if pconfig is None: pconfig = {} # Allow user to overwrite any given config for this plot if 'id' in pconfig and pconfig['id'] and pconfig['id'] in config.custom_plot_config: for k, v in config.custom_plot_config[pconfig['id']].items(): pconfig[k] = v # Make a datatable object dt = table_object.datatable(data, headers, pconfig) # Collect unique sample names s_names = set() for d in dt.data: for s_name in d.keys(): s_names.add(s_name) # Make a beeswarm plot if we have lots of samples if len(s_names) >= config.max_table_rows and pconfig.get('no_beeswarm') is not True: logger.debug('Plotting beeswarm instead of table, {} samples'.format(len(s_names))) warning = '<p class="text-muted"><span class="glyphicon glyphicon-exclamation-sign" ' \ 'title="A beeswarm plot has been generated instead because of the large number of samples. '\ 'See http://multiqc.info/docs/#tables--beeswarm-plots"'\ ' data-toggle="tooltip"></span> Showing {} samples.</p>'.format(len(s_names)) return warning + beeswarm.make_plot( dt ) else: return make_table ( dt )
0.008986
def recordtype_row_strategy(column_names): """ Recordtype row strategy, rows returned as recordtypes Column names that are not valid Python identifiers will be replaced with col<number>_ """ try: from namedlist import namedlist as recordtype # optional dependency except ImportError: from recordtype import recordtype # optional dependency # replace empty column names with placeholders column_names = [name if is_valid_identifier(name) else 'col%s_' % idx for idx, name in enumerate(column_names)] recordtype_row_class = recordtype('Row', column_names) # custom extension class that supports indexing class Row(recordtype_row_class): def __getitem__(self, index): if isinstance(index, slice): return tuple(getattr(self, x) for x in self.__slots__[index]) return getattr(self, self.__slots__[index]) def __setitem__(self, index, value): setattr(self, self.__slots__[index], value) def row_factory(row): return Row(*row) return row_factory
0.001837
def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None): """Process text and save as Dataset """ if train and limit_top_tokens is not None: tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens) X_encoded = tokenizer.encode_texts(X) if ngrams is not None: X_encoded = tokenizer.add_ngrams(X_encoded, n=ngrams, train=train) X_padded = tokenizer.pad_sequences( X_encoded, fixed_token_seq_length=max_len) if train: ds = Dataset(X_padded, y, tokenizer=tokenizer) else: ds = Dataset(X_padded, y) ds.save(proc_data_path)
0.00292
def session(self):
    """A context manager for this client's session.

    This function closes the current session when this client goes out of
    scope.
    """
    self._session = requests.session()
    yield
    self._session.close()
    self._session = None
0.006757
def get_addresses_from_input_file(input_file_name): """Read addresses from input file into list of tuples. This only supports address and zipcode headers """ mode = 'r' if sys.version_info[0] < 3: mode = 'rb' with io.open(input_file_name, mode) as input_file: reader = csv.reader(input_file, delimiter=',', quotechar='"') addresses = list(map(tuple, reader)) if len(addresses) == 0: raise Exception('No addresses found in input file') header_columns = list(column.lower() for column in addresses.pop(0)) try: address_index = header_columns.index('address') zipcode_index = header_columns.index('zipcode') except ValueError: raise Exception("""The first row of the input CSV must be a header that contains \ a column labeled 'address' and a column labeled 'zipcode'.""") return list((row[address_index], row[zipcode_index]) for row in addresses)
0.003036
def permute(num):
    "Permutation for randomizing data order."
    if permute_data:
        return np.random.permutation(num)
    else:
        logging.warning("Warning not permuting data")
        return np.arange(num)
0.004545
def daemonize(umask=0, work_dir="/", max_fd=1024, redirect="/dev/null"): """ When this function is called, the process is daemonized (by forking + killing its parent). It becomes a background task. It is useful to release the console. """ if not redirect: redirect = "/dev/null" if hasattr(os, "devnull"): redirect = os.devnull try: pid = os.fork() except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) # first child if pid == 0: os.setsid() try: # Fork a second child. pid = os.fork() except OSError as e: raise Exception("%s [%d]" % (e.strerror, e.errno)) # The second child. if pid == 0: os.chdir(work_dir) os.umask(umask) else: # exit first child os._exit(0) else: # Exit parent os._exit(0) #killing inherited file descriptors import resource maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = max_fd # close all file descriptors. for fd in range(0, maxfd): try: os.close(fd) except OSError: # ignored pass os.open(redirect, os.O_RDWR) # standard input # Duplicate standard os.dup2(0, 1) # standard output (1) os.dup2(0, 2) # standard error (2) return os.getpid()
0.002706
def class_error(self, input_data, targets, average=True, cache=None, prediction=False): """ Return the classification error rate """ if cache is not None: activations = cache else: activations = \ self.feed_forward(input_data, prediction=prediction) targets = targets.get().argmax(1) class_error = np.sum(activations.get().argmax(1) != targets) if average: class_error = float(class_error) / targets.shape[0] return class_error
0.007246
def cumsum(self, axis=0, *args, **kwargs): """ Cumulative sum of non-NA/null values. When performing the cumulative summation, any non-NA/null values will be skipped. The resulting SparseArray will preserve the locations of NaN values, but the fill value will be `np.nan` regardless. Parameters ---------- axis : int or None Axis over which to perform the cumulative summation. If None, perform cumulative summation over flattened array. Returns ------- cumsum : SparseArray """ nv.validate_cumsum(args, kwargs) if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour. raise ValueError("axis(={axis}) out of bounds".format(axis=axis)) if not self._null_fill_value: return SparseArray(self.to_dense()).cumsum() return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index, fill_value=self.fill_value)
0.001938
def _parse_directory(self): """ Parse the storage directory in the config. Returns: str """ if self._parser.has_option('storage', 'directory'): directory = self._parser.get('storage', 'directory') # Don't allow CUSTOM_APPS_DIR as a storage directory if directory == CUSTOM_APPS_DIR: raise ConfigError("{} cannot be used as a storage directory." .format(CUSTOM_APPS_DIR)) else: directory = MACKUP_BACKUP_PATH return str(directory)
0.00335
def process_and_show(self): """ Run :meth:`process` and :meth:`show_source` after each other. """ for name, klass in sorted(self.classes.items()): logger.debug('Processing class: %s', name) if not isinstance(klass, DvClass): klass = DvClass(klass, self.vma) klass.process() klass.show_source()
0.005141
def __SendMediaBody(self, start, additional_headers=None): """Send the entire media stream in a single request.""" self.EnsureInitialized() if self.total_size is None: raise exceptions.TransferInvalidError( 'Total size must be known for SendMediaBody') body_stream = stream_slice.StreamSlice( self.stream, self.total_size - start) request = http_wrapper.Request(url=self.url, http_method='PUT', body=body_stream) request.headers['Content-Type'] = self.mime_type if start == self.total_size: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, self.total_size)
0.001787
def set_end(self, t):
    """
    Override the GPS end time (and set the duration) of this ScienceSegment.

    @param t: new GPS end time.
    """
    self.__dur -= self.__end - t
    self.__end = t
0.010152
def valueFromString(self, value, context=None): """ Converts the inputted string text to a value that matches the type from this column type. :param value | <str> """ if value == 'now': return datetime.datetime.now().time() elif dateutil_parser: return dateutil_parser.parse(value).time() else: time_struct = time.strptime(value, self.defaultFormat()) return datetime.time(time_struct.tm_hour, time_struct.tm_min, time_struct.tm_sec)
0.003241
def show_ntp_input_rbridge_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_ntp = ET.Element("show_ntp")
    config = show_ntp
    input = ET.SubElement(show_ntp, "input")
    rbridge_id = ET.SubElement(input, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004525
def kernel(x1, x2, method='gaussian', sigma=1, **kwargs):
    """Compute kernel matrix"""
    if method.lower() in ['gaussian', 'gauss', 'rbf']:
        K = np.exp(-dist(x1, x2) / (2 * sigma**2))
        return K
0.004808
def getIDsFromFiles(files):
    """given a path or list of files, return ABF IDs."""
    if type(files) is str:
        files = glob.glob(files + "/*.*")
    IDs = []
    for fname in files:
        if fname[-4:].lower() == '.abf':
            ext = fname.split('.')[-1]
            IDs.append(os.path.basename(fname).replace('.' + ext, ''))
    return sorted(IDs)
0.017094
def merge_ticket(self, ticket_id, into_id):
    """ Merge ticket into another (undocumented API feature).

    :param ticket_id: ID of ticket to be merged
    :param into: ID of destination ticket
    :returns: ``True``
                  Operation was successful
              ``False``
                  Either origin or destination ticket does not
                  exist or user does not have ModifyTicket permission.
    """
    msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id),
                                                     str(into_id)))
    state = msg.split('\n')[2]
    return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None
0.004115
def Consultar(self, nro_doc): "Llama a la API pública de AFIP para obtener los datos de una persona" n = 0 while n <= 4: n += 1 # reintentar 3 veces try: if not self.client: if DEBUG: warnings.warn("reconectando intento [%d]..." % n) self.Conectar() self.response = self.client("sr-padron", "v2", "persona", str(nro_doc)) except Exception as e: self.client = None ex = exception_info() self.Traceback = ex.get("tb", "") try: self.Excepcion = norm(ex.get("msg", "").replace("\n", "")) except: self.Excepcion = "<no disponible>" if DEBUG: warnings.warn("Error %s [%d]" % (self.Excepcion, n)) else: break else: return False result = json.loads(self.response) if result['success']: data = result['data'] # extraigo datos generales del contribuyente: self.cuit = data["idPersona"] self.tipo_persona = data["tipoPersona"] self.tipo_doc = TIPO_CLAVE.get(data["tipoClave"]) self.dni = data.get("numeroDocumento") self.estado = data.get("estadoClave") self.denominacion = data.get("nombre") # analizo el domicilio domicilio = data.get("domicilioFiscal") if domicilio: self.direccion = domicilio.get("direccion", "") self.localidad = domicilio.get("localidad", "") # no usado en CABA self.provincia = PROVINCIAS.get(domicilio.get("idProvincia"), "") self.cod_postal = domicilio.get("codPostal") else: self.direccion = self.localidad = self.provincia = "" self.cod_postal = "" # retrocompatibilidad: self.domicilios = ["%s - %s (%s) - %s" % ( self.direccion, self.localidad, self.cod_postal, self.provincia,) ] # analizo impuestos: self.impuestos = data.get("impuestos", []) self.actividades = data.get("actividades", []) if 32 in self.impuestos: self.imp_iva = "EX" elif 33 in self.impuestos: self.imp_iva = "NI" elif 34 in self.impuestos: self.imp_iva = "NA" else: self.imp_iva = "S" if 30 in self.impuestos else "N" mt = data.get("categoriasMonotributo", {}) self.monotributo = "S" if mt else "N" self.actividad_monotributo = "" # TODO: mt[0].get("idCategoria") self.integrante_soc = "" self.empleador = "S" if 301 in self.impuestos else "N" self.cat_iva = "" self.data = data else: error = result['error'] self.Excepcion = error['mensaje'] return True
0.002853
def main(branch): """Checkout, update and branch from the specified branch.""" try: # Ensure that we're in a git repository. This command is silent unless # you're not actually in a git repository, in which case, you receive a # "Not a git repository" error message. output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8') sys.stdout.write(output) except subprocess.CalledProcessError: # Bail if we're not in a git repository. return # This behavior ensures a better user experience for those that aren't # intimately familiar with git. ensure_remote_branch_is_tracked(branch) # Switch to the specified branch and update it. subprocess.check_call(['git', 'checkout', '--quiet', branch]) # Pulling is always safe here, because we never commit to this branch. subprocess.check_call(['git', 'pull', '--quiet']) # Checkout the top commit in the branch, effectively going "untracked." subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch]) # Clean up the repository of Python cruft. Because we've just switched # branches and compiled Python files should not be version controlled, # there are likely leftover compiled Python files sitting on disk which may # confuse some tools, such as sqlalchemy-migrate. subprocess.check_call(['find', '.', '-name', '"*.pyc"', '-delete']) # For the sake of user experience, give some familiar output. print('Your branch is up to date with branch \'origin/%s\'.' % branch)
0.000636
def isConjNorthNode(self): """ Returns if object is conjunct north node. """ node = self.chart.getObject(const.NORTH_NODE) return aspects.hasAspect(self.obj, node, aspList=[0])
0.01
def _make_request(self, url, parameters, result_key): """Make http/https request to Google API. Method prepares url parameters, drops None values, and gets default values. Finally makes request using protocol assigned to client and returns data. :param url: url part - specifies API endpoint :param parameters: dictionary of url parameters :param result_key: key in output where result is expected """ url = urlparse.urljoin(urlparse.urljoin(self.base, url), "json") # drop all None values and use defaults if not set parameters = {key: value for key, value in parameters.items() if value is not None} parameters.setdefault("sensor", self.sensor) parameters = self._serialize_parameters(parameters) if self.api_key: parameters["key"] = self.api_key raw_response = requests.get(url, params=parameters) response = raw_response.json() if response["status"] == status.OK and result_key is not None: return response[result_key] elif response["status"] == status.OK: del response["status"] return response else: response["url"] = raw_response.url raise errors.EXCEPTION_MAPPING.get( response["status"], errors.GmapException )(response)
0.001404
def outputMode(self, outputMode):
        """Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.

        Options include:

        * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to
           the sink
        * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink
           every time there are some updates
        * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be
           written to the sink every time there are some updates. If the query doesn't contain
           aggregations, it will be equivalent to `append` mode.

        .. note:: Evolving.

        >>> writer = sdf.writeStream.outputMode('append')
        """
        if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0:
            raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode)
        self._jwrite = self._jwrite.outputMode(outputMode)
        return self
0.008806
def read_messages(fobj, magic_table): """Read messages from a file-like object until stream is exhausted.""" messages = [] while True: magic = read_magic(fobj) if not magic: break func = magic_table.get(magic) if func is not None: messages.append(func(fobj)) else: log.error('Unknown magic: ' + str(' '.join('{0:02x}'.format(b) for b in bytearray(magic)))) return messages
0.003831
def seconds_remaining(self, ttl):
        """Return number of seconds left before Imgur API needs to be queried for this instance.

        :param int ttl: Number of seconds before this is considered out of date.

        :return: Seconds left before this is expired. 0 indicates an update is needed (never negative).
        :rtype: int
        """
        return max(0, ttl - (int(time.time()) - self.mod_time))
0.012346
def first(self): """Return the first record or raise an exception if the result doesn't contain any data :return: - Dictionary containing the first item in the response content :raise: - NoResults: If no results were found """ if not self._stream: raise InvalidUsage('first() is only available when stream=True') try: content = next(self.all()) except StopIteration: raise NoResults("No records found") return content
0.005495
def save(self): """ Saves a new rating - authenticated users can update the value if they've previously rated. """ user = self.request.user self.undoing = False rating_value = self.cleaned_data["value"] manager = self.rating_manager if user.is_authenticated(): rating_instance, created = manager.get_or_create(user=user, defaults={'value': rating_value}) if not created: if rating_instance.value == int(rating_value): # User submitted the same rating as previously, # which we treat as undoing the rating (like a toggle). rating_instance.delete() self.undoing = True else: rating_instance.value = rating_value rating_instance.save() else: rating_instance = manager.create(value=rating_value) return rating_instance
0.002979
def pack(self):
        '''pack an FDM buffer from the current values'''
        for i in range(len(self.values)):
            if math.isnan(self.values[i]):
                self.values[i] = 0
        return struct.pack(self.pack_string, *self.values)
0.008032
def install_virtualenv_p3(root, python_version): """ Install virtual environment for Python 3.3+; removing the old one if it exists """ import venv builder = venv.EnvBuilder(system_site_packages=False, clear=True, symlinks=False, upgrade=False) builder.create(root) ret_code = subprocess.call([VE_SCRIPT, PROJECT_ROOT, root, python_version]) sys.exit(ret_code)
0.007813
def list_consumer_group(self, project, logstore): """ List consumer group :type project: string :param project: project name :type logstore: string :param logstore: logstore name :return: ListConsumerGroupResponse """ resource = "/logstores/" + logstore + "/consumergroups" params = {} headers = {} (resp, header) = self._send("GET", project, None, resource, params, headers) return ListConsumerGroupResponse(resp, header)
0.005556
def setViewModel(self, model): """Sets the model for the enclosed TableView in this widget. Args: model (DataFrameModel): The model to be displayed by the Table View. """ if isinstance(model, DataFrameModel): self.enableEditing(False) self.uncheckButton() selectionModel = self.tableView.selectionModel() self.tableView.setModel(model) model.dtypeChanged.connect(self.updateDelegate) model.dataChanged.connect(self.updateDelegates) del selectionModel
0.004926
def get_document(self, id):
        """
        Retrieves a particular document from this project.
        """
        obj_list = self.document_list
        matches = [i for i in obj_list if str(i.id) == str(id)]
        if not matches:
            raise DoesNotExistError("The resource you've requested does not "
                                    "exist or is unavailable without the proper credentials.")
        return matches[0]
0.005025
def on_connect(self, connection): "Called when the socket connects" self._sock = connection._sock self._buffer = SocketBuffer(self._sock, self.socket_read_size) self.encoder = connection.encoder
0.00885
def update_video(self, access_token, video_id, title=None, tags=None, category=None, copyright_type=None, public_type=None, watch_password=None, description=None, thumbnail_seq=None): """doc: http://open.youku.com/docs/doc?id=50 """ url = 'https://openapi.youku.com/v2/videos/update.json' data = { 'client_id': self.client_id, 'access_token': access_token, 'video_id': video_id, 'title': title, 'tags': tags, 'category': category, 'copyright_type': copyright_type, 'public_type': public_type, 'watch_password': watch_password, 'description': description, 'thumbnail_seq': thumbnail_seq } data = remove_none_value(data) r = requests.post(url, data=data) check_error(r) return r.json()['id']
0.00523
def isin(self, values): """ Compute boolean array of whether each index value is found in the passed set of values. Parameters ---------- values : set or sequence of values Returns ------- is_contained : ndarray (boolean dtype) """ if not isinstance(values, type(self)): try: values = type(self)(values) except ValueError: return self.astype(object).isin(values) return algorithms.isin(self.asi8, values.asi8)
0.003546
def get_extended_summaryf(self, *args, **kwargs):
        """Extract the extended summary from a function docstring

        This function can be used as a decorator to extract the extended
        summary of a function docstring (similar to :meth:`get_sectionsf`).

        Parameters
        ----------
        ``*args`` and ``**kwargs``
            See the :meth:`get_extended_summary` method. Note that the first
            argument will be the docstring of the specified function

        Returns
        -------
        function
            Wrapper that takes a function as input and registers its summary
            via the :meth:`get_extended_summary` method"""
        def func(f):
            doc = f.__doc__
            self.get_extended_summary(doc or '', *args, **kwargs)
            return f
        return func
0.002415
def of(self, *indented_blocks) -> "CodeBlock":
        """
        By default, marks the block as expecting an indented "body", the blocks of
        which are then supplied as arguments to this method.

        Unless the block specifies a "closed_by", a "pass" statement is generated as
        the body when no body blocks are supplied or they are all Nones. If a
        "closed_by" is specified, it is emitted at the same indentation level as the
        opening of the block.

        After all the arguments have been handled, this block is marked as finalised
        and no more blocks can be appended to it.

        None blocks are skipped.

        Returns the block itself.
        """
        if self.closed_by is None:
            self.expects_body_or_pass = True

        for block in indented_blocks:
            if block is not None:
                self._blocks.append((1, block))

        # Finalise it so that we cannot add more sub-blocks to this block.
        self.finalise()

        return self
0.006809
def trigger_methods(instance, args):
    """
    Triggers specific class methods using a simple reflection
    mechanism based on the given input dictionary params.

    Arguments:
        instance (object): target instance to dynamically trigger methods on.
        args (iterable): input arguments used to trigger methods.

    Returns:
        None
    """
    # Start the magic
    for name in sorted(args):
        value = args[name]
        target = instance

        # If response attributes
        if name.startswith('response_') or name.startswith('reply_'):
            name = name.replace('response_', '').replace('reply_', '')
            # If instance has response attribute, use it
            if hasattr(instance, '_response'):
                target = instance._response

        # Retrieve class member for inspection and future use
        member = getattr(target, name, None)

        # Is attribute
        isattr = name in dir(target)
        iscallable = ismethod(member) and not isfunction(member)

        if not iscallable and not isattr:
            raise PookInvalidArgument('Unsupported argument: {}'.format(name))

        # Set attribute or trigger method
        if iscallable:
            member(value)
        else:
            setattr(target, name, value)
0.000779
def get_block_symbol_data(editor, block):
    """
    Gets the list of ParenthesisInfo for a specific text block.

    :param editor: Code editor instance
    :param block: block to parse
    """
    def list_symbols(editor, block, character):
        """
        Returns a list of symbols found in the block text

        :param editor: code editor instance
        :param block: block to parse
        :param character: character to look for.
        """
        text = block.text()
        symbols = []
        cursor = QTextCursor(block)
        cursor.movePosition(cursor.StartOfBlock)
        pos = text.find(character, 0)
        cursor.movePosition(cursor.Right, cursor.MoveAnchor, pos)
        while pos != -1:
            if not TextHelper(editor).is_comment_or_string(cursor):
                # skips symbols in string literal or comment
                info = ParenthesisInfo(pos, character)
                symbols.append(info)
            pos = text.find(character, pos + 1)
            cursor.movePosition(cursor.StartOfBlock)
            cursor.movePosition(cursor.Right, cursor.MoveAnchor, pos)
        return symbols

    parentheses = sorted(
        list_symbols(editor, block, '(') + list_symbols(editor, block, ')'),
        key=lambda x: x.position)
    square_brackets = sorted(
        list_symbols(editor, block, '[') + list_symbols(editor, block, ']'),
        key=lambda x: x.position)
    braces = sorted(
        list_symbols(editor, block, '{') + list_symbols(editor, block, '}'),
        key=lambda x: x.position)
    return parentheses, square_brackets, braces
0.000628
def call(self, method, *args): """ Calls the service method defined with the arguments provided """ try: response = getattr(self.client.service, method)(*args) except (URLError, SSLError) as e: log.exception('Failed to connect to responsys service') raise ConnectError("Request to service timed out") except WebFault as web_fault: fault_name = getattr(web_fault.fault, 'faultstring', None) error = str(web_fault.fault.detail) if fault_name == 'TableFault': raise TableFault(error) if fault_name == 'ListFault': raise ListFault(error) if fault_name == 'API_LIMIT_EXCEEDED': raise ApiLimitError(error) if fault_name == 'AccountFault': raise AccountFault(error) raise ServiceError(web_fault.fault, web_fault.document) return response
0.002092
async def publish(self, subject, payload, ack_handler=None, ack_wait=DEFAULT_ACK_WAIT):
        """
        Publishes a payload onto a subject. By default, it will block
        until the message which has been published has been acked back.
        An optional async handler can be supplied to process the ack
        asynchronously instead of blocking.

        :param subject: Subject of the message.

        :param payload: Payload of the message which will be published.

        :param ack_handler: Optional handler for async publishing.

        :param ack_wait: How long in seconds to wait for an ack to be received.
        """
        stan_subject = ''.join([self._pub_prefix, '.', subject])
        guid = new_guid()
        pe = protocol.PubMsg()
        pe.clientID = self._client_id
        pe.guid = guid
        pe.subject = subject
        pe.data = payload

        # Control max inflight pubs for the client with a buffered queue.
        await self._pending_pub_acks_queue.put(None)

        # Process asynchronously if a handler is given.
        if ack_handler is not None:
            self._pub_ack_map[guid] = ack_handler

            try:
                await self._nc.publish_request(
                    stan_subject,
                    self._ack_subject,
                    pe.SerializeToString(),
                )
                return
            except Exception as e:
                del self._pub_ack_map[guid]
                raise e
        else:
            # Synchronous wait for ack handling.
            future = asyncio.Future(loop=self._loop)

            async def cb(pub_ack):
                nonlocal future
                future.set_result(pub_ack)
            self._pub_ack_map[guid] = cb

            try:
                await self._nc.publish_request(
                    stan_subject,
                    self._ack_subject,
                    pe.SerializeToString(),
                )
                await asyncio.wait_for(future, ack_wait, loop=self._loop)
                return future.result()
            except Exception as e:
                # Remove pending future before raising error.
                future.cancel()
                del self._pub_ack_map[guid]
                raise e
0.002657
def norm(table): """ fit to normal distribution """ print('# norm dist is broken', file=sys.stderr) exit() from matplotlib.pyplot import hist as hist t = [] for i in table: t.append(np.ndarray.tolist(hist(i, bins = len(i), normed = True)[0])) return t
0.016949
def new_cipher(self, key, iv, digest=None): """ @param key: the secret key, a byte string @param iv: the initialization vector, a byte string. Used as the initial nonce in counter mode @param digest: also known as tag or icv. A byte string containing the digest of the encrypted data. Only use this during decryption! @return: an initialized cipher object for this algo """ if type(key) is str: key = key.encode('ascii') if self.is_aead and digest is not None: # With AEAD, the mode needs the digest during decryption. return Cipher( self.cipher(key), self.mode(iv, digest, len(digest)), default_backend(), ) else: return Cipher( self.cipher(key), self.mode(iv), default_backend(), )
0.001994
def get_external_tools_in_course(self, course_id, params={}): """ Return external tools for the passed canvas course id. https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index """ url = COURSES_API.format(course_id) + "/external_tools" external_tools = [] for data in self._get_paged_resource(url, params=params): external_tools.append(data) return external_tools
0.004228
def add_cell_params(self, params, pos=None):
        """
        Add cell of Python parameters

        :param params: parameters to add
        :param pos: optional position at which to insert the cell
        :return:
        """
        self.params = params
        cell_str = '# Parameters:\n'
        for k, v in params.items():
            cell_str += "{} = {}\n".format(k, repr(v))
        self.add_cell_code(cell_str, pos)
0.005495
def post_process(self, paths, dry_run=False, **options): """ Overridden to work around https://code.djangoproject.com/ticket/19111 """ with post_process_error_counter(self): with patched_name_fn(self, 'hashed_name', 'hashed name'): with patched_name_fn(self, 'url', 'url'): for result in super(LaxPostProcessorMixin, self).post_process(paths, dry_run, **options): yield result error_count = self._post_process_error_count if error_count: print('%s post-processing error%s.' % (error_count, '' if error_count == 1 else 's'))
0.005472