Columns: text (string, lengths 78 to 104k) | score (float64, range 0 to 0.18)
def _handle_force_timeout(self) -> None:
    """Called by IOLoop periodically to ask libcurl to process any
    events it may have forgotten about.
    """
    while True:
        try:
            ret, num_handles = self._multi.socket_all()
        except pycurl.error as e:
            ret = e.args[0]
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()
0.004474
def explain_weights_dfs(estimator, **kwargs):
    # type: (...) -> Dict[str, pd.DataFrame]
    """ Explain weights and export them to a dict with ``pandas.DataFrame``
    values (as :func:`eli5.formatters.as_dataframe.format_as_dataframes` does).
    All keyword arguments are passed to :func:`eli5.explain_weights`.
    Weights of all features are exported by default.
    """
    kwargs = _set_defaults(kwargs)
    return format_as_dataframes(
        eli5.explain_weights(estimator, **kwargs))
0.002016
def _sge_info(queue):
    """Returns machine information for an sge job scheduler.
    """
    qhost_out = subprocess.check_output(["qhost", "-q", "-xml"]).decode()
    qstat_queue = ["-q", queue] if queue and "," not in queue else []
    qstat_out = subprocess.check_output(["qstat", "-f", "-xml"] + qstat_queue).decode()
    slot_info = _sge_get_slots(qstat_out)
    mem_info = _sge_get_mem(qhost_out, queue)
    machine_keys = slot_info.keys()
    #num_cpus_vec = [slot_info[x]["slots_total"] for x in machine_keys]
    #mem_vec = [mem_info[x]["mem_total"] for x in machine_keys]
    mem_per_slot = [mem_info[x]["mem_total"] / float(slot_info[x]["slots_total"]) for x in machine_keys]
    min_ratio_index = mem_per_slot.index(median_left(mem_per_slot))
    mem_info[machine_keys[min_ratio_index]]["mem_total"]
    return [{"cores": slot_info[machine_keys[min_ratio_index]]["slots_total"],
             "memory": mem_info[machine_keys[min_ratio_index]]["mem_total"],
             "name": "sge_machine"}]
0.004975
def init_config_files(self):
    """[optionally] copy default config files into profile dir."""
    # copy config files
    path = self.builtin_profile_dir
    if self.copy_config_files:
        src = self.profile
        cfg = self.config_file_name
        if path and os.path.exists(os.path.join(path, cfg)):
            self.log.warn("Staging %r from %s into %r [overwrite=%s]"%(
                    cfg, src, self.profile_dir.location, self.overwrite)
            )
            self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
        else:
            self.stage_default_config_file()
    else:
        # Still stage *bundled* config files, but not generated ones
        # This is necessary for `ipython profile=sympy` to load the profile
        # on the first go
        files = glob.glob(os.path.join(path, '*.py'))
        for fullpath in files:
            cfg = os.path.basename(fullpath)
            if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
                # file was copied
                self.log.warn("Staging bundled %s from %s into %r"%(
                        cfg, self.profile, self.profile_dir.location)
                )
0.004608
def gibbs_binding_energy(self, eads=False):
    """
    Returns the adsorption energy or Gibb's binding energy
    of an adsorbate on a surface

    Args:
        eads (bool): Whether to calculate the adsorption energy (True)
            or the binding energy (False) which is just adsorption energy
            normalized by number of adsorbates.
    """
    n = self.get_unit_primitive_area
    Nads = self.Nads_in_slab

    BE = (self.energy - n * self.clean_entry.energy) / Nads - \
         sum([ads.energy_per_atom for ads in self.adsorbates])
    return BE * Nads if eads else BE
0.004673
def _write_submit_script(self, template, script_filename, job_name, configs):
    """Generate submit script and write it to a file.

    Args:
          - template (string) : The template string to be used for the writing submit script
          - script_filename (string) : Name of the submit script
          - job_name (string) : job name
          - configs (dict) : configs that get pushed into the template

    Returns:
          - True: on success

    Raises:
          SchedulerMissingArgs : If template is missing args
          ScriptPathError : Unable to write submit script out
    """
    try:
        submit_script = Template(template).substitute(jobname=job_name, **configs)
        # submit_script = Template(template).safe_substitute(jobname=job_name, **configs)
        with open(script_filename, 'w') as f:
            f.write(submit_script)
    except KeyError as e:
        logger.error("Missing keys for submit script : %s", e)
        raise (SchedulerMissingArgs(e.args, self.sitename))
    except IOError as e:
        logger.error("Failed writing to submit script: %s", script_filename)
        raise (ScriptPathError(script_filename, e))
    except Exception as e:
        print("Template : ", template)
        print("Args : ", job_name)
        print("Kwargs : ", configs)
        logger.error("Uncategorized error: %s", e)
        raise (e)

    return True
0.003971
def process_result(self, new_sia, min_sia):
    """Check if the new SIA has smaller |big_phi| than the standing
    result.
    """
    if new_sia.phi == 0:
        self.done = True  # Short-circuit
        return new_sia
    elif new_sia < min_sia:
        return new_sia
    return min_sia
0.006079
def translate_style(style, colormode, colorpalette): """ Translate the given style to an ANSI escape code sequence. ``style`` examples are: * green * bold * red_on_black * bold_green * italic_yellow_on_cyan :param str style: the style to translate :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code`` :parma dict colorpalette: the color palette to use for the color name mapping """ style_parts = iter(style.split('_')) ansi_start_sequence = [] ansi_end_sequence = [] try: # consume all modifiers part = None for mod_part in style_parts: part = mod_part if part not in ansi.MODIFIERS: break # all modifiers have been consumed mod_start_code, mod_end_code = resolve_modifier_to_ansi_code(part, colormode) ansi_start_sequence.append(mod_start_code) ansi_end_sequence.append(mod_end_code) else: # we've consumed all parts, thus we can exit raise StopIteration() # next part has to be a foreground color or the 'on' keyword # which means we have to consume background colors if part != 'on': ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code( part, ansi.FOREGROUND_COLOR_OFFSET, colormode, colorpalette) ansi_start_sequence.append(ansi_start_code) ansi_end_sequence.append(ansi_end_code) # consume the required 'on' keyword after the foreground color next(style_parts) # next part has to be the background color part = next(style_parts) ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code( part, ansi.BACKGROUND_COLOR_OFFSET, colormode, colorpalette) ansi_start_sequence.append(ansi_start_code) ansi_end_sequence.append(ansi_end_code) except StopIteration: # we've consumed all parts of the styling string pass # construct and return ANSI escape code sequence return ''.join(ansi_start_sequence), ''.join(ansi_end_sequence)
0.001862
def _delete_transmissions(self, content_metadata_item_ids):
    """
    Delete ContentMetadataItemTransmision models associated with the given
    content metadata items.
    """
    # pylint: disable=invalid-name
    ContentMetadataItemTransmission = apps.get_model(
        'integrated_channel',
        'ContentMetadataItemTransmission'
    )
    ContentMetadataItemTransmission.objects.filter(
        enterprise_customer=self.enterprise_configuration.enterprise_customer,
        integrated_channel_code=self.enterprise_configuration.channel_code(),
        content_id__in=content_metadata_item_ids
    ).delete()
0.007508
def download(self, name, ids, datas=None, context=None): """Download a report from the server and return it as a remote file. For instance, to download the "Quotation / Order" report of sale orders identified by the IDs ``[2, 3]``: .. doctest:: :options: +SKIP >>> report = odoo.report.download('sale.report_saleorder', [2, 3]) .. doctest:: :hide: >>> report = odoo.report.download('sale.report_saleorder', [2]) Write it on the file system: .. doctest:: :options: +SKIP >>> with open('sale_orders.pdf', 'wb') as report_file: ... report_file.write(report.read()) ... .. doctest:: :hide: >>> with open('sale_orders.pdf', 'wb') as report_file: ... fileno = report_file.write(report.read()) # Python 3 ... *Python 2:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `io.BytesIO` :raise: :class:`odoorpc.error.RPCError` (wrong parameters) :raise: `ValueError` (received invalid data) :raise: `urllib.error.URLError` (connection error) """ if context is None: context = self._odoo.env.context def check_report(name): report_model = 'ir.actions.report' if v(self._odoo.version)[0] < 11: report_model = 'ir.actions.report.xml' IrReport = self._odoo.env[report_model] report_ids = IrReport.search([('report_name', '=', name)]) report_id = report_ids and report_ids[0] or False if not report_id: raise ValueError("The report '%s' does not exist." % name) return report_id report_id = check_report(name) # Odoo >= 11.0 if v(self._odoo.version)[0] >= 11: IrReport = self._odoo.env['ir.actions.report'] report = IrReport.browse(report_id) response = report.with_context(context).render(ids, data=datas) content = response[0] # On the server the result is a bytes string, # but the RPC layer of Odoo returns it as a unicode string, # so we encode it again as bytes result = content.encode('latin1') return io.BytesIO(result) # Odoo < 11.0 else: args_to_send = [self._odoo.env.db, self._odoo.env.uid, self._odoo._password, name, ids, datas, context] data = self._odoo.json( '/jsonrpc', {'service': 'report', 'method': 'render_report', 'args': args_to_send}) if 'result' not in data and not data['result'].get('result'): raise ValueError("Received invalid data.") # Encode to bytes forced to be compatible with Python 3.2 # (its 'base64.standard_b64decode()' function only accepts bytes) result = encode2bytes(data['result']['result']) content = base64.standard_b64decode(result) return io.BytesIO(content)
0.000591
def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs, no_DC=False):
    """Prepares different forms of products between design matrix X or
    data Y or nuisance regressors X0.
    These products are re-used a lot during fitting.
    So we pre-calculate them.
    no_DC means not inserting regressors for DC components
    into nuisance regressor.
    It will only take effect if X_base is not None.
    """
    X_DC = self._gen_X_DC(run_TRs)
    reg_sol = np.linalg.lstsq(X_DC, X)
    if np.any(np.isclose(reg_sol[1], 0)):
        raise ValueError('Your design matrix appears to have '
                         'included baseline time series.'
                         'Either remove them, or move them to'
                         ' nuisance regressors.')
    X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base, no_DC)
    if X_res is None:
        X0 = X_base
    else:
        X0 = np.concatenate((X_base, X_res), axis=1)
    n_X0 = X0.shape[1]
    X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
    XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0)
    X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
    return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
        X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC
0.002045
def write_cycle(filename, graph, cycle, directed):
    """Write an eulerian tour in DOT format

    :param filename: the file to be written in DOT format
    :param graph: graph in listlist format, cannot be listdict
    :param bool directed: describes the graph
    :param cycle: tour as a vertex list
    :returns: nothing
    :complexity: `O(|V|^2 + |E|)`
    """
    n = len(graph)
    weight = [[float('inf')] * n for _ in range(n)]
    for r in range(1, len(cycle)):
        weight[cycle[r-1]][cycle[r]] = r
        if not directed:
            weight[cycle[r]][cycle[r-1]] = r
    write_graph(filename, graph, arc_label=weight, directed=directed)
0.00149
def process(fname, allow_type):
    """Process a file."""
    fname = str(fname)
    # HACK: ignore op.h which is automatically generated
    if fname.endswith('op.h'):
        return
    arr = fname.rsplit('.', 1)
    if fname.find('#') != -1 or arr[-1] not in allow_type:
        return
    if arr[-1] in CXX_SUFFIX:
        _HELPER.process_cpp(fname, arr[-1])
    if arr[-1] in PYTHON_SUFFIX:
        _HELPER.process_python(fname)
0.00464
def skyimage_figure(cluster):
    """
    Given a cluster create a Bokeh plot figure using the cluster's image.
    """
    pf_image = figure(x_range=(0, 1), y_range=(0, 1),
                      title='Image of {0}'.format(cluster.name))
    pf_image.image_url(url=[cluster.image_path],
                       x=0, y=0, w=1, h=1, anchor='bottom_left')
    pf_image.toolbar_location = None
    pf_image.axis.visible = False
    return pf_image
0.002237
def status(name, *args, **kwargs):
    '''
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    results = {}
    all_services = get_all()
    contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
    if contains_globbing:
        services = fnmatch.filter(all_services, name)
    else:
        services = [name]
    for service in services:
        results[service] = info(service)['Status'] in ['Running', 'Stop Pending']
    if contains_globbing:
        return results
    return results[name]
0.002045
def delFromTimeVary(self,*params):
    '''
    Removes any number of parameters from time_vary for this instance.

    Parameters
    ----------
    params : string
        Any number of strings naming attributes to be removed from time_vary

    Returns
    -------
    None
    '''
    for param in params:
        if param in self.time_vary:
            self.time_vary.remove(param)
0.009153
def set(self, dic, val=None, force=False):
    """set can assign versatile options from
    `CMAOptions.versatile_options()` with a new value, use `init()`
    for the others.

    Arguments
    ---------
        `dic`
            either a dictionary or a key. In the latter
            case, `val` must be provided
        `val`
            value for `key`, approximate match is sufficient
        `force`
            force setting of non-versatile options, use with caution

    This method will be most probably used with the ``opts`` attribute of
    a `CMAEvolutionStrategy` instance.
    """
    if val is not None:  # dic is a key in this case
        dic = {dic:val}  # compose a dictionary
    for key_original, val in list(dict(dic).items()):
        key = self.corrected_key(key_original)
        if not self._lock_setting or \
                key in CMAOptions.versatile_options():
            self[key] = val
        else:
            _print_warning('key ' + str(key_original) +
                           ' ignored (not recognized as versatile)',
                           'set', 'CMAOptions')
    return self
0.004058
def _split_props(thing, doc):
    """ Separate properties from other kinds of member. """
    props = inspect.getmembers(thing, lambda o: isinstance(o, property))
    ps = []
    docs = [
        (*_get_names(names, types), names, types, desc)
        for names, types, desc in doc
    ]
    for prop_name, prop in props:
        in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
        for d in in_doc:
            docs.remove(d)
        ps.append(prop_name)
    if len(docs) > 0:
        _, _, names, types, descs = zip(*docs)
        return ps, zip(names, types, descs)
    return ps, []
0.003333
def description(self):
    "this is only a property so it can raise; make it an attr once it works"
    if self.lastx is None:
        return
    if type(self.lastx) not in (sqparse2.SelectX,sqparse2.UpdateX,sqparse2.InsertX):
        return
    if type(self.lastx) in (sqparse2.UpdateX,sqparse2.InsertX) and self.lastx.ret is None:
        return
    # at this point we know this is an operation that returns rows
    if type(self.lastx) in (sqparse2.UpdateX,sqparse2.InsertX):
        raise NotImplementedError('todo: Cursor.description for non-select')
    else:  # select case
        return [description_from_colx(self.connection,self.lastx,colx) for colx in self.lastx.cols.children]
0.024242
def tracer_diffusion_coefficient( self ):
    """
    Tracer diffusion coefficient, D*.

    Args:
        None

    Returns:
        (Float): The tracer diffusion coefficient, D*.
    """
    if self.has_run:
        return self.atoms.sum_dr_squared() / ( 6.0 * float( self.number_of_atoms ) * self.lattice.time )
    else:
        return None
0.023196
def _find_usage_elbv1(self):
    """
    Find usage for ELBv1 / Classic ELB and update the appropriate limits.

    :returns: number of Classic ELBs in use
    :rtype: int
    """
    logger.debug("Checking usage for ELBv1")
    self.connect()
    lbs = paginate_dict(
        self.conn.describe_load_balancers,
        alc_marker_path=['NextMarker'],
        alc_data_path=['LoadBalancerDescriptions'],
        alc_marker_param='Marker'
    )
    for lb in lbs['LoadBalancerDescriptions']:
        self.limits['Listeners per load balancer']._add_current_usage(
            len(lb['ListenerDescriptions']),
            aws_type='AWS::ElasticLoadBalancing::LoadBalancer',
            resource_id=lb['LoadBalancerName'],
        )
        self.limits[
            'Registered instances per load balancer'
        ]._add_current_usage(
            len(lb['Instances']),
            aws_type='AWS::ElasticLoadBalancing::LoadBalancer',
            resource_id=lb['LoadBalancerName']
        )
    logger.debug('Done with ELBv1 usage')
    return len(lbs['LoadBalancerDescriptions'])
0.001682
def _parse_linux_cx_state(self, lines, tcp_states, state_col, protocol=None, ip_version=None):
    """
    Parse the output of the command that retrieves the connection state
    (either `ss` or `netstat`)
    Returns a dict metric_name -> value
    """
    metrics = {}
    for _, val in iteritems(self.cx_state_gauge):
        metrics[val] = 0
    for l in lines:
        cols = l.split()
        if cols[0].startswith('tcp') or protocol == 'tcp':
            proto = "tcp{0}".format(ip_version) if ip_version else ("tcp4", "tcp6")[cols[0] == "tcp6"]
            if cols[state_col] in tcp_states:
                metric = self.cx_state_gauge[proto, tcp_states[cols[state_col]]]
                metrics[metric] += 1
        elif cols[0].startswith('udp') or protocol == 'udp':
            proto = "udp{0}".format(ip_version) if ip_version else ("udp4", "udp6")[cols[0] == "udp6"]
            metric = self.cx_state_gauge[proto, 'connections']
            metrics[metric] += 1
    return metrics
0.007491
def clockIsBroken():
    """
    Returns whether twisted.internet.task.Clock has the bug that
    returns the wrong DelayedCall or not.
    """
    clock = Clock()
    dc1 = clock.callLater(10, lambda: None)
    dc2 = clock.callLater(1, lambda: None)
    if dc1 is dc2:
        return True
    else:
        return False
0.003125
def fill_livetime_hist(skydir, tab_sc, tab_gti, zmax, costh_edges): """Generate a sequence of livetime distributions at the sky positions given by ``skydir``. The output of the method are two NxM arrays containing a sequence of histograms for N sky positions and M incidence angle bins where the bin edges are defined by ``costh_edges``. This method uses the same algorithm as `gtltcube` with the exception that SC time intervals are assumed to be aligned with GTIs. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` Vector of sky directions for which livetime histograms will be accumulated. tab_sc : `~astropy.table.Table` Spacecraft table. Must contain the following columns: START, STOP, LIVETIME, RA_SCZ, DEC_SZ, RA_ZENITH, DEC_ZENITH. tab_gti : `~astropy.table.Table` Table of good time intervals (GTIs). zmax : float Zenith cut. costh_edges : `~numpy.ndarray` Incidence angle bin edges in cos(angle). Returns ------- lt : `~numpy.ndarray` Array of livetime histograms. lt_wt : `~numpy.ndarray` Array of histograms of weighted livetime (livetime x livetime fraction). """ if len(tab_gti) == 0: shape = (len(costh_edges) - 1, len(skydir)) return (np.zeros(shape), np.zeros(shape)) m = (tab_sc['START'] < tab_gti['STOP'][-1]) m &= (tab_sc['STOP'] > tab_gti['START'][0]) tab_sc = tab_sc[m] cos_zmax = np.cos(np.radians(zmax)) sc_t0 = np.array(tab_sc['START'].data) sc_t1 = np.array(tab_sc['STOP'].data) sc_live = np.array(tab_sc['LIVETIME'].data) sc_lfrac = sc_live / (sc_t1 - sc_t0) sc_xyz = angle_to_cartesian(np.radians(tab_sc['RA_SCZ'].data), np.radians(tab_sc['DEC_SCZ'].data)) zn_xyz = angle_to_cartesian(np.radians(tab_sc['RA_ZENITH'].data), np.radians(tab_sc['DEC_ZENITH'].data)) tab_gti_t0 = np.array(tab_gti['START'].data) tab_gti_t1 = np.array(tab_gti['STOP'].data) # Index of the closest GTI interval idx = np.digitize(sc_t0, tab_gti_t0) - 1 # start/stop time of closest GTI interval gti_t0 = np.zeros_like(sc_t0) gti_t1 = np.zeros_like(sc_t1) gti_t0[idx >= 0] = tab_gti_t0[idx[idx >= 0]] gti_t1[idx >= 0] = tab_gti_t1[idx[idx >= 0]] nbin = len(costh_edges) - 1 lt = np.zeros((nbin,) + skydir.shape) lt_wt = np.zeros((nbin,) + skydir.shape) m0 = (idx >= 0) & (sc_t0 >= gti_t0) & (sc_t1 <= gti_t1) xyz = angle_to_cartesian(skydir.ra.rad, skydir.dec.rad) for i, t in enumerate(xyz): cos_sep = utils.dot_prod(t, sc_xyz) cos_zn = utils.dot_prod(t, zn_xyz) m = m0 & (cos_zn > cos_zmax) & (cos_sep > 0.0) bins = np.digitize(cos_sep[m], bins=costh_edges) - 1 bins = np.clip(bins, 0, nbin - 1) lt[:, i] = np.bincount(bins, weights=sc_live[m], minlength=nbin) lt_wt[:, i] = np.bincount(bins, weights=sc_live[m] * sc_lfrac[m], minlength=nbin) return lt, lt_wt
0.001271
def _add_extra_files(self, files):
    '''if extra files are to be analyzed with an sosreport,
    this will add them to the origin path to be analyzed'''
    try:
        for f in files:
            self.logger.con_out("adding additional file for analysis: %s" % f)
            fname = os.path.basename(f)
            f_new = os.path.join(self.dir_path, fname)
            shutil.copyfile(f,f_new)
    except IOError as e:
        self.logger.con_out("ExtraFileError: %s is not readable or does not exist. Skipping File" % f)
        self.logger.exception(e)
        pass
    except Exception as e:  # pragma: no cover
        self.logger.exception(e)
        raise Exception("ExtraFileError: Unable to Process Extra File - %s" % f)
0.010139
def save(self, io, little_endian=False):
    """
    Saves the `NBTFile()` to `io`, which can be any file-like object
    providing `write()`.
    """
    if little_endian:
        write = lambda fmt, *args: io.write(pack('<' + fmt, *args))
    else:
        write = lambda fmt, *args: io.write(pack('>' + fmt, *args))
    write.io = io
    self.write(write)
0.010025
def p_simple_command_element(p):
    '''simple_command_element : WORD
                              | ASSIGNMENT_WORD
                              | redirection'''
    if isinstance(p[1], ast.node):
        p[0] = [p[1]]
        return

    parserobj = p.context
    p[0] = [_expandword(parserobj, p.slice[1])]

    # change the word node to an assignment if necessary
    if p.slice[1].ttype == tokenizer.tokentype.ASSIGNMENT_WORD:
        p[0][0].kind = 'assignment'
0.002132
def bovy_end_print(filename,**kwargs):
    """
    NAME:
       bovy_end_print
    PURPOSE:
       saves the current figure(s) to filename
    INPUT:
       filename - filename for plot (with extension)
    OPTIONAL INPUTS:
       format - file-format
    OUTPUT:
       (none)
    HISTORY:
       2009-12-23 - Written - Bovy (NYU)
    """
    if 'format' in kwargs:
        pyplot.savefig(filename,**kwargs)
    else:
        pyplot.savefig(filename,format=re.split(r'\.',filename)[-1],**kwargs)
    pyplot.close()
0.011385
def render_like(parser, token):
    """
    {% likes user as like_list %}
    <ul>
        {% for like in like_list %}
            <li>{% render_like like %}</li>
        {% endfor %}
    </ul>
    """
    tokens = token.split_contents()
    var = tokens[1]
    return LikeRenderer(var)
0.003472
def _update_result(self, result, insertions, dimension_index):
    """Insert subtotals into resulting ndarray."""
    for j, (ind_insertion, value) in enumerate(insertions):
        result = np.insert(
            result, ind_insertion + j + 1, value, axis=dimension_index
        )
    return result
0.006173
def response_received(self, response):
    """
    Called when a response to a job RPC has been received. Decodes the
    response and finalizes the result, then reports the result to the
    job manager.
    """
    if self._closed:
        return
    assert self._job is not None
    logger.debug("worker {} got response".format(id(self)))
    result = self._job.get_result(response)
    self._manager.add_result(self._job, result)
    self._load_job()
0.003953
def holm(pvals, alpha=.05): """P-values correction with Holm method. Parameters ---------- pvals : array_like Array of p-values of the individual tests. alpha : float Error rate (= alpha level). Returns ------- reject : array, bool True if a hypothesis is rejected, False if not pvals_corrected : array P-values adjusted for multiple hypothesis testing using the Holm procedure. See also -------- bonf : Bonferroni correction fdr : Benjamini/Hochberg and Benjamini/Yekutieli FDR correction Notes ----- From Wikipedia: In statistics, the Holm–Bonferroni method (also called the Holm method) is used to counteract the problem of multiple comparisons. It is intended to control the family-wise error rate and offers a simple test uniformly more powerful than the Bonferroni correction. The Holm adjusted p-values are the running maximum of the sorted p-values divided by the corresponding increasing alpha level: .. math:: \\frac{\\alpha}{n}, \\frac{\\alpha}{n-1}, ..., \\frac{\\alpha}{1} where :math:`n` is the number of test. The full mathematical formula is: .. math:: \\widetilde {p}_{{(i)}}=\\max _{{j\\leq i}}\\left\\{(n-j+1)p_{{(j)}} \\right\\}_{{1}} Note that NaN values are not taken into account in the p-values correction. References ---------- - Holm, S. (1979). A simple sequentially rejective multiple test procedure. Scandinavian journal of statistics, 65-70. - https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method Examples -------- >>> from pingouin import holm >>> pvals = [.50, .003, .32, .054, .0003] >>> reject, pvals_corr = holm(pvals, alpha=.05) >>> print(reject, pvals_corr) [False True False False True] [0.64 0.012 0.64 0.162 0.0015] """ # Convert to array and save original shape pvals = np.asarray(pvals) shape_init = pvals.shape pvals = pvals.ravel() num_nan = np.isnan(pvals).sum() # Sort the (flattened) p-values pvals_sortind = np.argsort(pvals) pvals_sorted = pvals[pvals_sortind] sortrevind = pvals_sortind.argsort() ntests = pvals.size - num_nan # Now we adjust the p-values pvals_corr = np.diag(pvals_sorted * np.arange(ntests, 0, -1)[..., None]) pvals_corr = np.maximum.accumulate(pvals_corr) pvals_corr = np.clip(pvals_corr, None, 1) # And revert to the original shape and order pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan)) pvals_corrected = pvals_corr[sortrevind].reshape(shape_init) with np.errstate(invalid='ignore'): reject = np.less(pvals_corrected, alpha) return reject, pvals_corrected
0.000359
def eth_sign_sha3(data: bytes) -> bytes:
    """
    eth_sign/recover compatible hasher
    Prefixes data with "\x19Ethereum Signed Message:\n<len(data)>"
    """
    prefix = b'\x19Ethereum Signed Message:\n'
    if not data.startswith(prefix):
        data = prefix + b'%d%s' % (len(data), data)
    return keccak(data)
0.003115
def apply_T6(word):
    '''If a VVV-sequence contains a long vowel, there is a syllable boundary
    between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
    [mää.yt.te].'''
    T6 = ''
    WORD = word.split('.')

    for i, v in enumerate(WORD):
        if contains_VVV(v):
            VV = [v.find(j) for j in LONG_VOWELS if v.find(j) > 0]
            if VV:
                I = VV[0]
                T6 = ' T6'
                if I + 2 == len(v) or is_vowel(v[I + 2]):
                    WORD[i] = v[:I + 2] + '.' + v[I + 2:]  # TODO
                else:
                    WORD[i] = v[:I] + '.' + v[I:]

    word = '.'.join(WORD)
    word = word.strip('.')  # TODO

    return word, T6
0.002793
def get_data_dir(module_name: str) -> str:
    """Ensure the appropriate Bio2BEL data directory exists for the given module,
    then returns the file path.

    :param module_name: The name of the module. Ex: 'chembl'
    :return: The module's data directory
    """
    module_name = module_name.lower()
    data_dir = os.path.join(BIO2BEL_DIR, module_name)
    os.makedirs(data_dir, exist_ok=True)
    return data_dir
0.004808
def schaffer(self, x):
    """ Schaffer function x0 in [-100..100]"""
    N = len(x)
    s = x[0:N - 1]**2 + x[1:N]**2
    return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1))
0.010582
def rguiding(self,*args,**kwargs): """ NAME: rguiding PURPOSE: calculate the guiding-center radius (the radius of a circular orbit with the same angular momentum) INPUT: pot= potential instance or list of such instances ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: R_guiding HISTORY: 2018-08-29 - Written as thin wrapper around Potential.rl - Bovy (UofT) """ pot= kwargs.get('pot',self._orb.__dict__.get('_pot',None)) if pot is None: raise RuntimeError("You need to specify the potential as pot= to compute the guiding-center radius") flatten_potential(pot) if _isNonAxi(pot): raise RuntimeError('Potential given to rguiding is non-axisymmetric, but rguiding requires an axisymmetric potential') _check_consistent_units(self,pot) Lz= self.Lz(*args,use_physical=False) return nu.array([rl(pot,lz,use_physical=False) for lz in Lz])
0.014615
def denoise_z15():
    """Replace tokens instead of masking."""
    hparams = xmoe2_dense_0()
    hparams.decoder_type = "denoising"
    hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
    hparams.noising_use_eval_during_train = 0.25
    return hparams
0.026718
def is_list_of_states(self, arg):
    """
    A list of states example -
    [('x1', 'easy'), ('x2', 'hard')]

    Returns
    -------
    True, if arg is a list of states else False.
    """
    return isinstance(arg, list) and all(isinstance(i, tuple) for i in arg)
0.006667
def lookup(self, language_ranges):
    """
    Perform an RFC4647 language range lookup on the keys in the
    dictionary. `language_ranges` must be a sequence of
    :class:`LanguageRange` instances.

    Return the entry in the dictionary with a key as produced by
    `lookup_language`. If `lookup_language` does not find a match and the
    mapping contains an entry with key :data:`None`, that entry is
    returned, otherwise :class:`KeyError` is raised.
    """
    keys = list(self.keys())
    try:
        keys.remove(None)
    except ValueError:
        pass
    keys.sort()
    key = lookup_language(keys, language_ranges)
    return self[key]
0.00277
def recursive_symlink_dirs(source_d, destination_d):
    '''
    Create dirs and symlink all files recursively from source_d,
    ignoring errors (e.g. existing files)
    '''
    func = os.symlink
    if os.name == 'nt':
        # NOTE: need to verify that default perms only allow admins to create
        # symlinks on Windows
        func = shutil.copy
    if os.path.exists(destination_d):
        os.rmdir(destination_d)
    shutil.copytree(source_d, destination_d, copy_function=func)
0.002037
def _get_progress(self):
    """
    Get current progress of emerge.

    Returns a dict containing current and total value.
    """
    input_data = []
    ret = {}

    # traverse emerge.log from bottom up to get latest information
    last_lines = self.py3.command_output(["tail", "-50", self.emerge_log_file])
    input_data = last_lines.split("\n")
    input_data.reverse()

    for line in input_data:
        if "*** terminating." in line:
            # copy content of ret_default, not only the references
            ret = copy.deepcopy(self.ret_default)
            break
        else:
            status_re = re.compile(
                "\((?P<cu>[\d]+) of (?P<t>[\d]+)\) "
                "(?P<a>[a-zA-Z\/]+( [a-zA-Z]+)?) "
                "\((?P<ca>[\w\-]+)\/(?P<p>[\w\.]+)"
            )
            res = status_re.search(line)
            if res is not None:
                ret["action"] = res.group("a").lower()
                ret["category"] = res.group("ca")
                ret["current"] = res.group("cu")
                ret["pkg"] = res.group("p")
                ret["total"] = res.group("t")
                break
    return ret
0.010989
def security_scheme(self, component_id, component):
    """Add a security scheme which can be referenced.

    :param str component_id: component_id to use as reference
    :param dict kwargs: security scheme fields
    """
    if component_id in self._security_schemes:
        raise DuplicateComponentNameError(
            'Another security scheme with name "{}" is already registered.'.format(
                component_id
            )
        )
    self._security_schemes[component_id] = component
    return self
0.005291
def binary_size(self):
    '''Return the number of bytes to store this group and its parameters.'''
    return (
        1 +  # group_id
        1 + len(self.name.encode('utf-8')) +  # size of name and name bytes
        2 +  # next offset marker
        1 + len(self.desc.encode('utf-8')) +  # size of desc and desc bytes
        sum(p.binary_size() for p in self.params.values()))
0.017241
def swag_from( specs=None, filetype=None, endpoint=None, methods=None, validation=False, schema_id=None, data=None, definition=None, validation_function=None, validation_error_handler=None): """ Takes a filename.yml, a dictionary or object and loads swagger specs. :param specs: a filepath, a dictionary or an object :param filetype: yml or yaml (json and py to be implemented) :param endpoint: endpoint to build definition name :param methods: method to build method based specs :param validation: perform validation? :param schema_id: Definition id ot name to use for validation :param data: data to validate (default is request.json) :param definition: alias to schema_id :param validation_function: custom validation function which takes the positional arguments: data to be validated at first and schema to validate against at second :param validation_error_handler: custom function to handle exceptions thrown when validating which takes the exception thrown as the first, the data being validated as the second and the schema being used to validate as the third argument """ def resolve_path(function, filepath): if not filepath.startswith('/'): if not hasattr(function, 'root_path'): function.root_path = get_root_path(function) res = os.path.join(function.root_path, filepath) return res return filepath def set_from_filepath(function): final_filepath = resolve_path(function, specs) function.swag_type = filetype or specs.split('.')[-1] if endpoint or methods: if not hasattr(function, 'swag_paths'): function.swag_paths = {} if not endpoint and not methods: function.swag_path = final_filepath elif endpoint and methods: for verb in methods: key = "{}_{}".format(endpoint, verb.lower()) function.swag_paths[key] = final_filepath elif endpoint and not methods: function.swag_paths[endpoint] = final_filepath elif methods and not endpoint: for verb in methods: function.swag_paths[verb.lower()] = final_filepath def set_from_specs_dict(function): function.specs_dict = specs def decorator(function): if isinstance(specs, string_types): set_from_filepath(function) # function must have or a single swag_path or a list of them swag_path = getattr(function, 'swag_path', None) swag_paths = getattr(function, 'swag_paths', None) validate_args = { 'filepath': swag_path or swag_paths, 'root': getattr(function, 'root_path', None) } if isinstance(specs, dict): set_from_specs_dict(function) validate_args = {'specs': specs} @wraps(function) def wrapper(*args, **kwargs): if validation is True: validate( data, schema_id or definition, validation_function=validation_function, validation_error_handler=validation_error_handler, **validate_args ) return function(*args, **kwargs) return wrapper return decorator
0.00029
def _scan_pth_files(dir_paths):
    """Given an iterable of directory paths, yield paths to all .pth files within."""
    for dir_path in dir_paths:
        if not os.path.exists(dir_path):
            continue

        pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))
        for pth_filename in pth_filenames:
            yield os.path.join(dir_path, pth_filename)
0.013333
def unsubscribe_list(self, list_id):
    """
    Unsubscribe to a list

    :param list_id: list ID number
    :return: :class:`~responsebot.models.List` object
    """
    return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id)))
0.01083
def _selector_as_string(self, selector):
    """
    Returns a selector as a CSS string

    :param selector: A list of tinycss Tokens
    :type selector: list

    :returns: The CSS string for the selector
    :rtype: str
    """
    return ','.join(
        ''.join(token.as_css() for token in strip_whitespace(token_list))
        for token_list in split_on_comma(selector))
0.004808
def areaBetween(requestContext, *seriesLists): """ Draws the vertical area in between the two series in seriesList. Useful for visualizing a range such as the minimum and maximum latency for a service. areaBetween expects **exactly one argument** that results in exactly two series (see example below). The order of the lower and higher values series does not matter. The visualization only works when used in conjunction with ``areaMode=stacked``. Most likely use case is to provide a band within which another metric should move. In such case applying an ``alpha()``, as in the second example, gives best visual results. Example:: &target=areaBetween(service.latency.{min,max})&areaMode=stacked &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked If for instance, you need to build a seriesList, you should use the ``group`` function, like so:: &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max))) """ if len(seriesLists) == 1: [seriesLists] = seriesLists assert len(seriesLists) == 2, ("areaBetween series argument must " "reference *exactly* 2 series") lower, upper = seriesLists if len(lower) == 1: [lower] = lower if len(upper) == 1: [upper] = upper lower.options['stacked'] = True lower.options['invisible'] = True upper.options['stacked'] = True lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression return [lower, upper]
0.000641
def subtrees_for_phrase(self, phrase_type):
    """
    Returns subtrees corresponding all phrases matching a given phrase type

    :param phrase_type: POS such as "NP", "VP", "det", etc.
    :type phrase_type: str

    :return: a list of NLTK.Tree.Subtree instances
    :rtype: list of NLTK.Tree.Subtree
    """
    return [subtree for subtree in self.parse.subtrees()
            if subtree.node.lower() == phrase_type.lower()]
0.006652
def delete(self):
    ''' return DELETE SQL '''
    SQL = 'DELETE FROM %s' % self._table
    if self._selectors:
        SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
    return SQL
0.013158
def re_filter(text, regexps):
    """Filter text using regular expressions."""
    if not regexps:
        return text

    matched_text = []
    compiled_regexps = [re.compile(x) for x in regexps]
    for line in text:
        if line in matched_text:
            continue

        for regexp in compiled_regexps:
            found = regexp.search(line)
            if found and found.group():
                matched_text.append(line)

    return matched_text or text
0.002132
def dictlist_replace(dict_list: Iterable[Dict], key: str, value: Any) -> None:
    """
    Process an iterable of dictionaries. For each dictionary ``d``, change
    (in place) ``d[key]`` to ``value``.
    """
    for d in dict_list:
        d[key] = value
0.003906
def ip_rtm_config_route_static_route_nh_vrf_static_route_next_hop(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_nh_vrf = ET.SubElement(route, "static-route-nh-vrf")
    static_route_next_vrf_dest_key = ET.SubElement(static_route_nh_vrf, "static-route-next-vrf-dest")
    static_route_next_vrf_dest_key.text = kwargs.pop('static_route_next_vrf_dest')
    next_hop_vrf_key = ET.SubElement(static_route_nh_vrf, "next-hop-vrf")
    next_hop_vrf_key.text = kwargs.pop('next_hop_vrf')
    static_route_next_hop = ET.SubElement(static_route_nh_vrf, "static-route-next-hop")
    static_route_next_hop.text = kwargs.pop('static_route_next_hop')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.007554
def SetAttributes(self, urn, attributes, to_delete, add_child_index=True, mutation_pool=None):
    """Sets the attributes in the data store."""
    attributes[AFF4Object.SchemaCls.LAST] = [
        rdfvalue.RDFDatetime.Now().SerializeToDataStore()
    ]
    to_delete.add(AFF4Object.SchemaCls.LAST)
    if mutation_pool:
        pool = mutation_pool
    else:
        pool = data_store.DB.GetMutationPool()

    pool.MultiSet(urn, attributes, replace=False, to_delete=to_delete)
    if add_child_index:
        self._UpdateChildIndex(urn, pool)
    if mutation_pool is None:
        pool.Flush()
0.014514
def pvRvz(self,vR,vz,R,z,gl=True,ngl=_DEFAULTNGL2,vTmax=1.5): """ NAME: pvR PURPOSE: calculate the marginalized (vR,vz) probability at this location (NOT normalized by the density) INPUT: vR - radial velocity (can be Quantity) vz - vertical velocity (can be Quantity) R - radius (can be Quantity) z - height (can be Quantity) gl - use Gauss-Legendre integration (True, currently the only option) ngl - order of Gauss-Legendre integration vTmax - sets integration limits to [0,vTmax] for integration over vT (default: 1.5) OUTPUT: p(vR,vz,R,z) HISTORY: 2013-01-02 - Written - Bovy (IAS) 2018-01-12 - Added Gauss-Legendre integration prefactor vTmax/2 - Trick (MPA) """ if gl: if ngl % 2 == 1: raise ValueError("ngl must be even") #Use Gauss-Legendre integration for all if ngl == _DEFAULTNGL: glx, glw= self._glxdef, self._glwdef glx12, glw12= self._glxdef12, self._glwdef12 elif ngl == _DEFAULTNGL2: glx, glw= self._glxdef2, self._glwdef2 glx12, glw12= self._glxdef, self._glwdef else: glx, glw= numpy.polynomial.legendre.leggauss(ngl) glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2) #Evaluate everywhere vTgl= vTmax/2.*(glx+1.) vTglw= glw vTfac= 0.5 * vTmax #integration over [0.,vTmax] #If inputs are arrays, tile if isinstance(R,numpy.ndarray): nR= len(R) R= numpy.tile(R,(ngl,1)).T.flatten() z= numpy.tile(z,(ngl,1)).T.flatten() vR= numpy.tile(vR,(ngl,1)).T.flatten() vz= numpy.tile(vz,(ngl,1)).T.flatten() vTgl= numpy.tile(vTgl,(nR,1)).flatten() vTglw= numpy.tile(vTglw,(nR,1)) scalarOut= False else: R= R+numpy.zeros(ngl) vR= vR+numpy.zeros(ngl) z= z+numpy.zeros(ngl) vz= vz+numpy.zeros(ngl) nR= 1 scalarOut= True #evaluate logqeval= numpy.reshape(self(R, vR, vTgl, z, vz, log=True, use_physical=False), (nR,ngl)) out= numpy.sum(numpy.exp(logqeval)*vTglw*vTfac,axis=1) if scalarOut: return out[0] else: return out
0.021225
def process_bucket_inventory(bid, inventory_bucket, inventory_prefix): """Load last inventory dump and feed as key source. """ log.info("Loading bucket %s keys from inventory s3://%s/%s", bid, inventory_bucket, inventory_prefix) account, bucket = bid.split(':', 1) region = connection.hget('bucket-regions', bid) versioned = bool(int(connection.hget('bucket-versions', bid))) session = boto3.Session() s3 = session.client('s3', region_name=region, config=s3config) # find any key visitors with inventory filtering account_info = json.loads(connection.hget('bucket-accounts', account)) ifilters = [v.inventory_filter for v in get_key_visitors(account_info) if v.inventory_filter] with bucket_ops(bid, 'inventory'): page_iterator = load_bucket_inventory( s3, inventory_bucket, inventory_prefix, versioned, ifilters) if page_iterator is None: log.info("bucket:%s could not find inventory" % bid) # case: inventory configured but not delivered yet # action: dispatch to bucket partition (assumes 100k+ for inventory) # - todo consider max inventory age/staleness for usage return invoke(process_bucket_partitions, bid) connection.hset('buckets-inventory', bid, 1) for page in page_iterator: invoke(process_keyset, bid, page)
0.001413
def getIndexOfHeteroMen(genotypes, menIndex):
    """Get the indexes of heterozygous men.

    :param genotypes: the genotypes of everybody.
    :param menIndex: the indexes of the men (for the genotypes).

    :type genotypes: numpy.array
    :type menIndex: numpy.array

    :returns: a :py:class:`numpy.array` containing the indexes of the
              genotypes to remove.

    Finds the mean that have a heterozygous genotype for this current
    marker. Usually used on sexual chromosomes.
    """
    toRemove = set()
    for i in menIndex[0]:
        for genotype in [set(j.split(" ")) for j in genotypes[:, i]]:
            if len(genotype) != 1:
                # We have an heterozygous
                toRemove.add(i)

    toRemove = list(toRemove)
    toRemove.sort()

    return (np.array(toRemove, dtype=int),)
0.001209
def _set_arp_entry(self, v, load=False): """ Setter method for arp_entry, mapped from YANG variable /rbridge_id/arp_entry (list) If this variable is read-only (config: false) in the source YANG file, then _set_arp_entry is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_arp_entry() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("arp_ip_address",arp_entry.arp_entry, yang_name="arp-entry", rest_name="arp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='arp-ip-address', extensions={u'tailf-common': {u'info': u'Address Resolution Protocol (ARP)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'arp', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'ArpStaticConfigCallpoint'}}), is_container='list', yang_name="arp-entry", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Address Resolution Protocol (ARP)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'arp', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'ArpStaticConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """arp_entry must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("arp_ip_address",arp_entry.arp_entry, yang_name="arp-entry", rest_name="arp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='arp-ip-address', extensions={u'tailf-common': {u'info': u'Address Resolution Protocol (ARP)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'arp', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'ArpStaticConfigCallpoint'}}), is_container='list', yang_name="arp-entry", rest_name="arp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Address Resolution Protocol (ARP)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'arp', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'ArpStaticConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='list', is_config=True)""", }) self.__arp_entry = t if hasattr(self, '_set'): self._set()
0.003767
def run(self, clock, generalLedger):
    """
    Execute the component at the current clock cycle.

    :param clock: The clock containing the current execution time and
      period information.
    :param generalLedger: The general ledger into which to create the
      transactions.
    """
    for c in self.components:
        c.run(clock, generalLedger)
    for a in self.activities:
        a.run(clock, generalLedger)
0.004255
def removeQuery( self ):
    """
    Removes the currently selected query.
    """
    items = self.uiQueryTREE.selectedItems()
    tree = self.uiQueryTREE
    for item in items:
        parent = item.parent()
        if ( parent ):
            parent.takeChild(parent.indexOfChild(item))
        else:
            tree.takeTopLevelItem(tree.indexOfTopLevelItem(item))

    self.setQuery(self.query())
0.016985
def _memory_usage_ps():
    """return memory usage of python process in MB

    >>> isinstance(_memory_usage_ps(),float)
    True

    """
    out = subprocess.Popen(
        ['ps', 'v', '-p', str(os.getpid())],
        stdout=subprocess.PIPE).communicate()[0].split(b'\n')
    vsz_index = out[0].split().index(b'RSS')
    mem = float(out[1].split()[vsz_index]) / 1024
    return mem
0.002611
def tlist2dict(tuple_list,**kwargs):
    '''
        #duplicate keys will lost
        tl = [(1,2),(3,4),(1,5)]
        tlist2dict(tl)
    '''
    if('deepcopy' in kwargs):
        deepcopy = kwargs['deepcopy']
    else:
        deepcopy = 1
    if('check' in kwargs):
        check = kwargs['check']
    else:
        check = 1
    if(check):
        if(tltl.is_tlist(tuple_list)):
            pass
        else:
            return(None)
    else:
        pass
    j = {}
    if(deepcopy):
        new = copy.deepcopy(tuple_list)
    else:
        new = tuple_list
    for i in range(0,new.__len__()):
        temp = new[i]
        key = temp[0]
        value = temp[1]
        j[key] = value
    return(j)
0.004243
def _kvmatrix2d(km,vm):
    '''
        km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]]
        show_kmatrix(km)
        vm = [[[222]], ['b']]
        show_vmatrix(vm)
        d = _kvmatrix2d(km,vm)
    '''
    d = {}
    kmwfs = get_kmwfs(km)
    vmwfs = elel.get_wfs(vm)
    lngth = vmwfs.__len__()
    for i in range(0,lngth):
        value = elel.getitem_via_pathlist(vm,vmwfs[i])
        cond = elel.is_leaf(value)
        if(cond):
            _setitem_via_pathlist(d,kmwfs[i],value)
        else:
            _setdefault_via_pathlist(d,kmwfs[i])
    return(d)
0.015306
def request_ligodotorg(url, debug=False): """Request the given URL using LIGO.ORG SAML authentication. This requires an active Kerberos ticket for the user, to get one: $ kinit [email protected] Parameters ---------- url : `str` URL path for request debug : `bool`, optional Query in verbose debuggin mode, default `False` Returns ------- urllib.addinfourl file object containing output data, use .read() to extract text content """ # set debug to 1 to see all HTTP(s) traffic debug = int(debug) # need an instance of HTTPS handler to do HTTPS httpsHandler = urllib2.HTTPSHandler(debuglevel = debug) # use a cookie jar to store session cookies jar = cookielib.LWPCookieJar() # if a cookier jar exists open it and read the cookies # and make sure it has the right permissions if os.path.exists(COOKIE_JAR): os.chmod(COOKIE_JAR, stat.S_IRUSR | stat.S_IWUSR) # set ignore_discard so that session cookies are preserved jar.load(COOKIE_JAR, ignore_discard = True) # create a cookie handler from the cookier jar cookie_handler = urllib2.HTTPCookieProcessor(jar) # need a redirect handler to follow redirects redirectHandler = urllib2.HTTPRedirectHandler() # need an auth handler that can do negotiation. # input parameter is the Kerberos service principal. auth_handler = HTTPNegotiateAuthHandler(service_principal='HTTP@%s' % (LIGO_LOGIN_URL)) # create the opener. opener = urllib2.build_opener(auth_handler, cookie_handler, httpsHandler, redirectHandler) # prepare the request object request = urllib2.Request(url) # use the opener and the request object to make the request. response = opener.open(request) # save the session cookies to a file so that they can # be used again without having to authenticate jar.save(COOKIE_JAR, ignore_discard=True) return response
0.002882
def drop_everything(engine): '''Droping all tables and custom types (enums) using `engine`. Taken from http://www.sqlalchemy.org/trac/wiki/UsageRecipes/DropEverything This method is more robust than `metadata.drop_all(engine)`. B.c. when you change a table or a type name, `drop_all` does not consider the old one. Thus, DB holds some unused entities.''' conn = engine.connect() # the transaction only applies if the DB supports # transactional DDL, i.e. Postgresql, MS SQL Server trans = conn.begin() inspector = reflection.Inspector.from_engine(engine) metadata = MetaData() tbs = [] all_fks = [] types = [] for table_name in inspector.get_table_names(): fks = [] for fk in inspector.get_foreign_keys(table_name): if not fk['name']: continue fks.append(ForeignKeyConstraint((), (), name=fk['name'])) for col in inspector.get_columns(table_name): if isinstance(col['type'], SchemaType): types.append(col['type']) t = Table(table_name, metadata, *fks) tbs.append(t) all_fks.extend(fks) try: for fkc in all_fks: conn.execute(DropConstraint(fkc)) for table in tbs: conn.execute(DropTable(table)) for custom_type in types: custom_type.drop(conn) trans.commit() except: # pragma: no cover trans.rollback() raise
0.00271
def main(): """ The main function of the Andes command-line tool. This function executes the following workflow: * Parse the command line inputs * Show the tool preamble * Output the requested helps, edit/save configs or remove outputs. Exit the main program if any of the above is executed * Process the input files and call ``main.run()`` using single- or multi-processing * Show the execution time and exit Returns ------- None """ t0, s = elapsed() # parser command line arguments args = vars(cli_new()) # configure stream handler verbose level config_logger(log_path=misc.get_log_dir(), stream_level=args['verbose']) # show preamble preamble() logger.debug('command line arguments:') logger.debug(pprint.pformat(args)) if andeshelp(**args) or search(**args) or edit_conf(**args) or remove_output(**args) \ or save_config(**args): return # process input files if len(args['filename']) == 0: logger.info('error: no input file. Try \'andes -h\' for help.') # preprocess cli args path = args.get('input_path', os.getcwd()) ncpu = args['ncpu'] if ncpu == 0 or ncpu > os.cpu_count(): ncpu = os.cpu_count() cases = [] for file in args['filename']: # use absolute path for cases which will be respected by FileMan full_paths = os.path.abspath(os.path.join(path, file)) found = glob.glob(full_paths) if len(found) == 0: logger.info('error: file {} does not exist.'.format(full_paths)) else: cases += found # remove folders and make cases unique cases = list(set(cases)) valid_cases = [] for case in cases: if os.path.isfile(case): valid_cases.append(case) logger.debug('Found files: ' + pprint.pformat(valid_cases)) if len(valid_cases) <= 0: pass elif len(valid_cases) == 1: run(valid_cases[0], **args) else: # set verbose level for multi processing logger.info('Processing {} jobs on {} CPUs'.format(len(valid_cases), ncpu)) logger.handlers[1].setLevel(logging.WARNING) # start processes jobs = [] for idx, file in enumerate(valid_cases): args['pid'] = idx job = Process( name='Process {0:d}'.format(idx), target=run, args=(file, ), kwargs=args) jobs.append(job) job.start() start_msg = 'Process {:d} <{:s}> started.'.format(idx, file) print(start_msg) logger.debug(start_msg) if (idx % ncpu == ncpu - 1) or (idx == len(valid_cases) - 1): sleep(0.1) for job in jobs: job.join() jobs = [] # restore command line output when all jobs are done logger.handlers[1].setLevel(logging.INFO) t0, s0 = elapsed(t0) if len(valid_cases) == 1: logger.info('-> Single process finished in {:s}.'.format(s0)) elif len(valid_cases) >= 2: logger.info('-> Multiple processes finished in {:s}.'.format(s0)) return
0.000925
def interpret_waveform(fileContent, RelativeChannelNo): """ Extracts the data for just 1 channel and computes the corresponding time array (in seconds) starting from 0. Important Note: RelativeChannelNo is NOT the channel number on the Saleae data logger it is the relative number of the channel that was saved. E.g. if you save channels 3, 7 and 10, the corresponding RelativeChannelNos would be 0, 1 and 2. Parameters ---------- fileContent : bytes bytes object containing the data from a .bin file exported from the saleae data logger. RelativeChannelNo : int The relative order/position of the channel number in the saved binary file. See Important Note above! Returns ------- time : ndarray A generated time array corresponding to the data list Data : list The data from the relative channel requested SampleTime : float The time between samples (in seconds) """ (ChannelData, LenOf1Channel, NumOfChannels, SampleTime) = read_data_from_bytes(fileContent) if RelativeChannelNo > NumOfChannels-1: raise ValueError("There are {} channels saved, you attempted to read relative channel number {}. Pick a relative channel number between {} and {}".format(NumOfChannels, RelativeChannelNo, 0, NumOfChannels-1)) data = ChannelData[RelativeChannelNo] del(ChannelData) time = _np.arange(0, SampleTime*LenOf1Channel, SampleTime) return (0,SampleTime*LenOf1Channel,SampleTime), data
0.007678
def to_sympy_matrix(value):
    """
    Converts value to a `sympy.Matrix` object, if possible.
    Leaves the value as `sympy.Matrix` if it already was

    :param value: value to convert
    :return:
    :rtype: `sympy.Matrix`
    """
    if isinstance(value, sympy.Matrix):
        return value
    try:
        return sympy.Matrix(value)
    except ValueError as original_exception:
        if isinstance(value, list) and len(value) and all([not isinstance(x, list) for x in value]):
            # Let's try to convert the contents into a list
            # (this is required for 0.7.4 if we pass in a list of strings)
            # See `test_creation_of_column_matrix_from_list_of_strings` test
            list_of_lists_value = [[x] for x in value]
            try:
                m = sympy.Matrix(list_of_lists_value)
                return m
            except Exception:
                raise original_exception
        else:
            raise original_exception
0.00206
def _assert_path_is_rw(self):
    """
    Make sure, that `self.path` exists, is directory a readable/writeable.

    Raises:
        IOError: In case that any of the assumptions failed.
        ValueError: In case that `self.path` is not set.
    """
    if not self.path:
        raise ValueError("`path` argument must be set!")

    if not os.path.exists(self.path):
        raise IOError("`%s` not found." % self.path)

    if not os.path.isdir(self.path):
        raise IOError("`%s` is not a directory!" % self.path)

    if not os.access(self.path, (os.R_OK or os.W_OK)):
        raise IOError(
            "Can't access `%s`, please check permissions." % self.path
        )
0.002681
def delete_many(self, **kwargs):
    """ Delete multiple objects from collection.

    First ES is queried, then the results are used to query the DB.
    This is done to make sure deleted objects are those filtered
    by ES in the 'index' method (so user deletes what he saw).
    """
    db_objects = self.get_dbcollection_with_es(**kwargs)
    return self.Model._delete_many(db_objects, self.request)
0.00463
def patch_namespaced_service(self, name, namespace, body, **kwargs): # noqa: E501 """patch_namespaced_service # noqa: E501 partially update the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_service(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Service If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.patch_namespaced_service_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
0.001308
def _reprJSON(self):
    """Returns a JSON serializable representation of a ``Smi`` class instance.
    Use :func:`maspy.core.Smi._fromJSON()` to generate a new ``Smi`` instance
    from the return value.

    :returns: a JSON serializable python object
    """
    return {'__Smi__': (self.id, self.specfile, self.attributes,
                        self.params, self.scanListParams, self.scanList,
                        self.precursorList, self.productList
                        )
            }
0.003711
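Because _reprJSON only builds a plain dictionary, a common way to use it is as a default hook for json.dumps; this is a hedged sketch and smi stands for an existing Smi instance:

import json

def maspy_default(obj):
    # Delegate to _reprJSON() when available, otherwise let json raise.
    if hasattr(obj, '_reprJSON'):
        return obj._reprJSON()
    raise TypeError('%r is not JSON serializable' % obj)

json_text = json.dumps(smi, default=maspy_default)  # smi: an existing Smi instance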
def list(self, request, *args, **kwargs):
    """
    To get a list of supported resources' actions, run **OPTIONS** against
    */api/<resource_url>/* as an authenticated user.

    It is possible to filter and order by resource-specific fields, but these filters will be applied only to
    resources that support such filtering. For example, it is possible to sort resources by ?o=ram, but
    SugarCRM CRMs will ignore this ordering because they do not support such an option.

    Filter resources by type or category
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    There are two query arguments to select resources by their type.

    - Specify an explicit list of resource types, for example:

      /api/<resource_endpoint>/?resource_type=DigitalOcean.Droplet&resource_type=OpenStack.Instance

    - Specify a category (one of vms, apps, private_clouds or storages), for example:

      /api/<resource_endpoint>/?category=vms

    Filtering by monitoring fields
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    Resources may have an SLA attached to them. Example rendering of an SLA:

    .. code-block:: javascript

        "sla": {
            "value": 95.0,
            "agreed_value": 99.0,
            "period": "2016-03"
        }

    You may filter or order resources by SLA. The default period is the current year and month.

    - Example query for filtering the list of resources by actual SLA:

      /api/<resource_endpoint>/?actual_sla=90&period=2016-02

    - Warning! If a resource does not have an SLA attached to it, it is not included in the ordered response.
      Example query for ordering the list of resources by actual SLA:

      /api/<resource_endpoint>/?o=actual_sla&period=2016-02

    The service list displays current SLAs for each of the items. By default,
    the SLA period is set to the current month. To change the period, pass it as a query argument:

    - ?period=YYYY-MM - return a list with SLAs for a given month
    - ?period=YYYY - return a list with SLAs for a given year

    In all cases all currently running resources are returned; if the SLA for the given period is
    not known or not present, it will be shown as **null** in the response.

    Resources may have monitoring items attached to them. Example rendering of monitoring items:

    .. code-block:: javascript

        "monitoring_items": {
           "application_state": 1
        }

    You may filter or order resources by monitoring item.

    - Example query for filtering the list of resources by installation state:

      /api/<resource_endpoint>/?monitoring__installation_state=1

    - Warning! If a resource does not have the monitoring item attached to it, it is not included in the ordered response.
      Example query for ordering the list of resources by installation state:

      /api/<resource_endpoint>/?o=monitoring__installation_state

    Filtering by tags
    ^^^^^^^^^^^^^^^^^

    Resources may have tags attached to them. Example of tag rendering:

    .. code-block:: javascript

        "tags": [
            "license-os:centos7",
            "os-family:linux",
            "license-application:postgresql",
            "support:premium"
        ]

    Tags filtering:

     - ?tag=IaaS - filter by full tag name, using the OR method. Can be a list.
     - ?rtag=os-family:linux - filter by full tag name, using the AND method. Can be a list.
     - ?tag__license-os=centos7 - filter by tags with a particular prefix.

    Tags ordering:

     - ?o=tag__license-os - order by tag with a particular prefix. Instances without the given tag will not be returned.
    """
    return super(ResourceSummaryViewSet, self).list(request, *args, **kwargs)
0.003904
def resize(self, size, disk=None): """ Resize the droplet :param size: a size slug or a `Size` object representing the size to resize to :type size: string or `Size` :param bool disk: Set to `True` for a permanent resize, including disk changes :return: an `Action` representing the in-progress operation on the droplet :rtype: Action :raises DOAPIError: if the API endpoint replies with an error """ if isinstance(size, Size): size = size.slug opts = {"disk": disk} if disk is not None else {} return self.act(type='resize', size=size, **opts)
0.002903
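A hedged usage sketch for resize, assuming droplet is an existing Droplet object from this client library (the size slug is illustrative):

action = droplet.resize('4gb', disk=True)  # permanent resize, including the disk
# The returned Action represents an in-progress operation; poll or wait on it
# using whatever helper the library provides before relying on the new size.
print(action)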
def find_resources(self, rsrc_type, sort=None, yield_pages=False, **kwargs): """Find instances of `rsrc_type` that match the filter in `**kwargs`""" return rsrc_type.find(self, sort=sort, yield_pages=yield_pages, **kwargs)
0.012605
def make_request( ins, method, url, stripe_account=None, params=None, headers=None, **kwargs ): """ Return a deferred or handle error. For overriding in various classes. """ if txstripe.api_key is None: raise error.AuthenticationError( 'No API key provided. (HINT: set your API key using ' '"stripe.api_key = <API-KEY>"). You can generate API keys ' 'from the Stripe web interface. See https://stripe.com/api ' 'for details, or email [email protected] if you have any ' 'questions.') abs_url = '{}{}'.format(txstripe.api_base, url) ua = { 'lang': 'python', 'publisher': 'lextoumbourou', 'httplib': 'Twisted', } headers = headers or {} headers.update({ 'X-Stripe-Client-User-Agent': util.json.dumps(ua), 'User-Agent': 'txstripe', 'Authorization': 'Bearer %s' % (txstripe.api_key,) }) if stripe_account: headers['Stripe-Account'] = stripe_account if txstripe.api_version is not None: headers['Stripe-Version'] = txstripe.api_version if method == 'get' or method == 'delete': data = None elif method == 'post': data = {k: v for (k, v) in _api_encode(params)} params = None else: raise error.APIConnectionError( 'Unrecognized HTTP method %r. This may indicate a bug in the ' 'Stripe bindings.' % (method,)) resp = yield treq.request( method, abs_url, params=params, data=data, headers=headers, **kwargs) if resp.code >= 400: yield util.handle_api_error(resp) return body = yield resp.json() defer.returnValue( convert_to_stripe_object( body, txstripe.api_key, stripe_account))
0.000555
def count_tokens(tokens, to_lower=False, counter=None):
    r"""Counts tokens in the specified list of tokens.

    For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may
    look like::

        (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)


    Parameters
    ----------
    tokens : list of str
        A source list of tokens.
    to_lower : bool, default False
        Whether to convert the tokens to lower case.
    counter : Counter or None, default None
        The Counter instance to be updated with the counts of `tokens`. If
        None, return a new Counter instance counting tokens from `tokens`.


    Returns
    -------
    The `counter` Counter instance after being updated with the token
    counts of `tokens`. If `counter` is None, return a new Counter instance
    counting tokens from `tokens`.


    Examples
    --------
    >>> import re
    >>> source_str = ' Life is great ! \n life is good . \n'
    >>> source_str_tokens = filter(None, re.split(' |\n', source_str))
    >>> gluonnlp.data.count_tokens(source_str_tokens)
    Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})
    """
    if to_lower:
        tokens = [t.lower() for t in tokens]

    if counter is None:
        return Counter(tokens)
    else:
        counter.update(tokens)
        return counter
0.002141
def build_ebound_table(self): """ Build and return an EBOUNDS table with the encapsulated data. """ cols = [ Column(name="E_MIN", dtype=float, data=self._emin, unit='MeV'), Column(name="E_MAX", dtype=float, data=self._emax, unit='MeV'), Column(name="E_REF", dtype=float, data=self._eref, unit='MeV'), Column(name="REF_DNDE", dtype=float, data=self._ref_dnde, unit='ph / (MeV cm2 s)'), Column(name="REF_FLUX", dtype=float, data=self._ref_flux, unit='ph / (cm2 s)'), Column(name="REF_EFLUX", dtype=float, data=self._ref_eflux, unit='MeV / (cm2 s)'), Column(name="REF_NPRED", dtype=float, data=self._ref_npred, unit='ph') ] tab = Table(data=cols) return tab
0.00232
def columnForCell(self, cell): """ Returns the index of the column that a cell belongs to. :param cell: (int) Cell index :returns: (int) Column index """ self._validateCell(cell) return int(cell / self.cellsPerColumn)
0.004032
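The cell-to-column mapping above is plain integer division; a tiny sketch with an assumed cellsPerColumn of 32:

cells_per_column = 32   # assumed value, not from the source
for cell in (0, 31, 32, 100):
    print(cell, '->', cell // cells_per_column)   # 0->0, 31->0, 32->1, 100->3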
def multi_replace(instr, search_list=None, repl_list=None):
    """
    Does a string replace with a list of search and replacements

    TODO: rename
    """
    search_list = [] if search_list is None else search_list
    repl_list = [''] * len(search_list) if repl_list is None else repl_list
    for ser, repl in zip(search_list, repl_list):
        instr = instr.replace(ser, repl)
    return instr
0.002941
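A short illustrative call of multi_replace:

text = 'the quick brown fox'
print(multi_replace(text, search_list=['quick', 'brown'], repl_list=['slow', 'red']))
# -> 'the slow red fox'
print(multi_replace(text, search_list=['quick ', 'brown ']))  # no repl_list: matches are removed
# -> 'the fox'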
def Read(self, expected_ids, read_data=True): """Read ADB messages and return FileSync packets.""" if self.send_idx: self._Flush() # Read one filesync packet off the recv buffer. header_data = self._ReadBuffered(self.recv_header_len) header = struct.unpack(self.recv_header_format, header_data) # Header is (ID, ...). command_id = self.wire_to_id[header[0]] if command_id not in expected_ids: if command_id == b'FAIL': reason = '' if self.recv_buffer: reason = self.recv_buffer.decode('utf-8', errors='ignore') raise usb_exceptions.AdbCommandFailureException('Command failed: {}'.format(reason)) raise adb_protocol.InvalidResponseError( 'Expected one of %s, got %s' % (expected_ids, command_id)) if not read_data: return command_id, header[1:] # Header is (ID, ..., size). size = header[-1] data = self._ReadBuffered(size) return command_id, header[1:-1], data
0.002727
def find(self, nameFilter=None, typeFilter=None, enabledFilter=None, serviceId=None): """ Gets the list of Historian connectors, they are used to configure the Watson IoT Platform to store IoT data in compatible services. Parameters: - nameFilter(string) - Filter the results by the specified name - typeFilter(string) - Filter the results by the specified type, Available values : cloudant, eventstreams - enabledFilter(boolean) - Filter the results by the enabled flag - serviceId(string) - Filter the results by the service id - limit(number) - Max number of results returned, defaults 25 - bookmark(string) - used for paging through results Throws APIException on failure. """ queryParms = {} if nameFilter: queryParms["name"] = nameFilter if typeFilter: queryParms["type"] = typeFilter if enabledFilter: queryParms["enabled"] = enabledFilter if serviceId: queryParms["serviceId"] = serviceId return IterableConnectorList(self._apiClient, filters=queryParms)
0.008914
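A hedged calling sketch for find; connectors stands for whatever object exposes the method above, and the filter values are illustrative:

# Iterate over enabled Cloudant connectors; the returned iterable handles paging internally.
for connector in connectors.find(typeFilter="cloudant", enabledFilter=True):
    print(connector)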
def user_exists(self, user, note=None, loglevel=logging.DEBUG): """Returns true if the specified username exists. @param user: username to check for @param note: See send() @type user: string @rtype: boolean """ shutit = self.shutit shutit.handle_note(note) exists = False if user == '': return exists # v the space is intentional, to avoid polluting bash history. # The quotes before XIST are deliberate, to prevent the command from matching the expect. ret = self.send(ShutItSendSpec(self, send=' command id %s && echo E""XIST || echo N""XIST' % user, expect=['NXIST', 'EXIST'], echo=False, loglevel=loglevel, ignore_background=True)) if ret: exists = True # sync with the prompt self.expect(self.default_expect) shutit.handle_note_after(note=note) return exists
0.040797
def user_method(user_event): """Decorator of the Pdb user_* methods that controls the RemoteSocket.""" def wrapper(self, *args): stdin = self.stdin is_sock = isinstance(stdin, RemoteSocket) try: try: if is_sock and not stdin.connect(): return return user_event(self, *args) except Exception: self.close() raise finally: if is_sock and stdin.closed(): self.do_detach(None) return wrapper
0.001767
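A hedged sketch of how a decorator like user_method is typically applied inside a Pdb subclass (class and method names are illustrative of the pattern, not taken from the source):

import pdb

class RemotePdb(pdb.Pdb):

    @user_method
    def user_line(self, frame):
        # The wrapper first makes sure the RemoteSocket is connected,
        # then forwards to the normal Pdb behaviour; errors close the socket.
        return pdb.Pdb.user_line(self, frame)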
def _refresh_state(self): """ Refresh the job info. """ # DataFlow's DataflowPipelineResult does not refresh state, so we have to do it ourselves # as a workaround. # TODO(Change this to use runner_results.state once it refreshes itself) dataflow_internal_job = ( self._runner_results._runner.dataflow_client.get_job(self._runner_results.job_id())) self._is_complete = str(dataflow_internal_job.currentState) in ['JOB_STATE_STOPPED', 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED'] self._fatal_error = getattr(self._runner_results._runner, 'last_error_msg', None) # Sometimes Dataflow does not populate runner.last_error_msg even if the job fails. if self._fatal_error is None and self._runner_results.state == 'FAILED': self._fatal_error = 'FAILED'
0.009833
def load(self): """Load the library.""" if not git: raise EnvironmentError(MISSING_GIT_ERROR) if os.path.exists(self.path): if not config.CACHE_DISABLE: return shutil.rmtree(self.path, ignore_errors=True) with files.remove_on_exception(self.path): url = self.GIT_URL.format(**vars(self)) repo = git.Repo.clone_from( url=url, to_path=self.path, b=self.branch) if self.commit: repo.head.reset(self.commit, index=True, working_tree=True)
0.003401
def load_config(self): """ Used to dynamically load variables from the Flask application config into the blueprint. To tell this blueprint to pull configuration from the app, just set key-value pairs in the ``from_config`` dict. Keys are the name of the local variable to set on the blueprint object, and values are the variable name in the Flask application config. For example: blueprint.from_config["session.client_id"] = "GITHUB_OAUTH_CLIENT_ID" """ for local_var, config_var in self.from_config.items(): value = flask.current_app.config.get(config_var) if value: if "." in local_var: # this is a dotpath -- needs special handling body, tail = local_var.rsplit(".", 1) obj = getattrd(self, body) setattr(obj, tail, value) else: # just use a normal setattr call setattr(self, local_var, value)
0.002836
def markers(data, marker, f_tooltip=None, marker_preferred_size=32): """ Draw markers :param data: data access object :param marker: full filename of the marker image :param f_tooltip: function to generate a tooltip on mouseover :param marker_preferred_size: size in pixel for the marker images """ from geoplotlib.layers import MarkersLayer _global_config.layers.append(MarkersLayer(data, marker, f_tooltip, marker_preferred_size))
0.004264
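A hedged usage sketch in the usual geoplotlib calling style (file names are illustrative, and the 'name' column is an assumed field of the data):

import geoplotlib
from geoplotlib.utils import read_csv

data = read_csv('poi.csv')   # must contain 'lat' and 'lon' columns
geoplotlib.markers(data, 'marker.png', f_tooltip=lambda r: r['name'])
geoplotlib.show()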
def calc_refr_sp_wc_v1(self):
    """Calculate refreezing of the water content within the snow layer and
    update both the snow layer's ice and water content.

    Required control parameters:
      |NmbZones|
      |ZoneType|
      |CFMax|
      |CFR|

    Required derived parameter:
      |TTM|

    Required flux sequences:
      |TC|

    Calculated flux sequences:
      |Refr|

    Required state sequence:
      |WC|

    Updated state sequence:
      |SP|

    Basic equations:
      :math:`\\frac{dSP}{dt} = + Refr` \n
      :math:`\\frac{dWC}{dt} = - Refr` \n
      :math:`Refr = min(cfr \\cdot cfmax \\cdot (TTM-TC), WC)`

    Examples:

        Six zones are initialized with the same threshold
        temperature, degree day factor and refreezing coefficient, but
        with different zone types and initial states:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> nmbzones(6)
        >>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD)
        >>> cfmax(4.0)
        >>> cfr(0.1)
        >>> derived.ttm = 2.0
        >>> states.sp = 2.0
        >>> states.wc = 0.0, 1.0, 1.0, 1.0, 0.5, 0.0

        Note that the assumed length of the simulation step is only
        a half day. Hence the effective value of the degree day
        factor is not 4 but 2:

        >>> cfmax
        cfmax(4.0)
        >>> cfmax.values
        array([ 2., 2., 2., 2., 2., 2.])

        When the actual temperature is equal to the threshold
        temperature for melting and refreezing, no refreezing
        occurs and the states remain unchanged:

        >>> fluxes.tc = 2.0
        >>> model.calc_refr_sp_wc_v1()
        >>> fluxes.refr
        refr(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        >>> states.sp
        sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0)
        >>> states.wc
        wc(0.0, 1.0, 1.0, 1.0, 0.5, 0.0)

        The same holds true for an actual temperature higher than
        the threshold temperature:

        >>> states.sp = 2.0
        >>> states.wc = 0.0, 1.0, 1.0, 1.0, 0.5, 0.0
        >>> fluxes.tc = 3.0
        >>> model.calc_refr_sp_wc_v1()
        >>> fluxes.refr
        refr(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        >>> states.sp
        sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0)
        >>> states.wc
        wc(0.0, 1.0, 1.0, 1.0, 0.5, 0.0)

        Even with an actual temperature 3°C above the threshold
        temperature, no refreezing occurs; the snow states of the
        first zone, an internal lake, are simply kept at zero:

        >>> states.sp = 2.0
        >>> states.wc = 0.0, 1.0, 1.0, 1.0, 0.5, 0.0
        >>> fluxes.tc = 5.0
        >>> model.calc_refr_sp_wc_v1()
        >>> fluxes.refr
        refr(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        >>> states.sp
        sp(0.0, 2.0, 2.0, 2.0, 2.0, 2.0)
        >>> states.wc
        wc(0.0, 1.0, 1.0, 1.0, 0.5, 0.0)

        With an actual temperature 3°C below the threshold
        temperature, refreezing can occur. Actual refreezing is
        consistent with potential refreezing, except for the first
        zone, which is an internal lake, and the last two zones, for
        which potential refreezing exceeds the available liquid water
        content of the snow layer:

        >>> states.sp = 2.0
        >>> states.wc = 0.0, 1.0, 1.0, 1.0, 0.5, 0.0
        >>> fluxes.tc = -1.0
        >>> model.calc_refr_sp_wc_v1()
        >>> fluxes.refr
        refr(0.0, 0.6, 0.6, 0.6, 0.5, 0.0)
        >>> states.sp
        sp(0.0, 2.6, 2.6, 2.6, 2.5, 2.0)
        >>> states.wc
        wc(0.0, 0.4, 0.4, 0.4, 0.0, 0.0)

    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    for k in range(con.nmbzones):
        if con.zonetype[k] != ILAKE:
            if flu.tc[k] < der.ttm[k]:
                flu.refr[k] = min(con.cfr[k]*con.cfmax[k] *
                                  (der.ttm[k]-flu.tc[k]), sta.wc[k])
                sta.sp[k] += flu.refr[k]
                sta.wc[k] -= flu.refr[k]
            else:
                flu.refr[k] = 0.
        else:
            flu.refr[k] = 0.
            sta.wc[k] = 0.
            sta.sp[k] = 0.
0.000226
def fetch(self, ui_version=values.unset): """ Fetch a ConfigurationInstance :param unicode ui_version: Pinned UI version :returns: Fetched ConfigurationInstance :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance """ return self._proxy.fetch(ui_version=ui_version, )
0.005882
def find_coverage(self, zoom): """ Returns the bounding box (minx, miny, maxx, maxy) of an adjacent group of tiles at this zoom level. """ # Find a group of adjacent available tiles at this zoom level rows = self._query('''SELECT tile_column, tile_row FROM tiles WHERE zoom_level=? ORDER BY tile_column, tile_row;''', (zoom,)) t = rows.fetchone() xmin, ymin = t previous = t while t and t[0] - previous[0] <= 1: # adjacent, go on previous = t t = rows.fetchone() xmax, ymax = previous # Transform (xmin, ymin) (xmax, ymax) to pixels S = self.tilesize bottomleft = (xmin * S, (ymax + 1) * S) topright = ((xmax + 1) * S, ymin * S) # Convert center to (lon, lat) proj = GoogleProjection(S, [zoom]) # WGS84 return proj.unproject_pixels(bottomleft, zoom) + proj.unproject_pixels(topright, zoom)
0.002913
def krylovMethod(self,tol=1e-8): """ We obtain ``pi`` by using the :func:``gmres`` solver for the system of linear equations. It searches in Krylov subspace for a vector with minimal residual. The result is stored in the class attribute ``pi``. Example ------- >>> P = np.array([[0.5,0.5],[0.6,0.4]]) >>> mc = markovChain(P) >>> mc.krylovMethod() >>> print(mc.pi) [ 0.54545455 0.45454545] Parameters ---------- tol : float, optional(default=1e-8) Tolerance level for the precision of the end result. A lower tolerance leads to more accurate estimate of ``pi``. Remarks ------- For large state spaces, this method may not always give a solution. Code due to http://stackoverflow.com/questions/21308848/ """ P = self.getIrreducibleTransitionMatrix() #if P consists of one element, then set self.pi = 1.0 if P.shape == (1, 1): self.pi = np.array([1.0]) return size = P.shape[0] dP = P - eye(size) #Replace the first equation by the normalizing condition. A = vstack([np.ones(size), dP.T[1:,:]]).tocsr() rhs = np.zeros((size,)) rhs[0] = 1 pi, info = gmres(A, rhs, tol=tol) if info != 0: raise RuntimeError("gmres did not converge") self.pi = pi
0.018182
def extract(zf, content, out_dir='.'):
    '''
    Extracting a Java 1.6 XNAT ZIP archive in Python.

    :param zf: ZipFile object
    :type zf: zipfile.ZipFile
    :param content: file-like object holding the raw archive bytes
                    (re-written to a temporary file if the ZIP is corrupt)
    :param out_dir: Output directory
    :type out_dir: str
    '''
    previous_header_offset = 0
    compensation = Namespace(value=2**32, factor=0)
    for i,member in enumerate(zf.infolist()):
        '''
        Right... so when Java 1.6 produces a Zip filesystem that exceeds
        2^32 bytes, the Central Directory local file header offsets after
        the 2^32 byte appear to overflow. The Python zipfile module then
        adds any unexpected bytes to each header offset thereafter. This
        attempts to fix that. My guess is that this comment might make
        perfect sense now, but will make absolutely no sense in about a year.
        '''
        # undo concat padding added from zipfile.py:819
        if i == 0:
            concat = member.header_offset
        member.header_offset -= concat
        # if a header offset moves backward, add 2^32 bytes * factor
        if previous_header_offset > member.header_offset:
            compensation.factor += 1
        previous_header_offset = member.header_offset
        member.header_offset += compensation.value * compensation.factor
        # read the archive member into a bytes file-like object
        try:
            bio = io.BytesIO(zf.read(member.filename))
        except zipfile.BadZipfile:
            with tf.NamedTemporaryFile(dir=out_dir, prefix="xnat", suffix=".zip", delete=False) as fo:
                content.seek(0)
                fo.write(content.read())
            raise DownloadError("bad zip file, written to %s" % fo.name)
        # xnat archives may contain files that are gzipped without the .gz
        if not member.filename.endswith(".gz"):
            try:
                gz = gzip.GzipFile(fileobj=bio, mode="rb")
                gz.read()
                bio = gz
            except IOError:
                pass
        # write the file out to the filesystem
        bio.seek(0)
        f = os.path.join(out_dir, os.path.basename(member.filename))
        with open(f, "wb") as fo:
            fo.write(bio.read())
0.003142
def proximal(self): """Return the `proximal factory` of the functional. The proximal operator separates over separable sums. Returns ------- proximal : combine_proximals """ proximals = [func.proximal for func in self.functionals] return combine_proximals(*proximals)
0.006006
def handle_keypress(self, k): """Last resort for keypresses.""" if k == "esc": self.save_file() raise urwid.ExitMainLoop() elif k == "delete": # delete at end of line self.walker.combine_focus_with_next() elif k == "backspace": # backspace at beginning of line self.walker.combine_focus_with_prev() elif k == "enter": # start new line self.walker.split_focus() # move the cursor to the new line and reset pref_col self.view.keypress(size, "down") self.view.keypress(size, "home")
0.003072
def log_create(self, instance, **kwargs):
    """
    Helper method to create a new log entry. This method automatically populates some fields when no
    explicit value is given.

    :param instance: The model instance to log a change for.
    :type instance: Model
    :param kwargs: Field overrides for the :py:class:`LogEntry` object.
    :return: The new log entry or `None` if there were no changes.
    :rtype: LogEntry
    """
    changes = kwargs.get('changes', None)
    pk = self._get_pk_value(instance)

    if changes is not None:
        kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
        kwargs.setdefault('object_pk', pk)
        kwargs.setdefault('object_repr', smart_text(instance))

        if isinstance(pk, integer_types):
            kwargs.setdefault('object_id', pk)

        get_additional_data = getattr(instance, 'get_additional_data', None)
        if callable(get_additional_data):
            kwargs.setdefault('additional_data', get_additional_data())

        # Delete log entries with the same pk as a newly created model. This should only be necessary when a pk is
        # used twice.
        if kwargs.get('action', None) is LogEntry.Action.CREATE:
            if kwargs.get('object_id', None) is not None and self.filter(content_type=kwargs.get('content_type'),
                                                                         object_id=kwargs.get('object_id')).exists():
                self.filter(content_type=kwargs.get('content_type'),
                            object_id=kwargs.get('object_id')).delete()
            else:
                self.filter(content_type=kwargs.get('content_type'),
                            object_pk=kwargs.get('object_pk', '')).delete()
        # save LogEntry to the same database the instance is using
        db = instance._state.db
        return self.create(**kwargs) if db is None or db == '' else self.using(db).create(**kwargs)
    return None
0.005099
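A hedged sketch of a direct call to log_create in the style of django-auditlog's manager API (the instance and the diff are illustrative, and the import path assumes django-auditlog):

import json
from auditlog.models import LogEntry   # assumed import path

entry = LogEntry.objects.log_create(
    instance,                                      # an already-saved model instance
    action=LogEntry.Action.UPDATE,
    changes=json.dumps({'name': ['old', 'new']}),  # field -> [old value, new value]
)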
def run(self, train_set, valid_set=None, test_set=None, train_size=None, epoch_controllers=None): """ Run until the end. :param epoch_controllers: deprecated """ epoch_controllers = epoch_controllers if epoch_controllers else [] epoch_controllers += self._epoch_controllers if isinstance(train_set, Dataset): dataset = train_set train_set = dataset.train_set() valid_set = dataset.valid_set() test_set = dataset.test_set() train_size = dataset.train_size() self._current_train_set = train_set self._current_valid_set = valid_set self._current_test_set = test_set if epoch_controllers: for controller in epoch_controllers: controller.bind(self) timer = Timer() for _ in self.train(train_set, valid_set=valid_set, test_set=test_set, train_size=train_size): if epoch_controllers: for controller in epoch_controllers: controller.invoke() if self._ended: break if self._report_time: timer.report()
0.00339
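A hedged calling sketch for run; trainer, my_dataset, the batch iterables and my_controller are placeholders for whatever concrete classes the surrounding framework provides:

# Pass a Dataset object and let run() pull the train/valid/test splits from it,
trainer.run(my_dataset, epoch_controllers=[my_controller])
# or pass the splits explicitly together with the training size.
trainer.run(train_batches, valid_set=valid_batches,
            test_set=test_batches, train_size=1000)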
def cli(self, prt=sys.stdout): """Command-line interface to print specified GO Terms from the DAG source .""" kws = self.objdoc.get_docargs(prt=None) print("KWS", kws) goids = GetGOs().get_goids(kws.get('GO'), kws.get('GO_FILE'), sys.stdout) if not goids and 'name' in kws: goids = self.objsub.get_goids(kws['obo'], kws['name']) self.objsub.prt_goterms(kws['obo'], goids, prt, b_prt=False) print("Printing {N:6} GO IDs: {GOs}".format(N=len(goids), GOs=goids))
0.00759
def create_tags(user): """Create a tag.""" values = { 'id': utils.gen_uuid(), 'created_at': datetime.datetime.utcnow().isoformat() } values.update(schemas.tag.post(flask.request.json)) with flask.g.db_conn.begin(): where_clause = sql.and_( _TABLE.c.name == values['name']) query = sql.select([_TABLE.c.id]).where(where_clause) if flask.g.db_conn.execute(query).fetchone(): raise dci_exc.DCIConflict('Tag already exists', values) # create the label/value row query = _TABLE.insert().values(**values) flask.g.db_conn.execute(query) result = json.dumps({'tag': values}) return flask.Response(result, 201, content_type='application/json')
0.001264