text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def on(self, left_speed, right_speed): """ Start rotating the motors according to ``left_speed`` and ``right_speed`` forever. Speeds can be percentages or any SpeedValue implementation. """ (left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed) # Set all parameters self.left_motor.speed_sp = int(round(left_speed_native_units)) self.right_motor.speed_sp = int(round(right_speed_native_units)) # This debug involves disk I/O to pull speed_sp so only uncomment # if you need to troubleshoot in more detail. # log.debug("%s: on at left-speed %s, right-speed %s" % # (self, self.left_motor.speed_sp, self.right_motor.speed_sp)) # Start the motors self.left_motor.run_forever() self.right_motor.run_forever()
0.004489
def strftime(dt, fmt): ''' `strftime` implementation working before 1900 ''' if _illegal_s.search(fmt): raise TypeError("This strftime implementation does not handle %s") if dt.year > 1900: return dt.strftime(fmt) fmt = fmt.replace('%c', '%a %b %d %H:%M:%S %Y')\ .replace('%Y', str(dt.year))\ .replace('%y', '{:04}'.format(dt.year)[-2:]) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6*(delta // 100 + delta // 400) year = year + off # Move to around the year 2000 year = year + ((2000 - year)//28)*28 timetuple = dt.timetuple() return time.strftime(fmt, (year,) + timetuple[1:])
0.001289
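A minimal standalone sketch of the year-remapping step used by the strftime helper in the row above; it only shows which proxy year the 28-year-cycle arithmetic selects for a hypothetical pre-1900 date, without asserting anything beyond what that code computes.

import datetime

dt = datetime.datetime(1885, 7, 4)          # illustrative pre-1900 date
year = dt.year
delta = 2000 - year
year += 6 * (delta // 100 + delta // 400)   # compensate for skipped leap centuries
year += ((2000 - year) // 28) * 28          # jump forward in 28-year calendar cycles
print(dt.year, "->", year)                  # 1885 -> 1975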
def subsampleCorrelatedData(A_t, g=None, fast=False, conservative=False, verbose=False): """Determine the indices of an uncorrelated subsample of the data. Parameters ---------- A_t : np.ndarray A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector. g : float, optional if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None) fast : bool, optional, default=False fast can be set to True to give a less accurate but very quick estimate (default: False) conservative : bool, optional, default=False if set to True, uniformly-spaced indices are chosen with interval ceil(g), where g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of approximately g in order to end up with approximately T/g total indices verbose : bool, optional, default=False if True, some output is printed Returns ------- indices : list of int the indices of an uncorrelated subsample of the data Notes ----- The statistical inefficiency is computed with the function computeStatisticalInefficiency(). ToDo ---- Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral. Examples -------- Subsample a correlated timeseries to extract an effectively uncorrelated dataset. >>> from pymbar import testsystems >>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0) # generate a test correlated timeseries >>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries >>> A_n = A_t[indices] # extract uncorrelated samples Extract uncorrelated samples from multiple timeseries data from the same process. >>> # Generate multiple correlated timeseries data of different lengths. >>> T_k = [1000, 2000, 3000, 4000, 5000] >>> K = len(T_k) # number of timeseries >>> tau = 5.0 # exponential relaxation time >>> A_kt = [ testsystems.correlated_timeseries_example(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k >>> # Estimate statistical inefficiency from all timeseries data. >>> g = statisticalInefficiencyMultiple(A_kt) >>> # Count number of uncorrelated samples in each timeseries. >>> N_k = np.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k >>> N = N_k.sum() # total number of uncorrelated samples >>> # Subsample all trajectories to produce uncorrelated samples >>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t] >>> # Concatenate data into one timeseries. >>> A_n = np.zeros([N], np.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples >>> A_n[0:N_k[0]] = A_kn[0] >>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k] """ # Create np copy of arrays. A_t = np.array(A_t) # Get the length of the timeseries. T = A_t.size # Compute the statistical inefficiency for the timeseries. if not g: if verbose: print("Computing statistical inefficiency...") g = statisticalInefficiency(A_t, A_t, fast=fast) if verbose: print("g = %f" % g) if conservative: # Round g up to determine the stride we can use to pick out regularly-spaced uncorrelated samples. stride = int(math.ceil(g)) if verbose: print("conservative subsampling: using stride of %d" % stride) # Assemble list of indices of uncorrelated snapshots. indices = range(0, T, stride) else: # Choose indices as floor(n*g), with n = 0,1,2,..., until we run out of data. 
indices = [] n = 0 while int(round(n * g)) < T: t = int(round(n * g)) # ensure we don't sample the same point twice if (n == 0) or (t != indices[n - 1]): indices.append(t) n += 1 if verbose: print("standard subsampling: using average stride of %f" % g) # Number of samples in subsampled timeseries. N = len(indices) if verbose: print("The resulting subsampled set has %d samples (original timeseries had %d)." % (N, T)) # Return the list of indices of uncorrelated snapshots. return indices
0.004409
def generate_version_py(packagename=None, version=None, release=None, debug=None, uses_git=None, srcdir='.'): """ Generate a version.py file in the package with version information, and update developer version strings. This function should normally be called without any arguments. In this case the package name and version is read in from the ``setup.cfg`` file (from the ``name`` or ``package_name`` entry and the ``version`` entry in the ``[metadata]`` section). If the version is a developer version (of the form ``3.2.dev``), the version string will automatically be expanded to include a sequential number as a suffix (e.g. ``3.2.dev13312``), and the updated version string will be returned by this function. Based on this updated version string, a ``version.py`` file will be generated inside the package, containing the version string as well as more detailed information (for example the major, minor, and bugfix version numbers, a ``release`` flag indicating whether the current version is a stable or developer version, and so on. """ if packagename is not None: warnings.warn('The packagename argument to generate_version_py has ' 'been deprecated and will be removed in future. Specify ' 'the package name in setup.cfg instead', AstropyDeprecationWarning) if version is not None: warnings.warn('The version argument to generate_version_py has ' 'been deprecated and will be removed in future. Specify ' 'the version number in setup.cfg instead', AstropyDeprecationWarning) if release is not None: warnings.warn('The release argument to generate_version_py has ' 'been deprecated and will be removed in future. We now ' 'use the presence of the "dev" string in the version to ' 'determine whether this is a release', AstropyDeprecationWarning) # We use ConfigParser instead of read_configuration here because the latter # only reads in keys recognized by setuptools, but we need to access # package_name below. conf = ConfigParser() conf.read('setup.cfg') if conf.has_option('metadata', 'name'): packagename = conf.get('metadata', 'name') elif conf.has_option('metadata', 'package_name'): # The package-template used package_name instead of name for a while warnings.warn('Specifying the package name using the "package_name" ' 'option in setup.cfg is deprecated - use the "name" ' 'option instead.', AstropyDeprecationWarning) packagename = conf.get('metadata', 'package_name') elif packagename is not None: # deprecated pass else: sys.stderr.write('ERROR: Could not read package name from setup.cfg\n') sys.exit(1) if conf.has_option('metadata', 'version'): version = conf.get('metadata', 'version') add_git_devstr = True elif version is not None: # deprecated add_git_devstr = False else: sys.stderr.write('ERROR: Could not read package version from setup.cfg\n') sys.exit(1) if release is None: release = 'dev' not in version if not release and add_git_devstr: version += get_git_devstr(False) if uses_git is None: uses_git = not release # In some cases, packages have a - but this is a _ in the module. 
Since we # are only interested in the module here, we replace - by _ packagename = packagename.replace('-', '_') try: version_module = get_pkg_version_module(packagename) try: last_generated_version = version_module._last_generated_version except AttributeError: last_generated_version = version_module.version try: last_githash = version_module._last_githash except AttributeError: last_githash = version_module.githash current_release = version_module.release current_debug = version_module.debug except ImportError: version_module = None last_generated_version = None last_githash = None current_release = None current_debug = None if release is None: # Keep whatever the current value is, if it exists release = bool(current_release) if debug is None: # Likewise, keep whatever the current value is, if it exists debug = bool(current_debug) package_srcdir = os.path.join(srcdir, *packagename.split('.')) version_py = os.path.join(package_srcdir, 'version.py') if (last_generated_version != version or current_release != release or current_debug != debug): if '-q' not in sys.argv and '--quiet' not in sys.argv: log.set_threshold(log.INFO) if is_distutils_display_option(): # Always silence unnecessary log messages when display options are # being used log.set_threshold(log.WARN) log.info('Freezing version number to {0}'.format(version_py)) with open(version_py, 'w') as f: # This overwrites the actual version.py f.write(_get_version_py_str(packagename, version, last_githash, release, debug, uses_git=uses_git)) return version
0.001098
def init(self, input_dim=0, input_dims=None, no_prepare=False): """ Initialize the layer. :param no_prepare: avoid calling preparation function """ if self.initialized: return # configure input dimensions if input_dims: self.input_dims = input_dims self.input_dim = input_dims[0] else: self.input_dim = input_dim self.input_dims = [input_dim] # set default output dimension if self.output_dim == 0: self.output_dim = self.input_dim self.initialized = True # call prepare if not no_prepare: self.prepare() return self
0.002805
def get_keystone_endpoint(self, keystone_ip, api_version=None, admin_port=False): """Return keystone endpoint""" port = 5000 if admin_port: port = 35357 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), port) if api_version == 2: ep = base_ep + "/v2.0" else: ep = base_ep + "/v3" return ep
0.006383
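A standalone sketch of the URL construction performed by get_keystone_endpoint above; the address is a hypothetical value, given as bytes because the row calls .decode('utf-8') on it.

keystone_ip = b"10.0.0.5 "                  # hypothetical address, bytes as .decode() expects
admin_port = False
port = 35357 if admin_port else 5000
base_ep = "http://{}:{}".format(keystone_ip.strip().decode("utf-8"), port)
print(base_ep + "/v2.0")                    # http://10.0.0.5:5000/v2.0 when api_version == 2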
def upload_file(self, owner, id, name, **kwargs): """ Upload file Upload one file at a time to a dataset. This endpoint expects requests of type `application/octet-stream`. For example, assuming that you want to upload a local file named `file1.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset` and choose its name on data.world to be `better-name.csv`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -X PUT -H \"Content-Type: application/octet-stream\" \\ --data-binary @file1.csv \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files/better-name.csv ``` This method of upload is typically not supported by Swagger clients. Other HTTP clients can be used to supply the contents of the file directly in the body of the request. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upload_file(owner, id, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :param str name: File name and unique identifier within dataset. (required) :param bool expand_archive: Indicates whether a compressed file should be expanded upon upload. :return: SuccessMessage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.upload_file_with_http_info(owner, id, name, **kwargs) else: (data) = self.upload_file_with_http_info(owner, id, name, **kwargs) return data
0.003138
def get_headerReference(self, type_): """Return headerReference element of *type_* or None if not present.""" matching_headerReferences = self.xpath( "./w:headerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_) ) if len(matching_headerReferences) == 0: return None return matching_headerReferences[0]
0.008065
def get_plaintext_citations(bbl): """ Parse a ``*.bbl`` file to get a clean list of plaintext citations. :param bbl: Either the path to the .bbl file or the content of a ``.bbl`` \ file. :returns: A list of cleaned plaintext citations. """ # Handle path or content if os.path.isfile(bbl): with open(bbl, 'r') as fh: bbl_content = fh.read() else: bbl_content = bbl # Get a list of bibitems, taking the first item out as it is *before* the # first \bibitem bibitems = BIBITEMS_REGEX.split(bbl_content)[1:] # Delete the text after the \end{thebibliography} bibitems = [ENDTHEBIBLIOGRAPHY_REGEX.sub("", i).strip() for i in bibitems] # Clean every bibitem to have plaintext cleaned_bbl = [bibitem_as_plaintext(bibitem) for bibitem in bibitems] return cleaned_bbl
0.001159
def validate_wrap(self, value): ''' Checks that ``value`` is an instance of ``DocumentField.type``. if it is, then validation on its fields has already been done and no further validation is needed. ''' if not isinstance(value, self.type): self._fail_validation_type(value, self.type)
0.005814
def p_statement_switch(p): 'statement : SWITCH LPAREN expr RPAREN switch_case_list' p[0] = ast.Switch(p[3], p[5], lineno=p.lineno(1))
0.007092
def get_items(self): """Returns a list of SuperModel items """ uids = self.get_uids() if not uids: return [SuperModel(self.context)] items = map(lambda uid: SuperModel(uid), uids) return self._resolve_number_of_copies(items)
0.007042
def do(self, func, *args, **kwargs): """Apply the function to myself, and return myself. Look up the function in the database if needed. Pass it any arguments given, keyword or positional. Useful chiefly when chaining. """ if not callable(func): func = getattr(self.engine.function, func) func(self, *args, **kwargs) return self
0.004914
def register(linter): '''required method to auto register this checker ''' linter.register_checker(StringCurlyBracesFormatIndexChecker(linter)) linter.register_checker(StringLiteralChecker(linter))
0.004785
def load_hours(network, min_load=0.9, max_load=1, boundaries=[0, 8760]): """Plot number of hours with line loading in selected range. Parameters ---------- network: PyPSA network container Holds topology of grid including results from powerflow analysis min_load: float Choose lower bound of relative load max_load: float Choose upper bound of relative load boundaries: array Set boundaries of heatmap axis """ cmap_line = plt.cm.jet cmap_link = plt.cm.jet array_line = [['Line'] * len(network.lines), network.lines.index] load_lines = pd.Series(((abs(network.lines_t.p0[( abs(network.lines_t.p0.mul(network.snapshot_weightings, axis=0)) / network.lines.s_nom_opt >= min_load) & ( abs(network.lines_t.p0.mul(network.snapshot_weightings, axis=0)) / network.lines.s_nom_opt <= max_load)]) / abs(network.lines_t.p0[( abs(network.lines_t.p0) / network.lines.s_nom_opt >= min_load) & (abs(network.lines_t.p0) / network.lines.s_nom_opt <= max_load)])) .sum()).data, index=array_line) array_link = [['Link'] * len(network.links), network.links.index] load_links = pd.Series(((abs(network.links_t.p0[( abs(network.links_t.p0.mul(network.snapshot_weightings, axis=0)) / network.links.p_nom_opt >= min_load) & ( abs(network.links_t.p0.mul(network.snapshot_weightings, axis=0)) / network.links.p_nom_opt <= max_load)]) / abs(network.links_t.p0[( abs(network.links_t.p0) / network.links.p_nom_opt >= min_load) & (abs(network.links_t.p0) / network.links.p_nom_opt <= max_load)])) .sum()).data, index=array_link) load_hours = load_lines.append(load_links) ll = network.plot( line_colors=load_hours, line_cmap={ 'Line': cmap_line, 'Link': cmap_link}, bus_sizes=0, title="Number of hours with more then 90% load", line_widths=2) v1 = np.linspace(boundaries[0], boundaries[1], 101) v = np.linspace(boundaries[0], boundaries[1], 101) cb_Link = plt.colorbar(ll[2], boundaries=v1, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10]) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb.set_label('Number of hours')
0.004151
def Meissner(Tc=None, Pc=None, Vc=None): r'''Old (1942) relationship for estimating critical properties from each other. Two of the three properties are required. This model uses the "critical surface", a general plot of Tc vs Pc vs Vc. The model used 42 organic and inorganic compounds to derive the equation. The general equation is in [1]_: .. math:: P_c = \frac{2.08 T_c}{V_c-8} Parameters ---------- Tc : float, optional Critical temperature of fluid [K] Pc : float, optional Critical pressure of fluid [Pa] Vc : float, optional Critical volume of fluid [m^3/mol] Returns ------- Tc, Pc or Vc : float Critical property of fluid [K], [Pa], or [m^3/mol] Notes ----- The prediction of Tc from Pc and Vc is not tested, as this is not necessary anywhere, but it is implemented. Internal units are atm, cm^3/mol, and K. A slight error occurs when Pa, cm^3/mol and K are used instead, on the order of <0.2%. This equation is less accurate than that of Ihmels, but surprisingly close. The author also proposed means of estimated properties independently. Examples -------- Succinic acid [110-15-6] >>> Meissner(Tc=851.0, Vc=0.000308) 5978445.199999999 References ---------- .. [1] Meissner, H. P., and E. M. Redding. "Prediction of Critical Constants." Industrial & Engineering Chemistry 34, no. 5 (May 1, 1942): 521-26. doi:10.1021/ie50389a003. ''' if Tc and Vc: Vc = Vc*1E6 Pc = 20.8*Tc/(Vc-8) Pc = 101325*Pc # atm to Pa return Pc elif Tc and Pc: Pc = Pc/101325. # Pa to atm Vc = 104/5.0*Tc/Pc+8 Vc = Vc/1E6 # cm^3/mol to m^3/mol return Vc elif Pc and Vc: Pc = Pc/101325. # Pa to atm Vc = Vc*1E6 # m^3/mol to cm^3/mol Tc = 5./104.0*Pc*(Vc-8) return Tc else: raise Exception('Two of Tc, Pc, and Vc must be provided')
0.000492
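The docstring example in the Meissner row can be reproduced by hand with the same unit conversions the code performs (note the code uses the constant 20.8, while the docstring's displayed equation shows 2.08); a quick standalone check of that arithmetic:

Tc = 851.0                     # K, succinic acid, from the row's example
Vc = 0.000308 * 1e6            # m^3/mol -> cm^3/mol
Pc_atm = 20.8 * Tc / (Vc - 8)  # critical-surface correlation, in atm
print(Pc_atm * 101325)         # ~5978445.2 Pa, matching the docstring example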
def get_categories(self, app_name): """ Returns a list of the categories for an app name. """ cat_nums = self.apps.get(app_name, {}).get("cats", []) cat_names = [self.categories.get("%s" % cat_num, "") for cat_num in cat_nums] return cat_names
0.00639
def parse_tags(self, tags): """Parse tags into a dict. input tags: a comma separated list of 'key:value' pairs. Example: foo:bar,spam:eggs output dtags: a dict of tags. Example: {'foo': 'bar', 'spam': 'eggs'} """ dtags = {} if tags: try: dtags = dict([x.split(':') for x in tags.split(',')]) except ValueError: # one of the 'key:value' pairs was missing logger.info('Invalid tags passed: %s', tags) dtags = {} return dtags
0.003378
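The parsing rule described in parse_tags above, checked standalone on the docstring's own example input:

tags = "foo:bar,spam:eggs"
print(dict(x.split(":") for x in tags.split(",")))   # {'foo': 'bar', 'spam': 'eggs'}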
def smooth_normals(positions, normals): '''Assigns an averaged normal to each position based on all of the normals originally used for the position. ''' lookup = defaultdict(list) for position, normal in zip(positions, normals): lookup[position].append(normal) result = [] for position in positions: tx = ty = tz = 0 for x, y, z in lookup[position]: tx += x ty += y tz += z d = (tx * tx + ty * ty + tz * tz) ** 0.5 result.append((tx / d, ty / d, tz / d)) return result
0.001736
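A tiny numeric sketch of the averaging-and-renormalising step in smooth_normals above: two unit normals sharing a position are summed component-wise and divided by the length of the sum (the input normals are illustrative).

tx, ty, tz = 1.0 + 0.0, 0.0 + 1.0, 0.0 + 0.0    # component-wise sum of (1,0,0) and (0,1,0)
d = (tx * tx + ty * ty + tz * tz) ** 0.5
print((tx / d, ty / d, tz / d))                  # ~(0.707, 0.707, 0.0)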
def get_file(self, fid): """Get file from WeedFS. Returns file content. May be problematic for large files as content is stored in memory. Args: **fid**: File identifier <volume_id>,<file_name_hash> Returns: Content of the file with provided fid or None if file doesn't exist on the server .. versionadded:: 0.3.1 """ url = self.get_file_url(fid) return self.conn.get_raw_data(url)
0.004057
def Debugger_setAsyncCallStackDepth(self, maxDepth): """ Function path: Debugger.setAsyncCallStackDepth Domain: Debugger Method name: setAsyncCallStackDepth Parameters: Required arguments: 'maxDepth' (type: integer) -> Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default). No return value. Description: Enables or disables async call stacks tracking. """ assert isinstance(maxDepth, (int,) ), "Argument 'maxDepth' must be of type '['int']'. Received type: '%s'" % type( maxDepth) subdom_funcs = self.synchronous_command('Debugger.setAsyncCallStackDepth', maxDepth=maxDepth) return subdom_funcs
0.039945
def difference(self, reference, hypothesis, uem=None, uemified=False): """Get error analysis as `Annotation` Labels are (status, reference_label, hypothesis_label) tuples. `status` is either 'correct', 'confusion', 'missed detection' or 'false alarm'. `reference_label` is None in case of 'false alarm'. `hypothesis_label` is None in case of 'missed detection'. Parameters ---------- uemified : bool, optional Returns "uemified" version of reference and hypothesis. Defaults to False. Returns ------- errors : `Annotation` """ R, H, common_timeline = self.uemify( reference, hypothesis, uem=uem, collar=self.collar, skip_overlap=self.skip_overlap, returns_timeline=True) errors = Annotation(uri=reference.uri, modality=reference.modality) # loop on all segments for segment in common_timeline: # list of labels in reference segment rlabels = R.get_labels(segment, unique=False) # list of labels in hypothesis segment hlabels = H.get_labels(segment, unique=False) _, details = self.matcher(rlabels, hlabels) for r, h in details[MATCH_CORRECT]: track = errors.new_track(segment, prefix=MATCH_CORRECT) errors[segment, track] = (MATCH_CORRECT, r, h) for r, h in details[MATCH_CONFUSION]: track = errors.new_track(segment, prefix=MATCH_CONFUSION) errors[segment, track] = (MATCH_CONFUSION, r, h) for r in details[MATCH_MISSED_DETECTION]: track = errors.new_track(segment, prefix=MATCH_MISSED_DETECTION) errors[segment, track] = (MATCH_MISSED_DETECTION, r, None) for h in details[MATCH_FALSE_ALARM]: track = errors.new_track(segment, prefix=MATCH_FALSE_ALARM) errors[segment, track] = (MATCH_FALSE_ALARM, None, h) if uemified: return reference, hypothesis, errors else: return errors
0.000907
def parse_query_param(url, param): """Parses the query string of a URL and returns the value of a parameter. Args: url: A URL. param: A string representing the name of the parameter. Returns: The value of the parameter. """ try: return parse.parse_qs(parse.urlparse(url).query)[param][0] except: return None
0.005348
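What parse_query_param above extracts, shown directly with the stdlib calls it wraps; the URL is hypothetical.

from urllib import parse

url = "https://example.com/watch?v=abc123&t=42"
print(parse.parse_qs(parse.urlparse(url).query)["v"][0])   # abc123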
def parse_signed_request(self, signed_request): ''' parse signed request when using in-site app. Returns: dict object like { 'uid': 12345, 'access_token': 'ABC123XYZ', 'expires': unix-timestamp }, or None if parse failed. ''' def _b64_normalize(s): appendix = '=' * (4 - len(s) % 4) return s.replace('-', '+').replace('_', '/') + appendix sr = str(signed_request) logging.info('parse signed request: %s' % sr) enc_sig, enc_payload = sr.split('.', 1) sig = base64.b64decode(_b64_normalize(enc_sig)) data = _parse_json(base64.b64decode(_b64_normalize(enc_payload))) if data['algorithm'] != u'HMAC-SHA256': return None expected_sig = hmac.new(self.client_secret, enc_payload, hashlib.sha256).digest() if expected_sig == sig: data.user_id = data.uid = data.get('user_id', None) data.access_token = data.get('oauth_token', None) expires = data.get('expires', None) if expires: data.expires = data.expires_in = time.time() + expires return data return None
0.003333
def ticket_flag(self, which, new=None): """ Get or set a ticket flag. 'which' can be either a string ('APPEND_CR' etc.), or an integer. You should ALWAYS use a string, unless you really know what you are doing. """ flag = _get_flag(which, TicketFlags) if flag: if not self.capabilities.have_ticket_flag(flag): raise yubikey_base.YubiKeyVersionError('Ticket flag %s requires %s, and this is %s %d.%d' % (which, flag.req_string(self.capabilities.model), \ self.capabilities.model, self.ykver[0], self.ykver[1])) req_major, req_minor = flag.req_version() self._require_version(major=req_major, minor=req_minor) value = flag.to_integer() else: if type(which) is not int: raise yubico_exception.InputError('Unknown non-integer TicketFlag (%s)' % which) value = which return self.ticket_flags.get_set(value, new)
0.008212
def hs_indices(self): """tuple of (anchor_idx, addend_idxs) pair for each subtotal. Example:: ( (2, (0, 1, 2)), (3, (3,)), ('bottom', (4, 5)) ) Note that the `anchor_idx` item in the first position of each pair can be 'top' or 'bottom' as well as an int. The `addend_idxs` tuple will always contains at least one index (a subtotal with no addends is ignored). """ if self.dimension_type in {DT.MR_CAT, DT.LOGICAL}: return () return tuple( (subtotal.anchor_idx, subtotal.addend_idxs) for subtotal in self._subtotals )
0.00431
def describe(self, fields=None, io=None, **kwargs): """ :param fields: dict where the keys are field names that should be returned, and values should be set to True (by default, all fields are returned) :type fields: dict :param io: Include input and output fields in description; cannot be provided with *fields*; default is True if *fields* is not provided (deprecated) :type io: bool :returns: Description of the job :rtype: dict Returns a hash with key-value pairs containing information about the job, including its state and (optionally) its inputs and outputs, as described in the API documentation for the `/job-xxxx/describe <https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/job-xxxx/describe>`_ method. """ if fields is not None and io is not None: raise DXError('DXJob.describe: cannot provide non-None values for both fields and io') describe_input = {} if fields is not None: describe_input['fields'] = fields if io is not None: describe_input['io'] = io self._desc = dxpy.api.job_describe(self._dxid, describe_input, **kwargs) return self._desc
0.002976
def default_indexes( coords: Mapping[Any, Variable], dims: Iterable, ) -> 'OrderedDict[Any, pd.Index]': """Default indexes for a Dataset/DataArray. Parameters ---------- coords : Mapping[Any, xarray.Variable] Coordinate variables from which to draw default indexes. dims : iterable Iterable of dimension names. Returns ------- Mapping from indexing keys (levels/dimension names) to indexes used for indexing along that dimension. """ return OrderedDict((key, coords[key].to_index()) for key in dims if key in coords)
0.001645
def RIBVRFRouteLimitExceeded_VRFName(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") RIBVRFRouteLimitExceeded = ET.SubElement(config, "RIBVRFRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream") VRFName = ET.SubElement(RIBVRFRouteLimitExceeded, "VRFName") VRFName.text = kwargs.pop('VRFName') callback = kwargs.pop('callback', self._callback) return callback(config)
0.006211
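A standalone sketch of the XML skeleton assembled by the RIBVRFRouteLimitExceeded_VRFName row, using ElementTree the same way; the VRF name is hypothetical.

import xml.etree.ElementTree as ET

config = ET.Element("config")
event = ET.SubElement(config, "RIBVRFRouteLimitExceeded",
                      xmlns="http://brocade.com/ns/brocade-notification-stream")
ET.SubElement(event, "VRFName").text = "red-vrf"
print(ET.tostring(config).decode())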
def code_block(self, code, language, indent=0): """Example:: .. code-block:: python from __future__ import print_function import math print(math.sqrt(10.0)) """ if language not in [None, "console", "python", "ruby", "c"]: raise prefix = "\t" * indent code_prefix = "\t" * (indent + 1) lines = list() lines.append(prefix + ".. code-block:: %s" % language) lines.append(prefix) for line in code.split("\n"): lines.append(code_prefix + line.strip()) return "\n".join(lines)
0.00314
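A rough illustration of the reST text that the code_block method above emits at indent level 1, following the row's whitespace handling; the snippet content is arbitrary.

prefix, code_prefix = "\t", "\t\t"              # indent = 1 in the row's terms
lines = [prefix + ".. code-block:: python", prefix]
for line in "import math\nprint(math.sqrt(10.0))".split("\n"):
    lines.append(code_prefix + line.strip())
print("\n".join(lines))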
def get_certificate_der(self, filename): """ Return the DER coded X.509 certificate from the signature file. :param filename: Signature filename in APK :returns: DER coded X.509 certificate as binary """ pkcs7message = self.get_file(filename) pkcs7obj = cms.ContentInfo.load(pkcs7message) cert = pkcs7obj['content']['certificates'][0].chosen.dump() return cert
0.004608
def _write_ini(source_dict, namespace_name=None, level=0, indent_size=4, output_stream=sys.stdout): """this function prints the components of a configobj ini file. It is recursive for outputing the nested sections of the ini file.""" options = [ value for value in source_dict.values() if isinstance(value, Option) ] options.sort(key=lambda x: x.name) indent_spacer = " " * (level * indent_size) for an_option in options: print("%s# %s" % (indent_spacer, an_option.doc), file=output_stream) option_value = to_str(an_option) if an_option.reference_value_from: print( '%s# see "%s.%s" for the default or override it here' % ( indent_spacer, an_option.reference_value_from, an_option.name ), file=output_stream ) if an_option.likely_to_be_changed or an_option.has_changed: option_format = '%s%s=%s\n' else: option_format = '%s#%s=%s\n' if isinstance(option_value, six.string_types) and \ ',' in option_value: # quote lists unless they're already quoted if option_value[0] not in '\'"': option_value = '"%s"' % option_value print(option_format % (indent_spacer, an_option.name, option_value), file=output_stream) next_level = level + 1 namespaces = [ (key, value) for key, value in source_dict.items() if isinstance(value, Namespace) ] namespaces.sort(key=ValueSource._namespace_reference_value_from_sort) for key, namespace in namespaces: next_level_spacer = " " * next_level * indent_size print("%s%s%s%s\n" % (indent_spacer, "[" * next_level, key, "]" * next_level), file=output_stream) if namespace._doc: print("%s%s" % (next_level_spacer, namespace._doc), file=output_stream) if namespace._reference_value_from: print("%s#+include ./common_%s.ini\n" % (next_level_spacer, key), file=output_stream) if namespace_name: ValueSource._write_ini( source_dict=namespace, namespace_name="%s.%s" % (namespace_name, key), level=level+1, indent_size=indent_size, output_stream=output_stream ) else: ValueSource._write_ini( source_dict=namespace, namespace_name=key, level=level+1, indent_size=indent_size, output_stream=output_stream )
0.000966
def disconnect(self, node): """ Disconnect a node :param node: :return: """ rel = _rel_helper(lhs='a', rhs='b', ident='r', **self.definition) q = "MATCH (a), (b) WHERE id(a)={self} and id(b)={them} " \ "MATCH " + rel + " DELETE r" self.source.cypher(q, {'them': node.id})
0.005747
def on_message(self, message): """Pass response from server to process receive queue Args: message(str): Received message """ # Called in tornado loop try: self.log.debug("Got message %s", message) d = json_decode(message) response = deserialize_object(d, Response) if isinstance(response, (Return, Error)): request = self._request_lookup.pop(response.id) if isinstance(response, Error): # Make the message an exception so it can be raised response.message = ResponseError(response.message) else: request = self._request_lookup[response.id] # Transfer the work of the callback to cothread cothread.Callback(request.callback, response) except Exception: # If we don't catch the exception here, tornado will spew odd # error messages about 'HTTPRequest' object has no attribute 'path' self.log.exception("on_message(%r) failed", message)
0.001812
def _wait(self, args, now, cap, consumed_history, consumed_capacity): """ Check the consumed capacity against the limit and sleep """ for key in ['read', 'write']: if key in cap and cap[key] > 0: consumed_history[key].add(now, consumed_capacity[key]) consumed = consumed_history[key].value if consumed > 0 and consumed >= cap[key]: seconds = math.ceil(float(consumed) / cap[key]) LOG.debug("Rate limited throughput exceeded. Sleeping " "for %d seconds.", seconds) if callable(self.callback): callback_args = args + (seconds,) if self.callback(*callback_args): continue time.sleep(seconds)
0.002353
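The back-off arithmetic used by _wait above, worked for one hypothetical reading: 25 consumed units against a cap of 10 units per period yields a sleep of ceil(25 / 10) seconds.

import math

consumed, cap = 25.0, 10
print(math.ceil(consumed / cap))   # 3 seconds of sleep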
def calculate(bam_file, data, sv_bed): """Calculate coverage in parallel using mosdepth. Removes duplicates and secondary reads from the counts: if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue; """ params = {"min": dd.get_coverage_depth_min(data)} variant_regions = dd.get_variant_regions_merged(data) if not variant_regions: variant_regions = _create_genome_regions(data) # Back compatible with previous pre-mosdepth callable files callable_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))), "%s-coverage.callable.bed" % (dd.get_sample_name(data))) if not utils.file_uptodate(callable_file, bam_file): vr_quantize = ("0:1:%s:" % (params["min"]), ["NO_COVERAGE", "LOW_COVERAGE", "CALLABLE"]) to_calculate = [("variant_regions", variant_regions, vr_quantize, None, "coverage_perbase" in dd.get_tools_on(data)), ("sv_regions", bedutils.clean_file(sv_bed, data, prefix="svregions-"), None, None, False), ("coverage", bedutils.clean_file(dd.get_coverage(data), data, prefix="cov-"), None, DEPTH_THRESHOLDS, False)] depth_files = {} for target_name, region_bed, quantize, thresholds, per_base in to_calculate: if region_bed: cur_depth = {} depth_info = run_mosdepth(data, target_name, region_bed, quantize=quantize, thresholds=thresholds, per_base=per_base) for attr in ("dist", "regions", "thresholds", "per_base"): val = getattr(depth_info, attr, None) if val: cur_depth[attr] = val depth_files[target_name] = cur_depth if target_name == "variant_regions": callable_file = depth_info.quantize else: depth_files = {} final_callable = _subset_to_variant_regions(callable_file, variant_regions, data) return final_callable, depth_files
0.005291
def rows(self): """Iterate over all of the rows""" for s_name, s in self.sections.items(): # Yield the section header if s.name != 'Root': yield [''] # Unecessary, but makes for nice formatting. Should actually be done just before write yield ['Section', s.value] + s.property_names # Yield all of the rows for terms in the section for row in s.rows: term, value = row term = term.replace('root.', '').title() try: yield [term] + value except: yield [term] + [value]
0.007429
def bidiagonalize_real_matrix_pair_with_symmetric_products( mat1: np.ndarray, mat2: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8, check_preconditions: bool = True) -> Tuple[np.ndarray, np.ndarray]: """Finds orthogonal matrices that diagonalize both mat1 and mat2. Requires mat1 and mat2 to be real. Requires mat1.T @ mat2 to be symmetric. Requires mat1 @ mat2.T to be symmetric. Args: mat1: One of the real matrices. mat2: The other real matrix. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the inputs are real, and that mat1.T @ mat2 and mat1 @ mat2.T are both symmetric. Defaults to set. Returns: A tuple (L, R) of two orthogonal matrices, such that both L @ mat1 @ R and L @ mat2 @ R are diagonal matrices. Raises: ValueError: Matrices don't meet preconditions (e.g. not real). """ if check_preconditions: if np.any(np.imag(mat1) != 0): raise ValueError('mat1 must be real.') if np.any(np.imag(mat2) != 0): raise ValueError('mat2 must be real.') if not predicates.is_hermitian(mat1.dot(mat2.T), rtol=rtol, atol=atol): raise ValueError('mat1 @ mat2.T must be symmetric.') if not predicates.is_hermitian(mat1.T.dot(mat2), rtol=rtol, atol=atol): raise ValueError('mat1.T @ mat2 must be symmetric.') # Use SVD to bi-diagonalize the first matrix. base_left, base_diag, base_right = _svd_handling_empty(np.real(mat1)) base_diag = np.diag(base_diag) # Determine where we switch between diagonalization-fixup strategies. dim = base_diag.shape[0] rank = dim while rank > 0 and tolerance.all_near_zero(base_diag[rank - 1, rank - 1], atol=atol): rank -= 1 base_diag = base_diag[:rank, :rank] # Try diagonalizing the second matrix with the same factors as the first. semi_corrected = base_left.T.dot(np.real(mat2)).dot(base_right.T) # Fix up the part of the second matrix's diagonalization that's matched # against non-zero diagonal entries in the first matrix's diagonalization # by performing simultaneous diagonalization. overlap = semi_corrected[:rank, :rank] overlap_adjust = diagonalize_real_symmetric_and_sorted_diagonal_matrices( overlap, base_diag, rtol=rtol, atol=atol, check_preconditions=check_preconditions) # Fix up the part of the second matrix's diagonalization that's matched # against zeros in the first matrix's diagonalization by performing an SVD. extra = semi_corrected[rank:, rank:] extra_left_adjust, _, extra_right_adjust = _svd_handling_empty(extra) # Merge the fixup factors into the initial diagonalization. left_adjust = combinators.block_diag(overlap_adjust, extra_left_adjust) right_adjust = combinators.block_diag(overlap_adjust.T, extra_right_adjust) left = left_adjust.T.dot(base_left.T) right = base_right.T.dot(right_adjust.T) return left, right
0.000924
def append_row(self, index, values, new_cols=True): """ Appends a row of values to the end of the data. If there are new columns in the values and new_cols is True they will be added. Be very careful with this function as for sort DataFrames it will not enforce sort order. Use this only for speed when needed, be careful. :param index: value of the index :param values: dictionary of values :param new_cols: if True add new columns in values, if False ignore :return: nothing """ if index in self._index: raise IndexError('index already in DataFrame') if new_cols: for col in values: if col not in self._columns: self._add_column(col) # append index value self._index.append(index) # add data values, if not in values then use None for c, col in enumerate(self._columns): self._data[c].append(values.get(col, None))
0.004931
def search_feature_sets(self, dataset_id): """ Returns an iterator over the FeatureSets fulfilling the specified conditions from the specified Dataset. :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset` of interest. :return: An iterator over the :class:`ga4gh.protocol.FeatureSet` objects defined by the query parameters. """ request = protocol.SearchFeatureSetsRequest() request.dataset_id = dataset_id request.page_size = pb.int(self._page_size) return self._run_search_request( request, "featuresets", protocol.SearchFeatureSetsResponse)
0.002963
def url_unquote(s, charset='utf-8', errors='replace'): """URL decode a single string with a given decoding. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a `HTTPUnicodeError` is raised. :param s: the string to unquote. :param charset: the charset to be used. :param errors: the error handling for the charset decoding. """ if isinstance(s, unicode): s = s.encode(charset) return _decode_unicode(_unquote(s), charset, errors)
0.001742
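The url_unquote row is Python-2-era code (it special-cases unicode); the same decode-with-replacement behaviour is available from the modern standard library, shown here only as a point of comparison rather than as the row's own API.

from urllib.parse import unquote

print(unquote("caf%C3%A9%20au%20lait", encoding="utf-8", errors="replace"))   # café au lait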
def get_app_ext(self, loops=float('inf')): """ get_app_ext(loops=float('inf')) Application extention. This part specifies the amount of loops. If loops is 0 or inf, it goes on infinitely. """ if loops == 0 or loops == float('inf'): loops = 2**16 - 1 # bb = "" # application extension should not be used (the extension interprets zero loops to mean an infinite number of loops) Mmm, does not seem to work if True: bb = "\x21\xFF\x0B" # application extension bb += "NETSCAPE2.0" bb += "\x03\x01" bb += int_to_bin(loops) bb += '\x00' # end return bb
0.004335
def _create_index_content(words): """Create html string of index file. Parameters ---------- words : list of str List of cached words. Returns ------- str html string. """ content = ["<h1>Index</h1>", "<ul>"] for word in words: content.append( '<li><a href="translations/{word}.html">{word}</a></li>'.format(word=word) ) content.append("</ul>") if not words: content.append("<i>Nothing to see here ...yet!</i>") return "\n".join(content)
0.00367
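The HTML shape produced by _create_index_content above for a couple of hypothetical cached words, rebuilt with the same string template:

content = ["<h1>Index</h1>", "<ul>"]
for word in ["apfel", "haus"]:                     # hypothetical cached words
    content.append('<li><a href="translations/{word}.html">{word}</a></li>'.format(word=word))
content.append("</ul>")
print("\n".join(content))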
def nodes_minimum_distance_validation(self): """ if minimum distance is specified, ensure node is not too close to other nodes; """ if self.layer and self.layer.nodes_minimum_distance: minimum_distance = self.layer.nodes_minimum_distance # TODO - lower priority: do this check only when coordinates are changing near_nodes = Node.objects.exclude(pk=self.id).filter(geometry__distance_lte=(self.geometry, D(m=minimum_distance))).count() if near_nodes > 0: raise ValidationError(_('Distance between nodes cannot be less than %s meters') % minimum_distance)
0.008143
def conv_output_length(input_length, filter_size, border_mode, stride, dilation=1): """ Compute the length of the output sequence after 1D convolution along time. Note that this function is in line with the function used in Convolution1D class from Keras. Params: input_length (int): Length of the input sequence. filter_size (int): Width of the convolution kernel. border_mode (str): Only support `same` or `valid`. stride (int): Stride size used in 1D convolution. dilation (int) """ if input_length is None: return None assert border_mode in {'same', 'valid'} dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) if border_mode == 'same': output_length = input_length elif border_mode == 'valid': output_length = input_length - dilated_filter_size + 1 return (output_length + stride - 1) // stride
0.001045
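A worked instance of the arithmetic in conv_output_length above: a 'valid' 1D convolution over 10 time steps with a width-3 kernel, stride 2 and no dilation.

input_length, filter_size, stride, dilation = 10, 3, 2, 1
dilated = filter_size + (filter_size - 1) * (dilation - 1)   # 3
output_length = input_length - dilated + 1                   # 8 in 'valid' mode
print((output_length + stride - 1) // stride)                # 4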
def generate_child_leaf_nodes(self): """ Generate leaf nodes of this node. """ def _yield_child_leaf_nodes(node): """ Args: node: Yields: """ if not node.has_children(): yield node else: for child_node in node.generate_child_nodes(): # recursivity is not compatible with yield in Python2.x: you have to re-yield results for child in _yield_child_leaf_nodes(child_node): yield child return _yield_child_leaf_nodes(self)
0.004637
def fit(self, text, epochs=EPOCHS): """ Given a string `text`, use it to train the segmentation classifier for `epochs` iterations. """ logger.debug("Extracting features and classifications.") Phi = [] Y = [] for (L, P, R, gold, _) in Detector.candidates(text): Phi.append(self.extract_one(L, P, R)) Y.append(gold) self.classifier.fit(Y, Phi, epochs) logger.debug("Fitting complete.")
0.006356
def stored_bind(self, instance): """Bind an instance to this Pangler, using the bound Pangler store. This method functions identically to `bind`, except that it might return a Pangler which was previously bound to the provided instance. """ if self.id is None: return self.bind(instance) store = self._bound_pangler_store.setdefault(instance, {}) p = store.get(self.id) if p is None: p = store[self.id] = self.bind(instance) return p
0.003752
def post(interface, method, version=1, apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'], caller=None, session=None, params=None): """Send POST request to an API endpoint .. versionadded:: 0.8.3 :param interface: interface name :type interface: str :param method: method name :type method: str :param version: method version :type version: int :param apihost: API hostname :type apihost: str :param https: whether to use HTTPS :type https: bool :param params: parameters for endpoint :type params: dict :return: endpoint response :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str` """ url = "%s://%s/%s/%s/v%s/" % ( 'https' if https else 'http', apihost, interface, method, version) return webapi_request(url, 'POST', caller=caller, session=session, params=params)
0.00223
def get_constant(self, const_name, context): """ Return unrolled const """ # check if value is compatible with const = self._constants[const_name] if isinstance(const, ast.AnnAssign): # Handle ByteArrays. if context: expr = Expr(const.value, context).lll_node return expr else: raise VariableDeclarationException( "ByteArray: Can not be used outside of a function context: %s" % const_name ) # Other types are already unwrapped, no need return self._constants[const_name]
0.004754
def _format_batch_statuses(statuses, batch_ids, tracker): """Takes a statuses dict and formats it for transmission with Protobuf and ZMQ. Args: statuses (dict of int): Dict with batch ids as the key, status as value batch_ids (list of str): The batch ids in their original order tracker (BatchTracker): A batch tracker with access to invalid info """ proto_statuses = [] for batch_id in batch_ids: if statuses[batch_id] == \ client_batch_submit_pb2.ClientBatchStatus.INVALID: invalid_txns = tracker.get_invalid_txn_info(batch_id) for txn_info in invalid_txns: try: txn_info['transaction_id'] = txn_info.pop('id') except KeyError as e: LOGGER.debug(e) else: invalid_txns = None proto_statuses.append( client_batch_submit_pb2.ClientBatchStatus( batch_id=batch_id, status=statuses[batch_id], invalid_transactions=invalid_txns)) return proto_statuses
0.000905
def left_join(input, **params): """ Left join transformation :param input: :param params: :return: """ PARAM_COL_RIGHT = 'col.right' PARAM_COL_LEFT = 'col.left' PARAM_FIELD_JOIN = 'field.join' right_df = pd.DataFrame.from_records(input[params.get(PARAM_COL_RIGHT)]) left_df = pd.DataFrame.from_records(input[params.get(PARAM_COL_LEFT)]) join_on = params.get(PARAM_FIELD_JOIN) res = right_df.set_index(join_on, drop=False).join(left_df.set_index(join_on, drop=False), on=[join_on], rsuffix='_right') return Converter.df2list(res)
0.003419
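A minimal pandas sketch of the index-and-join sequence performed by left_join above; the column and key names are hypothetical.

import pandas as pd

right_df = pd.DataFrame.from_records([{"id": 1, "a": "x"}, {"id": 2, "a": "y"}])
left_df = pd.DataFrame.from_records([{"id": 1, "b": "p"}])
res = right_df.set_index("id", drop=False).join(
    left_df.set_index("id", drop=False), on=["id"], rsuffix="_right")
print(res.to_dict(orient="records"))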
def stopPoll(self, msg_identifier, reply_markup=None): """ See: https://core.telegram.org/bots/api#stoppoll :param msg_identifier: a 2-tuple (``chat_id``, ``message_id``), a 1-tuple (``inline_message_id``), or simply ``inline_message_id``. You may extract this value easily with :meth:`amanobot.message_identifier` """ p = _strip(locals(), more=['msg_identifier']) p.update(_dismantle_message_identifier(msg_identifier)) return self._api_request('stopPoll', _rectify(p))
0.006757
def _get_serializable_model(cls): """ Returns a model with a valid _meta.local_fields (serializable). Basically, this means the original model, not a proxied model. (this is a workaround for a bug in django) """ current_class = cls while current_class._meta.proxy: current_class = current_class._meta.proxy_for_model return current_class
0.004819
def get_stencil(self, stencil_name, **options): """Return a Stencil instance given a stencil name.""" if stencil_name not in self.manifest.get('stencils', {}): raise ValueError("Stencil '%s' not declared in StencilSet " "manifest." % stencil_name) stencil = copy.deepcopy(self.manifest) allstencils = stencil.pop('stencils') stencil.pop('default_stencil', None) override = allstencils[stencil_name] utils.deepupdate(stencil, override) # merge options, prefer **options (probably user-supplied) for opt, data in stencil.get('options', {}).items(): if opt not in options: options[opt] = data.get('default', '') stencil['options'] = options name = stencil['options'].get('name') files = stencil['files'].copy() for fil, templ in files.items(): if '<NAME>' in fil: # check for the option b/c there are # cases in which it may not exist if not name: raise ValueError("Stencil does not include a name option") stencil['files'].pop(fil) fil = fil.replace('<NAME>', name) stencil['files'][fil] = templ return stencil
0.001514
def avl_join2(t1, t2): """ join two trees without any intermediate key Returns: Node: new_root O(log(n) + log(m)) = O(r(t1) + r(t2)) For AVL-Trees the rank r(t1) = height(t1) - 1 """ if t1 is None and t2 is None: new_root = None elif t2 is None: new_root = t1 elif t1 is None: new_root = t2 else: new_left, last_node = avl_split_last(t1) debug = 0 if debug: EulerTourTree(root=new_left)._assert_nodes('new_left') EulerTourTree(root=last_node)._assert_nodes('last_node') EulerTourTree(root=t2)._assert_nodes('t2') print('new_left') EulerTourTree(root=new_left).print_tree() print('last_node') EulerTourTree(root=last_node).print_tree() print('t2') EulerTourTree(root=t2).print_tree() new_root = avl_join(new_left, t2, last_node) if debug: print('new_root') EulerTourTree(root=new_root).print_tree() EulerTourTree(root=last_node)._assert_nodes('new_root') return new_root
0.000878
def print_version(): """Print the environment versions.""" click.echo("Versions:") click.secho( "CLI Package Version: %(version)s" % {"version": click.style(get_cli_version(), bold=True)} ) click.secho( "API Package Version: %(version)s" % {"version": click.style(get_api_version(), bold=True)} )
0.002841
def check(self): """Check this transaction for completeness""" if not self.date: raise XnDataError("Missing date") if not self.desc: raise XnDataError("Missing description") if not self.dst: raise XnDataError("No destination accounts") if not self.src: raise XnDataError("No source accounts") if not self.amount: raise XnDataError("No transaction amount")
0.00432
def makeLogic(self): # *** When camera list has been closed, re-create the cameralist tree and update filterchains *** # self.manage_cameras_win.signals.close.connect(self.updateCameraTree) # now put into save_camera_config_slot # self.manage_cameras_win.signals.close.connect(self.filterchain_group.update) # TODO: use this once fixed # self.manage_cameras_win.signals.close.connect(self.filterchain_group.read) # TODO: eh.. lets be sure of this .. (are we releasing slots in the LiveThread etc.) # self.manage_cameras_win.signals.close.connect(self.save_camera_config_slot) # self.manage_memory_container.signals.save.connect(self.save_memory_conf_slot) # *** Menu bar connections *** # the self.filemenu.exit attribute was autogenerated self.filemenu.exit. triggered.connect(self.exit_slot) self.filemenu.save_window_layout. triggered.connect( self.save_window_layout_slot) self.filemenu.load_window_layout. triggered.connect( self.load_window_layout_slot) """ self.configmenu.manage_cameras. triggered.connect( self.manage_cameras_slot) self.configmenu.memory_usage. triggered.connect( self.memory_usage_slot) """ self.configmenu.configuration_dialog.triggered.connect(self.config_dialog_slot) self.viewmenu.camera_list. triggered.connect(self.camera_list_slot) self.aboutmenu.about_valkka_live. triggered.connect(self.about_slot) # *** Connect autogenerated menu calls into autogenerated slot functions *** for i in range(1, 5): # gets member function grid_ixi_slot slot_func = getattr(self, "grid_%ix%i_slot" % (i, i)) # gets member function grid_ixi from self.viewmenu.video_grid menu_func = getattr(self.viewmenu.video_grid, "grid_%ix%i" % (i, i)) menu_func.triggered.connect(slot_func) # i.e., like this : self.viewmenu.video_grid.grid_1x1.triggered.connect(slot_func) # *** autogenerated machine vision menu and slots *** for cl in self.mvision_classes: getattr(self.mvisionmenu,cl.name).triggered.connect(getattr(self,cl.name+"_slot"))
0.011045
def directionality(image, min_distance = 4, threshold = 0.1, voxelspacing = None, mask = slice(None)): r""" Takes a simple or multi-spectral image and returns the directionality of the image texture. It is just a value representing the strength of directionality, not the specific direction. An edge detection is applied on the image. Then the edge strength and directional angle between the image axis are computed. A histogram of the directional angles is than used to calculate a qualitative value for directionality in ONE image layer. Note that there are n choose 2 layers in a n dimensional image. Warning ------- Experimental. There are still issues with finding the right maxs and mins in histogram and predefining the number of bins for the histogram. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image or a slice object min_distance : int minimal Distance between 2 local minima or maxima in the histogram. Default is 4. threshold : float Defines a threshold between 0 and 1. It is used to ignore angles of low edge strength in the histogram. Default is 0.1. Returns ------- directionality : array Fdir is a value between 0 and 1. 1 represents a high directionality. Returns the directionality of an image in relation to one special image layer. The returned values are sorted like this. The axis are named v,w,x,y,z for a five dimensional image: w x y z v x y z v w arctan(delta)| delta = ---,---,---,---,---, ---,---,---,---,--- v w x y z v w x y z There are always n choose k axis relations; n=image.ndim, k=2 (2 axis in every image layer). See Also -------- """ image = numpy.asarray(image) ndim = image.ndim # set default mask or apply given mask if not type(mask) is slice: if not type(mask[0] is slice): mask = numpy.array(mask, copy=False, dtype = numpy.bool) image = image[mask] # set default voxel spacing if not suppliec if None == voxelspacing: voxelspacing = tuple([1.] * ndim) if len(voxelspacing) != ndim: print("Voxel spacing and image dimensions do not fit.") return None # Calculate amount of combinations: n choose k, normalizing factor r and voxel spacing. n = (factorial(ndim)/(2*factorial(ndim-2))) pi1_2 = numpy.pi/2.0 r=1.0 / (pi1_2**2) vs = [slice(None,None,numpy.rint(ii)) for ii in voxelspacing] # Allocate memory, define constants Fdir = numpy.empty(n) # calculate differences by using Sobel-filter. (Maybe other filter kernel like Prewitt will do a better job) E = [sobel(image, axis=ndim-1-i) for i in range(ndim)] # The edge strength e(x,y) is used for thresholding. e = sum(E) / float(ndim) border = [numpy.percentile(e, 1),numpy.percentile(e, 99)] e[e < border[0]] = 0 e[e > border[1]] = border[1] e -= border[0] e /= border[1] em = e > threshold for i in range(n): A = numpy.arctan((E[(i + (ndim+i)/ndim) % ndim][vs]) / (E[i%ndim][vs]+numpy.spacing(1))) # [0 , pi/2] A = A[em[vs]] # Calculate number of bins for the histogram. Watch out, this is just a work around! # @TODO: Write a more stable code to prevent for minimum and maximum repetition when the same value in the Histogram appears multiple times in a row. 
Example: image = numpy.zeros([10,10]), image[:,::3] = 1 bins = numpy.unique(A).size + min_distance H = numpy.histogram(A, bins = bins, density=True)[0] # [0 , 1] H[H < numpy.percentile(H,1)] = 0.0 H_peaks, H_valleys, H_range = find_valley_range(H) summe = 0.0 for idx_ap in range(len(H_peaks)): for range_idx in range( H_valleys[idx_ap], H_valleys[idx_ap]+H_range[idx_ap]): a=range_idx % len(H) summe += (((pi1_2*a)/bins - (pi1_2 * H_peaks[idx_ap])/bins) **2) * H[a] Fdir[i] = 1.0 - r * summe return Fdir
0.014148
def map(self, options=None): """Trigger find of serialized sources and build objects""" for path, data in self.paths.items(): for item in data: for obj in self.create_class(item, options): obj.jinja_env = self.jinja_env self.add_object(obj)
0.006173
def _naive_concordance_summary_statistics(event_times, predicted_event_times, event_observed): """ Fallback, simpler method to compute concordance. Assumes the data has been verified by lifelines.utils.concordance_index first. """ num_pairs = 0.0 num_correct = 0.0 num_tied = 0.0 for a, time_a in enumerate(event_times): pred_a = predicted_event_times[a] event_a = event_observed[a] # Don't want to double count for b in range(a + 1, len(event_times)): time_b = event_times[b] pred_b = predicted_event_times[b] event_b = event_observed[b] if _valid_comparison(time_a, time_b, event_a, event_b): num_pairs += 1.0 crct, ties = _concordance_value(time_a, time_b, pred_a, pred_b, event_a, event_b) num_correct += crct num_tied += ties return (num_correct, num_tied, num_pairs)
0.004184
def _canvas_route(self, *args, **kwargs): """ Decorator for canvas route """ def outer(view_fn): @self.route(*args, **kwargs) def inner(*args, **kwargs): fn_args = getargspec(view_fn) try: idx = fn_args.args.index(_ARG_KEY) except ValueError: idx = -1 if idx > -1: if 'error' in flask_request.args: return redirect('%s?error=%s' % ( self.config.get('CANVAS_ERROR_URI', '/'), flask_request.args.get('error'))) if 'signed_request' not in flask_request.form: self.logger.error('signed_request not in request.form') abort(403) try: _, decoded_data = _decode_signed_user( *flask_request.form['signed_request'].split('.')) except ValueError as e: self.logger.error(e.message) abort(403) if 'oauth_token' not in decoded_data: app.logger.info('unauthorized user, redirecting') return _authorize() user = User(**decoded_data) if not app.config.get('CANVAS_SKIP_AUTH_CHECK', False) \ and not user.has_permissions(): self.logger.info( 'user does not have the required permission set.') return _authorize() self.logger.info('all required permissions have been granted') args = args[:idx - 1] + (user,) + args[idx:] return view_fn(*args, **kwargs) return inner return outer
0.001704
def run(self): """run the model""" model = self.model configfile = self.configfile interval = self.interval sockets = self.sockets model.initialize(configfile) if model.state == 'pause': logger.info( "model initialized and started in pause mode, waiting for requests" ) else: logger.info("model started and initialized, running") if self.tracker: self.register() atexit.register(self.unregister) self.process_incoming() # Keep on counting indefinitely counter = itertools.count() logger.info("Entering timeloop...") for i in counter: while model.state == "pause": # keep waiting for messages when paused # process_incoming should set model.state to play self.process_incoming() else: # otherwise process messages once and continue self.process_incoming() if model.state == "quit": break # lookup dt or use -1 (default) dt = model.get_time_step() or -1 model.update(dt) # check counter, if not a multiple of interval, skip this step if i % interval: continue for key in self.output_vars: value = model.get_var(key) metadata = {'name': key, 'iteration': i} # 4ms for 1M doubles logger.debug("sending {}".format(metadata)) if 'pub' in sockets: send_array(sockets['pub'], value, metadata=metadata) logger.info("Finalizing...") model.finalize()
0.001693
def set(self, key, data, retry=0):
        """
        Store data <data> indexed by key <key>

        Args
            key <string> couchbase document id
            data <dict> data to store
        """
        if not isinstance(data, dict):
            raise Exception("data needs to be of type <dict>")
        self.bucket.set(key, 0, 0, json.dumps(data))
0.019663
def set_element(self, index, e):
        r""" Replaces a pipeline stage.

        Replace an element in chain and return replaced element.
        """
        if type(index) is not int:
            raise ValueError(
                "index is not an integer but '%s'" % str(type(index)))
        if index >= len(self._chain):
            raise IndexError("tried to access element %i, but chain has only %i"
                             " elements" % (index, len(self._chain)))

        # if e is already in chain, we're finished
        if self._chain[index] is e:
            return

        # remove current index and its data producer
        replaced = self._chain.pop(index)
        if not replaced.is_reader:
            replaced.data_producer = None

        self._chain.insert(index, e)

        if index == 0:
            e.data_producer = e
        else:
            # rewire data_producers
            e.data_producer = self._chain[index - 1]

        # if e has a successive element, need to set data_producer
        try:
            successor = self._chain[index + 1]
            successor.data_producer = e
        except IndexError:
            pass

        # set data_producer for predecessor of e
        # self._chain[max(0, index - 1)].data_producer = self._chain[index]

        # since data producer of element after insertion changed, reset its status
        # TODO: make parameterized a property?
        self._chain[index]._estimated = False

        return replaced
0.002685
def get_type_data(name): """Return dictionary representation of type. Can be used to initialize primordium.type.primitives.Type """ name = name.upper() try: return { 'authority': 'okapia.net', 'namespace': 'TextFormats', 'identifier': name, 'domain': 'DisplayText Formats', 'display_name': FORMAT_TYPES[name] + ' Format Type', 'display_label': FORMAT_TYPES[name], 'description': ('The display text format type for the ' + FORMAT_TYPES[name] + ' format.') } except KeyError: raise NotFound('Format Type:' + name)
0.001486
def update_proficiency(self, proficiency_form):
        """Updates an existing proficiency.

        arg:    proficiency_form (osid.learning.ProficiencyForm): the
                form containing the elements to be updated
        raise:  IllegalState - ``proficiency_form`` already used in an
                update transaction
        raise:  InvalidArgument - the form contains an invalid value
        raise:  NullArgument - ``proficiency_form`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        raise:  Unsupported - ``proficiency_form`` did not originate
                from ``get_proficiency_form_for_update()``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceAdminSession.update_resource_template
        collection = JSONClientValidated('learning',
                                         collection='Proficiency',
                                         runtime=self._runtime)
        if not isinstance(proficiency_form, ABCProficiencyForm):
            raise errors.InvalidArgument('argument type is not a ProficiencyForm')
        if not proficiency_form.is_for_update():
            raise errors.InvalidArgument('the ProficiencyForm is for create only, not update')
        try:
            if self._forms[proficiency_form.get_id().get_identifier()] == UPDATED:
                raise errors.IllegalState('proficiency_form already used in an update transaction')
        except KeyError:
            raise errors.Unsupported('proficiency_form did not originate from this session')
        if not proficiency_form.is_valid():
            raise errors.InvalidArgument('one or more of the form elements is invalid')
        collection.save(proficiency_form._my_map)

        self._forms[proficiency_form.get_id().get_identifier()] = UPDATED

        # Note: this is out of spec. The OSIDs don't require an object to be returned:
        return objects.Proficiency(
            osid_object_map=proficiency_form._my_map,
            runtime=self._runtime,
            proxy=self._proxy)
0.004111
def validate(cls, partial=True, **kwargs): """ Validate kwargs before setting attributes on the model """ data = kwargs if not partial: data = dict(**kwargs, **{col.name: None for col in cls.__table__.c if col.name not in kwargs}) errors = defaultdict(list) for name, value in data.items(): for validator in cls._get_validators(name): try: validator(value) except ValidationError as e: e.model = cls e.column = name errors[name].append(str(e)) if errors: raise ValidationErrors(errors)
0.002717
def night(self, date=None, local=True, use_elevation=True): """Calculates the night time (the time between astronomical dusk and astronomical dawn of the next day) :param date: The date for which to calculate the start of the night time. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: A tuple containing the start and end times :rtype: tuple(:class:`~datetime.datetime`, :class:`~datetime.datetime`) """ if local and self.timezone is None: raise ValueError("Local time requested but Location has no timezone set.") if self.astral is None: self.astral = Astral() if date is None: date = datetime.date.today() elevation = self.elevation if use_elevation else 0 start, end = self.astral.night_utc(date, self.latitude, self.longitude, elevation) if local: return start.astimezone(self.tz), end.astimezone(self.tz) else: return start, end
0.005505
def sct2e(sc, sclkdp): """ Convert encoded spacecraft clock ("ticks") to ephemeris seconds past J2000 (ET). http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sct2e_c.html :param sc: NAIF spacecraft ID code. :type sc: int :param sclkdp: SCLK, encoded as ticks since spacecraft clock start. :type sclkdp: float :return: Ephemeris time, seconds past J2000. :rtype: float """ sc = ctypes.c_int(sc) sclkdp = ctypes.c_double(sclkdp) et = ctypes.c_double() libspice.sct2e_c(sc, sclkdp, ctypes.byref(et)) return et.value
0.001712
def read_int32(self, little_endian=True): """ Read 4 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: """ if little_endian: endian = "<" else: endian = ">" return self.unpack('%si' % endian, 4)
0.007538
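The '<i'/'>i' format strings built above are plain struct formats; a quick standalone check of what the endianness flag changes:

import struct

raw = b"\x01\x00\x00\x00"
print(struct.unpack("<i", raw)[0])   # 1          (little endian)
print(struct.unpack(">i", raw)[0])   # 16777216   (big endian)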
def _eq(self, T, P): """Procedure for calculate the composition in saturation state Parameters ---------- T : float Temperature [K] P : float Pressure [MPa] Returns ------- Asat : float Saturation mass fraction of dry air in humid air [kg/kg] """ if T <= 273.16: ice = _Ice(T, P) gw = ice["g"] else: water = IAPWS95(T=T, P=P) gw = water.g def f(parr): rho, a = parr if a > 1: a = 1 fa = self._fav(T, rho, a) muw = fa["fir"]+rho*fa["fird"]-a*fa["fira"] return gw-muw, rho**2*fa["fird"]/1000-P rinput = fsolve(f, [1, 0.95], full_output=True) Asat = rinput[0][1] return Asat
0.002339
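The saturation solve above follows the usual fsolve pattern: pack the unknowns into one vector and return one residual per equation. A generic, self-contained sketch of that shape (the values below are illustrative, not IAPWS quantities):

from scipy.optimize import fsolve

def residuals(p):
    x, y = p
    return [x + y - 3.0,   # first condition driven to zero
            x - y - 1.0]   # second condition driven to zero

print(fsolve(residuals, [1.0, 1.0]))   # [2. 1.]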
def radius_server_host_retries(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa") host = ET.SubElement(radius_server, "host") hostname_key = ET.SubElement(host, "hostname") hostname_key.text = kwargs.pop('hostname') retries = ET.SubElement(host, "retries") retries.text = kwargs.pop('retries') callback = kwargs.pop('callback', self._callback) return callback(config)
0.005208
def read_google(self,url,**kwargs): """ Reads a google sheet """ if url[-1]!='/': url+='/' return self.read_csv(url+'export?gid=0&format=csv',**kwargs)
0.075949
def translate(self, trans_inputs: List[TranslatorInput]) -> List[TranslatorOutput]: """ Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs. Splits oversized sentences to sentence chunks of size less than max_input_length. :param trans_inputs: List of TranslatorInputs as returned by make_input(). :return: List of translation results. """ batch_size = self.max_batch_size # translate in batch-sized blocks over input chunks translations = [] for batch_id, batch in enumerate(utils.grouper(trans_inputs, batch_size)): logger.debug("Translating batch %d", batch_id) # underfilled batch will be filled to a full batch size with copies of the 1st input rest = batch_size - len(batch) if rest > 0: logger.debug("Extending the last batch to the full batch size (%d)", batch_size) batch = batch + [batch[0]] * rest batch_translations = self._translate_nd(*self._get_inference_input(batch)) # truncate to remove filler translations if rest > 0: batch_translations = batch_translations[:-rest] translations.extend(batch_translations) # Concatenate results results = [] # type: List[TranslatorOutput] for trans_input, translation in zip(trans_inputs, translations): results.append(self._make_result(trans_input, translation)) return results
0.006532
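The pad-to-full-batch trick used above can be shown in isolation; the names below are illustrative stand-ins, not Sockeye internals:

def pad_batch(batch, batch_size):
    """Pad an underfilled batch with copies of its first element."""
    rest = batch_size - len(batch)
    if rest > 0:
        batch = batch + [batch[0]] * rest
    return batch, rest

batch, rest = pad_batch(["only", "two"], batch_size=4)
outputs = [s.upper() for s in batch]     # stand-in for the real batched translation
if rest > 0:
    outputs = outputs[:-rest]            # drop the filler results -> ['ONLY', 'TWO']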
def delete_group_maintainer(self, grp_name, user):
        """Remove the given user as a maintainer of the named group.

        Both group and user must already exist for this to succeed.

        Args:
            grp_name (string): Name of group.
            user (string): User to remove from the group's maintainers.

        Raises:
            requests.HTTPError on failure.
        """
        self.service.delete_group_maintainer(
            grp_name, user, self.url_prefix, self.auth, self.session,
            self.session_send_opts)
0.003984
def generate(self, api): """ Generates a module for each namespace. Each namespace will have Python classes to represent data types and routes in the Stone spec. """ rsrc_folder = os.path.join(os.path.dirname(__file__), 'python_rsrc') self.logger.info('Copying stone_validators.py to output folder') shutil.copy(os.path.join(rsrc_folder, 'stone_validators.py'), self.target_folder_path) self.logger.info('Copying stone_serializers.py to output folder') shutil.copy(os.path.join(rsrc_folder, 'stone_serializers.py'), self.target_folder_path) self.logger.info('Copying stone_base.py to output folder') shutil.copy(os.path.join(rsrc_folder, 'stone_base.py'), self.target_folder_path) for namespace in api.namespaces.values(): reserved_namespace_name = fmt_namespace(namespace.name) with self.output_to_relative_path('{}.py'.format(reserved_namespace_name)): self._generate_base_namespace_module(api, namespace) if reserved_namespace_name != namespace.name: with self.output_to_relative_path('{}.py'.format(namespace.name)): self._generate_dummy_namespace_module(reserved_namespace_name)
0.003743
def authenticate(self, token): """ Authenticate a token :param token: """ if self.verify_token_callback: # Specified verify function overrides below return self.verify_token_callback(token) if not token: return False name = self.token_manager.verify(token) if not name: return False return True
0.004878
def fetch(table, cols="*", where=(), group="", order=(), limit=(), **kwargs): """Convenience wrapper for database SELECT and fetch all.""" return select(table, cols, where, group, order, limit, **kwargs).fetchall()
0.004464
def _py_invar(parameter, lparams, tab): """Returns the code to create the local input parameter that is coerced to have the correct type for ctypes interaction. """ if ("in" in parameter.direction and parameter.D > 0): if parameter.direction == "(inout)" and ":" not in parameter.dimension: wstr = ", writeable" else: wstr = "" pytype = _py_pytype(parameter) lparams.append("{}_a".format(parameter.lname)) return ('{0}_a = require({0}, {1}, "F{2}")'.format(parameter.lname, pytype, wstr), False) elif parameter.D == 0: #Even for scalar outvars, we need to initialize a variable to pass by reference. lparams.append("byref({}_c)".format(parameter.lname)) if parameter.direction == "(out)": initval = parameter.py_initval return ("{0}_c = {1}({2})".format(parameter.lname, _py_ctype(parameter), initval), False) else: return ("{0}_c = {1}({0})".format(parameter.lname, _py_ctype(parameter)), False)
0.006679
def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. :param dL_dK: An array of gradients of the objective function with respect to the covariance function. :type dL_dK: np.ndarray (num_samples x num_inducing) :param X: Observed data inputs :type X: np.ndarray (num_samples x input_dim) :param X2: Observed data inputs (optional, defaults to X) :type X2: np.ndarray (num_inducing x input_dim)""" target = np.zeros(X.shape) [target.__iadd__(p.gradients_X(dL_dK, X, X2)) for p in self.parts] return target
0.004673
def as_dict(self): """ Serializes the object necessary data in a dictionary. :returns: Serialized data in a dictionary. :rtype: dict """ result_dict = super(Group, self).as_dict() statuses = list() version = None titles = list() descriptions = list() platforms = list() groups = list() rules = list() for child in self.children: if isinstance(child, Version): version = child.as_dict() elif isinstance(child, Status): statuses.append(child.as_dict()) elif isinstance(child, Title): titles.append(child.as_dict()) elif isinstance(child, Description): descriptions.append(child.as_dict()) elif isinstance(child, Platform): platforms.append(child.as_dict()) elif isinstance(child, Group): groups.append(child.as_dict()) elif isinstance(child, Rule): rules.append(child.as_dict()) if version is not None: result_dict['version'] = version if len(statuses) > 0: result_dict['statuses'] = statuses if len(titles) > 0: result_dict['titles'] = titles if len(descriptions) > 0: result_dict['descriptions'] = descriptions if len(platforms) > 0: result_dict['platforms'] = platforms if len(groups) > 0: result_dict['groups'] = groups if len(rules) > 0: result_dict['rules'] = rules return result_dict
0.001212
def get_filter_value(self, column_name): """ Returns the filtered value for a certain column :param column_name: The name of the column that we want the value from :return: the filter value of the column """ for flt, value in zip(self.filters, self.values): if flt.column_name == column_name: return value
0.007614
def glance(msg, flavor='chat', long=False): """ Extract "headline" info about a message. Use parameter ``long`` to control whether a short or long tuple is returned. When ``flavor`` is ``chat`` (``msg`` being a `Message <https://core.telegram.org/bots/api#message>`_ object): - short: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``) - long: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``, ``msg['date']``, ``msg['message_id']``) *content_type* can be: ``text``, ``audio``, ``document``, ``game``, ``photo``, ``sticker``, ``video``, ``voice``, ``video_note``, ``contact``, ``location``, ``venue``, ``new_chat_member``, ``left_chat_member``, ``new_chat_title``, ``new_chat_photo``, ``delete_chat_photo``, ``group_chat_created``, ``supergroup_chat_created``, ``channel_chat_created``, ``migrate_to_chat_id``, ``migrate_from_chat_id``, ``pinned_message``, ``new_chat_members``, ``invoice``, ``successful_payment``. When ``flavor`` is ``callback_query`` (``msg`` being a `CallbackQuery <https://core.telegram.org/bots/api#callbackquery>`_ object): - regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['data']``) When ``flavor`` is ``inline_query`` (``msg`` being a `InlineQuery <https://core.telegram.org/bots/api#inlinequery>`_ object): - short: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``) - long: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``, ``msg['offset']``) When ``flavor`` is ``chosen_inline_result`` (``msg`` being a `ChosenInlineResult <https://core.telegram.org/bots/api#choseninlineresult>`_ object): - regardless: (``msg['result_id']``, ``msg['from']['id']``, ``msg['query']``) When ``flavor`` is ``shipping_query`` (``msg`` being a `ShippingQuery <https://core.telegram.org/bots/api#shippingquery>`_ object): - regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``) When ``flavor`` is ``pre_checkout_query`` (``msg`` being a `PreCheckoutQuery <https://core.telegram.org/bots/api#precheckoutquery>`_ object): - short: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``) - long: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``, ``msg['currency']``, ``msg['total_amount']``) """ def gl_chat(): content_type = _find_first_key(msg, all_content_types) if long: return content_type, msg['chat']['type'], msg['chat']['id'], msg['date'], msg['message_id'] else: return content_type, msg['chat']['type'], msg['chat']['id'] def gl_callback_query(): return msg['id'], msg['from']['id'], msg['data'] def gl_inline_query(): if long: return msg['id'], msg['from']['id'], msg['query'], msg['offset'] else: return msg['id'], msg['from']['id'], msg['query'] def gl_chosen_inline_result(): return msg['result_id'], msg['from']['id'], msg['query'] def gl_shipping_query(): return msg['id'], msg['from']['id'], msg['invoice_payload'] def gl_pre_checkout_query(): if long: return msg['id'], msg['from']['id'], msg['invoice_payload'], msg['currency'], msg['total_amount'] else: return msg['id'], msg['from']['id'], msg['invoice_payload'] try: fn = {'chat': gl_chat, 'callback_query': gl_callback_query, 'inline_query': gl_inline_query, 'chosen_inline_result': gl_chosen_inline_result, 'shipping_query': gl_shipping_query, 'pre_checkout_query': gl_pre_checkout_query}[flavor] except KeyError: raise exception.BadFlavor(flavor) return fn()
0.005061
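A usage sketch against a hand-built message dict (shaped after the Bot API objects linked above); it assumes the module-level helpers glance() relies on (_find_first_key, all_content_types) are present, as in telepot itself:

msg = {
    'message_id': 10,
    'date': 1500000000,
    'chat': {'id': 12345, 'type': 'private'},
    'text': 'hello',
}

content_type, chat_type, chat_id = glance(msg)                           # ('text', 'private', 12345)
content_type, chat_type, chat_id, date, msg_id = glance(msg, long=True)  # adds date and message_id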
def save_and_scan(filename, b64_data):
    """
    Save `b64_data` to temporary file and scan it for viruses.

    Args:
        filename (str): Name of the file - used as basename for tmp file.
        b64_data (str): Content of the file encoded in base64.

    Returns:
        dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
    """
    with NTFile(suffix="_" + os.path.basename(filename), mode="wb") as ifile:
        ifile.write(
            b64decode(b64_data)
        )
        ifile.flush()

        os.chmod(ifile.name, 0o755)

        return scan_file(ifile.name)
0.001715
def stop_artifact_creation(self, id_or_uri, task_uri): """ Stops creation of the selected Artifact Bundle. Args: id_or_uri: ID or URI of the Artifact Bundle. task_uri: Task URI associated with the Artifact Bundle. Returns: string: """ data = { "taskUri": task_uri } uri = self.URI + '/' + extract_id_from_uri(id_or_uri) + self.STOP_CREATION_PATH return self._client.update(data, uri=uri)
0.005871
def bar(self, x=None, y=None, **kwds): """ Vertical bar plot. A bar plot is a plot that presents categorical data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. Parameters ---------- x : label or position, optional Allows plotting of one column versus another. If not specified, the index of the DataFrame is used. y : label or position, optional Allows plotting of one column versus another. If not specified, all numerical columns are used. **kwds Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- matplotlib.axes.Axes or np.ndarray of them An ndarray is returned with one :class:`matplotlib.axes.Axes` per column when ``subplots=True``. See Also -------- DataFrame.plot.barh : Horizontal bar plot. DataFrame.plot : Make plots of a DataFrame. matplotlib.pyplot.bar : Make a bar plot with matplotlib. Examples -------- Basic plot. .. plot:: :context: close-figs >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]}) >>> ax = df.plot.bar(x='lab', y='val', rot=0) Plot a whole dataframe to a bar plot. Each column is assigned a distinct color, and each row is nested in a group along the horizontal axis. .. plot:: :context: close-figs >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = pd.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> ax = df.plot.bar(rot=0) Instead of nesting, the figure can be split by column with ``subplots=True``. In this case, a :class:`numpy.ndarray` of :class:`matplotlib.axes.Axes` are returned. .. plot:: :context: close-figs >>> axes = df.plot.bar(rot=0, subplots=True) >>> axes[1].legend(loc=2) # doctest: +SKIP Plot a single column. .. plot:: :context: close-figs >>> ax = df.plot.bar(y='speed', rot=0) Plot only selected categories for the DataFrame. .. plot:: :context: close-figs >>> ax = df.plot.bar(x='lifespan', rot=0) """ return self(kind='bar', x=x, y=y, **kwds)
0.000695
def cli(ctx, profile): """dw commands support working with multiple data.world accounts \b Use a different <profile> value for each account. In the absence of a <profile>, 'default' will be used. """ if ctx.obj is None: ctx.obj = {} ctx.obj['profile'] = profile pass
0.003257
def breadth_first_vertex_order(vertices_resources, nets): """A generator which iterates over a set of vertices in a breadth-first order in terms of connectivity. For use as a vertex ordering for the sequential placer. """ # Special case: no vertices, just stop immediately if len(vertices_resources) == 0: return # Enumerate the set of nets attached to each vertex vertex_neighbours = defaultdict(set) for net in nets: # Note: Iterating over a Net object produces the set of vertices # involved in the net. vertex_neighbours[net.source].update(net) for sink in net.sinks: vertex_neighbours[sink].update(net) # Perform a breadth-first iteration over the vertices. unplaced_vertices = set(vertices_resources) vertex_queue = deque() while vertex_queue or unplaced_vertices: if not vertex_queue: vertex_queue.append(unplaced_vertices.pop()) vertex = vertex_queue.popleft() yield vertex vertex_queue.extend(v for v in vertex_neighbours[vertex] if v in unplaced_vertices) unplaced_vertices.difference_update(vertex_neighbours[vertex])
0.000822
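A toy run of the ordering above. The Net stand-in below only mimics what the generator touches (.source, .sinks, and iteration over all involved vertices); it is not the real rig Net class, and the exact output order can vary because isolated vertices are pulled from a set:

class Net(object):
    def __init__(self, source, sinks):
        self.source = source
        self.sinks = list(sinks)

    def __iter__(self):
        # iterating a net yields every vertex it touches, like the real class
        return iter([self.source] + self.sinks)

vertices_resources = {"a": {}, "b": {}, "c": {}, "d": {}}
nets = [Net("a", ["b"]), Net("b", ["c"])]          # "d" is left unconnected

print(list(breadth_first_vertex_order(vertices_resources, nets)))
# one possible result: ['a', 'b', 'c', 'd'] -- connected neighbours come out together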
def output_tray_status(self) -> Dict[int, Dict[str, str]]: """Return the state of all output trays.""" tray_status = {} try: tray_stat = self.data.get('outputTray', []) for i, stat in enumerate(tray_stat): tray_status[i] = { 'name': stat[0], 'capacity': stat[1], 'status': stat[2], } except (KeyError, AttributeError): tray_status = {} return tray_status
0.003738
def _generate_linear_range(start, end, periods): """Generate an equally-spaced sequence of cftime.datetime objects between and including two dates (whose length equals the number of periods).""" import cftime total_seconds = (end - start).total_seconds() values = np.linspace(0., total_seconds, periods, endpoint=True) units = 'seconds since {}'.format(format_cftime_datetime(start)) calendar = start.calendar return cftime.num2date(values, units=units, calendar=calendar, only_use_cftime_datetimes=True)
0.001773
def event_list_tabs(counts, current_kind, page_number=1): """ Displays the tabs to different event_list pages. `counts` is a dict of number of events for each kind, like: {'all': 30, 'gig': 12, 'movie': 18,} `current_kind` is the event kind that's active, if any. e.g. 'gig', 'movie', etc. `page_number` is the current page of this kind of events we're on. """ return { 'counts': counts, 'current_kind': current_kind, 'page_number': page_number, # A list of all the kinds we might show tabs for, like # ['gig', 'movie', 'play', ...] 'event_kinds': Event.get_kinds(), # A dict of data about each kind, keyed by kind ('gig') including # data about 'name', 'name_plural' and 'slug': 'event_kinds_data': Event.get_kinds_data(), }
0.001126
def filename_input(self, path_or_filename): ''' Open and read input from a *path or filename*, and parse its content. If the filename is a directory, files that ends with .xtuml located somewhere in the directory or sub directories will be loaded as well. ''' if os.path.isdir(path_or_filename): for path, _, files in os.walk(path_or_filename): for name in files: if name.endswith('.xtuml'): xtuml.ModelLoader.filename_input(self, os.path.join(path, name)) else: xtuml.ModelLoader.filename_input(self, path_or_filename)
0.005988
def grp_start_len(a):
    """Given a sorted 1D input array `a`, e.g., [0, 0, 1, 2, 3, 4, 4, 4], this
    routine returns the indices where the blocks of equal integers start and
    how long the blocks are.
    """
    # https://stackoverflow.com/a/50394587/353337
    m = numpy.concatenate([[True], a[:-1] != a[1:], [True]])
    idx = numpy.flatnonzero(m)
    return idx[:-1], numpy.diff(idx)
0.002551
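Worked example for the docstring input above (runs with the module's numpy import):

import numpy

a = numpy.array([0, 0, 1, 2, 3, 4, 4, 4])
starts, lengths = grp_start_len(a)
print(starts)    # [0 2 3 4 5]
print(lengths)   # [2 1 1 1 3]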
def read_packet(self): """Reads a RTMP packet from the server. Returns a :class:`RTMPPacket`. Raises :exc:`RTMPError` on error. Raises :exc:`RTMPTimeoutError` on timeout. Usage:: >>> packet = conn.read_packet() >>> packet.body b'packet body ...' """ packet = ffi.new("RTMPPacket*") packet_complete = False while not packet_complete: res = librtmp.RTMP_ReadPacket(self.rtmp, packet) if res < 1: if librtmp.RTMP_IsTimedout(self.rtmp): raise RTMPTimeoutError("Timed out while reading packet") else: raise RTMPError("Failed to read packet") packet_complete = packet.m_nBytesRead == packet.m_nBodySize return RTMPPacket._from_pointer(packet)
0.002317
def RootGroup(self): """Returns group object for datacenter root group. >>> clc.v2.Datacenter().RootGroup() <clc.APIv2.group.Group object at 0x105feacd0> >>> print _ WA1 Hardware """ return(clc.v2.Group(id=self.root_group_id,alias=self.alias,session=self.session))
0.042705
def _gpdfit(x): """Estimate the parameters for the Generalized Pareto Distribution (GPD). Empirical Bayes estimate for the parameters of the generalized Pareto distribution given the data. Parameters ---------- x : array sorted 1D data array Returns ------- k : float estimated shape parameter sigma : float estimated scale parameter """ prior_bs = 3 prior_k = 10 len_x = len(x) m_est = 30 + int(len_x ** 0.5) b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5)) b_ary /= prior_bs * x[int(len_x / 4 + 0.5) - 1] b_ary += 1 / x[-1] k_ary = np.log1p(-b_ary[:, None] * x).mean(axis=1) # pylint: disable=no-member len_scale = len_x * (np.log(-(b_ary / k_ary)) - k_ary - 1) weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1) # remove negligible weights real_idxs = weights >= 10 * np.finfo(float).eps if not np.all(real_idxs): weights = weights[real_idxs] b_ary = b_ary[real_idxs] # normalise weights weights /= weights.sum() # posterior mean for b b_post = np.sum(b_ary * weights) # estimate for k k_post = np.log1p(-b_post * x).mean() # pylint: disable=invalid-unary-operand-type,no-member # add prior for k_post k_post = (len_x * k_post + prior_k * 0.5) / (len_x + prior_k) sigma = -k_post / b_post return k_post, sigma
0.002088
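A hedged usage sketch: numpy's pareto(a) draws are Lomax distributed, i.e. generalized Pareto with shape 1/a, so the fit should land roughly there. Exact numbers depend on the seed; this is an illustration, not a PSIS workflow, and it assumes the module's numpy-as-np import.

import numpy as np

rng = np.random.RandomState(0)
x = np.sort(rng.pareto(a=3.0, size=2000))   # GPD-distributed draws with shape ~ 1/3

k_hat, sigma_hat = _gpdfit(x)
print(k_hat, sigma_hat)                     # expect k_hat roughly near 0.33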
def from_packets(packets, sequence=0, default_size=4096, wiggle_room=2048): """Construct a list of Ogg pages from a list of packet data. The algorithm will generate pages of approximately default_size in size (rounded down to the nearest multiple of 255). However, it will also allow pages to increase to approximately default_size + wiggle_room if allowing the wiggle room would finish a packet (only one packet will be finished in this way per page; if the next packet would fit into the wiggle room, it still starts on a new page). This method reduces packet fragmentation when packet sizes are slightly larger than the default page size, while still ensuring most pages are of the average size. Pages are numbered started at 'sequence'; other information is uninitialized. """ chunk_size = (default_size // 255) * 255 pages = [] page = OggPage() page.sequence = sequence for packet in packets: page.packets.append(b"") while packet: data, packet = packet[:chunk_size], packet[chunk_size:] if page.size < default_size and len(page.packets) < 255: page.packets[-1] += data else: # If we've put any packet data into this page yet, # we need to mark it incomplete. However, we can # also have just started this packet on an already # full page, in which case, just start the new # page with this packet. if page.packets[-1]: page.complete = False if len(page.packets) == 1: page.position = -1 else: page.packets.pop(-1) pages.append(page) page = OggPage() page.continued = not pages[-1].complete page.sequence = pages[-1].sequence + 1 page.packets.append(data) if len(packet) < wiggle_room: page.packets[-1] += packet packet = b"" if page.packets: pages.append(page) return pages
0.001262
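A usage sketch, assuming the mutagen-style OggPage class referenced above is in scope (its no-argument constructor and the .packets/.complete/.size attributes are what the routine relies on):

packets = [b"\x00" * 300, b"\x01" * 9000]      # one small packet, one oversized packet
pages = from_packets(packets, sequence=7, default_size=4096, wiggle_room=2048)

# The big packet is spread over several pages; only the page holding its last chunk is complete.
print(len(pages), [p.complete for p in pages])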
def _distance(self, x0, y0, x1, y1):
        """Utility function to compute the per-axis displacement between points,
        wrapping across periodic borders."""
        dx = x1 - x0
        dy = y1 - y0

        # roll displacements across the borders
        if self.pix:
            dx[dx > self.Lx / 2] -= self.Lx
            dx[dx < -self.Lx / 2] += self.Lx
        if self.piy:
            dy[dy > self.Ly / 2] -= self.Ly
            dy[dy < -self.Ly / 2] += self.Ly

        return dx, dy
0.023148
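A standalone sketch of the same minimum-image wrapping on one periodic axis (Lx here is an assumed box length; the method above reads it and the periodicity flags from the object):

import numpy as np

Lx = 10.0
x0 = np.array([9.5])
x1 = np.array([0.5])

dx = x1 - x0                    # raw displacement: -9.0
dx[dx > Lx / 2] -= Lx
dx[dx < -Lx / 2] += Lx
print(dx)                       # [1.] -- the short way around the periodic box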