text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
def request_signature(self, stringtosign):
    """
    Construct a signature by making an RFC2104 HMAC-SHA1 of the following
    and converting it to Base64 UTF-8 encoded string.
    """
    digest = hmac.new(
        self.secret_key.encode(ENCODING),
        stringtosign.encode(ENCODING),
        hashlib.sha1
    ).digest()
    return b64_string(digest)
0.005051
def p_state_action_constraint_section(self, p):
    '''state_action_constraint_section : STATE_ACTION_CONSTRAINTS LCURLY state_cons_list RCURLY SEMI
                                       | STATE_ACTION_CONSTRAINTS LCURLY RCURLY SEMI'''
    if len(p) == 6:
        p[0] = ('constraints', p[3])
    elif len(p) == 5:
        p[0] = ('constraints', [])
    self._print_verbose('state-action-constraints')
0.009302
def get(self, addresses):
    """Returns the value in this context, or None, for each address in
    addresses. Useful for gets on the context manager.

    Args:
        addresses (list of str): The addresses to return values for, if
            within this context.

    Returns:
        results (list of bytes): The values in state for these addresses.
    """
    with self._lock:
        results = []
        for add in addresses:
            self.validate_read(add)
            results.append(self._get(add))
        return results
0.003367
def all_as_list():
    '''
    returns a list of all defined containers
    '''
    as_dict = all_as_dict()
    containers = as_dict['Running'] + as_dict['Frozen'] + as_dict['Stopped']
    containers_list = []
    for i in containers:
        i = i.replace(' (auto)', '')
        containers_list.append(i)
    return containers_list
0.005831
def get_instance(self, payload):
    """
    Build an instance of TollFreeInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
    :rtype: twilio.rest.api.v2010.account.available_phone_number.toll_free.TollFreeInstance
    """
    return TollFreeInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        country_code=self._solution['country_code'],
    )
0.007143
def log(self, session):
    """Logs training progress."""
    logging.info('Train [%s/%d], step %d (%.3f sec) %.1f '
                 'global steps/s, %.1f local steps/s',
                 self.task.type, self.task.index, self.global_step,
                 (self.now - self.start_time),
                 (self.global_step - self.last_global_step) /
                 (self.now - self.last_global_time),
                 (self.local_step - self.last_local_step) /
                 (self.now - self.last_local_time))
    self.last_log = self.now
    self.last_global_step, self.last_global_time = self.global_step, self.now
    self.last_local_step, self.last_local_time = self.local_step, self.now
0.001439
def peek_64(library, session, address):
    """Read a 64-bit value from the specified address.

    Corresponds to viPeek64 function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :return: Data read from bus, return value of the library call.
    :rtype: bytes, :class:`pyvisa.constants.StatusCode`
    """
    value_64 = ViUInt64()
    ret = library.viPeek64(session, address, byref(value_64))
    return value_64.value, ret
0.001739
def list_models(filename):
    """
    Lists all models in given filename.

    Parameters
    ----------
    filename: str
        path to filename, where the model has been stored.

    Returns
    -------
    obj: dict
        A mapping by name and a comprehensive description like this:
        {model_name: {'repr': 'string representation', 'created': 'human readable date', ...}}
    """
    from .h5file import H5File
    with H5File(filename, mode='r') as f:
        return f.models_descriptive
0.004024
def _make_node(self, singular, plural, variables, plural_expr, vars_referenced, num_called_num): """Generates a useful node from the data provided.""" # no variables referenced? no need to escape for old style # gettext invocations only if there are vars. if not vars_referenced and not self.environment.newstyle_gettext: singular = singular.replace('%%', '%') if plural: plural = plural.replace('%%', '%') # singular only: if plural_expr is None: gettext = nodes.Name('gettext', 'load') node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None) # singular and plural else: ngettext = nodes.Name('ngettext', 'load') node = nodes.Call(ngettext, [ nodes.Const(singular), nodes.Const(plural), plural_expr ], [], None, None) # in case newstyle gettext is used, the method is powerful # enough to handle the variable expansion and autoescape # handling itself if self.environment.newstyle_gettext: for key, value in iteritems(variables): # the function adds that later anyways in case num was # called num, so just skip it. if num_called_num and key == 'num': continue node.kwargs.append(nodes.Keyword(key, value)) # otherwise do that here else: # mark the return value as safe if we are in an # environment with autoescaping turned on node = nodes.MarkSafeIfAutoescape(node) if variables: node = nodes.Mod(node, nodes.Dict([ nodes.Pair(nodes.Const(key), value) for key, value in variables.items() ])) return nodes.Output([node])
0.001521
def wrap_guess_content_type(func_, *args, **kwargs):
    """
    guesses the content type with libmagic if available
    :param func_:
    :param args:
    :param kwargs:
    :return:
    """
    assert isinstance(args[0], dict)
    if not args[0].get(CONTENTTYPE_FIELD, None):
        content = args[0].get(CONTENT_FIELD, b"")
        try:
            args[0][CONTENTTYPE_FIELD] = magic.from_buffer(content)
        except magic.MagicException:  # pragma: no cover
            args[0][CONTENTTYPE_FIELD] = MockMagic.DEFAULT_MAGIC
    return func_(*args, **kwargs)
0.001773
def get_pub_abbreviation(pubstring, numBest=5, exact=None):
    """
    Get ADS journal abbreviation ("bibstem") candidates for a given
    publication name.

    * 'exact': if True results will only be returned if an exact match was found
    * 'numBest': maximum number of guesses to return

    A list of tuples will be returned, each tuple consisting of a score and a bibstem
    """
    if exact:
        # Only try to find exact matches
        bibstems = _defaultSourcematcher.getExactMatch(string.upper(pubstring))
    else:
        # Allow fuzzy matching
        bibstems = _defaultSourcematcher.getBestMatches(string.upper(pubstring), numBest)
        if re.search(r"L(ett(ers)?)?$", pubstring):
            addit = _defaultSourcematcher.getBestMatches(
                re.sub(r"(?i)\s*L(ett(ers)?)?$", "", pubstring.upper()), 2)
            if addit:
                bibstems.extend(addit)
        bibstems.sort()
    # Make the list of results unique
    try:
        bibstems = list(set(bibstems))
    except:
        bibstems = []
    # Sort the list of results from highest score to lowest score
    bibstems.sort(key=lambda tup: tup[0], reverse=True)
    return bibstems
0.006832
def request(self, action, params=None, action_token_type=None, upload_info=None, headers=None): """Perform request to MediaFire API action -- "category/name" of method to call params -- dict of parameters or query string action_token_type -- action token to use: None, "upload", "image" upload_info -- in case of upload, dict of "fd" and "filename" headers -- additional headers to send (used for upload) session_token and signature generation/update is handled automatically """ uri = self._build_uri(action) if isinstance(params, six.text_type): query = params else: query = self._build_query(uri, params, action_token_type) if headers is None: headers = {} if upload_info is None: # Use request body for query data = query headers['Content-Type'] = FORM_MIMETYPE else: # Use query string for query since payload is file uri += '?' + query if "filename" in upload_info: data = MultipartEncoder( fields={'file': ( upload_info["filename"], upload_info["fd"], UPLOAD_MIMETYPE )} ) headers["Content-Type"] = data.content_type else: data = upload_info["fd"] headers["Content-Type"] = UPLOAD_MIMETYPE logger.debug("uri=%s query=%s", uri, query if not upload_info else None) try: # bytes from now on url = (API_BASE + uri).encode('utf-8') if isinstance(data, six.text_type): # request's data is bytes, dict, or filehandle data = data.encode('utf-8') response = self.http.post(url, data=data, headers=headers, stream=True) except RequestException as ex: logger.exception("HTTP request failed") raise MediaFireConnectionError( "RequestException: {}".format(ex)) return self._process_response(response)
0.00133
def fmap_info(metadata, img, config, layout): """ Generate a paragraph describing field map acquisition information. Parameters ---------- metadata : :obj:`dict` Data from the json file associated with the field map, in dictionary form. img : :obj:`nibabel.Nifti1Image` The nifti image of the field map. config : :obj:`dict` A dictionary with relevant information regarding sequences, sequence variants, phase encoding directions, and task names. Returns ------- desc : :obj:`str` A description of the field map's acquisition information. """ dir_ = config['dir'][metadata['PhaseEncodingDirection']] n_slices, vs_str, ms_str, fov_str = get_sizestr(img) seqs, variants = get_seqstr(config, metadata) if 'EchoTime' in metadata.keys(): te = num_to_str(metadata['EchoTime']*1000) else: te = 'UNKNOWN' if 'IntendedFor' in metadata.keys(): scans = metadata['IntendedFor'] run_dict = {} for scan in scans: fn = basename(scan) iff_file = [f for f in layout.get(extensions='nii.gz') if fn in f.path][0] run_num = int(iff_file.run) ty = iff_file.entities['suffix'].upper() if ty == 'BOLD': iff_meta = layout.get_metadata(iff_file.path) task = iff_meta.get('TaskName', iff_file.entities['task']) ty_str = '{0} {1} scan'.format(task, ty) else: ty_str = '{0} scan'.format(ty) if ty_str not in run_dict.keys(): run_dict[ty_str] = [] run_dict[ty_str].append(run_num) for scan in run_dict.keys(): run_dict[scan] = [num2words(r, ordinal=True) for r in sorted(run_dict[scan])] out_list = [] for scan in run_dict.keys(): if len(run_dict[scan]) > 1: s = 's' else: s = '' run_str = list_to_str(run_dict[scan]) string = '{rs} run{s} of the {sc}'.format(rs=run_str, s=s, sc=scan) out_list.append(string) for_str = ' for the {0}'.format(list_to_str(out_list)) else: for_str = '' desc = ''' A {variants} {seqs} field map (phase encoding: {dir_}; {n_slices} slices; repetition time, TR={tr}ms; echo time, TE={te}ms; flip angle, FA={fa}<deg>; field of view, FOV={fov}mm; matrix size={ms}; voxel size={vs}mm) was acquired{for_str}. '''.format(variants=variants, seqs=seqs, dir_=dir_, for_str=for_str, n_slices=n_slices, tr=num_to_str(metadata['RepetitionTime']*1000), te=te, fa=metadata.get('FlipAngle', 'UNKNOWN'), vs=vs_str, fov=fov_str, ms=ms_str) desc = desc.replace('\n', ' ').lstrip() while ' ' in desc: desc = desc.replace(' ', ' ') return desc
0.000928
def search_dependencies(self):
    """Returns a list of modules that this executable needs in order to run
    properly. This includes special kind declarations for precision or derived
    types, but not dependency executable calls.
    """
    # It is understood that this executable's module is obviously required. Just
    # add any additional modules from the parameters.
    result = [p.dependency() for p in self.ordered_parameters]
    result.extend([v.dependency() for k, v in list(self.members.items())])
    for ekey, anexec in list(self.executables.items()):
        result.extend(anexec.search_dependencies())

    return [m for m in result if m is not None and m != self.module.name]
0.00813
def index(self, i, length=None):
    """Return an integer index or None"""
    if self.begin <= i <= self.end:
        index = i - self.BEGIN - self.offset
        if length is None:
            length = self.full_range()
        else:
            length = min(length, self.full_range())
        if 0 <= index < length:
            return index
0.005249
def set(self, n=None, ftype=None, colfac=None, lmfac=None, fid=0): """Set selected properties of the fitserver instance. All unset properties remain the same (in the :meth:`init` method all properties are (re-)initialized). Like in the constructor, the number of unknowns to be solved for; the number of simultaneous solutions; the ftype (as code); and the collinearity and Levenberg-Marquardt factor can be specified. :param n: number of unknowns :param ftype: type of solution Allowed: real, complex, separable, asreal, conjugate :param colfac: collinearity factor :param lmfac: Levenberg-Marquardt factor :param fid: the id of a sub-fitter """ self._checkid(fid) if ftype is None: ftype = -1 else: ftype = self._gettype(ftype) if n is None: n = -1 elif n < 0: raise ValueError("Illegal set argument n") if colfac is None: colfac = -1 elif colfac < 0: raise ValueError("Illegal set argument colfac") if lmfac is None: lmfac = -1 elif lmfac < 0: raise ValueError("Illegal set argument lmfac") self._fitids[fid]["stat"] = False self._fitids[fid]["solved"] = False self._fitids[fid]["haserr"] = False self._fitids[fid]["fit"] = True self._fitids[fid]["looped"] = False if n != -1 or ftype != -1 or colfac != -1 or lmfac != -1: if not self._fitproxy.set(fid, n, ftype, colfac, lmfac): return False self._fitids[fid]["stat"] = self._getstate(fid) return True
0.001153
def copy(self, dest, src):
    """Copy element from sequence, member from mapping.

    :param dest: the destination
    :type dest: Pointer
    :param src: the source
    :type src: Pointer
    :return: resolved document
    :rtype: Target
    """
    doc = fragment = deepcopy(self.document)
    for token in Pointer(src):
        fragment = token.extract(fragment, bypass_ref=True)
    return Target(doc).add(dest, fragment)
0.004202
def _dir_exists(db, user_id, db_dirname):
    """
    Internal implementation of dir_exists.

    Expects a db-style path name.
    """
    return db.execute(
        select(
            [func.count(directories.c.name)],
        ).where(
            and_(
                directories.c.user_id == user_id,
                directories.c.name == db_dirname,
            ),
        )
    ).scalar() != 0
0.0025
def SocketWriter(host, port, af=None, st=None):
    """
    Writes messages to a socket/host.
    """
    import socket
    if af is None:
        af = socket.AF_INET
    if st is None:
        st = socket.SOCK_STREAM
    message = '({0}): {1}'
    s = socket.socket(af, st)
    # socket.connect() takes a single (host, port) address tuple
    s.connect((host, port))
    try:
        while True:
            logstr = (yield)
            s.send(logstr)
    except GeneratorExit:
        s.close()
0.002375
def GetParserPluginsInformation(cls, parser_filter_expression=None):
    """Retrieves the parser plugins information.

    Args:
        parser_filter_expression (Optional[str]): parser filter expression,
            where None represents all parsers and plugins.

    Returns:
        list[tuple[str, str]]: pairs of parser plugin names and descriptions.
    """
    parser_plugins_information = []
    for _, parser_class in cls.GetParsers(
            parser_filter_expression=parser_filter_expression):
        if parser_class.SupportsPlugins():
            for plugin_name, plugin_class in parser_class.GetPlugins():
                description = getattr(plugin_class, 'DESCRIPTION', '')
                parser_plugins_information.append((plugin_name, description))

    return parser_plugins_information
0.006402
def parse(file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False): """ The main method. Args: - file_path (string): full path to gct(x) file you want to parse - convert_neg_666 (bool): whether to convert -666 values to numpy.nan (see Note below for more details). Default = False. - rid (list of strings): list of row ids to specifically keep from gct. Default=None. - cid (list of strings): list of col ids to specifically keep from gct. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Returns: - myGCToo (GCToo object): A GCToo instance containing content of parsed gct file ** OR ** - row_metadata (pandas df) ** OR ** col_metadata (pandas df) Note: why is convert_neg_666 even a thing? In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.nan values, the pandas default. """ assert sum([row_meta_only, col_meta_only]) <= 1, ( "row_meta_only and col_meta_only cannot both be requested.") nan_values = [ "#N/A", "N/A", "NA", "#NA", "NULL", "NaN", "-NaN", "nan", "-nan", "#N/A!", "na", "NA", "None", "#VALUE!"] # Add "-666" to the list of NaN values if convert_neg_666: nan_values.append("-666") # Verify that the gct path exists if not os.path.exists(file_path): err_msg = "The given path to the gct file cannot be found. gct_path: {}" logger.error(err_msg.format(file_path)) raise Exception(err_msg.format(file_path)) logger.info("Reading GCT: {}".format(file_path)) # Read version and dimensions (version, num_data_rows, num_data_cols, num_row_metadata, num_col_metadata) = read_version_and_dims(file_path) # Read in metadata and data (row_metadata, col_metadata, data) = parse_into_3_df( file_path, num_data_rows, num_data_cols, num_row_metadata, num_col_metadata, nan_values) # Create the gctoo object and assemble 3 component dataframes # Not the most efficient if only metadata requested (i.e. creating the # whole GCToo just to return the metadata df), but simplest myGCToo = create_gctoo_obj(file_path, version, row_metadata, col_metadata, data, make_multiindex) # Subset if requested if (rid is not None) or (ridx is not None) or (cid is not None) or (cidx is not None): logger.info("Subsetting GCT... (note that there are no speed gains when subsetting GCTs)") myGCToo = sg.subset_gctoo(myGCToo, rid=rid, cid=cid, ridx=ridx, cidx=cidx) if row_meta_only: return myGCToo.row_metadata_df elif col_meta_only: return myGCToo.col_metadata_df else: return myGCToo
0.003023
def random_polygon(segments=8, radius=1.0):
    """
    Generate a random polygon with a maximum number of sides and approximate radius.

    Parameters
    ---------
    segments: int, the maximum number of sides the random polygon will have
    radius:   float, the approximate radius of the polygon desired

    Returns
    ---------
    polygon: shapely.geometry.Polygon object with random exterior, and no interiors.
    """
    angles = np.sort(np.cumsum(np.random.random(
        segments) * np.pi * 2) % (np.pi * 2))
    radii = np.random.random(segments) * radius
    points = np.column_stack(
        (np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1))
    points = np.vstack((points, points[0]))
    polygon = Polygon(points).buffer(0.0)
    if util.is_sequence(polygon):
        return polygon[0]
    return polygon
0.003597
def as_html(self, path=""): """ Return a rendering of the current state in HTML. """ if path not in self.top_level_links: raise StateError("Unknown path") header = """ <html> <head> <title>VPC-router state</title> </head> <body> <h3>VPC-router state</h3> <hr> <font face="courier"> """ footer = """ </font> </body> </html> """ rep = self.get_state_repr(path) def make_links(rep): # Recursively create clickable links for _href elements for e, v in rep.items(): if e == "_href": v = '<a href=%s>%s</a>' % (v, v) rep[e] = v else: if type(v) == dict: make_links(v) make_links(rep) rep_str_lines = json.dumps(rep, indent=4).split("\n") buf = [] for l in rep_str_lines: # Replace leading spaces with '&nbsp;' num_spaces = len(l) - len(l.lstrip()) l = "&nbsp;" * num_spaces + l[num_spaces:] buf.append(l) return "%s%s%s" % (header, "<br>\n".join(buf), footer)
0.003019
def total_bytes_processed(self):
    """Return total bytes processed from job statistics, if present.

    See:
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesProcessed

    :rtype: int or None
    :returns: total bytes processed by the job, or None if job is not
              yet complete.
    """
    result = self._job_statistics().get("totalBytesProcessed")
    if result is not None:
        result = int(result)
    return result
0.003802
def setup(self, environ):
    '''Called once to setup the list of wsgi middleware.'''
    json_handler = Root().putSubHandler('calc', Calculator())
    middleware = wsgi.Router('/', post=json_handler,
                             accept_content_types=JSON_CONTENT_TYPES)
    response = [wsgi.GZipMiddleware(200)]
    return wsgi.WsgiHandler(middleware=[wsgi.wait_for_body_middleware,
                                        middleware],
                            response_middleware=response)
0.003802
def run(self, cmd, target=None, lock_name=None, shell=None, nofail=False, clean=False, follow=None, container=None): """ The primary workhorse function of PipelineManager, this runs a command. This is the command execution function, which enforces race-free file-locking, enables restartability, and multiple pipelines can produce/use the same files. The function will wait for the file lock if it exists, and not produce new output (by default) if the target output file already exists. If the output is to be created, it will first create a lock file to prevent other calls to run (for example, in parallel pipelines) from touching the file while it is being created. It also records the memory of the process and provides some logging output. :param str | list[str] cmd: Shell command(s) to be run. :param str | Sequence[str] target: Output file(s) to produce, optional. If all target files exist, the command will not be run. If no target is given, a lock_name must be provided. :param str lock_name: Name of lock file. Optional. :param bool shell: If command requires should be run in its own shell. Optional. Default: None --will try to determine whether the command requires a shell. :param bool nofail: Whether the pipeline proceed past a nonzero return from a process, default False; nofail can be used to implement non-essential parts of the pipeline; if a 'nofail' command fails, the pipeline is free to continue execution. :param bool clean: True means the target file will be automatically added to an auto cleanup list. Optional. :param callable follow: Function to call after executing (each) command. :param str container: Name for Docker container in which to run commands. :return int: Return code of process. If a list of commands is passed, this is the maximum of all return codes for all commands. """ # If the pipeline's not been started, skip ahead. if not self._active: cmds = [cmd] if isinstance(cmd, str) else cmd cmds_text = [c if isinstance(c, str) else " ".join(c) for c in cmds] print("Pipeline is inactive; skipping {} command(s):\n{}". format(len(cmds), "\n".join(cmds_text))) return 0 # Short-circuit if the checkpoint file exists and the manager's not # been configured to overwrite such files. if self.curr_checkpoint is not None: check_fpath = checkpoint_filepath(self.curr_checkpoint, self) if os.path.isfile(check_fpath) and not self.overwrite_checkpoints: print("Checkpoint file exists for '{}' ('{}'), and the {} has " "been configured to not overwrite checkpoints; " "skipping command '{}'".format( self.curr_checkpoint, check_fpath, self.__class__.__name__, cmd)) return 0 # TODO: consider making the logic such that locking isn't implied, or # TODO (cont.): that we can make it otherwise such that it's not # TODO (cont.): strictly necessary to provide target or lock_name. # The default lock name is based on the target name. # Therefore, a targetless command that you want # to lock must specify a lock_name manually. 
if target is None and lock_name is None: self.fail_pipeline(Exception( "You must provide either a target or a lock_name.")) # Downstream code requires target to be a list, so convert if only # a single item was given if not is_multi_target(target) and target is not None: target = [target] # Downstream code requires a list of locks; convert if isinstance(lock_name, str): lock_name = [lock_name] # Default lock_name (if not provided) is based on the target file name, # but placed in the parent pipeline outfolder lock_name = lock_name or make_lock_name(target, self.outfolder) lock_files = [self._make_lock_path(ln) for ln in lock_name] process_return_code = 0 local_maxmem = 0 # Decide how to do follow-up. if not follow: call_follow = lambda: None elif not hasattr(follow, "__call__"): # Warn about non-callable argument to follow-up function. print("Follow-up function is not callable and won't be used: {}". format(type(follow))) call_follow = lambda: None else: # Wrap the follow-up function so that the log shows what's going on. def call_follow(): print("Follow:") follow() # The while=True loop here is unlikely to be triggered, and is just a # wrapper to prevent race conditions; the lock_file must be created by # the current loop. If not, we loop again and then re-do the tests. # The recover and newstart options inform the pipeline to run a command # in a scenario where it normally would not. We use these "local" flags # to allow us to report on the state of the pipeline in the first round # as normal, but then proceed on the next iteration through the outer # loop. The proceed_through_locks is a flag that is set if any lockfile # is found that needs to be recovered or overwritten. It instructs us to # ignore lock files on the next iteration. local_recover = False local_newstart = False proceed_through_locks = False while True: ##### Tests block # Base case: All targets exists and not set to overwrite targets break loop, don't run process. # os.path.exists returns True for either a file or directory; .isfile is file-only if target is not None and all([os.path.exists(t) for t in target]) \ and not any([os.path.isfile(l) for l in lock_files]) \ and not local_newstart: for tgt in target: if os.path.exists(tgt): print("Target exists: `" + tgt + "`") if self.new_start: print("New start mode; run anyway.") # Set the local_newstart flag so the command will run anyway. # Doing this in here instead of outside the loop allows us # to still report the target existence. local_newstart = True continue # Normally we don't run the follow, but if you want to force. . . if self.force_follow: call_follow() break # Do not run command # Scenario 1: Lock file exists, but we're supposed to overwrite target; Run process. if not proceed_through_locks: for lock_file in lock_files: recover_file = self._recoverfile_from_lockfile(lock_file) if os.path.isfile(lock_file): print("Found lock file: {}".format(lock_file)) if self.overwrite_locks: print("Overwriting target. . .") proceed_through_locks = True elif os.path.isfile(recover_file): print("Found dynamic recovery file ({}); " "overwriting target. . .".format(recover_file)) # remove the lock file which will then be promptly re-created for the current run. 
local_recover = True proceed_through_locks = True # the recovery flag is now spent; remove so we don't accidentally re-recover a failed job os.remove(recover_file) else: # don't overwrite locks self._wait_for_lock(lock_file) # when it's done loop through again to try one more # time (to see if the target exists now) continue # If you get to this point, the target doesn't exist, and the lock_file doesn't exist # (or we should overwrite). create the lock (if you can) # Initialize lock in master lock list for lock_file in lock_files: self.locks.append(lock_file) if self.overwrite_locks or local_recover: self._create_file(lock_file) else: try: self._create_file_racefree(lock_file) # Create lock except OSError as e: if e.errno == errno.EEXIST: # File already exists print ("Lock file created after test! Looping again: {}".format( lock_file)) # Since a lock file was created by a different source, # we need to reset this flag to re-check the locks. proceed_through_locks = False continue # Go back to start ##### End tests block # If you make it past these tests, we should proceed to run the process. if target is not None: print("Target to produce: {}\n".format(",".join(['`'+x+'`' for x in target]))) else: print("Targetless command, running...\n") if isinstance(cmd, list): # Handle command lists for cmd_i in cmd: list_ret, maxmem = \ self.callprint(cmd_i, shell, lock_file, nofail, container) maxmem = max(maxmem) if isinstance(maxmem, Iterable) else maxmem local_maxmem = max(local_maxmem, maxmem) list_ret = max(list_ret) if isinstance(list_ret, Iterable) else list_ret process_return_code = max(process_return_code, list_ret) else: # Single command (most common) process_return_code, local_maxmem = \ self.callprint(cmd, shell, lock_file, nofail, container) # Run command if isinstance(process_return_code, list): process_return_code = max(process_return_code) # For temporary files, you can specify a clean option to automatically # add them to the clean list, saving you a manual call to clean_add if target is not None and clean: for tgt in target: self.clean_add(tgt) call_follow() for lock_file in lock_files: os.remove(lock_file) # Remove lock file self.locks.remove(lock_file) # If you make it to the end of the while loop, you're done break return process_return_code
0.004181
def _in(field, value, document):
    """
    Returns True if document[field] is in the iterable value. If the
    supplied value is not an iterable, then a MalformedQueryException is raised
    """
    try:
        values = iter(value)
    except TypeError:
        raise MalformedQueryException("'$in' must accept an iterable")

    return document.get(field, None) in values
0.002653
def from_serializable_data(cls, data, check_fks=True, strict_fks=False): """ Build an instance of this model from the JSON-like structure passed in, recursing into related objects as required. If check_fks is true, it will check whether referenced foreign keys still exist in the database. - dangling foreign keys on related objects are dealt with by either nullifying the key or dropping the related object, according to the 'on_delete' setting. - dangling foreign keys on the base object will be nullified, unless strict_fks is true, in which case any dangling foreign keys with on_delete=CASCADE will cause None to be returned for the entire object. """ obj = model_from_serializable_data(cls, data, check_fks=check_fks, strict_fks=strict_fks) if obj is None: return None child_relations = get_all_child_relations(cls) for rel in child_relations: rel_name = rel.get_accessor_name() try: child_data_list = data[rel_name] except KeyError: continue related_model = rel.related_model if hasattr(related_model, 'from_serializable_data'): children = [ related_model.from_serializable_data(child_data, check_fks=check_fks, strict_fks=True) for child_data in child_data_list ] else: children = [ model_from_serializable_data(related_model, child_data, check_fks=check_fks, strict_fks=True) for child_data in child_data_list ] children = filter(lambda child: child is not None, children) setattr(obj, rel_name, children) return obj
0.004883
def groups(self):
    """Method returns a list of all group paths

    Examples
    --------
    >>> for group in h5f.groups():
            print(group)
    '/'
    '/dataset1'
    '/dataset1/data1'
    '/dataset1/data2'
    """
    HiisiHDF._clear_cache()
    self.CACHE['group_paths'].append('/')
    self.visititems(HiisiHDF._is_group)
    return HiisiHDF.CACHE['group_paths']
0.010917
def set_size(self, size):
    '''
    Size is only set the first time it is called
    Size that is set is returned
    '''
    if self.size is None:
        self.size = size
        return size
    else:
        return self.size
0.007519
def contains_content_items(self, request, pk, course_run_ids, program_uuids):
    """
    Return whether or not the specified content is available to the EnterpriseCustomer.

    Multiple course_run_ids and/or program_uuids query parameters can be sent to
    this view to check for their existence in the EnterpriseCustomerCatalogs
    associated with this EnterpriseCustomer. At least one course run key or program
    UUID value must be included in the request.
    """
    enterprise_customer = self.get_object()

    # Maintain plus characters in course key.
    course_run_ids = [unquote(quote_plus(course_run_id)) for course_run_id in course_run_ids]

    contains_content_items = False
    for catalog in enterprise_customer.enterprise_customer_catalogs.all():
        contains_course_runs = not course_run_ids or catalog.contains_courses(course_run_ids)
        contains_program_uuids = not program_uuids or catalog.contains_programs(program_uuids)
        if contains_course_runs and contains_program_uuids:
            contains_content_items = True
            break

    return Response({'contains_content_items': contains_content_items})
0.007414
def perform(action_name, container, **kwargs):
    """
    Performs an action on the given container map and configuration.

    :param action_name: Name of the action (e.g. ``update``).
    :param container: Container configuration name.
    :param kwargs: Keyword arguments for the action implementation.
    """
    cf = container_fabric()
    cf.call(action_name, container, **kwargs)
0.002577
def H(self, H):
    """
    Set the enthalpy of the package to the specified value, and
    recalculate its temperature.

    :param H: The new enthalpy value. [kWh]
    """
    self._H = H
    self._T = self._calculate_T(H)
0.007874
def rasterize(self, pitch, origin, resolution=None, fill=True, width=None, **kwargs):
    """
    Rasterize a Path2D object into a boolean image ("mode 1").

    Parameters
    ------------
    pitch:      float, length in model space of a pixel edge
    origin:     (2,) float, origin position in model space
    resolution: (2,) int, resolution in pixel space
    fill:       bool, if True will return closed regions as filled
    width:      int, if not None will draw outline this wide (pixels)

    Returns
    ------------
    raster: PIL.Image object, mode 1
    """
    image = raster.rasterize(self,
                             pitch=pitch,
                             origin=origin,
                             resolution=resolution,
                             fill=fill,
                             width=width)
    return image
0.007744
def get_icon(name, aspix=False, asicon=False):
    """Return the real file path to the given icon name

    If aspix is True return as QtGui.QPixmap, if asicon is True return as QtGui.QIcon.

    :param name: the name of the icon
    :type name: str
    :param aspix: If True, return a QtGui.QPixmap.
    :type aspix: bool
    :param asicon: If True, return a QtGui.QIcon.
    :type asicon: bool
    :returns: The real file path to the given icon name.
              If aspix is True return as QtGui.QPixmap, if asicon is True
              return as QtGui.QIcon. If both are True, a QtGui.QIcon is returned.
    :rtype: string
    :raises: None
    """
    datapath = os.path.join(ICON_PATH, name)
    icon = pkg_resources.resource_filename('jukeboxcore', datapath)
    if aspix or asicon:
        icon = QtGui.QPixmap(icon)
        if asicon:
            icon = QtGui.QIcon(icon)
    return icon
0.003348
def o3(df):
    """
    Regulatory computations for ozone.

    Parameters:
    df: DataFrame containing the measurements, with a time index
        (see xair.get_mesure)

    Returns:
    A set of results in a DataFrame:
    ******
    unit (u): µg/m3 (microgram per cubic metre)

    Information/recommendation threshold over 1H: 180u
    Alert threshold over 1H: 240u
    Alert threshold over 3 consecutive hours: 240u
    Alert threshold over 3 consecutive hours: 300u
    Alert threshold over 1H: 360u
    Quality objective for human health over rolling 8H: 120u

    Results are given as hours of exceedance.
    """
    polluant = 'O3'

    # The DataFrame must be at hourly frequency
    if not isinstance(df.index.freq, pdoffset.Hour):
        raise FreqException("df doit être en heure.")

    res = {"Seuil de RI sur 1H: 180u": depassement(df, valeur=180),
           "Seuil d'Alerte sur 1H: 240u": depassement(df, valeur=240),
           "Seuil d'Alerte sur 1H: 360u": depassement(df, valeur=360),
           "Seuil d'Alerte sur 3H consécutives: 240u": consecutive(df, valeur=240, sur=3),
           "Seuil d'Alerte sur 3H consécutives: 300u": consecutive(df, valeur=300, sur=3),
           "Objectif de qualité pour la santé humaine sur 8H glissantes: 120u": depassement(
               moyennes_glissantes(df, sur=8), valeur=120),
           }

    return polluant, res
0.002924
def GetDateRange(self):
    """Returns a tuple of (earliest, latest) dates on which the service periods
    in the schedule define service, in YYYYMMDD form.
    """
    (minvalue, maxvalue, minorigin, maxorigin) = self.GetDateRangeWithOrigins()
    return (minvalue, maxvalue)
0.00361
def get_number_of_particles():
    """
    Queries the ``dynac.short`` file for the number of particles used in the simulation.
    """
    with open('dynac.short') as f:
        data_str = ''.join(line for line in f.readlines())
    num_of_parts = int(data_str.split('Simulation with')[1].strip().split()[0])
    return num_of_parts
0.005865
def get_works(self):
    """Returns a list of the names of all works in the corpus.

    :rtype: `list` of `str`
    """
    return [os.path.split(filepath)[1] for filepath in
            glob.glob(os.path.join(self._path, '*'))
            if os.path.isdir(filepath)]
0.006826
def get_gallery_file(self, file_id, output_file_path=None, scope='content/read'):
    """
    Get a file in the Mxit user's gallery

    User authentication required with the following scope: 'content/read'
    """
    data = _get(
        token=self.oauth.get_user_token(scope),
        uri='/user/media/content/' + urllib.quote(file_id)
    )

    if output_file_path:
        with open(output_file_path, 'w') as f:
            f.write(data)
    else:
        return data
0.005725
def open(name=None, fileobj=None, closefd=True):
    """
    Try all available decompressors to open the stream
    """
    return Guesser().open(name=name, fileobj=fileobj, closefd=closefd)
0.005319
def default_bitcoind_opts(config_file=None, prefix=False):
    """
    Get our default bitcoind options, such as from a config file,
    or from sane defaults
    """
    default_bitcoin_opts = virtualchain.get_bitcoind_config(config_file=config_file)

    # drop dict values that are None
    default_bitcoin_opts = {k: v for k, v in default_bitcoin_opts.items() if v is not None}

    # strip 'bitcoind_'
    if not prefix:
        default_bitcoin_opts = opt_strip('bitcoind_', default_bitcoin_opts)

    return default_bitcoin_opts
0.005607
def pathname(self):
    """
    Path name is a recursive representation: the parent path name plus the name
    which was assigned to this object by its parent. In other words, it is a stack
    of parent names where the top is always the parent's original name:
    `parent.pathname + parent.childname`, and the stop condition is the root's name.
    For example, the pathname of an instance with two parents may look like
    `parent0/parent1/childname_at_parent1`. The top parent's name equals its
    original name: `parent0.name == parent0.pathname`.
    """
    if self.parent is self:
        return self.name
    parent = self._parent
    return misc.tensor_name(parent.pathname, parent.childname(self))
0.004021
def _extract_table(table_data, current, pc, ts, tt): """ Use the given table data to create a time series entry for each column in the table. :param dict table_data: Table data :param dict current: LiPD root data :param str pc: paleoData or chronData :param list ts: Time series (so far) :param bool summary: Summary Table or not :return list ts: Time series (so far) """ current["tableType"] = tt # Get root items for this table current = _extract_table_root(table_data, current, pc) # Add in modelNumber and tableNumber if this is "ens" or "summ" table current = _extract_table_model(table_data, current, tt) # Add age, depth, and year columns to root if available _table_tmp = _extract_special(current, table_data) try: # Start creating entries using dictionary copies. for _col_name, _col_data in table_data["columns"].items(): # Add column data onto root items. Copy so we don't ruin original data _col_tmp = _extract_columns(_col_data, copy.deepcopy(_table_tmp), pc) try: ts.append(_col_tmp) except Exception as e: logger_ts.warn("extract_table: Unable to create ts entry, {}".format(e)) except Exception as e: logger_ts.error("extract_table: {}".format(e)) return ts
0.003693
def hide_routemap_holder_route_map_content_set_distance_dist_rms(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder",
                                         xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    set = ET.SubElement(content, "set")
    distance = ET.SubElement(set, "distance")
    dist_rms = ET.SubElement(distance, "dist-rms")
    dist_rms.text = kwargs.pop('dist_rms')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003929
def send_mail(subject, message, from_email, recipient_list, html_message='',
              scheduled_time=None, headers=None, priority=PRIORITY.medium):
    """
    Add a new message to the mail queue. This is a replacement for Django's
    ``send_mail`` core email method.
    """

    subject = force_text(subject)
    status = None if priority == PRIORITY.now else STATUS.queued
    emails = []
    for address in recipient_list:
        emails.append(
            Email.objects.create(
                from_email=from_email, to=address, subject=subject,
                message=message, html_message=html_message, status=status,
                headers=headers, priority=priority, scheduled_time=scheduled_time
            )
        )
    if priority == PRIORITY.now:
        for email in emails:
            email.dispatch()
    return emails
0.002361
def do_gate(self, gate: Gate) -> 'AbstractQuantumSimulator':
    """
    Perform a gate.

    :return: ``self`` to support method chaining.
    """
    unitary = lifted_gate(gate=gate, n_qubits=self.n_qubits)
    self.density = unitary.dot(self.density).dot(np.conj(unitary).T)
    return self
0.006231
def getProperty(self, prop, *args, **kwargs):
    """
    Get the value of a property. See the corresponding method for
    the required arguments. For example, for the property _NET_WM_STATE,
    look for :meth:`getWmState`
    """
    f = self.__getAttrs.get(prop)
    if not f:
        raise KeyError('Unknown readable property: %s' % prop)
    return f(self, *args, **kwargs)
0.004808
def Interpolate(time, mask, y):
    '''
    Masks certain elements in the array `y` and linearly
    interpolates over them, returning an array `y'` of the same length.

    :param array_like time: The time array
    :param array_like mask: The indices to be interpolated over
    :param array_like y: The dependent array
    '''
    # Ensure `y` doesn't get modified in place
    yy = np.array(y)
    t_ = np.delete(time, mask)
    y_ = np.delete(y, mask, axis=0)
    if len(yy.shape) == 1:
        yy[mask] = np.interp(time[mask], t_, y_)
    elif len(yy.shape) == 2:
        for n in range(yy.shape[1]):
            yy[mask, n] = np.interp(time[mask], t_, y_[:, n])
    else:
        raise Exception("Array ``y`` must be either 1- or 2-d.")
    return yy
0.001311
def get_info(self):
    """
    Return plugin information.
    """
    return {
        self.get_plugin_name(): {
            "version": self.get_version(),
            "params": {
                "file": self.conf['file']
            },
            "stats": {
                "last_route_spec_update":
                    self.last_route_spec_update.isoformat()
                    if self.last_route_spec_update else "(no update, yet)"
            }
        }
    }
0.01487
def visitEqualityExpression(self, ctx):
    """
    expression: expression (EQ | NEQ) expression
    """
    arg1, arg2 = conversions.to_same(self.visit(ctx.expression(0)),
                                     self.visit(ctx.expression(1)),
                                     self._eval_context)

    if isinstance(arg1, str):
        # string equality is case-insensitive
        equal = arg1.lower() == arg2.lower()
    else:
        equal = arg1 == arg2

    return equal if ctx.EQ() is not None else not equal
0.006237
def fire_update_event(self, *args, **kwargs):
    """Trigger the method tied to _on_update"""
    for _handler in self._on_update:
        _handler(*args, **kwargs)
0.011364
def right_click_zijderveld(self, event):
    """
    toggles between zoom and pan effects for the zijderveld on right click

    Parameters
    ----------
    event : the wx.MouseEvent that triggered the call of this function

    Alters
    ------
    zijderveld_setting, toolbar1 setting
    """
    if event.LeftIsDown() or event.ButtonDClick():
        return
    elif self.zijderveld_setting == "Zoom":
        self.zijderveld_setting = "Pan"
        try:
            self.toolbar1.pan('off')
        except TypeError:
            pass
    elif self.zijderveld_setting == "Pan":
        self.zijderveld_setting = "Zoom"
        try:
            self.toolbar1.zoom()
        except TypeError:
            pass
0.00246
def right(X, i):
    """Compute the orthogonal matrix Q_{\geq i} as defined in [1]."""
    if i > X.d - 1:
        return np.ones([1, 1])
    answ = np.ones([1, 1])
    cores = tt.tensor.to_list(X)
    for dim in xrange(X.d - 1, i - 1, -1):
        answ = np.tensordot(cores[dim], answ, 1)
    answ = reshape(answ, (X.r[i], -1))
    return answ.T
0.005882
def _magic(header, footer, mime, ext=None):
    """ Discover what type of file it is based on the incoming string """
    if not header:
        raise ValueError("Input was empty")
    info = _identify_all(header, footer, ext)[0]
    if mime:
        return info.mime_type
    return info.extension if not \
        isinstance(info.extension, list) else info[0].extension
0.002695
def log(logger=None, start_message='Starting...', end_message='Done...'):
    """
    Basic log decorator

    Can be used as:
    - @log (with default logger)
    - @log(mylogger)
    - @log(start_message='Hello !', logger=mylogger, end_message='Bye !')
    """
    def actual_log(f, real_logger=logger):
        logger = real_logger or _logger

        @wraps(f)
        def timed(*args, **kwargs):
            logger.info(f'{f.__name__} - {start_message}')
            start = time.time()
            res = f(*args, **kwargs)
            end = time.time()
            logger.info(f'{f.__name__} - {end_message} (took {end - start:.2f}s)')
            return res
        return timed

    if callable(logger):
        return actual_log(logger, real_logger=None)
    return actual_log
0.002545
def start(self):
    """
    Try to init the main sub-components (:func:`~responsebot.utils.handler_utils.discover_handler_classes`, \
    :func:`~responsebot.utils.auth_utils.auth`, :class:`~responsebot.responsebot_stream.ResponseBotStream`, etc.)
    """
    logging.info('ResponseBot started')

    handler_classes = handler_utils.discover_handler_classes(self.config.get('handlers_package'))
    if len(handler_classes) == 0:
        logging.warning('No handler found. Did you forget to extend BaseTweethandler? Check --handlers-module')

    while True:
        try:
            client = auth_utils.auth(self.config)
            listener = ResponseBotListener(client=client, handler_classes=handler_classes)
            stream = ResponseBotStream(client=client, listener=listener)
            stream.start()
        except (APIQuotaError, AuthenticationError, TweepError) as e:
            self.handle_error(e)
        else:
            break
0.006856
def flatten(iterable, map2iter=None):
    """recursively flatten nested objects"""
    # The original called isinstance() with a single argument, which is invalid;
    # a Mapping check is assumed here for the map2iter conversion.
    if map2iter and isinstance(iterable, abc.Mapping):
        iterable = map2iter(iterable)

    for item in iterable:
        if isinstance(item, str) or not isinstance(item, abc.Iterable):
            yield item
        else:
            yield from flatten(item, map2iter)
0.002899
def calling_code(f, f_name=None, raise_for_missing=True):
    """Return the code string for calling a function. """
    import inspect
    from ambry.dbexceptions import ConfigurationError

    if inspect.isclass(f):
        try:
            args = inspect.getargspec(f.__init__).args
        except TypeError as e:
            raise TypeError("Failed to inspect {}: {}".format(f, e))
    else:
        args = inspect.getargspec(f).args

    if len(args) > 1 and args[0] == 'self':
        args = args[1:]

    for a in args:
        if a not in all_args + ('exception',):  # exception arg is only for exception handlers
            if raise_for_missing:
                raise ConfigurationError('Caster code {} has unknown argument '
                                         'name: \'{}\'. Must be one of: {} '.format(f, a, ','.join(all_args)))

    arg_map = {e: e for e in var_args}
    args = [arg_map.get(a, a) for a in args]

    return "{}({})".format(f_name if f_name else f.__name__, ','.join(args))
0.00297
def _use_rev_b_archive(self, records, offset):
    '''
    return True if weather station returns Rev.B archives
    '''
    # if pre-determined, return result
    if type(self._ARCHIVE_REV_B) is bool:
        return self._ARCHIVE_REV_B

    # assume B and check 'RecType' field
    data = ArchiveBStruct.unpack_from(records, offset)
    if data['RecType'] == 0:
        log.info('detected archive rev. B')
        self._ARCHIVE_REV_B = True
    else:
        log.info('detected archive rev. A')
        self._ARCHIVE_REV_B = False
    return self._ARCHIVE_REV_B
0.00321
def urlparse(url):
    """Parse the URL in a Python2/3 independent fashion.

    :param str url: The URL to parse
    :rtype: Parsed
    """
    value = 'http%s' % url[5:] if url[:5] == 'postgresql' else url
    parsed = _urlparse.urlparse(value)
    path, query = parsed.path, parsed.query
    hostname = parsed.hostname if parsed.hostname else ''
    return PARSED(parsed.scheme.replace('http', 'postgresql'),
                  parsed.netloc,
                  path,
                  parsed.params,
                  query,
                  parsed.fragment,
                  parsed.username,
                  parsed.password,
                  hostname.replace('%2f', '/'),
                  parsed.port)
0.001406
def _filesystem(self, **kwargs):
    """Returns a :class:`FileSystemCache` instance"""
    kwargs.update(dict(
        threshold=self._config('threshold', 500),
    ))
    return FileSystemCache(self._config('dir', None), **kwargs)
0.007968
def create_milestone(self, title, state=None, description=None, due_on=None):
    """Create a milestone for this repository.

    :param str title: (required), title of the milestone
    :param str state: (optional), state of the milestone, accepted
        values: ('open', 'closed'), default: 'open'
    :param str description: (optional), description of the milestone
    :param str due_on: (optional), ISO 8601 formatted due date
    :returns: :class:`Milestone <github3.issues.milestone.Milestone>` if
        successful, otherwise None
    """
    url = self._build_url('milestones', base_url=self._api)
    if state not in ('open', 'closed'):
        state = None
    data = {'title': title, 'state': state,
            'description': description, 'due_on': due_on}
    self._remove_none(data)
    json = None
    if data:
        json = self._json(self._post(url, data=data), 201)
    return Milestone(json, self) if json else None
0.002885
def strip_doc_string(proto):  # type: (google.protobuf.message.Message) -> None
    """
    Empties `doc_string` field on any nested protobuf messages
    """
    assert isinstance(proto, google.protobuf.message.Message)
    for descriptor in proto.DESCRIPTOR.fields:
        if descriptor.name == 'doc_string':
            proto.ClearField(descriptor.name)
        elif descriptor.type == descriptor.TYPE_MESSAGE:
            if descriptor.label == descriptor.LABEL_REPEATED:
                for x in getattr(proto, descriptor.name):
                    strip_doc_string(x)
            elif proto.HasField(descriptor.name):
                strip_doc_string(getattr(proto, descriptor.name))
0.001449
def get_zones(self, q=None, **kwargs):
    """Returns a list of zones across all of the user's accounts.

    Keyword Arguments:
    q -- The search parameters, in a dict. Valid keys are:
         name - substring match of the zone name
         zone_type - one of: PRIMARY, SECONDARY, ALIAS
    sort -- The sort column used to order the list. Valid values for the
            sort field are: NAME, ACCOUNT_NAME, RECORD_COUNT, ZONE_TYPE
    reverse -- Whether the list is ascending(False) or descending(True)
    offset -- The position in the list for the first returned element(0 based)
    limit -- The maximum number of rows to be returned.
    """
    uri = "/v1/zones"
    params = build_params(q, kwargs)
    return self.rest_api_connection.get(uri, params)
0.004386
def _perform_merge(self, other):
    """
    Merges the longer string
    """
    if len(other.value) > len(self.value):
        self.value = other.value[:]
    return True
0.010204
def allowCommission(self):
    """start commissioner candidate petition process

    Returns:
        True: successful to start commissioner candidate petition process
        False: fail to start commissioner candidate petition process
    """
    print '%s call allowCommission' % self.port
    try:
        cmd = WPANCTL_CMD + 'commissioner start'
        print cmd
        if self.isActiveCommissioner:
            return True
        if self.__sendCommand(cmd)[0] != 'Fail':
            self.isActiveCommissioner = True
            time.sleep(40)  # time for petition process and at least one keep alive
            return True
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('allowcommission() error: ' + str(e))
0.005875
def filter_dict(d, cb):
    """
    Filter a dictionary based on passed function.

    :param d: The dictionary to be filtered
    :param cb: A function which is called back for each k, v pair of the dictionary.
               Should return Truthy or Falsey
    :return: The filtered dictionary (new instance)
    """
    return {k: v for k, v in d.items() if cb(k, v)}
0.005634
def loop_misc(self):
    """Misc loop."""
    self.check_keepalive()
    if self.last_retry_check + 1 < time.time():
        pass
    return NC.ERR_SUCCESS
0.017045
def sort_layout(thread, listfile, column=0):
    """
    Sort the syntelog table according to chromosomal positions. First orient the
    contents against threadbed, then for contents not in threadbed, insert to the
    nearest neighbor.
    """
    from jcvi.formats.base import DictFile

    outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
    threadorder = thread.order
    fw = open(outfile, "w")
    lt = DictFile(listfile, keypos=column, valuepos=None)
    threaded = []
    imported = set()
    for t in thread:
        accn = t.accn
        if accn not in lt:
            continue
        imported.add(accn)
        atoms = lt[accn]
        threaded.append(atoms)

    assert len(threaded) == len(imported)

    total = sum(1 for x in open(listfile))
    logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded)))
    fp = open(listfile)
    for row in fp:
        atoms = row.split()
        accn = atoms[0]
        if accn in imported:
            continue
        insert_into_threaded(atoms, threaded, threadorder)

    for atoms in threaded:
        print("\t".join(atoms), file=fw)

    fw.close()
    logging.debug("File `{0}` sorted to `{1}`.".format(outfile, thread.filename))
0.003273
def delegators(self, account):
    """
    Returns a list of pairs of delegator names given **account** a
    representative and its balance

    .. version 8.0 required

    :param account: Account to return delegators for
    :type account: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.delegators(
    ...     account="xrb_1111111111111111111111111111111111111111111111111117353trpda"
    ... )
    {
        "xrb_13bqhi1cdqq8yb9szneoc38qk899d58i5rcrgdk5mkdm86hekpoez3zxw5sd":
            "500000000000000000000000000000000000",
        "xrb_17k6ug685154an8gri9whhe5kb5z1mf5w6y39gokc1657sh95fegm8ht1zpn":
            "961647970820730000000000000000000000"
    }
    """
    account = self._process_value(account, 'account')
    payload = {"account": account}
    resp = self.call('delegators', payload)
    delegators = resp.get('delegators') or {}
    for k, v in delegators.items():
        delegators[k] = int(v)
    return delegators
0.002833
def vr60baro(msg):
    """Vertical rate from barometric measurement, this value may be very noisy.

    Args:
        msg (String): 28 bytes hexadecimal message (BDS60) string

    Returns:
        int: vertical rate in feet/minutes
    """
    d = hex2bin(data(msg))

    if d[34] == '0':
        return None

    sign = int(d[35])    # 1 -> negative value, two's complement
    value = bin2int(d[36:45])

    if value == 0 or value == 511:  # all zeros or all ones
        return 0

    value = value - 512 if sign else value
    roc = value * 32    # feet/min

    return roc
0.001733
async def query_pathings(self, zipped_list: List[List[Union[Unit, Point2, Point3]]]) -> List[Union[float, int]]: """ Usage: await self.query_pathings([[unit1, target2], [unit2, target2]]) -> returns [distance1, distance2] Caution: returns 0 when path not found Might merge this function with the function above """ assert zipped_list, "No zipped_list" assert isinstance(zipped_list, list), f"{type(zipped_list)}" assert isinstance(zipped_list[0], list), f"{type(zipped_list[0])}" assert len(zipped_list[0]) == 2, f"{len(zipped_list[0])}" assert isinstance(zipped_list[0][0], (Point2, Unit)), f"{type(zipped_list[0][0])}" assert isinstance(zipped_list[0][1], Point2), f"{type(zipped_list[0][1])}" if isinstance(zipped_list[0][0], Point2): results = await self._execute( query=query_pb.RequestQuery( pathing=[ query_pb.RequestQueryPathing( start_pos=common_pb.Point2D(x=p1.x, y=p1.y), end_pos=common_pb.Point2D(x=p2.x, y=p2.y) ) for p1, p2 in zipped_list ] ) ) else: results = await self._execute( query=query_pb.RequestQuery( pathing=[ query_pb.RequestQueryPathing(unit_tag=p1.tag, end_pos=common_pb.Point2D(x=p2.x, y=p2.y)) for p1, p2 in zipped_list ] ) ) results = [float(d.distance) for d in results.query.pathing] return results
0.004731
def assist(self): """Send a voice request to the Assistant and playback the response. Returns: True if conversation should continue. """ continue_conversation = False device_actions_futures = [] self.conversation_stream.start_recording() logging.info('Recording audio request.') def iter_log_assist_requests(): for c in self.gen_assist_requests(): assistant_helpers.log_assist_request_without_audio(c) yield c logging.debug('Reached end of AssistRequest iteration.') # This generator yields AssistResponse proto messages # received from the gRPC Google Assistant API. for resp in self.assistant.Assist(iter_log_assist_requests(), self.deadline): assistant_helpers.log_assist_response_without_audio(resp) if resp.event_type == END_OF_UTTERANCE: logging.info('End of audio request detected.') logging.info('Stopping recording.') self.conversation_stream.stop_recording() if resp.speech_results: logging.info('Transcript of user request: "%s".', ' '.join(r.transcript for r in resp.speech_results)) if len(resp.audio_out.audio_data) > 0: if not self.conversation_stream.playing: self.conversation_stream.stop_recording() self.conversation_stream.start_playback() logging.info('Playing assistant response.') self.conversation_stream.write(resp.audio_out.audio_data) if resp.dialog_state_out.conversation_state: conversation_state = resp.dialog_state_out.conversation_state logging.debug('Updating conversation state.') self.conversation_state = conversation_state if resp.dialog_state_out.volume_percentage != 0: volume_percentage = resp.dialog_state_out.volume_percentage logging.info('Setting volume to %s%%', volume_percentage) self.conversation_stream.volume_percentage = volume_percentage if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON: continue_conversation = True logging.info('Expecting follow-on query from user.') elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE: continue_conversation = False if resp.device_action.device_request_json: device_request = json.loads( resp.device_action.device_request_json ) fs = self.device_handler(device_request) if fs: device_actions_futures.extend(fs) if self.display and resp.screen_out.data: system_browser = browser_helpers.system_browser system_browser.display(resp.screen_out.data) if len(device_actions_futures): logging.info('Waiting for device executions to complete.') concurrent.futures.wait(device_actions_futures) logging.info('Finished playing assistant response.') self.conversation_stream.stop_playback() return continue_conversation
0.00059
def _diff_stack(self, stack, **kwargs):
        """Handles diffing a single stack in CloudFormation against our config."""
        if self.cancel.wait(0):
            return INTERRUPTED

        if not build.should_submit(stack):
            return NotSubmittedStatus()

        if not build.should_update(stack):
            return NotUpdatedStatus()

        provider = self.build_provider(stack)
        provider_stack = provider.get_stack(stack.fqn)

        # get the current stack template & params from AWS
        try:
            [old_template, old_params] = provider.get_stack_info(
                provider_stack)
        except exceptions.StackDoesNotExist:
            old_template = None
            old_params = {}

        stack.resolve(self.context, provider)
        # generate our own template & params
        parameters = self.build_parameters(stack)
        new_params = dict()
        for p in parameters:
            new_params[p['ParameterKey']] = p['ParameterValue']
        new_template = stack.blueprint.rendered
        new_stack = normalize_json(new_template)

        output = ["============== Stack: %s ==============" % (stack.name,)]
        # If this is a completely new template dump our params & stack
        if not old_template:
            output.extend(self._build_new_template(new_stack, parameters))
        else:
            # Diff our old & new stack/parameters
            old_template = parse_cloudformation_template(old_template)
            if isinstance(old_template, str):
                # YAML templates returned from CFN need parsing again
                # "AWSTemplateFormatVersion: \"2010-09-09\"\nParam..."
                # ->
                # AWSTemplateFormatVersion: "2010-09-09"
                old_template = parse_cloudformation_template(old_template)
            old_stack = normalize_json(
                json.dumps(old_template, sort_keys=True,
                           indent=4, default=str)
            )
            output.extend(build_stack_changes(stack.name, new_stack, old_stack,
                                              new_params, old_params))
        ui.info('\n' + '\n'.join(output))

        stack.set_outputs(
            provider.get_output_dict(provider_stack))

        return COMPLETE
0.000861
def IsFileRequired(self, filename): """Returns true if a file is required by GTFS, false otherwise. Unknown files are, by definition, not required""" if filename not in self._file_mapping: return False mapping = self._file_mapping[filename] return mapping['required']
0.006826
def min_mean_cycle(graph, weight, start=0): """Minimum mean cycle by Karp :param graph: directed graph in listlist or listdict format :param weight: in matrix format or same listdict graph :param int start: vertex that should be contained in cycle :returns: cycle as vertex list, average arc weights or None if there is no cycle from start :complexity: `O(|V|*|E|)` """ INF = float('inf') n = len(graph) # compute distances dist = [[INF] * n] prec = [[None] * n] dist[0][start] = 0 for ell in range(1, n + 1): dist.append([INF] * n) prec.append([None] * n) for node in range(n): for neighbor in graph[node]: alt = dist[ell - 1][node] + weight[node][neighbor] if alt < dist[ell][neighbor]: dist[ell][neighbor] = alt prec[ell][neighbor] = node # -- find the optimal value valmin = INF argmin = None for node in range(n): valmax = -INF argmax = None for k in range(n): alt = (dist[n][node] - dist[k][node]) / float(n - k) # do not divide by float(n-k) => cycle of minimal total weight if alt >= valmax: # with >= we get simple cycles valmax = alt argmax = k if argmax is not None and valmax < valmin: valmin = valmax argmin = (node, argmax) # -- extract cycle if valmin == INF: # -- there is no cycle return None C = [] node, k = argmin for l in range(n, k, -1): C.append(node) node = prec[l][node] return C[::-1], valmin
0.001127
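A quick worked example makes the expected input format concrete: adjacency lists plus a weight matrix with float('inf') for missing arcs. The call below assumes the function above is importable as min_mean_cycle and is only an illustration, not part of the original library.

# Hypothetical usage sketch for the snippet above.
INF = float('inf')
graph = [[1, 2], [2], [0]]            # arcs: 0->1, 0->2, 1->2, 2->0
weight = [[INF, 1, 5],
          [INF, INF, 2],
          [3, INF, INF]]              # weight[u][v] matches the adjacency lists

cycle, mean = min_mean_cycle(graph, weight, start=0)
# The cycle 0 -> 1 -> 2 -> 0 has total weight 1 + 2 + 3 = 6, so the minimum
# mean is 6 / 3 = 2.0; `cycle` comes back as some rotation of [0, 1, 2].
print(cycle, mean)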
def get_int_noerr(self, arg):
        """Eval arg and, if it is an integer, return the value. Otherwise
        return None."""
        if self.curframe:
            g = self.curframe.f_globals
            l = self.curframe.f_locals
        else:
            g = globals()
            l = locals()
        try:
            val = int(eval(arg, g, l))
        except (SyntaxError, NameError, ValueError, TypeError):
            return None
        return val
0.008565
def validate_request(request, schema): """ Request validation does the following steps. 1. validate that the path matches one of the defined paths in the schema. 2. validate that the request method conforms to a supported methods for the given path. 3. validate that the request parameters conform to the parameter definitions for the operation definition. """ with ErrorDict() as errors: # 1 try: api_path = validate_path_to_api_path( path=request.path, context=schema, **schema ) except ValidationError as err: errors['path'].add_error(err.detail) return # this causes an exception to be raised since errors is no longer falsy. path_definition = schema['paths'][api_path] or {} if not path_definition: # TODO: is it valid to not have a definition for a path? return # 2 try: operation_definition = validate_request_method_to_operation( request_method=request.method, path_definition=path_definition, ) except ValidationError as err: errors['method'].add_error(err.detail) return if operation_definition is None: # TODO: is this compliant with swagger, can path operations have a null # definition? return # 3 operation_validators = construct_operation_validators( api_path=api_path, path_definition=path_definition, operation_definition=operation_definition, context=schema, ) try: validate_operation(request, operation_validators, context=schema) except ValidationError as err: errors['method'].add_error(err.detail)
0.002641
def _DecodeUnrecognizedFields(message, pair_type): """Process unrecognized fields in message.""" new_values = [] codec = _ProtoJsonApiTools.Get() for unknown_field in message.all_unrecognized_fields(): # TODO(craigcitro): Consider validating the variant if # the assignment below doesn't take care of it. It may # also be necessary to check it in the case that the # type has multiple encodings. value, _ = message.get_unrecognized_field_info(unknown_field) value_type = pair_type.field_by_name('value') if isinstance(value_type, messages.MessageField): decoded_value = DictToMessage(value, pair_type.value.message_type) else: decoded_value = codec.decode_field( pair_type.value, value) try: new_pair_key = str(unknown_field) except UnicodeEncodeError: new_pair_key = protojson.ProtoJson().decode_field( pair_type.key, unknown_field) new_pair = pair_type(key=new_pair_key, value=decoded_value) new_values.append(new_pair) return new_values
0.000881
def eval(self, expr, lineno=0, show_errors=True): """Evaluate a single statement.""" self.lineno = lineno self.error = [] self.start_time = time.time() try: node = self.parse(expr) except: errmsg = exc_info()[1] if len(self.error) > 0: errmsg = "\n".join(self.error[0].get_error()) if not show_errors: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(errmsg) print(errmsg, file=self.err_writer) return try: return self.run(node, expr=expr, lineno=lineno) except: errmsg = exc_info()[1] if len(self.error) > 0: errmsg = "\n".join(self.error[0].get_error()) if not show_errors: try: exc = self.error[0].exc except: exc = RuntimeError raise exc(errmsg) print(errmsg, file=self.err_writer) return
0.005333
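The control flow above (collect errors, optionally re-raise, otherwise print to err_writer) is easiest to see from a call. This sketch assumes the method comes from an asteval-style Interpreter; that is an assumption about the surrounding class, not something stated in the snippet.

# Hypothetical usage, assuming the snippet mirrors the asteval package's eval().
from asteval import Interpreter

aeval = Interpreter()
aeval.eval('x = 3')               # statements return None; x lands in the symbol table
print(aeval.eval('x ** 2 + 1'))   # -> 10
aeval.eval('1 / 0')               # the error text is printed to err_writer, nothing is raised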
def get_setting(setting):
    """
    Get the specified django setting, or its default value
    """
    defaults = {
        # The context to use for rendering fields
        'TEMPLATE_FIELD_CONTEXT': {},
        # When this is False, don't do any TemplateField rendering
        'TEMPLATE_FIELD_RENDER': True
    }
    try:
        return getattr(settings, setting, defaults[setting])
    except KeyError:
        msg = "{0} is not specified in your settings".format(setting)
        raise ImproperlyConfigured(msg)
0.001961
def update_scope_of_processing(self, process_name, uow, start_timeperiod, end_timeperiod):
        """Reads the source collection and refines the slice's upper bound for processing."""
        source_collection_name = uow.source
        last_object_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        uow.end_id = str(last_object_id)
        self.uow_dao.update(uow)
        msg = 'Updated processing range for {0}@{1} for collection {2}: [{3} : {4}]' \
            .format(process_name, start_timeperiod, source_collection_name, uow.start_id, uow.end_id)
        self._log_message(INFO, process_name, start_timeperiod, msg)
0.01059
def fastaAlignmentWrite(columnAlignment, names, seqNo, fastaFile, filter=lambda x : True):
    """
    Writes out the column alignment to the given file in multi-fasta format
    """
    fastaFile = open(fastaFile, 'w')
    columnAlignment = [ i for i in columnAlignment if filter(i) ]
    for seq in xrange(0, seqNo):
        fastaFile.write(">%s\n" % names[seq])
        for column in columnAlignment:
            fastaFile.write(column[seq])
        fastaFile.write("\n")
    fastaFile.close()
0.007859
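The column-alignment input format is only implicit above: one entry per alignment column, each indexable by sequence number. A small assumed example follows; note the function body uses xrange, so it runs as-is only under Python 2.

# Illustrative call (assumed data shapes): 3 sequences, 4 alignment columns.
columns = ["AAT", "CCC", "-GT", "TTT"]   # each column holds one character per sequence
names = ["seq1", "seq2", "seq3"]

# Keep only gap-free columns, then write one FASTA record per sequence to aln.fa.
fastaAlignmentWrite(columns, names, 3, "aln.fa",
                    filter=lambda column: "-" not in column)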
def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
0.001421
def delete_index(self, attr): """Deletes an index from the Table. Can be used to drop and rebuild an index, or to convert a non-unique index to a unique index, or vice versa. @param attr: name of an indexed attribute @type attr: string """ if attr in self._indexes: del self._indexes[attr] self._uniqueIndexes = [ind for ind in self._indexes.values() if ind.is_unique] return self
0.008511
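A short usage sketch of the drop-and-rebuild pattern the docstring mentions. The create_index counterpart and its unique keyword are assumptions about the surrounding Table API, not taken from the snippet.

# Hypothetical drop-and-rebuild, converting a unique index into a non-unique one.
employees = Table()
employees.create_index('dept', unique=True)    # assumed counterpart to delete_index
employees.delete_index('dept')
employees.create_index('dept', unique=False)   # rebuilt without the uniqueness constraint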
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._type_ is not None: return False if self._cvc2 is not None: return False if self._status is not None: return False if self._expiry_time is not None: return False return True
0.003731
def correctGrid(self, img, grid):
        '''
        grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,)
        '''
        self.img = imread(img)
        h = self.homography  # TODO: cleanup only needed to get newBorder attr.
        if self.opts['do_correctIntensity']:
            self.img = self.img / self._getTiltFactor(self.img.shape)
        s0, s1 = grid.shape[:2]
        n0, n1 = s0 - 1, s1 - 1
        snew = self._newBorders
        b = self.opts['border']
        sx, sy = (snew[0] - 2 * b) // n0, (snew[1] - 2 * b) // n1
        out = np.empty(snew[::-1], dtype=self.img.dtype)

        def warp(ix, iy, objP, outcut):
            shape = outcut.shape[::-1]
            quad = grid[ix:ix + 2, iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])]
            hcell = cv2.getPerspectiveTransform(
                quad.astype(np.float32), objP)
            cv2.warpPerspective(self.img, hcell, shape, outcut,
                                flags=cv2.INTER_LANCZOS4,
                                **self.opts['cv2_opts'])
            return quad

        objP = np.array([[0, 0],
                         [sx, 0],
                         [sx, sy],
                         [0, sy]], dtype=np.float32)
        # INNER CELLS
        for ix in range(1, n0 - 1):
            for iy in range(1, n1 - 1):
                sub = out[iy * sy + b: (iy + 1) * sy + b,
                          ix * sx + b: (ix + 1) * sx + b]
                warp(ix, iy, objP, sub)
        # TOP CELLS
        objP[:, 1] += b
        for ix in range(1, n0 - 1):
            warp(ix, 0, objP, out[: sy + b,
                                  ix * sx + b: (ix + 1) * sx + b])
        # BOTTOM CELLS
        objP[:, 1] -= b
        for ix in range(1, n0 - 1):
            iy = (n1 - 1)
            y = iy * sy + b
            x = ix * sx + b
            warp(ix, iy, objP, out[y: y + sy + b,
                                   x: x + sx])
        # LEFT CELLS
        objP[:, 0] += b
        for iy in range(1, n1 - 1):
            y = iy * sy + b
            warp(0, iy, objP, out[y: y + sy,
                                  : sx + b])
        # RIGHT CELLS
        objP[:, 0] -= b
        ix = (n0 - 1)
        x = ix * sx + b
        for iy in range(1, n1 - 1):
            y = iy * sy + b
            warp(ix, iy, objP, out[y: y + sy,
                                   x: x + sx + b])
        # BOTTOM RIGHT CORNER
        warp(n0 - 1, n1 - 1, objP, out[-sy - b - 1:,
                                       x: x + sx + b])
        # TOP LEFT CORNER
        objP += (b, b)
        warp(0, 0, objP, out[0: sy + b,
                             0: sx + b])
        # TOP RIGHT CORNER
        objP[:, 0] -= b
        warp(n0 - 1, 0, objP, out[: sy + b,
                                  x: x + sx + b])
        # BOTTOM LEFT CORNER
        objP += (b, -b)
        warp(0, n1 - 1, objP, out[-sy - b - 1:,
                                  : sx + b])
        return out
0.000594
def set_motor_force(self, motor_name, force): """ Sets the maximum force or torque that a joint can exert. """ self.call_remote_api('simxSetJointForce', self.get_object_handle(motor_name), force, sending=True)
0.00639
def generate_slug(obj, text, tail_number=0):
    """
    Returns a new unique slug. Object must provide a SlugField called slug.
    URL-friendly slugs are generated using django.template.defaultfilters' slugify.
    Numbers are added to the end of slugs for uniqueness.
    """
    from panya.models import ModelBase
    # use the django slugify filter to slugify
    slug = slugify(text)
    existing_slugs = [item.slug for item in ModelBase.objects.filter(slug__regex=r'^%s(-\d+)?' % slug).exclude(id=obj.id)]
    new_slug = slug
    while new_slug in existing_slugs:
        new_slug = slugify("%s-%s" % (slug, tail_number))
        tail_number += 1
    return new_slug
0.008523
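The collision-numbering loop is easier to see without the ORM query. This standalone sketch reuses django.utils.text.slugify (the utility the template filter wraps) and an in-memory set in place of the ModelBase lookup.

# Standalone illustration of the numbering scheme above (no models involved).
from django.utils.text import slugify

def unique_slug(text, existing_slugs, tail_number=0):
    slug = slugify(text)
    new_slug = slug
    while new_slug in existing_slugs:
        new_slug = slugify("%s-%s" % (slug, tail_number))
        tail_number += 1
    return new_slug

print(unique_slug("Hello World", {"hello-world", "hello-world-0"}))  # -> 'hello-world-1'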
def to_json(self, fp=None, default=_json_default, **kwargs): """ Represent the current `TLObject` as JSON. If ``fp`` is given, the JSON will be dumped to said file pointer, otherwise a JSON string will be returned. Note that bytes and datetimes cannot be represented in JSON, so if those are found, they will be base64 encoded and ISO-formatted, respectively, by default. """ d = self.to_dict() if fp: return json.dump(d, fp, default=default, **kwargs) else: return json.dumps(d, default=default, **kwargs)
0.003226
def load_dataset(self, dataset, verbose=True): """ Load a directory of gnt files. Yields the image and label in tuples. :param dataset: The directory to load. :return: Yields (Pillow.Image.Image, label) pairs. """ assert self.get_dataset(dataset) is True, "Datasets aren't properly downloaded, " \ "rerun to try again or download datasets manually." if verbose: print("Loading %s" % dataset) dataset_path = self.base_dataset_path + dataset for path in tqdm(glob.glob(dataset_path + "/*.gnt")): for image, label in self.load_gnt_file(path): yield image, label
0.005533
def components(self, obj, fmt=None, comm=True, **kwargs): """ Returns data and metadata dictionaries containing HTML and JS components to include render in app, notebook, or standalone document. Depending on the backend the fmt defines the format embedded in the HTML, e.g. png or svg. If comm is enabled the JS code will set up a Websocket comm channel using the currently defined CommManager. """ if isinstance(obj, (Plot, NdWidget)): plot = obj else: plot, fmt = self._validate(obj, fmt) widget_id = None data, metadata = {}, {} if isinstance(plot, NdWidget): js, html = plot(as_script=True) plot_id = plot.plot_id widget_id = plot.id else: html, js = self._figure_data(plot, fmt, as_script=True, **kwargs) plot_id = plot.id if comm and plot.comm is not None and self.comm_msg_handler: msg_handler = self.comm_msg_handler.format(plot_id=plot_id) html = plot.comm.html_template.format(init_frame=html, plot_id=plot_id) comm_js = plot.comm.js_template.format(msg_handler=msg_handler, comm_id=plot.comm.id, plot_id=plot_id) js = '\n'.join([js, comm_js]) html = "<div id='%s' style='display: table; margin: 0 auto;'>%s</div>" % (plot_id, html) if not os.environ.get('HV_DOC_HTML', False) and js is not None: js = embed_js.format(widget_id=widget_id, plot_id=plot_id, html=html) + js data['text/html'] = html if js: data[MIME_TYPES['js']] = js data[MIME_TYPES['jlab-hv-exec']] = '' metadata['id'] = plot_id self._plots[plot_id] = plot return (data, {MIME_TYPES['jlab-hv-exec']: metadata})
0.001972
def load(self, table: str):
        """Set the main dataframe from a table's data

        :param table: table name
        :type table: str

        :example: ``ds.load("mytable")``
        """
        if self._check_db() is False:
            return
        if table not in self.db.tables:
            self.warning("The table " + table + " does not exist")
            return
        try:
            self.start("Loading data from table " + table)
            res = self.db[table].all()
            self.df = pd.DataFrame(list(res))
            self.end("Data loaded from table " + table)
        except Exception as e:
            self.err(e, "Cannot load table " + table)
0.002954
def concat( df, *, columns: List[str], new_column: str, sep: str = None ): """ Concatenate `columns` element-wise See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.cat.html) for more information --- ### Parameters *mandatory :* - `columns` (*list*): list of columns to concatenate (at least 2 columns) - `new_column` (*str*): the destination column *optional :* - `sep` (*str*): the separator """ if len(columns) < 2: raise ValueError('The `columns` parameter needs to have at least 2 columns') first_col, *other_cols = columns df.loc[:, new_column] = df[first_col].astype(str).str.cat(df[other_cols].astype(str), sep=sep) return df
0.005044
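A minimal pandas check of the behaviour, including the astype(str) casts; concat here is the function defined above.

# Toy example on a two-row DataFrame.
import pandas as pd

df = pd.DataFrame({'year': [2023, 2024], 'month': [1, 12]})
out = concat(df, columns=['year', 'month'], new_column='period', sep='-')
print(out['period'].tolist())   # -> ['2023-1', '2024-12']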
def translate_symbol(self, in_symbol: str) -> str:
        """ Translate the incoming symbol into the locally-used one """
        # read all mappings from the db
        if not self.symbol_maps:
            self.__load_symbol_maps()

        # translate the incoming symbol
        result = self.symbol_maps[in_symbol] if in_symbol in self.symbol_maps else in_symbol
        return result
0.007874
def getFiltersFromArgs(kwargs):
    '''
        getFiltersFromArgs - Returns a dictionary of each filter type, and the corresponding field/value

        @param kwargs <dict> - Dictionary of filter arguments

        @return - Dictionary of each filter type (minus the ones that are optimized into others), each containing a list of tuples, (fieldName, matchingValue)
    '''

    # Create a copy of each possible filter in FILTER_TYPES and link to empty list.
    #   This object will be filled with all of the filters requested
    ret = { filterType : list() for filterType in FILTER_TYPES }

    for key, value in kwargs.items():
        matchObj = FILTER_PARAM_RE.match(key)
        if not matchObj:
            # Default (no __$oper) is eq
            filterType = 'eq'
            field = key
        else:
            # We have an operation defined, extract it, and optimize if possible
            #  (like if the op is case-insensitive, lowercase the value here)
            groupDict = matchObj.groupdict()

            filterType = groupDict['filterType']
            field = groupDict['field']

            if filterType not in FILTER_TYPES:
                raise ValueError('Unknown filter type: %s. Choices are: (%s)' %(filterType, ', '.join(FILTER_TYPES)))

            if filterType == 'isnull':
                # Convert "isnull" to one of the "is" or "isnot" filters against None
                if type(value) is not bool:
                    raise ValueError('Filter type "isnull" requires True/False.')
                if value is True:
                    filterType = "is"
                else:
                    filterType = "isnot"
                value = None
            elif filterType in ('in', 'notin'):
                # Try to make more efficient by making a set. Fallback to just using what they provide, could be an object implementing "in"
                try:
                    value = set(value)
                except:
                    pass
            # Optimization - if case-insensitive, lowercase the comparison value here
            elif filterType in ('ieq', 'ine', 'icontains', 'noticontains'):
                value = value.lower()
            elif filterType.startswith('split'):
                if (not issubclass(type(value), tuple) and not issubclass(type(value), list)) or len(value) != 2:
                    raise ValueError('Filter type %s expects a tuple of two params. (splitBy, matchPortion)' %(filterType,))

        ret[filterType].append( (field, value) )

    return ret
0.009027
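Assuming the module-level FILTER_TYPES and FILTER_PARAM_RE implement the usual Django-style field__operation convention (an assumption, since both are defined elsewhere), a call looks like this:

# Hypothetical call; operation names ('gte', 'icontains', ...) are assumed to be in FILTER_TYPES.
filters = getFiltersFromArgs({
    'age__gte': 21,              # field 'age', operation 'gte'
    'name__icontains': 'Smith',  # value lower-cased up front -> ('name', 'smith')
    'email__isnull': False,      # rewritten to the 'isnot' filter against None
    'status': 'active',          # no __operation suffix -> plain 'eq'
})
print(filters['gte'])     # [('age', 21)]
print(filters['isnot'])   # [('email', None)]
print(filters['eq'])      # [('status', 'active')]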
def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
    """
        Given a dict of profiles, searches through all the samples in the DB
        for a match. If a matching sample is found, an exception is raised,
        and the variants will not be loaded into the database.

        Args:
            adapter (MongoAdapter): Adapter to mongodb

            profiles (dict(str)): The profiles (given as strings) for each sample in vcf.

            hard_threshold(float): Rejects the load if a profile similarity above this is found

            soft_threshold(float): Stores similar samples if a profile similarity above this is found

        Returns:
            matches(dict(list)): list of similar samples for each sample in vcf.
    """
    matches = {sample: [] for sample in profiles.keys()}
    for case in adapter.cases():
        for individual in case['individuals']:
            for sample in profiles.keys():
                if individual.get('profile'):
                    similarity = compare_profiles(
                        profiles[sample], individual['profile']
                    )
                    if similarity >= hard_threshold:
                        msg = (
                            f"individual {sample} has a {similarity} similarity "
                            f"with individual {individual['ind_id']} in case "
                            f"{case['case_id']}"
                        )
                        LOG.critical(msg)
                        # abort the load
                        raise ProfileError
                    if similarity >= soft_threshold:
                        match = f"{case['case_id']}.{individual['ind_id']}"
                        matches[sample].append(match)
    return matches
0.004459
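compare_profiles() is defined elsewhere in the package, so a plain fraction-of-matching-calls similarity is assumed below purely to show how the two thresholds behave.

# Standalone sketch of the threshold logic (the real compare_profiles may differ).
def compare_profiles(profile_a, profile_b):
    hits = sum(a == b for a, b in zip(profile_a, profile_b))
    return hits / len(profile_a)

vcf_sample = ['AA', 'AG', 'GG', 'CC']
db_individual = ['AA', 'AG', 'GG', 'CT']

similarity = compare_profiles(vcf_sample, db_individual)   # 3 / 4 = 0.75
print(similarity >= 0.95)   # hard threshold: would not abort the load
print(similarity >= 0.9)    # soft threshold: would not be stored as a match either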
def async_task(self, func): """ Execute handler as task and return None. Use this decorator for slow handlers (with timeouts) .. code-block:: python3 @dp.message_handler(commands=['command']) @dp.async_task async def cmd_with_timeout(message: types.Message): await asyncio.sleep(120) return SendMessage(message.chat.id, 'KABOOM').reply(message) :param func: :return: """ def process_response(task): try: response = task.result() except Exception as e: self.loop.create_task( self.errors_handlers.notify(types.Update.get_current(), e)) else: if isinstance(response, BaseResponse): self.loop.create_task(response.execute_response(self.bot)) @functools.wraps(func) async def wrapper(*args, **kwargs): task = self.loop.create_task(func(*args, **kwargs)) task.add_done_callback(process_response) return wrapper
0.001794
def schedule_play(self, call_params):
        """REST helper: schedule playing something on a call.
        """
        path = '/' + self.api_version + '/SchedulePlay/'
        method = 'POST'
        return self.request(path, method, call_params)
0.008163