text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def convert_meas(direction, Rec):
    """
    Converts measurement tables from MagIC 2 to MagIC 3 (direction=magic3)
    or from MagIC 3 to MagIC 2.5 (direction=magic2) [not available].
    """
    if direction == 'magic3':
        columns = meas_magic2_2_magic3_map
        MeasRec = {}
        for key in columns:
            if key in list(Rec.keys()):
                # transfer info and change column name to data model 3.0
                MeasRec[columns[key]] = Rec[key]
        return MeasRec
    else:
        # haven't added this way yet
        pass
0.001852
def remove_rule(self, rule):
    """Remove rule from :attr:`R` and its corresponding weight from :attr:`W`.

    :param rule: rule to remove
    :type rule:
        :class:`~creamas.rules.rule.Rule` or :class:`~creamas.rules.rule.RuleLeaf`
    :raises TypeError:
        If rule is not derived from :class:`Rule` or :class:`RuleLeaf`.
    :returns:
        ``True`` if the rule was successfully removed, otherwise ``False``.
    :rtype: bool
    """
    if not issubclass(rule.__class__, (Rule, RuleLeaf)):
        raise TypeError(
            "Rule to remove ({}) is not subclass of {} or {}."
            .format(rule.__class__, Rule, RuleLeaf))
    try:
        ind = self._R.index(rule)
        del self._R[ind]
        del self._W[ind]
        return True
    except ValueError:
        # Rule was not found in the list.
        return False
0.003344
def get_page_number_from_request(
        request, querystring_key=PAGE_LABEL, default=1):
    """Retrieve the current page number from *GET* or *POST* data.

    If the page does not exist in *request*, or is not a number,
    then *default* is returned.
    """
    try:
        if request.method == 'POST':
            page_number = request.POST[querystring_key]
        else:
            page_number = request.GET[querystring_key]
        return int(page_number)
    except (KeyError, TypeError, ValueError):
        return default
0.001838
def do_open(self, mode=None):
    """
    Open the current base file with the (original) mode and encoding.
    Return the resulting stream.

    Note: Copied from stdlib. Added option to override 'mode'.
    """
    if mode is None:
        mode = self.mode
    with self._alter_umask():
        # noinspection PyArgumentList
        stream = io.open(
            self.baseFilename, mode=mode, encoding=self.encoding,
            newline=self.newline)
    self._do_chown_and_chmod(self.baseFilename)
    return stream
0.005338
def get_all_db_ids(language=DEFAULT_LANG):
    """
    :return: A list with all the database IDs as integers
    """
    _ids = []
    json_path = DBVuln.get_json_path(language=language)

    for _file in os.listdir(json_path):
        if not _file.endswith('.json'):
            continue

        _id = _file.split('-')[0]
        _ids.append(_id)

    return _ids
0.004878
def render_text(text, language=None):
    """
    Render the text, reusing the template filters provided by Django.
    """
    # Get the filter
    text_filter = SUPPORTED_LANGUAGES.get(language, None)
    if not text_filter:
        raise ImproperlyConfigured(
            "markup filter does not exist: {0}. Valid options are: {1}".format(
                language, ', '.join(list(SUPPORTED_LANGUAGES.keys()))
            ))

    # Convert.
    return text_filter(text)
0.004444
def _root(self):
    """Attribute referencing the root node of the tree.

    :returns: the root node of the tree containing this instance.
    :rtype: Node
    """
    _n = self
    while _n.parent:
        _n = _n.parent
    return _n
0.007463
def load_edges_into_db( nanopub_id: str, nanopub_url: str, edges: list = [], edges_coll_name: str = edges_coll_name, nodes_coll_name: str = nodes_coll_name, ): """Load edges into Edgestore""" start_time = datetime.datetime.now() # Clean out edges for nanopub in edgestore query = f""" FOR edge IN {edges_coll_name} FILTER edge.nanopub_id == "{nanopub_id}" REMOVE edge IN edges """ try: edgestore_db.aql.execute(query) except Exception as e: log.debug(f"Could not remove nanopub-related edges: {query} msg: {e}") end_time1 = datetime.datetime.now() delta_ms = f"{(end_time1 - start_time).total_seconds() * 1000:.1f}" log.info("Timing - Delete edges for nanopub", delta_ms=delta_ms) # Clean out errors for nanopub in pipeline_errors query = f""" FOR e IN pipeline_errors FILTER e.nanopub_id == "{nanopub_id}" REMOVE e IN pipeline_errors """ try: edgestore_db.aql.execute(query) except Exception as e: log.debug(f"Could not remove nanopub-related errors: {query} msg: {e}") end_time2 = datetime.datetime.now() delta_ms = f"{(end_time2 - end_time1).total_seconds() * 1000:.1f}" log.info("Timing - Delete pipeline errors for nanopub", delta_ms=delta_ms) # Collect edges and nodes to load into arangodb node_list, edge_list = [], [] for doc in edge_iterator(edges=edges): if doc[0] == "nodes": node_list.append(doc[1]) else: edge_list.append(doc[1]) end_time3 = datetime.datetime.now() delta_ms = f"{(end_time3 - end_time2).total_seconds() * 1000:.1f}" log.info("Timing - Collect edges and nodes", delta_ms=delta_ms) try: results = edgestore_db.collection(edges_coll_name).import_bulk( edge_list, on_duplicate="replace", halt_on_error=False ) except Exception as e: log.error(f"Could not load edges msg: {e}") end_time4 = datetime.datetime.now() delta_ms = f"{(end_time4 - end_time3).total_seconds() * 1000:.1f}" log.info("Timing - Load edges into edgestore", delta_ms=delta_ms) try: results = edgestore_db.collection(nodes_coll_name).import_bulk( node_list, on_duplicate="replace", halt_on_error=False ) except Exception as e: log.error(f"Could not load nodes msg: {e}") end_time5 = datetime.datetime.now() delta_ms = f"{(end_time5 - end_time4).total_seconds() * 1000:.1f}" log.info("Timing - Load nodes into edgestore", delta_ms=delta_ms)
0.000383
def add_arguments(parser):
    """
    adds arguments for the deploy command
    """
    parser.add_argument('-e', '--environment', help='Environment name', required=True)
    parser.add_argument('-w', '--dont-wait', help='Skip waiting', action='store_true')
    parser.add_argument('-a', '--archive', help='Archive file', required=False)
    parser.add_argument('-d', '--directory', help='Directory', required=False)
    parser.add_argument('-l', '--version-label', help='Version label', required=False)
    parser.add_argument('-t', '--termination-delay',
                        help='Delay termination of old environment by this number of seconds',
                        type=int, required=False)
0.007123
def _attach_dummy_intf_rtr(self, tenant_id, tenant_name, rtr_id): """Function to create a dummy router and interface. """ serv_obj = self.get_service_obj(tenant_id) fw_dict = serv_obj.get_fw_dict() fw_id = fw_dict.get('fw_id') rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + ( fw_id[len(fw_id) - 4:]) net_id, subnet_id = self.os_helper.create_network( rtr_nwk, tenant_id, self.servicedummy_ip_subnet) if net_id is None or subnet_id is None: return None, None net_dict = {} net_dict['name'] = rtr_nwk self.store_net_db(tenant_id, net_id, net_dict, 'SUCCESS') subnet_lst = set() subnet_lst.add(subnet_id) if rtr_id is None: self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id, net_id) return None, None ret = self.os_helper.add_intf_router(rtr_id, tenant_id, subnet_lst) if not ret: self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id, net_id) return None, None return net_id, subnet_id
0.001667
def which_users_can(self, name):
    """Which users can perform the action ``name`` (e.g. SendMail)?"""
    _roles = self.which_roles_can(name)
    result = [self.get_role_members(i.get('role')) for i in _roles]
    return result
0.014218
def _numpy_char_to_bytes(arr):
    """Like netCDF4.chartostring, but faster and more flexible.
    """
    # based on: http://stackoverflow.com/a/10984878/809705
    arr = np.array(arr, copy=False, order='C')
    dtype = 'S' + str(arr.shape[-1])
    return arr.view(dtype).reshape(arr.shape[:-1])
0.003378
def empty_file(file, mode="w", encoding='utf-8'):
    """empty file"""
    with open(file, mode, encoding=encoding) as f:
        f.truncate()
0.007042
def convert_dict2tuple(value):
    """
    convert dict type to tuple to solve unhashable problem.
    """
    if isinstance(value, dict):
        for _keys in value:
            value[_keys] = convert_dict2tuple(value[_keys])
        return tuple(sorted(value.items()))
    else:
        return value
0.003322
def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
0.000444
def matches(self, txt: str) -> bool:
    """Determine whether txt matches pattern

    :param txt: text to check
    :return: True if match
    """
    # rval = ref.getText()[1:-1].encode('utf-8').decode('unicode-escape')
    if r'\\u' in self.pattern_re.pattern:
        txt = txt.encode('utf-8').decode('unicode-escape')
    match = self.pattern_re.match(txt)
    return match is not None and match.end() == len(txt)
0.004405
async def on_raw_quit(self, message):
    """ QUIT command. """
    nick, metadata = self._parse_user(message.source)
    self._sync_user(nick, metadata)

    if message.params:
        reason = message.params[0]
    else:
        reason = None

    await self.on_quit(nick, reason)

    # Remove user from database.
    if not self.is_same_nick(self.nickname, nick):
        self._destroy_user(nick)
    # Else, we quit.
    elif self.connected:
        await self.disconnect(expected=True)
0.003663
def load_adjustment_values(self, c):
    """load adjustment values for auto shape types in self"""
    # retrieve auto shape types in const_name order --------
    for mast in self:
        # retrieve adj vals for this auto shape type --------
        c.execute(
            '  SELECT name, val\n'
            '    FROM adjustment_values\n'
            '   WHERE prst = ?\n'
            'ORDER BY seq_nmbr', (mast.prst,)
        )
        for name, val in c:
            mast.adj_vals.append(AdjustmentValue(name, val))
0.003534
def onecmd_plus_hooks(self, line):
    '''
    Trigger hooks after command.
    '''
    if not line:
        return self.emptyline()
    return Cmd.onecmd_plus_hooks(self, line)
0.010811
def update_api_key_description(apiKey, description, region=None, key=None, keyid=None, profile=None): ''' update the given apiKey with the given description. CLI Example: .. code-block:: bash salt myminion boto_apigateway.update_api_key_description api_key description ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) response = _api_key_patch_replace(conn, apiKey, '/description', description) return {'updated': True, 'apiKey': _convert_datetime_str(response)} except ClientError as e: return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
0.006079
def execCommand(g, command, timeout=10): """ Executes a command by sending it to the rack server Arguments: g : hcam_drivers.globals.Container the Container object of application globals command : (string) the command (see below) Possible commands are: start : starts a run stop : stops a run abort : aborts a run online : bring ESO control server online and power up hardware off : put ESO control server in idle state and power down standby : server can communicate, but child processes disabled reset : resets the NGC controller front end Returns True/False according to whether the command succeeded or not. """ if not g.cpars['hcam_server_on']: g.clog.warn('execCommand: servers are not active') return False try: url = g.cpars['hipercam_server'] + command g.clog.info('execCommand, command = "' + command + '"') response = urllib.request.urlopen(url, timeout=timeout) rs = ReadServer(response.read(), status_msg=False) g.rlog.info('Server response =\n' + rs.resp()) if rs.ok: g.clog.info('Response from server was OK') return True else: g.clog.warn('Response from server was not OK') g.clog.warn('Reason: ' + rs.err) return False except urllib.error.URLError as err: g.clog.warn('execCommand failed') g.clog.warn(str(err)) return False
0.000655
def _partialParseModifier(self, s, sourceTime):
    """
    Test if the given C{s} matches CRE_MODIFIER, used by L{parse()}.

    @type  s:          string
    @param s:          date/time text to evaluate
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of the remaining date/time text, datetime object and
             a boolean value describing whether it matched or not
    """
    parseStr = None
    chunk1 = chunk2 = ''

    # Modifier like next/prev/from/after/prior..
    m = self.ptc.CRE_MODIFIER.search(s)
    if m is not None:
        if m.group() != s:
            # capture remaining string
            parseStr = m.group()
            chunk1 = s[:m.start()].strip()
            chunk2 = s[m.end():].strip()
        else:
            parseStr = s

    if parseStr:
        debug and log.debug('found (modifier) [%s][%s][%s]',
                            parseStr, chunk1, chunk2)
        s, sourceTime = self._evalModifier(parseStr, chunk1, chunk2, sourceTime)

    return s, sourceTime, bool(parseStr)
0.001608
def _initialize_operation_name_to_id(self):
    """Initializer for _operation_name_to_id.

    Returns:
      a {string: int}, mapping operation names to their index in _operations.
    """
    operation_name_to_id = {}
    for i, operation in enumerate(self._operations):
        operation_name_to_id[operation.name] = i
    return operation_name_to_id
0.005698
def parse_assign_target(self, with_tuple=True, name_only=False,
                        extra_end_rules=None, with_namespace=False):
    """Parse an assignment target.  As Jinja2 allows assignments to
    tuples, this function can parse all allowed assignment targets.  Per
    default assignments to tuples are parsed; that can be disabled,
    however, by setting `with_tuple` to `False`.  If only assignments to
    names are wanted `name_only` can be set to `True`.  The
    `extra_end_rules` parameter is forwarded to the tuple parsing
    function.  If `with_namespace` is enabled, a namespace assignment may
    be parsed.
    """
    if with_namespace and self.stream.look().type == 'dot':
        token = self.stream.expect('name')
        next(self.stream)  # dot
        attr = self.stream.expect('name')
        target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
    elif name_only:
        token = self.stream.expect('name')
        target = nodes.Name(token.value, 'store', lineno=token.lineno)
    else:
        if with_tuple:
            target = self.parse_tuple(simplified=True,
                                      extra_end_rules=extra_end_rules)
        else:
            target = self.parse_primary()
        target.set_ctx('store')
    if not target.can_assign():
        self.fail('can\'t assign to %r' % target.__class__.
                  __name__.lower(), target.lineno)
    return target
0.001952
def _get_mine(fun):
    '''
    Return the mine function from all the targeted minions.
    Just a small helper to avoid redundant pieces of code.
    '''
    if fun in _CACHE and _CACHE[fun]:
        return _CACHE[fun]
    net_runner_opts = _get_net_runner_opts()
    _CACHE[fun] = __salt__['mine.get'](net_runner_opts.get('target'),
                                       fun,
                                       tgt_type=net_runner_opts.get('expr_form'))
    return _CACHE[fun]
0.004141
def inverse(x, p, errorOnFail=False):
    """
    Find the inverse of BigInt @x in a field of (prime) order @p.
    """
    # Check types
    assertType(x, BigInt)

    # There are a number of ways in RELIC to compute this inverse, but
    # for simplicity, we'll use the extended GCD algorithm because it
    # involves less type conversions. On the initial development platform
    # Lehmer's algorithm was the most performant: we call it directly.
    gcd = BigInt()
    inv = BigInt()

    # bn_gcd_ext(c, d, e, a, b) computes: c = a*d + b*e
    # We take a=x, b=p, and expect: c = 1 = gcd(x,p), d = 1/x, and e is unused.
    librelic.bn_gcd_ext_lehme(byref(gcd), byref(inv), None, byref(x), byref(p))

    # Check that GCD == 1
    if gcd != 1:
        if errorOnFail:
            raise Exception(
                "Cannot find an inverse. gcd(x,p) == {}, but we need "
                "gcd(x,p) == 1 to find an inverse.".format(long(gcd)))
        else:
            return None

    return inv
0.005097
def do_dq(self, arg): """ [~thread] dq <register> - show memory contents as qwords [~thread] dq <register-register> - show memory contents as qwords [~thread] dq <register> <size> - show memory contents as qwords [~process] dq <address> - show memory contents as qwords [~process] dq <address-address> - show memory contents as qwords [~process] dq <address> <size> - show memory contents as qwords """ self.print_memory_display(arg, HexDump.hexblock_qword) self.last_display_command = self.do_dq
0.003472
def render_template_directory(deck, arguments): """Render a template directory""" output_directory = dir_name_from_title(deck.title) if os.path.exists(output_directory): if sys.stdout.isatty(): if ask( '%s already exists, shall I delete it?' % output_directory, arguments.get('--noinput') ): shutil.rmtree(output_directory) else: shutil.rmtree(output_directory) # copy support files to output directory template_directory_path = ( '%s/templates/%s' % (remarkable.__path__[0], deck.presentation_type) ) shutil.copytree( template_directory_path, output_directory, ) # copy resources if os.path.exists('resources'): log.info('Copying resources') shutil.copytree('resources', '%s/resources' % output_directory) else: log.info('No resources to copy') # render template template_filename = '%s/index.html' % deck.presentation_type html = render_template(template_filename, deck.json) # write index to output directory index_filename = '%s/index.html' % output_directory write_file(index_filename, html) return output_directory
0.000797
def set(self, key, value, **kwargs): """ Set the value of a Parameter in the ParameterSet. If :func:`get` would retrieve a Parameter, this will set the value of that parameter. Or you can provide 'value@...' or 'default_unit@...', etc to specify what attribute to set. :parameter str key: the twig (called key here to be analagous to a normal dict) :parameter value: value to set :parameter **kwargs: other filter parameters (must result in returning a single :class:`Parameter`) :return: the value of the :class:`Parameter` after setting the new value (including converting units if applicable) """ twig = key method = None twigsplit = re.findall(r"[\w']+", twig) if twigsplit[0] == 'value': twig = '@'.join(twigsplit[1:]) method = 'set_value' elif twigsplit[0] == 'quantity': twig = '@'.join(twigsplit[1:]) method = 'set_quantity' elif twigsplit[0] in ['unit', 'default_unit']: twig = '@'.join(twigsplit[1:]) method = 'set_default_unit' elif twigsplit[0] in ['timederiv']: twig = '@'.join(twigsplit[1:]) method = 'set_timederiv' elif twigsplit[0] in ['description']: raise KeyError("cannot set {} of {}".format(twigsplit[0], '@'.join(twigsplit[1:]))) if self._bundle is not None and self._bundle.get_setting('dict_set_all').get_value() and len(self.filter(twig=twig, **kwargs)) > 1: # then we need to loop through all the returned parameters and call set on them for param in self.filter(twig=twig, **kwargs).to_list(): self.set('{}@{}'.format(method, param.twig) if method is not None else param.twig, value) else: if method is None: return self.set_value(twig=twig, value=value, **kwargs) else: param = self.get_parameter(twig=twig, **kwargs) return getattr(param, method)(value)
0.002838
def createFileParserCtxt(filename):
    """Create a parser context for a file content. Automatic support
       for ZLIB/Compress compressed document is provided by default
       if found at compile-time. """
    ret = libxml2mod.xmlCreateFileParserCtxt(filename)
    if ret is None:
        raise parserError('xmlCreateFileParserCtxt() failed')
    return parserCtxt(_obj=ret)
0.008152
def add_download_task(self, source_url, remote_path, rate_limit=None,
                      timeout=60 * 60, expires=None, callback='', **kwargs):
    """Add an offline download task for a single file.

    :param source_url: URL of the source file.
    :param remote_path: Path where the downloaded file will be saved.

                        .. warning::
                            * The path length is limited to 1000 characters;
                            * The path must not contain the characters:
                              ``\\\\ ? | " > < : *``;
                            * File or path names must not begin or end with ``.``
                              or whitespace; whitespace includes:
                              ``\\r, \\n, \\t, space, \\0, \\x0B``.
    :param rate_limit: Download speed limit; unlimited by default.
    :type rate_limit: int or long
    :param timeout: Download timeout, 3600 seconds by default.
    :param expires: Request expiration time; validated if provided.
    :type expires: int
    :param callback: Callback invoked when the download finishes, empty by default.
    :type callback: str

    :return: Response object
    """
    data = {
        'source_url': source_url,
        'save_path': remote_path,
        'expires': expires,
        'rate_limit': rate_limit,
        'timeout': timeout,
        'callback': callback,
    }
    return self._request('services/cloud_dl', 'add_task',
                         data=data, **kwargs)
0.00317
def inquire(self, name=True, lifetime=True, usage=True, mechs=True): """Inspect these credentials for information This method inspects these credentials for information about them. Args: name (bool): get the name associated with the credentials lifetime (bool): get the remaining lifetime for the credentials usage (bool): get the usage for the credentials mechs (bool): get the mechanisms associated with the credentials Returns: InquireCredResult: the information about the credentials, with None used when the corresponding argument was False Raises: MissingCredentialsError InvalidCredentialsError ExpiredCredentialsError """ res = rcreds.inquire_cred(self, name, lifetime, usage, mechs) if res.name is not None: res_name = names.Name(res.name) else: res_name = None return tuples.InquireCredResult(res_name, res.lifetime, res.usage, res.mechs)
0.0018
def draw(self): """ Clear the terminal screen and redraw all of the sub-windows """ n_rows, n_cols = self.term.stdscr.getmaxyx() if n_rows < self.term.MIN_HEIGHT or n_cols < self.term.MIN_WIDTH: # TODO: Will crash when you try to navigate if the terminal is too # small at startup because self._subwindows will never be populated return self._row = 0 self._draw_header() self._draw_banner() self._draw_content() self._draw_footer() self.term.clear_screen() self.term.stdscr.refresh()
0.003247
def user_info(self, verbose=False):
    '''
    Get information about the currently authenticated user

    http://docs.opsview.com/doku.php?id=opsview4.6:restapi#user_information
    '''
    url = '{}/{}'.format(self.rest_url, 'user')
    return self.__auth_req_get(url, verbose=verbose)
0.006431
def on_episode_end(self, episode, logs): """ Compute and print metrics at the end of each episode """ duration = timeit.default_timer() - self.starts[episode] metrics = self.metrics[episode] if np.isnan(metrics).all(): mean_metrics = np.array([np.nan for _ in self.metrics_names]) else: mean_metrics = np.nanmean(metrics, axis=0) assert len(mean_metrics) == len(self.metrics_names) data = list(zip(self.metrics_names, mean_metrics)) data += list(logs.items()) data += [('episode', episode), ('duration', duration)] for key, value in data: if key not in self.data: self.data[key] = [] self.data[key].append(value) if self.interval is not None and episode % self.interval == 0: self.save_data() # Clean up. del self.metrics[episode] del self.starts[episode]
0.003165
def index(self, state, code=None, error=None): """ Receive a Exist response containing a verification code. Use the code to fetch the access_token. """ error = None if code: try: auth_token = self.fetch_token(code, state) except MissingTokenError: error = self._fmt_failure( 'Missing access token parameter.</br>Please check that ' 'you are using the correct client_secret') except MismatchingStateError: error = self._fmt_failure('CSRF Warning! Mismatching state') else: error = self._fmt_failure('Unknown error while authenticating') # Use a thread to shutdown cherrypy so we can return HTML first self._shutdown_cherrypy() return error if error else self.success_html % (auth_token)
0.00222
def idxstats(in_bam, data): """Return BAM index stats for the given file, using samtools idxstats. """ index(in_bam, data["config"], check_timestamp=False) AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"]) samtools = config_utils.get_program("samtools", data["config"]) idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode() out = [] for line in idxstats_out.split("\n"): if line.strip(): contig, length, aligned, unaligned = line.split("\t") out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned))) return out
0.006033
def replication_state(self, repl_id): """ Retrieves the state for the given replication. Possible values are ``triggered``, ``completed``, ``error``, and ``None`` (meaning not yet triggered). :param str repl_id: Replication id used to identify the replication to inspect. :returns: Replication state as a ``str`` """ if "scheduler" in self.client.features(): try: repl_doc = Scheduler(self.client).get_doc(repl_id) except HTTPError as err: raise CloudantReplicatorException(err.response.status_code, repl_id) state = repl_doc['state'] else: try: repl_doc = self.database[repl_id] except KeyError: raise CloudantReplicatorException(404, repl_id) repl_doc.fetch() state = repl_doc.get('_replication_state') return state
0.003128
def tokens_required(service_list):
    """
    Ensure the user has the necessary tokens for the specified services
    """
    def decorator(func):
        @wraps(func)
        def inner(request, *args, **kwargs):
            for service in service_list:
                if service not in request.session["user_tokens"]:
                    return redirect('denied')
            return func(request, *args, **kwargs)
        return inner
    return decorator
0.002183
def sorted_members(self): """ Iterate over sorted members of shape in the same order in which the members are declared except yielding the required members before any optional members. """ members = collections.OrderedDict() required_names = self.metadata.get("required", ()) for name, shape in self.members.items(): members[name] = AbShapeMember(name=name, shape=shape, is_required=name in required_names) if self.is_output_shape: # ResponseMetadata is the first member for all output shapes. yield AbShapeMember( name="ResponseMetadata", shape=self._shape_resolver.get_shape_by_name("ResponseMetadata"), is_required=True, ) yield from sorted(members.values(), key=lambda m: not m.is_required)
0.004587
def get_records(self, records=None, timeout=1.0): """Get NDEF message records from a SNEP Server. .. versionadded:: 0.13 The :class:`ndef.Record` list given by *records* is encoded as the request message octets input to :meth:`get_octets`. The return value is an :class:`ndef.Record` list decoded from the response message octets returned by :meth:`get_octets`. Same as:: import ndef send_octets = ndef.message_encoder(records) rcvd_octets = snep_client.get_octets(send_octets, timeout) records = list(ndef.message_decoder(rcvd_octets)) """ octets = b''.join(ndef.message_encoder(records)) if records else None octets = self.get_octets(octets, timeout) if octets and len(octets) >= 3: return list(ndef.message_decoder(octets))
0.002278
def make_postcard(self, npix=300, shape=(1070, 1132), buffer_size=15): """ Develop a "postcard" region around the target star. Other stars in this postcard will be used as possible reference stars. Args: npix: The size of the postcard region. The region will be a square with sides npix pixels (default: ``300``) shape: The size of each individual image. For Kepler/K2 FFIs this should never need to be changed from the default, but will be different for e.g. TESS FFIs (default: ``(1070, 1132)``) buffer_size: The number of pixels at the edge of the detector to avoid (default: ``15``) """ source = self.kic client = kplr.API() targ = client.target(source) channel = [targ.params['Channel_0'], targ.params['Channel_1'], targ.params['Channel_2'], targ.params['Channel_3']] col = [targ.params['Column_0'], targ.params['Column_1'], targ.params['Column_2'], targ.params['Column_3']] row = [targ.params['Row_0'], targ.params['Row_1'], targ.params['Row_2'], targ.params['Row_3']] if None in row: raise ValueError('Star not on detector all quarters!') if None in col: raise ValueError('Star not on detector all quarters!') center = np.array([npix/2, npix/2]) # If star close to edge, shift frame so that we have the full npix by npix # In this case, postcard will not be centered on target star if (np.min(col) < npix/2): jump = npix/2 - np.min(col) + buffer_size col += jump center[1] -= jump if (np.min(row) < npix/2): jump = npix/2 - np.min(row) + buffer_size row += jump center[0] -= jump if (np.max(row) > shape[0] - npix/2): jump = shape[0]-npix/2 - np.max(row) - buffer_size row += jump center[0] -= jump if (np.max(col) > shape[1] - npix/2): jump = shape[1]-npix/2 - np.max(col) - buffer_size col += jump center[1] -= jump fin_arr = np.zeros((len(self.times), npix, npix)) for icount, iname in enumerate(self.obs_filenames): a = fits.open(self.ffi_dir+iname) quarter = a[0].header['quarter'] if int(quarter) == 0: season = 3 else: season = (int(quarter) - 2) % 4 #season_arr[icount] = season img = a[channel[season]].data img -= np.median(img) ymin = int(max([int(row[season])-npix/2,0])) ymax = int(min([int(row[season])+npix/2,img.shape[0]])) xmin = int(max([int(col[season])-npix/2,0])) xmax = int(min([int(col[season])+npix/2,img.shape[1]])) pimg = img[ymin:ymax,xmin:xmax] fin_arr[icount,:,:] = pimg self.postcard = fin_arr self.integrated_postcard = np.sum(self.postcard, axis=0) self.center = center
0.013402
def _docs(self):
    """
    Since `self.docs` may yield documents that do not explicitly contain
    `_index` or `_type`, add those attributes here, if necessary.
    """
    iterdocs = iter(self.docs())
    first = next(iterdocs)
    needs_parsing = False
    if isinstance(first, six.string_types):
        needs_parsing = True
    elif isinstance(first, dict):
        pass
    else:
        raise RuntimeError('Documents must be either JSON strings or dicts.')
    for doc in itertools.chain([first], iterdocs):
        if needs_parsing:
            doc = json.loads(doc)
        if '_index' not in doc:
            doc['_index'] = self.index
        if '_type' not in doc:
            doc['_type'] = self.doc_type
        yield doc
0.003663
def get_n_config_to_keep(self, n_suggestions, bracket_iteration):
    """Return the number of configs to keep and resume."""
    n_configs = n_suggestions * (self.eta ** -bracket_iteration)
    return int(n_configs / self.eta)
0.008403
def _flatten_dicts(self, dicts):
    """Flatten a list of dicts into a single dict.

    :param dicts: dicts to flatten
    :type dicts: list(dict)
    """
    list_of_dicts = dicts or []
    return {k: v for d in list_of_dicts for k, v in d.items()}
0.007117
def to_op(op, reversed=False): ''' create a function that transforms a method to a binary op often we need to convert a pseudo method <receiver>.<message>(<z>) to a binary op <receiver> <op> <message> that's a decorator that helps for that ''' def transformer(receiver, param, pseudo_type): if not reversed: return Node('binary_op', op=op, left=receiver, right=param, pseudo_type=pseudo_type) return Node('binary_op', op=op, left=param, right=receiver, pseudo_type=pseudo_type) return transformer
0.005348
def provideCustomerReferralCode(sender, **kwargs):
    '''
    If the vouchers app is installed and referrals are enabled, then the
    customer's profile page can show their voucher referral code.
    '''
    customer = kwargs.pop('customer')

    if getConstant('vouchers__enableVouchers') and getConstant('referrals__enableReferralProgram'):
        vrd = ensureReferralVouchersExist(customer)
        return {'referralVoucherId': vrd.referreeVoucher.voucherId}
0.008147
def interpolate_holes(self): """Linearly interpolate over holes in this collection to make it continuous. Returns: continuous_collection: A HourlyContinuousCollection with the same data as this collection but with missing data filled by means of a linear interpolation. """ # validate analysis_period and use the resulting period to generate datetimes assert self.validated_a_period is True, 'validated_a_period property must be' \ ' True to use interpolate_holes(). Run validate_analysis_period().' mins_per_step = int(60 / self.header.analysis_period.timestep) new_datetimes = self.header.analysis_period.datetimes new_values = [] # if the first steps are a hole, duplicate the first value. i = 0 if new_datetimes[0] != self.datetimes[0]: n_steps = int((self.datetimes[0].moy - new_datetimes[0].moy) / mins_per_step) new_values.extend([self._values[0]] * n_steps) i = n_steps - 1 # go through the values interpolating any holes. for j in xrange(len(self._values)): if new_datetimes[i] == self.datetimes[j]: # there is no hole. new_values.append(self._values[j]) i += 1 else: # there is a hole between this step and the previous step. n_steps = int((self.datetimes[j].moy - new_datetimes[i].moy) / mins_per_step) intp_vals = self._xxrange(self._values[j - 1], self._values[j], n_steps) new_values.extend(list(intp_vals)[1:] + [self._values[j]]) i += n_steps # if the last steps are a hole duplicate the last value. if len(new_values) != len(new_datetimes): n_steps = len(new_datetimes) - len(new_values) new_values.extend([self._values[-1]] * n_steps) # build the new continuous data collection. return HourlyContinuousCollection(self.header.duplicate(), new_values)
0.003854
def update(self):
    """Fetch updated information about devices"""
    if self.device_time_check():
        if not self.in_process:
            outlets, switches, fans = self.get_devices()
            self.outlets = helpers.resolve_updates(self.outlets, outlets)
            self.switches = helpers.resolve_updates(self.switches, switches)
            self.fans = helpers.resolve_updates(self.fans, fans)
            self.last_update_ts = time.time()
0.003937
def sigmoidal(arr, contrast, bias): r""" Sigmoidal contrast is type of contrast control that adjusts the contrast without saturating highlights or shadows. It allows control over two factors: the contrast range from light to dark, and where the middle value of the mid-tones falls. The result is a non-linear and smooth contrast change. Parameters ---------- arr : ndarray, float, 0 .. 1 Array of color values to adjust contrast : integer Enhances the intensity differences between the lighter and darker elements of the image. For example, 0 is none, 3 is typical and 20 is a lot. bias : float, between 0 and 1 Threshold level for the contrast function to center on (typically centered at 0.5) Notes ---------- Sigmoidal contrast is based on the sigmoidal transfer function: .. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)}) This sigmoid function is scaled so that the output is bound by the interval [0, 1]. .. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/ ( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) ) Where :math: `\alpha` is the threshold level, and :math: `\beta` the contrast factor to be applied. References ---------- .. [CT] Hany Farid "Fundamentals of Image Processing" http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf """ if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon): raise ValueError("Input array must have float values between 0 and 1") if (bias > 1.0 + epsilon) or (bias < 0 - epsilon): raise ValueError("bias must be a scalar float between 0 and 1") alpha, beta = bias, contrast # We use the names a and b to match documentation. if alpha == 0: alpha = epsilon if beta == 0: return arr np.seterr(divide="ignore", invalid="ignore") if beta > 0: numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / ( 1 + np.exp(beta * alpha) ) denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / ( 1 + np.exp(beta * alpha) ) output = numerator / denominator else: # Inverse sigmoidal function: # todo: account for 0s # todo: formatting ;) output = ( (beta * alpha) - np.log( ( 1 / ( (arr / (1 + np.exp(beta * alpha - beta))) - (arr / (1 + np.exp(beta * alpha))) + (1 / (1 + np.exp(beta * alpha))) ) ) - 1 ) ) / beta return output
0.000359
def dispatch(self, command): """Pass a command along with its params to a suitable handler If the command is blank, succeed silently If the command has no handler, succeed silently If the handler raises an exception, fail with the exception message """ log.info("Dispatch on %s", command) if not command: return "OK" action, params = self.parse_command(command) log.debug("Action = %s, Params = %s", action, params) try: function = getattr(self, "do_" + action, None) if function: function(*params) return "OK" except KeyboardInterrupt: raise except Exception as exc: log.exception("Problem executing action %s", action) return "ERROR: %s" % exc
0.004657
def to_series(self, index=None, name=None): """ Create a Series with both index and values equal to the index keys useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional index of resulting Series. If None, defaults to original index name : string, optional name of resulting Series. If None, defaults to name of original index Returns ------- Series : dtype will be based on the type of the Index values. """ from pandas import Series if index is None: index = self._shallow_copy() if name is None: name = self.name return Series(self.values.copy(), index=index, name=name)
0.002472
def get_children(self, usage_id_filter=None):
    """
    Return instantiated XBlocks for each of this block's ``children``.
    """
    if not self.has_children:
        return []

    return [
        self.get_child(usage_id)
        for usage_id in self.children
        if usage_id_filter is None or usage_id_filter(usage_id)
    ]
0.005348
def build_hgnc_gene(gene_info, build='37'): """Build a hgnc_gene object Args: gene_info(dict): Gene information Returns: gene_obj(dict) { '_id': ObjectId(), # This is the hgnc id, required: 'hgnc_id': int, # The primary symbol, required 'hgnc_symbol': str, 'ensembl_id': str, # required 'build': str, # '37' or '38', defaults to '37', required 'chromosome': str, # required 'start': int, # required 'end': int, # required 'description': str, # Gene description 'aliases': list(), # Gene symbol aliases, includes hgnc_symbol, str 'entrez_id': int, 'omim_id': int, 'pli_score': float, 'primary_transcripts': list(), # List of refseq transcripts (str) 'ucsc_id': str, 'uniprot_ids': list(), # List of str 'vega_id': str, 'transcripts': list(), # List of hgnc_transcript # Inheritance information 'inheritance_models': list(), # List of model names 'incomplete_penetrance': bool, # Acquired from HPO # Phenotype information 'phenotypes': list(), # List of dictionaries with phenotype information } """ try: hgnc_id = int(gene_info['hgnc_id']) except KeyError as err: raise KeyError("Gene has to have a hgnc_id") except ValueError as err: raise ValueError("hgnc_id has to be integer") try: hgnc_symbol = gene_info['hgnc_symbol'] except KeyError as err: raise KeyError("Gene has to have a hgnc_symbol") try: ensembl_id = gene_info['ensembl_gene_id'] except KeyError as err: raise KeyError("Gene has to have a ensembl_id") try: chromosome = gene_info['chromosome'] except KeyError as err: raise KeyError("Gene has to have a chromosome") try: start = int(gene_info['start']) except KeyError as err: raise KeyError("Gene has to have a start position") except TypeError as err: raise TypeError("Gene start has to be a integer") try: end = int(gene_info['end']) except KeyError as err: raise KeyError("Gene has to have a end position") except TypeError as err: raise TypeError("Gene end has to be a integer") gene_obj = HgncGene( hgnc_id=hgnc_id, hgnc_symbol=hgnc_symbol, ensembl_id=ensembl_id, chrom=chromosome, start=start, end=end, build=build, ) if gene_info.get('description'): gene_obj['description'] = gene_info['description'] # LOG.debug("Adding info %s", gene_info['description']) if gene_info.get('previous_symbols'): gene_obj['aliases'] = gene_info['previous_symbols'] if gene_info.get('entrez_id'): gene_obj['entrez_id'] = int(gene_info['entrez_id']) if gene_info.get('omim_id'): gene_obj['omim_id'] = int(gene_info['omim_id']) if gene_info.get('pli_score'): gene_obj['pli_score'] = float(gene_info['pli_score']) if gene_info.get('ref_seq'): gene_obj['primary_transcripts'] = gene_info['ref_seq'] if gene_info.get('ucsc_id'): gene_obj['ucsc_id'] = gene_info['ucsc_id'] if gene_info.get('uniprot_ids'): gene_obj['uniprot_ids'] = gene_info['uniprot_ids'] if gene_info.get('vega_id'): gene_obj['vega_id'] = gene_info['vega_id'] if gene_info.get('incomplete_penetrance'): gene_obj['incomplete_penetrance'] = True if gene_info.get('inheritance_models'): gene_obj['inheritance_models'] = gene_info['inheritance_models'] phenotype_objs = [] for phenotype_info in gene_info.get('phenotypes', []): phenotype_objs.append(build_phenotype(phenotype_info)) if phenotype_objs: gene_obj['phenotypes'] = phenotype_objs for key in list(gene_obj): if gene_obj[key] is None: gene_obj.pop(key) return gene_obj
0.005984
def qualified_name(obj):
    '''Returns the fully-qualified name of the given object'''
    if not hasattr(obj, '__module__'):
        obj = obj.__class__
    module = obj.__module__
    if module is None or module == str.__class__.__module__:
        return obj.__qualname__
    return '{}.{}'.format(module, obj.__qualname__)
0.003058
def dist(ctx, devpi=False, egg=False, wheel=False, auto=True): """Distribute the project.""" config.load() cmd = ["python", "setup.py", "sdist"] # Automatically create wheels if possible if auto: egg = sys.version_info.major == 2 try: import wheel as _ wheel = True except ImportError: wheel = False if egg: cmd.append("bdist_egg") if wheel: cmd.append("bdist_wheel") ctx.run("invoke clean --all build --docs test check") ctx.run(' '.join(cmd)) if devpi: ctx.run("devpi upload dist/*")
0.001634
def _process_outgoing_msg(self, sink_iter):
    """For every message we construct a corresponding RPC message to be
    sent over the given socket inside given RPC session.

    This function should be launched in a new green thread as
    it loops forever.
    """
    LOG.debug('NetworkController processing outgoing request list.')
    # TODO(PH): We should try not to send routes from a bgp peer that is
    # not in established state.
    from ryu.services.protocols.bgp.model import (
        FlexinetOutgoingRoute)
    while self.is_connected:
        # sink iter is Sink instance and next is blocking so this isn't
        # active wait.
        for outgoing_msg in sink_iter:
            if not self.is_connected:
                self._socket.close()
                return

            if isinstance(outgoing_msg, FlexinetOutgoingRoute):
                rpc_msg = _create_prefix_notification(outgoing_msg, self)
            else:
                raise NotImplementedError(
                    'Do not handle out going message of type %s' %
                    outgoing_msg.__class__)
            if rpc_msg:
                self._sendall(rpc_msg)
        self.pause(0)

    # Stop incoming connection.
    if self.green_in:
        self.green_in.kill()
0.001455
def parse_next(self, ptype, m):
    """
    Parse the next packet.

    :param ptype: The (string) type of the incoming packet
    :param `.Message` m: The packet content
    """
    if self.transport.server_mode and (ptype == MSG_KEXGSS_INIT):
        return self._parse_kexgss_init(m)
    elif not self.transport.server_mode and (ptype == MSG_KEXGSS_HOSTKEY):
        return self._parse_kexgss_hostkey(m)
    elif self.transport.server_mode and (ptype == MSG_KEXGSS_CONTINUE):
        return self._parse_kexgss_continue(m)
    elif not self.transport.server_mode and (ptype == MSG_KEXGSS_COMPLETE):
        return self._parse_kexgss_complete(m)
    elif ptype == MSG_KEXGSS_ERROR:
        return self._parse_kexgss_error(m)
    msg = "GSS KexGroup1 asked to handle packet type {:d}"
    raise SSHException(msg.format(ptype))
0.00224
def gen_gausprocess_se(ntrain, ntest, noise=1., lenscale=1., scale=1., xmin=-10, xmax=10): """ Generate a random (noisy) draw from a Gaussian Process with a RBF kernel. """ # Xtrain = np.linspace(xmin, xmax, ntrain)[:, np.newaxis] Xtrain = np.random.rand(ntrain)[:, np.newaxis] * (xmin - xmax) - xmin Xtest = np.linspace(xmin, xmax, ntest)[:, np.newaxis] Xcat = np.vstack((Xtrain, Xtest)) K = scale * np.exp(-cdist(Xcat, Xcat, metric='sqeuclidean') / (2 * lenscale**2)) U, S, V = np.linalg.svd(K) L = U.dot(np.diag(np.sqrt(S))).dot(V) f = np.random.randn(ntrain + ntest).dot(L) ytrain = f[0:ntrain] + np.random.randn(ntrain) * noise ftest = f[ntrain:] return Xtrain, ytrain, Xtest, ftest
0.001261
def send_request(self, worker_class_or_function, args, on_receive=None): """ Requests some work to be done by the backend. You can get notified of the work results by passing a callback (on_receive). :param worker_class_or_function: Worker class or function :param args: worker args, any Json serializable objects :param on_receive: an optional callback executed when we receive the worker's results. The callback will be called with one arguments: the results of the worker (object) :raise: backend.NotRunning if the backend process is not running. """ if not self.running: try: # try to restart the backend if it crashed. self.start(self.server_script, interpreter=self.interpreter, args=self.args) except AttributeError: pass # not started yet finally: # caller should try again, later raise NotRunning() else: comm('sending request, worker=%r' % worker_class_or_function) # create a socket, the request will be send as soon as the socket # has connected socket = JsonTcpClient( self.editor, self._port, worker_class_or_function, args, on_receive=on_receive) socket.finished.connect(self._rm_socket) self._sockets.append(socket) # restart heartbeat timer self._heartbeat_timer.start()
0.001278
def _download_repodata(self, checked_repos):
    """Download repodata."""
    self._files_downloaded = []
    self._repodata_files = []
    self.__counter = -1

    if checked_repos:
        for repo in checked_repos:
            path = self._repo_url_to_path(repo)
            self._files_downloaded.append(path)
            self._repodata_files.append(path)
            worker = self.download_async(repo, path)
            worker.url = repo
            worker.path = path
            worker.sig_finished.connect(self._repodata_downloaded)
    else:
        # Empty, maybe there is no internet connection
        # Load information from conda-meta and save that file
        path = self._get_repodata_from_meta()
        self._repodata_files = [path]
        self._repodata_downloaded()
0.00232
def cbpdn_relax(k):
    """Do relaxation for the cbpdn stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """
    mp_Z_X[k] = mp_xrlx * mp_Z_X[k] + (1 - mp_xrlx) * mp_Z_Y[k]
0.003663
def batching(size):
    """Create a transducer which produces non-overlapping batches."""
    if size < 1:
        raise ValueError("batching() size must be at least 1")

    def batching_transducer(reducer):
        return Batching(reducer, size)

    return batching_transducer
0.003571
def get_hash(self, handle):
    """Return the hash."""
    fpath = self._fpath_from_handle(handle)
    return DiskStorageBroker.hasher(fpath)
0.013072
def fill_predictive_missing_parameters(self):
    """define state with initial_state

    :return: None
    """
    if self.initial_state == 'w':
        self.state = u'WARNING'
    elif self.initial_state == 'u':
        self.state = u'UNKNOWN'
    elif self.initial_state == 'c':
        self.state = u'CRITICAL'
    elif self.initial_state == 'x':
        self.state = u'UNREACHABLE'
0.004651
def convolve2d_disk(fn, r, sig, nstep=200): """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is azimuthally symmetric function in two dimensions and g is a step function given by: g(r) = H(1-r/s) Parameters ---------- fn : function Input function that takes a single radial coordinate parameter. r : `~numpy.ndarray` Array of points at which the convolution is to be evaluated. sig : float Radius parameter of the step function. nstep : int Number of sampling point for numeric integration. """ r = np.array(r, ndmin=1) sig = np.array(sig, ndmin=1) rmin = r - sig rmax = r + sig rmin[rmin < 0] = 0 delta = (rmax - rmin) / nstep redge = rmin[..., np.newaxis] + \ delta[..., np.newaxis] * np.linspace(0, nstep, nstep + 1) rp = 0.5 * (redge[..., 1:] + redge[..., :-1]) dr = redge[..., 1:] - redge[..., :-1] fnv = fn(rp) r = r.reshape(r.shape + (1,)) cphi = -np.ones(dr.shape) m = ((rp + r) / sig < 1) | (r == 0) rrp = r * rp sx = r ** 2 + rp ** 2 - sig ** 2 cphi[~m] = sx[~m] / (2 * rrp[~m]) dphi = 2 * np.arccos(cphi) v = rp * fnv * dphi * dr / (np.pi * sig * sig) s = np.sum(v, axis=-1) return s
0.000786
def add_service(self, service):
    """Add a new service.

    If the service already exists, it will be replaced.
    """
    if service.protocol in self._services:
        existing = self._services[service.protocol]
        if not existing.superseeded_by(service):
            return

    self._services[service.protocol] = service
0.005479
def _drop(expr, data, axis=0, columns=None): """ Drop data from a DataFrame. :param expr: collection to drop data from :param data: data to be removed :param axis: 0 for deleting rows, 1 for columns. :param columns: columns of data to select, only useful when axis == 0 :return: collection :Example: >>> import pandas as pd >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})) >>> df2 = DataFrame(pd.DataFrame({'a': [2, 3], 'b': [5, 7]})) >>> df1.drop(df2) a b c 0 1 4 7 1 3 6 9 >>> df1.drop(df2, columns='a') a b c 0 1 4 7 >>> df1.drop(['a'], axis=1) b c 0 4 7 1 5 8 2 6 9 >>> df1.drop(df2, axis=1) c 0 7 1 8 2 9 """ from ..utils import to_collection expr = to_collection(expr) if axis == 0: if not isinstance(data, (CollectionExpr, SequenceExpr)): raise ExpressionError('data should be a collection or sequence when axis == 1.') data = to_collection(data) if columns is None: columns = [n for n in data.schema.names] if isinstance(columns, six.string_types): columns = [columns, ] data = data.select(*columns).distinct() drop_predicates = [data[n].isnull() for n in data.schema.names] return expr.left_join(data, on=columns, suffixes=('', '_dp')).filter(*drop_predicates) \ .select(*expr.schema.names) else: if isinstance(data, (CollectionExpr, SequenceExpr)): data = to_collection(data).schema.names return expr.exclude(data)
0.002404
def get_related_ids(self):
    """
    Get all of the IDs for the related models.

    :rtype: list
    """
    related = self.get_related()

    full_key = related.get_qualified_key_name()

    return self.get_query().select(full_key).lists(related.get_key_name())
0.006826
def do_execute(self):
    """
    The actual execution of the actor.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    fname = str(self.input.payload)
    spattern = str(self.resolve_option("regexp"))
    pattern = None
    if (spattern is not None) and (spattern != ".*"):
        pattern = re.compile(spattern)
    if (pattern is None) or (pattern.match(fname)):
        os.remove(fname)
    self._output.append(self.input)
    return None
0.003752
def get_article(self, article_id):
    """
    Get a single article represented by `article_id`.

    :param article_id: ID of the article to retrieve.
    """
    url = self._generate_url('articles/{0}'.format(article_id))
    return self.get(url)
0.007353
def merge(cls, first, second):
    """
    Return an AttributeList that is the result of merging first with second.
    """
    merged = AttributeList([], None)
    assert (isinstance(first, AttributeList))
    assert (isinstance(second, AttributeList))
    merged._contents = first._contents[:]
    merged._contents += second._contents[:]
    return merged
0.007595
def prepare_threads(new_function):
    """Replaces threading._get_ident() function in order to notify
    the waiting Condition."""
    with _waitforthreads_lock:
        if hasattr(threading, 'get_ident'):
            old_function = threading.get_ident
            threading.get_ident = new_function
        else:
            old_function = threading._get_ident
            threading._get_ident = new_function

    return old_function
0.002278
def value(self): """ Return the current value of a variable """ # Does the variable name have tags to parse? if len(self._element): var = ''.join(map(str, self.trigger.agentml.parse_tags(self._element, self.trigger))) else: var = self._element.text or attribute(self._element, 'name') # Is there a default value defined? default = attribute(self._element, 'default') try: self._log.debug('Retrieving {type} variable {var}'.format(type=self.type, var=var)) if self.type == 'user': return self.trigger.user.get_var(var) else: return self.trigger.agentml.get_var(var) except VarNotDefinedError: # Do we have a default value? if default: self._log.info('{type} variable {var} not set, returning default: {default}' .format(type=self.type.capitalize(), var=var, default=default)) self._log.info('{type} variable {var} not set and no default value has been specified' .format(type=self.type.capitalize(), var=var)) return ''
0.005761
def get_percolation_threshold(self): r""" Find the invasion threshold at which a cluster spans from the inlet to the outlet sites """ if np.sum(self['pore.inlets']) == 0: raise Exception('Inlet pores must be specified first') if np.sum(self['pore.outlets']) == 0: raise Exception('Outlet pores must be specified first') else: Pout = self['pore.outlets'] # Do a simple check of pressures on the outlet pores first... if self.settings['access_limited']: thresh = np.amin(self['pore.invasion_pressure'][Pout]) else: raise Exception('This is currently only implemented for access ' + 'limited simulations') return thresh
0.002522
def generate_supremacy_circuit_google_v2(qubits: Iterable[devices.GridQubit], cz_depth: int, seed: int) -> circuits.Circuit: """ Generates Google Random Circuits v2 as in github.com/sboixo/GRCS cz_v2. See also https://arxiv.org/abs/1807.10749 Args: qubits: qubit grid in which to generate the circuit. cz_depth: number of layers with CZ gates. seed: seed for the random instance. Returns: A circuit corresponding to instance inst_{n_rows}x{n_cols}_{cz_depth+1}_{seed} The mapping of qubits is cirq.GridQubit(j,k) -> q[j*n_cols+k] (as in the QASM mapping) """ non_diagonal_gates = [ops.pauli_gates.X**(1/2), ops.pauli_gates.Y**(1/2)] rand_gen = random.Random(seed).random circuit = circuits.Circuit() # Add an initial moment of Hadamards circuit.append(ops.common_gates.H(qubit) for qubit in qubits) layer_index = 0 if cz_depth: layer_index = _add_cz_layer(layer_index, circuit) # In the first moment, add T gates when possible for qubit in qubits: if not circuit.operation_at(qubit, 1): circuit.append(ops.common_gates.T(qubit), strategy=InsertStrategy.EARLIEST) for moment_index in range(2, cz_depth+1): layer_index = _add_cz_layer(layer_index, circuit) # Add single qubit gates in the same moment for qubit in qubits: if not circuit.operation_at(qubit, moment_index): last_op = circuit.operation_at(qubit, moment_index-1) if last_op: gate = cast(ops.GateOperation, last_op).gate # Add a random non diagonal gate after a CZ if gate == ops.CZ: circuit.append(_choice(rand_gen, non_diagonal_gates).on(qubit), strategy=InsertStrategy.EARLIEST) # Add a T gate after a non diagonal gate elif not gate == ops.T: circuit.append(ops.common_gates.T(qubit), strategy=InsertStrategy.EARLIEST) # Add a final moment of Hadamards circuit.append([ops.common_gates.H(qubit) for qubit in qubits], strategy=InsertStrategy.NEW_THEN_INLINE) return circuit
0.000402
def _variable_with_weight_decay(name, shape, stddev, wd): """Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor """ dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if wd is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var
0.006969
def lock(self, page):
    """Locks *page*."""
    result = self._dokuwiki.send('dokuwiki.setLocks',
                                 lock=[page], unlock=[])
    if result['lockfail']:
        raise DokuWikiError('unable to lock page')
0.007874
def get_parent_directory(path):
    """Like os.path.dirname except it returns the absolute name of the
    parent of the dirname directory. No symbolic link expansion
    (os.path.realpath) or user expansion (os.path.expanduser) is done."""
    if not os.path.isdir(path):
        path = os.path.dirname(path)
    return os.path.abspath(os.path.join(path, os.path.pardir))
0.005479
def create(self, id, seq, resource):  # pylint: disable=invalid-name,redefined-builtin
    """Create a highlight.

    :param id: Result ID as an int.
    :param seq: TestResult sequence ID as an int.
    :param resource: :class:`highlights.Highlight <highlights.Highlight>` object
    :return: :class:`highlights.Highlight <highlights.Highlight>` object
    :rtype: highlights.Highlight
    """
    return self.create_or_edit(id, seq, resource)
0.010504
def clean_link(self, url):
        """Makes sure a link is fully encoded.  That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        # Zero-pad the hex escape so code points below 0x10 still produce a
        # valid two-character sequence (e.g. %09 rather than "% 9").
        return self._clean_re.sub(
            lambda match: "%%%02x" % ord(match.group(0)), url)
0.010239
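# Standalone sketch of the same idea: percent-encode anything outside a
# whitelist of URL-safe characters (the regex below is illustrative, not the
# original _clean_re):
import re

_clean_re = re.compile(r"[^a-zA-Z0-9$\-_.+!*'(),%/:;=?@&#~]")

def clean_link(url):
    return _clean_re.sub(lambda match: "%%%02x" % ord(match.group(0)), url)

print(clean_link('http://example.org/a file name.tar.gz'))
# -> http://example.org/a%20file%20name.tar.gz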
def mfds2multimfd(mfds):
    """
    Convert a list of MFD nodes into a single MultiMFD node
    """
    _, kind = mfds[0].tag.split('}')
    node = Node('multiMFD', dict(kind=kind, size=len(mfds)))
    lengths = None
    for field in mfd.multi_mfd.ASSOC[kind][1:]:
        alias = mfd.multi_mfd.ALIAS.get(field, field)
        if field in ('magnitudes', 'occurRates'):
            data = [~getattr(m, field) for m in mfds]
            lengths = [len(d) for d in data]
            data = sum(data, [])  # flatten the list of lists
        else:
            try:
                data = [m[alias] for m in mfds]
            except KeyError:
                if alias == 'binWidth':
                    # a missing binWidth in GR MFDs is ok
                    continue
                else:
                    raise
        node.append(Node(field, text=collapse(data)))
    if lengths:  # this is the last field if present
        node.append(Node('lengths', text=collapse(lengths)))
    return node
0.001008
def generate_event_set(ucerf, background_sids, src_filter, ses_idx, seed): """ Generates the event set corresponding to a particular branch """ serial = seed + ses_idx * TWO16 # get rates from file with h5py.File(ucerf.source_file, 'r') as hdf5: occurrences = ucerf.tom.sample_number_of_occurrences(ucerf.rate, seed) indices, = numpy.where(occurrences) logging.debug( 'Considering "%s", %d ruptures', ucerf.source_id, len(indices)) # get ruptures from the indices ruptures = [] rupture_occ = [] for iloc, n_occ in zip(indices, occurrences[indices]): ucerf_rup = ucerf.get_ucerf_rupture(iloc, src_filter) if ucerf_rup: ucerf_rup.serial = serial serial += 1 ruptures.append(ucerf_rup) rupture_occ.append(n_occ) # sample background sources background_ruptures, background_n_occ = sample_background_model( hdf5, ucerf.idx_set["grid_key"], ucerf.tom, seed, background_sids, ucerf.min_mag, ucerf.npd, ucerf.hdd, ucerf.usd, ucerf.lsd, ucerf.msr, ucerf.aspect, ucerf.tectonic_region_type) for i, brup in enumerate(background_ruptures): brup.serial = serial serial += 1 ruptures.append(brup) rupture_occ.extend(background_n_occ) assert len(ruptures) < TWO16, len(ruptures) # < 2^16 ruptures per SES return ruptures, rupture_occ
0.000661
def skip(self, content): """ Get whether to skip this I{content}. Should be skipped when the content is optional and value is either None or an empty list. @param content: Content to skip. @type content: L{Object} @return: True if content is to be skipped. @rtype: bool """ if self.optional(content): v = content.value if v is None: return True if isinstance(v, (list, tuple)) and not v: return True return False
0.003503
def getItemByID(self, storeID, default=_noItem, autoUpgrade=True): """ Retrieve an item by its storeID, and return it. Note: most of the failure modes of this method are catastrophic and should not be handled by application code. The only one that application programmers should be concerned with is KeyError. They are listed for educational purposes. @param storeID: an L{int} which refers to the store. @param default: if passed, return this value rather than raising in the case where no Item is found. @raise TypeError: if storeID is not an integer. @raise UnknownItemType: if the storeID refers to an item row in the database, but the corresponding type information is not available to Python. @raise RuntimeError: if the found item's class version is higher than the current application is aware of. (In other words, if you have upgraded a database to a new schema and then attempt to open it with a previous version of the code.) @raise errors.ItemNotFound: if no item existed with the given storeID. @return: an Item, or the given default, if it was passed and no row corresponding to the given storeID can be located in the database. """ if not isinstance(storeID, (int, long)): raise TypeError("storeID *must* be an int or long, not %r" % ( type(storeID).__name__,)) if storeID == STORE_SELF_ID: return self try: return self.objectCache.get(storeID) except KeyError: pass log.msg(interface=iaxiom.IStatEvent, stat_cache_misses=1, key=storeID) results = self.querySchemaSQL(_schema.TYPEOF_QUERY, [storeID]) assert (len(results) in [1, 0]),\ "Database panic: more than one result for TYPEOF!" if results: typename, module, version = results[0] useMostRecent = False moreRecentAvailable = False # The schema may have changed since the last time I saw the # database. Let's look to see if this is suspiciously broken... if _typeIsTotallyUnknown(typename, version): # Another process may have created it - let's re-up the schema # and see what we get. self._startup() # OK, all the modules have been loaded now, everything # verified. if _typeIsTotallyUnknown(typename, version): # If there is STILL no inkling of it anywhere, we are # almost certainly boned. Let's tell the user in a # structured way, at least. raise errors.UnknownItemType( "cannot load unknown schema/version pair: %r %r - id: %r" % (typename, version, storeID)) if typename in _typeNameToMostRecentClass: moreRecentAvailable = True mostRecent = _typeNameToMostRecentClass[typename] if mostRecent.schemaVersion < version: raise RuntimeError("%s:%d - was found in the database and most recent %s is %d" % (typename, version, typename, mostRecent.schemaVersion)) if mostRecent.schemaVersion == version: useMostRecent = True if useMostRecent: T = mostRecent else: T = self.getOldVersionOf(typename, version) # for the moment we're going to assume no inheritance attrs = self.querySQL(T._baseSelectSQL(self), [storeID]) if len(attrs) == 0: if default is _noItem: raise errors.ItemNotFound( 'No results for known-to-be-good object') return default elif len(attrs) > 1: raise errors.DataIntegrityError( 'Too many results for {:d}'.format(storeID)) attrs = attrs[0] x = T.existingInStore(self, storeID, attrs) if moreRecentAvailable and (not useMostRecent) and autoUpgrade: # upgradeVersion will do caching as necessary, we don't have to # cache here. (It must, so that app code can safely call # upgradeVersion and get a consistent object out of it.) 
x = self.transact(self._upgradeManager.upgradeItem, x) elif not x.__legacy__: # We loaded the most recent version of an object self.objectCache.cache(storeID, x) return x if default is _noItem: raise errors.ItemNotFound(storeID) return default
0.001031
def join_densities(*densities: Density) -> Density: """Join two mixed states into a larger qubit state""" vectors = [rho.vec for rho in densities] vec = reduce(outer_product, vectors) memory = dict(ChainMap(*[rho.memory for rho in densities])) # TESTME return Density(vec.tensor, vec.qubits, memory)
0.003115
def load_parser_options_from_env( parser_class: t.Type[BaseParser], env: t.Optional[t.Dict[str, str]] = None) -> t.Dict[str, t.Any]: """ Extracts arguments from ``parser_class.__init__`` and populates them from environment variables. Uses ``__init__`` argument type annotations for correct type casting. .. note:: Environment variables should be prefixed with ``<UPPERCASEPARSERCLASSNAME>__``. :param parser_class: a subclass of :class:`~django_docker_helpers.config.backends.base.BaseParser` :param env: a dict with environment variables, default is ``os.environ`` :return: parser's ``__init__`` arguments dict mapping Example: :: env = { 'REDISPARSER__ENDPOINT': 'go.deep', 'REDISPARSER__HOST': 'my-host', 'REDISPARSER__PORT': '66', } res = ConfigLoader.load_parser_options_from_env(RedisParser, env) assert res == {'endpoint': 'go.deep', 'host': 'my-host', 'port': 66} """ env = env or os.environ sentinel = object() spec: inspect.FullArgSpec = inspect.getfullargspec(parser_class.__init__) environment_parser = EnvironmentParser(scope=parser_class.__name__.upper(), env=env) stop_args = ['self'] safe_types = [int, bool, str] init_args = {} for arg_name in spec.args: if arg_name in stop_args: continue type_hint = spec.annotations.get(arg_name) coerce_type = None if type_hint in safe_types: coerce_type = type_hint elif hasattr(type_hint, '__args__'): if len(type_hint.__args__) == 1: # one type if type_hint.__args__[0] in safe_types: coerce_type = type_hint.__args__[0] elif len(type_hint.__args__) == 2: # t.Optional try: _args = list(type_hint.__args__) _args.remove(type(None)) if _args[0] in safe_types: coerce_type = _args[0] except ValueError: pass val = environment_parser.get(arg_name, sentinel, coerce_type=coerce_type) if val is sentinel: continue init_args[arg_name] = val return init_args
0.004011
def compute_usage_requirements (self, subvariant):
        """ Given the set of generated targets, and refined build
            properties, determines and sets appropriate usage requirements
            on those targets.
        """
        assert isinstance(subvariant, virtual_target.Subvariant)
        rproperties = subvariant.build_properties ()
        xusage_requirements = self.evaluate_requirements(
            self.usage_requirements_, rproperties, "added")

        # We generate all dependency properties and add them,
        # as well as their usage requirements, to result.
        (r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties)
        extra = r1 + r2

        result = property_set.create (xusage_requirements.non_dependency () + extra)

        # Propagate usage requirements we've got from sources, except
        # for the <pch-header> and <pch-file> features.
        #
        # Those features specify which pch file to use, and should apply
        # only to direct dependents. Consider:
        #
        #   pch pch1 : ...
        #   lib lib1 : ..... pch1 ;
        #   pch pch2 :
        #   lib lib2 : pch2 lib1 ;
        #
        # Here, lib2 should not get <pch-header> property from pch1.
        #
        # Essentially, when those two features are in usage requirements,
        # they are propagated only to direct dependents. We might need
        # a more general mechanism, but for now, only those two
        # features are special.
        properties = []
        for p in subvariant.sources_usage_requirements().all():
            if p.feature.name not in ('pch-header', 'pch-file'):
                properties.append(p)

        if 'shared' in rproperties.get('link'):
            new_properties = []
            for p in properties:
                if p.feature.name != 'library':
                    new_properties.append(p)
            properties = new_properties

        result = result.add_raw(properties)
        return result
0.004943
def subsets(self): """Subsets that make up each split of the dataset for the language pair.""" source, target = self.builder_config.language_pair filtered_subsets = {} for split, ss_names in self._subsets.items(): filtered_subsets[split] = [] for ss_name in ss_names: ds = DATASET_MAP[ss_name] if ds.target != target or source not in ds.sources: logging.info( "Skipping sub-dataset that does not include language pair: %s", ss_name) else: filtered_subsets[split].append(ss_name) logging.info("Using sub-datasets: %s", filtered_subsets) return filtered_subsets
0.007496
def subdivide(length, parts=None, max_length=None):
    """Generates (start, stop) index pairs covering ``length``, either in
    ``parts`` roughly equal pieces or in pieces of at most ``max_length``,
    e.g. [(0, length/parts), ..., (.., length)]."""
    if max_length:
        i1 = 0
        done = False
        while not done:
            i2 = min(length, i1 + max_length)
            yield i1, i2
            i1 = i2
            if i1 == length:
                done = True
    else:
        part_length = int(math.ceil(float(length) / parts))
        for index in range(parts):
            i1, i2 = index * part_length, min(length, (index + 1) * part_length)
            yield i1, i2
0.004184
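# Usage sketch for the generator above (standalone apart from the math import
# its definition needs):
print(list(subdivide(10, parts=3)))       # [(0, 4), (4, 8), (8, 10)]
print(list(subdivide(10, max_length=4)))  # [(0, 4), (4, 8), (8, 10)]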
def get_containers_by_name(self, name):
        """
        Get all containers whose names start with the given task name.

        :param name: :class:`str`, task name
        :return: :class:`list`, list of matching containers
        """
        code, containers = self.get_containers()
        if code != httplib.OK:
            return []

        return [container for container in containers
                if any(map(lambda x: x.startswith(name), container.Names))]
0.004425
def plot(self, plot_grouped=False): """ Plot the cumulative number of detections in time. .. rubric:: Example >>> family = Family( ... template=Template(name='a'), detections=[ ... Detection(template_name='a', detect_time=UTCDateTime(0) + 200, ... no_chans=8, detect_val=4.2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0), ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0)]) >>> family.plot(plot_grouped=True) # doctest: +SKIP .. plot:: from eqcorrscan.core.match_filter import Family, Template from eqcorrscan.core.match_filter import Detection from obspy import UTCDateTime family = Family( template=Template(name='a'), detections=[ Detection(template_name='a', detect_time=UTCDateTime(0) + 200, no_chans=8, detect_val=4.2, threshold=1.2, typeofdet='corr', threshold_type='MAD', threshold_input=8.0), Detection(template_name='a', detect_time=UTCDateTime(0), no_chans=8, detect_val=4.5, threshold=1.2, typeofdet='corr', threshold_type='MAD', threshold_input=8.0), Detection(template_name='a', detect_time=UTCDateTime(0) + 10, no_chans=8, detect_val=4.5, threshold=1.2, typeofdet='corr', threshold_type='MAD', threshold_input=8.0)]) family.plot(plot_grouped=True) """ cumulative_detections( detections=self.detections, plot_grouped=plot_grouped)
0.000878
def get_account_info(self):
        ''' Get current account information

        The information will then be saved in `self.account` so that you can
        access it like this:

        >>> hsclient = HSClient()
        >>> acct = hsclient.get_account_info()
        >>> print acct.email_address

        Returns:
            An Account object

        '''
        request = self._get_request()
        response = request.get(self.ACCOUNT_INFO_URL)
        self.account.json_data = response["account"]
        return self.account
0.003676
def _build_mask(self): """Build mask applied to O^{-1}, O for the matrix approx constraint""" self.mask = torch.ones(self.d, self.d).byte() for ci in self.c_data.values(): si, ei = ci["start_index"], ci["end_index"] for cj in self.c_data.values(): sj, ej = cj["start_index"], cj["end_index"] # Check if ci and cj are part of the same maximal clique # If so, mask out their corresponding blocks in O^{-1} if len(ci["max_cliques"].intersection(cj["max_cliques"])) > 0: self.mask[si:ei, sj:ej] = 0 self.mask[sj:ej, si:ei] = 0
0.002963
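# Standalone toy version of the masking logic above, with a hypothetical c_data
# layout mirroring the attributes it reads (block extents plus maximal-clique sets):
import torch

d = 6
c_data = {
    0: {"start_index": 0, "end_index": 2, "max_cliques": {0}},
    1: {"start_index": 2, "end_index": 4, "max_cliques": {0}},
    2: {"start_index": 4, "end_index": 6, "max_cliques": {1}},
}
mask = torch.ones(d, d, dtype=torch.bool)
for ci in c_data.values():
    for cj in c_data.values():
        if ci["max_cliques"] & cj["max_cliques"]:
            mask[ci["start_index"]:ci["end_index"], cj["start_index"]:cj["end_index"]] = 0
            mask[cj["start_index"]:cj["end_index"], ci["start_index"]:ci["end_index"]] = 0
# Sources 0 and 1 share maximal clique 0, so their cross blocks (and every
# block's own diagonal block) are masked out; source 2 only masks itself.
print(mask.int())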
def filter_osm_file():
    """
    Downloads (and compiles) the osmfilter tool from the web and calls it on the
    OSM file to keep only the road elements.
    """
    print_info('Filtering OSM file...')
    start_time = time.time()

    if check_osmfilter():
        # params = '--keep="highway=motorway =motorway_link =trunk =trunk_link =primary =primary_link =secondary' \
        #          ' =secondary_link =tertiary =tertiary_link =unclassified =unclassified_link =residential =residential_link' \
        #          ' =living_street" --drop="access=no"'
        params = config.osm_filter_params

        command = './osmfilter' if platform.system() == 'Linux' else 'osmfilter.exe'
        if platform.system() == 'Linux':
            filter_command = '%s "%s" %s | pv > "%s"' % (command, config.osm_map_filename, params,
                                                         config.filtered_osm_filename)
        else:
            filter_command = '%s "%s" %s > "%s"' % (
                command, config.osm_map_filename, params, config.filtered_osm_filename)
        os.system(filter_command)
    else:
        print_info('Osmfilter not available. Exiting.')
        exit(1)

    print_info('Filtering finished. (%.2f secs)' % (time.time() - start_time))
0.00627
def load(self, service_name, api_version=None, cached=True):
        """
        Loads the desired JSON for a service.

        This will fall back through all the ``data_dirs`` provided to the
        constructor, returning the **first** one it finds.

        :param service_name: The name of the desired service
        :type service_name: string

        :param api_version: (Optional) The desired API version to load
        :type api_version: string

        :param cached: (Optional) Whether or not the cache should be used
            when attempting to load the data. Default is ``True``.
        :type cached: boolean

        :returns: The loaded JSON as a dict
        """
        # Fetch from the cache first if it's there.
        if cached:
            if service_name in self._loaded_data:
                if api_version in self._loaded_data[service_name]:
                    return self._loaded_data[service_name][api_version]

        data = {}
        options = self.get_available_options(service_name)
        match, version = self.get_best_match(
            options,
            service_name,
            api_version=api_version
        )

        with open(match, 'r') as json_file:
            data = json.load(json_file)

        # Embed where we found it, for debugging purposes.
        data['__file__'] = match
        data['api_version'] = version

        if cached:
            self._loaded_data.setdefault(service_name, {})
            self._loaded_data[service_name][api_version] = data

        return data
0.001276
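# Hypothetical usage, assuming a loader class whose constructor takes the
# data_dirs mentioned in the docstring, each holding <service_name>/<api_version>.json:
loader = Loader(data_dirs=['/path/to/data'])
s3_data = loader.load('s3')                        # best match, latest available version
ec2_data = loader.load('ec2', api_version='2014-06-15')
print(s3_data['api_version'], s3_data['__file__'])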
def convert_bboxes_to_albumentations(bboxes, source_format, rows, cols, check_validity=False):
    """Convert a list of bounding boxes from a format specified in `source_format` to the format used by
    albumentations
    """
    return [convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity) for bbox in bboxes]
0.012085
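# Usage sketch (assumes convert_bbox_to_albumentations is available, as in the
# body above, and that 'coco' is among the supported source formats):
bboxes_coco = [(20, 30, 40, 50), (10, 10, 20, 20)]   # (x_min, y_min, width, height) in pixels
normalized = convert_bboxes_to_albumentations(bboxes_coco, 'coco', rows=100, cols=200,
                                              check_validity=True)
# -> per-box (x_min, y_min, x_max, y_max) coordinates normalized to [0, 1]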
def collection(self, collection_id): """Create a sub-collection underneath the current document. Args: collection_id (str): The sub-collection identifier (sometimes referred to as the "kind"). Returns: ~.firestore_v1beta1.collection.CollectionReference: The child collection. """ child_path = self._path + (collection_id,) return self._client.collection(*child_path)
0.004274
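# Usage sketch (assumes a configured google.cloud.firestore client; the
# collection and document names are placeholders):
from google.cloud import firestore

client = firestore.Client()
doc_ref = client.collection('users').document('alovelace')
messages = doc_ref.collection('messages')   # the sub-collection method shown above
messages.document('m1').set({'text': 'hello'})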
def datetime_value_renderer(value, **options): """Render datetime value with django formats, default is SHORT_DATETIME_FORMAT""" datetime_format = options.get('datetime_format', 'SHORT_DATETIME_FORMAT') return formats.date_format(timezone.localtime(value), datetime_format)
0.007018
def _GetStatus(self): """Retrieves status information. Returns: dict[str, object]: status attributes, indexed by name. """ if self._analysis_mediator: number_of_produced_event_tags = ( self._analysis_mediator.number_of_produced_event_tags) number_of_produced_reports = ( self._analysis_mediator.number_of_produced_analysis_reports) else: number_of_produced_event_tags = None number_of_produced_reports = None if self._process_information: used_memory = self._process_information.GetUsedMemory() or 0 else: used_memory = 0 if self._memory_profiler: self._memory_profiler.Sample('main', used_memory) status = { 'display_name': '', 'identifier': self._name, 'number_of_consumed_event_tags': None, 'number_of_consumed_events': self._number_of_consumed_events, 'number_of_consumed_reports': None, 'number_of_consumed_sources': None, 'number_of_consumed_warnings': None, 'number_of_produced_event_tags': number_of_produced_event_tags, 'number_of_produced_events': None, 'number_of_produced_reports': number_of_produced_reports, 'number_of_produced_sources': None, 'number_of_produced_warnings': None, 'processing_status': self._status, 'task_identifier': None, 'used_memory': used_memory} if self._status in ( definitions.STATUS_INDICATOR_ABORTED, definitions.STATUS_INDICATOR_COMPLETED): self._foreman_status_wait_event.set() return status
0.006274