text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def _merge_derived_parameters(self, other_trajectory, used_runs, rename_dict,
                              allowed_translations, ignore_data):
    """ Merges derived parameters that have `run_ALL` in their name.

    Creates a new parameter with the name of the first new run and links to this
    parameter to avoid copying in all other runs.
    """
    other_derived_parameters = other_trajectory._derived_parameters.copy()
    # get first run_idx
    new_first_run_idx = min(used_runs.values())
    run_name_dummy = other_trajectory.f_wildcard('$', -1)
    for param_name in other_derived_parameters:
        if param_name in ignore_data:
            continue
        split_name = param_name.split('.')
        if not any(x in run_name_dummy for x in split_name):
            continue
        ignore_data.add(param_name)
        param = other_derived_parameters[param_name]
        new_param_name = self._rename_full_name(param_name, other_trajectory,
                                                used_runs=used_runs)
        if new_param_name in self:
            my_param = self.f_get(new_param_name, fast_access=False)
            if (my_param._equal_values(my_param.f_get(), param.f_get()) and
                    not (my_param.f_has_range() or param.f_has_range())):
                continue
        first_new_param_name = self._rename_full_name(param_name, other_trajectory,
                                                      new_run_idx=new_first_run_idx)
        rename_dict[param_name] = first_new_param_name
        comment = param.v_comment
        param_type = param.f_get_class_name()
        param_type = self._create_class(param_type)
        first_param = self.f_add_leaf(param_type, first_new_param_name,
                                      comment=comment)
        for run_idx in used_runs.values():
            if run_idx == new_first_run_idx:
                continue
            next_name = self._rename_full_name(param_name, other_trajectory,
                                               new_run_idx=run_idx)
            split_name = next_name.split('.')
            link_name = split_name.pop()
            location_name = '.'.join(split_name)
            if not self.f_contains(location_name, shortcuts=False):
                the_group = self.f_add_group(location_name)
            else:
                the_group = self.f_get(location_name)
            the_group.f_add_link(link_name, first_param)

    for param_name in other_derived_parameters:
        if param_name in ignore_data:
            continue
        split_name = param_name.split('.')
        ignore_data.add(param_name)
        if any(x in other_trajectory._reversed_wildcards and x not in allowed_translations
               for x in split_name):
            continue
        new_name = self._rename_full_name(param_name, other_trajectory,
                                          used_runs=used_runs)
        if self.f_contains(new_name):
            my_param = self.f_get(new_name, fast_access=False)
            param = other_derived_parameters[param_name]
            if (my_param._equal_values(my_param.f_get(), param.f_get()) and
                    not (my_param.f_has_range() or param.f_has_range())):
                continue
            else:
                self._logger.error('Could not merge parameter `%s`. '
                                   'I will ignore it!' % new_name)
        rename_dict[param_name] = new_name
0.003784
def query(self, analysis_type, params, all_keys=False):
    """
    Performs a query using the Keen IO analysis API.
    A read key must be set first.
    """
    if not self._order_by_is_valid_or_none(params):
        raise ValueError("order_by given is invalid or is missing required group_by.")
    if not self._limit_is_valid_or_none(params):
        raise ValueError("limit given is invalid or is missing required order_by.")

    url = "{0}/{1}/projects/{2}/queries/{3}".format(self.base_url, self.api_version,
                                                    self.project_id, analysis_type)
    headers = utilities.headers(self.read_key)
    payload = params
    response = self.fulfill(HTTPMethods.GET, url, params=payload, headers=headers,
                            timeout=self.get_timeout)
    self._error_handling(response)
    response = response.json()
    if not all_keys:
        response = response["result"]
    return response
0.008065
def _unstack_extension_series(series, level, fill_value):
    """
    Unstack an ExtensionArray-backed Series.

    The ExtensionDtype is preserved.

    Parameters
    ----------
    series : Series
        A Series with an ExtensionArray for values
    level : Any
        The level name or number.
    fill_value : Any
        The user-level (not physical storage) fill value to use for
        missing values introduced by the reshape. Passed to
        ``series.values.take``.

    Returns
    -------
    DataFrame
        Each column of the DataFrame will have the same dtype as
        the input Series.
    """
    # Implementation note: the basic idea is to
    # 1. Do a regular unstack on a dummy array of integers
    # 2. Followup with a columnwise take.
    # We use the dummy take to discover newly-created missing values
    # introduced by the reshape.
    from pandas.core.reshape.concat import concat

    dummy_arr = np.arange(len(series))
    # fill_value=-1, since we will do a series.values.take later
    result = _Unstacker(dummy_arr, series.index,
                        level=level, fill_value=-1).get_result()

    out = []
    values = extract_array(series, extract_numpy=False)

    for col, indices in result.iteritems():
        out.append(Series(values.take(indices.values,
                                      allow_fill=True,
                                      fill_value=fill_value),
                          name=col, index=result.index))
    return concat(out, axis='columns', copy=False, keys=result.columns)
0.000644
def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.
    """
    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
        # be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to insure that the next call to os.setsid is successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)

    if (pid == 0):  # The first child.
        # To become the session leader of this new session and the process group
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        #   1) When the process that dies is the session leader of a session that
        #      is attached to a terminal device, SIGHUP is sent to all processes
        #      in the foreground process group of that terminal device.
        #   2) When the death of a process causes a process group to become
        #      orphaned, and one or more processes in the orphaned group are
        #      stopped, then SIGHUP and SIGCONT are sent to all members of the
        #      orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal           # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
            # process responsible for its cleanup. And, since the first child is
            # a session leader without a controlling terminal, it's possible for
            # it to acquire one by opening a terminal in the future (System V-
            # based systems). This second fork guarantees that the child is no
            # longer a session leader, preventing the daemon from ever acquiring
            # a controlling terminal.
            pid = os.fork()  # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)

        if (pid == 0):  # The second child.
            # Since the current working directory may be a mounted filesystem, we
            # avoid the issue of not being able to unmount the filesystem at
            # shutdown time by changing it to the root directory.
            os.chdir(WORKDIR)
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            os.umask(UMASK)
        else:
            # exit() or _exit()? See below.
            os._exit(0)  # Exit parent (the first child) of the second child.
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        os._exit(0)  # Exit parent of the first child.

    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exists, use
    # the default value (configurable).
    #
    # try:
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    #     maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    #     maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is not limit on the
    # resource, use the default value.
    #
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # FIXME: this breaks our tpxmld, so it's commented for now //kll
    # Iterate through and close all file descriptors.
    # for fd in range(0, maxfd):
    #     try:
    #         os.close(fd)
    #     except OSError:  # ERROR, fd wasn't open to begin with (ignored)
    #         pass

    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.

    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)

    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)

    return(0)
0.018977
def run_step(context):
    """Set new context keys from formatting expressions with substitutions.

    Context is a dictionary or dictionary-like.
    context['contextSetf'] must exist. It's a dictionary.
    Will iterate context['contextSetf'] and save the values as new keys to the
    context.

    For example, say input context is:
        key1: value1
        key2: value2
        key3: value3
        contextSetf:
            key2: 'aaa_{key1}_zzz'
            key4: 'bbb_{key3}_yyy'

    This will result in return context:
        key1: value1
        key2: aaa_value1_zzz
        key3: value3
        key4: bbb_value3_yyy
    """
    logger.debug("started")
    context.assert_key_has_value(key='contextSetf', caller=__name__)

    for k, v in context['contextSetf'].items():
        logger.debug(f"setting context {k} to value from context {v}")
        context[context.get_formatted_iterable(k)] = context.get_formatted_iterable(v)

    logger.info(f"Set {len(context['contextSetf'])} context items.")
    logger.debug("done")
0.00095
def _CheckWindowsRegistryKeyPath( self, filename, artifact_definition, key_path): """Checks if a path is a valid Windows Registry key path. Args: filename (str): name of the artifacts definition file. artifact_definition (ArtifactDefinition): artifact definition. key_path (str): Windows Registry key path to validate. Returns: bool: True if the Windows Registry key path is valid. """ result = True key_path_segments = key_path.lower().split('\\') if key_path_segments[0] == '%%current_control_set%%': result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'Registry key path that starts with ' '%%CURRENT_CONTROL_SET%%. Replace %%CURRENT_CONTROL_SET%% with ' 'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet').format( artifact_definition.name, filename)) for segment_index, key_path_segment in enumerate(key_path_segments): if key_path_segment.startswith('%%') and key_path_segment.endswith('%%'): if (segment_index == 1 and key_path_segment == '%%users.sid%%' and key_path_segments[0] == 'hkey_users'): continue if key_path_segment.startswith('%%environ_'): result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'Registry key path that contains an environment variable: ' '"{2:s}". Usage of environment variables in key paths is not ' 'encouraged at this time.').format( artifact_definition.name, filename, key_path_segment)) elif key_path_segment.startswith('%%users.'): result = False logging.warning(( 'Artifact definition: {0:s} in file: {1:s} contains Windows ' 'Registry key path that contains a users variable: "{2:s}". ' 'Usage of users variables in key paths, except for ' '"HKEY_USERS\\%%users.sid%%", is not encouraged at this ' 'time.').format( artifact_definition.name, filename, key_path_segment)) return result
0.004545
def set_entry(self, jid, *, name=_Sentinel, add_to_groups=frozenset(), remove_from_groups=frozenset(), timeout=None): """ Set properties of a roster entry or add a new roster entry. The roster entry is identified by its bare `jid`. If an entry already exists, all values default to those stored in the existing entry. For example, if no `name` is given, the current name of the entry is re-used, if any. If the entry does not exist, it will be created on the server side. The `remove_from_groups` and `add_to_groups` arguments have to be based on the locally cached state, as XMPP does not support sending diffs. `remove_from_groups` takes precedence over `add_to_groups`. `timeout` is the time in seconds to wait for a confirmation by the server. Note that the changes may not be visible immediately after his coroutine returns in the :attr:`items` and :attr:`groups` attributes. The :class:`Service` waits for the "official" roster push from the server for updating the data structures and firing events, to ensure that consistent state with other clients is achieved. This may raise arbitrary :class:`.errors.XMPPError` exceptions if the server replies with an error and also any kind of connection error if the connection gets fatally terminated while waiting for a response. """ existing = self.items.get(jid, Item(jid)) post_groups = (existing.groups | add_to_groups) - remove_from_groups post_name = existing.name if name is not _Sentinel: post_name = name item = roster_xso.Item( jid=jid, name=post_name, groups=[ roster_xso.Group(name=group_name) for group_name in post_groups ]) yield from self.client.send( stanza.IQ( structs.IQType.SET, payload=roster_xso.Query(items=[ item ]) ), timeout=timeout )
0.002712
def feed(self, contents):
    '''
        feed - Feed contents. Use parseStr or parseFile instead.

        @param contents - Contents
    '''
    contents = stripIEConditionals(contents)
    try:
        HTMLParser.feed(self, contents)
    except MultipleRootNodeException:
        self.reset()
        HTMLParser.feed(self, "%s%s" % (addStartTag(contents, INVISIBLE_ROOT_TAG_START),
                                        INVISIBLE_ROOT_TAG_END))
0.008909
def get_resource(url, subdomain):
    """
    Issue a GET request to IASystem with the given url
    and return a response in Collection+json format.
    :returns: http response with content in json
    """
    headers = {"Accept": "application/vnd.collection+json"}
    response = IASYSTEM_DAO().getURL(url, headers, subdomain)
    logger.info("%s ==status==> %s" % (url, response.status))

    if response.status != 200:
        logger.error("%s ==data==> %s" % (url, response.data))
        raise DataFailureException(url, response.status, response.data)

    return json.loads(response.data)
0.001678
def __resolveport(self, definitions):
    """
    Resolve port_type reference.

    @param definitions: A definitions object.
    @type definitions: L{Definitions}
    """
    ref = qualify(self.type, self.root, definitions.tns)
    port_type = definitions.port_types.get(ref)
    if port_type is None:
        raise Exception("portType '%s', not-found" % (self.type,))
    # Later on we will require access to the message data referenced by
    # this port_type instance, and in order for those data references to be
    # available, port_type first needs to dereference its message
    # identification string. The only scenario where the port_type might
    # possibly not have already resolved its references, and where this
    # explicit resolve() call is required, is if we are dealing with a
    # recursive WSDL import chain.
    port_type.resolve(definitions)
    self.type = port_type
0.002066
def _GetNormalizedTimestamp(self):
    """Retrieves the normalized timestamp.

    Returns:
        decimal.Decimal: normalized timestamp, which contains the number of
            seconds since January 1, 1970 00:00:00 and a fraction of second
            used for increased precision, or None if the normalized timestamp
            cannot be determined.
    """
    if self._normalized_timestamp is None:
        if self._timestamp is not None:
            self._normalized_timestamp = (
                decimal.Decimal(self._timestamp) /
                definitions.NANOSECONDS_PER_SECOND)

    return self._normalized_timestamp
0.004926
def remove_monitor(self, handle):
    """Remove a previously registered monitor.

    See :meth:`AbstractDeviceAdapter.adjust_monitor`.
    """
    action = (handle, "delete", None, None)

    if self._currently_notifying:
        self._deferred_adjustments.append(action)
    else:
        self._adjust_monitor_internal(*action)
0.00554
def ageostrophic_wind(heights, f, dx, dy, u, v, dim_order='yx'): r"""Calculate the ageostrophic wind given from the heights or geopotential. Parameters ---------- heights : (M, N) ndarray The height field. f : array_like The coriolis parameter. This can be a scalar to be applied everywhere or an array of values. dx : float or ndarray The grid spacing(s) in the x-direction. If an array, there should be one item less than the size of `heights` along the applicable axis. dy : float or ndarray The grid spacing(s) in the y-direction. If an array, there should be one item less than the size of `heights` along the applicable axis. u : (M, N) ndarray The u wind field. v : (M, N) ndarray The u wind field. Returns ------- A 2-item tuple of arrays A tuple of the u-component and v-component of the ageostrophic wind. Notes ----- If inputs have more than two dimensions, they are assumed to have either leading dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``. """ u_geostrophic, v_geostrophic = geostrophic_wind(heights, f, dx, dy, dim_order=dim_order) return u - u_geostrophic, v - v_geostrophic
0.004608
def _vcf_is_strelka(variant_file, variant_metadata):
    """Return True if variant_file given is in strelka format
    """
    if "strelka" in variant_file.lower():
        return True
    elif "NORMAL" in variant_metadata["sample_info"].keys():
        return True
    else:
        vcf_reader = vcf.Reader(open(variant_file, "r"))
        try:
            vcf_type = vcf_reader.metadata["content"]
        except KeyError:
            vcf_type = ""
        if "strelka" in vcf_type.lower():
            return True
    return False
0.001876
def js_prerelease(command, strict=False): """decorator for building minified js/css prior to another command""" class DecoratedCommand(command): def run(self): jsdeps = self.distribution.get_command_obj('jsdeps') if not is_repo and all(exists(t) for t in jsdeps.targets): # sdist, nothing to do command.run(self) return try: self.distribution.run_command('jsdeps') except Exception as e: missing = [t for t in jsdeps.targets if not exists(t)] if strict or missing: log.warn('rebuilding js and css failed') if missing: log.error('missing files: %s' % missing) raise e else: log.warn('rebuilding js and css failed (not a problem)') log.warn(str(e)) command.run(self) update_package_data(self.distribution) return DecoratedCommand
0.000947
def set_bookmarks(self, bookmarks): """ Store the sequence of bookmarks `bookmarks`. Causes signals to be fired to reflect the changes. .. note:: This should normally not be used. It does not mitigate the race condition between clients concurrently modifying the bookmarks and may lead to data loss. Use :meth:`add_bookmark`, :meth:`discard_bookmark` and :meth:`update_bookmark` instead. This method still has use-cases (modifying the bookmarklist at large, e.g. by syncing the remote store with local data). """ with (yield from self._lock): yield from self._set_bookmarks(bookmarks) self._diff_emit_update(bookmarks)
0.002445
def dump_migration_session_state(raw): """ Serialize a migration session state to yaml using nicer formatting Args: raw: object to serialize Returns: string (of yaml) Specifically, this forces the "output" member of state step dicts (e.g. state[0]['output']) to use block formatting. For example, rather than this: - migration: [app, migration_name] output: "line 1\nline2\nline3" You get this: - migration: [app, migration_name] output: | line 1 line 2 line 3 """ class BlockStyle(str): pass class SessionDumper(yaml.SafeDumper): pass def str_block_formatter(dumper, data): return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|') SessionDumper.add_representer(BlockStyle, str_block_formatter) raw = deepcopy(raw) for step in raw: step['output'] = BlockStyle(step['output']) step['traceback'] = BlockStyle(step['traceback']) return yaml.dump(raw, Dumper=SessionDumper)
0.004864
def _validate_covars(covars, covariance_type, n_components): """Do basic checks on matrix covariance sizes and values.""" from scipy import linalg if covariance_type == 'spherical': if len(covars) != n_components: raise ValueError("'spherical' covars have length n_components") elif np.any(covars <= 0): raise ValueError("'spherical' covars must be non-negative") elif covariance_type == 'tied': if covars.shape[0] != covars.shape[1]: raise ValueError("'tied' covars must have shape (n_dim, n_dim)") elif (not np.allclose(covars, covars.T) or np.any(linalg.eigvalsh(covars) <= 0)): raise ValueError("'tied' covars must be symmetric, " "positive-definite") elif covariance_type == 'diag': if len(covars.shape) != 2: raise ValueError("'diag' covars must have shape " "(n_components, n_dim)") elif np.any(covars <= 0): raise ValueError("'diag' covars must be non-negative") elif covariance_type == 'full': if len(covars.shape) != 3: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") elif covars.shape[1] != covars.shape[2]: raise ValueError("'full' covars must have shape " "(n_components, n_dim, n_dim)") for n, cv in enumerate(covars): if (not np.allclose(cv, cv.T) or np.any(linalg.eigvalsh(cv) <= 0)): raise ValueError("component %d of 'full' covars must be " "symmetric, positive-definite" % n) else: raise ValueError("covariance_type must be one of " + "'spherical', 'tied', 'diag', 'full'")
0.000536
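A quick usage sketch for the `_validate_covars` helper above. The array shapes and values here are hypothetical illustrations (they assume numpy and scipy are installed and the function is importable in the current module); a well-formed 'diag' matrix validates silently, while a non-positive variance is rejected:

    import numpy as np

    # 3 components, 2 dimensions, all positive variances: should validate silently.
    good_diag = np.array([[1.0, 0.5],
                          [2.0, 0.1],
                          [0.3, 0.9]])
    _validate_covars(good_diag, 'diag', n_components=3)

    # A zero variance should raise ValueError for the 'diag' covariance type.
    bad_diag = good_diag.copy()
    bad_diag[1, 1] = 0.0
    try:
        _validate_covars(bad_diag, 'diag', n_components=3)
    except ValueError as exc:
        print("rejected as expected:", exc)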
def SetColLabelValue(self, col, value):
    """
    Set col label value in dataframe
    """
    if len(self.dataframe):
        col_name = str(self.dataframe.columns[col])
        self.dataframe.rename(columns={col_name: str(value)}, inplace=True)
    return None
0.006849
def bring_to_front(self, selector, by=By.CSS_SELECTOR): """ Updates the Z-index of a page element to bring it into view. Useful when getting a WebDriverException, such as the one below: { Element is not clickable at point (#, #). Other element would receive the click: ... } """ if page_utils.is_xpath_selector(selector): by = By.XPATH self.find_element(selector, by=by, timeout=settings.SMALL_TIMEOUT) try: selector = self.convert_to_css_selector(selector, by=by) except Exception: # Don't run action if can't convert to CSS_Selector for JavaScript return selector = re.escape(selector) selector = self.__escape_quotes_if_needed(selector) script = ("""document.querySelector('%s').style.zIndex = '9999';""" % selector) self.execute_script(script)
0.002148
def _polar_to_cartesian(cx, cy, r, theta):
    """
    :param cx: X coord of circle
    :param cy: Y coord of circle
    :param r: Radius of circle
    :param theta: Angle from vertical, clockwise, in radians
    :return: (x, y)
    """
    return cx - r * math.sin(theta), cy - r * math.cos(theta)
0.006024
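An illustrative usage sketch for `_polar_to_cartesian` above, assuming the sign convention in the function body (theta measured clockwise starting from the top of the circle):

    import math

    cx, cy, r = 100.0, 100.0, 50.0
    # theta = 0 points straight "up" from the centre under this convention.
    print(_polar_to_cartesian(cx, cy, r, 0.0))          # (100.0, 50.0)
    # A quarter turn clockwise lands to the left of the centre.
    print(_polar_to_cartesian(cx, cy, r, math.pi / 2))  # approx. (50.0, 100.0)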
def send_command_block(self, target, command_block): """Send an arbitrary file system command block The primary use for this method is to send multiple file system commands with a single web service request. This can help to avoid throttling. :param target: The device(s) to be targeted with this request :type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances :param command_block: The block of commands to execute on the target :type command_block: :class:`~FileSystemServiceCommandBlock` :return: The response will be a dictionary where the keys are device_ids and the values are the parsed responses of each command sent in the order listed in the command response for that device. In practice it seems to be the same order as the commands were sent in, however, Device Cloud documentation does not explicitly state anywhere that is the case so I cannot guarantee it. This does mean that if you send different types of commands the response list will be different types. Please see the commands parse_response functions for what those types will be. (:meth:`LsCommand.parse_response`, :class:`GetCommand.parse_response`, :class:`PutCommand.parse_response`, :class:`DeleteCommand.parse_response`) """ root = _parse_command_response( self._sci_api.send_sci("file_system", target, command_block.get_command_string())) out_dict = {} for device in root.findall('./file_system/device'): device_id = device.get('id') results = [] for command in device.find('./commands'): for command_class in FILE_SYSTEM_COMMANDS: if command_class.command_name == command.tag.lower(): results.append(command_class.parse_response(command, fssapi=self, device_id=device_id)) out_dict[device_id] = results return out_dict
0.006813
def split_pred_string(predstr): """ Split *predstr* and return the (lemma, pos, sense, suffix) components. Examples: >>> Pred.split_pred_string('_dog_n_1_rel') ('dog', 'n', '1', 'rel') >>> Pred.split_pred_string('quant_rel') ('quant', None, None, 'rel') """ predstr = predstr.strip('"\'') # surrounding quotes don't matter rel_added = False if not predstr.lower().endswith('_rel'): logging.debug('Predicate does not end in "_rel": {}' .format(predstr)) rel_added = True predstr += '_rel' match = Pred.pred_re.search(predstr) if match is None: logging.debug('Unexpected predicate string: {}'.format(predstr)) return (predstr, None, None, None) # _lemma_pos(_sense)?_end return (match.group('lemma'), match.group('pos'), match.group('sense'), None if rel_added else match.group('end'))
0.00107
def count(self) -> "CountQuery":
    """
    Return count of objects in queryset instead of objects.
    """
    return CountQuery(
        db=self._db,
        model=self.model,
        q_objects=self._q_objects,
        annotations=self._annotations,
        custom_filters=self._custom_filters,
    )
0.005831
def mac(addr):
    '''
    Validates a mac address
    '''
    valid = re.compile(r'''
                       (^([0-9A-F]{1,2}[-]){5}([0-9A-F]{1,2})$
                       |^([0-9A-F]{1,2}[:]){5}([0-9A-F]{1,2})$
                       |^([0-9A-F]{1,2}[.]){5}([0-9A-F]{1,2})$)
                       ''',
                       re.VERBOSE | re.IGNORECASE)
    return valid.match(addr) is not None
0.005115
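A short usage sketch for the `mac` validator above, exercising the three accepted separators and one malformed address:

    print(mac('00:1A:2B:3C:4D:5E'))   # True  (colon-separated)
    print(mac('00-1a-2b-3c-4d-5e'))   # True  (dash-separated, case-insensitive)
    print(mac('00.1A.2B.3C.4D.5E'))   # True  (dot-separated)
    print(mac('00:1A:2B:3C:4D'))      # False (only five octets)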
def run(items): """Perform detection of structural variations with lumpy. """ paired = vcfutils.get_paired(items) work_dir = _sv_workdir(paired.tumor_data if paired and paired.tumor_data else items[0]) previous_evidence = {} full_bams, sr_bams, disc_bams = [], [], [] for data in items: full_bams.append(dd.get_align_bam(data)) sr_bam, disc_bam = sshared.find_existing_split_discordants(data) sr_bams.append(sr_bam) disc_bams.append(disc_bam) cur_dels, cur_dups = _bedpes_from_cnv_caller(data, work_dir) previous_evidence[dd.get_sample_name(data)] = {} if cur_dels and utils.file_exists(cur_dels): previous_evidence[dd.get_sample_name(data)]["dels"] = cur_dels if cur_dups and utils.file_exists(cur_dups): previous_evidence[dd.get_sample_name(data)]["dups"] = cur_dups lumpy_vcf, exclude_file = _run_smoove(full_bams, sr_bams, disc_bams, work_dir, items) lumpy_vcf = sshared.annotate_with_depth(lumpy_vcf, items) gt_vcfs = {} # Retain paired samples with tumor/normal genotyped in one file if paired and paired.normal_name: batches = [[paired.tumor_data, paired.normal_data]] else: batches = [[x] for x in items] for batch_items in batches: for data in batch_items: gt_vcfs[dd.get_sample_name(data)] = _filter_by_support(lumpy_vcf, data) if paired and paired.normal_name: gt_vcfs = _filter_by_background(paired.tumor_name, [paired.normal_name], gt_vcfs, paired.tumor_data) out = [] upload_counts = collections.defaultdict(int) for data in items: if "sv" not in data: data["sv"] = [] vcf_file = gt_vcfs.get(dd.get_sample_name(data)) if vcf_file: effects_vcf, _ = effects.add_to_vcf(vcf_file, data, "snpeff") data["sv"].append({"variantcaller": "lumpy", "vrn_file": effects_vcf or vcf_file, "do_upload": upload_counts[vcf_file] == 0, # only upload a single file per batch "exclude_file": exclude_file}) upload_counts[vcf_file] += 1 out.append(data) return out
0.002679
def expand_details(df, detailCol='detail'): """Expands the details column of the given dataframe and returns the resulting DataFrame. :df: The input DataFrame. :detailCol: The detail column name. :returns: Returns DataFrame with new columns from pbp parsing. """ df = copy.deepcopy(df) df['detail'] = df[detailCol] dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df['detail'].values] # clean up unmatched details cols = {c for d in dicts if d for c in d.keys()} blankEntry = {c: np.nan for c in cols} newDicts = [d if d else blankEntry for d in dicts] # get details DataFrame and merge it with original to create main DataFrame details = pd.DataFrame(newDicts) df = pd.merge(df, details, left_index=True, right_index=True) # add isError column errors = [i for i, d in enumerate(dicts) if d is None] df['isError'] = False df.loc[errors, 'isError'] = True # fill in some NaN's necessary for _clean_features df.loc[0, 'qtr_time_remain'] = '15:00' df.qtr_time_remain.fillna(method='bfill', inplace=True) df.qtr_time_remain.fillna( pd.Series(np.where(df.quarter == 4, '0:00', '15:00')), inplace=True ) # use _clean_features to clean up and add columns new_df = df.apply(_clean_features, axis=1) return new_df
0.001488
def _get_help_for_modules(self, modules, prefix, include_special_flags): """Returns the help string for a list of modules. Private to absl.flags package. Args: modules: List[str], a list of modules to get the help string for. prefix: str, a string that is prepended to each generated help line. include_special_flags: bool, whether to include description of SPECIAL_FLAGS, i.e. --flagfile and --undefok. """ output_lines = [] for module in modules: self._render_our_module_flags(module, output_lines, prefix) if include_special_flags: self._render_module_flags( 'absl.flags', six.itervalues(_helpers.SPECIAL_FLAGS._flags()), # pylint: disable=protected-access output_lines, prefix) return '\n'.join(output_lines)
0.004848
def timeit(hosts=None, stmt=None, warmup=30, repeat=None, duration=None, concurrency=1, output_fmt=None, fail_if=None, sample_mode='reservoir'): """Run the given statement a number of times and return the runtime stats Args: fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34" """ num_lines = 0 log = Logger(output_fmt) with Runner(hosts, concurrency, sample_mode) as runner: version_info = aio.run(runner.client.get_server_version) for line in as_statements(lines_from_stdin(stmt)): runner.warmup(line, warmup) timed_stats = runner.run(line, iterations=repeat, duration=duration) r = Result( version_info=version_info, statement=line, timed_stats=timed_stats, concurrency=concurrency ) log.result(r) if fail_if: eval_fail_if(fail_if, r) num_lines += 1 if num_lines == 0: raise SystemExit( 'No SQL statements provided. Use --stmt or provide statements via stdin')
0.001974
def map_concepts_to_indicators( self, n: int = 1, min_temporal_res: Optional[str] = None ): """ Map each concept node in the AnalysisGraph instance to one or more tangible quantities, known as 'indicators'. Args: n: Number of matches to keep min_temporal_res: Minimum temporal resolution that the indicators must have data for. """ for node in self.nodes(data=True): query_parts = [ "select Indicator from concept_to_indicator_mapping", f"where `Concept` like '{node[0]}'", ] # TODO May need to delve into SQL/database stuff a bit more deeply # for this. Foreign keys perhaps? query = " ".join(query_parts) results = engine.execute(query) if min_temporal_res is not None: if min_temporal_res not in ["month"]: raise ValueError("min_temporal_res must be 'month'") vars_with_required_temporal_resolution = [ r[0] for r in engine.execute( "select distinct `Variable` from indicator where " f"`{min_temporal_res.capitalize()}` is not null" ) ] results = [ r for r in results if r[0] in vars_with_required_temporal_resolution ] node[1]["indicators"] = { x: Indicator(x, "MITRE12") for x in [r[0] for r in take(n, results)] }
0.001813
def os_instance_2_json(self): """ transform ariane_clip3 OS Instance object to Ariane server JSON obj :return: Ariane JSON obj """ LOGGER.debug("OSInstance.os_instance_2_json") json_obj = { 'osInstanceID': self.id, 'osInstanceName': self.name, 'osInstanceDescription': self.description, 'osInstanceAdminGateURI': self.admin_gate_uri, 'osInstanceEmbeddingOSInstanceID': self.embedding_osi_id, 'osInstanceOSTypeID': self.ost_id, 'osInstanceEmbeddedOSInstancesID': self.embedded_osi_ids, 'osInstanceIPAddressesID': self.ip_address_ids, 'osInstanceNICsID': self.nic_ids, 'osInstanceApplicationsID': self.application_ids, 'osInstanceEnvironmentsID': self.environment_ids, 'osInstanceSubnetsID': self.subnet_ids, 'osInstanceTeamsID': self.team_ids } return json.dumps(json_obj)
0.002018
def run(vrn_info, cnvs_by_name, somatic_info): """Run THetA analysis given output from CNV caller on a tumor/normal pair. """ cmd = _get_cmd("RunTHeTA.py") if not cmd: logger.info("THetA scripts not found in current PATH. Skipping.") else: from bcbio.structural import cnvkit work_dir = _sv_workdir(somatic_info.tumor_data) assert "cnvkit" in cnvs_by_name, "THetA requires CNVkit calls" cnv_info = cnvkit.export_theta(cnvs_by_name["cnvkit"], somatic_info.tumor_data) cnv_info["theta_input"] = subset_by_supported(cnv_info["theta_input"], _theta_to_coords, cnvs_by_name, work_dir, somatic_info.tumor_data) return _run_theta(cnv_info, somatic_info.tumor_data, work_dir, run_n3=False)
0.006165
def find_all(root, path):
    """Get all children that satisfy the path."""
    path = parse_path(path)
    if len(path) == 1:
        yield from get_children(root, path[0])
    else:
        for child in get_children(root, path[0]):
            yield from find_all(child, path[1:])
0.022989
def get_completeness_adjusted_table(catalogue, completeness, dmag, offset=1.0E-5, end_year=None, plot=False, figure_size=(8, 6), filename=None, filetype='png', dpi=300, ax=None): """ Counts the number of earthquakes in each magnitude bin and normalises the rate to annual rates, taking into account the completeness """ if not end_year: end_year = catalogue.end_year # Find the natural bin limits mag_bins = _get_catalogue_bin_limits(catalogue, dmag) obs_time = end_year - completeness[:, 0] + 1. obs_rates = np.zeros_like(mag_bins) durations = np.zeros_like(mag_bins) n_comp = np.shape(completeness)[0] for iloc in range(n_comp): low_mag = completeness[iloc, 1] comp_year = completeness[iloc, 0] if iloc == (n_comp - 1): idx = np.logical_and( catalogue.data['magnitude'] >= low_mag - offset, catalogue.data['year'] >= comp_year) high_mag = mag_bins[-1] obs_idx = mag_bins >= (low_mag - offset) else: high_mag = completeness[iloc + 1, 1] mag_idx = np.logical_and( catalogue.data['magnitude'] >= low_mag - offset, catalogue.data['magnitude'] < (high_mag - offset)) idx = np.logical_and(mag_idx, catalogue.data['year'] >= (comp_year - offset)) obs_idx = np.logical_and(mag_bins >= (low_mag - offset), mag_bins < (high_mag + offset)) temp_rates = np.histogram(catalogue.data['magnitude'][idx], mag_bins[obs_idx])[0] temp_rates = temp_rates.astype(float) / obs_time[iloc] obs_rates[obs_idx[:-1]] = temp_rates durations[obs_idx[:-1]] = obs_time[iloc] selector = np.where(obs_rates > 0.)[0] mag_bins = mag_bins[selector] obs_rates = obs_rates[selector] durations = durations[selector] # Get cumulative rates cum_rates = np.array([sum(obs_rates[iloc:]) for iloc in range(0, len(obs_rates))]) if plot: plt.figure(figsize=figure_size) plt.semilogy(mag_bins + dmag / 2., obs_rates, "bo", label="Incremental") plt.semilogy(mag_bins + dmag / 2., cum_rates, "rs", label="Cumulative") plt.xlabel("Magnitude (M)", fontsize=16) plt.ylabel("Annual Rate", fontsize=16) plt.grid(True) plt.legend(fontsize=16) if filename: plt.savefig(filename, format=filetype, dpi=dpi, bbox_inches="tight") return np.column_stack([mag_bins, durations, obs_rates, cum_rates, np.log10(cum_rates)])
0.000697
def _decode_v1(value):
    """
    Decode '::' and '$' characters encoded by `_encode`.
    """
    decode_colons = value.replace('$::', '::')
    decode_dollars = decode_colons.replace('$$', '$')
    reencoded = _encode_v1(decode_dollars)
    if reencoded != value:
        raise ValueError('Ambiguous encoded value, {!r} could have been encoded as {!r}'.format(
            value, reencoded))
    return decode_dollars
0.00489
def to_python(self, value): """Overrides ``models.Field`` method. This is used to convert bytes (from serialization etc) to an instance of this class""" if value is None: return None elif isinstance(value, oauth2client.client.Credentials): return value else: try: return jsonpickle.decode( base64.b64decode(encoding.smart_bytes(value)).decode()) except ValueError: return pickle.loads( base64.b64decode(encoding.smart_bytes(value)))
0.003378
def create(self, name, suffix, description, default_value, display=None): """Create a new Metric :param str name: Name of metric :param str suffix: Metric unit :param str description: Description of what the metric is measuring :param int default_value: Default value to use when a point is added :param int display: Display the chart on the status page :return: Created metric data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#metrics """ data = ApiParams() data['name'] = name data['suffix'] = suffix data['description'] = description data['default_value'] = default_value data['display'] = display return self._post('metrics', data=data)['data']
0.002509
def get_stats_summary(start=None, end=None, **kwargs): """ Stats Historical Summary Reference: https://iexcloud.io/docs/api/#stats-historical-summary Data Weighting: ``Free`` Parameters ---------- start: datetime.datetime, default None, optional Start of data retrieval period end: datetime.datetime, default None, optional End of data retrieval period kwargs: Additional Request Parameters (see base class) """ return MonthlySummaryReader(start=start, end=end, **kwargs).fetch()
0.001821
def _add_blockhash_to_state_changes(storage: SQLiteStorage, cache: BlockHashCache) -> None: """Adds blockhash to ContractReceiveXXX and ActionInitChain state changes""" batch_size = 50 batch_query = storage.batch_query_state_changes( batch_size=batch_size, filters=[ ('_type', 'raiden.transfer.state_change.ContractReceive%'), ('_type', 'raiden.transfer.state_change.ActionInitChain'), ], logical_and=False, ) for state_changes_batch in batch_query: # Gather query records to pass to gevent pool imap to have concurrent RPC calls query_records = [] for state_change in state_changes_batch: data = json.loads(state_change.data) assert 'block_hash' not in data, 'v18 state changes cant contain blockhash' record = BlockQueryAndUpdateRecord( block_number=int(data['block_number']), data=data, state_change_identifier=state_change.state_change_identifier, cache=cache, ) query_records.append(record) # Now perform the queries in parallel with gevent.Pool.imap and gather the # updated tuple entries that will update the DB updated_state_changes = [] pool_generator = Pool(batch_size).imap( _query_blocknumber_and_update_statechange_data, query_records, ) for entry in pool_generator: updated_state_changes.append(entry) # Finally update the DB with a batched executemany() storage.update_state_changes(updated_state_changes)
0.003643
def urbext(self, year):
    """
    Estimate the `urbext2000` parameter for a given year assuming a nation-wide urbanisation curve.

    Methodology source: eqn 5.5, report FD1919/TR

    :param year: Year to provide estimate for
    :type year: float
    :return: Urban extent parameter
    :rtype: float
    """
    # Decimal places increased to ensure year 2000 corresponds with 1
    urban_expansion = 0.7851 + 0.2124 * atan((year - 1967.5) / 20.331792998)
    try:
        return self.catchment.descriptors.urbext2000 * urban_expansion
    except TypeError:
        # Sometimes urbext2000 is not set, assume zero
        return 0
0.005755
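A small worked check of the urbanisation curve used above. This standalone sketch (independent of the class) illustrates why the comment says the constants were tuned so the expansion factor is approximately 1 at year 2000:

    from math import atan

    def urban_expansion(year):
        # Same curve as in urbext(): eqn 5.5 of report FD1919/TR.
        return 0.7851 + 0.2124 * atan((year - 1967.5) / 20.331792998)

    print(urban_expansion(2000))   # ~1.0 by construction
    print(urban_expansion(1970))   # ~0.81, i.e. less urbanisation than in 2000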
def readTFAM(fileName):
    """Reads the TFAM file.

    :param fileName: the name of the ``tfam`` file.

    :type fileName: str

    :returns: a representation of the ``tfam`` file (:py:class:`numpy.array`).
    """
    # Saving the TFAM file
    tfam = None
    with open(fileName, 'r') as inputFile:
        tfam = [
            tuple(i.rstrip("\r\n").split("\t"))
            for i in inputFile.readlines()
        ]
    tfam = np.array(tfam)
    return tfam
0.002217
def submit_to_queue(self, script_file): """ Public API: wraps the concrete implementation _submit_to_queue Raises: `self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches `self.Error` if generic error """ if not os.path.exists(script_file): raise self.Error('Cannot find script file located at: {}'.format(script_file)) if self.num_launches == self.max_num_launches: raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches)) # Call the concrete implementation. s = self._submit_to_queue(script_file) self.record_launch(s.qid) if s.qid is None: raise self.Error("Error in job submission with %s. file %s \n" % (self.__class__.__name__, script_file) + "The error response reads:\n %s \n " % s.err + "The out response reads:\n %s \n" % s.out) # Here we create a concrete instance of QueueJob return QueueJob.from_qtype_and_id(self.QTYPE, s.qid, self.qname), s.process
0.005824
def _calculate_gain(self, cost_base, y_true, X, cost_mat, split): """ Private function to calculate the gain in cost of using split in the current node. Parameters ---------- cost_base : float Cost of the naive prediction y_true : array indicator matrix Ground truth (correct) labels. X : array-like of shape = [n_samples, n_features] The input samples. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. split : tuple of len = 2 split[0] = feature to split = j split[1] = where to split = l Returns ------- tuple(gain : float, left node prediction : int) """ # Check if cost_base == 0, then no gain is possible #TODO: This must be check in _best_split if cost_base == 0.0: return 0.0, int(np.sign(y_true.mean() - 0.5) == 1) # In case cost_b==0 and pi_1!=(0,1) j, l = split filter_Xl = (X[:, j] <= l) filter_Xr = ~filter_Xl n_samples, n_features = X.shape # Check if one of the leafs is empty #TODO: This must be check in _best_split if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]: # One leaft is empty return 0.0, 0.0 # Split X in Xl and Xr according to rule split Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :]) Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :]) if self.criterion_weight: n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0] Xl_w = n_samples_Xl * 1.0 / n_samples Xr_w = 1 - Xl_w gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6) else: gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6) return gain, Xl_pred
0.005655
def launch_in_notebook(self, port=9095, width=900, height=600):
    """launch the app within an iframe in ipython notebook"""
    from IPython.lib import backgroundjobs as bg
    from IPython.display import HTML

    jobs = bg.BackgroundJobManager()
    jobs.new(self.launch, kw=dict(port=port))
    frame = HTML(
        '<iframe src=http://localhost:{} width={} height={}></iframe>'
        .format(port, width, height)
    )
    return frame
0.004132
def commit_config(self, message=""):
    """Commit configuration."""
    commit_args = {"comment": message} if message else {}
    self.device.cu.commit(ignore_warning=self.ignore_warning, **commit_args)
    if not self.lock_disable and not self.session_config_lock:
        self._unlock()
0.009709
def _mergeFiles(key, chunkCount, outputFile, fields):
    """Merge sorted chunk files into a sorted output file

    chunkCount - the number of available chunk files
    outputFile - the name of the sorted output file

    _mergeFiles()

    """
    title()

    # Open all chunk files
    files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)]

    # Open output file
    with FileRecordStream(outputFile, write=True, fields=fields) as o:
        # Open all chunk files
        files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)]
        records = [f.getNextRecord() for f in files]

        # This loop will run until all files are exhausted
        while not all(r is None for r in records):
            # Cleanup None values (files that were exhausted)
            indices = [i for i, r in enumerate(records) if r is not None]
            records = [records[i] for i in indices]
            files = [files[i] for i in indices]

            # Find the current record
            r = min(records, key=itemgetter(*key))

            # Write it to the file
            o.appendRecord(r)

            # Find the index of file that produced the current record
            index = records.index(r)

            # Read a new record from the file
            records[index] = files[index].getNextRecord()

    # Cleanup chunk files
    for i, f in enumerate(files):
        f.close()
        os.remove('chunk_%d.csv' % i)
0.016579
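The loop above is a linear-scan k-way merge over the head record of each chunk. As a point of comparison only (not part of the code above, which uses the project's FileRecordStream API), the same idea can be expressed with the standard library's heapq.merge; this generic sketch assumes header-less, pre-sorted CSV chunks and compares key columns as strings:

    import csv
    import heapq
    from operator import itemgetter

    def merge_sorted_csv_chunks(chunk_paths, output_path, key_columns):
        """Merge pre-sorted CSV chunks into one sorted CSV (lexicographic keys)."""
        handles = []
        try:
            readers = []
            for path in chunk_paths:
                handle = open(path, newline='')
                handles.append(handle)
                readers.append(csv.reader(handle))
            with open(output_path, 'w', newline='') as out:
                writer = csv.writer(out)
                # heapq.merge lazily picks the smallest head record across all chunks.
                for row in heapq.merge(*readers, key=itemgetter(*key_columns)):
                    writer.writerow(row)
        finally:
            for handle in handles:
                handle.close()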
def init_ui(self, ): """Create the tooltip in the sidebar :returns: None :rtype: None :raises: None """ self.sidebar = self.get_maya_sidebar() self.lay = self.sidebar.layout() self.tool_pb = QtGui.QPushButton("JB Wins") self.tooltip = JB_WindowToolTip() self.tooltip.install_tooltip(self.tool_pb) self.lay.addWidget(self.tool_pb) self.tool_pb.clicked.connect(self.tooltip.show)
0.004219
def _run_keep_alive(self):
    """
    Start a new thread timer to keep the keep_alive_function
    running every keep_alive seconds.
    """
    threading.Timer(self._keep_alive, self._run_keep_alive).start()
    _LOGGER.info("Polling the API")
    # This may or may not return something
    self._keep_alive_function()
0.005682
async def send_contact(self, chat_id: typing.Union[base.Integer, base.String], phone_number: base.String, first_name: base.String, last_name: typing.Union[base.String, None] = None, vcard: typing.Union[base.String, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_to_message_id: typing.Union[base.Integer, None] = None, reply_markup: typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None] = None) -> types.Message: """ Use this method to send phone contacts. Source: https://core.telegram.org/bots/api#sendcontact :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param phone_number: Contact's phone number :type phone_number: :obj:`base.String` :param first_name: Contact's first name :type first_name: :obj:`base.String` :param last_name: Contact's last name :type last_name: :obj:`typing.Union[base.String, None]` :param vcard: vcard :type vcard: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :param reply_markup: Additional interface options :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :return: On success, the sent Message is returned :rtype: :obj:`types.Message` """ reply_markup = prepare_arg(reply_markup) payload = generate_payload(**locals()) result = await self.request(api.Methods.SEND_CONTACT, payload) return types.Message(**result)
0.007472
def install_package_command(package_name):
    '''install python package from pip'''
    # TODO refactor python logic
    if sys.platform == "win32":
        cmds = 'python -m pip install --user {0}'.format(package_name)
    else:
        cmds = 'python3 -m pip install --user {0}'.format(package_name)
    call(cmds, shell=True)
0.006098
def taskfileinfo_path_data(tfi, role):
    """Return the data for path

    :param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
    :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the path
    :rtype: depending on role
    :raises: None
    """
    if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
        return JB_File(tfi).get_fullpath()
0.002105
def make_country_matrix(self, loc): """ Create features for all possible country labels, return as matrix for keras. Parameters ---------- loc: dict one entry from the list of locations and features that come out of make_country_features Returns -------- keras_inputs: dict with two keys, "label" and "matrix" """ top = loc['features']['ct_mention'] top_count = loc['features']['ctm_count1'] two = loc['features']['ct_mention2'] two_count = loc['features']['ctm_count2'] word_vec = loc['features']['word_vec'] first_back = loc['features']['first_back'] most_alt = loc['features']['most_alt'] most_pop = loc['features']['most_pop'] possible_labels = set([top, two, word_vec, first_back, most_alt, most_pop]) possible_labels = [i for i in possible_labels if i] X_mat = [] for label in possible_labels: inputs = np.array([word_vec, first_back, most_alt, most_pop]) x = inputs == label x = np.asarray((x * 2) - 1) # convert to -1, 1 # get missing values exists = inputs != "" exists = np.asarray((exists * 2) - 1) counts = np.asarray([top_count, two_count]) # cludgy, should be up with "inputs" right = np.asarray([top, two]) == label right = right * 2 - 1 right[counts == 0] = 0 # get correct values features = np.concatenate([x, exists, counts, right]) X_mat.append(np.asarray(features)) keras_inputs = {"labels": possible_labels, "matrix" : np.asmatrix(X_mat)} return keras_inputs
0.00453
def hashkey(*args, **kwargs):
    """Return a cache key for the specified hashable arguments."""
    if kwargs:
        return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
    else:
        return _HashedTuple(args)
0.004367
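A brief usage sketch for hashkey above: equal positional and keyword arguments always produce an equal, hashable key, which is what makes it usable as a cache dictionary key. It assumes the _HashedTuple/_kwmark helpers from the same module are available (as in cachetools.keys):

    cache = {}

    key = hashkey('user', 42, verbose=True)
    cache[key] = "expensive result"

    # Same arguments, same key: the cached value is found again.
    assert hashkey('user', 42, verbose=True) == key
    print(cache[hashkey('user', 42, verbose=True)])   # expensive result

    # Different keyword values produce a different key.
    assert hashkey('user', 42, verbose=False) != key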
def sendACK(self, blocknumber=None):
    """This method sends an ack packet to the block number specified. If
    none is specified, it defaults to the next_block property in the parent
    context."""
    log.debug("In sendACK, passed blocknumber is %s", blocknumber)
    if blocknumber is None:
        blocknumber = self.context.next_block
    log.info("Sending ack to block %d" % blocknumber)
    ackpkt = TftpPacketACK()
    ackpkt.blocknumber = blocknumber
    self.context.sock.sendto(ackpkt.encode().buffer,
                             (self.context.host, self.context.tidport))
    self.context.last_pkt = ackpkt
0.002841
def for_property(cls, server, namespace, classname, propname): # pylint: disable=line-too-long """ Factory method that returns a new :class:`~pywbem.ValueMapping` instance that maps CIM property values to the `Values` qualifier defined on that property. If a `Values` qualifier is defined but no `ValueMap` qualifier, a default of 0-based consecutive numbers is applied (that is the default defined in :term:`DSP0004`). Parameters: server (:class:`~pywbem.WBEMConnection` or :class:`~pywbem.WBEMServer`): The connection to the WBEM server containing the namespace. namespace (:term:`string`): Name of the CIM namespace containing the class. If `None`, the default namespace of the connection will be used. classname (:term:`string`): Name of the CIM class exposing the property. The property can be defined in that class or inherited into that class. propname (:term:`string`): Name of the CIM property that defines the `Values` / `ValueMap` qualifiers. Returns: The new :class:`~pywbem.ValueMapping` instance. Raises: Exceptions raised by :class:`~pywbem.WBEMConnection`. KeyError: The CIM property does not exist in the CIM class. TypeError: The CIM property is not integer-typed. ValueError: No `Values` qualifier defined on the CIM property. ValueError: Invalid integer representation in `ValueMap` qualifier defined on the CIM property. """ # noqa: E501 conn = server try: get_class = conn.GetClass except AttributeError: conn = server.conn get_class = conn.GetClass class_obj = get_class(ClassName=classname, namespace=namespace, LocalOnly=False, IncludeQualifiers=True) try: property_obj = class_obj.properties[propname] except KeyError: raise KeyError( _format("Class {0!A} (in {1!A}) does not have a property " "{2!A}", classname, namespace, propname)) new_vm = cls._create_for_element(property_obj, conn, namespace, classname, propname=propname) return new_vm
0.001207
async def get(self, key, default=None, loads_fn=None, namespace=None, _conn=None): """ Get a value from the cache. Returns default if not found. :param key: str :param default: obj to return when key is not found :param loads_fn: callable alternative to use as loads function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: obj loaded :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout """ start = time.monotonic() loads = loads_fn or self._serializer.loads ns_key = self.build_key(key, namespace=namespace) value = loads(await self._get(ns_key, encoding=self.serializer.encoding, _conn=_conn)) logger.debug("GET %s %s (%.4f)s", ns_key, value is not None, time.monotonic() - start) return value if value is not None else default
0.005994
def write_text(filename: str, text: str) -> None:
    """
    Writes text to a file.
    """
    with open(filename, 'w') as f:  # type: TextIO
        print(text, file=f)
0.005848
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(Trigger, self).fix_config(options) opt = "condition" if opt not in options: options[opt] = "True" if opt not in self.help: self.help[opt] = "The (optional) condition for teeing off the tokens; uses the 'eval' method, "\ "ie the expression must evaluate to a boolean value; storage values placeholders "\ "'@{...}' get replaced with their string representations before evaluating the "\ "expression (string)." return options
0.00678
def _CaptureExpression(self, frame, expression): """Evalutes the expression and captures it into a Variable object. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: Variable object (which will have error status if the expression fails to evaluate). """ rc, value = _EvaluateExpression(frame, expression) if not rc: return {'name': expression, 'status': value} return self.CaptureNamedVariable(expression, value, 0, self.expression_capture_limits)
0.003373
def create_query_index( self, design_document_id=None, index_name=None, index_type='json', partitioned=False, **kwargs ): """ Creates either a JSON or a text query index in the remote database. :param str index_type: The type of the index to create. Can be either 'text' or 'json'. Defaults to 'json'. :param str design_document_id: Optional identifier of the design document in which the index will be created. If omitted the default is that each index will be created in its own design document. Indexes can be grouped into design documents for efficiency. However, a change to one index in a design document will invalidate all other indexes in the same document. :param str index_name: Optional name of the index. If omitted, a name will be generated automatically. :param list fields: A list of fields that should be indexed. For JSON indexes, the fields parameter is mandatory and should follow the 'sort syntax'. For example ``fields=['name', {'age': 'desc'}]`` will create an index on the 'name' field in ascending order and the 'age' field in descending order. For text indexes, the fields parameter is optional. If it is included then each field element in the fields list must be a single element dictionary where the key is the field name and the value is the field type. For example ``fields=[{'name': 'string'}, {'age': 'number'}]``. Valid field types are ``'string'``, ``'number'``, and ``'boolean'``. :param dict default_field: Optional parameter that specifies how the ``$text`` operator can be used with the index. Only valid when creating a text index. :param dict selector: Optional parameter that can be used to limit the index to a specific set of documents that match a query. It uses the same syntax used for selectors in queries. Only valid when creating a text index. :returns: An Index object representing the index created in the remote database """ if index_type == JSON_INDEX_TYPE: index = Index(self, design_document_id, index_name, partitioned=partitioned, **kwargs) elif index_type == TEXT_INDEX_TYPE: index = TextIndex(self, design_document_id, index_name, partitioned=partitioned, **kwargs) else: raise CloudantArgumentError(103, index_type) index.create() return index
0.001084
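A hedged usage sketch for the index-creation method above: 'db' is assumed to be an already-connected database object exposing this method, and the 'name'/'age' field names are invented for illustration; only the fields syntax comes from the docstring.
# JSON index using the 'sort syntax' described above
json_index = db.create_query_index(
    index_name='name-age-json-index',
    fields=['name', {'age': 'desc'}],
)
# Text index: each field is a single-element {name: type} dict
text_index = db.create_query_index(
    index_type='text',
    fields=[{'name': 'string'}, {'age': 'number'}],
)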
def check_nonnegative(value): """Check that the value is nonnegative.""" if isinstance(value, tf.Tensor): with tf.control_dependencies([tf.assert_greater_equal(value, 0)]): value = tf.identity(value) elif value < 0: raise ValueError("Value must be non-negative.") return value
0.020134
def new(self): # type: () -> None ''' A method to create a new UDF Logical Volume Header Descriptor. Parameters: None. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor already initialized') self.unique_id = 261 self._initialized = True
0.009592
def system_greet(input_params={}, always_retry=True, **kwargs): """ Invokes the /system/greet API method. """ return DXHTTPRequest('/system/greet', input_params, always_retry=always_retry, **kwargs)
0.009346
def osd_tree(conn, cluster): """ Check the status of an OSD. Make sure all are up and in What good output would look like:: { "epoch": 8, "num_osds": 1, "num_up_osds": 1, "num_in_osds": "1", "full": "false", "nearfull": "false" } Note how the booleans are actually strings, so we need to take that into account and fix it before returning the dictionary. Issue #8108 """ ceph_executable = system.executable_path(conn, 'ceph') command = [ ceph_executable, '--cluster={cluster}'.format(cluster=cluster), 'osd', 'tree', '--format=json', ] out, err, code = remoto.process.check( conn, command, ) try: loaded_json = json.loads(b''.join(out).decode('utf-8')) # convert boolean strings to actual booleans because # --format=json fails to do this properly for k, v in loaded_json.items(): if v == 'true': loaded_json[k] = True elif v == 'false': loaded_json[k] = False return loaded_json except ValueError: return {}
0.000824
def database_caller_creator(self, host, port, name=None): '''creates a redis connection object which will be later used to modify the db ''' name = name or 0 client = redis.StrictRedis(host=host, port=port, db=name) pipe = client.pipeline(transaction=False) return client, pipe
0.005988
def validate_arg(arg, argdef): """ Validate an incoming (unicode) string argument according the UPnP spec. Raises UPNPError. """ datatype = argdef['datatype'] reasons = set() ranges = { 'ui1': (int, 0, 255), 'ui2': (int, 0, 65535), 'ui4': (int, 0, 4294967295), 'i1': (int, -128, 127), 'i2': (int, -32768, 32767), 'i4': (int, -2147483648, 2147483647), 'r4': (Decimal, Decimal('3.40282347E+38'), Decimal('1.17549435E-38')) } try: if datatype in set(ranges.keys()): v_type, v_min, v_max = ranges[datatype] if not v_min <= v_type(arg) <= v_max: reasons.add('%r datatype must be a number in the range %s to %s' % ( datatype, v_min, v_max)) elif datatype in {'r8', 'number', 'float', 'fixed.14.4'}: v = Decimal(arg) if v < 0: assert Decimal('-1.79769313486232E308') <= v <= Decimal('4.94065645841247E-324') else: assert Decimal('4.94065645841247E-324') <= v <= Decimal('1.79769313486232E308') elif datatype == 'char': v = arg.decode('utf8') if six.PY2 or isinstance(arg, bytes) else arg assert len(v) == 1 elif datatype == 'string': v = arg.decode("utf8") if six.PY2 or isinstance(arg, bytes) else arg if argdef['allowed_values'] and v not in argdef['allowed_values']: reasons.add('Value %r not in allowed values list' % arg) elif datatype == 'date': v = parse_date(arg) if any((v.hour, v.minute, v.second)): reasons.add("'date' datatype must not contain a time") elif datatype in ('dateTime', 'dateTime.tz'): v = parse_date(arg) if datatype == 'dateTime' and v.tzinfo is not None: reasons.add("'dateTime' datatype must not contain a timezone") elif datatype in ('time', 'time.tz'): now = datetime.datetime.utcnow() v = parse_date(arg, default=now) if v.tzinfo is not None: now += v.utcoffset() if not all(( v.day == now.day, v.month == now.month, v.year == now.year)): reasons.add('%r datatype must not contain a date' % datatype) if datatype == 'time' and v.tzinfo is not None: reasons.add('%r datatype must not have timezone information' % datatype) elif datatype == 'boolean': valid = {'true', 'yes', '1', 'false', 'no', '0'} if arg.lower() not in valid: reasons.add('%r datatype must be one of %s' % (datatype, ','.join(valid))) elif datatype == 'bin.base64': b64decode(arg) elif datatype == 'bin.hex': unhexlify(arg) elif datatype == 'uri': urlparse(arg) elif datatype == 'uuid': if not re.match( r'^[0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12}$', arg, re.I): reasons.add('%r datatype must contain a valid UUID') else: reasons.add("%r datatype is unrecognised." % datatype) except ValueError as exc: reasons.add(str(exc)) return not bool(len(reasons)), reasons
0.004065
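A minimal sketch of calling the validator above. The argdef dictionary shape follows the two keys the function actually reads ('datatype' and 'allowed_values'); the argument values are illustrative.
ok, reasons = validate_arg('128', {'datatype': 'ui1', 'allowed_values': None})
# ok is True and reasons is an empty set: 128 fits the unsigned 1-byte range

ok, reasons = validate_arg('300', {'datatype': 'ui1', 'allowed_values': None})
# ok is False; reasons notes that 'ui1' values must lie between 0 and 255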
def _initialize_hierarchy(self): """ This function covers the whole initialization routine before executing a hierarchy state. :return: """ logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else "")) # reset variables self.child_state = None self.last_error = None self.last_child = None self.last_transition = None if self.backward_execution: self.setup_backward_run() else: # forward_execution self.setup_run() self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE if self.backward_execution: last_history_item = self.execution_history.pop_last_item() assert isinstance(last_history_item, ReturnItem) self.scoped_data = last_history_item.scoped_data else: # forward_execution self.execution_history.push_call_history_item(self, CallType.CONTAINER, self, self.input_data) self.child_state = self.get_start_state(set_final_outcome=True) if self.child_state is None: self.child_state = self.handle_no_start_state()
0.004136
def dutyCycle(self, active=False, readOnly=False): """Compute/update and return the positive activations duty cycle of this segment. This is a measure of how often this segment is providing good predictions. :param active: True if segment just provided a good prediction :param readOnly: If True, compute the updated duty cycle, but don't change the cached value. This is used by debugging print statements. :returns: The duty cycle, a measure of how often this segment is providing good predictions. **NOTE:** This method relies on different schemes to compute the duty cycle based on how much history we have. In order to support this tiered approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER** (@ref dutyCycleTiers). When we don't have a lot of history yet (first tier), we simply return number of positive activations / total number of iterations. After a certain number of iterations have accumulated, it converts into a moving average calculation, which is updated only when requested since it can be a bit expensive to compute on every iteration (it uses the pow() function). The duty cycle is computed as follows: dc[t] = (1-alpha) * dc[t-1] + alpha * value[t] If the value[t] has been 0 for a number of steps in a row, you can apply all of the updates at once using: dc[t] = (1-alpha)^(t-lastT) * dc[lastT] We use the alphas and tiers as defined in @ref dutyCycleAlphas and @ref dutyCycleTiers. """ # For tier #0, compute it from total number of positive activations seen if self.tm.lrnIterationIdx <= self.dutyCycleTiers[1]: dutyCycle = float(self.positiveActivations) \ / self.tm.lrnIterationIdx if not readOnly: self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx self._lastPosDutyCycle = dutyCycle return dutyCycle # How old is our update? age = self.tm.lrnIterationIdx - self._lastPosDutyCycleIteration # If it's already up to date, we can return our cached value. if age == 0 and not active: return self._lastPosDutyCycle # Figure out which alpha we're using for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1): if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]: alpha = self.dutyCycleAlphas[tierIdx] break # Update duty cycle dutyCycle = pow(1.0-alpha, age) * self._lastPosDutyCycle if active: dutyCycle += alpha # Update cached values if not read-only if not readOnly: self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx self._lastPosDutyCycle = dutyCycle return dutyCycle
0.003279
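The moving-average branch of the method above can be illustrated standalone; the alpha, starting value and age below are invented for the example and do not come from the library's dutyCycleAlphas table.
alpha = 0.0005            # assumed tier alpha, illustration only
last_duty_cycle = 0.04    # cached dc[lastT]
age = 120                 # iterations since the cache was last updated

duty_cycle = (1.0 - alpha) ** age * last_duty_cycle  # dc[t] = (1-alpha)^(t-lastT) * dc[lastT]
duty_cycle += alpha       # applied only when the segment was active this step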
def lat_from_inc(inc, a95=None): """ Calculate paleolatitude from inclination using the dipole equation Required Parameter ---------- inc: (paleo)magnetic inclination in degrees Optional Parameter ---------- a95: 95% confidence interval from Fisher mean Returns ---------- if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned otherwise, it just returns paleo_lat """ rad = old_div(np.pi, 180.) paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad) if a95 is not None: paleo_lat_max = old_div( np.arctan(0.5 * np.tan((inc + a95) * rad)), rad) paleo_lat_min = old_div( np.arctan(0.5 * np.tan((inc - a95) * rad)), rad) return paleo_lat, paleo_lat_max, paleo_lat_min else: return paleo_lat
0.001193
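A quick numerical check of the dipole equation tan(inc) = 2*tan(lat) used above, written out as self-contained arithmetic: an inclination of 45 degrees corresponds to a paleolatitude of roughly 26.6 degrees.
import numpy as np

inc = 45.0
paleo_lat = np.degrees(np.arctan(0.5 * np.tan(np.radians(inc))))
# paleo_lat is approximately 26.57 degrees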
def setText(self, text): """ Sets the text for this button. If it is set to show rich text, then it will update the label text, leaving the root button text blank, otherwise it will update the button. :param text | <str> """ self._text = nativestring(text) if self.showRichText(): self.richTextLabel().setText(text) else: super(XPushButton, self).setText(text)
0.008264
def plot(self, legend=None, width=1.5, ladder=True, aspect=10, ticks=(1, 10), match_only=None, ax=None, return_fig=False, colour=None, cmap='viridis', default=None, style='intervals', field=None, **kwargs): """ Hands-free plotting. Args: legend (Legend): The Legend to use for colours, etc. width (float): The width of the plot, in inches. Default 1.5. ladder (bool): Whether to use widths or not. Default True. aspect (int): The aspect ratio of the plot. Default 10. ticks (int or tuple): The (minor,major) tick interval for depth. Only the major interval is labeled. Default (1,10). match_only (list): A list of strings matching the attributes you want to compare when plotting. ax (ax): A matplotlib axis to plot onto. If you pass this, it will be returned. Optional. return_fig (bool): Whether or not to return the matplotlib ``fig`` object. Default False. colour (str): Which data field to use for colours. cmap (cmap): Matplotlib colourmap. Default ``viridis``. **kwargs are passed through to matplotlib's ``patches.Rectangle``. Returns: None. Unless you specify ``return_fig=True`` or pass in an ``ax``. """ if legend is None: legend = Legend.random(self.components) if style.lower() == 'tops': # Make sure width is at least 3 for 'tops' style width = max([3, width]) if ax is None: return_ax = False fig = plt.figure(figsize=(width, aspect*width)) ax = fig.add_axes([0.35, 0.05, 0.6, 0.95]) else: return_ax = True if (self.order == 'none') or (style.lower() == 'points'): # Then this is a set of points. ax = self.plot_points(ax=ax, legend=legend, field=field, **kwargs) elif style.lower() == 'field': if field is None: raise StriplogError('You must provide a field to plot.') ax = self.plot_field(ax=ax, legend=legend, field=field) elif style.lower() == 'tops': ax = self.plot_tops(ax=ax, legend=legend, field=field) ax.set_xticks([]) else: ax = self.plot_axis(ax=ax, legend=legend, ladder=ladder, default_width=width, match_only=kwargs.get('match_only', match_only), colour=colour, cmap=cmap, default=default, width_field=field, **kwargs ) ax.set_xlim([0, width]) ax.set_xticks([]) # Rely on interval order. lower, upper = self[-1].base.z, self[0].top.z rng = abs(upper - lower) ax.set_ylim([lower, upper]) # Make sure ticks is a tuple. try: ticks = tuple(ticks) except TypeError: ticks = (1, ticks) # Avoid MAXTICKS error. while rng/ticks[0] > 250: mi, ma = 10*ticks[0], ticks[1] if ma <= mi: ma = 10 * mi ticks = (mi, ma) # Carry on plotting... minorLocator = mpl.ticker.MultipleLocator(ticks[0]) ax.yaxis.set_minor_locator(minorLocator) majorLocator = mpl.ticker.MultipleLocator(ticks[1]) majorFormatter = mpl.ticker.FormatStrFormatter('%d') ax.yaxis.set_major_locator(majorLocator) ax.yaxis.set_major_formatter(majorFormatter) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.yaxis.set_ticks_position('left') ax.get_yaxis().set_tick_params(which='both', direction='out') # Optional title. title = getattr(self, 'title', None) if title is not None: ax.set_title(title) ax.patch.set_alpha(0) if return_ax: return ax elif return_fig: return fig else: return
0.003982
def distL2(x1,y1,x2,y2): """Compute the L2-norm (Euclidean) distance between two points. The distance is rounded to the closest integer, for compatibility with the TSPLIB convention. The two points are located on coordinates (x1,y1) and (x2,y2), sent as parameters""" xdiff = x2 - x1 ydiff = y2 - y1 return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5)
0.010336
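The TSPLIB rounding convention mentioned in the docstring means distances come back as plain integers; a small sanity check, assuming distL2 is importable from the row above:
assert distL2(0, 0, 3, 4) == 5    # exact 3-4-5 triangle
assert distL2(0, 0, 1, 1) == 1    # sqrt(2) ~ 1.414 rounds to 1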
def main(): """Main function""" parser = argparse.ArgumentParser() parser.add_argument('-u', '--username', required=True, help='enedis username') parser.add_argument('-p', '--password', required=True, help='Password') args = parser.parse_args() client = LinkyClient(args.username, args.password) try: client.login() client.fetch_data() except BaseException as exp: print(exp) return 1 finally: client.close_session() print(json.dumps(client.get_data(), indent=2))
0.001686
def pretty_emit(self, record, is_header=False, task_level=None): """ Wrapper around the :class:`logging.StreamHandler` emit method to add some decoration stuff to the message Args: record (logging.LogRecord): log record to emit is_header (bool): if this record is a header, usually, a start or end task message task_level (int): If passed, will take that as the current nested task level instead of calculating it from the current tasks Returns: None """ task = record.task or self.cur_task if task_level is None: task_level = self.cur_depth_level if is_header: extra_prefix = ( self.get_task_indicator(task_level - 1) + ' ' + ('' if self.am_i_main_thread else '[%s] ' % self.cur_thread) + task + ': ' ) record.levelno = logging.INFO else: extra_prefix = ' ' + self.get_task_indicator(task_level) + ' ' if task: record.msg = ( ' ' * (task_level - 1) + extra_prefix + str(record.msg) ) super().emit(record) super().flush()
0.001587
def getalignedtarget(self, index): """Returns target range only if source index aligns to a single consecutive range of target tokens.""" targetindices = [] target = None foundindex = -1 for sourceindex, targetindex in self.alignment: if sourceindex == index: targetindices.append(targetindex) if len(targetindices) > 1: for i in range(1,len(targetindices)): if abs(targetindices[i] - targetindices[i-1]) != 1: break # not consecutive foundindex = (min(targetindices), max(targetindices)) target = ' '.join(self.target[min(targetindices):max(targetindices)+1]) elif targetindices: foundindex = targetindices[0] target = self.target[foundindex] return target, foundindex
0.005821
def build_js_from_template(self, template_file, variables): """ Build a JS script from a template and args @type template_file: str @param template_file: Script template to implement; can be the name of a built-in script or full filepath to a js file that contains the script. E.g. 'clickElementTemplate.js', 'clickElementTemplate', and '/path/to/custom/template/script.js' are all acceptable @type variables: dict @param variables: Dictionary representing template construction args @rtype: int @rtype: exit code """ template_variable_character = '%' # raise an exception if user passed non-dictionary variables if not isinstance(variables, dict): raise TypeError('You must use a dictionary to populate variables in a javascript template') # This filename is not a full file, attempt to locate the file in built-in templates if not os.path.isfile(template_file): # append the .js extension if not included if '.js' not in template_file: template_file += '.js' # find the template and read the text into a string variable templates_dir = os.path.join(os.path.dirname(__file__), 'jsTemplates') template_full_path = os.path.join(templates_dir, template_file) # The filename specified should be the full path else: template_full_path = template_file # Ensure that the file exists if not os.path.isfile(template_full_path): raise ValueError('File "{}" was not found; you must specify the name of a built-in javascript template ' 'or the full filepath of a custom template'.format(template_full_path)) try: js_text = open(template_full_path).read() except IOError: raise IOError('The template was not found or did not have read permissions: {}'.format(template_full_path)) # replace all variables that match the keys in 'variables' dict for key in variables.keys(): # double escape single and double quotes after variable replacement if hasattr(variables[key], 'replace'): variables[key] = variables[key].replace("'", "\\'") variables[key] = variables[key].replace('"', '\\"') else: # variable is not a string variables[key] = str(variables[key]) js_text = js_text.replace(template_variable_character + key, variables[key]) return js_text
0.005181
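An illustrative call for the template builder above; the instance name, template path, its contents and the 'selector' variable are all hypothetical, and only the '%'-prefixed placeholder convention comes from the method itself.
# /path/to/custom/template/script.js is assumed to contain:
#   document.querySelector('%selector').click();
js = page.build_js_from_template('/path/to/custom/template/script.js',
                                 {'selector': '#submit-button'})
# js == "document.querySelector('#submit-button').click();"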
def create(cls, paramCount): """ Creates a new particle with empty position and velocity arrays and a fitness of -inf """ return Particle(numpy.array([[]]*paramCount), numpy.array([[]]*paramCount), -numpy.Inf)
0.015326
def get_grades_by_regid_and_term(regid, term): """ Returns a StudentGrades model for the regid and term. """ url = "{}/{},{},{}.json".format(enrollment_res_url_prefix, term.year, term.quarter, regid) return _json_to_grades(get_resource(url), regid, term)
0.002618
def ArgSpec(*args, **kwargs): """ Validate a function based on the given argspec. # Example: validations = { "foo": [ArgSpec("a", "b", "c", bar="baz")] } def pass_func(a, b, c, bar="baz"): pass def fail_func(b, c, a, baz="bar"): pass passes = {"foo": pass_func} fails = {"foo": fail_func} """ def argspec_lambda(value): argspec = getargspec(value) argspec_kw_vals = () if argspec.defaults is not None: argspec_kw_vals = argspec.defaults kw_vals = {} arg_offset = 0 arg_len = len(argspec.args) - 1 for val in argspec_kw_vals[::-1]: kw_vals[argspec.args[arg_len - arg_offset]] = val arg_offset += 1 if kwargs == kw_vals: if len(args) != arg_len - arg_offset + 1: return False index = 0 for arg in args: if argspec.args[index] != arg: return False index += 1 return True return False argspec_lambda.err_message = "must match argspec ({0}) {{{1}}}".format(args, kwargs) # as little sense as negating this makes, best to just be consistent. argspec_lambda.not_message = "must not match argspec ({0}) {{{1}}}".format(args, kwargs) return argspec_lambda
0.002155
def readline(self, timeout = 0.1): """Try to read a line from the stream queue. """ try: return self._q.get(block = timeout is not None, timeout = timeout) except Empty: return None
0.029851
def AttachUserList(client, ad_group_id, user_list_id): """Links the provided ad group and user list. Args: client: an AdWordsClient instance. ad_group_id: an int ad group ID. user_list_id: an int user list ID. Returns: The ad group criterion that was successfully created. """ ad_group_criterion_service = client.GetService( 'AdGroupCriterionService', 'v201809') user_list = { 'xsi_type': 'CriterionUserList', 'userListId': user_list_id } ad_group_criterion = { 'xsi_type': 'BiddableAdGroupCriterion', 'criterion': user_list, 'adGroupId': ad_group_id } operations = [{ 'operator': 'ADD', 'operand': ad_group_criterion }] return ad_group_criterion_service.mutate(operations)['value'][0]
0.008986
def sbesselj_sum(z, N): """Tests the Spherical Bessel function jn using the sum: sum_{n=0}^{Inf} (2*n+1) * jn(z)**2 = 1 z: The argument. N: Large N value that the sum runs to. Note that the sum only converges to 1 for large N value (i.e. N >> z). The routine returns the relative error of the assumption. """ b = sbesselj(z, N) vvv = 2.0 * np.array(range(0, N), dtype=np.float64) + 1.0 sm = np.sum(np.sort(vvv * (b ** 2))) return np.abs((sm - 1.0) / sm) + np.spacing(1)
0.001767
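A usage sketch for the identity test above; the argument values are assumed, the only requirement implied by the docstring being that N is much larger than z.
rel_err = sbesselj_sum(5.0, 200)   # N chosen much larger than z
# rel_err should be tiny (near machine precision) if the identity holds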
def _handle_received_k_element(self, k_element: BeautifulSoup): """ The 'k' element appears to be kik's connection-related stanza. It lets us know if a connection or a login was successful or not. :param k_element: The XML element we just received from kik. """ if k_element['ok'] == "1": self.connected = True if 'ts' in k_element.attrs: # authenticated! log.info("[+] Authenticated successfully.") self.authenticated = True self.callback.on_authenticated() elif self.should_login_on_connection: self.login(self.username, self.password) self.should_login_on_connection = False else: self.callback.on_connection_failed(login.ConnectionFailedResponse(k_element))
0.003476
def display_drilldown_as_ul(category, using='categories.Category'): """ Render the category with ancestors and children using the ``categories/ul_tree.html`` template. Example:: {% display_drilldown_as_ul "/Grandparent/Parent" %} or :: {% display_drilldown_as_ul category_obj %} Returns:: <ul> <li><a href="/categories/">Top</a> <ul> <li><a href="/categories/grandparent/">Grandparent</a> <ul> <li><a href="/categories/grandparent/parent/">Parent</a> <ul> <li><a href="/categories/grandparent/parent/child1">Child1</a></li> <li><a href="/categories/grandparent/parent/child2">Child2</a></li> <li><a href="/categories/grandparent/parent/child3">Child3</a></li> </ul> </li> </ul> </li> </ul> </li> </ul> """ cat = get_category(category, using) if cat is None: return {'category': cat, 'path': []} else: return {'category': cat, 'path': drilldown_tree_for_node(cat)}
0.003481
def pop(self, num_items: int, type_hint: str) -> Union[int, bytes, Tuple[Union[int, bytes], ...]]: """ Pop an item off the stack. Note: This function is optimized for speed over readability. """ try: if num_items == 1: return next(self._pop(num_items, type_hint)) else: return tuple(self._pop(num_items, type_hint)) except IndexError: raise InsufficientStack("No stack items")
0.009671
def match_version_pattern(filename, pattern): """ Matches a single version upgrade pattern in the specified *filename* and returns the match information. Returns a #Match object or #None if the *pattern* did not match. """ if "{VERSION}" not in pattern: raise ValueError("pattern does not contain a {VERSION} reference") pattern = pattern.replace('{VERSION}', '(?P<v>[\d\w\.\-_]+)') expr = re.compile(pattern) with open(filename) as fp: lines = fp.read().split('\n') for i, line in enumerate(lines): match = expr.search(line) if match: return Match(filename, lines, line_index=i, version=Version(match.group('v')), span=match.span('v')) return None
0.019915
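A hedged example of the matcher above, assuming a setup.py whose version line reads version='1.4.2' (the file and its contents are invented for illustration).
m = match_version_pattern('setup.py', "version='{VERSION}'")
if m is not None:
    print(m.version)     # Version('1.4.2')
    print(m.line_index)  # index of the matching line within the file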
def _parser_jsonip(text): """Parse response text like the one returned by http://jsonip.com/.""" import json try: return str(json.loads(text).get("ip")) except ValueError as exc: LOG.debug("Text '%s' could not be parsed", exc_info=exc) return None
0.003484
def add_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio): """ Creates an UDP connection in uBridge. :param bridge_name: bridge name in uBridge :param source_nio: source NIO instance :param destination_nio: destination NIO instance """ yield from self._ubridge_send("bridge create {name}".format(name=bridge_name)) if not isinstance(destination_nio, NIOUDP): raise NodeError("Destination NIO is not UDP") yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=source_nio.lport, rhost=source_nio.rhost, rport=source_nio.rport)) yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=destination_nio.lport, rhost=destination_nio.rhost, rport=destination_nio.rport)) if destination_nio.capturing: yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, pcap_file=destination_nio.pcap_output_file)) yield from self._ubridge_send('bridge start {name}'.format(name=bridge_name)) yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
0.007736
def newmodel(f, G, y0, name='NewModel', modelType=ItoModel): """Use the functions f and G to define a new Model class for simulations. It will take functions f and G from global scope and make a new Model class out of them. It will automatically gather any globals used in the definition of f and G and turn them into attributes of the new Model. Args: f: callable(y, t) (defined in global scope) returning (n,) array Scalar or vector-valued function to define the deterministic part G: callable(y, t) (defined in global scope) returning (n,m) array Optional scalar or matrix-valued function to define noise coefficients of a stochastic system. This should be ``None`` for an ODE system. y0 (Number or array): Initial condition name (str): Optional class name for the new model modelType (type): The type of model to simulate. Must be a subclass of nsim.Model, for example nsim.ODEModel, nsim.ItoModel or nsim.StratonovichModel. The default is nsim.ItoModel. Returns: new class (subclass of Model) Raises: SimValueError, SimTypeError """ if not issubclass(modelType, Model): raise SimTypeError('modelType must be a subclass of nsim.Model') if not callable(f) or (G is not None and not callable(G)): raise SimTypeError('f and G must be functions of y and t.') if G is not None and f.__globals__ is not G.__globals__: raise SimValueError('f and G must be defined in the same place') # TODO: validate that f and G are defined at global scope. # TODO: Handle nonlocals used in f,G so that we can lift this restriction. if modelType is ODEModel and G is not None and not np.all(G == 0.0): raise SimValueError('For an ODEModel, noise matrix G should be None') if G is None or modelType is ODEModel: newclass = type(name, (ODEModel,), dict()) setattr(newclass, 'f', staticmethod(__clone_function(f, 'f'))) else: newclass = type(name, (modelType,), dict()) setattr(newclass, 'f', staticmethod(__clone_function(f, 'f'))) setattr(newclass, 'G', staticmethod(__clone_function(G, 'G'))) setattr(newclass, 'y0', copy.deepcopy(y0)) # For any global that is used by the functions f or G, create a # corresponding attribute in our new class. globals_used = [x for x in f.__globals__ if (x in f.__code__.co_names or G is not None and x in G.__code__.co_names)] for x in globals_used: if G is None: setattr(newclass, x, __AccessDict(x, newclass.f.__globals__)) else: setattr(newclass, x, __AccessDicts(x, newclass.f.__globals__, newclass.G.__globals__)) # Put the new class into namespace __main__ (to cause dill to pickle it) newclass.__module__ = '__main__' import __main__ __main__.__dict__[name] = newclass return newclass
0.003694
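A minimal sketch of building a model with the factory above: a 1-D Ornstein-Uhlenbeck-style Ito system, with f and G defined at module (global) scope as the function requires; the parameter values are arbitrary and the class name is made up.
import numpy as np

theta, sigma = 1.0, 0.2         # globals used by f/G become class attributes

def f(y, t):
    return -theta * y           # deterministic drift

def G(y, t):
    return np.array([[sigma]])  # 1x1 noise coefficient matrix

OUModel = newmodel(f, G, y0=np.array([1.0]), name='OUModel')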
def grade(PmagRec, ACCEPT, type, data_model=2.5): """ Finds the 'grade' (pass/fail; A/F) of a record (specimen,sample,site) given the acceptance criteria """ GREATERTHAN = ['specimen_q', 'site_k', 'site_n', 'site_n_lines', 'site_int_n', 'measurement_step_min', 'specimen_int_ptrm_n', 'specimen_fvds', 'specimen_frac', 'specimen_f', 'specimen_n', 'specimen_int_n', 'sample_int_n', 'average_age_min', 'average_k', 'average_r', 'specimen_magn_moment', 'specimen_magn_volume', 'specimen_rsc', 'sample_n', 'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r', 'site_magn_moment', 'site_magn_volume', 'site_magn_mass', 'site_r'] # these statistics must be exceeded to pass, all others must be less than (except specimen_scat, which must be true) ISTRUE = ['specimen_scat'] kill = [] # criteria that kill the record sigma_types = ['sample_int_sigma', 'sample_int_sigma_perc', 'site_int_sigma', 'site_int_sigma_perc', 'average_int_sigma', 'average_int_sigma_perc'] sigmas = [] accept = {} if type == 'specimen_int': USEKEYS = ['specimen_q', 'measurement_step_min', 'measurement_step_max', 'specimen_int_ptrm_n', 'specimen_fvds', 'specimen_frac', 'specimen_f', 'specimen_int_n', 'specimen_magn_moment', 'specimen_magn_volume', 'specimen_rsc', 'specimen_scat', 'specimen_drats', 'specimen_int_mad', 'specimen_int_dang', 'specimen_md', 'specimen_b_beta', 'specimen_w', 'specimen_gmax'] if data_model == 3.0: USEKEYS = [map_magic.spec_magic2_2_magic3_map[k] for k in USEKEYS] elif type == 'specimen_dir': USEKEYS = ['measurement_step_min', 'measurement_step_max', 'specimen_mad', 'specimen_n', 'specimen_magn_moment', 'specimen_magn_volume'] if data_model == 3.0: USEKEYS = [map_magic.spec_magic2_2_magic3_map[k] for k in USEKEYS] elif type == 'sample_int': USEKEYS = ['sample_int_n', 'sample_int_sigma', 'sample_int_sigma_perc'] if data_model == 3.0: USEKEYS = [map_magic.samp_magic2_2_magic3_map[k] for k in USEKEYS] elif type == 'sample_dir': USEKEYS = ['sample_alpha95', 'sample_n', 'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r'] if data_model == 3.0: USEKEYS = [map_magic.samp_magic2_2_magic3_map[k] for k in USEKEYS] elif type == 'site_int': USEKEYS = ['site_int_sigma', 'site_int_sigma_perc', 'site_int_n'] if data_model == 3.0: USEKEYS = [map_magic.site_magic2_2_magic3_map[k] for k in USEKEYS] elif type == 'site_dir': USEKEYS = ['site_alpha95', 'site_k', 'site_n', 'site_n_lines', 'site_n_planes', 'site_r'] if data_model == 3.0: USEKEYS = [map_magic.site_magic2_2_magic3_map[k] for k in USEKEYS] for key in list(ACCEPT.keys()): if ACCEPT[key] != "" and key in USEKEYS: if key in ISTRUE and ACCEPT[key] == 'TRUE' or ACCEPT[key] == 'True': # this is because Excel always capitalizes True to TRUE and # python doesn't recognize that as a boolean; never mind ACCEPT[key] = '1' elif ACCEPT[key] == 'FALSE' or ACCEPT[key] == 'False': ACCEPT[key] = '0' elif eval(ACCEPT[key]) == 0: ACCEPT[key] = "" accept[key] = ACCEPT[key] for key in sigma_types: if key in USEKEYS and key in list(accept.keys()) and key in list(PmagRec.keys()): sigmas.append(key) if len(sigmas) > 1: if PmagRec[sigmas[0]] == "" or PmagRec[sigmas[1]] == "": kill.append(sigmas[0]) kill.append(sigmas[1]) elif eval(PmagRec[sigmas[0]]) > eval(accept[sigmas[0]]) and eval(PmagRec[sigmas[1]]) > eval(accept[sigmas[1]]): kill.append(sigmas[0]) kill.append(sigmas[1]) elif len(sigmas) == 1 and sigmas[0] in list(accept.keys()): if PmagRec[sigmas[0]] > accept[sigmas[0]]: kill.append(sigmas[0]) for key in list(accept.keys()): if accept[key] != "": if key not in list(PmagRec.keys()) or PmagRec[key] == '': kill.append(key) elif key not in sigma_types: if key in ISTRUE: # boolean must be true if PmagRec[key] != '1': kill.append(key) if key in GREATERTHAN: if eval(str(PmagRec[key])) < eval(str(accept[key])): kill.append(key) else: if eval(str(PmagRec[key])) > eval(str(accept[key])): kill.append(key) return kill
0.00275
def close(self): """\ Closes the writer. This method MUST be called once all vectors are added. """ self._mmw.fake_headers(self._num_docs+1, self._num_terms, self._num_nnz) self._mmw.close()
0.012552
def destination_absent(name, server=None): ''' Ensures that the JMS Destination doesn't exist name Name of the JMS Destination ''' ret = {'name': name, 'result': None, 'comment': None, 'changes': {}} jms_ret = _do_element_absent(name, 'admin_object_resource', {}, server) if not jms_ret['error']: if __opts__['test'] and jms_ret['delete']: ret['comment'] = 'JMS Destination set to be deleted' elif jms_ret['delete']: ret['result'] = True ret['comment'] = 'JMS Destination deleted' else: ret['result'] = True ret['comment'] = 'JMS Destination doesn\'t exist' else: ret['result'] = False ret['comment'] = 'Error: {0}'.format(jms_ret['error']) return ret
0.001284
def copy_root_log_to_file(filename: str, fmt: str = LOG_FORMAT, datefmt: str = LOG_DATEFMT) -> None: """ Copy all currently configured logs to the specified file. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. """ fh = logging.FileHandler(filename) # default file mode is 'a' for append formatter = logging.Formatter(fmt=fmt, datefmt=datefmt) fh.setFormatter(formatter) apply_handler_to_root_log(fh)
0.001733
def concatenate_table(tup, blen=None, storage=None, create='table', **kwargs): """Stack tables in sequence vertically (row-wise).""" # setup storage = _util.get_storage(storage) if not isinstance(tup, (tuple, list)): raise ValueError('expected tuple or list, found %r' % tup) if len(tup) < 2: raise ValueError('expected two or more tables to stack') # build output expectedlen = sum(len(t) for t in tup) out = None tnames = None for tdata in tup: tblen = _util.get_blen_table(tdata, blen) tnames, tcolumns = _util.check_table_like(tdata, names=tnames) tlen = len(tcolumns[0]) for i in range(0, tlen, tblen): j = min(i+tblen, tlen) bcolumns = [c[i:j] for c in tcolumns] if out is None: out = getattr(storage, create)(bcolumns, names=tnames, expectedlen=expectedlen, **kwargs) else: out.append(bcolumns) return out
0.000926
def Run(self, unused_arg): """This kills us with no cleanups.""" logging.debug("Disabling service") msg = "Service disabled." if hasattr(sys, "frozen"): grr_binary = os.path.abspath(sys.executable) elif __file__: grr_binary = os.path.abspath(__file__) try: os.remove(grr_binary) except OSError: msg = "Could not remove binary." try: os.remove(config.CONFIG["Client.plist_path"]) except OSError: if "Could not" in msg: msg += " Could not remove plist file." else: msg = "Could not remove plist file." # Get the directory we are running in from pyinstaller. This is either the # GRR directory which we should delete (onedir mode) or a generated temp # directory which we can delete without problems in onefile mode. directory = getattr(sys, "_MEIPASS", None) if directory: shutil.rmtree(directory, ignore_errors=True) self.SendReply(rdf_protodict.DataBlob(string=msg))
0.009045
def npz_convert(self, infile, item): """Convert a numpy NPZ file to h5features.""" data = np.load(infile) labels = self._labels(data) features = data['features'] self._write(item, labels, features)
0.008439
def calculate_iI_correspondence(omega): r"""Get the correspondence between degenerate and nondegenerate schemes.""" Ne = len(omega[0]) om = omega[0][0] correspondence = [] I = 0 for i in range(Ne): if omega[i][0] != om: om = omega[i][0] I += 1 correspondence += [(i+1, I+1)] Nnd = I+1 def I_nd(i): return correspondence[i-1][1] def i_d(I): for i in range(Ne): if correspondence[i][1] == I: return correspondence[i][0] return i_d, I_nd, Nnd
0.007067
def models(self): """Return the models as an ordered dict with hashed entries converted back to their unhashed form.""" models_dict = OrderedDict() collected = [] for item in standard_types: if item in self.unordered_models: new_dict, replacement_dict = unhash_dict(self.unordered_models[item]) models_dict[item] = new_dict collected.append(item) for item in self.unordered_models: # print("item: ", item) if item not in collected: new_dict, replacement_dict = unhash_dict(self.unordered_models[item]) models_dict[item] = new_dict return models_dict
0.00639
def run_services(config, *services, **kwargs): """ Serves a number of services for a contextual block. The caller can specify a number of service classes then serve them either stopping (default) or killing them on exiting the contextual block. Example:: with run_services(config, Foobar, Spam) as runner: # interact with services and stop them on exiting the block # services stopped Additional configuration available to :class:``ServiceRunner`` instances can be specified through keyword arguments:: with run_services(config, Foobar, Spam, kill_on_exit=True): # interact with services # services killed :Parameters: config : dict Configuration to instantiate the service containers with services : service definitions Services to be served for the contextual block kill_on_exit : bool (default=False) If ``True``, run ``kill()`` on the service containers when exiting the contextual block. Otherwise ``stop()`` will be called on the service containers on exiting the block. :Returns: The configured :class:`ServiceRunner` instance """ kill_on_exit = kwargs.pop('kill_on_exit', False) runner = ServiceRunner(config) for service in services: runner.add_service(service) runner.start() yield runner if kill_on_exit: runner.kill() else: runner.stop()
0.00067
def cmdline(argv=sys.argv[1:]): """ Script for merging different collections of stop words. """ parser = ArgumentParser( description='Create and merge collections of stop words') parser.add_argument( 'language', help='The language used in the collection') parser.add_argument('sources', metavar='FILE', nargs='+', help='Source files to parse') options = parser.parse_args(argv) factory = StopWordFactory() language = options.language stop_words = factory.get_stop_words(language, fail_safe=True) for filename in options.sources: stop_words += StopWord(language, factory.read_collection(filename)) filename = factory.get_collection_filename(stop_words.language) factory.write_collection(filename, stop_words.collection)
0.001218