Columns: code (string, lengths 75 to 104k characters) and docstring (string, lengths 1 to 46.9k characters).
def _travis_job_state(state):
    """ Converts a Travis state into a state character, color, and whether
    it's still running or a stopped state. """
    if state in [None, 'queued', 'created', 'received']:
        return colorama.Fore.YELLOW, '*', True
    elif state in ['started', 'running']:
        return colorama.Fore.LIGHTYELLOW_EX, '*', True
    elif state == 'passed':
        return colorama.Fore.LIGHTGREEN_EX, 'P', False
    elif state == 'failed':
        return colorama.Fore.LIGHTRED_EX, 'X', False
    elif state == 'errored':
        return colorama.Fore.LIGHTRED_EX, '!', False
    elif state == 'canceled':
        return colorama.Fore.LIGHTBLACK_EX, 'X', False
    else:
        raise RuntimeError('unknown state: %s' % str(state))
Converts a Travis state into a state character, color, and whether it's still running or a stopped state.
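A brief usage sketch, assuming colorama is importable and the function above is in scope; the caller-side variable names are illustrative:

import colorama

color, char, running = _travis_job_state('passed')
print(color + char + colorama.Style.RESET_ALL)  # prints a green "P"
print(running)                                  # False: 'passed' is a stopped state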
def _get_principal(self, principal_arn):
    """
    raise ResourceNotFoundException
    """
    if ':cert/' in principal_arn:
        certs = [_ for _ in self.certificates.values() if _.arn == principal_arn]
        if len(certs) == 0:
            raise ResourceNotFoundException()
        principal = certs[0]
        return principal
    else:
        # TODO: search for cognito_ids
        pass
    raise ResourceNotFoundException()
raise ResourceNotFoundException
def match_grade_system_id(self, grade_system_id, match):
    """Sets the grade system ``Id`` for this query.

    arg:    grade_system_id (osid.id.Id): a grade system ``Id``
    arg:    match (boolean): ``true`` for a positive match, ``false``
            for a negative match
    raise:  NullArgument - ``grade_system_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    self._add_match('gradeSystemId', str(grade_system_id), bool(match))
Sets the grade system ``Id`` for this query. arg: grade_system_id (osid.id.Id): a grade system ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``grade_system_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def decode(in_bytes):
    """Decode a string using Consistent Overhead Byte Stuffing (COBS).

    Input should be a byte string that has been COBS encoded. Output
    is also a byte string.

    A cobs.DecodeError exception will be raised if the encoded data
    is invalid."""
    if isinstance(in_bytes, str):
        raise TypeError('Unicode-objects are not supported; byte buffer objects only')
    in_bytes_mv = _get_buffer_view(in_bytes)
    out_bytes = bytearray()
    idx = 0
    if len(in_bytes_mv) > 0:
        while True:
            length = ord(in_bytes_mv[idx])
            if length == 0:
                raise DecodeError("zero byte found in input")
            idx += 1
            end = idx + length - 1
            copy_mv = in_bytes_mv[idx:end]
            if b'\x00' in copy_mv:
                raise DecodeError("zero byte found in input")
            out_bytes += copy_mv
            idx = end
            if idx > len(in_bytes_mv):
                raise DecodeError("not enough input bytes for length code")
            if idx < len(in_bytes_mv):
                if length < 0xFF:
                    out_bytes.append(0)
            else:
                break
    return bytes(out_bytes)
Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception will be raised if the encoded data is invalid.
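An illustrative round trip, assuming the cobs PyPI package (which provides the matching encode function); the byte values follow standard COBS framing:

from cobs import cobs

encoded = cobs.encode(b'\x01\x02\x00\x03')  # b'\x03\x01\x02\x02\x03'
print(cobs.decode(encoded))                 # b'\x01\x02\x00\x03'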
def covariance_eigvals(self):
    """
    The two eigenvalues of the `covariance` matrix in decreasing order.
    """
    if not np.isnan(np.sum(self.covariance)):
        eigvals = np.linalg.eigvals(self.covariance)
        if np.any(eigvals < 0):  # negative variance
            return (np.nan, np.nan) * u.pix**2  # pragma: no cover
        return (np.max(eigvals), np.min(eigvals)) * u.pix**2
    else:
        return (np.nan, np.nan) * u.pix**2
The two eigenvalues of the `covariance` matrix in decreasing order.
def set_auth_key( user, key, enc='ssh-rsa', comment='', options=None, config='.ssh/authorized_keys', cache_keys=None, fingerprint_hash_type=None): ''' Add a key to the authorized_keys file. The "key" parameter must only be the string of text that is the encoded key. If the key begins with "ssh-rsa" or ends with user@host, remove those from the key before passing it to this function. CLI Example: .. code-block:: bash salt '*' ssh.set_auth_key <user> '<key>' enc='dsa' ''' if cache_keys is None: cache_keys = [] if len(key.split()) > 1: return 'invalid' enc = _refine_enc(enc) uinfo = __salt__['user.info'](user) if not uinfo: return 'fail' # A 'valid key' to us pretty much means 'decodable as base64', which is # the same filtering done when reading the authorized_keys file. Apply # the same check to ensure we don't insert anything that will not # subsequently be read) key_is_valid = _fingerprint(key, fingerprint_hash_type) is not None if not key_is_valid: return 'Invalid public key' status = check_key(user, key, enc, comment, options, config=config, cache_keys=cache_keys, fingerprint_hash_type=fingerprint_hash_type) if status == 'update': _replace_auth_key(user, key, enc, comment, options or [], config) return 'replace' elif status == 'exists': return 'no change' else: auth_line = _format_auth_line(key, enc, comment, options) fconfig = _get_config_file(user, config) # Fail if the key lives under the user's homedir, and the homedir # doesn't exist udir = uinfo.get('home', '') if fconfig.startswith(udir) and not os.path.isdir(udir): return 'fail' if not os.path.isdir(os.path.dirname(fconfig)): dpath = os.path.dirname(fconfig) os.makedirs(dpath) if not salt.utils.platform.is_windows(): if os.geteuid() == 0: os.chown(dpath, uinfo['uid'], uinfo['gid']) os.chmod(dpath, 448) # If SELINUX is available run a restorecon on the file rcon = salt.utils.path.which('restorecon') if rcon: cmd = [rcon, dpath] subprocess.call(cmd) if not os.path.isfile(fconfig): new_file = True else: new_file = False try: with salt.utils.files.fopen(fconfig, 'ab+') as _fh: if new_file is False: # Let's make sure we have a new line at the end of the file _fh.seek(0, 2) if _fh.tell() > 0: # File isn't empty, check if last byte is a newline # If not, add one _fh.seek(-1, 2) if _fh.read(1) != b'\n': _fh.write(b'\n') _fh.write(salt.utils.stringutils.to_bytes(auth_line)) except (IOError, OSError) as exc: msg = 'Could not write to key file: {0}' raise CommandExecutionError(msg.format(exc)) if new_file: if not salt.utils.platform.is_windows(): if os.geteuid() == 0: os.chown(fconfig, uinfo['uid'], uinfo['gid']) os.chmod(fconfig, 384) # If SELINUX is available run a restorecon on the file rcon = salt.utils.path.which('restorecon') if rcon: cmd = [rcon, fconfig] subprocess.call(cmd) return 'new'
Add a key to the authorized_keys file. The "key" parameter must only be the string of text that is the encoded key. If the key begins with "ssh-rsa" or ends with user@host, remove those from the key before passing it to this function. CLI Example: .. code-block:: bash salt '*' ssh.set_auth_key <user> '<key>' enc='dsa'
def update_role_config_group(resource_root, service_name, name, apigroup,
                             cluster_name="default"):
    """
    Update a role config group by name.
    @param resource_root: The root Resource object.
    @param service_name: Service name.
    @param name: Role config group name.
    @param apigroup: The updated role config group.
    @param cluster_name: Cluster name.
    @return: The updated ApiRoleConfigGroup object.
    @since: API v3
    """
    return call(resource_root.put,
                _get_role_config_group_path(cluster_name, service_name, name),
                ApiRoleConfigGroup, data=apigroup, api_version=3)
Update a role config group by name. @param resource_root: The root Resource object. @param service_name: Service name. @param name: Role config group name. @param apigroup: The updated role config group. @param cluster_name: Cluster name. @return: The updated ApiRoleConfigGroup object. @since: API v3
def hicup_stats_table(self): """ Add core HiCUP stats to the general stats table """ headers = OrderedDict() headers['Percentage_Ditags_Passed_Through_HiCUP'] = { 'title': '% Passed', 'description': 'Percentage Di-Tags Passed Through HiCUP', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } headers['Deduplication_Read_Pairs_Uniques'] = { 'title': '{} Unique'.format(config.read_count_prefix), 'description': 'Unique Di-Tags ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } headers['Percentage_Uniques'] = { 'title': '% Duplicates', 'description': 'Percent Duplicate Di-Tags', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn-rev', 'modify': lambda x: 100 - x } headers['Valid_Pairs'] = { 'title': '{} Valid'.format(config.read_count_prefix), 'description': 'Valid Pairs ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } headers['Percentage_Valid'] = { 'title': '% Valid', 'description': 'Percent Valid Pairs', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } headers['Paired_Read_1'] = { 'title': '{} Pairs Aligned'.format(config.read_count_prefix), 'description': 'Paired Alignments ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } headers['Percentage_Mapped'] = { 'title': '% Aligned', 'description': 'Percentage of Paired Alignments', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } self.general_stats_addcols(self.hicup_data, headers, 'HiCUP')
Add core HiCUP stats to the general stats table
def lrelu_sq(x):
    """
    Concatenates lrelu and square
    """
    dim = len(x.get_shape()) - 1
    return tf.concat(dim, [lrelu(x), tf.minimum(tf.abs(x), tf.square(x))])
Concatenates lrelu and square
def sum_transactions(transactions):
    """ Sums transactions into a total of remaining vacation days. """
    workdays_per_year = 250
    previous_date = None
    rate = 0
    day_sum = 0

    for transaction in transactions:
        date, action, value = _parse_transaction_entry(transaction)

        if previous_date is None:
            previous_date = date

        elapsed = workdays.networkdays(previous_date, date, stat_holidays()) - 1

        if action == 'rate':
            rate = float(value) / workdays_per_year
        elif action == 'off':
            elapsed -= 1  # Didn't work that day
            day_sum -= 1  # And we used a day

        day_sum += rate * elapsed

        if action == 'days':
            day_sum = value  # Fixed value as of this entry

        previous_date = date

    return day_sum
Sums transactions into a total of remaining vacation days.
def GET(self):
    """ Handles GET request """
    if self.user_manager.session_logged_in() or not self.app.allow_registration:
        raise web.notfound()

    error = False
    reset = None
    msg = ""
    data = web.input()

    if "activate" in data:
        msg, error = self.activate_user(data)
    elif "reset" in data:
        msg, error, reset = self.get_reset_data(data)

    return self.template_helper.get_renderer().register(reset, msg, error)
Handles GET request
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
    """Internal utility function to bind the i-th executor.
    This function utilizes simple_bind python interface.
    """
    shared_exec = None if shared_group is None else shared_group.execs[i]
    context = self.contexts[i]
    shared_data_arrays = self.shared_data_arrays[i]

    input_shapes = dict(data_shapes)
    if label_shapes is not None:
        input_shapes.update(dict(label_shapes))

    input_types = {x.name: x.dtype for x in data_shapes}
    if label_shapes is not None:
        input_types.update({x.name: x.dtype for x in label_shapes})

    group2ctx = self.group2ctxs[i]

    executor = self.symbol.simple_bind(ctx=context, grad_req=self.grad_req,
                                       type_dict=input_types, shared_arg_names=self.param_names,
                                       shared_exec=shared_exec, group2ctx=group2ctx,
                                       shared_buffer=shared_data_arrays, **input_shapes)
    self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
    return executor
Internal utility function to bind the i-th executor. This function utilizes simple_bind python interface.
def default_get_arg_names_from_class_name(class_name):
    """Converts normal class names into normal arg names.

    Normal class names are assumed to be CamelCase with an optional leading
    underscore.  Normal arg names are assumed to be lower_with_underscores.

    Args:
      class_name: a class name, e.g., "FooBar" or "_FooBar"
    Returns:
      all likely corresponding arg names, e.g., ["foo_bar"]
    """
    parts = []
    rest = class_name
    if rest.startswith('_'):
        rest = rest[1:]
    while True:
        m = re.match(r'([A-Z][a-z]+)(.*)', rest)
        if m is None:
            break
        parts.append(m.group(1))
        rest = m.group(2)
    if not parts:
        return []
    return ['_'.join(part.lower() for part in parts)]
Converts normal class names into normal arg names. Normal class names are assumed to be CamelCase with an optional leading underscore. Normal arg names are assumed to be lower_with_underscores. Args: class_name: a class name, e.g., "FooBar" or "_FooBar" Returns: all likely corresponding arg names, e.g., ["foo_bar"]
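A short illustration derived from the regex above; names that do not fit the CamelCase pattern (for example a leading acronym) yield an empty list:

print(default_get_arg_names_from_class_name('FooBar'))      # ['foo_bar']
print(default_get_arg_names_from_class_name('_FooBar'))     # ['foo_bar']
print(default_get_arg_names_from_class_name('HTTPServer'))  # []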
def clean(ctx):
    """Clean previously built package artifacts.
    """
    ctx.run(f'python setup.py clean')
    dist = ROOT.joinpath('dist')
    print(f'[clean] Removing {dist}')
    if dist.exists():
        shutil.rmtree(str(dist))
Clean previously built package artifacts.
def copy_folder(self, dest_folder_id, source_folder_id): """ Copy a folder. Copy a folder (and its contents) from elsewhere in Canvas into a folder. Copying a folder across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. If the source and destination folders are in the same context, the source folder may not contain the destination folder. A folder will be renamed at its destination if another folder with the same name already exists. """ path = {} data = {} params = {} # REQUIRED - PATH - dest_folder_id """ID""" path["dest_folder_id"] = dest_folder_id # REQUIRED - source_folder_id """The id of the source folder""" data["source_folder_id"] = source_folder_id self.logger.debug("POST /api/v1/folders/{dest_folder_id}/copy_folder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/folders/{dest_folder_id}/copy_folder".format(**path), data=data, params=params, single_item=True)
Copy a folder. Copy a folder (and its contents) from elsewhere in Canvas into a folder. Copying a folder across contexts (between courses and users) is permitted, but the source and destination must belong to the same institution. If the source and destination folders are in the same context, the source folder may not contain the destination folder. A folder will be renamed at its destination if another folder with the same name already exists.
def get_name_str(self, element):
    '''get_name_str

    High-level api: Produce a string that represents the name of a node.

    Parameters
    ----------

    element : `Element`
        A node in model tree.

    Returns
    -------

    str
        A string that represents the name of a node.
    '''
    if element.get('diff') == 'added':
        return self.model2.get_name_str(element)
    else:
        return self.model1.get_name_str(element)
get_name_str High-level api: Produce a string that represents the name of a node. Parameters ---------- element : `Element` A node in model tree. Returns ------- str A string that represents the name of a node.
def get_files_types(self):
    """
    Return the files inside the APK with their associated types (by using python-magic)

    :rtype: a dictionary
    """
    if self._files == {}:
        # Generate File Types / CRC List
        for i in self.get_files():
            buffer = self.zip.read(i)
            self.files_crc32[i] = crc32(buffer)
            # FIXME why not use the crc from the zipfile?
            # should be validated as well.
            # crc = self.zip.getinfo(i).CRC
            self._files[i] = self._get_file_magic_name(buffer)

    return self._files
Return the files inside the APK with their associated types (by using python-magic) :rtype: a dictionary
def ascii2h5(bh_dir=None): """ Convert the Burstein & Heiles (1982) dust map from ASCII to HDF5. """ if bh_dir is None: bh_dir = os.path.join(data_dir_default, 'bh') fname = os.path.join(bh_dir, '{}.ascii') f = h5py.File('bh.h5', 'w') for region in ('hinorth', 'hisouth'): data = np.loadtxt(fname.format(region), dtype='f4') # Reshape and clip data.shape = (210, 201) # (R, N) data = data[:201] # Last 9 records are empty # Use NaNs where no data data[data < -9000] = np.nan dset = f.create_dataset( region, data=data, chunks=True, compression='gzip', compression_opts=3 ) dset.attrs['axes'] = ('R', 'N') dset.attrs['description'] = ( 'HI 21cm column densities, in units of 10*NHYD. ' 'R = 100 + [(90^o-|b|) sin(l)]/[0.3 degrees]. ' 'N = 100 + [(90^o-|b|) cos (l)]/[0.3 degrees].' ) for region in ('rednorth', 'redsouth'): data = np.loadtxt(fname.format(region), dtype='f4') # Reshape and clip data.shape = (94, 1200) # (R, N) data = data[:93] # Last record is empty # Use NaNs where no data data[data < -9000] = np.nan dset = f.create_dataset( region, data=data, chunks=True, compression='gzip', compression_opts=3 ) dset.attrs['axes'] = ('R', 'N') dset.attrs['description'] = ( 'E(B-V), in units of 0.001 mag. ' 'R = (|b| - 10) / (0.6 degrees). ' 'N = (l + 0.15) / 0.3 - 1.' ) f.attrs['description'] = ( 'The Burstein & Heiles (1982) dust map.' ) f.close()
Convert the Burstein & Heiles (1982) dust map from ASCII to HDF5.
def enable(self, cmd="sudo su", pattern="ssword", re_flags=re.IGNORECASE):
    """Attempt to become root."""
    delay_factor = self.select_delay_factor(delay_factor=0)
    output = ""
    if not self.check_enable_mode():
        self.write_channel(self.normalize_cmd(cmd))
        time.sleep(0.3 * delay_factor)
        try:
            output += self.read_channel()
            if re.search(pattern, output, flags=re_flags):
                self.write_channel(self.normalize_cmd(self.secret))
            self.set_base_prompt()
        except socket.timeout:
            raise NetMikoTimeoutException(
                "Timed-out reading channel, data not available."
            )
        if not self.check_enable_mode():
            msg = (
                "Failed to enter enable mode. Please ensure you pass "
                "the 'secret' argument to ConnectHandler."
            )
            raise ValueError(msg)
    return output
Attempt to become root.
def get_revocation_time(self):
    """Get the revocation time as naive datetime.

    Note that this method is only used by cryptography>=2.4.
    """
    if self.revoked is False:
        return

    if timezone.is_aware(self.revoked_date):
        # convert datetime object to UTC and make it naive
        return timezone.make_naive(self.revoked_date, pytz.utc)

    return self.revoked_date
Get the revocation time as naive datetime. Note that this method is only used by cryptography>=2.4.
def invertible_total_flatten(unflat_list): r""" Args: unflat_list (list): Returns: tuple: (flat_list, invert_levels) CommandLine: python -m utool.util_list --exec-invertible_total_flatten --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> import utool as ut >>> unflat_list = [0, [[1, 2, 3], 4, 5], 9, [2, 3], [1, [2, 3, 4]], 1, 2, 3] >>> print('unflat_list = %r' % (unflat_list,)) >>> (flat_list, invert_levels) = invertible_total_flatten(unflat_list) >>> print('flat_list = %r' % (flat_list,)) >>> unflat_list2 = total_unflatten(flat_list, invert_levels) >>> print('unflat_list2 = %r' % (unflat_list2,)) >>> assert unflat_list2 == unflat_list >>> assert ut.depth_profile(flat_list) == 16 """ import utool as ut next_list = unflat_list scalar_flags = [not ut.isiterable(item) for item in next_list] invert_stack = [] # print('unflat_list = %r' % (unflat_list,)) while not all(scalar_flags): unflattenized = [[item] if flag else item for flag, item in zip(scalar_flags, next_list)] flatter_list, invert_part = ut.invertible_flatten1(unflattenized) # print('flatter_list = %r' % (flatter_list,)) for idx in ut.where(scalar_flags): invert_part[idx] = invert_part[idx][0] invert_stack.append(invert_part) next_list = flatter_list scalar_flags = [not ut.isiterable(item) for item in next_list] # invert_part = [None] * len(scalar_flags) # invert_stack.append(invert_part) invert_levels = invert_stack[::-1] flat_list = next_list return flat_list, invert_levels
r""" Args: unflat_list (list): Returns: tuple: (flat_list, invert_levels) CommandLine: python -m utool.util_list --exec-invertible_total_flatten --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> import utool as ut >>> unflat_list = [0, [[1, 2, 3], 4, 5], 9, [2, 3], [1, [2, 3, 4]], 1, 2, 3] >>> print('unflat_list = %r' % (unflat_list,)) >>> (flat_list, invert_levels) = invertible_total_flatten(unflat_list) >>> print('flat_list = %r' % (flat_list,)) >>> unflat_list2 = total_unflatten(flat_list, invert_levels) >>> print('unflat_list2 = %r' % (unflat_list2,)) >>> assert unflat_list2 == unflat_list >>> assert ut.depth_profile(flat_list) == 16
def _ParseFileData(self, knowledge_base, file_object):
    """Parses file content (data) for system product preprocessing attribute.

    Args:
      knowledge_base (KnowledgeBase): to fill with preprocessing information.
      file_object (dfvfs.FileIO): file-like object that contains the artifact
          value data.

    Raises:
      errors.PreProcessFail: if the preprocessing fails.
    """
    text_file_object = dfvfs_text_file.TextFile(file_object, encoding='utf-8')
    system_product = text_file_object.readline()

    # Only parse known default /etc/issue file contents.
    if system_product.startswith('Debian GNU/Linux '):
        system_product, _, _ = system_product.partition('\\')
        system_product = system_product.rstrip()
    else:
        system_product = None

    if not knowledge_base.GetValue('operating_system_product'):
        if system_product:
            knowledge_base.SetValue('operating_system_product', system_product)
Parses file content (data) for system product preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
def apt_add_repository_from_apt_string(apt_string, apt_file):
    """ adds a new repository file for apt """
    apt_file_path = '/etc/apt/sources.list.d/%s' % apt_file

    if not file_contains(apt_file_path, apt_string.lower(), use_sudo=True):
        file_append(apt_file_path, apt_string.lower(), use_sudo=True)

        with hide('running', 'stdout'):
            sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
adds a new repository file for apt
def write_cookies_to_cache(cj, username):
    """
    Save RequestsCookieJar to disk in Mozilla's cookies.txt file format.

    This prevents us from repeated authentications on the
    accounts.coursera.org and class.coursera.org/class_name sites.
    """
    mkdir_p(PATH_COOKIES, 0o700)
    path = get_cookies_cache_path(username)
    cached_cj = cookielib.MozillaCookieJar()
    for cookie in cj:
        cached_cj.set_cookie(cookie)
    cached_cj.save(path)
Save RequestsCookieJar to disk in Mozilla's cookies.txt file format. This prevents us from repeated authentications on the accounts.coursera.org and class.coursera.org/class_name sites.
def load_inhibit(self, train=True, test=True) -> tuple:
    """Generate data with inhibitory inputs created from wake word samples"""
    def loader(kws: list, nkws: list):
        from precise.params import pr
        inputs = np.empty((0, pr.n_features, pr.feature_size))
        outputs = np.zeros((len(kws), 1))
        for f in kws:
            if not isfile(f):
                continue
            new_vec = load_vector(f, vectorize_inhibit)
            inputs = np.concatenate([inputs, new_vec])

        return self.merge((inputs, outputs), self.__load_files(kws, nkws))

    return self.__load(loader, train, test)
Generate data with inhibitory inputs created from wake word samples
async def send_request(self): """Coroutine to send request headers with metadata to the server. New HTTP/2 stream will be created during this coroutine call. .. note:: This coroutine will be called implicitly during first :py:meth:`send_message` coroutine call, if not called before explicitly. """ if self._send_request_done: raise ProtocolError('Request is already sent') with self._wrapper: protocol = await self._channel.__connect__() stream = protocol.processor.connection\ .create_stream(wrapper=self._wrapper) headers = [ (':method', 'POST'), (':scheme', self._channel._scheme), (':path', self._method_name), (':authority', self._channel._authority), ] if self._deadline is not None: timeout = self._deadline.time_remaining() headers.append(('grpc-timeout', encode_timeout(timeout))) content_type = (GRPC_CONTENT_TYPE + '+' + self._codec.__content_subtype__) headers.extend(( ('te', 'trailers'), ('content-type', content_type), ('user-agent', USER_AGENT), )) metadata, = await self._dispatch.send_request( self._metadata, method_name=self._method_name, deadline=self._deadline, content_type=content_type, ) headers.extend(encode_metadata(metadata)) release_stream = await stream.send_request( headers, _processor=protocol.processor, ) self._stream = stream self._release_stream = release_stream self._send_request_done = True
Coroutine to send request headers with metadata to the server. New HTTP/2 stream will be created during this coroutine call. .. note:: This coroutine will be called implicitly during first :py:meth:`send_message` coroutine call, if not called before explicitly.
def _cast_expected_to_returned_type(expected, returned):
    '''
    Determine the type of variable returned
    Cast the expected to the type of variable returned
    '''
    ret_type = type(returned)
    new_expected = expected
    if expected == "False" and ret_type == bool:
        expected = False
    try:
        new_expected = ret_type(expected)
    except ValueError:
        log.info("Unable to cast expected into type of returned")
        log.info("returned = %s", returned)
        log.info("type of returned = %s", type(returned))
        log.info("expected = %s", expected)
        log.info("type of expected = %s", type(expected))
    return new_expected
Determine the type of variable returned Cast the expected to the type of variable returned
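Hypothetical calls showing the casting behaviour (the input values are illustrative):

_cast_expected_to_returned_type('5', 3)         # 5, cast to int
_cast_expected_to_returned_type('False', True)  # False, special-cased because bool('False') is True
_cast_expected_to_returned_type('abc', 3)       # 'abc', the failed cast is logged and the original returned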
def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:
    """
    Extends sequence with new Encoder. 'dtype' gets passed into Encoder instance
    if not present in parameters and supported by specific Encoder type.

    :param cls: Encoder type.
    :param infer_hidden: If number of hidden should be inferred from previous encoder.
    :param kwargs: Named arbitrary parameters for Encoder.
    :return: Instance of Encoder.
    """
    params = dict(kwargs)
    if infer_hidden:
        params['num_hidden'] = self.get_num_hidden()

    sig_params = inspect.signature(cls.__init__).parameters
    if 'dtype' in sig_params and 'dtype' not in kwargs:
        params['dtype'] = self.dtype
    encoder = cls(**params)
    self.encoders.append(encoder)
    return encoder
Extends sequence with new Encoder. 'dtype' gets passed into Encoder instance if not present in parameters and supported by specific Encoder type. :param cls: Encoder type. :param infer_hidden: If number of hidden should be inferred from previous encoder. :param kwargs: Named arbitrary parameters for Encoder. :return: Instance of Encoder.
def _save_npz(self):
    '''
    Saves all of the de-trending information to disk in an `npz` file
    '''
    # Save the data
    d = dict(self.__dict__)
    d.pop('_weights', None)
    d.pop('_A', None)
    d.pop('_B', None)
    d.pop('_f', None)
    d.pop('_mK', None)
    d.pop('K', None)
    d.pop('dvs', None)
    d.pop('clobber', None)
    d.pop('clobber_tpf', None)
    d.pop('_mission', None)
    d.pop('debug', None)
    np.savez(os.path.join(self.dir, self.name + '.npz'), **d)
Saves all of the de-trending information to disk in an `npz` file
def processDefines(defs):
    """process defines, resolving strings, lists, dictionaries, into a list of
    strings
    """
    if SCons.Util.is_List(defs):
        l = []
        for d in defs:
            if d is None:
                continue
            elif SCons.Util.is_List(d) or isinstance(d, tuple):
                if len(d) >= 2:
                    l.append(str(d[0]) + '=' + str(d[1]))
                else:
                    l.append(str(d[0]))
            elif SCons.Util.is_Dict(d):
                for macro, value in d.items():
                    if value is not None:
                        l.append(str(macro) + '=' + str(value))
                    else:
                        l.append(str(macro))
            elif SCons.Util.is_String(d):
                l.append(str(d))
            else:
                raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None." % repr(d))
    elif SCons.Util.is_Dict(defs):
        # The items in a dictionary are stored in random order, but
        # if the order of the command-line options changes from
        # invocation to invocation, then the signature of the command
        # line will change and we'll get random unnecessary rebuilds.
        # Consequently, we have to sort the keys to ensure a
        # consistent order...
        l = []
        for k, v in sorted(defs.items()):
            if v is None:
                l.append(str(k))
            else:
                l.append(str(k) + '=' + str(v))
    else:
        l = [str(defs)]
    return l
process defines, resolving strings, lists, dictionaries, into a list of strings
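An illustrative call, assuming SCons is importable; dictionary input is sorted by key so the generated command line stays stable across invocations:

processDefines(['NDEBUG', ('VERSION', 3), {'DEBUG_LEVEL': None}])
# ['NDEBUG', 'VERSION=3', 'DEBUG_LEVEL']
processDefines({'B': 1, 'A': None})
# ['A', 'B=1']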
def from_csv(cls, path:PathOrStr, csv_name, valid_pct:float=0.2, test:Optional[str]=None,
             tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None,
             delimiter:str=None, header='infer', text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0,
             label_delim:str=None, chunksize:int=10000, max_vocab:int=60000, min_freq:int=2,
             mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch:
    "Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation."
    df = pd.read_csv(Path(path)/csv_name, header=header, delimiter=delimiter)
    df = df.iloc[np.random.permutation(len(df))]
    cut = int(valid_pct * len(df)) + 1
    train_df, valid_df = df[cut:], df[:cut]
    test_df = None if test is None else pd.read_csv(Path(path)/test, header=header, delimiter=delimiter)
    return cls.from_df(path, train_df, valid_df, test_df, tokenizer=tokenizer, vocab=vocab, classes=classes,
                       text_cols=text_cols, label_cols=label_cols, label_delim=label_delim,
                       chunksize=chunksize, max_vocab=max_vocab, min_freq=min_freq,
                       mark_fields=mark_fields, include_bos=include_bos, include_eos=include_eos, **kwargs)
Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation.
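A hedged usage sketch in the fastai v1 style this classmethod belongs to; the file name and column choices are assumptions:

from fastai.text import TextClasDataBunch

data = TextClasDataBunch.from_csv('data', 'texts.csv', text_cols='text',
                                  label_cols='label', valid_pct=0.2)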
def search_responsify(serializer, mimetype):
    """Create a Records-REST search result response serializer.

    :param serializer: Serializer instance.
    :param mimetype: MIME type of response.
    :returns: Function that generates a record HTTP response.
    """
    def view(pid_fetcher, search_result, code=200, headers=None, links=None,
             item_links_factory=None):
        response = current_app.response_class(
            serializer.serialize_search(pid_fetcher, search_result,
                                        links=links,
                                        item_links_factory=item_links_factory),
            mimetype=mimetype)
        response.status_code = code
        if headers is not None:
            response.headers.extend(headers)

        if links is not None:
            add_link_header(response, links)

        return response

    return view
Create a Records-REST search result response serializer. :param serializer: Serializer instance. :param mimetype: MIME type of response. :returns: Function that generates a record HTTP response.
def emulate_mouse(self, key_code, x_val, y_val, data): """Emulate the ev codes using the data Windows has given us. Note that by default in Windows, to recognise a double click, you just notice two clicks in a row within a reasonablely short time period. However, if the application developer sets the application window's class style to CS_DBLCLKS, the operating system will notice the four button events (down, up, down, up), intercept them and then send a single key code instead. There are no such special double click codes on other platforms, so not obvious what to do with them. It might be best to just convert them back to four events. Currently we do nothing. ((0x0203, 'WM_LBUTTONDBLCLK'), (0x0206, 'WM_RBUTTONDBLCLK'), (0x0209, 'WM_MBUTTONDBLCLK'), (0x020D, 'WM_XBUTTONDBLCLK')) """ # Once again ignore Windows' relative time (since system # startup) and use the absolute time (since epoch i.e. 1st Jan # 1970). self.update_timeval() events = [] if key_code == 0x0200: # We have a mouse move alone. # So just pass through to below pass elif key_code == 0x020A: # We have a vertical mouse wheel turn events.append(self.emulate_wheel(data, 'y', self.timeval)) elif key_code == 0x020E: # We have a horizontal mouse wheel turn # https://msdn.microsoft.com/en-us/library/windows/desktop/ # ms645614%28v=vs.85%29.aspx events.append(self.emulate_wheel(data, 'x', self.timeval)) else: # We have a button press. # Distinguish the second extra button if key_code == 0x020B and data == 2: key_code = 0x020B2 elif key_code == 0x020C and data == 2: key_code = 0x020C2 # Get the mouse codes code, value, scan_code = self.mouse_codes[key_code] # Add in the press events scan_event, key_event = self.emulate_press( code, scan_code, value, self.timeval) events.append(scan_event) events.append(key_event) # Add in the absolute position of the mouse cursor x_event, y_event = self.emulate_abs(x_val, y_val, self.timeval) events.append(x_event) events.append(y_event) # End with a sync marker events.append(self.sync_marker(self.timeval)) # We are done self.write_to_pipe(events)
Emulate the ev codes using the data Windows has given us. Note that by default in Windows, to recognise a double click, you just notice two clicks in a row within a reasonablely short time period. However, if the application developer sets the application window's class style to CS_DBLCLKS, the operating system will notice the four button events (down, up, down, up), intercept them and then send a single key code instead. There are no such special double click codes on other platforms, so not obvious what to do with them. It might be best to just convert them back to four events. Currently we do nothing. ((0x0203, 'WM_LBUTTONDBLCLK'), (0x0206, 'WM_RBUTTONDBLCLK'), (0x0209, 'WM_MBUTTONDBLCLK'), (0x020D, 'WM_XBUTTONDBLCLK'))
def _runcmd(progargs, stdinput=None):
    '''
    Run the command progargs with optional input to be fed in to stdin.
    '''
    stdin = None
    if stdinput is not None:
        assert(isinstance(stdinput, list))
        stdin = PIPE
    err = 0
    output = b''
    log_debug("Calling {} with input {}".format(' '.join(progargs), stdinput))
    try:
        p = Popen(progargs, shell=True, stdin=stdin, stderr=STDOUT, stdout=PIPE,
                  universal_newlines=True)
        if stdinput is not None:
            for cmd in stdinput:
                print(cmd, file=p.stdin)
            p.stdin.close()
        output = p.stdout.read()
        p.stdout.close()
        err = p.wait(timeout=1.0)
    except OSError as e:
        err = e.errno
        log_warn("Error calling {}: {}".format(progargs, e.strerror))
    except Exception as e:
        errstr = str(e)
        log_warn("Error calling {}: {}".format(progargs, errstr))
        err = -1
    log_debug("Result of command (errcode {}): {}".format(err, output))
    return err, output
Run the command progargs with optional input to be fed in to stdin.
def get_schema_descendant(
        self, route: SchemaRoute) -> Optional[SchemaNode]:
    """Return descendant schema node or ``None`` if not found.

    Args:
        route: Schema route to the descendant node
            (relative to the receiver).
    """
    node = self
    for p in route:
        node = node.get_child(*p)
        if node is None:
            return None
    return node
Return descendant schema node or ``None`` if not found. Args: route: Schema route to the descendant node (relative to the receiver).
def transformer_ada_lmpackedbase_dialog():
    """Set of hyperparameters."""
    hparams = transformer_base_vq_ada_32ex_packed()
    hparams.max_length = 1024
    hparams.ffn_layer = "dense_relu_dense"
    hparams.batch_size = 4096
    return hparams
Set of hyperparameters.
def get_start_time(self):
    """
    Return the start time of the entry as a :class:`datetime.time` object.
    If the start time is `None`, the end time of the previous entry will be
    returned instead. If the current entry doesn't have a duration in the
    form of a tuple, if there's no previous entry or if the previous entry
    has no end time, the value `None` will be returned.
    """
    if not isinstance(self.duration, tuple):
        return None

    if self.duration[0] is not None:
        return self.duration[0]
    else:
        if (self.previous_entry and
                isinstance(self.previous_entry.duration, tuple) and
                self.previous_entry.duration[1] is not None):
            return self.previous_entry.duration[1]

    return None
Return the start time of the entry as a :class:`datetime.time` object. If the start time is `None`, the end time of the previous entry will be returned instead. If the current entry doesn't have a duration in the form of a tuple, if there's no previous entry or if the previous entry has no end time, the value `None` will be returned.
def delete_message_from_handle(self, queue, receipt_handle):
    """
    Delete a message from a queue, given a receipt handle.

    :type queue: A :class:`boto.sqs.queue.Queue` object
    :param queue: The Queue from which messages are read.

    :type receipt_handle: str
    :param receipt_handle: The receipt handle for the message

    :rtype: bool
    :return: True if successful, False otherwise.
    """
    params = {'ReceiptHandle': receipt_handle}
    return self.get_status('DeleteMessage', params, queue.id)
Delete a message from a queue, given a receipt handle. :type queue: A :class:`boto.sqs.queue.Queue` object :param queue: The Queue from which messages are read. :type receipt_handle: str :param receipt_handle: The receipt handle for the message :rtype: bool :return: True if successful, False otherwise.
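A minimal sketch using the boto2 SQS API this method belongs to; the region and queue name are assumptions:

import boto.sqs

conn = boto.sqs.connect_to_region('us-east-1')
queue = conn.get_queue('my-queue')
message = queue.read()
if message is not None:
    conn.delete_message_from_handle(queue, message.receipt_handle)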
def save_figure_raw_data(figure="gcf", **kwargs): """ This will just output an ascii file for each of the traces in the shown figure. **kwargs are sent to dialogs.Save() """ # choose a path to save to path = _s.dialogs.Save(**kwargs) if path=="": return "aborted." # if no argument was given, get the current axes if figure=="gcf": figure = _pylab.gcf() for n in range(len(figure.axes)): a = figure.axes[n] for m in range(len(a.lines)): l = a.lines[m] x = l.get_xdata() y = l.get_ydata() p = _os.path.split(path) p = _os.path.join(p[0], "axes" + str(n) + " line" + str(m) + " " + p[1]) print(p) # loop over the data f = open(p, 'w') for j in range(0, len(x)): f.write(str(x[j]) + "\t" + str(y[j]) + "\n") f.close()
This will just output an ascii file for each of the traces in the shown figure. **kwargs are sent to dialogs.Save()
def snapshot(self):
    """Take a snapshot of the experiment.

    Returns `self`."""
    nextSnapshotNum = self.nextSnapshotNum
    nextSnapshotPath = self.getFullPathToSnapshot(nextSnapshotNum)
    if os.path.lexists(nextSnapshotPath):
        self.rmR(nextSnapshotPath)
    self.mkdirp(os.path.join(nextSnapshotPath, ".experiment"))
    return self.dump(nextSnapshotPath).__markLatest(nextSnapshotNum)
Take a snapshot of the experiment. Returns `self`.
def camera_info(self, camera_ids, **kwargs):
    """Return a list of cameras matching camera_ids."""
    api = self._api_info['camera']
    payload = dict({
        '_sid': self._sid,
        'api': api['name'],
        'method': 'GetInfo',
        'version': api['version'],
        'cameraIds': ', '.join(str(id) for id in camera_ids),
    }, **kwargs)
    response = self._get_json_with_retry(api['url'], payload)

    cameras = []
    for data in response['data']['cameras']:
        cameras.append(Camera(data, self._video_stream_url))

    return cameras
Return a list of cameras matching camera_ids.
def add(self, chassis):
    """ add chassis.

    :param chassis: chassis IP address.
    """
    self.chassis_chain[chassis] = IxeChassis(self.session, chassis, len(self.chassis_chain) + 1)
    self.chassis_chain[chassis].connect()
add chassis. :param chassis: chassis IP address.
def _set_up_schema_elements_of_kind(self, class_name_to_definition, kind, class_names): """Load all schema classes of the given kind. Used as part of __init__.""" allowed_duplicated_edge_property_names = frozenset({ EDGE_DESTINATION_PROPERTY_NAME, EDGE_SOURCE_PROPERTY_NAME }) orientdb_base_classes = frozenset({ ORIENTDB_BASE_VERTEX_CLASS_NAME, ORIENTDB_BASE_EDGE_CLASS_NAME, }) for class_name in class_names: class_definition = class_name_to_definition[class_name] class_fields = class_definition.get('customFields') if class_fields is None: # OrientDB likes to make empty collections be None instead. # We convert this field back to an empty dict, for our general sanity. class_fields = dict() abstract = class_definition['abstract'] if class_name in orientdb_base_classes: # Special-case the V and E base classes: # OrientDB won't let us make them abstract, but we don't want to create # any vertices or edges with those types either. # Pretend they are marked abstract in OrientDB's schema. abstract = True property_name_to_descriptor = {} all_property_lists = ( class_name_to_definition[inherited_class_name]['properties'] for inherited_class_name in self._inheritance_sets[class_name] ) links = {EDGE_DESTINATION_PROPERTY_NAME: [], EDGE_SOURCE_PROPERTY_NAME: []} for property_definition in chain.from_iterable(all_property_lists): property_name = property_definition['name'] # The only properties we allow to be redefined are the in/out properties # of edge classes. All other properties may only be defined once # in the entire inheritance hierarchy of any schema class, of any kind. duplication_allowed = all(( property_name in allowed_duplicated_edge_property_names, kind == SchemaElement.ELEMENT_KIND_EDGE )) if not duplication_allowed and property_name in property_name_to_descriptor: raise AssertionError(u'The property "{}" on class "{}" is defined ' u'more than once, this is not allowed!' .format(property_name, class_name)) property_descriptor = self._create_descriptor_from_property_definition( class_name, property_definition, class_name_to_definition) if property_name in allowed_duplicated_edge_property_names: links[property_name].append(property_descriptor) else: property_name_to_descriptor[property_name] = property_descriptor for property_name in allowed_duplicated_edge_property_names: elements = { property_descriptor.qualifier for property_descriptor in links[property_name] } # If there are multiple in/out properties, we choose to include the one that # is a subclass of all the elements present in the in/out properties. for property_descriptor in links[property_name]: subclass_set = self._subclass_sets[property_descriptor.qualifier] if len(elements.intersection(subclass_set)) == 1: current_descriptor = property_name_to_descriptor.get(property_name, None) if current_descriptor and current_descriptor != property_descriptor: raise AssertionError(u'There already exists property "{}" in addition ' u'to property "{}" which is a subclass of all ' u'in/out properties for class "{}".' .format(current_descriptor, property_descriptor, class_name)) property_name_to_descriptor[property_name] = property_descriptor if (property_name not in property_name_to_descriptor and not abstract and kind == SchemaElement.ELEMENT_KIND_EDGE): raise AssertionError(u'For property "{}" of non-abstract edge class "{}", ' u'no such subclass-of-all-elements exists.' .format(property_name, class_name)) self._elements[class_name] = SchemaElement(class_name, kind, abstract, property_name_to_descriptor, class_fields)
Load all schema classes of the given kind. Used as part of __init__.
def respond_unauthorized(self, request_authentication=False):
    """
    Respond to the client that the request is unauthorized.

    :param bool request_authentication: Whether to request basic authentication
        information by sending a WWW-Authenticate header.
    """
    headers = {}
    if request_authentication:
        headers['WWW-Authenticate'] = 'Basic realm="' + self.__config['server_version'] + '"'
    self.send_response_full(b'Unauthorized', status=401, headers=headers)
    return
Respond to the client that the request is unauthorized. :param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header.
def nonzero(self):
    """ property decorated method to get a new ObservationEnsemble
    of only non-zero weighted observations

    Returns
    -------
    ObservationEnsemble : ObservationEnsemble

    """
    df = self.loc[:, self.pst.nnz_obs_names]
    return ObservationEnsemble.from_dataframe(df=df,
                                              pst=self.pst.get(obs_names=self.pst.nnz_obs_names))
property decorated method to get a new ObservationEnsemble of only non-zero weighted observations Returns ------- ObservationEnsemble : ObservationEnsemble
def list_hosting_device_handled_by_config_agent(
        self, client, cfg_agent_id, **_params):
    """Fetches a list of hosting devices handled by a config agent."""
    return client.get((ConfigAgentHandlingHostingDevice.resource_path +
                       CFG_AGENT_HOSTING_DEVICES) % cfg_agent_id,
                      params=_params)
Fetches a list of hosting devices handled by a config agent.
def set_request(self, method=None, sub_url="", data=None, params=None, proxies=None): """ :param method: str of the method of the api_call :param sub_url: str of the url after the uri :param data: dict of form data to be sent with the request :param params: dict of additional data to be sent as the request args :param proxies: str of the proxie to use :return: None """ self.method = method or self.method self.url = self.base_uri and (self.base_uri + sub_url) or sub_url self.params.update(params or {}) self.proxies = proxies or self.proxies if self.params: self.url += '?' + '&'.join( ['%s=%s' % (k, v) for k, v in self.params.items()]) self.request_data = deepcopy(data) self._timestamps['setup'] = time.time() if isinstance(data, dict) and 'content-type' not in self.headers: self.headers['content-type'] = 'application/json' self.json = data # don't change this user.post else: self.data = data if self.accepted_return is not None: self.headers['Accept'] = \ {'json': 'application/json', 'html': 'text/html'}[ self.accepted_return] self._stage = STAGE_SET
:param method: str of the method of the api_call :param sub_url: str of the url after the uri :param data: dict of form data to be sent with the request :param params: dict of additional data to be sent as the request args :param proxies: str of the proxie to use :return: None
def write(self, outfile): """Write the livetime cube to a FITS file.""" hdu_pri = fits.PrimaryHDU() hdu_exp = self._create_exp_hdu(self.data) hdu_exp.name = 'EXPOSURE' hdu_exp_wt = self._create_exp_hdu(self._data_wt) hdu_exp_wt.name = 'WEIGHTED_EXPOSURE' cols = [Column(name='CTHETA_MIN', dtype='f4', data=self.costh_edges[:-1][::-1]), Column(name='CTHETA_MAX', dtype='f4', data=self.costh_edges[1:][::-1]), ] hdu_bnds = fits.table_to_hdu(Table(cols)) hdu_bnds.name = 'CTHETABOUNDS' hdu_gti = fits.table_to_hdu(self._tab_gti) hdu_gti.name = 'GTI' hdus = [hdu_pri, hdu_exp, hdu_exp_wt, hdu_bnds, hdu_gti] for hdu in hdus: hdu.header['TSTART'] = self.tstart hdu.header['TSTOP'] = self.tstop with fits.HDUList(hdus) as hdulist: hdulist.writeto(outfile, clobber=True)
Write the livetime cube to a FITS file.
def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs): """ Rasmussen suggests the use of a numerically stable positive definite matrix B Which has a positive diagonal elements and can be easily inverted :param K: Prior Covariance matrix evaluated at locations X :type K: NxN matrix :param W: Negative hessian at a point (diagonal matrix) :type W: Vector of diagonal values of Hessian (1xN) :returns: (W12BiW12, L_B, Li_W12) """ if not log_concave: #print "Under 1e-10: {}".format(np.sum(W < 1e-6)) W = np.clip(W, 1e-6, 1e+30) # For student-T we can clip this more intelligently. If the # objective has hardly changed, we can increase the clipping limit # by ((v+1)/v)/sigma2 # NOTE: when setting a parameter inside parameters_changed it will allways come to closed update circles!!! #W.__setitem__(W < 1e-6, 1e-6, update=False) # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur # If the likelihood is non-log-concave. We wan't to say that there is a negative variance # To cause the posterior to become less certain than the prior and likelihood, # This is a property only held by non-log-concave likelihoods if np.any(np.isnan(W)): raise ValueError('One or more element(s) of W is NaN') #W is diagonal so its sqrt is just the sqrt of the diagonal elements W_12 = np.sqrt(W) B = np.eye(K.shape[0]) + W_12*K*W_12.T L = jitchol(B) LiW12, _ = dtrtrs(L, np.diagflat(W_12), lower=1, trans=0) K_Wi_i = np.dot(LiW12.T, LiW12) # R = W12BiW12, in R&W p 126, eq 5.25 #here's a better way to compute the required matrix. # you could do the model finding witha backsub, instead of a dot... #L2 = L/W_12 #K_Wi_i_2 , _= dpotri(L2) #symmetrify(K_Wi_i_2) #compute vital matrices C = np.dot(LiW12, K) Ki_W_i = K - C.T.dot(C) I_KW_i = np.eye(K.shape[0]) - np.dot(K, K_Wi_i) logdet_I_KW = 2*np.sum(np.log(np.diag(L))) return K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i
Rasmussen suggests the use of a numerically stable positive definite matrix B Which has a positive diagonal elements and can be easily inverted :param K: Prior Covariance matrix evaluated at locations X :type K: NxN matrix :param W: Negative hessian at a point (diagonal matrix) :type W: Vector of diagonal values of Hessian (1xN) :returns: (W12BiW12, L_B, Li_W12)
def _RunActions(self, rule, client_id): """Run all the actions specified in the rule. Args: rule: Rule which actions are to be executed. client_id: Id of a client where rule's actions are to be executed. Returns: Number of actions started. """ actions_count = 0 for action in rule.actions: try: # Say this flow came from the foreman. token = self.token.Copy() token.username = "Foreman" if action.HasField("hunt_id"): if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id): logging.info( "Foreman: ignoring hunt %s on client %s: was started " "here before", client_id, action.hunt_id) else: logging.info("Foreman: Starting hunt %s on client %s.", action.hunt_id, client_id) flow_cls = registry.AFF4FlowRegistry.FlowClassByName( action.hunt_name) flow_cls.StartClients(action.hunt_id, [client_id]) actions_count += 1 else: flow.StartAFF4Flow( client_id=client_id, flow_name=action.flow_name, token=token, **action.argv.ToDict()) actions_count += 1 # There could be all kinds of errors we don't know about when starting the # flow/hunt so we catch everything here. except Exception as e: # pylint: disable=broad-except logging.exception("Failure running foreman action on client %s: %s", action.hunt_id, e) return actions_count
Run all the actions specified in the rule. Args: rule: Rule which actions are to be executed. client_id: Id of a client where rule's actions are to be executed. Returns: Number of actions started.
def GetNextWrittenEventSource(self):
    """Retrieves the next event source that was written after open.

    Returns:
      EventSource: event source or None if there are no newly written ones.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    if not self._storage_file:
        raise IOError('Unable to read from closed storage writer.')

    event_source = self._storage_file.GetEventSourceByIndex(
        self._written_event_source_index)

    if event_source:
        self._written_event_source_index += 1
    return event_source
Retrieves the next event source that was written after open. Returns: EventSource: event source or None if there are no newly written ones. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed.
def copy_to(self, destination):
    """
    Copies the file to the given destination. Returns a File
    object that represents the target file. `destination` must
    be a File or Folder object.
    """
    target = self.__get_destination__(destination)
    logger.info("Copying %s to %s" % (self, target))
    shutil.copy(self.path, unicode(destination))
    return target
Copies the file to the given destination. Returns a File object that represents the target file. `destination` must be a File or Folder object.
def factory(cls, note, fn=None): """Register a function as a provider. Function (name support is optional):: from jeni import Injector as BaseInjector from jeni import Provider class Injector(BaseInjector): pass @Injector.factory('echo') def echo(name=None): return name Registration can be a decorator or a direct method call:: Injector.factory('echo', echo) """ def decorator(f): provider = cls.factory_provider.bind(f) cls.register(note, provider) return f if fn is not None: decorator(fn) else: return decorator
Register a function as a provider. Function (name support is optional):: from jeni import Injector as BaseInjector from jeni import Provider class Injector(BaseInjector): pass @Injector.factory('echo') def echo(name=None): return name Registration can be a decorator or a direct method call:: Injector.factory('echo', echo)
def HashIt(self): """Finalizing function for the Fingerprint class. This method applies all the different hash functions over the previously specified different ranges of the input file, and computes the resulting hashes. After calling HashIt, the state of the object is reset to its initial state, with no fingers defined. Returns: An array of dicts, with each dict containing name of fingerprint type, names of hashes and values, and additional, type-dependent key / value pairs, such as an array of SignedData tuples for the PE/COFF fingerprint type. Raises: RuntimeError: when internal inconsistencies occur. """ while True: interval = self._GetNextInterval() if interval is None: break self.file.seek(interval.start, os.SEEK_SET) block = self.file.read(interval.end - interval.start) if len(block) != interval.end - interval.start: raise RuntimeError('Short read on file.') self._HashBlock(block, interval.start, interval.end) self._AdjustIntervals(interval.start, interval.end) results = [] for finger in self.fingers: res = {} leftover = finger.CurrentRange() if leftover: if (len(finger.ranges) > 1 or leftover.start != self.filelength or leftover.end != self.filelength): raise RuntimeError('Non-empty range remains.') res.update(finger.metadata) for hasher in finger.hashers: res[hasher.name] = hasher.digest() results.append(res) # Clean out things for a fresh start (on the same file object). self.fingers = [] # Make sure the results come back in 'standard' order, regardless of the # order in which fingers were added. Helps with reproducing test results. return sorted(results, key=lambda r: r['name'])
Finalizing function for the Fingerprint class. This method applies all the different hash functions over the previously specified different ranges of the input file, and computes the resulting hashes. After calling HashIt, the state of the object is reset to its initial state, with no fingers defined. Returns: An array of dicts, with each dict containing name of fingerprint type, names of hashes and values, and additional, type-dependent key / value pairs, such as an array of SignedData tuples for the PE/COFF fingerprint type. Raises: RuntimeError: when internal inconsistencies occur.
def short_title(self, key, value):
    """Populate the ``short_title`` key.

    Also populates the ``title_variants`` key through side effects.
    """
    short_title = value.get('a')
    title_variants = self.get('title_variants', [])

    if value.get('u'):
        short_title = value.get('u')
        title_variants.append(value.get('a'))

    self['title_variants'] = title_variants

    return short_title
Populate the ``short_title`` key. Also populates the ``title_variants`` key through side effects.
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): ''' Install the named fileset(s)/rpm package(s). name The name of the fileset or rpm package to be installed. refresh Whether or not to update the yum database before executing. Multiple Package Installation Options: pkgs A list of filesets and/or rpm packages to install. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. version Install a specific version of a fileset/rpm package. (Unused at present). test Verify that command functions correctly: Returns a dict containing the new fileset(s)/rpm package(s) names and versions: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base salt '*' pkg.install pkgs='["foo", "bar"]' ''' targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug('Removing these fileset(s)/rpm package(s) %s: %s', name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Install the fileset or rpm package(s) errors = [] for target in targets: filename = os.path.basename(target) if filename.endswith('.rpm'): if _is_installed_rpm(filename.split('.aix')[0]): continue cmdflags = ' -Uivh ' if test: cmdflags += ' --test' cmd = ['/usr/bin/rpm', cmdflags, target] out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') else: if _is_installed(target): continue cmd = '/usr/sbin/installp -acYXg' if test: cmd += 'p' cmd += ' -d ' dirpath = os.path.dirname(target) cmd += dirpath +' '+ filename out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') if 0 != out['retcode']: errors.append(out['stderr']) # Get a list of the packages after the uninstall __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problems encountered installing filesets(s)/package(s)', info={ 'changes': ret, 'errors': errors } ) # No error occurred if test: return 'Test succeeded.' return ret
Install the named fileset(s)/rpm package(s). name The name of the fileset or rpm package to be installed. refresh Whether or not to update the yum database before executing. Multiple Package Installation Options: pkgs A list of filesets and/or rpm packages to install. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. version Install a specific version of a fileset/rpm package. (Unused at present). test Verify that command functions correctly: Returns a dict containing the new fileset(s)/rpm package(s) names and versions: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base salt '*' pkg.install pkgs='["foo", "bar"]'
def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger, log_only_last: bool = False): """ Adds a log message and creates a recursive parsing plan. :param desired_type: :param filesystem_object: :param logger: :param log_only_last: a flag to only log the last part of the file path (default False) :return: """ logger.debug('(B) ' + get_parsing_plan_log_str(filesystem_object, desired_type, log_only_last=log_only_last, parser=self)) return AnyParser._RecursiveParsingPlan(desired_type, filesystem_object, self, logger)
Adds a log message and creates a recursive parsing plan. :param desired_type: :param filesystem_object: :param logger: :param log_only_last: a flag to only log the last part of the file path (default False) :return:
def CheckApproversForLabel(self, token, client_urn, requester, approvers, label): """Checks if requester and approvers have approval privileges for labels. Checks against list of approvers for each label defined in approvers.yaml to determine if the list of approvers is sufficient. Args: token: user token client_urn: ClientURN object of the client requester: username string of person requesting approval. approvers: list of username strings that have approved this client. label: label strings to check approval privs for. Returns: True if access is allowed, raises otherwise. """ auth = self.reader.GetAuthorizationForSubject(label) if not auth: # This label isn't listed in approvers.yaml return True if auth.requester_must_be_authorized: if not self.CheckPermissions(requester, label): raise access_control.UnauthorizedAccess( "User %s not in %s or groups:%s for %s" % (requester, auth.users, auth.groups, label), subject=client_urn, requested_access=token.requested_access) approved_count = 0 for approver in approvers: if self.CheckPermissions(approver, label) and approver != requester: approved_count += 1 if approved_count < auth.num_approvers_required: raise access_control.UnauthorizedAccess( "Found %s approvers for %s, needed %s" % (approved_count, label, auth.num_approvers_required), subject=client_urn, requested_access=token.requested_access) return True
Checks if requester and approvers have approval privileges for labels. Checks against list of approvers for each label defined in approvers.yaml to determine if the list of approvers is sufficient. Args: token: user token client_urn: ClientURN object of the client requester: username string of person requesting approval. approvers: list of username strings that have approved this client. label: label strings to check approval privs for. Returns: True if access is allowed, raises otherwise.
def deleteGenome(species, name) :
    """Removes a genome from the database"""

    printf('deleting genome (%s, %s)...' % (species, name))

    conf.db.beginTransaction()
    objs = []
    allGood = True
    try :
        genome = Genome_Raba(name = name, species = species.lower())
        objs.append(genome)

        pBar = ProgressBar(label = 'preparing')
        for typ in (Chromosome_Raba, Gene_Raba, Transcript_Raba, Exon_Raba, Protein_Raba) :
            pBar.update()
            f = RabaQuery(typ, namespace = genome._raba_namespace)
            f.addFilter({'genome' : genome})
            for e in f.iterRun() :
                objs.append(e)
        pBar.close()

        pBar = ProgressBar(nbEpochs = len(objs), label = 'deleting objects')
        for e in objs :
            pBar.update()
            e.delete()
        pBar.close()

    except KeyError as e :
        raise KeyError("\tWARNING, couldn't remove genome from the db, maybe it's not there: ", e)

    printf('\tdeleting folder')
    try :
        shutil.rmtree(conf.getGenomeSequencePath(species, name))
    except OSError as e:
        printf('\tWARNING, Unable to delete folder: ', e)
        allGood = False

    conf.db.endTransaction()
    return allGood
Removes a genome from the database
def street_address(self): """ :example '791 Crist Parks' """ pattern = self.random_element(self.street_address_formats) return self.generator.parse(pattern)
:example '791 Crist Parks'
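A minimal usage sketch, hedged: this provider method is normally exercised through the faker package's top-level Faker class rather than called directly; the output is random on each call.

from faker import Faker

fake = Faker()
print(fake.street_address())   # e.g. '791 Crist Parks'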
def _get_audio_sample_rate(self, audio_abs_path): """ Parameters ---------- audio_abs_path : str Returns ------- sample_rate : int """ sample_rate = int( subprocess.check_output( ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}'""" ).format(audio_abs_path, "Sample Rate"), shell=True, universal_newlines=True).rstrip()) return sample_rate
Parameters ---------- audio_abs_path : str Returns ------- sample_rate : int
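A hedged standalone sketch of the same idea: instead of chaining grep/awk through the shell, run `sox --i` (assumed to be on PATH) and parse the "Sample Rate" line in Python.

import subprocess

def sample_rate_via_sox(audio_abs_path):
    # Parse `sox --i <file>` output directly, no shell pipeline needed.
    info = subprocess.check_output(["sox", "--i", audio_abs_path],
                                   universal_newlines=True)
    for line in info.splitlines():
        if line.startswith("Sample Rate"):
            return int(line.split(":", 1)[1].strip())
    raise ValueError("Sample Rate not found in sox output")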
def verify_oauth2_token(id_token, request, audience=None): """Verifies an ID Token issued by Google's OAuth 2.0 authorization server. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your application's OAuth 2.0 client ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token. """ return verify_token( id_token, request, audience=audience, certs_url=_GOOGLE_OAUTH2_CERTS_URL)
Verifies an ID Token issued by Google's OAuth 2.0 authorization server. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your application's OAuth 2.0 client ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token.
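A hedged usage sketch assuming the google-auth package layout (google.oauth2.id_token and google.auth.transport.requests); the token string and client ID below are placeholders.

from google.oauth2 import id_token
from google.auth.transport import requests as google_requests

encoded_token = "<encoded-id-token>"                       # placeholder
CLIENT_ID = "1234-example.apps.googleusercontent.com"      # placeholder audience
claims = id_token.verify_oauth2_token(encoded_token, google_requests.Request(),
                                      audience=CLIENT_ID)
print(claims["sub"], claims.get("email"))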
def _add_conversation(self, conversation): """ Add the conversation and fire the :meth:`on_conversation_added` event. :param conversation: The conversation object to add. :type conversation: :class:`~.AbstractConversation` The conversation is added to the internal list of conversations which can be queried at :attr:`conversations`. The :meth:`on_conversation_added` event is fired. In addition, the :class:`ConversationService` subscribes to the :meth:`~.AbstractConversation.on_exit` event to remove the conversation from the list automatically. There is no need to remove a conversation from the list explicitly. """ handler = functools.partial( self._handle_conversation_exit, conversation ) tokens = [] def linked_token(signal, handler): return signal, signal.connect(handler) tokens.append(linked_token(conversation.on_exit, handler)) tokens.append(linked_token(conversation.on_failure, handler)) tokens.append(linked_token(conversation.on_message, functools.partial( self.on_message, conversation, ))) self._conversation_meta[conversation] = ( tokens, ) self._conversation_map[conversation.jid] = conversation self.on_conversation_added(conversation)
Add the conversation and fire the :meth:`on_conversation_added` event. :param conversation: The conversation object to add. :type conversation: :class:`~.AbstractConversation` The conversation is added to the internal list of conversations which can be queried at :attr:`conversations`. The :meth:`on_conversation_added` event is fired. In addition, the :class:`ConversationService` subscribes to the :meth:`~.AbstractConversation.on_exit` event to remove the conversation from the list automatically. There is no need to remove a conversation from the list explicitly.
def is_ge(dicom_input): """ Use this function to detect if a dicom series is a GE dataset :param dicom_input: list with dicom objects """ # read dicom header header = dicom_input[0] if 'Manufacturer' not in header or 'Modality' not in header: return False # we try generic conversion in these cases # check if Modality is mr if header.Modality.upper() != 'MR': return False # check if manufacturer is GE if 'GE MEDICAL SYSTEMS' not in header.Manufacturer.upper(): return False return True
Use this function to detect if a dicom series is a GE dataset :param dicom_input: list with dicom objects
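A hedged usage sketch: it assumes pydicom is available and a directory of .dcm files; the path is a placeholder and the surrounding conversion helpers are not shown.

import os
import pydicom

dicom_dir = "/path/to/series"   # placeholder
dicom_input = [pydicom.dcmread(os.path.join(dicom_dir, f))
               for f in os.listdir(dicom_dir) if f.endswith(".dcm")]
if is_ge(dicom_input):
    print("GE MR dataset detected")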
def add_filter(self, property_name, operator, value): """Filter the query based on a property name, operator and a value. Expressions take the form of:: .add_filter('<property>', '<operator>', <value>) where property is a property stored on the entity in the datastore and operator is one of ``OPERATORS`` (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``):: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'James') >>> query.add_filter('age', '>', 50) :type property_name: str :param property_name: A property name. :type operator: str :param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``. :type value: :class:`int`, :class:`str`, :class:`bool`, :class:`float`, :class:`NoneType`, :class:`datetime.datetime`, :class:`google.cloud.datastore.key.Key` :param value: The value to filter on. :raises: :class:`ValueError` if ``operation`` is not one of the specified values, or if a filter names ``'__key__'`` but passes an invalid value (a key is required). """ if self.OPERATORS.get(operator) is None: error_message = 'Invalid expression: "%s"' % (operator,) choices_message = "Please use one of: =, <, <=, >, >=." raise ValueError(error_message, choices_message) if property_name == "__key__" and not isinstance(value, Key): raise ValueError('Invalid key: "%s"' % value) self._filters.append((property_name, operator, value))
Filter the query based on a property name, operator and a value. Expressions take the form of:: .add_filter('<property>', '<operator>', <value>) where property is a property stored on the entity in the datastore and operator is one of ``OPERATORS`` (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``):: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'James') >>> query.add_filter('age', '>', 50) :type property_name: str :param property_name: A property name. :type operator: str :param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``. :type value: :class:`int`, :class:`str`, :class:`bool`, :class:`float`, :class:`NoneType`, :class:`datetime.datetime`, :class:`google.cloud.datastore.key.Key` :param value: The value to filter on. :raises: :class:`ValueError` if ``operation`` is not one of the specified values, or if a filter names ``'__key__'`` but passes an invalid value (a key is required).
def get_highest_build_tool(sdk_version=None):
    """
    Gets the highest build tool version based on the major sdk version.

    :param sdk_version(int) - sdk version to be used as the major build tool version context.

    Returns:
        A string containing the build tool version (default is 23.0.2 if none is found)
    """
    if sdk_version is None:
        sdk_version = config.sdk_version

    android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME'))
    build_tool_folder = '%s/build-tools' % android_home
    folder_list = os.listdir(build_tool_folder)
    versions = [folder for folder in folder_list if folder.startswith('%s.' % sdk_version)]
    if len(versions) == 0:
        return config.build_tool_version
    # os.listdir() order is arbitrary, so sort numerically before taking the highest.
    return sorted(versions, key=lambda v: [int(p) for p in v.split('.') if p.isdigit()])[-1]
Gets the highest build tool version based on the major sdk version. :param sdk_version(int) - sdk version to be used as the major build tool version context. Returns: A string containing the build tool version (default is 23.0.2 if none is found)
def get_login_info():
    """
    Return a tuple (connections, defaults): a dict of Connectors keyed by
    environment name, and a dict of default settings for all the environments.
    """
    connections = {}
    _defaults = {}
    _defaults['start_in'] = ''
    _defaults['rpm_sign_plugin'] = ''

    config = _config_file()

    _config_test(config)

    juicer.utils.Log.log_debug("Loading connection information:")
    for section in config.sections():
        cfg = dict(config.items(section))

        connections[section] = Connectors(cfg)

        if 'start_in' in cfg:
            _defaults['start_in'] = cfg['start_in']

        if 'rpm_sign_plugin' in cfg:
            _defaults['rpm_sign_plugin'] = cfg['rpm_sign_plugin']

        juicer.utils.Log.log_debug("[%s] username: %s, base_url: %s" % \
                                       (section, \
                                        cfg['username'], \
                                        cfg['base_url']))

    _defaults['environments'] = config.sections()

    return (connections, _defaults)
Return a tuple (connections, defaults): a dict of Connectors keyed by environment name, and a dict of default settings for all the environments.
def migrate_window(bg): "Take a pythoncard background resource and convert to a gui2py window" ret = {} for k, v in bg.items(): if k == 'type': v = WIN_MAP[v]._meta.name elif k == 'menubar': menus = v['menus'] v = [migrate_control(menu) for menu in menus] elif k == 'components': v = [migrate_control(comp) for comp in v] else: k = SPEC_MAP['Widget'].get(k, k) ret[k] = v return ret
Take a pythoncard background resource and convert to a gui2py window
def get_tissue_specificities(cls, entry): """ get list of :class:`pyuniprot.manager.models.TissueSpecificity` object from XML node entry :param entry: XML node entry :return: models.TissueSpecificity object """ tissue_specificities = [] query = "./comment[@type='tissue specificity']/text" for ts in entry.iterfind(query): tissue_specificities.append(models.TissueSpecificity(comment=ts.text)) return tissue_specificities
get list of :class:`pyuniprot.manager.models.TissueSpecificity` object from XML node entry :param entry: XML node entry :return: models.TissueSpecificity object
def get_meta(self): """Get the metadata object for this Point Returns a [PointMeta](PointMeta.m.html#IoticAgent.IOT.PointMeta.PointMeta) object - OR - Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure """ rdf = self.get_meta_rdf(fmt='n3') return PointMeta(self, rdf, self._client.default_lang, fmt='n3')
Get the metadata object for this Point Returns a [PointMeta](PointMeta.m.html#IoticAgent.IOT.PointMeta.PointMeta) object - OR - Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure
def logs(self): """returns an object to work with the site logs""" if self._resources is None: self.__init() if "logs" in self._resources: url = self._url + "/logs" return _logs.Log(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
returns an object to work with the site logs
def __send_log_set_exc_and_wait(self, msg, level=None, wait_seconds=CONN_RETRY_DELAY_SECONDS): """To be called in exception context only. msg - message to log level - logging level. If not specified, ERROR unless it is a repeated failure in which case DEBUG. If specified, the given level will always be used. wait_seconds - how long to pause for (so retry is not triggered immediately) """ logger.log( ((logging.DEBUG if self.__send_exc else logging.ERROR) if level is None else level), msg, exc_info=DEBUG_ENABLED ) self.__send_exc_time = monotonic() self.__send_exc = exc_info()[1] self.__end.wait(wait_seconds)
To be called in exception context only. msg - message to log level - logging level. If not specified, ERROR unless it is a repeated failure in which case DEBUG. If specified, the given level will always be used. wait_seconds - how long to pause for (so retry is not triggered immediately)
def fence_status_encode(self, breach_status, breach_count, breach_type, breach_time): ''' Status of geo-fencing. Sent in extended status stream when fencing enabled breach_status : 0 if currently inside fence, 1 if outside (uint8_t) breach_count : number of fence breaches (uint16_t) breach_type : last breach type (see FENCE_BREACH_* enum) (uint8_t) breach_time : time of last breach in milliseconds since boot (uint32_t) ''' return MAVLink_fence_status_message(breach_status, breach_count, breach_type, breach_time)
Status of geo-fencing. Sent in extended status stream when fencing enabled breach_status : 0 if currently inside fence, 1 if outside (uint8_t) breach_count : number of fence breaches (uint16_t) breach_type : last breach type (see FENCE_BREACH_* enum) (uint8_t) breach_time : time of last breach in milliseconds since boot (uint32_t)
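A hedged usage sketch: `mav` stands for a pymavlink MAVLink encoder object (for example `connection.mav` on a mavutil connection); the field values are illustrative only.

msg = mav.fence_status_encode(
    breach_status=1,      # currently outside the fence
    breach_count=3,       # three breaches so far
    breach_type=2,        # e.g. a FENCE_BREACH_* enum value
    breach_time=120000)   # ms since boot of the last breach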
def add(self, *l): '''add inner to outer Args: *l: element that is passed into Inner init ''' for a in flatten(l): self._add([self.Inner(a)], self.l)
add inner to outer Args: *l: element that is passed into Inner init
def save(self,callit="misc",closeToo=True,fullpath=False):
    """save the existing figure. Closes it afterwards unless closeToo is False."""
    if fullpath is False:
        fname=self.abf.outPre+"plot_"+callit+".jpg"
    else:
        fname=callit
    if not os.path.exists(os.path.dirname(fname)):
        os.mkdir(os.path.dirname(fname))
    plt.savefig(fname)
    self.log.info("saved [%s]",os.path.basename(fname))
    if closeToo:
        plt.close()
save the existing figure. Closes it afterwards unless closeToo is False.
def run_plugins(context_obj, boto3_clients): """ Executes all loaded plugins designated for the service calling the function. Args: context_obj (obj:EFContext): The EFContext object created by the service. boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients() """ def print_if_verbose(message): if context_obj.verbose: print(message) service_name = os.path.basename(sys.argv[0]).replace(".py", "") try: import plugins except ImportError: print_if_verbose("no plugins detected.") return else: for plugin_importer, plugin_name, plugin_ispkg in pkgutil.iter_modules(plugins.__path__): if plugin_ispkg: plugin_package = importlib.import_module("plugins.{}".format(plugin_name)) for importer, modname, ispkg in pkgutil.iter_modules(plugin_package.__path__): plugin_module = importlib.import_module("plugins.{}.{}".format(plugin_name, modname)) for name, obj in inspect.getmembers(plugin_module): if inspect.isclass(obj) and obj.__name__ == "EFPlugin": plugin_class = getattr(plugin_module, name) plugin_instance = plugin_class(context=context_obj, clients=boto3_clients) if plugin_instance.service == service_name: print_if_verbose("plugin '{}' loaded".format(plugin_name)) if not context_obj.commit: print_if_verbose("dryrun: skipping plugin execution.") else: try: plugin_instance.run() except AttributeError: print("error executing plugin '{}'".format(modname))
Executes all loaded plugins designated for the service calling the function. Args: context_obj (obj:EFContext): The EFContext object created by the service. boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
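A hedged sketch of the plugin layout this loader expects: a top-level `plugins` package of sub-packages, each module defining a class named `EFPlugin` that takes `context` and `clients` keyword arguments, exposes a `service` attribute, and implements `run()`. The file path and service name below are illustrative.

# plugins/example_plugin/hello.py  (illustrative layout, hypothetical names)
class EFPlugin(object):
    def __init__(self, context, clients):
        self.context = context
        self.clients = clients
        self.service = "ef-cf"        # must match the name of the calling script

    def run(self):
        print("hello from the example plugin")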
def delete_userpass(self, username, mount_point='userpass'): """DELETE /auth/<mount point>/users/<username> :param username: :type username: :param mount_point: :type mount_point: :return: :rtype: """ return self._adapter.delete('/v1/auth/{}/users/{}'.format(mount_point, username))
DELETE /auth/<mount point>/users/<username> :param username: :type username: :param mount_point: :type mount_point: :return: :rtype:
def setup_tree(ctx, verbose=None, root=None, tree_dir=None, modules_dir=None):
    ''' Sets up the SDSS tree environment '''

    print('Setting up the tree')
    ctx.run('python bin/setup_tree.py -t {0} -r {1} -m {2}'.format(tree_dir, root, modules_dir))
Sets up the SDSS tree environment
def GetFieldValuesTuple(self, trip_id):
    """Return a tuple that outputs a row of _FIELD_NAMES to be written to a
    GTFS file.

    Arguments:
        trip_id: The trip_id of the trip to which this StopTime corresponds.
                 It must be provided, as it is not stored in StopTime.
    """
    result = []
    for fn in self._FIELD_NAMES:
        if fn == 'trip_id':
            result.append(trip_id)
        else:
            # Since we'll be writing to an output file, we want empty values
            # to be output as an empty string
            result.append(getattr(self, fn) or '')
    return tuple(result)
Return a tuple that outputs a row of _FIELD_NAMES to be written to a GTFS file. Arguments: trip_id: The trip_id of the trip to which this StopTime corresponds. It must be provided, as it is not stored in StopTime.
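A hedged usage sketch: `stop_times` and `trip_id` are assumed to come from the surrounding transitfeed schedule objects; the output file name follows the GTFS convention.

import csv

with open("stop_times.txt", "w", newline="") as f:
    writer = csv.writer(f)
    for stop_time in stop_times:
        writer.writerow(stop_time.GetFieldValuesTuple(trip_id))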
def FrameworkDir32(self): """ Microsoft .NET Framework 32bit directory. """ # Default path guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework') # Try to get path from registry, if fail use default path return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
Microsoft .NET Framework 32bit directory.
def getTypeWidth(self, dtype: HdlType, do_eval=False) -> Tuple[int, Union[int, RtlSignal], bool]: """ :see: doc of method on parent class """ width = dtype.width if isinstance(width, int): widthStr = str(width) else: widthStr = self.getExprVal(width, do_eval=do_eval) return width, widthStr, False
:see: doc of method on parent class
def get_source_lane(fork_process, pipeline_list): """Returns the lane of the last process that matches fork_process Parameters ---------- fork_process : list List of processes before the fork. pipeline_list : list List with the pipeline connection dictionaries. Returns ------- int Lane of the last process that matches fork_process """ fork_source = fork_process[-1] fork_sig = [x for x in fork_process if x != "__init__"] for position, p in enumerate(pipeline_list[::-1]): if p["output"]["process"] == fork_source: lane = p["output"]["lane"] logger.debug("Possible source match found in position {} in lane" " {}".format(position, lane)) lane_sequence = [x["output"]["process"] for x in pipeline_list if x["output"]["lane"] == lane] logger.debug("Testing lane sequence '{}' against fork signature" " '{}'".format(lane_sequence, fork_sig)) if lane_sequence == fork_sig: return p["output"]["lane"] return 0
Returns the lane of the last process that matches fork_process Parameters ---------- fork_process : list List of processes before the fork. pipeline_list : list List with the pipeline connection dictionaries. Returns ------- int Lane of the last process that matches fork_process
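A worked example with hypothetical process names (real connection dicts carry more keys, only "output" is consulted here): the fork signature ['trimmomatic', 'spades'] matches the sequence of lane 1, so lane 1 is returned.

fork_process = ["__init__", "trimmomatic", "spades"]
pipeline_list = [
    {"output": {"process": "trimmomatic", "lane": 1}},
    {"output": {"process": "spades", "lane": 1}},
]
get_source_lane(fork_process, pipeline_list)   # -> 1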
def _nick(self, nick): """ Sets your nick. Required arguments: * nick - New nick. """ with self.lock: self.send('NICK :%s' % nick) if self.readable(): msg = self._recv(expected_replies='NICK') if msg[0] == 'NICK': if not self.hide_called_events: self.stepback() for channel in self.channels: if 'USERS' in self.channels[channel]: priv_level = \ self.channels[channel]['USERS'][self.current_nick] del self.channels[channel]['USERS'][self.current_nick] self.channels[channel]['USERS'][nick] = priv_level self.current_nick = nick
Sets your nick. Required arguments: * nick - New nick.
def list_lbaas_loadbalancers(self, retrieve_all=True, **_params): """Fetches a list of all lbaas_loadbalancers for a project.""" return self.list('loadbalancers', self.lbaas_loadbalancers_path, retrieve_all, **_params)
Fetches a list of all lbaas_loadbalancers for a project.
def get_nameserver_detail_output_show_nameserver_nameserver_cascaded(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_nameserver_detail = ET.Element("get_nameserver_detail") config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, "output") show_nameserver = ET.SubElement(output, "show-nameserver") nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid") nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_cascaded = ET.SubElement(show_nameserver, "nameserver-cascaded") nameserver_cascaded.text = kwargs.pop('nameserver_cascaded') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def greenhall_sz(t, F, alpha, d): """ Eqn (9) from Greenhall2004 """ if d == 1: a = 2*greenhall_sx(t, F, alpha) b = greenhall_sx(t-1.0, F, alpha) c = greenhall_sx(t+1.0, F, alpha) return a-b-c elif d == 2: a = 6*greenhall_sx(t, F, alpha) b = 4*greenhall_sx(t-1.0, F, alpha) c = 4*greenhall_sx(t+1.0, F, alpha) dd = greenhall_sx(t-2.0, F, alpha) e = greenhall_sx(t+2.0, F, alpha) return a-b-c+dd+e elif d == 3: a = 20.0*greenhall_sx(t, F, alpha) b = 15.0*greenhall_sx(t-1.0, F, alpha) c = 15.0*greenhall_sx(t+1.0, F, alpha) dd = 6.0*greenhall_sx(t-2.0, F, alpha) e = 6.0*greenhall_sx(t+2.0, F, alpha) f = greenhall_sx(t-3.0, F, alpha) g = greenhall_sx(t+3.0, F, alpha) return a-b-c+dd+e-f-g assert(0)
Eqn (9) from Greenhall2004
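The three hard-coded cases above follow a single binomial pattern; a sketch of the equivalent closed form, assuming greenhall_sx from the same module and Python 3.8+ for math.comb:

from math import comb

def greenhall_sz_general(t, F, alpha, d):
    # sz(t) = sum_{k=-d..d} (-1)^k * C(2d, d+k) * sx(t+k)
    # d=1 gives (2, -1, -1), d=2 gives (6, -4, -4, 1, 1), d=3 gives (20, -15, -15, 6, 6, -1, -1)
    return sum((-1) ** k * comb(2 * d, d + k) * greenhall_sx(t + k, F, alpha)
               for k in range(-d, d + 1))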
def validate(method): """ Config option name value validator decorator. """ # Name error template name_error = 'configuration option "{}" is not supported' @functools.wraps(method) def validator(self, name, *args): if name not in self.allowed_opts: raise ValueError(name_error.format(name)) return method(self, name, *args) return validator
Config option name value validator decorator.
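A hedged usage sketch: a minimal config class whose setter is guarded by the decorator above; the class and option names are hypothetical.

class Config(object):
    allowed_opts = ('host', 'port')

    @validate
    def set(self, name, value):
        setattr(self, '_' + name, value)

cfg = Config()
cfg.set('host', 'localhost')       # accepted
try:
    cfg.set('hosst', 'oops')       # rejected by the validator
except ValueError as exc:
    print(exc)                     # configuration option "hosst" is not supported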
def main(): """Run playbook""" for flag in ('--check',): if flag not in sys.argv: sys.argv.append(flag) obj = PlaybookCLI(sys.argv) obj.parse() obj.run()
Run playbook
def codestr2rst(codestr, lang='python'): """Return reStructuredText code block from code string""" code_directive = "\n.. code-block:: {0}\n\n".format(lang) indented_block = indent(codestr, ' ' * 4) return code_directive + indented_block
Return reStructuredText code block from code string
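A hedged standalone sketch: the helper above relies on an `indent` utility from its own module; textwrap.indent gives the same effect here.

from textwrap import indent

def codestr2rst_standalone(codestr, lang='python'):
    # Wrap a code string in an rST code-block directive with 4-space indentation.
    return "\n.. code-block:: {0}\n\n".format(lang) + indent(codestr, ' ' * 4)

print(codestr2rst_standalone("x = 1\nprint(x)"))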
def p_statement_border(p): """ statement : BORDER expr """ p[0] = make_sentence('BORDER', make_typecast(TYPE.ubyte, p[2], p.lineno(1)))
statement : BORDER expr
def auth(self, username, password): """Authentication of a user""" binddn = self._get_user(self._byte_p2(username), NO_ATTR) if binddn is not None: ldap_client = self._connect() try: ldap_client.simple_bind_s( self._byte_p2(binddn), self._byte_p2(password) ) except ldap.INVALID_CREDENTIALS: ldap_client.unbind_s() return False ldap_client.unbind_s() return True else: return False
Authentication of a user
def _currentLineExtraSelections(self):
    """QTextEdit.ExtraSelection, which highlights the current line
    """
    if self._currentLineColor is None:
        return []

    def makeSelection(cursor):
        selection = QTextEdit.ExtraSelection()
        selection.format.setBackground(self._currentLineColor)
        selection.format.setProperty(QTextFormat.FullWidthSelection, True)
        cursor.clearSelection()
        selection.cursor = cursor
        return selection

    rectangularSelectionCursors = self._rectangularSelection.cursors()
    if rectangularSelectionCursors:
        return [makeSelection(cursor) \
                    for cursor in rectangularSelectionCursors]
    else:
        return [makeSelection(self.textCursor())]
QTextEdit.ExtraSelection, which highlights the current line
def starlike(x): "weird things happen to cardinality when working with * in comma-lists. this detects when to do that." # todo: is '* as name' a thing? return isinstance(x,sqparse2.AsterX) or isinstance(x,sqparse2.AttrX) and isinstance(x.attr,sqparse2.AsterX)
weird things happen to cardinality when working with * in comma-lists. this detects when to do that.
def map_list(key_map, *inputs, copy=False, base=None): """ Returns a new dict. :param key_map: A list that maps the dict keys ({old key: new key} :type key_map: list[str | dict | list] :param inputs: A sequence of data. :type inputs: iterable | dict | int | float | list | tuple :param copy: If True, it returns a deepcopy of input values. :type copy: bool, optional :param base: Base dict where combine multiple dicts in one. :type base: dict, optional :return: A unique dict with new values. :rtype: dict Example:: >>> key_map = [ ... 'a', ... {'a': 'c'}, ... [ ... 'a', ... {'a': 'd'} ... ] ... ] >>> inputs = ( ... 2, ... {'a': 3, 'b': 2}, ... [ ... 1, ... {'a': 4} ... ] ... ) >>> d = map_list(key_map, *inputs) >>> sorted(d.items()) [('a', 1), ('b', 2), ('c', 3), ('d', 4)] """ d = {} if base is None else base # Initialize empty dict. for m, v in zip(key_map, inputs): if isinstance(m, dict): map_dict(m, v, base=d) # Apply a map dict. elif isinstance(m, list): map_list(m, *v, base=d) # Apply a map list. else: d[m] = v # Apply map. return combine_dicts(copy=copy, base=d)
Returns a new dict. :param key_map: A list that maps the dict keys ({old key: new key} :type key_map: list[str | dict | list] :param inputs: A sequence of data. :type inputs: iterable | dict | int | float | list | tuple :param copy: If True, it returns a deepcopy of input values. :type copy: bool, optional :param base: Base dict where combine multiple dicts in one. :type base: dict, optional :return: A unique dict with new values. :rtype: dict Example:: >>> key_map = [ ... 'a', ... {'a': 'c'}, ... [ ... 'a', ... {'a': 'd'} ... ] ... ] >>> inputs = ( ... 2, ... {'a': 3, 'b': 2}, ... [ ... 1, ... {'a': 4} ... ] ... ) >>> d = map_list(key_map, *inputs) >>> sorted(d.items()) [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
def partsphere(self, x): """Sphere (squared norm) test objective function""" self.counter += 1 # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0] dim = len(x) x = array([x[i % dim] for i in xrange(2 * dim)]) N = 8 i = self.counter % dim # f = sum(x[i:i + N]**2) f = sum(x[np.random.randint(dim, size=N)]**2) return f
Sphere (squared norm) test objective function
def create(self, **kwargs): """ returns the JSON object {'embedded': { 'sign_url': 'https://www.hellosign.com/editor/embeddedSign?signature_id={signature_id}&token={token}', 'expires_at': {timestamp} }} """ auth = None if 'auth' in kwargs: auth = kwargs['auth'] del(kwargs['auth']) self._url = '%s%s%s' % (self.base_uri, 'embedded/sign_url/', self.signature_id) return self.embedded.sign_url.get(auth=auth, **kwargs)
returns the JSON object {'embedded': { 'sign_url': 'https://www.hellosign.com/editor/embeddedSign?signature_id={signature_id}&token={token}', 'expires_at': {timestamp} }}
def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False,
                 enable_verbose=True):
    """A stable version of dump_pk, silently overwrites any existing file.

    When your program is interrupted, you lose nothing. Typically if your
    program is interrupted for any reason, it only leaves an incomplete file.
    If you use replace=True, then you also lose your old file.

    So a better way is to:

    1. dump pickle to a temp file.
    2. when it's done, rename it to #abspath, overwrite the old one.

    This way guarantees an atomic write.

    :param obj: Picklable Python Object.

    :param abspath: ``save as`` path, file extension has to be ``.pickle`` or
        ``.gz`` (for compressed Pickle).
    :type abspath: string

    :param pk_protocol: (default your python version) use 2, to make a
        py2.x/3.x compatible pickle file. But 3 is faster.
    :type pk_protocol: int

    :param compress: (default False) If ``True``, use GNU program gzip to
        compress the Pickle file. Disk usage can be greatly reduced. But you
        have to use :func:`load_pk(abspath, compress=True)<load_pk>` when loading.
    :type compress: boolean

    :param enable_verbose: (default True) Trigger for message.
    :type enable_verbose: boolean

    Usage::

        >>> from weatherlab.lib.dataIO.pk import safe_dump_pk
        >>> pk = {"a": 1, "b": 2}
        >>> safe_dump_pk(pk, "test.pickle")
        Dumping to test.pickle...
            Complete! Elapse 0.001763 sec

    **中文文档**

    在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
    写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
    都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再将文件重命名,
    覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
    影响原文件。

    参数列表

    :param obj: 可Pickle化的Python对象
    :param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz``, 其中gz用于被压
        缩的Pickle
    :type abspath: ``字符串``

    :param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被
        py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。
    :type pk_protocol: ``整数``

    :param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。
        通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
        :func:`load_pk(abspath, compress=True)<load_pk>`.
    :type compress: ``布尔值``

    :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
    :type enable_verbose: ``布尔值``
    """
    abspath = str(abspath)  # try to stringify the path
    temp_abspath = "%s.tmp" % abspath
    dump_pk(obj, temp_abspath, pk_protocol=pk_protocol,
            replace=True, compress=compress, enable_verbose=enable_verbose)
    shutil.move(temp_abspath, abspath)
A stable version of dump_pk, silently overwrites any existing file.

When your program is interrupted, you lose nothing. Typically if your
program is interrupted for any reason, it only leaves an incomplete file.
If you use replace=True, then you also lose your old file.

So a better way is to:

1. dump pickle to a temp file.
2. when it's done, rename it to #abspath, overwrite the old one.

This way guarantees an atomic write.

:param obj: Picklable Python Object.

:param abspath: ``save as`` path, file extension has to be ``.pickle`` or
    ``.gz`` (for compressed Pickle).
:type abspath: string

:param pk_protocol: (default your python version) use 2, to make a
    py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int

:param compress: (default False) If ``True``, use GNU program gzip to
    compress the Pickle file. Disk usage can be greatly reduced. But you
    have to use :func:`load_pk(abspath, compress=True)<load_pk>` when loading.
:type compress: boolean

:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean

Usage::

    >>> from weatherlab.lib.dataIO.pk import safe_dump_pk
    >>> pk = {"a": 1, "b": 2}
    >>> safe_dump_pk(pk, "test.pickle")
    Dumping to test.pickle...
        Complete! Elapse 0.001763 sec

**中文文档**

在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再将文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。

参数列表

:param obj: 可Pickle化的Python对象
:param abspath: 写入文件的路径。扩展名必须为 ``.pickle`` 或 ``.gz``, 其中gz用于被压
    缩的Pickle
:type abspath: ``字符串``

:param pk_protocol: (默认 等于你Python的大版本号) 使用2可以使得保存的文件能被
    py2.x/3.x都能读取。但是协议3的速度更快, 体积更小, 性能更高。
:type pk_protocol: ``整数``

:param compress: (默认 False) 当为 ``True`` 时, 使用开源压缩标准gzip压缩Pickle文件。
    通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
    :func:`load_pk(abspath, compress=True)<load_pk>`.
:type compress: ``布尔值``

:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
def get_firewall_rule(self, datacenter_id, server_id, nic_id, firewall_rule_id): """ Retrieves a single firewall rule by ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param nic_id: The unique ID of the NIC. :type nic_id: ``str`` :param firewall_rule_id: The unique ID of the firewall rule. :type firewall_rule_id: ``str`` """ response = self._perform_request( '/datacenters/%s/servers/%s/nics/%s/firewallrules/%s' % ( datacenter_id, server_id, nic_id, firewall_rule_id)) return response
Retrieves a single firewall rule by ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` :param nic_id: The unique ID of the NIC. :type nic_id: ``str`` :param firewall_rule_id: The unique ID of the firewall rule. :type firewall_rule_id: ``str``
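A hedged usage sketch assuming the ProfitBricks SDK client class; the credentials and UUIDs are placeholders, and the shape of the returned dict follows the API's usual 'properties' layout.

from profitbricks.client import ProfitBricksService

client = ProfitBricksService(username='user@example.com', password='secret')
rule = client.get_firewall_rule(
    datacenter_id='dc-uuid',
    server_id='srv-uuid',
    nic_id='nic-uuid',
    firewall_rule_id='fw-uuid')
print(rule['properties']['name'])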
def dug(obj, key, value): """ Inverse of dig: recursively set a value in a dictionary, using dot notation. >>> test = {"a":{"b":{"c":1}}} >>> dug(test, "a.b.c", 10) >>> test {'a': {'b': {'c': 10}}} """ array = key.split(".") return _dug(obj, value, *array)
Inverse of dig: recursively set a value in a dictionary, using dot notation. >>> test = {"a":{"b":{"c":1}}} >>> dug(test, "a.b.c", 10) >>> test {'a': {'b': {'c': 10}}}
def enable_argscope_for_module(module, log_shape=True): """ Overwrite all functions of a given module to support argscope. Note that this function monkey-patches the module and therefore could have unexpected consequences. It has been only tested to work well with ``tf.layers`` module. Example: .. code-block:: python import tensorflow as tf enable_argscope_for_module(tf.layers) Args: log_shape (bool): print input/output shapes of each function. """ if is_tfv2() and module == tf.layers: module = tf.compat.v1.layers for name, obj in getmembers(module): if isfunction(obj): setattr(module, name, enable_argscope_for_function(obj, log_shape=log_shape))
Overwrite all functions of a given module to support argscope. Note that this function monkey-patches the module and therefore could have unexpected consequences. It has been only tested to work well with ``tf.layers`` module. Example: .. code-block:: python import tensorflow as tf enable_argscope_for_module(tf.layers) Args: log_shape (bool): print input/output shapes of each function.