code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def diff_ft(self, xt, yt):
    """First and second derivatives (wrt x_t) of log-density of Y_t | X_t = xt.

    Assumes a log-linear observation model with rate exp(a + b @ xt)
    — TODO confirm against the enclosing class definition.

    Parameters
    ----------
    xt : ndarray, shape (dx,)
        State value at which to differentiate.
    yt : ndarray
        Observation; flattened to shape (dy,).

    Returns
    -------
    grad : ndarray, shape (dx,)
        Gradient of the log-density.
    hess : ndarray, shape (dx, dx)
        Hessian of the log-density.
    """
    a, b = self.a, self.b
    ex = np.exp(a + np.matmul(b, xt))  # shape (dy,)
    # grad = sum_k (y_k - ex_k) * b[k, :]  ==  (y - ex) @ b
    grad = np.matmul(yt.flatten() - ex, b)
    # hess = -sum_k ex_k * outer(b[k, :], b[k, :]); vectorized form of the
    # original per-row loop: scale columns of b.T by ex, multiply by b.
    hess = -np.matmul(b.T * ex, b)
    return grad, hess
First and second derivatives (wrt x_t) of log-density of Y_t|X_t=xt
def direct_normal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `direct_normal_radiation`

    Args:
        value (float): value for IDD Field `direct_normal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    # None means "missing": store it unchecked.
    if value is None:
        self._direct_normal_radiation = None
        return
    try:
        checked = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `direct_normal_radiation`'.format(value))
    if checked < 0.0:
        raise ValueError('value need to be greater or equal 0.0 '
                         'for field `direct_normal_radiation`')
    self._direct_normal_radiation = checked
Corresponds to IDD Field `direct_normal_radiation` Args: value (float): value for IDD Field `direct_normal_radiation` Unit: Wh/m2 value >= 0.0 Missing value: 9999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def update(self, catalog=None, dependencies=None, allow_overwrite=False):
    '''Convenience method to update this Di instance with the specified
    contents.

    :param catalog: ICatalog supporting class or mapping
    :type catalog: ICatalog or collections.Mapping
    :param dependencies: Mapping of dependencies
    :type dependencies: collections.Mapping
    :param allow_overwrite: If True, allow overwriting existing keys.
        Only applies to providers.
    :type allow_overwrite: bool
    '''
    # Each section is optional; a falsy argument is skipped entirely.
    if catalog:
        self._providers.update(catalog, allow_overwrite=allow_overwrite)
    if dependencies:
        self._dependencies.update(dependencies)
Convenience method to update this Di instance with the specified contents. :param catalog: ICatalog supporting class or mapping :type catalog: ICatalog or collections.Mapping :param dependencies: Mapping of dependencies :type dependencies: collections.Mapping :param allow_overwrite: If True, allow overwriting existing keys. Only applies to providers. :type allow_overwrite: bool
def set_password(name, password):
    '''Set the password for a named user (insecure: the plaintext password
    appears in the process list while the command runs).

    :param str name: The name of the local user, assumed to be in the
        local directory service
    :param str password: The plaintext password to set

    :return: True if successful, otherwise False
    :rtype: bool

    :raises: CommandExecutionError on user not found or any other
        unknown error

    CLI Example:

    .. code-block:: bash

        salt '*' mac_shadow.set_password macuser macpassword
    '''
    try:
        salt.utils.mac_utils.execute_return_success(
            "dscl . -passwd /Users/{0} '{1}'".format(name, password)
        )
    except CommandExecutionError as exc:
        # dscl reports an unknown user via this marker string.
        message = ('User not found: {0}'.format(name)
                   if 'eDSUnknownNodeName' in exc.strerror
                   else 'Unknown error: {0}'.format(exc.strerror))
        raise CommandExecutionError(message)
    return True
Set the password for a named user (insecure, the password will be in the process list while the command is running) :param str name: The name of the local user, which is assumed to be in the local directory service :param str password: The plaintext password to set :return: True if successful, otherwise False :rtype: bool :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' mac_shadow.set_password macuser macpassword
def _skw_matches_comparator(kw0, kw1): """Compare 2 single keywords objects. First by the number of their spans (ie. how many times they were found), if it is equal it compares them by lenghts of their labels. """ def compare(a, b): return (a > b) - (a < b) list_comparison = compare(len(kw1[1][0]), len(kw0[1][0])) if list_comparison: return list_comparison if kw0[0].isComposite() and kw1[0].isComposite(): component_avg0 = sum(kw0[1][1]) / len(kw0[1][1]) component_avg1 = sum(kw1[1][1]) / len(kw1[1][1]) component_comparison = compare(component_avg1, component_avg0) if component_comparison: return component_comparison return compare(len(str(kw1[0])), len(str(kw0[0])))
Compare 2 single keywords objects. First by the number of their spans (ie. how many times they were found), if it is equal it compares them by the lengths of their labels.
def setDeclaration(self, declaration):
    """Set the declaration this model will use for rendering the headers.

    Parameters
    ----------
    declaration : object
        Declaration whose ``proxy`` must be a ProxyAbstractItemView.
    """
    # BUG FIX: the original message used "{]" which is an invalid format
    # placeholder and would raise ValueError instead of showing the object.
    assert isinstance(declaration.proxy, ProxyAbstractItemView), \
        "The model declaration must be a QtAbstractItemView subclass. " \
        "Got {}".format(declaration)
    self.declaration = declaration
Set the declaration this model will use for rendering the headers.
def break_array(a, threshold=numpy.pi, other=None):
    """Create an array which masks jumps >= threshold.

    Extra points are inserted between two subsequent values whose
    absolute difference differs by more than *threshold* (default is pi).

    *other* can be a secondary array which is masked according to *a*.

    Returns (*a_masked*, *other_masked*) (where *other_masked* can be
    ``None`` when *other* was not supplied).

    Raises
    ------
    ValueError
        If *other* is given and its shape differs from *a*'s.
    """
    assert len(a.shape) == 1, "Only 1D arrays supported"
    if other is not None and a.shape != other.shape:
        raise ValueError("arrays must be of identical shape")
    # indices where a jump occurs (jump is AFTER the index in breaks)
    breaks = numpy.where(numpy.abs(numpy.diff(a)) >= threshold)[0]
    breaks += 1  # insert the blank entry just after each jump
    # new array b including one inserted (masked) slot per break
    m = len(breaks)
    b = numpy.empty((len(a) + m))
    # new indices for breaks in b, taking previous insertions into account
    b_breaks = breaks + numpy.arange(m)
    # FIX: numpy.bool / numpy.NAN were removed in modern NumPy; use the
    # builtin bool and numpy.nan instead.
    mask = numpy.zeros_like(b, dtype=bool)
    mask[b_breaks] = True
    b[~mask] = a
    b[mask] = numpy.nan
    if other is not None:
        c = numpy.empty_like(b)
        c[~mask] = other
        c[mask] = numpy.nan
        ma_c = numpy.ma.array(c, mask=mask)
    else:
        ma_c = None
    return numpy.ma.array(b, mask=mask), ma_c
Create a array which masks jumps >= threshold. Extra points are inserted between two subsequent values whose absolute difference differs by more than threshold (default is pi). Other can be a secondary array which is also masked according to *a*. Returns (*a_masked*, *other_masked*) (where *other_masked* can be ``None``)
def can_ignore_error(self, reqhnd=None):
    """Tests whether the current exception is worth reporting.

    Returns True for broken-pipe / connection-reset errors, or for a
    bad-file-descriptor socket error seen after shutdown (in which case
    the request handler's connection is also flagged for closing).
    """
    exc_value = sys.exc_info()[1]
    try:
        if isinstance(exc_value, (BrokenPipeError, ConnectionResetError)):
            return True
    except NameError:
        # Python 2: these exception classes do not exist.
        pass
    if not self.done:
        return False
    if not isinstance(exc_value, socket.error):
        return False
    need_close = exc_value.errno == 9  # EBADF: socket already closed
    if need_close and reqhnd is not None:
        reqhnd.close_connection = 1
    return need_close
Tests if the error is worth reporting.
def image_search(auth=None, **kwargs):
    '''Search for images

    CLI Example:

    .. code-block:: bash

        salt '*' glanceng.image_search name=image1
        salt '*' glanceng.image_search
    '''
    cloud = get_operator_cloud(auth)
    # strip salt-internal keys before passing through to the SDK
    return cloud.search_images(**_clean_kwargs(**kwargs))
Search for images CLI Example: .. code-block:: bash salt '*' glanceng.image_search name=image1 salt '*' glanceng.image_search
def aghmean(nums):
    """Return arithmetic-geometric-harmonic mean.

    Iterates over arithmetic, geometric, & harmonic means until they
    converge to a single value (rounded to 12 digits), following the
    method described in :cite:`Raissouli:2009`.

    Parameters
    ----------
    nums : list
        A series of numbers

    Returns
    -------
    float
        The arithmetic-geometric-harmonic mean of nums

    Examples
    --------
    >>> aghmean([1, 2, 3, 4])
    2.198327159900212
    >>> aghmean([1, 2])
    1.4142135623731884
    >>> aghmean([0, 5, 1000])
    335.0
    """
    m_a, m_g, m_h = amean(nums), gmean(nums), hmean(nums)
    if any(math.isnan(m) for m in (m_a, m_g, m_h)):
        return float('nan')
    # Iterate the three means jointly until convergence at 12 digits.
    while round(m_a, 12) != round(m_g, 12) and round(m_g, 12) != round(
        m_h, 12
    ):
        next_a = (m_a + m_g + m_h) / 3
        next_g = (m_a * m_g * m_h) ** (1 / 3)
        next_h = 3 / (1 / m_a + 1 / m_g + 1 / m_h)
        m_a, m_g, m_h = next_a, next_g, next_h
    return m_a
Return arithmetic-geometric-harmonic mean. Iterates over arithmetic, geometric, & harmonic means until they converge to a single value (rounded to 12 digits), following the method described in :cite:`Raissouli:2009`. Parameters ---------- nums : list A series of numbers Returns ------- float The arithmetic-geometric-harmonic mean of nums Examples -------- >>> aghmean([1, 2, 3, 4]) 2.198327159900212 >>> aghmean([1, 2]) 1.4142135623731884 >>> aghmean([0, 5, 1000]) 335.0
def bulk_docs(self, docs):
    """Perform multiple document inserts and/or updates through a single
    request.

    Each document must either be or extend a dict, as is the case with
    Document and DesignDocument objects.  A document must contain the
    ``_id`` and ``_rev`` fields if the document is meant to be updated.

    :param list docs: List of Documents to be created/updated.

    :returns: Bulk document creation/update status in JSON format
    """
    resp = self.r_session.post(
        '/'.join((self.database_url, '_bulk_docs')),
        data=json.dumps({'docs': docs}, cls=self.client.encoder),
        headers={'Content-Type': 'application/json'},
    )
    resp.raise_for_status()
    return response_to_json_dict(resp)
Performs multiple document inserts and/or updates through a single request. Each document must either be or extend a dict as is the case with Document and DesignDocument objects. A document must contain the ``_id`` and ``_rev`` fields if the document is meant to be updated. :param list docs: List of Documents to be created/updated. :returns: Bulk document creation/update status in JSON format
def _compute_mean_on_rock(self, C, mag, rrup, F, HW): """ Compute mean value on rock (that is eq.1, page 105 with S = 0) """ f1 = self._compute_f1(C, mag, rrup) f3 = self._compute_f3(C, mag) f4 = self._compute_f4(C, mag, rrup) return f1 + F * f3 + HW * f4
Compute mean value on rock (that is eq.1, page 105 with S = 0)
def setdict(self, D):
    """Set dictionary array and precompute derived quantities."""
    self.D = np.asarray(D, dtype=self.dtype)
    # Cache D^T S for reuse in later iterations.
    self.DTS = self.D.T.dot(self.S)
    # Cholesky-factorise the dictionary for efficient linear solves.
    lu, piv = sl.cho_factor(self.D, self.rho)
    self.piv = piv
    self.lu = np.asarray(lu, dtype=self.dtype)
Set dictionary array.
def _read_para_notification(self, code, cbit, clen, *, desc, length, version):
    """Read HIP NOTIFICATION parameter.

    Structure of HIP NOTIFICATION parameter [RFC 7401]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |             Type              |             Length            |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |          Reserved             |      Notify Message Type      |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |                                                               /
        /                   Notification Data                           /
        /                                               +---------------+
        /                                               |     Padding   |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

        Octets      Bits        Name                    Description
          0           0     notification.type       Parameter Type
          1          15     notification.critical   Critical Bit
          2          16     notification.length     Length of Contents
          4          32     -                       Reserved
          6          48     notification.msg_type   Notify Message Type
          8          64     notification.data       Notification Data
          ?           ?     -                       Padding

    :param code: parameter type number
    :param cbit: critical bit
    :param clen: length of parameter contents
    :param desc: parameter type description
    :param length: total parameter length (including padding)
    :param version: HIP protocol version (used in error messages)
    :returns: dict describing the parsed NOTIFICATION parameter
    :raises ProtocolError: if the notify message type is out of range
    """
    _resv = self._read_fileng(2)   # Reserved (2 bytes, discarded)
    _code = self._read_unpack(2)   # Notify Message Type
    # NOTE(review): only 2 bytes are read as Notification Data, but the
    # RFC layout shows variable-length data (clen - 4 bytes) — confirm
    # against the upstream parser before relying on this field.
    _data = self._read_fileng(2)

    # Resolve the message type; fall back to the IANA unassigned ranges.
    _type = _NOTIFICATION_TYPE.get(_code)
    if _type is None:
        if 1 <= _code <= 50:
            _type = 'Unassigned (IETF Review)'
        elif 51 <= _code <= 8191:
            _type = 'Unassigned (Specification Required; Error Message)'
        elif 8192 <= _code <= 16383:
            _type = 'Unassigned (Reserved for Private Use; Error Message)'
        elif 16384 <= _code <= 40959:
            _type = 'Unassigned (Specification Required; Status Message)'
        elif 40960 <= _code <= 65535:
            _type = 'Unassigned (Reserved for Private Use; Status Message)'
        else:
            raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')

    notification = dict(
        type=desc,
        critical=cbit,
        length=clen,
        msg_type=_type,
        data=_data,
    )

    # Consume any trailing padding beyond the declared content length.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return notification
Read HIP NOTIFICATION parameter. Structure of HIP NOTIFICATION parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Reserved | Notify Message Type | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | / / Notification Data / / +---------------+ / | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 notification.type Parameter Type 1 15 notification.critical Critical Bit 2 16 notification.length Length of Contents 4 32 - Reserved 6 48 notification.msg_type Notify Message Type 8 64 notification.data Notification Data ? ? - Padding
def Close(self):
    """
    Commits and closes the current connection (best-effort: any error
    during commit/close is swallowed).

    @author: Nick Verbeck
    @since: 5/12/2008
    """
    if self.connection is not None:
        try:
            self.connection.commit()
            self.connection.close()
            self.connection = None
        # FIX: `except Exception, e` is Python 2 syntax and a SyntaxError
        # on Python 3; the bound exception was unused anyway.
        except Exception:
            pass
Commits and closes the current connection @author: Nick Verbeck @since: 5/12/2008
def rotateTo(self, angle):
    """Rotate the image to a given angle.

    Parameters:
        | angle - the angle that you want the image rotated to.
        |     Positive numbers are clockwise, negative numbers
        |     are counter-clockwise.
    """
    # Delegate to the shared transform routine, keeping all other
    # transform state (scale, flips) unchanged.
    self._transmogrophy(
        angle,
        self.percent,
        self.scaleFromCenter,
        self.flipH,
        self.flipV,
    )
rotates the image to a given angle Parameters: | angle - the angle that you want the image rotated to. | Positive numbers are clockwise, negative numbers are counter-clockwise
def remove_instance(self, instance):
    """Request to cleanly remove the given instance.

    If the instance is external, its process and queues are shut down
    cleanly first.

    :param instance: instance to remove
    :type instance: object
    :return: None
    """
    if instance.is_external:
        # External modules run in their own process with queues; both
        # must be torn down before we stop tracking the instance.
        logger.info("Request external process to stop for %s", instance.name)
        instance.stop_process()
        logger.info("External process stopped.")
        instance.clear_queues(self.daemon.sync_manager)
    # Stop listening for this instance.
    self.instances.remove(instance)
Request to cleanly remove the given instance. If instance is external also shutdown it cleanly :param instance: instance to remove :type instance: object :return: None
def absent(name, driver=None):
    '''
    Ensure that a volume is absent.

    .. versionadded:: 2015.8.4
    .. versionchanged:: 2017.7.0
        This state was renamed from **docker.volume_absent** to
        **docker_volume.absent**

    name
        Name of the volume

    Usage Examples:

    .. code-block:: yaml

        volume_foo:
          docker_volume.absent
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if not _find_volume(name):
        # Nothing to do: report success without changes.
        ret['result'] = True
        ret['comment'] = 'Volume \'{0}\' already absent'.format(name)
        return ret

    try:
        ret['changes']['removed'] = __salt__['docker.remove_volume'](name)
        ret['result'] = True
    except Exception as exc:
        ret['comment'] = ('Failed to remove volume \'{0}\': {1}'
                         .format(name, exc))
    return ret
Ensure that a volume is absent. .. versionadded:: 2015.8.4 .. versionchanged:: 2017.7.0 This state was renamed from **docker.volume_absent** to **docker_volume.absent** name Name of the volume Usage Examples: .. code-block:: yaml volume_foo: docker_volume.absent
def _get_stream_schema(fields):
    """Build and return a StreamSchema protobuf message for the given
    field names (every field typed as OBJECT)."""
    schema = topology_pb2.StreamSchema()
    object_type = topology_pb2.Type.Value("OBJECT")
    for name in fields:
        entry = schema.keys.add()
        entry.key = name
        entry.type = object_type
    return schema
Returns a StreamSchema protobuf message
def get_arctic_version(self, symbol, as_of=None):
    """Return the numerical representation of the arctic version used to
    write the last (or as_of) version for the given symbol.

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    as_of : `str` or int or `datetime.datetime`
        Return the data as it was as_of the point in time.
        `int` : specific version number
        `str` : snapshot name which contains the version
        `datetime.datetime` : the version of the data that existed as_of
        the requested point in time

    Returns
    -------
    arctic_version : int
        The numerical representation of Arctic version, used to create
        the specified symbol version
    """
    metadata = self._read_metadata(symbol, as_of=as_of)
    # Older entries may predate the field; default to 0.
    return metadata.get('arctic_version', 0)
Return the numerical representation of the arctic version used to write the last (or as_of) version for the given symbol. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time Returns ------- arctic_version : int The numerical representation of Arctic version, used to create the specified symbol version
def validateOpfJsonValue(value, opfJsonSchemaFilename):
    """Validate a python object against an OPF json schema file.

    :param value: target python object to validate (typically a dictionary)
    :param opfJsonSchemaFilename: (string) OPF json schema filename
        containing the json schema object (e.g., opfTaskControlSchema.json)
    :raises: jsonhelpers.ValidationError when value fails json validation
    """
    # Schema files live in the "jsonschema" directory next to this module.
    schemaPath = os.path.join(os.path.dirname(__file__), "jsonschema",
                              opfJsonSchemaFilename)
    jsonhelpers.validate(value, schemaPath=schemaPath)
Validate a python object against an OPF json schema file :param value: target python object to validate (typically a dictionary) :param opfJsonSchemaFilename: (string) OPF json schema filename containing the json schema object. (e.g., opfTaskControlSchema.json) :raises: jsonhelpers.ValidationError when value fails json validation
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
    """Ensure the token is valid and belongs to the client.

    This method is used by the authorization code grant indirectly by
    issuing refresh tokens, resource owner password credentials grant
    (also indirectly) and the refresh token grant.
    """
    token = self._tokengetter(refresh_token=refresh_token)
    if not token or token.client_id != client.client_id:
        return False
    # Propagate ownership details onto the request for downstream use.
    request.client_id = token.client_id
    request.user = token.user
    return True
Ensure the token is valid and belongs to the client This method is used by the authorization code grant indirectly by issuing refresh tokens, resource owner password credentials grant (also indirectly) and the refresh token grant.
def buggy_div(request):
    """A deliberately buggy endpoint dividing query parameters a and b.

    It will fail if b equals 0 or if either a or b is not a float.

    :param request: request object
    :return: JsonResponse with the division result
    """
    params = request.GET
    numerator = float(params.get('a', '0'))
    denominator = float(params.get('b', '0'))
    return JsonResponse({'result': numerator / denominator})
A buggy endpoint to perform division between query parameters a and b. It will fail if b is equal to 0 or either a or b are not float. :param request: request object :return:
def _check_suffix(self, w_string, access_string, index): """ Checks if access string suffix matches with the examined string suffix Args: w_string (str): The examined string to be consumed access_string (str): The access string for the state index (int): The index value for selecting the prefix of w Returns: bool: A boolean valuei indicating if matching was successful """ prefix_as = self._membership_query(access_string) full_as = self._membership_query(access_string + w_string[index:]) prefix_w = self._membership_query(w_string[:index]) full_w = self._membership_query(w_string) length = len(commonprefix([prefix_as, full_as])) as_suffix = full_as[length:] length = len(commonprefix([prefix_w, full_w])) w_suffix = full_w[length:] if as_suffix != w_suffix: logging.debug('Access string state incorrect') return True logging.debug('Access string state correct.') return False
Checks if access string suffix matches with the examined string suffix Args: w_string (str): The examined string to be consumed access_string (str): The access string for the state index (int): The index value for selecting the prefix of w Returns: bool: A boolean value indicating if matching was successful
def set_brightness(host, did, value, token=None):
    """Set brightness of a bulb or fixture.

    Uses HTTPS when a token is supplied, otherwise falls back to HTTP
    with the default token.

    :param host: gateway host name or IP
    :param did: device id
    :param value: brightness level
    :param token: optional auth token (enables HTTPS)
    :return: True when the gateway answered with HTTP 200, else False
    """
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "1234567890"
    url = (
        scheme + '://' + host +
        '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>'
        + token + '</token><did>' + did + '</did><value>' + str(
            value) + '</value><type>level</type></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    # FIX: requests exposes status_code as an int; the original compared
    # against the string '200', which never matched and always returned
    # False even on success.
    return response.status_code == 200
Set brightness of a bulb or fixture.
def cumsum(self, axis=0, *args, **kwargs):
    """
    Cumulative sum of non-NA/null values.

    When performing the cumulative summation, any non-NA/null values will
    be skipped. The resulting SparseSeries will preserve the locations of
    NaN values, but the fill value will be `np.nan` regardless.

    Parameters
    ----------
    axis : {0}

    Returns
    -------
    cumsum : SparseSeries
    """
    nv.validate_cumsum(args, kwargs)

    if axis is not None:
        # Raises on an invalid axis; the value itself is unused.
        self._get_axis_number(axis)

    summed = self.values.cumsum()
    result = self._constructor(summed, index=self.index,
                               sparse_index=summed.sp_index)
    return result.__finalize__(self)
Cumulative sum of non-NA/null values. When performing the cumulative summation, any non-NA/null values will be skipped. The resulting SparseSeries will preserve the locations of NaN values, but the fill value will be `np.nan` regardless. Parameters ---------- axis : {0} Returns ------- cumsum : SparseSeries
def fdf(self, x):
    """Calculate the value of the functional for the specified arguments,
    and the derivatives with respect to the parameters (taking any
    specified mask into account).

    :param x: the value(s) to evaluate at
    """
    x = self._flatten(x)
    n = len(x) if hasattr(x, "__len__") else 1
    # dtype 0 is the real-valued implementation, otherwise complex.
    if self._dtype == 0:
        raw = _functional._fdf(self, x)
    else:
        raw = _functional._fdfc(self, x)
    if len(raw) == n:
        return numpy.array(raw)
    # One value + npar derivatives per point: reshape to (points, npar+1).
    return numpy.array(raw).reshape(self.npar() + 1,
                                    n // self.ndim()).transpose()
Calculate the value of the functional for the specified arguments, and the derivatives with respect to the parameters (taking any specified mask into account). :param x: the value(s) to evaluate at
def _CheckCacheFileForMatch(self, cache_filename, scopes):
    """Checks the cache file to see if it matches the given credentials.

    Args:
        cache_filename: Cache filename to check.
        scopes: Scopes for the desired credentials.

    Returns:
        List of scopes (if cache matches) or None.
    """
    # Metadata describing the credentials we are looking for.
    creds = {
        'scopes': sorted(list(scopes)) if scopes else None,
        'svc_acct_name': self.__service_account_name,
    }
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cached_str = cache_file.LockedRead()
        if not cached_str:
            return None
        cached = json.loads(cached_str)
        if (creds['svc_acct_name'] == cached['svc_acct_name'] and
                creds['scopes'] in (None, cached['scopes'])):
            return cached['scopes']
    except KeyboardInterrupt:
        raise
    except:  # pylint: disable=bare-except
        # Any failure reading or parsing the cache is treated as a miss.
        pass
Checks the cache file to see if it matches the given credentials. Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. Returns: List of scopes (if cache matches) or None.
def get_ip_addr(self) -> str:
    '''Return the device's WLAN IPv4 address via adb.'''
    output, _ = self._execute(
        '-s', self.device_sn, 'shell', 'ip', '-f', 'inet', 'addr',
        'show', 'wlan0')
    # Extract the first dotted-quad IPv4 address from the command output.
    matches = re.findall(
        r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b",
        output)
    if not matches:
        raise ConnectionError(
            'The device is not connected to WLAN or not connected via USB.')
    return matches[0]
Show IP Address.
def get_guest_connection_status(self, userid):
    '''Return True if the guest VM is reachable, otherwise False.'''
    results = self._request(' '.join(('getvm', userid, 'isreachable')))
    # rs == 1 signals "reachable" in the backend reply.
    return results['rs'] == 1
Get guest vm connection status.
def load_handgeometry():
    """Hand Geometry Dataset.

    The data of this dataset is a 3d numpy array vector with shape
    (224, 224, 3) containing 112 224x224 RGB photos of hands, and the
    target is a 1d numpy float array containing the width of the wrist
    in centimeters.
    """
    dataset_path = _load('handgeometry')
    df = _load_csv(dataset_path, 'data')
    images = _load_images(os.path.join(dataset_path, 'images'), df.image)
    # The docstring doubles as the dataset description.
    return Dataset(load_handgeometry.__doc__, images, df.target.values, r2_score)
Hand Geometry Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 112 224x224 RGB photos of hands, and the target is a 1d numpy float array containing the width of the wrist in centimeters.
def is_zone_running(self, zone):
    """Return the running state of the specified zone.

    :param zone: The zone to check.
    :type zone: int
    :returns: True if the zone is currently running, otherwise False.
    :rtype: boolean
    """
    # Refresh controller state before inspecting it.
    self.update_controller_info()

    if not self.running:
        return False
    return int(self.running[0]['relay']) == zone
Returns the state of the specified zone. :param zone: The zone to check. :type zone: int :returns: Returns True if the zone is currently running, otherwise returns False if the zone is not running. :rtype: boolean
def Disconnect(self):
    '''Disconnect a device (dbusmock implementation).

    Marks the mock device as disconnected and emits the matching
    PropertyChanged signals. Raises org.bluez.Error.NoSuchDevice when
    the device path is unknown to the mock object registry.
    '''
    device_path = self.path
    if device_path not in mockobject.objects:
        raise dbus.exceptions.DBusException('No such device.', name='org.bluez.Error.NoSuchDevice')
    device = mockobject.objects[device_path]
    # Audio interface is optional on a device; missing keys are ignored.
    try:
        device.props[AUDIO_IFACE]['State'] = dbus.String("disconnected", variant_level=1)
        device.EmitSignal(AUDIO_IFACE, 'PropertyChanged', 'sv', [ 'State', dbus.String("disconnected", variant_level=1), ])
    except KeyError:
        pass
    # The core device interface is always present: flip Connected to False
    # and notify listeners.
    device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(False, variant_level=1)
    device.EmitSignal(DEVICE_IFACE, 'PropertyChanged', 'sv', [ 'Connected', dbus.Boolean(False, variant_level=1), ])
Disconnect a device
def is_build_dir(self, folder_name):
    """Return whether or not the given dir contains a build."""
    # Cannot move up to base scraper due to parser.entries call in
    # get_build_info_for_date (see below)
    url = '%s/' % urljoin(self.base_url, self.monthly_build_list_regex,
                          folder_name)
    if self.application in APPLICATIONS_MULTI_LOCALE \
            and self.locale != 'multi':
        url = '%s/' % urljoin(url, self.locale)
    parser = self._create_directory_parser(url)
    pattern = re.compile(self.binary_regex, re.IGNORECASE)
    # A single entry matching the binary pattern marks a build dir.
    return any(pattern.match(entry) for entry in parser.entries)
Return whether or not the given dir contains a build.
def log(self, logger=None, label=None, eager=False):
    '''Log query result consumption details to a logger.

    Args:
        logger: Any object which supports a debug() method which accepts
            a str, such as a Python standard library logger object from
            the logging module. If logger is not provided or is None,
            this method has no logging side effects.
        label: An optional label which will be inserted into each line of
            logging output produced by this particular use of log.
        eager: An optional boolean which controls how the query result
            will be consumed. If True, the sequence will be consumed and
            logged in its entirety. If False (the default) the sequence
            will be evaluated and logged lazily as it is consumed.
            Warning: eager=True requires enough memory to hold the whole
            sequence — impossible for infinite sequences. Use with care!

    Returns:
        A queryable over the unaltered source sequence.

    Raises:
        AttributeError: If logger does not support a debug() method.
        ValueError: If the Queryable has been closed.
    '''
    if self.closed():
        raise ValueError("Attempt to call log() on a closed Queryable.")
    if logger is None:
        # No logger: logging is a no-op, so return this queryable as-is.
        return self
    label = repr(self) if label is None else label
    if eager:
        return self._create(self._eager_log_result(logger, label))
    return self._create(self._generate_lazy_log_result(logger, label))
Log query result consumption details to a logger. Args: logger: Any object which supports a debug() method which accepts a str, such as a Python standard library logger object from the logging module. If logger is not provided or is None, this method has no logging side effects. label: An optional label which will be inserted into each line of logging output produced by this particular use of log eager: An optional boolean which controls how the query result will be consumed. If True, the sequence will be consumed and logged in its entirety. If False (the default) the sequence will be evaluated and logged lazily as it consumed. Warning: Use of eager=True requires use of sufficient memory to hold the entire sequence which is obviously not possible with infinite sequences. Use with care! Returns: A queryable over the unaltered source sequence. Raises: AttributeError: If logger does not support a debug() method. ValueError: If the Queryable has been closed.
def aesCCM(key, key_handle, nonce, data, decrypt=False):
    """
    Function implementing YubiHSM AEAD encrypt/decrypt in software.

    Encrypts (or, with decrypt=True, decrypts and verifies) `data` using
    AES in CTR mode combined with a CBC-MAC, mirroring the HSM's AEAD
    format: ciphertext followed by the MAC.

    NOTE(review): this is Python-2-era code — it joins the result with
    ''.join and relies on `counter.next` as the CTR callback; confirm
    byte/str handling before running under Python 3.
    """
    if decrypt:
        # AEAD layout is ciphertext || MAC: split the trailing MAC off.
        (data, saved_mac) = _split_data(data, len(data) - pyhsm.defines.YSM_AEAD_MAC_SIZE)
    nonce = pyhsm.util.input_validate_nonce(nonce, pad = True)
    mac = _cbc_mac(key, key_handle, nonce, len(data))
    counter = _ctr_counter(key_handle, nonce, value = 0)
    ctr_aes = AES.new(key, AES.MODE_CTR, counter = counter.next)
    out = []
    while data:
        (thisblock, data) = _split_data(data, pyhsm.defines.YSM_BLOCK_SIZE)
        # encrypt/decrypt and CBC MAC; the MAC is always computed over
        # the plaintext, so the order of operations differs per direction
        if decrypt:
            aes_out = ctr_aes.decrypt(thisblock)
            mac.update(aes_out)
        else:
            mac.update(thisblock)
            aes_out = ctr_aes.encrypt(thisblock)
        out.append(aes_out)
    # Finalize MAC with the counter reset to zero
    counter.value = 0
    mac.finalize(counter.pack())
    if decrypt:
        # Constant-time comparison is NOT used here — TODO confirm whether
        # upstream considers this MAC check timing-sensitive.
        if mac.get() != saved_mac:
            raise pyhsm.exception.YHSM_Error('AEAD integrity check failed')
    else:
        # Append the MAC to produce the AEAD (ciphertext || MAC)
        out.append(mac.get())
    return ''.join(out)
Function implementing YubiHSM AEAD encrypt/decrypt in software.
def _write_single_sample(self, sample): """ :type sample: Sample """ bytes = sample.extras.get("responseHeadersSize", 0) + 2 + sample.extras.get("responseBodySize", 0) message = sample.error_msg if not message: message = sample.extras.get("responseMessage") if not message: for sample in sample.subsamples: if sample.error_msg: message = sample.error_msg break elif sample.extras.get("responseMessage"): message = sample.extras.get("responseMessage") break self.writer.writerow({ "timeStamp": int(1000 * sample.start_time), "elapsed": int(1000 * sample.duration), "Latency": 0, # TODO "label": sample.test_case, "bytes": bytes, "responseCode": sample.extras.get("responseCode"), "responseMessage": message, "allThreads": self.concurrency, # TODO: there will be a problem aggregating concurrency for rare samples "success": "true" if sample.status == "PASSED" else "false", }) self.out_stream.flush()
:type sample: Sample
def get_uids(self, filename=None):
    """Return a list of UIDs.

    filename -- unused, kept only for API compatibility
    """
    self._update()
    # One generated UID per section of the underlying address book.
    return [Abook._gen_uid(self._book[section])
            for section in self._book.sections()]
Return a list of UIDs filename -- unused, for API compatibility only
async def _get_smallest_env(self):
    """Return the address of the slave environment manager that currently
    hosts the fewest agents.
    """
    async def _count_agents(mgr_addr):
        # Ask each remote manager how many agents it holds.
        remote = await self.env.connect(mgr_addr, timeout=TIMEOUT)
        agents = await remote.get_agents(addr=True)
        return mgr_addr, len(agents)

    sizes = await create_tasks(_count_agents, self.addrs, flatten=False)
    # min() with a stable key picks the same manager as sorting and
    # taking the first element.
    smallest = min(sizes, key=lambda pair: pair[1])
    return smallest[0]
Get address of the slave environment manager with the smallest number of agents.
def debug_sync(self, conn_id, cmd_name, cmd_args, progress_callback):
    """Synchronously complete a named debug command.

    Wraps ``debug_async`` and blocks on a ``threading.Event`` until the
    underlying device adapter reports completion.  The command name and
    arguments are passed to the underlying device adapter and interpreted
    there.  If the command is long running, progress_callback may be used
    to provide status updates.

    Args:
        conn_id (int): A unique identifier that will refer to this connection
        cmd_name (string): the name of the debug command we want to invoke
        cmd_args (dict): any arguments that we want to send with this command.
        progress_callback (callable): A function to be called with status on
            our progress, called as: progress_callback(done_count, total_count)

    Returns:
        dict: keys ``success``, ``failure_reason`` and ``return_value``
            captured from the adapter's completion callback.
    """
    done = threading.Event()
    result = {}

    def _debug_done(conn_id, adapter_id, success, retval, reason):
        # Completion callback invoked by the adapter; capture the outcome
        # and release the waiting caller.
        result['success'] = success
        result['failure_reason'] = reason
        result['return_value'] = retval
        done.set()

    self.debug_async(conn_id, cmd_name, cmd_args, progress_callback, _debug_done)
    done.wait()

    return result
Synchronously complete a named debug command (blocks until the adapter reports completion). The command name and arguments are passed to the underlying device adapter and interpreted there. If the command is long running, progress_callback may be used to provide status updates. Callback is called when the command has finished. Args: conn_id (int): A unique identifier that will refer to this connection cmd_name (string): the name of the debug command we want to invoke cmd_args (dict): any arguments that we want to send with this command. progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count)
def print_token(self, token_node_index):
    """returns the string representation of a token."""
    node = self.nodes[token_node_index]
    assert isinstance(node, TokenNode), "The given node is not a token node."
    # A token is the slice of the document text between its onset/offset.
    return self.text[node.onset:node.offset]
returns the string representation of a token.
def resolve_data_objects(objects, project=None, folder=None, batchsize=1000):
    """
    :param objects: Data object specifications, each with fields "name"
        (required), "folder", and "project"
    :type objects: list of dictionaries
    :param project: ID of project context; a data object's project defaults
        to this if not specified for that object
    :type project: string
    :param folder: Folder path within the project; a data object's folder
        path defaults to this if not specified for that object
    :type folder: string
    :param batchsize: Number of objects to resolve in each batch call to
        system_resolve_data_objects; defaults to 1000 and is only used for
        testing (must be a positive integer not exceeding 1000)
    :type batchsize: int
    :returns: List of results parallel to input objects, where each entry
        is a list containing 0 or more dicts, each corresponding to a
        resolved object
    :rtype: List of lists of dictionaries

    Each returned element is a list of dictionaries with keys "project" and
    "id". The number of dictionaries for each element may be 0, 1, or more.
    """
    if not isinstance(batchsize, int) or batchsize <= 0 or batchsize > 1000:
        raise ValueError("batchsize for resolve_data_objects must be a positive integer not exceeding 1000")

    args = {}
    if project:
        args['project'] = project
    if folder:
        args['folder'] = folder

    results = []
    # Resolve in groups of `batchsize` via /system/resolveDataObjects.
    for start in range(0, len(objects), batchsize):
        args['objects'] = objects[start:start + batchsize]
        results.extend(dxpy.api.system_resolve_data_objects(args)['results'])
    return results
:param objects: Data object specifications, each with fields "name" (required), "folder", and "project" :type objects: list of dictionaries :param project: ID of project context; a data object's project defaults to this if not specified for that object :type project: string :param folder: Folder path within the project; a data object's folder path defaults to this if not specified for that object :type folder: string :param batchsize: Number of objects to resolve in each batch call to system_resolve_data_objects; defaults to 1000 and is only used for testing (must be a positive integer not exceeding 1000) :type batchsize: int :returns: List of results parallel to input objects, where each entry is a list containing 0 or more dicts, each corresponding to a resolved object :rtype: List of lists of dictionaries Each returned element is a list of dictionaries with keys "project" and "id". The number of dictionaries for each element may be 0, 1, or more.
def lookup(self, path, is_committed=True, with_proof=False) -> (str, int):
    """
    Queries state for data on specified path

    :param path: path to data
    :param is_committed: queries the committed state root if True else the
        uncommitted root
    :param with_proof: creates proof if True
    :return: data
    """
    assert path is not None
    if is_committed:
        head_hash = self.state.committedHeadHash
    else:
        head_hash = self.state.headHash
    encoded, proof = self._get_value_from_state(path, head_hash,
                                                with_proof=with_proof)
    if not encoded:
        return None, None, None, proof
    value, last_seq_no, last_update_time = decode_state_value(encoded)
    return value, last_seq_no, last_update_time, proof
Queries state for data on specified path :param path: path to data :param is_committed: queries the committed state root if True else the uncommitted root :param with_proof: creates proof if True :return: data
def download_file_with_progress_bar(url):
    """Downloads a file from the given url, displays a progress bar.

    Returns a io.BytesIO object
    """
    response = requests.get(url, stream=True)
    if response.status_code == 404:
        msg = ('there was a 404 error trying to reach {} \nThis probably '
               'means the requested version does not exist.'.format(url))
        logger.error(msg)
        sys.exit()

    total_size = int(response.headers["Content-Length"])
    chunk_size = 1024
    buffer = io.BytesIO()
    # One tick per chunk, so total is the number of chunks.
    progress = tqdm(response.iter_content(chunk_size=chunk_size),
                    total=int(total_size / chunk_size),
                    unit="kb", leave=False)
    for chunk in progress:
        buffer.write(chunk)
    return buffer
Downloads a file from the given url, displays a progress bar. Returns a io.BytesIO object
def _correct_build_location(self):
    # type: () -> None
    """Move self._temp_build_dir to self._ideal_build_dir/self.req.name

    For some requirements (e.g. a path to a directory), the name of the
    package is not available until we run egg_info, so the build_location
    will return a temporary directory and store the _ideal_build_dir.

    This is only called by self.run_egg_info to fix the temporary build
    directory.

    Raises:
        InstallationError: if the ideal location is already occupied.
    """
    # Nothing to do once a real source directory has been established.
    if self.source_dir is not None:
        return
    assert self.req is not None
    assert self._temp_build_dir.path
    assert (self._ideal_build_dir is not None and
            self._ideal_build_dir.path)  # type: ignore
    old_location = self._temp_build_dir.path
    # Detach the path from the TempDirectory so it is not auto-deleted.
    self._temp_build_dir.path = None

    new_location = self.build_location(self._ideal_build_dir)
    if os.path.exists(new_location):
        raise InstallationError(
            'A package already exists in %s; please remove it to continue'
            % display_path(new_location))
    logger.debug(
        'Moving package %s from %s to new location %s',
        self, display_path(old_location),
        display_path(new_location),
    )
    shutil.move(old_location, new_location)
    self._temp_build_dir.path = new_location

    self._ideal_build_dir = None
    self.source_dir = os.path.normpath(os.path.abspath(new_location))
    # Cached egg-info path pointed into the old location; invalidate it.
    self._egg_info_path = None

    # Correct the metadata directory, if it exists
    if self.metadata_directory:
        old_meta = self.metadata_directory
        # Re-root the metadata dir relative to the moved package.
        rel = os.path.relpath(old_meta, start=old_location)
        new_meta = os.path.join(new_location, rel)
        new_meta = os.path.normpath(os.path.abspath(new_meta))
        self.metadata_directory = new_meta
Move self._temp_build_dir to self._ideal_build_dir/self.req.name For some requirements (e.g. a path to a directory), the name of the package is not available until we run egg_info, so the build_location will return a temporary directory and store the _ideal_build_dir. This is only called by self.run_egg_info to fix the temporary build directory.
def fav_songs(self):
    """Favorite songs of this user, fetched lazily and cached.

    FIXME: support fetching *all* favorite songs (currently only the
    first page returned by the API is loaded).

    :returns: list of deserialized songs (always a list; the original
        returned None on the first call when the user had no favorites,
        but [] on subsequent calls — now consistently a list).
    """
    if self._fav_songs is None:
        songs_data = self._api.user_favorite_songs(self.identifier)
        self._fav_songs = []
        if songs_data:
            for song_data in songs_data:
                song = _deserialize(song_data, NestedSongSchema)
                self._fav_songs.append(song)
    return self._fav_songs
FIXME: support fetching all of the user's favorite songs
def extension_counts(container=None, file_list=None, return_counts=True):
    '''extension counts will return a dictionary with counts of file
    extensions for an image.

    :param container: if provided, will use container as image. Can also provide
    :param image_package: if provided, can be used instead of container
    :param file_list: the complete list of files
    :param return_counts: return counts over dict with files. Default True
    '''
    if file_list is None:
        file_list = get_container_contents(container, split_delim='\n')['all']

    extensions = dict()
    for item in file_list:
        # Extensionless files are grouped under a sentinel key.
        _, ext = os.path.splitext(item)
        key = ext if ext else 'no-extension'
        if return_counts:
            extensions = update_dict_sum(extensions, key)
        else:
            extensions = update_dict(extensions, key, item)
    return extensions
extension counts will return a dictionary with counts of file extensions for an image. :param container: if provided, will use container as image. Can also provide :param image_package: if provided, can be used instead of container :param file_list: the complete list of files :param return_counts: return counts over dict with files. Default True
def connections_of(self, target):
    ''' generate tuples containing (relation, object_that_applies) '''
    # For every relation the target participates in, pair the relation
    # with each object found under it, then flatten.
    pairs_per_relation = (
        ((relation, obj) for obj in self.find(target, relation))
        for relation in self.relations_of(target)
    )
    return gen.chain(pairs_per_relation)
generate tuples containing (relation, object_that_applies)
def fast_forward(self, start_dt):
    """ Fast-forward file to given start_dt datetime obj using binary search.

    Only fast for files. Streams need to be forwarded manually, and it will
    miss the first line that would otherwise match (as it consumes the log
    line).

    :param start_dt: datetime to seek to; on return the file handle is
        positioned just before the first line at or after start_dt.
    """
    if self.from_stdin:
        # skip lines until start_dt is reached
        return
    else:
        # fast bisection path
        max_mark = self.filesize
        step_size = max_mark

        # check if start_dt is already smaller than first datetime
        self.filehandle.seek(0)
        le = self.next()
        if le.datetime and le.datetime >= start_dt:
            self.filehandle.seek(0)
            return

        le = None
        self.filehandle.seek(0)

        # search for lower bound: halve the step each round, seeking
        # forward when the current line is too early and backward when it
        # is at/after start_dt, until the step is <= 100 bytes.
        while abs(step_size) > 100:
            step_size = ceil(step_size / 2.)
            self.filehandle.seek(step_size, 1)
            le = self._find_curr_line()
            if not le:
                break
            if le.datetime >= start_dt:
                step_size = -abs(step_size)
            else:
                step_size = abs(step_size)

        if not le:
            return

        # now walk backwards until we found a truly smaller line
        while self.filehandle.tell() >= 2 and (le.datetime is None or
                                               le.datetime >= start_dt):
            self.filehandle.seek(-2, 1)
            le = self._find_curr_line(prev=True)
Fast-forward file to given start_dt datetime obj using binary search. Only fast for files. Streams need to be forwarded manually, and it will miss the first line that would otherwise match (as it consumes the log line).
def map_ids(queries, frm='ACC', to='ENSEMBL_PRO_ID',
            organism_taxid=9606, test=False):
    """Map identifiers through the UniProt ID-mapping service.

    https://www.uniprot.org/help/api_idmapping

    :param queries: iterable of identifiers to map
    :param frm: source identifier namespace (UniProt abbreviation)
    :param to: target identifier namespace (UniProt abbreviation)
    :param organism_taxid: NCBI taxonomy id restricting the mapping
    :param test: if True, print the request URL for debugging
    :returns: DataFrame with columns [frm, to], or None on HTTP error
    """
    from io import StringIO

    url = 'https://www.uniprot.org/uploadlists/'
    params = {
        'from': frm,
        'to': to,
        'format': 'tab',
        'organism': organism_taxid,
        'query': ' '.join(queries),
    }
    response = requests.get(url, params=params)
    if test:
        print(response.url)
    if response.ok:
        # Parse the body we already downloaded; the original passed
        # response.url to pd.read_table, fetching the data a second time
        # (read_table is also deprecated in favour of read_csv).
        df = pd.read_csv(StringIO(response.text), sep='\t')
        df.columns = [frm, to]
        return df
    else:
        print('Something went wrong ', response.status_code)
https://www.uniprot.org/help/api_idmapping
def upload_file(self, metadata, filename, signer=None, sign_password=None,
                filetype='sdist', pyversion='source', keystore=None):
    """
    Upload a release file to the index.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the file to be uploaded.
    :param filename: The pathname of the file to be uploaded.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key used
                          for signing.
    :param filetype: The type of the file being uploaded. This is the
                     distutils command which produced that file, e.g.
                     ``sdist`` or ``bdist_wheel``.
    :param pyversion: The version of Python which the release relates
                      to. For code compatible with any Python, this would
                      be ``source``, otherwise it would be e.g. ``3.2``.
    :param keystore: The path to a directory which contains the keys
                     used in signing. If not specified, the instance's
                     ``gpg_home`` attribute is used instead.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    if not os.path.exists(filename):
        raise DistlibException('not found: %s' % filename)
    metadata.validate()
    d = metadata.todict()
    sig_file = None
    if signer:
        # Signing is best-effort: warn and continue unsigned if no GPG.
        if not self.gpg:
            logger.warning('no signing program available - not signed')
        else:
            sig_file = self.sign_file(filename, signer, sign_password,
                                      keystore)
    with open(filename, 'rb') as f:
        file_data = f.read()
    md5_digest = hashlib.md5(file_data).hexdigest()
    sha256_digest = hashlib.sha256(file_data).hexdigest()
    # NOTE(review): 'protcol_version' looks misspelled but appears to match
    # the field name the legacy PyPI upload API accepts — confirm against
    # the server before changing the spelling.
    d.update({
        ':action': 'file_upload',
        'protcol_version': '1',
        'filetype': filetype,
        'pyversion': pyversion,
        'md5_digest': md5_digest,
        'sha256_digest': sha256_digest,
    })
    files = [('content', os.path.basename(filename), file_data)]
    if sig_file:
        with open(sig_file, 'rb') as f:
            sig_data = f.read()
        files.append(('gpg_signature', os.path.basename(sig_file),
                      sig_data))
        # Remove the temporary directory holding the signature file.
        shutil.rmtree(os.path.dirname(sig_file))
    request = self.encode_request(d.items(), files)
    return self.send_request(request)
Upload a release file to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the file to be uploaded. :param filename: The pathname of the file to be uploaded. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param filetype: The type of the file being uploaded. This is the distutils command which produced that file, e.g. ``sdist`` or ``bdist_wheel``. :param pyversion: The version of Python which the release relates to. For code compatible with any Python, this would be ``source``, otherwise it would be e.g. ``3.2``. :param keystore: The path to a directory which contains the keys used in signing. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The HTTP response received from PyPI upon submission of the request.
def create(cls, csr, duration, package, altnames=None, dcv_method=None):
    """ Create a new certificate. """
    params = {'csr': csr, 'package': package, 'duration': duration}
    if altnames:
        params['altnames'] = altnames
    if dcv_method:
        params['dcv_method'] = dcv_method

    try:
        result = cls.call('cert.create', params)
    except UsageError:
        # Re-run in dry-run mode to collect detailed per-attribute errors,
        # report them, then re-raise the original failure.
        params['--dry-run'] = True
        errors = cls.call('cert.create', params)
        cls.error('\n'.join('%s (%s)' % (err['reason'], err['attr'])
                            for err in errors))
        raise

    if dcv_method in ('dns', 'file'):
        cls.advice_dcv_method(csr, package, altnames, dcv_method,
                              cert_id=result['params'].get('cert_id'))

    return result
Create a new certificate.
def create_tablefn_map(fns, pqdb, poolnames):
    """Stores protein/peptide table names in DB, returns a map with their
    respective DB IDs"""
    # Map each pool name to its DB id.
    pool_ids = dict(pqdb.get_all_poolnames())
    table_records = [(pool_ids[pool], os.path.basename(fn))
                     for fn, pool in zip(fns, poolnames)]
    pqdb.store_table_files(table_records)
    return pqdb.get_tablefn_map()
Stores protein/peptide table names in DB, returns a map with their respective DB IDs
def drag_sphere(Re, Method=None, AvailableMethods=False):
    r'''This function handles calculation of drag coefficient on spheres.
    Twenty methods are available, all requiring only the Reynolds number of
    the sphere. Most methods are valid from Re=0 to Re=200,000. A
    correlation will be automatically selected if none is specified.
    The full list of correlations valid for a given Reynolds number can be
    obtained with the `AvailableMethods` flag.

    If no correlation is selected, the following rules are used:

    * If Re < 0.01, use Stoke's solution.
    * If 0.01 <= Re < 0.1, linearly combine 'Barati' with Stokes's solution
      such that at Re = 0.1 the solution is 'Barati', and at Re = 0.01 the
      solution is 'Stokes'.
    * If 0.1 <= Re <= ~212963, use the 'Barati' solution.
    * If ~212963 < Re <= 1E6, use the 'Barati_high' solution.
    * For Re > 1E6, raises an exception; no valid results have been found.

    Examples
    --------
    >>> drag_sphere(200)
    0.7682237950389874

    Parameters
    ----------
    Re : float
        Particle Reynolds number of the sphere using the surrounding fluid
        density and viscosity, [-]

    Returns
    -------
    Cd : float
        Drag coefficient [-]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to calculate `Cd` with the given
        `Re`

    Other Parameters
    ----------------
    Method : string, optional
        A string of the function name to use, as in the dictionary
        drag_sphere_correlations
    AvailableMethods : bool, optional
        If True, function will consider which methods which can be used to
        calculate `Cd` with the given `Re`
    '''
    def list_methods():
        # A correlation is applicable when Re lies inside its validity
        # range; a None endpoint means unbounded on that side.
        methods = []
        for key, (func, Re_min, Re_max) in drag_sphere_correlations.items():
            if (Re_min is None or Re > Re_min) and (Re_max is None or Re < Re_max):
                methods.append(key)
        return methods

    if AvailableMethods:
        return list_methods()
    if not Method:
        if Re > 0.1:
            # Smooth transition point between the two models
            if Re <= 212963.26847812787:
                return Barati(Re)
            elif Re <= 1E6:
                return Barati_high(Re)
            else:
                raise ValueError('No models implement a solution for Re > 1E6')
        elif Re >= 0.01:
            # Re from 0.01 to 0.1
            ratio = (Re - 0.01)/(0.1 - 0.01)
            # Ensure a smooth transition by linearly switching to Stokes' law
            return ratio*Barati(Re) + (1-ratio)*Stokes(Re)
        else:
            return Stokes(Re)

    if Method in drag_sphere_correlations:
        return drag_sphere_correlations[Method][0](Re)
    else:
        # Fixed message: original raised "Failure in in function", which
        # gave no hint about the cause ("in in" typo, no method name).
        raise Exception('Failure in function: method %r not recognized' % (Method,))
r'''This function handles calculation of drag coefficient on spheres. Twenty methods are available, all requiring only the Reynolds number of the sphere. Most methods are valid from Re=0 to Re=200,000. A correlation will be automatically selected if none is specified. The full list of correlations valid for a given Reynolds number can be obtained with the `AvailableMethods` flag. If no correlation is selected, the following rules are used: * If Re < 0.01, use Stoke's solution. * If 0.01 <= Re < 0.1, linearly combine 'Barati' with Stokes's solution such that at Re = 0.1 the solution is 'Barati', and at Re = 0.01 the solution is 'Stokes'. * If 0.1 <= Re <= ~212963, use the 'Barati' solution. * If ~212963 < Re <= 1E6, use the 'Barati_high' solution. * For Re > 1E6, raises an exception; no valid results have been found. Examples -------- >>> drag_sphere(200) 0.7682237950389874 Parameters ---------- Re : float Particle Reynolds number of the sphere using the surrounding fluid density and viscosity, [-] Returns ------- Cd : float Drag coefficient [-] methods : list, only returned if AvailableMethods == True List of methods which can be used to calculate `Cd` with the given `Re` Other Parameters ---------------- Method : string, optional A string of the function name to use, as in the dictionary drag_sphere_correlations AvailableMethods : bool, optional If True, function will consider which methods which can be used to calculate `Cd` with the given `Re`
def WriteHashBlobReferences(self, references_by_hash, cursor):
    """Writes blob references for a given set of hashes."""
    rows = []
    for hash_id, blob_refs in iteritems(references_by_hash):
        # Serialize all references for this hash into one proto blob.
        serialized = rdf_objects.BlobReferences(
            items=blob_refs).SerializeToString()
        rows.append({
            "hash_id": hash_id.AsBytes(),
            "blob_references": serialized,
        })
    _Insert(cursor, "hash_blob_references", rows)
Writes blob references for a given set of hashes.
def fit(self, X, y):
    """Fit the model using X as training data and y as target values"""
    # Lazy learner: memorize the training data and labels as-is.
    self._data = X
    self._labels = y
    # Distinct class labels (np.unique returns them sorted).
    self._classes = np.unique(y)
    self._is_fitted = True
Fit the model using X as training data and y as target values
def _variable(lexer):
    """Return a variable expression."""
    names = _names(lexer)
    tok = next(lexer)
    if isinstance(tok, LBRACK):
        # NAMES '[' ... ']'
        indices = _indices(lexer)
        _expect_token(lexer, {RBRACK})
    else:
        # Bare NAMES: push the lookahead token back for the caller.
        lexer.unpop_token(tok)
        indices = ()
    return ('var', names, indices)
Return a variable expression.
def splitStis(stisfile, sci_count):
    """
    Split a STIS association file into multiple imset MEF files.
    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing a Warning is printed.

    Parameters
    ----------
    stisfile : str or fits.HDUList
        The association file (or an already-open HDUList) to split.
    sci_count : int
        Number of SCI imsets contained in the association.

    Returns
    -------
    names: list
        a list with the names of the new flt files.
    """
    newfiles = []

    toclose = False
    if isinstance(stisfile, str):
        f = fits.open(stisfile)
        toclose = True
    else:
        f = stisfile
    hdu0 = f[0].copy()
    stisfilename = stisfile.filename()

    for count in range(1,sci_count+1):
        fitsobj = fits.HDUList()
        fitsobj.append(hdu0)
        hdu = f[('sci',count)].copy()
        fitsobj.append(hdu)
        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f[('err', count)].data is None:
                raise ValueError
            # Verify dq array exists
            if f[('dq', count)].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f[('err',count)].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f[('dq',count)].copy()
            fitsobj.append(hdu)
            # Each split file holds exactly one imset, so renumber to 1.
            fitsobj[1].header['EXTVER'] = 1
            fitsobj[2].header['EXTVER'] = 1
            fitsobj[3].header['EXTVER'] = 1
        except ValueError:
            # Incomplete imset: warn and skip it rather than abort.
            print('\nWarning:')
            print('Extension version %d of the input file %s does not' %(count, stisfile))
            print('contain all required image extensions. Each must contain')
            print('populates SCI, ERR and DQ arrays.')
            continue

        # Determine if the file you wish to create already exists on the disk.
        # If the file does exist, replace it.
        if (os.path.exists(newfilename)):
            os.remove(newfilename)
            print(" Replacing "+newfilename+"...")

        # Write out the new file
        fitsobj.writeto(newfilename)
        # Insure returned HDUList is associated with a file
        fitsobj.close()
        fitsobj = fits.open(newfilename, mode='update')
        newfiles.append(fitsobj)
        # Return HDUList, not filename
    f.close()

    sptfilename = fileutil.buildNewRootname(stisfilename, extn='_spt.fits')
    try:
        sptfile = fits.open(sptfilename)
    except IOError:
        print('SPT file not found %s \n' % sptfilename)
        return newfiles

    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1,sci_count+1):
                fitsobj = fits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if (os.path.exists(newfilename)):
                    os.remove(newfilename)
                    print(" Replacing "+newfilename+"...")

                # Write out the new file
                fitsobj.writeto(newfilename)
        except:
            # Best-effort: a failure splitting the spt file is only warned.
            print("Warning: Unable to split spt file %s " % sptfilename)
    if toclose:
        sptfile.close()
    return newfiles
Split a STIS association file into multiple imset MEF files. Split the corresponding spt file if present into single spt files. If an spt file can't be split or is missing a Warning is printed. Returns ------- names: list a list with the names of the new flt files.
def set_lines( lines, target_level, indent_string=" ", indent_empty_lines=False ):
    """
    Sets indentation for the given set of :lines:.

    The first non-empty line is moved to :target_level:; every following
    line is shifted by the same relative amount, preserving the block's
    internal structure.  Modifies :lines: in place.
    """
    first_index = _get_first_non_empty_line_index( lines )
    original_first_level = get_line_level( lines[first_index], indent_string )

    for index in range(first_index, len(lines)):
        if lines[index] == "" and not indent_empty_lines:
            continue

        unindented = get_line_unindented( lines[index], indent_string )
        current_level = get_line_level( lines[index], indent_string )

        if index > first_index:
            # Later lines move by the same amount the first line moved.
            final_first_level = get_line_level( lines[first_index], indent_string )
            shift = final_first_level - original_first_level
            target_level = current_level + shift

        if current_level != target_level:
            lines[index] = indent_string * target_level + unindented
Sets indentation for the given set of :lines:.
def mutualInformation(sp, activeColumnsCurrentEpoch, column_1, column_2):
    """
    Computes the mutual information of the binary variables that represent
    the activation probabilities of two columns. The mutual information
    I(X,Y) of two random variables is given by

    \[ I (X,Y) = \sum_{x,y} p(x,y) log( p(x,y) / ( p(x) p(y) ) ). \]

    (https://en.wikipedia.org/wiki/Mutual_information)
    """
    numSteps = activeColumnsCurrentEpoch.shape[0]

    # Joint and marginal activity counts over the batch.
    jointCounts = {(0, 0): 0., (1, 0): 0., (0, 1): 0., (1, 1): 0.}
    count1 = 0.
    count2 = 0.
    for t in range(numSteps):
        a1 = activeColumnsCurrentEpoch[t, column_1]
        a2 = activeColumnsCurrentEpoch[t, column_2]
        jointCounts[(a1, a2)] += 1.
        count1 += a1
        count2 += a2

    # Accumulate I(X,Y); terms with zero joint probability contribute 0.
    info = 0
    for a, b in [(0, 0), (1, 0), (0, 1), (1, 1)]:
        pab = jointCounts[(a, b)] / numSteps
        pa = count1 / numSteps if a == 1 else 1. - count1 / numSteps
        pb = count2 / numSteps if b == 1 else 1. - count2 / numSteps
        if pab > 0:
            info += pab * np.log2(pab / (pa * pb))
    return info
Computes the mutual information of the binary variables that represent the activation probabilities of two columns. The mutual information I(X,Y) of two random variables is given by \[ I (X,Y) = \sum_{x,y} p(x,y) log( p(x,y) / ( p(x) p(y) ) ). \] (https://en.wikipedia.org/wiki/Mutual_information)
def connect(self, hostname=None, port=None):
    """
    Opens the connection to the remote host or IP address.

    :type  hostname: string
    :param hostname: The remote host or IP address.
    :type  port: int
    :param port: The remote TCP port number.
    """
    if hostname is not None:
        self.host = hostname
    conn = self._connect_hook(self.host, port)
    # Feed protocol/banner information to the OS guesser, then pick the
    # driver matching the guessed OS.
    self.os_guesser.protocol_info(self.get_remote_version())
    self.auto_driver = driver_map[self.guess_os()]
    if self.get_banner():
        self.os_guesser.data_received(self.get_banner(), False)
    return conn
Opens the connection to the remote host or IP address. :type hostname: string :param hostname: The remote host or IP address. :type port: int :param port: The remote TCP port number.
def guess_settings(self, major, minor):
    """Gives a guess about the encoder settings used. Returns an empty
    string if unknown.

    The guess is mostly correct in case the file was encoded with the
    default options (-V --preset --alt-preset --abr -b etc) and no other
    fancy options.

    Args:
        major (int)
        minor (int)
    Returns:
        text
    """
    version = major, minor

    # vbr_method 2: ABR-style encodes (returns --abr / --preset flags).
    if self.vbr_method == 2:
        if version in ((3, 90), (3, 91), (3, 92)) and self.encoding_flags:
            if self.bitrate < 255:
                return u"--alt-preset %d" % self.bitrate
            else:
                return u"--alt-preset %d+" % self.bitrate
        if self.preset_used != 0:
            return u"--preset %d" % self.preset_used
        elif self.bitrate < 255:
            return u"--abr %d" % self.bitrate
        else:
            return u"--abr %d+" % self.bitrate
    # vbr_method 1: CBR-style encodes (returns -b / named presets).
    elif self.vbr_method == 1:
        if self.preset_used == 0:
            if self.bitrate < 255:
                return u"-b %d" % self.bitrate
            else:
                return u"-b 255+"
        elif self.preset_used == 1003:
            return u"--preset insane"
        return u"-b %d" % self.preset_used
    # LAME 3.90-3.92: recognize well-known preset fingerprints first.
    elif version in ((3, 90), (3, 91), (3, 92)):
        preset_key = (self.vbr_quality, self.quality, self.vbr_method,
                      self.lowpass_filter, self.ath_type)
        if preset_key == (1, 2, 4, 19500, 3):
            return u"--preset r3mix"
        if preset_key == (2, 2, 3, 19000, 4):
            return u"--alt-preset standard"
        if preset_key == (2, 2, 3, 19500, 2):
            return u"--alt-preset extreme"
        if self.vbr_method == 3:
            return u"-V %s" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            return u"-V %s --vbr-new" % self.vbr_quality
    # LAME 3.93-3.97: numeric preset ids map to named presets.
    elif version in ((3, 93), (3, 94), (3, 95), (3, 96), (3, 97)):
        if self.preset_used == 1001:
            return u"--preset standard"
        elif self.preset_used == 1002:
            return u"--preset extreme"
        elif self.preset_used == 1004:
            return u"--preset fast standard"
        elif self.preset_used == 1005:
            return u"--preset fast extreme"
        elif self.preset_used == 1006:
            return u"--preset medium"
        elif self.preset_used == 1007:
            return u"--preset fast medium"
        if self.vbr_method == 3:
            return u"-V %s" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            return u"-V %s --vbr-new" % self.vbr_quality
    # LAME 3.98: vbr-new became the default, old code path needs --vbr-old.
    elif version == (3, 98):
        if self.vbr_method == 3:
            return u"-V %s --vbr-old" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            return u"-V %s" % self.vbr_quality
    elif version >= (3, 99):
        if self.vbr_method == 3:
            return u"-V %s --vbr-old" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            p = self.vbr_quality
            adjust_key = (p, self.bitrate, self.lowpass_filter)
            # https://sourceforge.net/p/lame/bugs/455/
            p = {
                (5, 32, 0): 7,
                (5, 8, 0): 8,
                (6, 8, 0): 9,
            }.get(adjust_key, p)
            return u"-V %s" % p
    # Unknown combination: no guess.
    return u""
Gives a guess about the encoder settings used. Returns an empty string if unknown. The guess is mostly correct in case the file was encoded with the default options (-V --preset --alt-preset --abr -b etc) and no other fancy options. Args: major (int) minor (int) Returns: text
def dump(new_data):
    '''
    Replace the entire datastore with a passed data structure

    CLI Example:

    .. code-block:: bash

        salt '*' data.dump '{'eggs': 'spam'}'
    '''
    if not isinstance(new_data, dict):
        # Evaluate once (the original called literal_eval twice) and
        # return False on unparseable input instead of raising, matching
        # every other failure path of this function.
        try:
            new_data = ast.literal_eval(new_data)
        except (SyntaxError, ValueError):
            return False
        if not isinstance(new_data, dict):
            return False

    try:
        datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
        with salt.utils.files.fopen(datastore_path, 'w+b') as fn_:
            serial = salt.payload.Serial(__opts__)
            serial.dump(new_data, fn_)

        return True

    except (IOError, OSError, NameError):
        return False
Replace the entire datastore with a passed data structure CLI Example: .. code-block:: bash salt '*' data.dump '{'eggs': 'spam'}'
def maps_get_default_rules_output_rules_action(self, **kwargs):
    """Auto Generated Code
    """
    # NOTE: the initial "config" element is immediately discarded; the
    # request root becomes "maps_get_default_rules" (auto-generated code).
    config = ET.Element("config")
    root = ET.Element("maps_get_default_rules")
    config = root
    output_el = ET.SubElement(root, "output")
    rules_el = ET.SubElement(output_el, "rules")
    action_el = ET.SubElement(rules_el, "action")
    action_el.text = kwargs.pop('action')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def _work_request(self, worker, md5=None):
    """Wrapper for a work_request to workbench"""
    # Fall back to the session's current md5 when none is supplied.
    if not md5:
        if not self.session.md5:
            return 'Must call worker with an md5 argument...'
        md5 = self.session.md5

    # Sample sets get routed to the set-oriented work request API.
    if self.workbench.is_sample_set(md5):
        return self.workbench.set_work_request(worker, md5)

    try:
        return self.workbench.work_request(worker, md5)
    except zerorpc.exceptions.RemoteError as e:
        return repr_to_str_decorator.r_to_s(self._data_not_found)(e)
Wrapper for a work_request to workbench
def set_payload_format(self, payload_format):
    """Set the payload format for messages sent to and from the VI.

    Returns True if the command was successful.
    """
    status = self._check_command_response_status({
        "command": "payload_format",
        "format": payload_format,
    })
    # Record the new format unconditionally: if the VI was already using
    # the requested format, the command itself reports failure.
    self.format = payload_format
    return status
Set the payload format for messages sent to and from the VI. Returns True if the command was successful.
def replace(self, photo_file, **kwds):
    """
    Endpoint: /photo/<id>/replace.json

    Uploads the specified photo file to replace this photo.
    """
    # Ask the API to replace the photo, then sync this object's fields
    # with the server's updated representation.
    updated = self._client.photo.replace(self, photo_file, **kwds)
    fields = updated.get_fields()
    self._replace_fields(fields)
Endpoint: /photo/<id>/replace.json Uploads the specified photo file to replace this photo.
def pca_to_mapping(pca, **extra_props):
    """
    A helper to return a mapping of a PCA result set suitable for
    reconstructing a planar error surface in other software packages.

    kwargs:
        method (defaults to sampling axes)
    """
    from .axes import sampling_axes
    # The covariance extractor can be overridden via the 'method' kwarg;
    # everything else in extra_props is passed through to the mapping.
    covariance_fn = extra_props.pop('method', sampling_axes)
    mapping = dict(
        axes=pca.axes.tolist(),
        covariance=covariance_fn(pca).tolist(),
        **extra_props)
    return mapping
A helper to return a mapping of a PCA result set suitable for reconstructing a planar error surface in other software packages kwargs: method (defaults to sampling axes)
def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
    """Get the output of an executed command.

    :param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
    :param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
    :raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
    :returns: The output of the command.
    """
    # Interleave stderr with stdout only when explicitly requested.
    if ignore_stderr is False:
        kwargs.setdefault('stderr', subprocess.STDOUT)

    try:
        raw = subprocess.check_output(command, **kwargs)
    except (OSError, subprocess.CalledProcessError) as exc:
        captured = getattr(exc, 'output', '').strip()
        raise cls.ExecutionError(str(exc), captured)
    return raw.decode('utf-8').strip()
Get the output of an executed command. :param command: An iterable representing the command to execute (e.g. ['ls', '-al']). :param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout. :raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`. :returns: The output of the command.
def pack(chunks, r=32):
    """Return integer concatenating integer chunks of r > 0 bit-length.

    Each chunk occupies its own r-bit slot, least significant first.

    >>> pack([0, 1, 0, 1, 0, 1], 1)
    42
    >>> pack([0, 1], 8)
    256
    >>> pack([0, 1], 0)
    Traceback (most recent call last):
        ...
    ValueError: pack needs r > 0
    """
    if r < 1:
        raise ValueError('pack needs r > 0')
    return sum(chunk << (index * r) for index, chunk in enumerate(chunks))
Return integer concatenating integer chunks of r > 0 bit-length. >>> pack([0, 1, 0, 1, 0, 1], 1) 42 >>> pack([0, 1], 8) 256 >>> pack([0, 1], 0) Traceback (most recent call last): ... ValueError: pack needs r > 0
def map_exp_ids(self, exp):
    """Maps ids to feature names.

    Args:
        exp: list of tuples [(id, weight), (id, weight)]

    Returns:
        list of tuples (feature_name, weight)
    """
    # Prefer the discretized names when discretization was applied.
    if self.discretized_feature_names is not None:
        names = self.discretized_feature_names
    else:
        names = self.exp_feature_names
    return [(names[feature_id], weight) for feature_id, weight in exp]
Maps ids to feature names. Args: exp: list of tuples [(id, weight), (id,weight)] Returns: list of tuples (feature_name, weight)
def nvmlDeviceGetMemoryInfo(handle):
    """Return used, free and total device memory, in bytes.

    Wraps the C entry point ``nvmlDeviceGetMemoryInfo``. Enabling ECC
    reduces the reported total (parity overhead); under WDDM most device
    memory is allocated and managed by Windows at startup.

    :param handle: identifier of the target device.
    :returns: a populated ``c_nvmlMemory_t`` structure
        (see ``nvmlMemory_t`` in the NVML docs).
    :raises NVMLError: translated by ``_nvmlCheckReturn`` from any
        non-success NVML status (uninitialized library, invalid
        argument, lost GPU, unknown error).
    """
    memory_info = c_nvmlMemory_t()
    native_fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo")
    status = native_fn(handle, byref(memory_info))
    _nvmlCheckReturn(status)
    return bytes_to_str(memory_info)
r""" /** * Retrieves the amount of used, free and total memory available on the device, in bytes. * * For all products. * * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. * Under WDDM most device memory is allocated and managed on startup by Windows. * * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated * by all active channels on the device. * * See \ref nvmlMemory_t for details on available memory info. * * @param device The identifier of the target device * @param memory Reference in which to return the memory information * * @return * - \ref NVML_SUCCESS if \a memory has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo
def connected_subgraph(self, node):
    """Returns the subgraph containing the given node, its ancestors, and
    its descendants.

    Parameters
    ----------
    node : str
        We want to create the subgraph containing this node.

    Returns
    -------
    subgraph : networkx.DiGraph
        The subgraph containing the specified node.
    """
    G = self.G
    reachable = {node}
    reachable |= dag.ancestors(G, node)
    reachable |= dag.descendants(G, node)
    # Iterate to a fixed point: keep folding in the ancestors and
    # descendants of every member until the set stops growing.
    while True:
        size_before = len(reachable)
        for member in list(reachable):
            reachable |= dag.ancestors(G, member)
            reachable |= dag.descendants(G, member)
        if len(reachable) == size_before:
            break
    return G.subgraph(reachable)
Returns the subgraph containing the given node, its ancestors, and its descendants. Parameters ---------- node : str We want to create the subgraph containing this node. Returns ------- subgraph : networkx.DiGraph The subgraph containing the specified node.
def coin_toss(self):
    """Gets information relating to the opening coin toss.

    Keys are:
    * wonToss - contains the ID of the team that won the toss
    * deferred - bool whether the team that won the toss deferred it

    NOTE(review): the parsing branch below is unimplemented, so this
    method currently returns None in every case; the keys above
    describe the intended (not yet delivered) result.

    :returns: Dictionary of coin toss-related info.
    """
    # Fetch the parsed game page and locate the "Game Info" table.
    doc = self.get_doc()
    table = doc('table#game_info')
    giTable = sportsref.utils.parse_info_table(table)
    if 'Won Toss' in giTable:
        # TODO: finish coinToss function
        # Falls through and implicitly returns None even when toss
        # data is present in the table.
        pass
    else:
        return None
Gets information relating to the opening coin toss. Keys are: * wonToss - contains the ID of the team that won the toss * deferred - bool whether the team that won the toss deferred it :returns: Dictionary of coin toss-related info.
def ping_directories(self, request, queryset, messages=True):
    """
    Ping web directories for selected entries.
    """
    for directory in settings.PING_DIRECTORIES:
        pinger = DirectoryPinger(directory, queryset)
        pinger.join()
        if not messages:
            continue
        # Report each failed ping immediately; count the rest.
        succeeded = 0
        for result in pinger.results:
            if result.get('flerror', True):
                self.message_user(request,
                                  '%s : %s' % (directory,
                                               result['message']))
            else:
                succeeded += 1
        if succeeded:
            self.message_user(
                request,
                _('%(directory)s directory succesfully '
                  'pinged %(success)d entries.') %
                {'directory': directory, 'success': succeeded})
Ping web directories for selected entries.
def _read(self):
    """
    Reads a single event from the joystick, blocking until one is
    available. Returns `None` if a non-key event was read, or an
    `InputEvent` tuple describing the event otherwise.
    """
    raw = self._stick_file.read(self.EVENT_SIZE)
    tv_sec, tv_usec, event_type, code, value = struct.unpack(
        self.EVENT_FORMAT, raw)
    # Anything other than a key event (e.g. sync events) is discarded.
    if event_type != self.EV_KEY:
        return None
    direction_map = {
        self.KEY_UP: DIRECTION_UP,
        self.KEY_DOWN: DIRECTION_DOWN,
        self.KEY_LEFT: DIRECTION_LEFT,
        self.KEY_RIGHT: DIRECTION_RIGHT,
        self.KEY_ENTER: DIRECTION_MIDDLE,
    }
    action_map = {
        self.STATE_PRESS: ACTION_PRESSED,
        self.STATE_RELEASE: ACTION_RELEASED,
        self.STATE_HOLD: ACTION_HELD,
    }
    return InputEvent(
        timestamp=tv_sec + (tv_usec / 1000000),
        direction=direction_map[code],
        action=action_map[value])
Reads a single event from the joystick, blocking until one is available. Returns `None` if a non-key event was read, or an `InputEvent` tuple describing the event otherwise.
def reload_class_methods(self, class_, verbose=True):
    """
    rebinds all class methods

    Args:
        self (object): class instance to reload
        class_ (type): type to reload as

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_class import *  # NOQA
        >>> self = '?'
        >>> class_ = '?'
        >>> result = reload_class_methods(self, class_)
        >>> print(result)
    """
    if verbose:
        print('[util_class] Reloading self=%r as class_=%r' % (self, class_))
    # Swap the instance onto the freshly reloaded class first.
    self.__class__ = class_
    for attr_name in dir(class_):
        candidate = getattr(class_, attr_name)
        if not isinstance(candidate, types.MethodType):
            continue
        # Rebind each reloaded method onto this (old) instance.
        inject_func_as_method(self, candidate, class_=class_,
                              allow_override=True, verbose=verbose)
rebinds all class methods Args: self (object): class instance to reload class_ (type): type to reload as Example: >>> # DISABLE_DOCTEST >>> from utool.util_class import * # NOQA >>> self = '?' >>> class_ = '?' >>> result = reload_class_methods(self, class_) >>> print(result)
def headers(self):
    """
    Returns a list of the last HTTP response headers.
    Header keys are normalized to capitalized form, as in `User-Agent`.
    """
    raw = self.conn.issue_command("Headers")
    result = []
    # Headers arrive CR-separated; a header value may itself span
    # several LF-separated lines, each of which becomes its own entry.
    for entry in raw.split("\r"):
        key, value = entry.split(": ", 1)
        normalized = _normalize_header(key)
        result.extend((normalized, part) for part in value.split("\n"))
    return result
Returns a list of the last HTTP response headers. Header keys are normalized to capitalized form, as in `User-Agent`.
def GC_partial(portion):
    """Manually compute GC content percentage in a DNA string, taking
    ambiguous values into account (according to standard IUPAC notation).

    Unambiguous G/C/S bases count fully; ambiguous codes contribute
    their expected GC fraction (D/H -> 1/3, V/B -> 2/3,
    N/Y/R/K/M -> 1/2).

    :param portion: DNA string in upper- or lower-case IUPAC codes.
    :returns: GC percentage in [0, 100]; 0.0 for an empty string.
    """
    if not portion:
        # Guard against ZeroDivisionError on empty input.
        return 0.0
    counts = collections.Counter(portion)
    gc = ((sum(counts[base] for base in 'gGcCsS') +
           sum(counts[base] for base in 'DdHh') / 3.0 +
           2 * sum(counts[base] for base in 'VvBb') / 3.0 +
           sum(counts[base] for base in 'NnYyRrKkMm') / 2.0) /
          len(portion))
    # The original ``return 0 or 100 * gc`` always evaluated to
    # ``100 * gc`` (0 is falsy); return the value directly.
    return 100 * gc
Manually compute GC content percentage in a DNA string, taking ambiguous values into account (according to standard IUPAC notation).
def _par_write(self, dirname): """ Internal write function to write a formatted parameter file. :type dirname: str :param dirname: Directory to write the parameter file to. """ filename = dirname + '/' + 'template_parameters.csv' with open(filename, 'w') as parfile: for template in self.templates: for key in template.__dict__.keys(): if key not in ['st', 'event']: parfile.write(key + ': ' + str(template.__dict__[key]) + ', ') parfile.write('\n') return self
Internal write function to write a formatted parameter file. :type dirname: str :param dirname: Directory to write the parameter file to.
def put_intent(name=None, description=None, slots=None, sampleUtterances=None, confirmationPrompt=None, rejectionStatement=None, followUpPrompt=None, conclusionStatement=None, dialogCodeHook=None, fulfillmentActivity=None, parentIntentSignature=None, checksum=None):
    """Creates an intent or replaces an existing intent (Amazon Lex
    model-building ``PutIntent`` operation).

    If ``name`` matches an existing intent, the ``$LATEST`` version is
    replaced with the values in the request and fields not provided are
    removed. Requires permission for the ``lex:PutIntent`` action. See
    the AWS API Documentation for the full request/response schemas.

    :type name: string
    :param name: [REQUIRED] The intent name (case insensitive; must not
        collide with a built-in intent name such as AMAZON.HelpIntent).
    :type description: string
    :param description: A description of the intent.
    :type slots: list
    :param slots: Intent slot definitions used to elicit values from the
        user (name, constraint, slot type, elicitation prompt, priority,
        sample utterances, response card).
    :type sampleUtterances: list
    :param sampleUtterances: Utterances that signal the intent, with
        slot names in curly braces, e.g.
        'Order {Quantity} {PizzaSize} pizzas'.
    :type confirmationPrompt: dict
    :param confirmationPrompt: Yes/no prompt confirming the intent is
        ready for fulfillment; provide together with
        ``rejectionStatement``, or neither.
    :type rejectionStatement: dict
    :param rejectionStatement: Statement acknowledging cancellation when
        the user answers 'no' to the confirmation prompt.
    :type followUpPrompt: dict
    :param followUpPrompt: Prompt for additional activity after
        fulfillment; mutually exclusive with ``conclusionStatement``.
    :type conclusionStatement: dict
    :param conclusionStatement: Statement conveyed to the user after the
        Lambda function fulfills the intent.
    :type dialogCodeHook: dict
    :param dialogCodeHook: Lambda function (uri, messageVersion) invoked
        for each user input to personalize the interaction.
    :type fulfillmentActivity: dict
    :param fulfillmentActivity: How the intent is fulfilled: return the
        intent data to the client ('ReturnIntent') or invoke a Lambda
        code hook ('CodeHook').
    :type parentIntentSignature: string
    :param parentIntentSignature: Built-in intent to base this intent on
        (see Standard Built-in Intents in the Alexa Skills Kit).
    :type checksum: string
    :param checksum: Revision identifier of the ``$LATEST`` version;
        leave blank when creating, supply the latest checksum when
        updating (mismatch raises PreconditionFailedException).
    :rtype: dict
    :return: The stored intent definition, including
        ``lastUpdatedDate``, ``createdDate``, ``version`` and
        ``checksum``.
    """
    pass
Creates an intent or replaces an existing intent. To define the interaction between the user and your bot, you use one or more intents. For a pizza ordering bot, for example, you would create an OrderPizza intent. To create an intent or replace an existing intent, you must provide the following: You can specify other optional information in the request, such as: If you specify an existing intent name to update the intent, Amazon Lex replaces the values in the $LATEST version of the slot type with the values in the request. Amazon Lex removes fields that you don't provide in the request. If you don't specify the required fields, Amazon Lex throws an exception. For more information, see how-it-works . This operation requires permissions for the lex:PutIntent action. See also: AWS API Documentation :example: response = client.put_intent( name='string', description='string', slots=[ { 'name': 'string', 'description': 'string', 'slotConstraint': 'Required'|'Optional', 'slotType': 'string', 'slotTypeVersion': 'string', 'valueElicitationPrompt': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'maxAttempts': 123, 'responseCard': 'string' }, 'priority': 123, 'sampleUtterances': [ 'string', ], 'responseCard': 'string' }, ], sampleUtterances=[ 'string', ], confirmationPrompt={ 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'maxAttempts': 123, 'responseCard': 'string' }, rejectionStatement={ 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'responseCard': 'string' }, followUpPrompt={ 'prompt': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'maxAttempts': 123, 'responseCard': 'string' }, 'rejectionStatement': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'responseCard': 'string' } }, conclusionStatement={ 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'responseCard': 'string' }, dialogCodeHook={ 
'uri': 'string', 'messageVersion': 'string' }, fulfillmentActivity={ 'type': 'ReturnIntent'|'CodeHook', 'codeHook': { 'uri': 'string', 'messageVersion': 'string' } }, parentIntentSignature='string', checksum='string' ) :type name: string :param name: [REQUIRED] The name of the intent. The name is not case sensitive. The name can't match a built-in intent name, or a built-in intent name with 'AMAZON.' removed. For example, because there is a built-in intent called AMAZON.HelpIntent , you can't create a custom intent called HelpIntent . For a list of built-in intents, see Standard Built-in Intents in the Alexa Skills Kit . :type description: string :param description: A description of the intent. :type slots: list :param slots: An array of intent slots. At runtime, Amazon Lex elicits required slot values from the user using prompts defined in the slots. For more information, see xref linkend='how-it-works'/. (dict) --Identifies the version of a specific slot. name (string) -- [REQUIRED]The name of the slot. description (string) --A description of the slot. slotConstraint (string) -- [REQUIRED]Specifies whether the slot is required or optional. slotType (string) --The type of the slot, either a custom slot type that you defined or one of the built-in slot types. slotTypeVersion (string) --The version of the slot type. valueElicitationPrompt (dict) --The prompt that Amazon Lex uses to elicit the slot value from the user. messages (list) -- [REQUIRED]An array of objects, each of which provides a message string and its type. You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). (dict) --The message object that provides the message text and its type. contentType (string) -- [REQUIRED]The content type of the message string. content (string) -- [REQUIRED]The text of the message. maxAttempts (integer) -- [REQUIRED]The number of times to prompt the user for information. responseCard (string) --A response card. 
Amazon Lex uses this prompt at runtime, in the PostText API response. It substitutes session attributes and slot values for placeholders in the response card. For more information, see ex-resp-card . priority (integer) --Directs Lex the order in which to elicit this slot value from the user. For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for the slot with priority 1. If multiple slots share the same priority, the order in which Lex elicits values is arbitrary. sampleUtterances (list) --If you know a specific pattern with which users might respond to an Amazon Lex request for a slot value, you can provide those utterances to improve accuracy. This is optional. In most cases, Amazon Lex is capable of understanding user utterances. (string) -- responseCard (string) --A set of possible responses for the slot type used by text-based clients. A user chooses an option from the response card, instead of using text to reply. :type sampleUtterances: list :param sampleUtterances: An array of utterances (strings) that a user might say to signal the intent. For example, 'I want {PizzaSize} pizza', 'Order {Quantity} {PizzaSize} pizzas'. In each utterance, a slot name is enclosed in curly braces. (string) -- :type confirmationPrompt: dict :param confirmationPrompt: Prompts the user to confirm the intent. This question should have a yes or no answer. Amazon Lex uses this prompt to ensure that the user acknowledges that the intent is ready for fulfillment. For example, with the OrderPizza intent, you might want to confirm that the order is correct before placing it. For other intents, such as intents that simply respond to user questions, you might not need to ask the user for confirmation before providing the information. Note You you must provide both the rejectionStatement and the confirmationPrompt , or neither. messages (list) -- [REQUIRED]An array of objects, each of which provides a message string and its type. 
You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). (dict) --The message object that provides the message text and its type. contentType (string) -- [REQUIRED]The content type of the message string. content (string) -- [REQUIRED]The text of the message. maxAttempts (integer) -- [REQUIRED]The number of times to prompt the user for information. responseCard (string) --A response card. Amazon Lex uses this prompt at runtime, in the PostText API response. It substitutes session attributes and slot values for placeholders in the response card. For more information, see ex-resp-card . :type rejectionStatement: dict :param rejectionStatement: When the user answers 'no' to the question defined in confirmationPrompt , Amazon Lex responds with this statement to acknowledge that the intent was canceled. Note You must provide both the rejectionStatement and the confirmationPrompt , or neither. messages (list) -- [REQUIRED]A collection of message objects. (dict) --The message object that provides the message text and its type. contentType (string) -- [REQUIRED]The content type of the message string. content (string) -- [REQUIRED]The text of the message. responseCard (string) --At runtime, if the client is using the API, Amazon Lex includes the response card in the response. It substitutes all of the session attributes and slot values for placeholders in the response card. :type followUpPrompt: dict :param followUpPrompt: A user prompt for additional activity after an intent is fulfilled. For example, after the OrderPizza intent is fulfilled (your Lambda function placed an order with a pizzeria), you might prompt the user to find if they want to order a drink (assuming that you have defined an OrderDrink intent in your bot). Note The followUpPrompt and conclusionStatement are mutually exclusive. You can specify only one. 
For example, your bot may not solicit both the following: Follow up prompt - '$session.FirstName , your pizza order has been placed. Would you like to order a drink or a dessert?' Conclusion statement - '$session.FirstName , your pizza order has been placed.' prompt (dict) -- [REQUIRED]Obtains information from the user. messages (list) -- [REQUIRED]An array of objects, each of which provides a message string and its type. You can specify the message string in plain text or in Speech Synthesis Markup Language (SSML). (dict) --The message object that provides the message text and its type. contentType (string) -- [REQUIRED]The content type of the message string. content (string) -- [REQUIRED]The text of the message. maxAttempts (integer) -- [REQUIRED]The number of times to prompt the user for information. responseCard (string) --A response card. Amazon Lex uses this prompt at runtime, in the PostText API response. It substitutes session attributes and slot values for placeholders in the response card. For more information, see ex-resp-card . rejectionStatement (dict) -- [REQUIRED]If the user answers 'no' to the question defined in confirmationPrompt , Amazon Lex responds with this statement to acknowledge that the intent was canceled. messages (list) -- [REQUIRED]A collection of message objects. (dict) --The message object that provides the message text and its type. contentType (string) -- [REQUIRED]The content type of the message string. content (string) -- [REQUIRED]The text of the message. responseCard (string) --At runtime, if the client is using the API, Amazon Lex includes the response card in the response. It substitutes all of the session attributes and slot values for placeholders in the response card. :type conclusionStatement: dict :param conclusionStatement: The statement that you want Amazon Lex to convey to the user after the intent is successfully fulfilled by the Lambda function. 
This element is relevant only if you provide a Lambda function in the fulfillmentActivity . If you return the intent to the client application, you can't specify this element. Note The followUpPrompt and conclusionStatement are mutually exclusive. You can specify only one. messages (list) -- [REQUIRED]A collection of message objects. (dict) --The message object that provides the message text and its type. contentType (string) -- [REQUIRED]The content type of the message string. content (string) -- [REQUIRED]The text of the message. responseCard (string) --At runtime, if the client is using the API, Amazon Lex includes the response card in the response. It substitutes all of the session attributes and slot values for placeholders in the response card. :type dialogCodeHook: dict :param dialogCodeHook: Specifies a Lambda function to invoke for each user input. You can invoke this Lambda function to personalize user interaction. For example, suppose your bot determines that the user is John. Your Lambda function might retrieve John's information from a backend database and prepopulate some of the values. For example, if you find that John is gluten intolerant, you might set the corresponding intent slot, GlutenIntolerant , to true. You might find John's phone number and set the corresponding session attribute. uri (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Lambda function. messageVersion (string) -- [REQUIRED]The version of the request-response that you want Amazon Lex to use to invoke your Lambda function. For more information, see using-lambda . :type fulfillmentActivity: dict :param fulfillmentActivity: Describes how the intent is fulfilled. For example, after a user provides all of the information for a pizza order, fulfillmentActivity defines how the bot places an order with a local pizza store. 
You might configure Amazon Lex to return all of the intent information to the client application, or direct it to invoke a Lambda function that can process the intent (for example, place an order with a pizzeria). type (string) -- [REQUIRED]How the intent should be fulfilled, either by running a Lambda function or by returning the slot data to the client application. codeHook (dict) --A description of the Lambda function that is run to fulfill the intent. uri (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Lambda function. messageVersion (string) -- [REQUIRED]The version of the request-response that you want Amazon Lex to use to invoke your Lambda function. For more information, see using-lambda . :type parentIntentSignature: string :param parentIntentSignature: A unique identifier for the built-in intent to base this intent on. To find the signature for an intent, see Standard Built-in Intents in the Alexa Skills Kit . :type checksum: string :param checksum: Identifies a specific revision of the $LATEST version. When you create a new intent, leave the checksum field blank. If you specify a checksum you get a BadRequestException exception. When you want to update a intent, set the checksum field to the checksum of the most recent revision of the $LATEST version. If you don't specify the checksum field, or if the checksum does not match the $LATEST version, you get a PreconditionFailedException exception. 
:rtype: dict :return: { 'name': 'string', 'description': 'string', 'slots': [ { 'name': 'string', 'description': 'string', 'slotConstraint': 'Required'|'Optional', 'slotType': 'string', 'slotTypeVersion': 'string', 'valueElicitationPrompt': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'maxAttempts': 123, 'responseCard': 'string' }, 'priority': 123, 'sampleUtterances': [ 'string', ], 'responseCard': 'string' }, ], 'sampleUtterances': [ 'string', ], 'confirmationPrompt': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'maxAttempts': 123, 'responseCard': 'string' }, 'rejectionStatement': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'responseCard': 'string' }, 'followUpPrompt': { 'prompt': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'maxAttempts': 123, 'responseCard': 'string' }, 'rejectionStatement': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'responseCard': 'string' } }, 'conclusionStatement': { 'messages': [ { 'contentType': 'PlainText'|'SSML', 'content': 'string' }, ], 'responseCard': 'string' }, 'dialogCodeHook': { 'uri': 'string', 'messageVersion': 'string' }, 'fulfillmentActivity': { 'type': 'ReturnIntent'|'CodeHook', 'codeHook': { 'uri': 'string', 'messageVersion': 'string' } }, 'parentIntentSignature': 'string', 'lastUpdatedDate': datetime(2015, 1, 1), 'createdDate': datetime(2015, 1, 1), 'version': 'string', 'checksum': 'string' } :returns: A confirmation prompt to ask the user to confirm an intent. For example, "Shall I order your pizza?" A conclusion statement to send to the user after the intent has been fulfilled. For example, "I placed your pizza order." A follow-up prompt that asks the user for additional activity. For example, asking "Do you want to order a drink with your pizza?"
def create_branch_and_checkout(self, branch_name: str):
    """Create *branch_name* if it does not already exist, then switch to it.

    Args:
        branch_name: name of the branch to create and check out
    """
    for action in (self.create_branch, self.checkout):
        action(branch_name)
Creates a new branch if it doesn't exist Args: branch_name: branch name
def remove(self, address):
    """Drop *address* from the pool, if present, closing every pooled
    connection to that address.

    A missing address is a silent no-op; an IOError raised while closing
    one connection is swallowed so the remaining ones still get closed.
    """
    with self.lock:
        doomed = self.connections.pop(address, ())
        for conn in doomed:
            try:
                conn.close()
            except IOError:
                # Best effort: keep closing the rest of the connections.
                pass
Remove an address from the connection pool, if present, closing all connections to that address.
def run(cmd, capture=False, shell=True, env=None, exit_on_error=None,
        never_pretend=False):
    # type: (str, bool, bool, Dict[str, str], bool) -> ExecResult
    """Execute *cmd* through the shell and wrap the outcome in an ExecResult.

    Args:
        cmd (str): The shell command to execute.
        capture (bool): When True, capture stdout/stderr instead of piping
            them through to the caller's streams.
        shell (bool): Passed straight to ``subprocess.Popen``.
        env (dict[str, str]): Extra environment variables layered on top of
            ``os.environ`` for the subprocess.
        exit_on_error (bool): When True, call ``sys.exit`` with the command's
            return code on failure. Defaults to ``not capture``.
        never_pretend (bool): When True the command always runs, even if the
            ``pretend`` context flag is set; otherwise a pretend run only
            prints the command and returns a fake successful result.

    Returns:
        ExecResult: Return code plus captured output (when *capture* is set).
    """
    if context.get('pretend', False) and not never_pretend:
        cprint('<90>{}', cmd)
        # Fake a successful, output-less run without executing anything.
        return ExecResult(cmd, 0, '', '', True, False)

    if context.get('verbose', 0) > 2:
        cprint('<90>{}', cmd)

    if exit_on_error is None:
        exit_on_error = not capture

    popen_kwargs = {
        'bufsize': 1,  # line buffered
        'shell': shell,
    }
    if capture:
        popen_kwargs['stdout'] = subprocess.PIPE
        popen_kwargs['stderr'] = subprocess.PIPE
    if env is not None:
        merged_env = dict(os.environ)
        merged_env.update(env)
        popen_kwargs['env'] = merged_env

    proc = subprocess.Popen(cmd, **popen_kwargs)
    out, err = proc.communicate()

    try:
        if out is not None:
            out = out.decode('utf-8')
        if err is not None:
            err = err.decode('utf-8')
    except AttributeError:
        # Already str (no .decode) -- nothing to do.
        pass

    if exit_on_error and proc.returncode != 0:
        sys.exit(proc.returncode)

    return ExecResult(
        cmd,
        proc.returncode,
        out,
        err,
        proc.returncode == 0,
        proc.returncode != 0,
    )
Run a shell command. Args: cmd (str): The shell command to execute. shell (bool): Same as in `subprocess.Popen`. capture (bool): If set to True, it will capture the standard input/error instead of just piping it to the caller stdout/stderr. env (dict[str, str]): The subprocess environment variables. exit_on_error (bool): If set to **True**, on failure it will call `sys.exit` with the return code for the executed command. never_pretend (bool): If set to **True** the command will always be executed, even if context.get('pretend') is set to True. If set to **False** or not given, if the `pretend` context value is **True**, this function will only print the command it would execute and then return a fake result. Returns: ExecResult: The execution result containing the return code and output (if capture was set to *True*).
def clean_email(self):
    """Validate that the submitted email belongs to a known account.

    When email confirmation is enabled the address must exist as a
    *verified* ``EmailAddress``; otherwise it must belong to an active
    ``User``.

    Raises:
        forms.ValidationError: if no matching verified/active account
            exists for the address.

    Returns:
        str: the cleaned email address.
    """
    email = self.cleaned_data["email"]
    if EMAIL_CONFIRMATION:
        from .models import EmailAddress
        not_found = EmailAddress.objects.filter(
            email__iexact=email, verified=True
        ).count() == 0
    else:
        # BUG FIX: the original used User.objects.get(...).count() --
        # .get() returns a single model instance (or raises DoesNotExist)
        # and has no .count(); .filter() yields a queryset whose count is
        # 0 when no matching active user exists.
        not_found = User.objects.filter(
            email__iexact=email, is_active=True
        ).count() == 0
    if not_found:
        raise forms.ValidationError(
            _("Email address not verified for any user account")
        )
    return email
ensure email is in the database
def GetProp(self, prop):
    """Return the value of the named property.

    The four well-known properties ('id', 'status', 'bm', 'graph') live as
    plain attributes on the object; any other name is looked up in
    ``self.properties``.

    Raises:
        KeyError: if *prop* is neither well-known nor in ``properties``.
    """
    if prop in ('id', 'status', 'bm', 'graph'):
        # Attribute names match the property names exactly.
        return getattr(self, prop)
    return self.properties[prop]
get attribute
def selection(self):
    """Return the currently selected items as a ``QItemSelection``."""
    result = QtGui.QItemSelection()
    for idx in self.selectedIndexes():
        result.select(idx, idx)
    return result
Returns items in selection as a QItemSelection object
def search(request, abbr):
    '''
    Context:
    - search_text
    - abbr
    - metadata
    - found_by_id
    - bill_results
    - more_bills_available
    - legislators_list
    - nav_active

    Templates:
    - billy/web/public/search_results_no_query.html
    - billy/web/public/search_results_bills_legislators.html
    - billy/web/public/bills_list_row_with_abbr_and_session.html
    '''
    if not request.GET:
        return render(request, templatename('search_results_no_query'),
                      {'abbr': abbr})
    # NOTE(review): unicode()/urllib.urlencode are Python 2 only -- this
    # module presumably targets py2; confirm before porting.
    search_text = unicode(request.GET['search_text']).encode('utf8')

    # First try to get by bill_id.
    if re.search(r'\d', search_text):
        # Any digit in the query is treated as a bill-id lookup and
        # redirected to the bills listing.
        url = '/%s/bills?' % abbr
        url += urllib.urlencode([('search_text', search_text)])
        return redirect(url)
    else:
        found_by_id = False
        kwargs = {}
        if abbr != 'all':
            kwargs['abbr'] = abbr
        bill_results = Bill.search(search_text, sort='last', **kwargs)

        # Limit the bills if it's a search.
        bill_result_count = len(bill_results)
        more_bills_available = (bill_result_count > 5)
        bill_results = bill_results[:5]

        # See if any legislator names match. First split up name to avoid
        # the Richard S. Madaleno problem. See Jira issue OS-32.
        # Tokens shorter than 3 chars or containing '.' (initials) are
        # dropped before building the regex query.
        textbits = search_text.split()
        textbits = filter(lambda s: 2 < len(s), textbits)
        textbits = filter(lambda s: '.' not in s, textbits)
        andspec = []
        for text in textbits:
            andspec.append({'full_name': {'$regex': text, '$options': 'i'}})
        if andspec:
            spec = {'$and': andspec}
        else:
            # No usable tokens survived filtering; match the raw query.
            spec = {'full_name': {'$regex': search_text, '$options': 'i'}}

        # Run the query.
        if abbr != 'all':
            spec[settings.LEVEL_FIELD] = abbr
        legislator_results = list(db.legislators.find(spec).sort(
            [('active', -1)]))

    if abbr != 'all':
        metadata = Metadata.get_object(abbr)
    else:
        metadata = None

    return render(
        request, templatename('search_results_bills_legislators'),
        dict(search_text=search_text,
             abbr=abbr,
             metadata=metadata,
             found_by_id=found_by_id,
             bill_results=bill_results,
             bill_result_count=bill_result_count,
             more_bills_available=more_bills_available,
             legislators_list=legislator_results,
             column_headers_tmplname=None,  # not used
             rowtemplate_name=templatename('bills_list_row_with'
                                           '_abbr_and_session'),
             show_chamber_column=True,
             nav_active=None))
Context: - search_text - abbr - metadata - found_by_id - bill_results - more_bills_available - legislators_list - nav_active Templates: - billy/web/public/search_results_no_query.html - billy/web/public/search_results_bills_legislators.html - billy/web/public/bills_list_row_with_abbr_and_session.html
def predict(self, data, initial_args=None):
    """Run inference against the endpoint backing this predictor.

    Args:
        data (object): Payload to send. If a serializer was configured on
            the RealTimePredictor its output is used as input data;
            otherwise *data* must be a sequence of bytes sent as the
            request body as-is.
        initial_args (dict[str,str]): Optional. Default arguments for the
            boto3 ``invoke_endpoint`` call. Default is None.

    Returns:
        object: Inference for the given input, deserialized if a
        deserializer was configured, otherwise the raw byte sequence.
    """
    request_args = self._create_request_args(data, initial_args)
    runtime = self.sagemaker_session.sagemaker_runtime_client
    raw_response = runtime.invoke_endpoint(**request_args)
    return self._handle_response(raw_response)
Return the inference from the specified endpoint. Args: data (object): Input data for which you want the model to provide inference. If a serializer was specified when creating the RealTimePredictor, the result of the serializer is sent as input data. Otherwise the data must be sequence of bytes, and the predict method then sends the bytes in the request body as is. initial_args (dict[str,str]): Optional. Default arguments for boto3 ``invoke_endpoint`` call. Default is None (no default arguments). Returns: object: Inference for the given input. If a deserializer was specified when creating the RealTimePredictor, the result of the deserializer is returned. Otherwise the response returns the sequence of bytes as is.
def remove_parenthesis_around_tz(cls, timestr):
    """Strip parenthesis wrapping a timezone: ``(GMT)`` becomes ``GMT``.

    :return: the unwrapped string when parenthesis were found,
        ``None`` otherwise
    """
    match = cls.TIMEZONE_PARENTHESIS.match(timestr)
    return None if match is None else match.group(1)
get rid of parenthesis around timezone: (GMT) => GMT :return: the new string if parenthesis were found, `None` otherwise
def create(epsilon: typing.Union[Schedule, float]):
    """Vel factory function building an ``EpsGreedy`` policy factory."""
    arguments = {'epsilon': epsilon}
    return GenericFactory(EpsGreedy, arguments=arguments)
Vel factory function
def _divide_and_round(a, b): """divide a by b and round result to the nearest integer When the ratio is exactly half-way between two integers, the even integer is returned. """ # Based on the reference implementation for divmod_near # in Objects/longobject.c. q, r = divmod(a, b) # round up if either r / b > 0.5, or r / b == 0.5 and q is odd. # The expression r / b > 0.5 is equivalent to 2 * r > b if b is # positive, 2 * r < b if b negative. r *= 2 greater_than_half = r > b if b > 0 else r < b if greater_than_half or r == b and q % 2 == 1: q += 1 return q
divide a by b and round result to the nearest integer When the ratio is exactly half-way between two integers, the even integer is returned.
def __get_resource_entry_data(self, bundleId, languageId, resourceKey, fallback=False): """``GET /{serviceInstanceId}/v2/bundles/{bundleId}/{languageId} /{resourceKey}`` Gets the resource entry information. """ url = self.__get_base_bundle_url() + '/' + bundleId + '/' \ + languageId + '/' + resourceKey params = {'fallback': 'true'} if fallback else None response = self.__perform_rest_call(requestURL=url, params=params) if not response: return None resourceEntryData = response.get(self.__RESPONSE_RESOURCE_ENTRY_KEY) return resourceEntryData
``GET /{serviceInstanceId}/v2/bundles/{bundleId}/{languageId} /{resourceKey}`` Gets the resource entry information.
def check_ellipsis(text):
    """Suggest the ellipsis character over three consecutive dots."""
    err = "typography.symbols.ellipsis"
    msg = u"'...' is an approximation, use the ellipsis symbol '…'."
    regex = r"\.\.\."
    return existence_check(
        text, [regex], err, msg,
        max_errors=3, require_padding=False, offset=0)
Use an ellipsis instead of three dots.
def __get_merged_api_info(self, services): """Builds a description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: The _ApiInfo object to use for the API that the given services implement. """ base_paths = sorted(set(s.api_info.base_path for s in services)) if len(base_paths) != 1: raise api_exceptions.ApiConfigurationError( 'Multiple base_paths found: {!r}'.format(base_paths)) names_versions = sorted(set( (s.api_info.name, s.api_info.api_version) for s in services)) if len(names_versions) != 1: raise api_exceptions.ApiConfigurationError( 'Multiple apis/versions found: {!r}'.format(names_versions)) return services[0].api_info
Builds a description of an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. Returns: The _ApiInfo object to use for the API that the given services implement.
def text_search(self, anchor, byte=False):
    """
    Search the substring in response body.

    :param anchor: string to search
    :param byte: if False then `anchor` should be the unicode string,
        and search will be performed in `response.unicode_body()`
        else `anchor` should be the byte-string and search will be
        performed in `response.body`
    :raises GrabMisuseError: if the anchor type does not match the mode.

    If substring is found return True else False.
    """
    if isinstance(anchor, six.text_type):
        if byte:
            raise GrabMisuseError('The anchor should be bytes string in '
                                  'byte mode')
        return anchor in self.unicode_body()
    # anchor is a byte string (the only other case)
    if byte:
        return anchor in self.body
    # BUG FIX: the original message claimed a *byte* string was expected
    # in non-byte mode; a unicode string is what is required here.
    raise GrabMisuseError('The anchor should be unicode string in '
                          'non-byte mode')
Search the substring in response body. :param anchor: string to search :param byte: if False then `anchor` should be the unicode string, and search will be performed in `response.unicode_body()` else `anchor` should be the byte-string and search will be performed in `response.body` If substring is found return True else False.
def do_loop_turn(self): """Receiver daemon main loop :return: None """ # Begin to clean modules self.check_and_del_zombie_modules() # Maybe the arbiter pushed a new configuration... if self.watch_for_new_conf(timeout=0.05): logger.info("I got a new configuration...") # Manage the new configuration self.setup_new_conf() # Maybe external modules raised 'objects' # we should get them _t0 = time.time() self.get_objects_from_from_queues() statsmgr.timer('core.get-objects-from-queues', time.time() - _t0) # Get external commands from the arbiters... _t0 = time.time() self.get_external_commands_from_arbiters() statsmgr.timer('external-commands.got.time', time.time() - _t0) statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands)) _t0 = time.time() self.push_external_commands_to_schedulers() statsmgr.timer('external-commands.pushed.time', time.time() - _t0) # Say to modules it's a new tick :) _t0 = time.time() self.hook_point('tick') statsmgr.timer('hook.tick', time.time() - _t0)
Receiver daemon main loop :return: None
def find(self, obj, forced_type=None, cls=anyconfig.models.processor.Processor):
    """
    Find the processor instance best suited to process 'obj'.

    :param obj: a file path, file, file-like object, pathlib.Path object
        or an 'anyconfig.globals.IOInfo' (namedtuple) object
    :param forced_type: Forced processor type to find
    :param cls: A class object to compare with 'ptype'
    :return: an instance of processor class to process 'obj'
    :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
    """
    # Delegate to the module-level find() over this registry's processors.
    return find(obj, self.list(), forced_type=forced_type, cls=cls)
:param obj: a file path, file, file-like object, pathlib.Path object or an 'anyconfig.globals.IOInfo' (namedtuple) object :param forced_type: Forced processor type to find :param cls: A class object to compare with 'ptype' :return: an instance of processor class to process 'obj' :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
def _prepare_connection(**nxos_api_kwargs):
    '''
    Build the keyword arguments used to open the connection with the
    remote network device, dropping anything that is not a recognised
    connection-init argument and filling in sensible defaults.
    '''
    nxos_api_kwargs = clean_kwargs(**nxos_api_kwargs)
    # Keep only the arguments the RPC client constructor understands.
    init_kwargs = {key: val for key, val in six.iteritems(nxos_api_kwargs)
                   if key in RPC_INIT_KWARGS}
    init_kwargs.setdefault('host', 'localhost')
    init_kwargs.setdefault('transport', 'https')
    # Default port follows the transport scheme.
    init_kwargs.setdefault(
        'port', 80 if init_kwargs['transport'] == 'http' else 443)
    verify = init_kwargs.get('verify', True)
    if isinstance(verify, bool):
        init_kwargs['verify_ssl'] = verify
    else:
        # A non-bool 'verify' is treated as a CA bundle path.
        init_kwargs['ca_bundle'] = verify
    init_kwargs.setdefault('rpc_version', '2.0')
    init_kwargs.setdefault('timeout', 60)
    return init_kwargs
Prepare the connection with the remote network device, and clean up the key value pairs, removing the args used for the connection init.
def total_statements(self, filename=None):
    """
    Return the total number of statements for the file `filename`.

    If `filename` is not given, return the total number of statements
    for all files.
    """
    if filename is not None:
        return len(self._get_lines_by_filename(filename))
    return sum(len(self._get_lines_by_filename(name))
               for name in self.files())
Return the total number of statements for the file `filename`. If `filename` is not given, return the total number of statements for all files.