Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string, 1 class: entailment)
def _get_stream_metadata(self, use_cached): """Retrieve metadata about this stream from Device Cloud""" if self._cached_data is None or not use_cached: try: self._cached_data = self._conn.get_json("/ws/DataStream/%s" % self._stream_id)["items"][0] except DeviceCloudHttpException as http_exception: if http_exception.response.status_code == 404: raise NoSuchStreamException("Stream with id %r has not been created" % self._stream_id) raise http_exception return self._cached_data
Retrieve metadata about this stream from Device Cloud
entailment
def get_data_type(self, use_cached=True): """Get the data type of this stream if it exists The data type is the type of data stored in this data stream. Valid types include: * INTEGER - data can be represented with a network (= big-endian) 32-bit two's-complement integer. Data with this type maps to a python int. * LONG - data can be represented with a network (= big-endian) 64-bit two's complement integer. Data with this type maps to a python int. * FLOAT - data can be represented with a network (= big-endian) 32-bit IEEE754 floating point. Data with this type maps to a python float. * DOUBLE - data can be represented with a network (= big-endian) 64-bit IEEE754 floating point. Data with this type maps to a python float. * STRING - UTF-8. Data with this type map to a python string * BINARY - Data with this type map to a python string. * UNKNOWN - Data with this type map to a python string. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :return: The data type of this stream as a string :rtype: str """ dtype = self._get_stream_metadata(use_cached).get("dataType") if dtype is not None: dtype = dtype.upper() return dtype
Get the data type of this stream if it exists The data type is the type of data stored in this data stream. Valid types include: * INTEGER - data can be represented with a network (= big-endian) 32-bit two's-complement integer. Data with this type maps to a python int. * LONG - data can be represented with a network (= big-endian) 64-bit two's complement integer. Data with this type maps to a python int. * FLOAT - data can be represented with a network (= big-endian) 32-bit IEEE754 floating point. Data with this type maps to a python float. * DOUBLE - data can be represented with a network (= big-endian) 64-bit IEEE754 floating point. Data with this type maps to a python float. * STRING - UTF-8. Data with this type map to a python string * BINARY - Data with this type map to a python string. * UNKNOWN - Data with this type map to a python string. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :return: The data type of this stream as a string :rtype: str
entailment
def get_data_ttl(self, use_cached=True): """Retrieve the dataTTL for this stream The dataTtl is the time to live (TTL) in seconds for data points stored in the data stream. A data point expires after the configured amount of time and is automatically deleted. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The dataTtl associated with this stream in seconds :rtype: int or None """ data_ttl_text = self._get_stream_metadata(use_cached).get("dataTtl") return int(data_ttl_text)
Retrieve the dataTTL for this stream The dataTtl is the time to live (TTL) in seconds for data points stored in the data stream. A data point expires after the configured amount of time and is automatically deleted. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The dataTtl associated with this stream in seconds :rtype: int or None
entailment
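A brief usage sketch for the two metadata getters above. The client construction and stream lookup are assumptions with placeholder credentials and stream name; only get_data_type() and get_data_ttl() are taken from the code shown here.

from devicecloud import DeviceCloud   # import path is an assumption

dc = DeviceCloud("user", "pass")                          # placeholder credentials
stream = dc.streams.get_stream("mydevice/temperature")    # hypothetical lookup

# The first call fetches and caches the stream metadata; with use_cached=True,
# later calls reuse that cached copy instead of hitting Device Cloud again.
print(stream.get_data_type(use_cached=True))   # e.g. "FLOAT"
print(stream.get_data_ttl(use_cached=True))    # time to live in seconds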
def get_rollup_ttl(self, use_cached=True): """Retrieve the rollupTtl for this stream The rollupTtl is the time to live (TTL) in seconds for the aggregate roll-ups of data points stored in the stream. A roll-up expires after the configured amount of time and is automatically deleted. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The rollupTtl associated with this stream in seconds :rtype: int or None """ rollup_ttl_text = self._get_stream_metadata(use_cached).get("rollupTtl") return int(rollup_ttl_text)
Retrieve the rollupTtl for this stream The rollupTtl is the time to live (TTL) in seconds for the aggregate roll-ups of data points stored in the stream. A roll-up expires after the configured amount of time and is automatically deleted. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The rollupTtl associated with this stream in seconds :rtype: int or None
entailment
def get_current_value(self, use_cached=False): """Return the most recent DataPoint value written to a stream The current value is the last recorded data point for this stream. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The most recent value written to this stream (or None if nothing has been written) :rtype: :class:`~DataPoint` or None """ current_value = self._get_stream_metadata(use_cached).get("currentValue") if current_value: return DataPoint.from_json(self, current_value) else: return None
Return the most recent DataPoint value written to a stream The current value is the last recorded data point for this stream. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The most recent value written to this stream (or None if nothing has been written) :rtype: :class:`~DataPoint` or None
entailment
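A minimal sketch of get_current_value(), assuming `stream` is an instance of the stream class shown above; the DataPoint accessors used at the end are assumptions.

# Note the different default here: get_current_value() re-queries Device Cloud
# unless use_cached=True is passed explicitly.
current = stream.get_current_value()
if current is None:
    print("stream has no data points yet")
else:
    # get_data()/get_timestamp() accessor names are assumptions about DataPoint
    print(current.get_data(), current.get_timestamp())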
def delete(self): """Delete this stream from Device Cloud along with its history This call will return None on success and raise an exception in the event of an error performing the deletion. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has already been deleted """ try: self._conn.delete("/ws/DataStream/{}".format(self.get_stream_id())) except DeviceCloudHttpException as http_exception: if http_exception.response.status_code == 404: raise NoSuchStreamException() # this branch is present, but the DC appears to just return 200 again else: raise http_exception
Delete this stream from Device Cloud along with its history This call will return None on success and raise an exception in the event of an error performing the deletion. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has already been deleted
entailment
def delete_datapoint(self, datapoint): """Delete the provided datapoint from this stream :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error """ datapoint = validate_type(datapoint, DataPoint) self._conn.delete("/ws/DataPoint/{stream_id}/{datapoint_id}".format( stream_id=self.get_stream_id(), datapoint_id=datapoint.get_id(), ))
Delete the provided datapoint from this stream :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
entailment
def delete_datapoints_in_time_range(self, start_dt=None, end_dt=None): """Delete datapoints from this stream between the provided start and end times If neither a start or end time is specified, all data points in the stream will be deleted. :param start_dt: The datetime after which data points should be deleted or None if all data points from the beginning of time should be deleted. :param end_dt: The datetime before which data points should be deleted or None if all data points until the current time should be deleted. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error """ start_dt = to_none_or_dt(validate_type(start_dt, datetime.datetime, type(None))) end_dt = to_none_or_dt(validate_type(end_dt, datetime.datetime, type(None))) params = {} if start_dt is not None: params['startTime'] = isoformat(start_dt) if end_dt is not None: params['endTime'] = isoformat(end_dt) self._conn.delete("/ws/DataPoint/{stream_id}{querystring}".format( stream_id=self.get_stream_id(), querystring="?" + urllib.parse.urlencode(params) if params else "", ))
Delete datapoints from this stream between the provided start and end times If neither a start or end time is specified, all data points in the stream will be deleted. :param start_dt: The datetime after which data points should be deleted or None if all data points from the beginning of time should be deleted. :param end_dt: The datetime before which data points should be deleted or None if all data points until the current time should be deleted. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
entailment
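A short sketch of the None semantics described above, assuming `stream` is an instance of the stream class shown earlier.

import datetime

cutoff = datetime.datetime(2020, 1, 1)

# Delete everything older than the cutoff: start_dt=None means "from the
# beginning of time", so only end_dt is supplied.
stream.delete_datapoints_in_time_range(end_dt=cutoff)

# With neither bound given, every data point in the stream is deleted:
# stream.delete_datapoints_in_time_range()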
def write(self, datapoint): """Write some raw data to a stream using the DataPoint API This method will mutate the datapoint provided to populate it with information available from the stream as it is available (but without making any new HTTP requests). For instance, we will add in information about the stream data type if it is available so that proper type conversion happens. Values already set on the datapoint will not be overridden (except for path) :param DataPoint datapoint: The :class:`.DataPoint` that should be written to Device Cloud """ if not isinstance(datapoint, DataPoint): raise TypeError("First argument must be a DataPoint object") datapoint._stream_id = self.get_stream_id() if self._cached_data is not None and datapoint.get_data_type() is None: datapoint._data_type = self.get_data_type() self._conn.post("/ws/DataPoint/{}".format(self.get_stream_id()), datapoint.to_xml())
Write some raw data to a stream using the DataPoint API This method will mutate the datapoint provided to populate it with information available from the stream as it is available (but without making any new HTTP requests). For instance, we will add in information about the stream data type if it is available so that proper type conversion happens. Values already set on the datapoint will not be overridden (except for path) :param DataPoint datapoint: The :class:`.DataPoint` that should be written to Device Cloud
entailment
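A write() usage sketch, assuming `stream` is an instance of the stream class above. The DataPoint constructor argument is an assumption, and the import path is inferred from the :raises devicecloud.streams...: references in the docstrings.

from devicecloud.streams import DataPoint   # path inferred, not confirmed

point = DataPoint(data=21.5)    # constructor arguments are an assumption
stream.write(point)             # POSTs to /ws/DataPoint/<stream_id>

# If the stream metadata is already cached, write() also fills in the point's
# data type without making an extra HTTP request, as described above.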
def read(self, start_time=None, end_time=None, use_client_timeline=True, newest_first=True, rollup_interval=None, rollup_method=None, timezone=None, page_size=1000): """Read one or more DataPoints from a stream .. warning:: The data points from Device Cloud is a paged data set. When iterating over the result set there could be delays when we hit the end of a page. If this is undesirable, the caller should collect all results into a data structure first before iterating over the result set. :param start_time: The start time for the window of data points to read. None means that we should start with the oldest data available. :type start_time: :class:`datetime.datetime` or None :param end_time: The end time for the window of data points to read. None means that we should include all points received until this point in time. :type end_time: :class:`datetime.datetime` or None :param bool use_client_timeline: If True, the times used will be those provided by clients writing data points into the cloud (which also default to server time if the a timestamp was not included by the client). This is usually what you want. If False, the server timestamp will be used which records when the data point was received. :param bool newest_first: If True, results will be ordered from newest to oldest (descending order). If False, results will be returned oldest to newest. :param rollup_interval: the roll-up interval that should be used if one is desired at all. Rollups will not be performed if None is specified for the interval. Valid roll-up interval values are None, "half", "hourly", "day", "week", and "month". See `DataPoints documentation <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_ for additional details on these values. :type rollup_interval: str or None :param rollup_method: The aggregation applied to values in the points within the specified rollup_interval. Available methods are None, "sum", "average", "min", "max", "count", and "standarddev". See `DataPoint documentation <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_ for additional details on these values. :type rollup_method: str or None :param timezone: timezone for calculating roll-ups. This determines roll-up interval boundaries and only applies to roll-ups of a day or larger (for example, day, week, or month). Note that it does not apply to the startTime and endTime parameters. See the `Timestamps <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#timestamp>`_ and `Supported Time Zones <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#TimeZones>`_ sections for more information. :type timezone: str or None :param int page_size: The number of results that we should attempt to retrieve from the device cloud in each page. Generally, this can be left at its default value unless you have a good reason to change the parameter for performance reasons. :returns: A generator object which one can iterate over the DataPoints read. 
""" is_rollup = False if (rollup_interval is not None) or (rollup_method is not None): is_rollup = True numeric_types = [ STREAM_TYPE_INTEGER, STREAM_TYPE_LONG, STREAM_TYPE_FLOAT, STREAM_TYPE_DOUBLE, STREAM_TYPE_STRING, STREAM_TYPE_BINARY, STREAM_TYPE_UNKNOWN, ] if self.get_data_type(use_cached=True) not in numeric_types: raise InvalidRollupDatatype('Rollups only support numerical DataPoints') # Validate function inputs start_time = to_none_or_dt(validate_type(start_time, datetime.datetime, type(None))) end_time = to_none_or_dt(validate_type(end_time, datetime.datetime, type(None))) use_client_timeline = validate_type(use_client_timeline, bool) newest_first = validate_type(newest_first, bool) rollup_interval = validate_type(rollup_interval, type(None), *six.string_types) if not rollup_interval in {None, ROLLUP_INTERVAL_HALF, ROLLUP_INTERVAL_HOUR, ROLLUP_INTERVAL_DAY, ROLLUP_INTERVAL_WEEK, ROLLUP_INTERVAL_MONTH, }: raise ValueError("Invalid rollup_interval %r provided" % (rollup_interval, )) rollup_method = validate_type(rollup_method, type(None), *six.string_types) if not rollup_method in {None, ROLLUP_METHOD_SUM, ROLLUP_METHOD_AVERAGE, ROLLUP_METHOD_MIN, ROLLUP_METHOD_MAX, ROLLUP_METHOD_COUNT, ROLLUP_METHOD_STDDEV}: raise ValueError("Invalid rollup_method %r provided" % (rollup_method, )) timezone = validate_type(timezone, type(None), *six.string_types) page_size = validate_type(page_size, *six.integer_types) # Remember that there could be multiple pages of data and we want to provide # in iterator over the result set. To start the process out, we need to make # an initial request without a page cursor. We should get one in response to # our first request which we will use to page through the result set query_parameters = { 'timeline': 'client' if use_client_timeline else 'server', 'order': 'descending' if newest_first else 'ascending', 'size': page_size } if start_time is not None: query_parameters["startTime"] = isoformat(start_time) if end_time is not None: query_parameters["endTime"] = isoformat(end_time) if rollup_interval is not None: query_parameters["rollupInterval"] = rollup_interval if rollup_method is not None: query_parameters["rollupMethod"] = rollup_method if timezone is not None: query_parameters["timezone"] = timezone result_size = page_size while result_size == page_size: # request the next page of data or first if pageCursor is not set as query param try: result = self._conn.get_json("/ws/DataPoint/{stream_id}?{query_params}".format( stream_id=self.get_stream_id(), query_params=urllib.parse.urlencode(query_parameters) )) except DeviceCloudHttpException as http_exception: if http_exception.response.status_code == 404: raise NoSuchStreamException() raise http_exception result_size = int(result["resultSize"]) # how many are actually included here? query_parameters["pageCursor"] = result.get("pageCursor") # will not be present if result set is empty for item_info in result.get("items", []): if is_rollup: data_point = DataPoint.from_rollup_json(self, item_info) else: data_point = DataPoint.from_json(self, item_info) yield data_point
Read one or more DataPoints from a stream .. warning:: The data points from Device Cloud is a paged data set. When iterating over the result set there could be delays when we hit the end of a page. If this is undesirable, the caller should collect all results into a data structure first before iterating over the result set. :param start_time: The start time for the window of data points to read. None means that we should start with the oldest data available. :type start_time: :class:`datetime.datetime` or None :param end_time: The end time for the window of data points to read. None means that we should include all points received until this point in time. :type end_time: :class:`datetime.datetime` or None :param bool use_client_timeline: If True, the times used will be those provided by clients writing data points into the cloud (which also default to server time if the a timestamp was not included by the client). This is usually what you want. If False, the server timestamp will be used which records when the data point was received. :param bool newest_first: If True, results will be ordered from newest to oldest (descending order). If False, results will be returned oldest to newest. :param rollup_interval: the roll-up interval that should be used if one is desired at all. Rollups will not be performed if None is specified for the interval. Valid roll-up interval values are None, "half", "hourly", "day", "week", and "month". See `DataPoints documentation <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_ for additional details on these values. :type rollup_interval: str or None :param rollup_method: The aggregation applied to values in the points within the specified rollup_interval. Available methods are None, "sum", "average", "min", "max", "count", and "standarddev". See `DataPoint documentation <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_ for additional details on these values. :type rollup_method: str or None :param timezone: timezone for calculating roll-ups. This determines roll-up interval boundaries and only applies to roll-ups of a day or larger (for example, day, week, or month). Note that it does not apply to the startTime and endTime parameters. See the `Timestamps <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#timestamp>`_ and `Supported Time Zones <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#TimeZones>`_ sections for more information. :type timezone: str or None :param int page_size: The number of results that we should attempt to retrieve from the device cloud in each page. Generally, this can be left at its default value unless you have a good reason to change the parameter for performance reasons. :returns: A generator object which one can iterate over the DataPoints read.
entailment
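A read() usage sketch, assuming `stream` is an instance of the class above. The rollup strings follow the values listed in the docstring, and the DataPoint accessor names are assumptions.

import datetime

start = datetime.datetime.now() - datetime.timedelta(days=1)

# Raw points from the last day, oldest first. read() returns a generator,
# so additional pages are only requested as the loop advances.
for dp in stream.read(start_time=start, newest_first=False):
    print(dp.get_timestamp(), dp.get_data())   # accessor names are assumptions

# Hourly averages over the same window ("hourly"/"average" per the docstring).
hourly = list(stream.read(start_time=start,
                          rollup_interval="hourly",
                          rollup_method="average"))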
def _quoted(value): """Return a single-quoted and escaped (percent-encoded) version of value This function will also perform transforms of known data types to a representation that will be handled by Device Cloud. For instance, datetime objects will be converted to ISO8601. """ if isinstance(value, datetime.datetime): value = isoformat(to_none_or_dt(value)) else: value = str(value) return "'{}'".format(value)
Return a single-quoted and escaped (percent-encoded) version of value This function will also perform transforms of known data types to a representation that will be handled by Device Cloud. For instance, datetime objects will be converted to ISO8601.
entailment
def compile(self): """Compile this expression into a query string""" return "{lhs}{sep}{rhs}".format( lhs=self.lhs.compile(), sep=self.sep, rhs=self.rhs.compile(), )
Compile this expression into a query string
entailment
def compile(self): """Compile this expression into a query string""" return "{attribute}{sep}{value}".format( attribute=self.attribute, sep=self.sep, value=_quoted(self.value) )
Compile this expression into a query string
entailment
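The two compile() methods above belong to expression classes that are not shown. The standalone sketch below (class and attribute names are assumptions) illustrates the pattern: a leaf comparison renders attribute, separator, and quoted value, and a combination joins two compiled sub-expressions.

import datetime

def _quoted(value):
    # same idea as the helper above: datetimes become ISO 8601, everything
    # else is stringified, and the result is wrapped in single quotes
    if isinstance(value, datetime.datetime):
        value = value.isoformat()
    return "'{}'".format(value)

class Comparison(object):          # hypothetical leaf expression
    def __init__(self, attribute, sep, value):
        self.attribute, self.sep, self.value = attribute, sep, value
    def compile(self):
        return "{attribute}{sep}{value}".format(
            attribute=self.attribute, sep=self.sep, value=_quoted(self.value))

class Combination(object):         # hypothetical combining expression
    def __init__(self, lhs, sep, rhs):
        self.lhs, self.sep, self.rhs = lhs, sep, rhs
    def compile(self):
        return "{lhs}{sep}{rhs}".format(
            lhs=self.lhs.compile(), sep=self.sep, rhs=self.rhs.compile())

expr = Combination(Comparison("dpDescription", "=", "temperature"),
                   " and ",
                   Comparison("dpTimestamp", ">", datetime.datetime(2020, 1, 1)))
print(expr.compile())
# dpDescription='temperature' and dpTimestamp>'2020-01-01T00:00:00'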
def _read_msg_header(session): """ Perform a read on input socket to consume headers and then return a tuple of message type, message length. :param session: Push Session to read data for. Returns response type (i.e. PUBLISH_MESSAGE) if header was completely read, otherwise None if header was not completely read. """ try: data = session.socket.recv(6 - len(session.data)) if len(data) == 0: # No Data on Socket. Likely closed. return NO_DATA session.data += data # Data still not completely read. if len(session.data) < 6: return INCOMPLETE except ssl.SSLError: # This can happen when select gets triggered # for an SSL socket and data has not yet been # read. return INCOMPLETE session.message_length = struct.unpack('!i', session.data[2:6])[0] response_type = struct.unpack('!H', session.data[0:2])[0] # Clear out session data as header is consumed. session.data = six.b("") return response_type
Perform a read on input socket to consume headers and then return a tuple of message type, message length. :param session: Push Session to read data for. Returns response type (i.e. PUBLISH_MESSAGE) if header was completely read, otherwise None if header was not completely read.
entailment
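A standalone illustration of the 6-byte header format that _read_msg_header() consumes: a big-endian 2-byte message type followed by a big-endian 4-byte payload length. The PUBLISH_MESSAGE value below is a placeholder, not the library's real constant.

import struct

PUBLISH_MESSAGE = 0x03    # placeholder value for the message-type constant

header = struct.pack('!HI', PUBLISH_MESSAGE, 42)   # type + payload length
assert len(header) == 6

response_type = struct.unpack('!H', header[0:2])[0]    # same slices as above
message_length = struct.unpack('!i', header[2:6])[0]
print(response_type, message_length)                   # 3 42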
def _read_msg(session): """ Perform a read on input socket to consume message and then return the payload and block_id in a tuple. :param session: Push Session to read data for. """ if len(session.data) == session.message_length: # Data Already completely read. Return return True try: data = session.socket.recv(session.message_length - len(session.data)) if len(data) == 0: raise PushException("No Data on Socket!") session.data += data except ssl.SSLError: # This can happen when select gets triggered # for an SSL socket and data has not yet been # read. Wait for it to get triggered again. return False # Whether or not all data was read. return len(session.data) == session.message_length
Perform a read on input socket to consume message and then return the payload and block_id in a tuple. :param session: Push Session to read data for.
entailment
def send_connection_request(self): """ Sends a ConnectionRequest to the iDigi server using the credentials established with the id of the monitor as defined in the monitor member. """ try: self.log.info("Sending ConnectionRequest for Monitor %s." % self.monitor_id) # Send connection request and perform a receive to ensure # request is authenticated. # Protocol Version = 1. payload = struct.pack('!H', 0x01) # Username Length. payload += struct.pack('!H', len(self.client.username)) # Username. payload += six.b(self.client.username) # Password Length. payload += struct.pack('!H', len(self.client.password)) # Password. payload += six.b(self.client.password) # Monitor ID. payload += struct.pack('!L', int(self.monitor_id)) # Header 6 Bytes : Type [2 bytes] & Length [4 Bytes] # ConnectionRequest is Type 0x01. data = struct.pack("!HL", CONNECTION_REQUEST, len(payload)) # The full payload. data += payload # Send Connection Request. self.socket.send(data) # Set a 60 second blocking on recv, if we don't get any data # within 60 seconds, timeout which will throw an exception. self.socket.settimeout(60) # Should receive 10 bytes with ConnectionResponse. response = self.socket.recv(10) # Make socket blocking. self.socket.settimeout(0) if len(response) != 10: raise PushException("Length of Connection Request Response " "(%d) is not 10." % len(response)) # Type response_type = int(struct.unpack("!H", response[0:2])[0]) if response_type != CONNECTION_RESPONSE: raise PushException( "Connection Response Type (%d) is not " "ConnectionResponse Type (%d)." % (response_type, CONNECTION_RESPONSE)) status_code = struct.unpack("!H", response[6:8])[0] self.log.info("Got ConnectionResponse for Monitor %s. Status %s." % (self.monitor_id, status_code)) if status_code != STATUS_OK: raise PushException("Connection Response Status Code (%d) is " "not STATUS_OK (%d)." % (status_code, STATUS_OK)) except Exception as exception: # TODO(posborne): This is bad! It isn't necessarily a socket exception! # Likely a socket exception, close it and raise an exception. self.socket.close() self.socket = None raise exception
Sends a ConnectionRequest to the iDigi server using the credentials established with the id of the monitor as defined in the monitor member.
entailment
def start(self): """Creates a TCP connection to Device Cloud and sends a ConnectionRequest message""" self.log.info("Starting Insecure Session for Monitor %s" % self.monitor_id) if self.socket is not None: raise Exception("Socket already established for %s." % self) try: self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.connect((self.client.hostname, PUSH_OPEN_PORT)) self.socket.setblocking(0) except socket.error as exception: self.socket.close() self.socket = None raise self.send_connection_request()
Creates a TCP connection to Device Cloud and sends a ConnectionRequest message
entailment
def stop(self): """Stop/Close this session Close the socket associated with this session and puts Session into a state such that it can be re-established later. """ if self.socket is not None: self.socket.close() self.socket = None self.data = None
Stop/Close this session Close the socket associated with this session and puts Session into a state such that it can be re-established later.
entailment
def start(self): """ Creates a SSL connection to the iDigi Server and sends a ConnectionRequest message. """ self.log.info("Starting SSL Session for Monitor %s." % self.monitor_id) if self.socket is not None: raise Exception("Socket already established for %s." % self) try: # Create socket, wrap in SSL and connect. self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Validate that certificate server uses matches what we expect. if self.ca_certs is not None: self.socket = ssl.wrap_socket(self.socket, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_certs) else: self.socket = ssl.wrap_socket(self.socket) self.socket.connect((self.client.hostname, PUSH_SECURE_PORT)) self.socket.setblocking(0) except Exception as exception: self.socket.close() self.socket = None raise exception self.send_connection_request()
Creates a SSL connection to the iDigi Server and sends a ConnectionRequest message.
entailment
def _consume_queue(self): """ Continually blocks until data is on the internal queue, then calls the session's registered callback and sends a PublishMessageReceived if callback returned True. """ while True: session, block_id, raw_data = self._queue.get() data = json.loads(raw_data.decode('utf-8')) # decode as JSON try: result = session.callback(data) if result is None: self.log.warn("Callback %r returned None, expected boolean. Messages " "are not marked as received unless True is returned", session.callback) elif result: # Send a Successful PublishMessageReceived with the # block id sent in request if self._write_queue is not None: response_message = struct.pack('!HHH', PUBLISH_MESSAGE_RECEIVED, block_id, 200) self._write_queue.put((session.socket, response_message)) except Exception as exception: self.log.exception(exception) self._queue.task_done()
Continually blocks until data is on the internal queue, then calls the session's registered callback and sends a PublishMessageReceived if callback returned True.
entailment
def queue_callback(self, session, block_id, data): """ Queues up a callback event to occur for a session with the given payload data. Will block if the queue is full. :param session: the session with a defined callback function to call. :param block_id: the block_id of the message received. :param data: the data payload of the message received. """ self._queue.put((session, block_id, data))
Queues up a callback event to occur for a session with the given payload data. Will block if the queue is full. :param session: the session with a defined callback function to call. :param block_id: the block_id of the message received. :param data: the data payload of the message received.
entailment
def _restart_session(self, session): """Restarts and re-establishes session :param session: The session to restart """ # remove old session key, if socket is None, that means the # session was closed by user and there is no need to restart. if session.socket is not None: self.log.info("Attempting restart session for Monitor Id %s." % session.monitor_id) del self.sessions[session.socket.fileno()] session.stop() session.start() self.sessions[session.socket.fileno()] = session
Restarts and re-establishes session :param session: The session to restart
entailment
def _writer(self): """ Indefinitely checks the writer queue for data to write to socket. """ while not self.closed: try: sock, data = self._write_queue.get(timeout=0.1) self._write_queue.task_done() sock.send(data) except Empty: pass # nothing to write after timeout except socket.error as err: if err.errno == errno.EBADF: self._clean_dead_sessions()
Indefinitely checks the writer queue for data to write to socket.
entailment
def _clean_dead_sessions(self): """ Traverses sessions to determine if any sockets were removed (indicates a stopped session). In these cases, remove the session. """ for sck in list(self.sessions.keys()): session = self.sessions[sck] if session.socket is None: del self.sessions[sck]
Traverses sessions to determine if any sockets were removed (indicates a stopped session). In these cases, remove the session.
entailment
def _select(self): """ While the client is not marked as closed, performs a socket select on all PushSession sockets. If any data is received, parses and forwards it on to the callback function. If the callback is successful, a PublishMessageReceived message is sent. """ try: while not self.closed: try: inputready = select.select(self.sessions.keys(), [], [], 0.1)[0] for sock in inputready: session = self.sessions[sock] sck = session.socket if sck is None: # Socket has since been deleted, continue continue # If no defined message length, nothing has been # consumed yet, parse the header. if session.message_length == 0: # Read header information before receiving rest of # message. response_type = _read_msg_header(session) if response_type == NO_DATA: # No data could be read, assume socket closed. if session.socket is not None: self.log.error("Socket closed for Monitor %s." % session.monitor_id) self._restart_session(session) continue elif response_type == INCOMPLETE: # More Data to be read. Continue. continue elif response_type != PUBLISH_MESSAGE: self.log.warn("Response Type (%x) does not match PublishMessage (%x)" % (response_type, PUBLISH_MESSAGE)) continue try: if not _read_msg(session): # Data not completely read, continue. continue except PushException as err: # If Socket is None, it was closed, # otherwise it was closed when it shouldn't # have been restart it. session.data = six.b("") session.message_length = 0 if session.socket is None: del self.sessions[sck] else: self.log.exception(err) self._restart_session(session) continue # We received full payload, # clear session data and parse it. data = session.data session.data = six.b("") session.message_length = 0 block_id = struct.unpack('!H', data[0:2])[0] compression = struct.unpack('!B', data[4:5])[0] payload = data[10:] if compression == 0x01: # Data is compressed, uncompress it. payload = zlib.decompress(payload) # Enqueue payload into a callback queue to be # invoked self._callback_pool.queue_callback(session, block_id, payload) except select.error as err: # Evaluate sessions if we get a bad file descriptor, if # socket is gone, delete the session. if err.args[0] == errno.EBADF: self._clean_dead_sessions() except Exception as err: self.log.exception(err) finally: for session in self.sessions.values(): if session is not None: session.stop()
While the client is not marked as closed, performs a socket select on all PushSession sockets. If any data is received, parses and forwards it on to the callback function. If the callback is successful, a PublishMessageReceived message is sent.
entailment
def _init_threads(self): """Initializes the IO and Writer threads""" if self._io_thread is None: self._io_thread = Thread(target=self._select) self._io_thread.start() if self._writer_thread is None: self._writer_thread = Thread(target=self._writer) self._writer_thread.start()
Initializes the IO and Writer threads
entailment
def create_session(self, callback, monitor_id): """ Creates and Returns a PushSession instance based on the input monitor and callback. When data is received, callback will be invoked. If neither monitor or monitor_id are specified, throws an Exception. :param callback: Callback function to call when PublishMessage messages are received. Expects 1 argument which will contain the payload of the pushed message. Additionally, expects function to return True if callback was able to process the message, False or None otherwise. :param monitor_id: The id of the Monitor, will be queried to understand parameters of the monitor. """ self.log.info("Creating Session for Monitor %s." % monitor_id) session = SecurePushSession(callback, monitor_id, self, self._ca_certs) \ if self._secure else PushSession(callback, monitor_id, self) session.start() self.sessions[session.socket.fileno()] = session self._init_threads() return session
Creates and Returns a PushSession instance based on the input monitor and callback. When data is received, callback will be invoked. If neither monitor or monitor_id are specified, throws an Exception. :param callback: Callback function to call when PublishMessage messages are received. Expects 1 argument which will contain the payload of the pushed message. Additionally, expects function to return True if callback was able to process the message, False or None otherwise. :param monitor_id: The id of the Monitor, will be queried to understand parameters of the monitor.
entailment
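A create_session() usage sketch. The `push_client` object and the monitor id are placeholders; only the callback contract (one payload argument, return True to acknowledge) and the create_session() call come from the code and docstring above.

def on_publish(data):
    # `data` is the decoded payload of a PublishMessage
    print("received push event:", data)
    return True    # True triggers a PublishMessageReceived acknowledgement

session = push_client.create_session(on_publish, monitor_id="12345")
# ... later, shut the client and its worker threads down:
# push_client.stop()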
def stop(self): """Stops all session activity. Blocks until io and writer thread dies """ if self._io_thread is not None: self.log.info("Waiting for I/O thread to stop...") self.closed = True self._io_thread.join() if self._writer_thread is not None: self.log.info("Waiting for Writer Thread to stop...") self.closed = True self._writer_thread.join() self.log.info("All worker threads stopped.")
Stops all session activity. Blocks until io and writer thread dies
entailment
def plotF0(fromTuple, toTuple, mergeTupleList, fnFullPath): ''' Plots the original data in a graph above the plot of the dtw'ed data ''' _matplotlibCheck() plt.hold(True) fig, (ax0) = plt.subplots(nrows=1) # Old data plot1 = ax0.plot(fromTuple[0], fromTuple[1], color='red', linewidth=2, label="From") plot2 = ax0.plot(toTuple[0], toTuple[1], color='blue', linewidth=2, label="To") ax0.set_title("Plot of F0 Morph") plt.ylabel('Pitch (hz)') plt.xlabel('Time (s)') # Merge data colorValue = 0 colorStep = 255.0 / len(mergeTupleList) for timeList, valueList in mergeTupleList: colorValue += colorStep hexValue = "#%02x0000" % int(255 - colorValue) if int(colorValue) == 255: ax0.plot(timeList, valueList, color=hexValue, linewidth=1, label="Merged line, final iteration") else: ax0.plot(timeList, valueList, color=hexValue, linewidth=1) plt.legend(loc=1, borderaxespad=0.) # plt.legend([plot1, plot2, plot3], ["From", "To", "Merged line"]) plt.savefig(fnFullPath, dpi=300, bbox_inches='tight') plt.close(fig)
Plots the original data in a graph above the plot of the dtw'ed data
entailment
def getPitchForIntervals(data, tgFN, tierName): ''' Preps data for use in f0Morph ''' tg = tgio.openTextgrid(tgFN) data = tg.tierDict[tierName].getValuesInIntervals(data) data = [dataList for _, dataList in data] return data
Preps data for use in f0Morph
entailment
def f0Morph(fromWavFN, pitchPath, stepList, outputName, doPlotPitchSteps, fromPitchData, toPitchData, outputMinPitch, outputMaxPitch, praatEXE, keepPitchRange=False, keepAveragePitch=False, sourcePitchDataList=None, minIntervalLength=0.3): ''' Resynthesizes the pitch track from a source to a target wav file fromPitchData and toPitchData should be segmented according to the portions that you want to morph. The two lists must have the same number of sublists. Occurs over a three-step process. This function can act as a template for how to use the function morph_sequence.morphChunkedDataLists to morph pitch contours or other data. By default, everything is morphed, but it is possible to maintain elements of the original speaker's pitch (average pitch and pitch range) by setting the appropriate flag) sourcePitchDataList: if passed in, any regions unspecified by fromPitchData will be sampled from this list. In essence, this allows one to leave segments of the original pitch contour untouched by the morph process. ''' fromDuration = audio_scripts.getSoundFileDuration(fromWavFN) # Find source pitch samples that will be mixed in with the target # pitch samples later nonMorphPitchData = [] if sourcePitchDataList is not None: timeList = sorted(fromPitchData) timeList = [(row[0][0], row[-1][0]) for row in timeList] endTime = sourcePitchDataList[-1][0] invertedTimeList = praatio_utils.invertIntervalList(timeList, endTime) invertedTimeList = [(start, stop) for start, stop in invertedTimeList if stop - start > minIntervalLength] for start, stop in invertedTimeList: pitchList = praatio_utils.getValuesInInterval(sourcePitchDataList, start, stop) nonMorphPitchData.extend(pitchList) # Iterative pitch tier data path pitchTierPath = join(pitchPath, "pitchTiers") resynthesizedPath = join(pitchPath, "f0_resynthesized_wavs") for tmpPath in [pitchTierPath, resynthesizedPath]: utils.makeDir(tmpPath) # 1. Prepare the data for morphing - acquire the segments to merge # (Done elsewhere, with the input fed into this function) # 2. Morph the fromData to the toData try: finalOutputList = morph_sequence.morphChunkedDataLists(fromPitchData, toPitchData, stepList) except IndexError: raise MissingPitchDataException() fromPitchData = [row for subList in fromPitchData for row in subList] toPitchData = [row for subList in toPitchData for row in subList] # 3. Save the pitch data and resynthesize the pitch mergedDataList = [] for i in range(0, len(finalOutputList)): outputDataList = finalOutputList[i] if keepPitchRange is True: outputDataList = morph_sequence.morphRange(outputDataList, fromPitchData) if keepAveragePitch is True: outputDataList = morph_sequence.morphAveragePitch(outputDataList, fromPitchData) if sourcePitchDataList is not None: outputDataList.extend(nonMorphPitchData) outputDataList.sort() stepOutputName = "%s_%0.3g" % (outputName, stepList[i]) pitchFNFullPath = join(pitchTierPath, "%s.PitchTier" % stepOutputName) outputFN = join(resynthesizedPath, "%s.wav" % stepOutputName) pointObj = dataio.PointObject2D(outputDataList, dataio.PITCH, 0, fromDuration) pointObj.save(pitchFNFullPath) outputTime, outputVals = zip(*outputDataList) mergedDataList.append((outputTime, outputVals)) praat_scripts.resynthesizePitch(praatEXE, fromWavFN, pitchFNFullPath, outputFN, outputMinPitch, outputMaxPitch) # 4. 
(Optional) Plot the generated contours if doPlotPitchSteps: fromTime, fromVals = zip(*fromPitchData) toTime, toVals = zip(*toPitchData) plot_morphed_data.plotF0((fromTime, fromVals), (toTime, toVals), mergedDataList, join(pitchTierPath, "%s.png" % outputName))
Resynthesizes the pitch track from a source to a target wav file fromPitchData and toPitchData should be segmented according to the portions that you want to morph. The two lists must have the same number of sublists. Occurs over a three-step process. This function can act as a template for how to use the function morph_sequence.morphChunkedDataLists to morph pitch contours or other data. By default, everything is morphed, but it is possible to maintain elements of the original speaker's pitch (average pitch and pitch range) by setting the appropriate flag) sourcePitchDataList: if passed in, any regions unspecified by fromPitchData will be sampled from this list. In essence, this allows one to leave segments of the original pitch contour untouched by the morph process.
entailment
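A rough call sketch for the pipeline above. Every path, tier name, and pitch bound is a placeholder, and fromPitchRawData/toPitchRawData stand in for pitch data loaded elsewhere; the argument names themselves come from the signatures shown.

fromPitch = getPitchForIntervals(fromPitchRawData, "speakerA.TextGrid", "words")
toPitch = getPitchForIntervals(toPitchRawData, "speakerB.TextGrid", "words")

f0Morph(fromWavFN="speakerA.wav",
        pitchPath="output/pitch",
        stepList=[0.33, 0.66, 1.0],    # three steps from source toward target
        outputName="a_to_b",
        doPlotPitchSteps=True,
        fromPitchData=fromPitch,
        toPitchData=toPitch,
        outputMinPitch=75,
        outputMaxPitch=450,
        praatEXE="/usr/bin/praat")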
def decode(s): """ Converts text in the numbering format of pinyin ("ni3hao3") to text with the appropriate tone marks ("nǐhǎo"). """ s = s.lower() r = "" t = "" for c in s: if c >= 'a' and c <= 'z': t += c elif c == ':': try: if t[-1] == 'u': t = t[:-1] + u"\u00fc" except: pass else: if c >= '0' and c <= '5': tone = int(c) % 5 if tone != 0: m = re.search(u"[aoeiuv\u00fc]+", t) if m is None: t += c elif len(m.group(0)) == 1: t = t[:m.start(0)] + PinyinToneMark[tone][PinyinToneMark[0].index(m.group(0))] + t[m.end(0):] else: if 'a' in t: t = t.replace("a", PinyinToneMark[tone][0]) elif 'o' in t: t = t.replace("o", PinyinToneMark[tone][1]) elif 'e' in t: t = t.replace("e", PinyinToneMark[tone][2]) elif t.endswith("ui"): t = t.replace("i", PinyinToneMark[tone][3]) elif t.endswith("iu"): t = t.replace("u", PinyinToneMark[tone][4]) else: t += "!" r += t t = "" r += t return r
Converts text in the numbering format of pinyin ("ni3hao3") to text with the appropriate tone marks ("nǐhǎo").
entailment
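A small usage note for decode(); the module it lives in is not shown, so it is called directly here.

print(decode("ni3hao3"))    # -> nǐhǎo, the docstring's own example
# The ':' branch converts a preceding 'u' to ü before the tone mark is
# applied, so numbered forms like "lu:4" are also handled.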
def adjustPeakHeight(self, heightAmount): ''' Adjust peak height The foot of the accent is left unchanged and intermediate values are linearly scaled ''' if heightAmount == 0: return pitchList = [f0V for _, f0V in self.pointList] minV = min(pitchList) maxV = max(pitchList) scale = lambda x, y: x + y * (x - minV) / float(maxV - minV) self.pointList = [(timeV, scale(f0V, heightAmount)) for timeV, f0V in self.pointList]
Adjust peak height The foot of the accent is left unchanged and intermediate values are linearly scaled
entailment
def addPlateau(self, plateauAmount, pitchSampFreq=None): ''' Add a plateau A negative plateauAmount will move the peak backwards. A positive plateauAmount will move the peak forwards. All points on the side of the peak growth will also get moved. i.e. the slope of the peak does not change. The accent gets wider instead. If pitchSampFreq=None, the plateau will only be specified by the start and end points of the plateau ''' if plateauAmount == 0: return maxPoint = self.pointList[self.peakI] # Define the plateau if pitchSampFreq is not None: numSteps = abs(int(plateauAmount / pitchSampFreq)) timeChangeList = [stepV * pitchSampFreq for stepV in range(0, numSteps + 1)] else: timeChangeList = [plateauAmount, ] # Shift the side being pushed by the plateau if plateauAmount < 0: # Plateau moves left of the peak leftSide = self.pointList[:self.peakI] rightSide = self.pointList[self.peakI:] plateauPoints = [(maxPoint[0] + timeChange, maxPoint[1]) for timeChange in timeChangeList] leftSide = [(timeV + plateauAmount, f0V) for timeV, f0V in leftSide] self.netLeftShift += plateauAmount elif plateauAmount > 0: # Plateau moves right of the peak leftSide = self.pointList[:self.peakI + 1] rightSide = self.pointList[self.peakI + 1:] plateauPoints = [(maxPoint[0] + timeChange, maxPoint[1]) for timeChange in timeChangeList] rightSide = [(timeV + plateauAmount, f0V) for timeV, f0V in rightSide] self.netRightShift += plateauAmount self.pointList = leftSide + plateauPoints + rightSide
Add a plateau A negative plateauAmount will move the peak backwards. A positive plateauAmount will move the peak forwards. All points on the side of the peak growth will also get moved. i.e. the slope of the peak does not change. The accent gets wider instead. If pitchSampFreq=None, the plateau will only be specified by the start and end points of the plateau
entailment
def shiftAccent(self, shiftAmount): ''' Move the whole accent earlier or later ''' if shiftAmount == 0: return self.pointList = [(time + shiftAmount, pitch) for time, pitch in self.pointList] # Update shift amounts if shiftAmount < 0: self.netLeftShift += shiftAmount elif shiftAmount >= 0: self.netRightShift += shiftAmount
Move the whole accent earlier or later
entailment
def deleteOverlapping(self, targetList): ''' Erase points from another list that overlap with points in this list ''' start = self.pointList[0][0] stop = self.pointList[-1][0] if self.netLeftShift < 0: start += self.netLeftShift if self.netRightShift > 0: stop += self.netRightShift targetList = _deletePoints(targetList, start, stop) return targetList
Erase points from another list that overlap with points in this list
entailment
def reintegrate(self, fullPointList): ''' Integrates the pitch values of the accent into a larger pitch contour ''' # Erase the original region of the accent fullPointList = _deletePoints(fullPointList, self.minT, self.maxT) # Erase the new region of the accent fullPointList = self.deleteOverlapping(fullPointList) # Add the accent into the full pitch list outputPointList = fullPointList + self.pointList outputPointList.sort() return outputPointList
Integrates the pitch values of the accent into a larger pitch contour
entailment
def convert(filename, new_filename=None, overwrite=False, to_encoding='utf-8', force=True): """ Convert file with crappy encoding to a new proper encoding (or vice versa if you wish). filename -- the name, partial path or full path of the file you want to encode to a new encoding new_filename -- (optional) the name of the new file to be generated using the new encoding overwrite -- if `new_filename` is omitted, set this to True to change the supplied file's encoding and not bother creating a new file (be careful! loss of information is likely) to_encoding -- the name of the encoding you wish to convert to (utf-8 by default) force -- Encode even if the current file is already in the correct encoding. """ logging.info('Opening file %s' % filename) f = open(filename) detection = chardet.detect(f.read()) f.close() encoding = detection.get('encoding') confidence = detection.get('confidence') logging.info('I think it is %s with %.1f%% confidence' % (encoding, confidence * 100.0)) delete_original = bool(new_filename) == False and overwrite if not new_filename or new_filename == filename: # use the current filename, but add the encoding to the name (while keeping extension intact) base_name, ext = os.path.splitext(filename) new_filename = base_name + '_%s' % to_encoding + ext if not encoding.lower() == to_encoding.lower(): logging.info('Converting to %s with iconv...' % to_encoding) else: logging.info('Already in correct encoding.') if force: logging.info('Going ahead anyway, because force == True (the force is strong with this one)') else: logging.warning('Stopping. Use force = True if you want to force the encoding.') return None # command example: iconv -f gb18030 -t utf-8 chs.srt > chs-utf8.srt # "iconv" does not support -o parameter now and use stdout to instead. with open(new_filename, 'w') as output_file: p = subprocess.Popen(['iconv', '-f', encoding, '-t', to_encoding + "//IGNORE", \ os.path.abspath(filename)], shell=False, \ stdout=output_file, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) retval = p.wait() if delete_original and os.path.isfile(new_filename): os.remove(filename) os.rename(new_filename, filename) new_filename = filename return new_filename
Convert file with crappy encoding to a new proper encoding (or vice versa if you wish). filename -- the name, partial path or full path of the file you want to encode to a new encoding new_filename -- (optional) the name of the new file to be generated using the new encoding overwrite -- if `new_filename` is omitted, set this to True to change the supplied file's encoding and not bother creating a new file (be careful! loss of information is likely) to_encoding -- the name of the encoding you wish to convert to (utf-8 by default) force -- Encode even if the current file is already in the correct encoding.
entailment
def detect(filename, include_confidence=False): """ Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence) """ f = open(filename) detection = chardet.detect(f.read()) f.close() encoding = detection.get('encoding') confidence = detection.get('confidence') if include_confidence: return (encoding, confidence) return encoding
Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence)
entailment
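A usage sketch for the two helpers above; the file names are placeholders. Note that convert() shells out to iconv, so it must be available on the PATH.

encoding, confidence = detect("subtitles_gb18030.srt", include_confidence=True)
print("detected %s (%.0f%% confidence)" % (encoding, confidence * 100))

# Write a UTF-8 copy alongside the original (subtitles_gb18030_utf-8.srt):
new_file = convert("subtitles_gb18030.srt", to_encoding="utf-8")

# Or convert in place, overwriting the original file:
# convert("subtitles_gb18030.srt", overwrite=True)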
def download(url, localFileName=None, localDirName=None): """ Utility function for downloading files from the web and retaining the same filename. """ localName = url2name(url) req = Request(url) r = urlopen(req) if r.info().has_key('Content-Disposition'): # If the response has Content-Disposition, we take file name from it localName = r.info()['Content-Disposition'].split('filename=') if len(localName) > 1: localName = localName[1] if localName[0] == '"' or localName[0] == "'": localName = localName[1:-1] else: localName = url2name(r.url) elif r.url != url: # if we were redirected, the real file name we take from the final URL localName = url2name(r.url) if localFileName: # we can force to save the file as specified name localName = localFileName if localDirName: # we can also put it in some custom directory if not os.path.exists(localDirName): os.makedirs(localDirName) localName = os.path.join(localDirName, localName) f = open(localName, 'wb') f.write(r.read()) f.close()
Utility function for downloading files from the web and retaining the same filename.
entailment
def _t(unistr, charset_from, charset_to): """ This is a unexposed function, is responsibility for translation internal. """ # if type(unistr) is str: # try: # unistr = unistr.decode('utf-8') # # Python 3 returns AttributeError when .decode() is called on a str # # This means it is already unicode. # except AttributeError: # pass # try: # if type(unistr) is not unicode: # return unistr # # Python 3 returns NameError because unicode is not a type. # except NameError: # pass chars = [] for c in unistr: idx = charset_from.find(c) chars.append(charset_to[idx] if idx!=-1 else c) return u''.join(chars)
This is a unexposed function, is responsibility for translation internal.
entailment
def identify(text): """Identify whether a string is simplified or traditional Chinese. Returns: None: if there are no recognized Chinese characters. EITHER: if the test is inconclusive. TRAD: if the text is traditional. SIMP: if the text is simplified. BOTH: the text has characters recognized as being solely traditional and other characters recognized as being solely simplified. """ filtered_text = set(list(text)).intersection(ALL_CHARS) if len(filtered_text) == 0: return None if filtered_text.issubset(SHARED_CHARS): return EITHER if filtered_text.issubset(TRAD_CHARS): return TRAD if filtered_text.issubset(SIMP_CHARS): return SIMP if filtered_text.difference(TRAD_CHARS).issubset(SIMP_CHARS): return BOTH
Identify whether a string is simplified or traditional Chinese. Returns: None: if there are no recognized Chinese characters. EITHER: if the test is inconclusive. TRAD: if the text is traditional. SIMP: if the text is simplified. BOTH: the text has characters recognized as being solely traditional and other characters recognized as being solely simplified.
entailment
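A call-pattern sketch for identify(). The character sets and the SIMP/TRAD/EITHER/BOTH constants are defined elsewhere in the module, so only the dispatch on the return value is shown.

for text in [u"汉语", u"漢語", u"hello"]:
    result = identify(text)
    if result is None:
        print(repr(text), "-> no recognized Chinese characters")
    elif result == SIMP:
        print(repr(text), "-> simplified")
    elif result == TRAD:
        print(repr(text), "-> traditional")
    else:
        print(repr(text), "-> EITHER/BOTH")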
def makeSequenceRelative(absVSequence): ''' Puts every value in a list on a continuum between 0 and 1 Also returns the min and max values (to reverse the process) ''' if len(absVSequence) < 2 or len(set(absVSequence)) == 1: raise RelativizeSequenceException(absVSequence) minV = min(absVSequence) maxV = max(absVSequence) relativeSeq = [(value - minV) / (maxV - minV) for value in absVSequence] return relativeSeq, minV, maxV
Puts every value in a list on a continuum between 0 and 1 Also returns the min and max values (to reverse the process)
entailment
def makeSequenceAbsolute(relVSequence, minV, maxV): ''' Makes every value in a sequence absolute ''' return [(value * (maxV - minV)) + minV for value in relVSequence]
Makes every value in a sequence absolute
entailment
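A quick round trip through the two helpers above, showing that makeSequenceAbsolute() undoes makeSequenceRelative().

values = [50.0, 75.0, 100.0, 150.0]
relative, minV, maxV = makeSequenceRelative(values)
print(relative)                                      # [0.0, 0.25, 0.5, 1.0]
print(makeSequenceAbsolute(relative, minV, maxV))    # [50.0, 75.0, 100.0, 150.0]

# Sequences with fewer than two points, or with all values equal, raise
# RelativizeSequenceException instead of dividing by zero.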
def _makeTimingRelative(absoluteDataList): ''' Given normal pitch tier data, puts the times on a scale from 0 to 1 Input is a list of tuples of the form ([(time1, pitch1), (time2, pitch2),...] Also returns the start and end time so that the process can be reversed ''' timingSeq = [row[0] for row in absoluteDataList] valueSeq = [list(row[1:]) for row in absoluteDataList] relTimingSeq, startTime, endTime = makeSequenceRelative(timingSeq) relDataList = [tuple([time, ] + row) for time, row in zip(relTimingSeq, valueSeq)] return relDataList, startTime, endTime
Given normal pitch tier data, puts the times on a scale from 0 to 1 Input is a list of tuples of the form ([(time1, pitch1), (time2, pitch2),...] Also returns the start and end time so that the process can be reversed
entailment
def _makeTimingAbsolute(relativeDataList, startTime, endTime): ''' Maps values from 0 to 1 to the provided start and end time Input is a list of tuples of the form ([(time1, pitch1), (time2, pitch2),...] ''' timingSeq = [row[0] for row in relativeDataList] valueSeq = [list(row[1:]) for row in relativeDataList] absTimingSeq = makeSequenceAbsolute(timingSeq, startTime, endTime) absDataList = [tuple([time, ] + row) for time, row in zip(absTimingSeq, valueSeq)] return absDataList
Maps values from 0 to 1 to the provided start and end time Input is a list of tuples of the form ([(time1, pitch1), (time2, pitch2),...]
entailment
def _getSmallestDifference(inputList, targetVal): ''' Returns the value in inputList that is closest to targetVal Iteratively splits the dataset in two, so it should be pretty fast ''' targetList = inputList[:] retVal = None while True: # If we're down to one value, stop iterating if len(targetList) == 1: retVal = targetList[0] break halfPoint = int(len(targetList) / 2.0) - 1 a = targetList[halfPoint] b = targetList[halfPoint + 1] leftDiff = abs(targetVal - a) rightDiff = abs(targetVal - b) # If the distance is 0, stop iterating, the targetVal is present # in the inputList if leftDiff == 0 or rightDiff == 0: retVal = targetVal break # Look at left half or right half if leftDiff < rightDiff: targetList = targetList[:halfPoint + 1] else: targetList = targetList[halfPoint + 1:] return retVal
Returns the value in inputList that is closest to targetVal Iteratively splits the dataset in two, so it should be pretty fast
entailment
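A quick check of the nearest-value search above. The candidate list is assumed to be sorted, since the halving logic relies on ordering.

candidates = [0.0, 0.1, 0.2, 1.0]    # must be sorted for the halving to work
print(_getSmallestDifference(candidates, 0.5))   # 0.2 (closer than 1.0)
print(_getSmallestDifference(candidates, 0.1))   # exact match returns 0.1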
def _getNearestMappingIndexList(fromValList, toValList): ''' Finds the indicies for data points that are closest to each other. The inputs should be in relative time, scaled from 0 to 1 e.g. if you have [0, .1, .5., .9] and [0, .1, .2, 1] will output [0, 1, 1, 2] ''' indexList = [] for fromTimestamp in fromValList: smallestDiff = _getSmallestDifference(toValList, fromTimestamp) i = toValList.index(smallestDiff) indexList.append(i) return indexList
Finds the indices for data points that are closest to each other. The inputs should be in relative time, scaled from 0 to 1 e.g. if you have [0, .1, .5, .9] and [0, .1, .2, 1] will output [0, 1, 1, 2]
entailment
def morphDataLists(fromList, toList, stepList):
    '''
    Iteratively morph fromList into toList using the values 0 to 1 in stepList

    stepList: a value of 0 means no change and a value of 1 means a
    complete change to the other value
    '''
    # If there is more than 1 pitch value, then we align the data in
    # relative time.
    # Each data point comes with a timestamp.  The earliest timestamp is 0
    # and the latest timestamp is 1.  Using this method, for each relative
    # timestamp in the source list, we find the closest relative timestamp
    # in the target list.  Just because two pitch values have the same index
    # in the source and target lists does not mean that they correspond to
    # the same speech event.
    fromListRel, fromStartTime, fromEndTime = _makeTimingRelative(fromList)
    toListRel = _makeTimingRelative(toList)[0]

    # If fromList has more points, we'll have flat areas
    # If toList has more points, we might miss peaks or valleys
    fromTimeList = [dataTuple[0] for dataTuple in fromListRel]
    toTimeList = [dataTuple[0] for dataTuple in toListRel]
    indexList = _getNearestMappingIndexList(fromTimeList, toTimeList)
    alignedToPitchRel = [toListRel[i] for i in indexList]

    for stepAmount in stepList:
        newPitchList = []

        # Perform the interpolation
        for fromTuple, toTuple in zip(fromListRel, alignedToPitchRel):
            fromTime, fromValue = fromTuple
            toTime, toValue = toTuple

            # i + 1 b/c i_0 = 0 = no change
            newValue = fromValue + (stepAmount * (toValue - fromValue))
            newTime = fromTime + (stepAmount * (toTime - fromTime))

            newPitchList.append((newTime, newValue))

        newPitchList = _makeTimingAbsolute(newPitchList, fromStartTime,
                                           fromEndTime)

        yield stepAmount, newPitchList
Iteratively morph fromList into toList using the values 0 to 1 in stepList stepList: a value of 0 means no change and a value of 1 means a complete change to the other value
entailment
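A sketch of consuming the generator; the pitch contours and step amounts below are illustrative only:

fromPitch = [(0.50, 100.0), (0.75, 120.0), (1.00, 110.0)]
toPitch = [(0.50, 140.0), (0.80, 150.0), (1.00, 130.0)]

# stepAmount 0.0 reproduces fromPitch; 1.0 is fully morphed toward toPitch
for stepAmount, morphedPitch in morphDataLists(fromPitch, toPitch,
                                               [0.0, 0.5, 1.0]):
    print(stepAmount, morphedPitch)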
def morphChunkedDataLists(fromDataList, toDataList, stepList):
    '''
    Morph one set of data into another, in a stepwise fashion

    A convenience function.  Given a set of paired data lists,
    this will morph each one individually.

    Returns a single list with all data combined together.
    '''
    assert(len(fromDataList) == len(toDataList))

    # Morph the fromDataList into the toDataList
    outputList = []
    for x, y in zip(fromDataList, toDataList):

        # We cannot morph a region if there is no data or only
        # a single data point for either side
        if (len(x) < 2) or (len(y) < 2):
            continue

        tmpList = [outputPitchList for _, outputPitchList
                   in morphDataLists(x, y, stepList)]
        outputList.append(tmpList)

    # Transpose list
    finalOutputList = outputList.pop(0)
    for subList in outputList:
        for i, subsubList in enumerate(subList):
            finalOutputList[i].extend(subsubList)

    return finalOutputList
Morph one set of data into another, in a stepwise fashion A convenience function. Given a set of paired data lists, this will morph each one individually. Returns a single list with all data combined together.
entailment
def morphAveragePitch(fromDataList, toDataList):
    '''
    Adjusts the values in fromPitchList to have the same average as toPitchList

    Because other manipulations can alter the average pitch, morphing the
    pitch is the last pitch manipulation that should be done

    After the morphing, the code removes any values at or below zero, thus
    the final average might not match the target average.
    '''
    timeList, fromPitchList = zip(*fromDataList)
    toPitchList = [pitchVal for _, pitchVal in toDataList]

    # Zero pitch values aren't meaningful, so filter them out if they are
    # in the dataset
    fromListNoZeroes = [val for val in fromPitchList if val > 0]
    fromAverage = sum(fromListNoZeroes) / float(len(fromListNoZeroes))

    toListNoZeroes = [val for val in toPitchList if val > 0]
    toAverage = sum(toListNoZeroes) / float(len(toListNoZeroes))

    newPitchList = [val - fromAverage + toAverage for val in fromPitchList]
    # finalAverage = sum(newPitchList) / float(len(newPitchList))

    # Removing zeroes and negative pitch values
    retDataList = [(time, pitchVal)
                   for time, pitchVal in zip(timeList, newPitchList)
                   if pitchVal > 0]

    return retDataList
Adjusts the values in fromPitchList to have the same average as toPitchList Because other manipulations can alter the average pitch, morphing the pitch is the last pitch manipulation that should be done After the morphing, the code removes any values at or below zero, thus the final average might not match the target average.
entailment
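A small worked example of the average shift (values chosen so the averages are easy to check):

fromPitch = [(0.1, 90.0), (0.2, 110.0)]   # average 100 Hz
toPitch = [(0.1, 190.0), (0.2, 210.0)]    # average 200 Hz

print(morphAveragePitch(fromPitch, toPitch))
# [(0.1, 190.0), (0.2, 210.0)] -- every value shifted up by 100 Hz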
def morphRange(fromDataList, toDataList):
    '''
    Changes the scale of values in one distribution to that of another

    i.e. The maximum value in fromDataList will be set to the maximum value
    in toDataList.  The 75% largest value in fromDataList will be set to the
    75% largest value in toDataList, etc.

    Small sample sizes will yield results that are not very meaningful
    '''
    # Isolate and sort pitch values
    fromPitchList = [dataTuple[1] for dataTuple in fromDataList]
    toPitchList = [dataTuple[1] for dataTuple in toDataList]

    fromPitchListSorted = sorted(fromPitchList)
    toPitchListSorted = sorted(toPitchList)

    # Bin pitch values between 0 and 1
    fromListRel = makeSequenceRelative(fromPitchListSorted)[0]
    toListRel = makeSequenceRelative(toPitchListSorted)[0]

    # Find each value's closest equivalent in the other list
    indexList = _getNearestMappingIndexList(fromListRel, toListRel)

    # Map the source pitch to the target pitch value
    # Pitch value -> get sorted position -> get corresponding position in
    # target list -> get corresponding pitch value = the new pitch value
    retList = []
    for time, pitch in fromDataList:
        fromI = fromPitchListSorted.index(pitch)
        toI = indexList[fromI]
        newPitch = toPitchListSorted[toI]
        retList.append((time, newPitch))

    return retList
Changes the scale of values in one distribution to that of another i.e. The maximum value in fromDataList will be set to the maximum value in toDataList. The 75% largest value in fromDataList will be set to the 75% largest value in toDataList, etc. Small sample sizes will yield results that are not very meaningful
entailment
def quadraticInterpolation(valueList2d, numDegrees, n,
                           startTime=None, endTime=None):
    '''
    Generates a series of points on a smooth curve that cross the given points

    numDegrees - the degrees of the fitted polynomial - the curve gets weird
                 if this value is too high for the input
    n - number of points to output
    startTime/endTime/n - n points will be generated at evenly spaced
                          intervals between startTime and endTime
    '''
    _numpyCheck()

    x, y = zip(*valueList2d)

    if startTime is None:
        startTime = x[0]
    if endTime is None:
        endTime = x[-1]

    polyFunc = np.poly1d(np.polyfit(x, y, numDegrees))

    newX = np.linspace(startTime, endTime, n)

    retList = [(n, polyFunc(n)) for n in newX]

    return retList
Generates a series of points on a smooth curve that cross the given points numDegrees - the degrees of the fitted polynomial - the curve gets weird if this value is too high for the input n - number of points to output startTime/endTime/n - n points will be generated at evenly spaced intervals between startTime and endTime
entailment
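An illustrative call; it assumes the module's numpy dependency is installed (which _numpyCheck verifies) and uses made-up data points:

points = [(0.0, 1.0), (1.0, 3.0), (2.0, 2.0), (3.0, 4.0)]
for t, v in quadraticInterpolation(points, numDegrees=3, n=5):
    print("%.2f -> %.2f" % (t, v))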
def getIntervals(fn, tierName, filterFunc=None,
                 includeUnlabeledRegions=False):
    '''
    Get information about the 'extract' tier, used by several merge scripts
    '''
    tg = tgio.openTextgrid(fn)
    tier = tg.tierDict[tierName]

    if includeUnlabeledRegions is True:
        tier = tgio._fillInBlanks(tier)

    entryList = tier.entryList
    if filterFunc is not None:
        entryList = [entry for entry in entryList if filterFunc(entry)]

    return entryList
Get information about the 'extract' tier, used by several merge scripts
entailment
def changeDuration(fromWavFN, durationParameters, stepList, outputName,
                   outputMinPitch, outputMaxPitch, praatEXE):
    '''
    Uses praat to morph duration in one file to duration in another

    Praat uses the PSOLA algorithm
    '''
    rootPath = os.path.split(fromWavFN)[0]

    # Prep output directories
    outputPath = join(rootPath, "duration_resynthesized_wavs")
    utils.makeDir(outputPath)

    durationTierPath = join(rootPath, "duration_tiers")
    utils.makeDir(durationTierPath)

    fromWavDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    durationParameters = copy.deepcopy(durationParameters)
    # Pad any gaps with values of 1 (no change in duration)

    # No need to stretch out any pauses at the beginning
    if durationParameters[0][0] != 0:
        tmpVar = (0, durationParameters[0][0] - PRAAT_TIME_DIFF, 1)
        durationParameters.insert(0, tmpVar)

    # Or the end
    if durationParameters[-1][1] < fromWavDuration:
        durationParameters.append((durationParameters[-1][1] + PRAAT_TIME_DIFF,
                                   fromWavDuration, 1))

    # Create the praat script for doing duration manipulation
    for stepAmount in stepList:
        durationPointList = []
        for start, end, ratio in durationParameters:
            percentChange = 1 + (ratio - 1) * stepAmount
            durationPointList.append((start, percentChange))
            durationPointList.append((end, percentChange))

        outputPrefix = "%s_%0.3g" % (outputName, stepAmount)
        durationTierFN = join(durationTierPath,
                              "%s.DurationTier" % outputPrefix)
        outputWavFN = join(outputPath, "%s.wav" % outputPrefix)

        durationTier = dataio.PointObject2D(durationPointList,
                                            dataio.DURATION,
                                            0, fromWavDuration)
        durationTier.save(durationTierFN)

        praat_scripts.resynthesizeDuration(praatEXE,
                                           fromWavFN,
                                           durationTierFN,
                                           outputWavFN,
                                           outputMinPitch, outputMaxPitch)
Uses praat to morph duration in one file to duration in another Praat uses the PSOLA algorithm
entailment
def getMorphParameters(fromTGFN, toTGFN, tierName, filterFunc=None,
                       useBlanks=False):
    '''
    Get intervals for source and target audio files

    Use this information to find out how much to stretch/shrink each source
    interval.  The target values are based on the contents of toTGFN.
    '''
    if filterFunc is None:
        filterFunc = lambda entry: True  # Everything is accepted

    fromEntryList = utils.getIntervals(fromTGFN, tierName,
                                       includeUnlabeledRegions=useBlanks)
    toEntryList = utils.getIntervals(toTGFN, tierName,
                                     includeUnlabeledRegions=useBlanks)

    fromEntryList = [entry for entry in fromEntryList if filterFunc(entry)]
    toEntryList = [entry for entry in toEntryList if filterFunc(entry)]

    assert(len(fromEntryList) == len(toEntryList))

    durationParameters = []
    for fromEntry, toEntry in zip(fromEntryList, toEntryList):
        fromStart, fromEnd = fromEntry[:2]
        toStart, toEnd = toEntry[:2]

        # Praat will ignore a second value appearing at the same time as
        # another so we give each start a tiny offset to distinguish intervals
        # that start and end at the same point
        toStart += PRAAT_TIME_DIFF
        fromStart += PRAAT_TIME_DIFF

        ratio = (toEnd - toStart) / float((fromEnd - fromStart))
        durationParameters.append((fromStart, fromEnd, ratio))

    return durationParameters
Get intervals for source and target audio files Use this information to find out how much to stretch/shrink each source interval. The target values are based on the contents of toTGFN.
entailment
def getManipulatedParamaters(tgFN, tierName, modFunc, filterFunc=None,
                             useBlanks=False):
    '''
    Get intervals for source and target audio files

    Use this information to find out how much to stretch/shrink each source
    interval.  The target values are based on modFunc.
    '''
    fromExtractInfo = utils.getIntervals(tgFN, tierName, filterFunc,
                                         useBlanks)

    durationParameters = []
    for fromInfoTuple in fromExtractInfo:
        fromStart, fromEnd = fromInfoTuple[:2]
        toStart, toEnd = modFunc(fromStart), modFunc(fromEnd)

        # Praat will ignore a second value appearing at the same time as
        # another so we give each start a tiny offset to distinguish intervals
        # that start and end at the same point
        toStart += PRAAT_TIME_DIFF
        fromStart += PRAAT_TIME_DIFF

        ratio = (toEnd - toStart) / float((fromEnd - fromStart))

        ratioTuple = (fromStart, fromEnd, ratio)
        durationParameters.append(ratioTuple)

    return durationParameters
Get intervals for source and target audio files Use this information to find out how much to stretch/shrink each source interval. The target values are based on modfunc.
entailment
def textgridMorphDuration(fromTGFN, toTGFN):
    '''
    A convenience function.  Morphs interval durations of one tg to another.

    This assumes the two textgrids have the same number of segments.
    '''
    fromTG = tgio.openTextgrid(fromTGFN)
    toTG = tgio.openTextgrid(toTGFN)
    adjustedTG = tgio.Textgrid()

    for tierName in fromTG.tierNameList:
        fromTier = fromTG.tierDict[tierName]
        toTier = toTG.tierDict[tierName]
        adjustedTier = fromTier.morph(toTier)
        adjustedTG.addTier(adjustedTier)

    return adjustedTG
A convenience function. Morphs interval durations of one tg to another. This assumes the two textgrids have the same number of segments.
entailment
def getSoundFileDuration(fn):
    '''
    Returns the duration of a wav file (in seconds)
    '''
    audiofile = wave.open(fn, "r")

    params = audiofile.getparams()
    framerate = params[2]
    nframes = params[3]

    duration = float(nframes) / framerate
    return duration
Returns the duration of a wav file (in seconds)
entailment
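The same calculation can be sketched directly with the standard-library wave module (the file name is hypothetical):

import wave

with wave.open("example.wav", "r") as audiofile:
    framerate = audiofile.getframerate()
    nframes = audiofile.getnframes()
    print(float(nframes) / framerate)   # duration in seconds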
def split_text(text, include_part_of_speech=False, strip_english=False,
               strip_numbers=False):
    u"""
    Split Chinese text at word boundaries.

    include_part_of_speech: also returns the Part Of Speech for each of
    the words.  Some of the different parts of speech are:
        r: pronoun
        v: verb
        ns: proper noun
        etc...

    This all gets returned as a tuple:
        index 0: the split word
        index 1: the word's part of speech

    strip_english: remove all entries that have English or numbers in them
    (useful sometimes)
    """
    if not include_part_of_speech:
        seg_list = pseg.cut(text)
        if strip_english:
            seg_list = filter(lambda x: not contains_english(x), seg_list)
        if strip_numbers:
            seg_list = filter(lambda x: not _is_number(x), seg_list)
        return list(map(lambda i: i.word, seg_list))
    else:
        seg_list = pseg.cut(text)
        objs = map(lambda w: (w.word, w.flag), seg_list)
        if strip_english:
            objs = filter(lambda x: not contains_english(x[0]), objs)
        if strip_numbers:
            objs = filter(lambda x: not _is_number(x[0]), objs)
        # if was_traditional:
        #     seg_list = map(tradify, seg_list)
        return list(objs)
u""" Split Chinese text at word boundaries. include_pos: also returns the Part Of Speech for each of the words. Some of the different parts of speech are: r: pronoun v: verb ns: proper noun etc... This all gets returned as a tuple: index 0: the split word index 1: the word's part of speech strip_english: remove all entries that have English or numbers in them (useful sometimes)
entailment
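A hedged usage sketch; it assumes jieba (which provides pseg) is installed, and the tagged output shown is only indicative of what the segmenter typically returns:

words = split_text(u"我爱北京天安门")
print(words)    # e.g. ['我', '爱', '北京', '天安门']

tagged = split_text(u"我爱北京天安门", include_part_of_speech=True)
print(tagged)   # e.g. [('我', 'r'), ('爱', 'v'), ('北京', 'ns'), ('天安门', 'ns')]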
def is_special_atom(cron_atom, span):
    """
    Returns a boolean indicating whether or not the string can be parsed by
    parse_atom to produce a static set. In the process of examining the
    string, the syntax of any special character uses is also checked.
    """
    for special_char in ('%', '#', 'L', 'W'):
        if special_char not in cron_atom:
            continue

        if special_char == '#':
            if span != DAYS_OF_WEEK:
                raise ValueError("\"#\" invalid where used.")
            elif not VALIDATE_POUND.match(cron_atom):
                raise ValueError("\"#\" syntax incorrect.")
        elif special_char == "W":
            if span != DAYS_OF_MONTH:
                raise ValueError("\"W\" syntax incorrect.")
            elif not(VALIDATE_W.match(cron_atom) and
                     int(cron_atom[:-1]) > 0):
                raise ValueError("Invalid use of \"W\".")
        elif special_char == "L":
            if span not in L_FIELDS:
                raise ValueError("\"L\" invalid where used.")
            elif span == DAYS_OF_MONTH:
                if cron_atom != "L":
                    raise ValueError("\"L\" must be alone in days of month.")
            elif span == DAYS_OF_WEEK:
                if not VALIDATE_L_IN_DOW.match(cron_atom):
                    raise ValueError("\"L\" syntax incorrect.")
        elif special_char == "%":
            if not(cron_atom[1:].isdigit() and int(cron_atom[1:]) > 1):
                raise ValueError("\"%\" syntax incorrect.")
        return True
    else:
        return False
Returns a boolean indicating whether or not the string can be parsed by parse_atom to produce a static set. In the process of examining the string, the syntax of any special character uses is also checked.
entailment
def parse_atom(parse, minmax):
    """
    Returns a set containing valid values for a given cron-style range of
    numbers. The 'minmax' argument is a two element iterable containing the
    inclusive upper and lower limits of the expression.

    Examples:
    >>> parse_atom("1-5",(0,6))
    set([1, 2, 3, 4, 5])

    >>> parse_atom("*/6",(0,23))
    set([0, 6, 12, 18])

    >>> parse_atom("18-6/4",(0,23))
    set([18, 22, 0, 4])

    >>> parse_atom("*/9",(0,23))
    set([0, 9, 18])
    """
    parse = parse.strip()
    increment = 1
    if parse == '*':
        return set(xrange(minmax[0], minmax[1] + 1))
    elif parse.isdigit():
        # A single number still needs to be returned as a set
        value = int(parse)
        if value >= minmax[0] and value <= minmax[1]:
            return set((value,))
        else:
            raise ValueError("\"%s\" is not within valid range." % parse)
    elif '-' in parse or '/' in parse:
        divide = parse.split('/')
        subrange = divide[0]

        if len(divide) == 2:
            # Example: 1-3/5 or */7 increment should be 5 and 7 respectively
            increment = int(divide[1])

        if '-' in subrange:
            # Example: a-b
            prefix, suffix = [int(n) for n in subrange.split('-')]
            if prefix < minmax[0] or suffix > minmax[1]:
                raise ValueError("\"%s\" is not within valid range." % parse)
        elif subrange.isdigit():
            # Handle offset increments e.g. 5/15 to run at :05, :20, :35, and :50
            return set(xrange(int(subrange), minmax[1] + 1, increment))
        elif subrange == '*':
            # Include all values with the given range
            prefix, suffix = minmax
        else:
            raise ValueError("Unrecognized symbol \"%s\"" % subrange)

        if prefix < suffix:
            # Example: 7-10
            return set(xrange(prefix, suffix + 1, increment))
        else:
            # Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4)
            noskips = list(xrange(prefix, minmax[1] + 1))
            noskips += list(xrange(minmax[0], suffix + 1))
            return set(noskips[::increment])
    else:
        raise ValueError("Atom \"%s\" not in a recognized format." % parse)
Returns a set containing valid values for a given cron-style range of numbers. The 'minmax' argument is a two element iterable containing the inclusive upper and lower limits of the expression. Examples: >>> parse_atom("1-5",(0,6)) set([1, 2, 3, 4, 5]) >>> parse_atom("*/6",(0,23)) set([0, 6, 12, 18]) >>> parse_atom("18-6/4",(0,23)) set([18, 22, 0, 4]) >>> parse_atom("*/9",(0,23)) set([0, 9, 18])
entailment
def compute_numtab(self):
    """
    Recomputes the sets for the static ranges of the trigger time.

    This method should only be called by the user if the string_tab
    member is modified.
    """
    self.numerical_tab = []

    for field_str, span in zip(self.string_tab, FIELD_RANGES):
        split_field_str = field_str.split(',')
        if len(split_field_str) > 1 and "*" in split_field_str:
            raise ValueError("\"*\" must be alone in a field.")

        unified = set()
        for cron_atom in split_field_str:
            # parse_atom only handles static cases
            if not(is_special_atom(cron_atom, span)):
                unified.update(parse_atom(cron_atom, span))

        self.numerical_tab.append(unified)

    if self.string_tab[2] == "*" and self.string_tab[4] != "*":
        self.numerical_tab[2] = set()
    elif self.string_tab[4] == "*" and self.string_tab[2] != "*":
        self.numerical_tab[4] = set()
Recomputes the sets for the static ranges of the trigger time. This method should only be called by the user if the string_tab member is modified.
entailment
def check_trigger(self, date_tuple, utc_offset=0):
    """
    Returns boolean indicating if the trigger is active at the given time.
    The date tuple should be in the local time. Unless periodicities are
    used, utc_offset does not need to be specified. If periodicities are
    used, specifically in the hour and minutes fields, it is crucial that
    the utc_offset is specified.
    """
    year, month, day, hour, mins = date_tuple
    given_date = datetime.date(year, month, day)
    zeroday = datetime.date(*self.epoch[:3])
    last_dom = calendar.monthrange(year, month)[-1]
    dom_matched = True

    # In calendar and datetime.date.weekday, Monday = 0
    given_dow = (datetime.date.weekday(given_date) + 1) % 7
    first_dow = (given_dow + 1 - day) % 7

    # Figure out how much time has passed from the epoch to the given date
    utc_diff = utc_offset - self.epoch[5]
    mod_delta_yrs = year - self.epoch[0]
    mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
    mod_delta_day = (given_date - zeroday).days
    mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
    mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60

    # Makes iterating through like components easier.
    quintuple = zip(
        (mins, hour, day, month, given_dow),
        self.numerical_tab,
        self.string_tab,
        (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
         mod_delta_day),
        FIELD_RANGES)

    for value, valid_values, field_str, delta_t, field_type in quintuple:
        # All valid, static values for the fields are stored in sets
        if value in valid_values:
            continue

        # The following for loop implements the logic for context
        # sensitive and epoch sensitive constraints. break statements,
        # which are executed when a match is found, lead to a continue
        # in the outer loop. If there are no matches found, the given date
        # does not match expression constraints, so the function returns
        # False as seen at the end of this for...else... construct.
        for cron_atom in field_str.split(','):
            if cron_atom[0] == '%':
                if not(delta_t % int(cron_atom[1:])):
                    break
            elif '#' in cron_atom:
                D, N = int(cron_atom[0]), int(cron_atom[2])
                # Computes Nth occurrence of D day of the week
                if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
                    break
            elif cron_atom[-1] == 'W':
                target = min(int(cron_atom[:-1]), last_dom)
                lands_on = (first_dow + target - 1) % 7
                if lands_on == 0:
                    # Shift from Sun. to Mon. unless Mon. is next month
                    if target < last_dom:
                        target += 1
                    else:
                        target -= 2
                elif lands_on == 6:
                    # Shift from Sat. to Fri. unless Fri. in prior month
                    if target > 1:
                        target -= 1
                    else:
                        target += 2

                # Break if the day is correct, and target is a weekday
                if target == day and (first_dow + target) % 7 > 1:
                    break
            elif cron_atom[-1] == 'L':
                # In dom field, L means the last day of the month
                target = last_dom

                if field_type == DAYS_OF_WEEK:
                    # Calculates the last occurrence of given day of week
                    desired_dow = int(cron_atom[:-1])
                    target = (((desired_dow - first_dow) % 7) + 29)
                    if target > last_dom:
                        target -= 7

                if target == day:
                    break
        else:
            # See 2010.11.15 of CHANGELOG
            if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
                dom_matched = False
                continue
            elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
                # If we got here, then days of months validated so it does
                # not matter that days of the week failed.
                return dom_matched

            # None of the expressions matched which means this field fails
            return False

    # Arriving at this point means the date landed within the constraints
    # of all fields; the associated trigger should be fired.
    return True
Returns boolean indicating if the trigger is active at the given time. The date tuple should be in the local time. Unless periodicities are used, utc_offset does not need to be specified. If periodicities are used, specifically in the hour and minutes fields, it is crucial that the utc_offset is specified.
entailment
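A hedged usage sketch; it assumes the surrounding class is constructed from a cron string in the usual CronExpression style, which is not shown in this excerpt:

# Hypothetical constructor -- only check_trigger is defined above
job = CronExpression("0 0 * * *")                # fire at midnight every day
print(job.check_trigger((2020, 5, 1, 0, 0)))     # True
print(job.check_trigger((2020, 5, 1, 12, 30)))   # False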
def show(self):
    """Show the structure of self.rules_list, only for debug."""
    for rule in self.rules_list:
        result = ", ".join([str(check) for check, deny in rule])
        print(result)
Show the structure of self.rules_list, only for debug.
entailment
def run(self):
    """Run self.rules_list.

    Return True if one rule channel has been passed.
    Otherwise return False and the deny() method of the last failed rule.
    """
    failed_result = None
    for rule in self.rules_list:
        for check, deny in rule:
            if not check():
                failed_result = (False, deny)
                break
        else:
            return (True, None)
    return failed_result
Run self.rules_list. Return True if one rule channel has been passed. Otherwise return False and the deny() method of the last failed rule.
entailment
def set_fraction(self, value):
    """Set the meter indicator. Value should be between 0 and 1."""
    if value < 0:
        value *= -1
    value = min(value, 1)

    if self.horizontal:
        width = int(self.width * value)
        height = self.height
    else:
        width = self.width
        height = int(self.height * value)

    self.canvas.coords(self.meter, self.xpos, self.ypos,
                       self.xpos + width, self.ypos + height)
Set the meter indicator. Value should be between 0 and 1.
entailment
def update_status(self):
    """Update status information in the tkinter window."""
    try:
        # all this may fail if the connection to the fritzbox is down
        self.update_connection_status()
        self.max_stream_rate.set(self.get_stream_rate_str())
        self.ip.set(self.status.external_ip)
        self.uptime.set(self.status.str_uptime)
        upstream, downstream = self.status.transmission_rate
    except IOError:
        # here we inform the user about being unable to
        # update the status information
        pass
    else:
        # max_downstream and max_upstream may be zero if the
        # fritzbox is configured as ip-client.
        if self.max_downstream > 0:
            self.in_meter.set_fraction(
                1.0 * downstream / self.max_downstream)
        if self.max_upstream > 0:
            self.out_meter.set_fraction(1.0 * upstream / self.max_upstream)
        self.update_traffic_info()
    self.after(1000, self.update_status)
Update status information in the tkinter window.
entailment
def format_num(num, unit='bytes'):
    """
    Returns a human readable string of a byte-value.

    If 'num' is bits, set unit='bits'.
    """
    if unit == 'bytes':
        extension = 'B'
    else:
        # if it's not bytes, it's bits
        extension = 'Bit'

    for dimension in (unit, 'K', 'M', 'G', 'T'):
        if num < 1024:
            if dimension == unit:
                return '%3.1f %s' % (num, dimension)
            return '%3.1f %s%s' % (num, dimension, extension)
        num /= 1024
    return '%3.1f P%s' % (num, extension)
Returns a human readable string of a byte-value. If 'num' is bits, set unit='bits'.
entailment
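A few illustrative calls; the outputs shown in comments follow the formatting logic above:

print(format_num(512))            # '512.0 bytes'
print(format_num(2048))           # '2.0 KB'
print(format_num(3 * 1024 ** 2))  # '3.0 MB'
print(format_num(8 * 10 ** 6, unit='bits'))  # '7.6 MBit'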
def parse_headers(content_disposition, location=None, relaxed=False):
    """Build a ContentDisposition from header values.
    """
    LOGGER.debug(
        'Content-Disposition %r, Location %r', content_disposition, location)

    if content_disposition is None:
        return ContentDisposition(location=location)

    # Both alternatives seem valid.
    if False:
        # Require content_disposition to be ascii bytes (0-127),
        # or characters in the ascii range
        content_disposition = ensure_charset(content_disposition, 'ascii')
    else:
        # We allow non-ascii here (it will only be parsed inside of
        # qdtext, and rejected by the grammar if it appears in
        # other places), although parsing it can be ambiguous.
        # Parsing it ensures that a non-ambiguous filename* value
        # won't get dismissed because of an unrelated ambiguity
        # in the filename parameter. But it does mean we occasionally
        # give less-than-certain values for some legacy senders.
        content_disposition = ensure_charset(content_disposition, 'iso-8859-1')

    # Check the caller already did LWS-folding (normally done
    # when separating header names and values; RFC 2616 section 2.2
    # says it should be done before interpretation at any rate).
    # Hopefully space still means what it should in iso-8859-1.
    # This check is a bit stronger than LWS folding, it will
    # remove CR and LF even if they aren't part of a CRLF.
    # However http doesn't allow isolated CR and LF in headers outside
    # of LWS.
    if relaxed:
        # Relaxed has two effects (so far):
        # the grammar allows a final ';' in the header;
        # we do LWS-folding, and possibly normalise other broken
        # whitespace, instead of rejecting non-lws-safe text.
        # XXX Would prefer to accept only the quoted whitespace
        # case, rather than normalising everything.
        content_disposition = normalize_ws(content_disposition)
        parser = content_disposition_value_relaxed
    else:
        # Turns out this is occasionally broken: two spaces inside
        # a quoted_string's qdtext. Firefox and Chrome save the two spaces.
        if not is_lws_safe(content_disposition):
            raise ValueError(
                content_disposition, 'Contains nonstandard whitespace')
        parser = content_disposition_value

    try:
        parsed = parser.parse(content_disposition)
    except FullFirstMatchException:
        return ContentDisposition(location=location)

    return ContentDisposition(
        disposition=parsed[0], assocs=parsed[1:], location=location)
Build a ContentDisposition from header values.
entailment
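An illustrative parse of a typical header value (the URL and filename are made up):

cd = parse_headers('attachment; filename="report.pdf"',
                   location='http://example.com/download/42')
print(cd.disposition)        # 'attachment'
print(cd.filename_unsafe)    # 'report.pdf'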
def parse_requests_response(response, **kwargs):
    """Build a ContentDisposition from a requests (PyPI) response.
    """
    return parse_headers(
        response.headers.get('content-disposition'), response.url, **kwargs)
Build a ContentDisposition from a requests (PyPI) response.
entailment
def build_header(
        filename, disposition='attachment', filename_compat=None):
    """Generate a Content-Disposition header for a given filename.

    For legacy clients that don't understand the filename* parameter,
    a filename_compat value may be given.
    It should either be ascii-only (recommended) or iso-8859-1 only.
    In the latter case it should be a character string
    (unicode in Python 2).

    Options for generating filename_compat (only useful for legacy clients):
    - ignore (will only send filename*);
    - strip accents using unicode's decomposing normalisations,
      which can be done from unicode data (stdlib), and keep only ascii;
    - use the ascii transliteration tables from Unidecode (PyPI);
    - use iso-8859-1

    Ignore is the safest, and can be used to trigger a fallback to the
    document location (which can be percent-encoded utf-8 if you control
    the URLs).

    See https://tools.ietf.org/html/rfc6266#appendix-D
    """
    # While this method exists, it could also sanitize the filename
    # by rejecting slashes or other weirdness that might upset a receiver.

    if disposition != 'attachment':
        assert is_token(disposition)

    rv = disposition

    if is_token(filename):
        rv += '; filename=%s' % (filename, )
        return rv
    elif is_ascii(filename) and is_lws_safe(filename):
        qd_filename = qd_quote(filename)
        rv += '; filename="%s"' % (qd_filename, )
        if qd_filename == filename:
            # RFC 6266 claims some implementations are iffy on qdtext's
            # backslash-escaping, we'll include filename* in that case.
            return rv
    elif filename_compat:
        if is_token(filename_compat):
            rv += '; filename=%s' % (filename_compat, )
        else:
            assert is_lws_safe(filename_compat)
            rv += '; filename="%s"' % (qd_quote(filename_compat), )

    # alnum are already considered always-safe, but the rest isn't.
    # Python encodes ~ when it shouldn't, for example.
    rv += "; filename*=utf-8''%s" % (percent_encode(
        filename, safe=attr_chars_nonalnum, encoding='utf-8'), )

    # This will only encode filename_compat, if it used non-ascii iso-8859-1.
    return rv.encode('iso-8859-1')
Generate a Content-Disposition header for a given filename. For legacy clients that don't understand the filename* parameter, a filename_compat value may be given. It should either be ascii-only (recommended) or iso-8859-1 only. In the later case it should be a character string (unicode in Python 2). Options for generating filename_compat (only useful for legacy clients): - ignore (will only send filename*); - strip accents using unicode's decomposing normalisations, which can be done from unicode data (stdlib), and keep only ascii; - use the ascii transliteration tables from Unidecode (PyPI); - use iso-8859-1 Ignore is the safest, and can be used to trigger a fallback to the document location (which can be percent-encoded utf-8 if you control the URLs). See https://tools.ietf.org/html/rfc6266#appendix-D
entailment
def filename_unsafe(self):
    """The filename from the Content-Disposition header.

    If a location was passed at instantiation, the basename
    from that may be used as a fallback.

    Otherwise, this may be the None value.

    On safety:
        This property records the intent of the sender.

        You shouldn't use this sender-controlled value as a filesystem
        path, it can be insecure. Serving files with this filename can be
        dangerous as well, due to a certain browser using the part after
        the dot for mime-sniffing.
        Saving it to a database is fine by itself though.
    """
    if 'filename*' in self.assocs:
        return self.assocs['filename*'].string
    elif 'filename' in self.assocs:
        # XXX Reject non-ascii (parsed via qdtext) here?
        return self.assocs['filename']
    elif self.location is not None:
        return posixpath.basename(self.location_path.rstrip('/'))
The filename from the Content-Disposition header. If a location was passed at instantiation, the basename from that may be used as a fallback. Otherwise, this may be the None value. On safety: This property records the intent of the sender. You shouldn't use this sender-controlled value as a filesystem path, it can be insecure. Serving files with this filename can be dangerous as well, due to a certain browser using the part after the dot for mime-sniffing. Saving it to a database is fine by itself though.
entailment
def filename_sanitized(self, extension, default_filename='file'):
    """Returns a filename that is safer to use on the filesystem.

    The filename will not contain a slash (nor the path separator
    for the current platform, if different), it will not start with
    a dot, and it will have the expected extension.

    No guarantees that makes it "safe enough".
    No effort is made to remove special characters;
    using this value blindly might overwrite existing files, etc.
    """
    assert extension
    assert extension[0] != '.'
    assert default_filename
    assert '.' not in default_filename

    extension = '.' + extension
    fname = self.filename_unsafe
    if fname is None:
        fname = default_filename

    fname = posixpath.basename(fname)
    fname = os.path.basename(fname)
    fname = fname.lstrip('.')
    if not fname:
        fname = default_filename
    if not fname.endswith(extension):
        fname = fname + extension
    return fname
Returns a filename that is safer to use on the filesystem. The filename will not contain a slash (nor the path separator for the current platform, if different), it will not start with a dot, and it will have the expected extension. No guarantees that makes it "safe enough". No effort is made to remove special characters; using this value blindly might overwrite existing files, etc.
entailment
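An illustrative pairing of the two properties, using a deliberately hostile header value:

cd = parse_headers('attachment; filename="../.hidden/notes.txt"')
print(cd.filename_unsafe)             # '../.hidden/notes.txt'
print(cd.filename_sanitized('txt'))   # 'notes.txt'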
def str_uptime(self):
    """uptime in human readable format."""
    mins, secs = divmod(self.uptime, 60)
    hours, mins = divmod(mins, 60)
    return '%02d:%02d:%02d' % (hours, mins, secs)
uptime in human readable format.
entailment
def transmission_rate(self):
    """
    Returns the upstream, downstream values as a tuple in bytes per
    second. Use this for periodical calling.
    """
    sent = self.bytes_sent
    received = self.bytes_received
    traffic_call = time.time()
    time_delta = traffic_call - self.last_traffic_call
    upstream = int(1.0 * (sent - self.last_bytes_sent) / time_delta)
    downstream = int(1.0 * (received - self.last_bytes_received) / time_delta)
    self.last_bytes_sent = sent
    self.last_bytes_received = received
    self.last_traffic_call = traffic_call
    return upstream, downstream
Returns the upstream, downstream values as a tuple in bytes per second. Use this for periodical calling.
entailment
def str_transmission_rate(self):
    """Returns a tuple of human readable transmission rates in bytes."""
    upstream, downstream = self.transmission_rate
    return (
        fritztools.format_num(upstream),
        fritztools.format_num(downstream)
    )
Returns a tuple of human readable transmission rates in bytes.
entailment
def max_bit_rate(self):
    """
    Returns a tuple with the maximum upstream- and downstream-rate
    of the given connection. The rate is given in bits/sec.
    """
    status = self.fc.call_action('WANCommonInterfaceConfig',
                                 'GetCommonLinkProperties')
    downstream = status['NewLayer1DownstreamMaxBitRate']
    upstream = status['NewLayer1UpstreamMaxBitRate']
    return upstream, downstream
Returns a tuple with the maximum upstream- and downstream-rate of the given connection. The rate is given in bits/sec.
entailment
def str_max_bit_rate(self):
    """
    Returns a human readable maximum upstream- and downstream-rate
    of the given connection. The rate is given in bits/sec.
    """
    upstream, downstream = self.max_bit_rate
    return (
        fritztools.format_rate(upstream, unit='bits'),
        fritztools.format_rate(downstream, unit='bits')
    )
Returns a human readable maximum upstream- and downstream-rate of the given connection. The rate is given in bits/sec.
entailment
def _body_builder(self, kwargs):
    """
    Helper method to construct the appropriate SOAP-body to call a
    FritzBox-Service.
    """
    p = {
        'action_name': self.name,
        'service_type': self.service_type,
        'arguments': '',
    }
    if kwargs:
        arguments = [
            self.argument_template % {'name': k, 'value': v}
            for k, v in kwargs.items()
        ]
        p['arguments'] = ''.join(arguments)
    body = self.body_template.strip() % p
    return body
Helper method to construct the appropriate SOAP-body to call a FritzBox-Service.
entailment
def execute(self, **kwargs):
    """
    Calls the FritzBox action and returns a dictionary with the arguments.
    """
    headers = self.header.copy()
    headers['soapaction'] = '%s#%s' % (self.service_type, self.name)
    data = self.envelope.strip() % self._body_builder(kwargs)
    url = 'http://%s:%s%s' % (self.address, self.port, self.control_url)
    auth = None
    if self.password:
        auth = HTTPDigestAuth(self.user, self.password)
    response = requests.post(url, data=data, headers=headers, auth=auth)
    # lxml needs bytes, therefore response.content (not response.text)
    result = self.parse_response(response.content)
    return result
Calls the FritzBox action and returns a dictionary with the arguments.
entailment
def parse_response(self, response):
    """
    Evaluates the action-call response from a FritzBox.

    The response is an xml byte-string.
    Returns a dictionary with the received arguments-value pairs.
    The values are converted according to the given data_types.
    TODO: boolean and signed integers data-types from tr64 responses
    """
    result = {}
    root = etree.fromstring(response)
    for argument in self.arguments.values():
        try:
            value = root.find('.//%s' % argument.name).text
        except AttributeError:
            # will happen by searching for in-parameters and by
            # parsing responses with status_code != 200
            continue
        if argument.data_type.startswith('ui'):
            try:
                value = int(value)
            except ValueError:
                # should not happen
                value = None
        result[argument.name] = value
    return result
Evaluates the action-call response from a FritzBox. The response is an xml byte-string. Returns a dictionary with the received arguments-value pairs. The values are converted according to the given data_types. TODO: boolean and signed integers data-types from tr64 responses
entailment
def get_modelname(self):
    """Returns the FritzBox model name."""
    xpath = '%s/%s' % (self.nodename('device'), self.nodename('modelName'))
    return self.root.find(xpath).text
Returns the FritzBox model name.
entailment
def get_services(self):
    """Returns a list of FritzService-objects."""
    result = []
    nodes = self.root.iterfind(
        './/ns:service', namespaces={'ns': self.namespace})
    for node in nodes:
        result.append(FritzService(
            node.find(self.nodename('serviceType')).text,
            node.find(self.nodename('controlURL')).text,
            node.find(self.nodename('SCPDURL')).text))
    return result
Returns a list of FritzService-objects.
entailment
def _read_state_variables(self):
    """
    Reads the stateVariable information from the xml-file.

    The information we like to extract are name and dataType so we can
    assign them later on to FritzActionArgument-instances.
    Returns a dictionary: key:value = name:dataType
    """
    nodes = self.root.iterfind(
        './/ns:stateVariable', namespaces={'ns': self.namespace})
    for node in nodes:
        key = node.find(self.nodename('name')).text
        value = node.find(self.nodename('dataType')).text
        self.state_variables[key] = value
Reads the stateVariable information from the xml-file. The information we like to extract are name and dataType so we can assign them later on to FritzActionArgument-instances. Returns a dictionary: key:value = name:dataType
entailment
def get_actions(self):
    """Returns a list of FritzAction instances."""
    self._read_state_variables()
    actions = []
    nodes = self.root.iterfind(
        './/ns:action', namespaces={'ns': self.namespace})
    for node in nodes:
        action = FritzAction(self.service.service_type,
                             self.service.control_url)
        action.name = node.find(self.nodename('name')).text
        action.arguments = self._get_arguments(node)
        actions.append(action)
    return actions
Returns a list of FritzAction instances.
entailment
def _get_arguments(self, action_node):
    """
    Returns a dictionary of arguments for the given action_node.
    """
    arguments = {}
    argument_nodes = action_node.iterfind(
        r'./ns:argumentList/ns:argument', namespaces={'ns': self.namespace})
    for argument_node in argument_nodes:
        argument = self._get_argument(argument_node)
        arguments[argument.name] = argument
    return arguments
Returns a dictionary of arguments for the given action_node.
entailment
def _get_argument(self, argument_node):
    """
    Returns a FritzActionArgument instance for the given argument_node.
    """
    argument = FritzActionArgument()
    argument.name = argument_node.find(self.nodename('name')).text
    argument.direction = argument_node.find(self.nodename('direction')).text
    rsv = argument_node.find(self.nodename('relatedStateVariable')).text
    # TODO: track malformed xml-nodes (i.e. misspelled)
    argument.data_type = self.state_variables.get(rsv, None)
    return argument
Returns a FritzActionArgument instance for the given argument_node.
entailment
def _read_descriptions(self, password):
    """
    Read and evaluate the igddesc.xml file
    and the tr64desc.xml file if a password is given.
    """
    descfiles = [FRITZ_IGD_DESC_FILE]
    if password:
        descfiles.append(FRITZ_TR64_DESC_FILE)
    for descfile in descfiles:
        parser = FritzDescParser(self.address, self.port, descfile)
        if not self.modelname:
            self.modelname = parser.get_modelname()
        services = parser.get_services()
        self._read_services(services)
Read and evaluate the igddesc.xml file and the tr64desc.xml file if a password is given.
entailment
def _read_services(self, services):
    """Get actions from services."""
    for service in services:
        parser = FritzSCDPParser(self.address, self.port, service)
        actions = parser.get_actions()
        service.actions = {action.name: action for action in actions}
        self.services[service.name] = service
Get actions from services.
entailment
def actionnames(self):
    """
    Returns an alphabetically sorted list of tuples
    with all known service- and action-names.
    """
    actions = []
    for service_name in sorted(self.services.keys()):
        action_names = self.services[service_name].actions.keys()
        for action_name in sorted(action_names):
            actions.append((service_name, action_name))
    return actions
Returns an alphabetically sorted list of tuples with all known service- and action-names.
entailment
def get_action_arguments(self, service_name, action_name):
    """
    Returns a list of tuples with all known arguments for the given
    service- and action-name combination. The tuples contain the
    argument-name, direction and data_type.
    """
    return self.services[service_name].actions[action_name].info
Returns a list of tuples with all known arguments for the given service- and action-name combination. The tuples contain the argument-name, direction and data_type.
entailment
def call_action(self, service_name, action_name, **kwargs):
    """Executes the given action. Raise a KeyError on unknown actions."""
    action = self.services[service_name].actions[action_name]
    return action.execute(**kwargs)
Executes the given action. Raise a KeyError on unknown actions.
entailment
def get_hosts_info(self):
    """
    Returns a list of dicts with information about the known hosts.
    The dict-keys are: 'ip', 'name', 'mac', 'status'
    """
    result = []
    index = 0
    while index < self.host_numbers:
        host = self.get_generic_host_entry(index)
        result.append({
            'ip': host['NewIPAddress'],
            'name': host['NewHostName'],
            'mac': host['NewMACAddress'],
            'status': host['NewActive']})
        index += 1
    return result
Returns a list of dicts with information about the known hosts. The dict-keys are: 'ip', 'name', 'mac', 'status'
entailment
def find_executable(executable):
    '''
    Finds executable in PATH

    Returns:
        string or None
    '''
    logger = logging.getLogger(__name__)
    logger.debug("Checking executable '%s'...", executable)
    executable_path = _find_executable(executable)
    found = executable_path is not None
    if found:
        logger.debug("Executable '%s' found: '%s'", executable,
                     executable_path)
    else:
        logger.debug("Executable '%s' not found", executable)
    return executable_path
Finds executable in PATH Returns: string or None
entailment
def check_network_connection(server, port):
    '''
    Checks if jasper can connect to a network server.

    Arguments:
        server -- (optional) the server to connect with
                  (Default: "www.google.com")

    Returns:
        True or False
    '''
    logger = logging.getLogger(__name__)
    logger.debug("Checking network connection to server '%s'...", server)
    try:
        # see if we can resolve the host name -- tells us if there is
        # a DNS listening
        host = socket.gethostbyname(server)
        # connect to the host -- tells us if the host is actually
        # reachable
        sock = socket.create_connection((host, port), 2)
        sock.close()
    except Exception:  # pragma: no cover
        logger.debug("Network connection not working")
        return False
    logger.debug("Network connection working")
    return True
Checks if jasper can connect to a network server. Arguments: server -- (optional) the server to connect with (Default: "www.google.com") Returns: True or False
entailment
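A quick usage sketch (host and port are illustrative):

if check_network_connection("www.google.com", 80):
    print("network is reachable")
else:
    print("no network connection")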
def check_python_import(package_or_module):
    '''
    Checks if a python package or module is importable.

    Arguments:
        package_or_module -- the package or module name to check

    Returns:
        True or False
    '''
    logger = logging.getLogger(__name__)
    logger.debug("Checking python import '%s'...", package_or_module)
    loader = pkgutil.get_loader(package_or_module)
    found = loader is not None
    if found:
        logger.debug("Python %s '%s' found",
                     "package" if loader.is_package(package_or_module)
                     else "module",
                     package_or_module)
    else:  # pragma: no cover
        logger.debug("Python import '%s' not found", package_or_module)
    return found
Checks if a python package or module is importable. Arguments: package_or_module -- the package or module name to check Returns: True or False
entailment
def inject(self):
    """
    Recursively inject aXe into all iframes and the top level document.

    :param script_url: location of the axe-core script.
    :type script_url: string
    """
    with open(self.script_url, "r", encoding="utf8") as f:
        self.selenium.execute_script(f.read())
Recursively inject aXe into all iframes and the top level document. :param script_url: location of the axe-core script. :type script_url: string
entailment
def run(self, context=None, options=None):
    """
    Run axe against the current page.

    :param context: which page part(s) to analyze and/or what to exclude.
    :param options: dictionary of aXe options.
    """
    template = (
        "var callback = arguments[arguments.length - 1];"
        + "axe.run(%s).then(results => callback(results))"
    )
    args = ""

    # If context parameter is passed, add to args
    if context is not None:
        args += "%r" % context
    # Add comma delimiter only if both parameters are passed
    if context is not None and options is not None:
        args += ","
    # If options parameter is passed, add to args
    if options is not None:
        args += "%s" % options

    command = template % args
    response = self.selenium.execute_async_script(command)
    return response
Run axe against the current page. :param context: which page part(s) to analyze and/or what to exclude. :param options: dictionary of aXe options.
entailment
def report(self, violations):
    """
    Return readable report of accessibility violations found.

    :param violations: Dictionary of violations.
    :type violations: dict
    :return report: Readable report of violations.
    :rtype: string
    """
    string = ""
    string += "Found " + str(len(violations)) + " accessibility violations:"
    for violation in violations:
        string += (
            "\n\n\nRule Violated:\n"
            + violation["id"]
            + " - "
            + violation["description"]
            + "\n\tURL: "
            + violation["helpUrl"]
            + "\n\tImpact Level: "
            + violation["impact"]
            + "\n\tTags:"
        )
        for tag in violation["tags"]:
            string += " " + tag
        string += "\n\tElements Affected:"
        i = 1
        for node in violation["nodes"]:
            for target in node["target"]:
                string += "\n\t" + str(i) + ") Target: " + target
                i += 1
            for item in node["all"]:
                string += "\n\t\t" + item["message"]
            for item in node["any"]:
                string += "\n\t\t" + item["message"]
            for item in node["none"]:
                string += "\n\t\t" + item["message"]
        string += "\n\n\n"
    return string
Return readable report of accessibility violations found. :param violations: Dictionary of violations. :type violations: dict :return report: Readable report of violations. :rtype: string
entailment