Python
def connect(self):
    """This method connects to RabbitMQ, returning the connection handle.

    When the connection is established, the on_connection_open method
    will be invoked by pika.

    :rtype: pika.SelectConnection
    """
    self.LOGGER.info("Connecting to RabbitMQ")
    creds = pika.PlainCredentials(self._user, self._password)
    params = pika.ConnectionParameters(
        host=self._host,
        port=self._port,
        virtual_host=self._virtual_host,
        credentials=creds,
    )
    return pika.SelectConnection(
        parameters=params, on_open_callback=self.on_connection_open
    )
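Note that connect() only builds the SelectConnection; nothing happens until its IOLoop runs. Below is a minimal sketch of the usual entry point, assuming the methods in this section live on one consumer class (the run method itself is not part of the original code):

Python
def run(self):
    """Open the connection and start the IOLoop so pika can do work."""
    self._connection = self.connect()
    self._connection.ioloop.start()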
Python
def on_connection_open(self, unused_connection):
    """This method is called by pika once the connection to RabbitMQ has
    been established. It passes the handle to the connection object in
    case we need it, but in this case, we'll just mark it unused.

    :type unused_connection: pika.SelectConnection
    """
    self.LOGGER.debug("Connection opened")
    self.add_on_connection_close_callback()
    self.open_channel()
Python
def add_on_connection_close_callback(self):
    """This method adds an on close callback. This will be invoked by
    pika when RabbitMQ closes the connection to the publisher
    unexpectedly.
    """
    self.LOGGER.debug("Adding connection close callback")
    self._connection.add_on_close_callback(self.on_connection_closed)
Python
def on_connection_closed(self, connection, exception_reason):
    """This method is invoked by pika when the connection to RabbitMQ is
    closed unexpectedly. Since it is unexpected, we will reconnect to
    RabbitMQ if it disconnects.

    :param pika.connection.Connection connection: The closed connection
    :param str exception_reason: The server-provided exception reason, if given
    """
    self.LOGGER.info(
        f"Connection {connection} closed. Exception reason given is: {exception_reason}"
    )
    self._channel = None
    if self._closing:
        self._connection.ioloop.stop()
    else:
        self.LOGGER.warning(
            f"Connection closed, reopening in 5 seconds. Exception reason given is: {exception_reason}"
        )
        self._connection.ioloop.call_later(5, self.reconnect)
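on_connection_closed schedules self.reconnect, which is not shown here. In pika's standard asynchronous-consumer pattern it looks roughly like the sketch below (the body is an assumption based on that pattern, not the original code):

Python
def reconnect(self):
    """Replace the dead connection with a fresh one."""
    # This is the old connection's IOLoop instance; stop it.
    self._connection.ioloop.stop()
    if not self._closing:
        # Create a new connection and hand control to its IOLoop.
        self._connection = self.connect()
        self._connection.ioloop.start()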
Python
def open_channel(self):
    """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
    command. When RabbitMQ responds that the channel is open, the
    on_channel_open callback will be invoked by pika.
    """
    self.LOGGER.info("Creating a new channel")
    self._connection.channel(on_open_callback=self.on_channel_open)
Python
def on_channel_open(self, channel):
    """This method is invoked by pika when the channel has been opened.
    The channel object is passed in so we can make use of it. Since the
    channel is now open, we'll declare the exchange to use.

    :param pika.channel.Channel channel: The channel object
    """
    self.LOGGER.info("Channel opened")
    self._channel = channel
    self.add_on_channel_close_callback()
Python
def on_channel_closed(self, channel, reason):
    """Invoked by pika when RabbitMQ unexpectedly closes the channel.
    Channels are usually closed if you attempt to do something that
    violates the protocol, such as re-declare an exchange or queue with
    different parameters. In this case, we'll close the connection to
    shut down the object.

    :param pika.channel.Channel channel: The closed channel
    :param Exception reason: why the channel was closed
    """
    self.LOGGER.warning("Channel %i was closed: %s", channel, reason)
    self._connection.close()
Python
def stop(self):
    """Cleanly shut down the connection to RabbitMQ.

    Shut down the connection to RabbitMQ by stopping the consumer with
    RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok will
    be invoked by pika, which will then close the channel and
    connection.

    The IOLoop is started again because this method is invoked when
    CTRL-C is pressed, raising a KeyboardInterrupt exception. This
    exception stops the IOLoop, which needs to be running for pika to
    communicate with RabbitMQ. All of the commands issued prior to
    starting the IOLoop will be buffered but not processed.
    """
    self.LOGGER.info("Receiver is stopping")
    self._closing = True
    self.close_channel()
    self._connection.ioloop.start()
    self.LOGGER.info("Stopped")
Python
def on_exchange_declareok(self, unused_frame):
    """Invoked by pika when RabbitMQ has finished the Exchange.Declare
    RPC command.

    :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
    """
    self.LOGGER.debug("Exchange declared")
    self.setup_queue()
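on_exchange_declareok calls setup_queue, which this section omits. A plausible sketch, assuming the queue name is held in a _queue attribute (both the attribute and the body are illustrative, not the original code):

Python
def setup_queue(self):
    """Declare the queue; on_queue_declareok fires when RabbitMQ confirms."""
    self.LOGGER.info("Declaring queue %s", self._queue)
    self._channel.queue_declare(
        queue=self._queue, callback=self.on_queue_declareok
    )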
Python
def on_queue_declareok(self, method_frame):
    """Method invoked by pika when the Queue.Declare RPC call made in
    setup_queue has completed. In this method we will bind the queue
    and exchange together with the routing key by issuing the
    Queue.Bind RPC command. When this command is complete, the
    on_bindok method will be invoked by pika.

    :param pika.frame.Method method_frame: The Queue.DeclareOk frame
    """
    self.queue_name = method_frame.method.queue
    self.LOGGER.info(
        "Binding %s to %s with %s",
        self.exchange_name,
        self.queue_name,
        self.routing_key,
    )
    self._channel.queue_bind(
        callback=self.on_bindok,
        queue=self.queue_name,
        exchange=self.exchange_name,
        routing_key=self.routing_key,
    )
Python
def on_bindok(self, unused_frame):
    """Invoked by pika when the Queue.Bind method has completed. At this
    point we will start consuming messages by calling start_consuming
    which will invoke the needed RPC commands to start the process.

    :param pika.frame.Method unused_frame: The Queue.BindOk response frame
    """
    self.LOGGER.debug("Queue bound")
    self.start_consuming()
Python
def start_consuming(self):
    """This method sets up the consumer. It starts by first calling
    add_on_cancel_callback so that the object is notified if RabbitMQ
    cancels the consumer. It then issues the Basic.Consume RPC command
    which returns the consumer tag that is used to uniquely identify
    the consumer with RabbitMQ. We keep the value to use it when we
    want to cancel consuming. The on_message method is passed in as a
    callback pika will invoke when a message is fully received.
    """
    self.LOGGER.debug("Issuing consumer related RPC commands")
    self.add_on_cancel_callback()
    self._consumer_tag = self._channel.basic_consume(
        on_message_callback=self.on_message, queue=self.queue_name
    )
Python
def add_on_cancel_callback(self):
    """Add a callback that will be invoked if RabbitMQ cancels the
    consumer for some reason. If RabbitMQ does cancel the consumer,
    on_consumer_cancelled will be invoked by pika.
    """
    self.LOGGER.debug("Adding consumer cancellation callback")
    self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
Python
def on_consumer_cancelled(self, method_frame):
    """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
    receiving messages.

    :param pika.frame.Method method_frame: The Basic.Cancel frame
    """
    self.LOGGER.warning(
        "Consumer was cancelled remotely, shutting down: %r", method_frame
    )
    if self._channel:
        self._channel.close()
Python
def on_message(self, unused_channel, basic_deliver, properties, body):
    """Invoked by pika when a message is delivered from RabbitMQ. The
    channel is passed for your convenience. The basic_deliver object
    that is passed in carries the exchange, routing key, delivery tag
    and a redelivered flag for the message. The properties argument is
    an instance of BasicProperties with the message properties, and the
    body is the message that was sent.

    :param pika.channel.Channel unused_channel: The channel object
    :param pika.spec.Basic.Deliver basic_deliver: The Basic.Deliver method
    :param pika.spec.BasicProperties properties: The message properties
    :param str|unicode body: The message body
    """
    self.LOGGER.debug(
        f"Received message # {basic_deliver.delivery_tag} from {properties.app_id}"
    )
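As written, on_message only logs the delivery; with pika's default auto_ack=False the message stays unacknowledged. A consumer would normally finish the method with an acknowledgement along these lines (the placement is an assumption; basic_ack is standard pika):

Python
# At the end of on_message, after the body has been processed:
self._channel.basic_ack(delivery_tag=basic_deliver.delivery_tag)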
Python
def stop_consuming(self):
    """Tell RabbitMQ that you would like to stop consuming by sending
    the Basic.Cancel RPC command.
    """
    if self._channel:
        self.LOGGER.debug("Sending a Basic.Cancel RPC command to RabbitMQ")
        self._channel.basic_cancel(
            callback=self.on_cancelok, consumer_tag=self._consumer_tag
        )
Python
def on_cancelok(self, unused_frame):
    """This method is invoked by pika when RabbitMQ acknowledges the
    cancellation of a consumer. At this point we will close the channel.
    This will invoke the on_channel_closed method once the channel has
    been closed, which will in turn close the connection.

    :param pika.frame.Method unused_frame: The Basic.CancelOk frame
    """
    self.LOGGER.debug("RabbitMQ acknowledged the cancellation of the consumer")
    self.close_channel()
Python
def clean():
    """Remove *.pyc and *.pyo files recursively starting at current directory.

    Borrowed from Flask-Script, converted to use Click.
    """
    for dirpath, dirnames, filenames in os.walk("."):  # pylint: disable=unused-variable
        for filename in filenames:
            if filename.endswith(".pyc") or filename.endswith(".pyo"):
                full_pathname = os.path.join(dirpath, filename)
                click.echo(f"Removing {full_pathname}")
                os.remove(full_pathname)
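The snippet omits how clean is wired into Click. One plausible registration, assuming a manage.py-style entry point (the cli group and command name are illustrative):

Python
import click

cli = click.Group()

# Expose the function above as `python manage.py clean`.
cli.add_command(click.command(name="clean")(clean))

if __name__ == "__main__":
    cli()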
Python
def urls(url, order):
    """Display all of the url matching routes for the project.

    Borrowed from Flask-Script, converted to use Click.
    """
    rows = []
    column_length = 0
    column_headers = ("Rule", "Endpoint", "Arguments")

    if url:
        try:
            rule, arguments = current_app.url_map.bind("localhost").match(
                url, return_rule=True
            )
            rows.append((rule.rule, rule.endpoint, arguments))
            column_length = 3
        except (NotFound, MethodNotAllowed) as error:
            rows.append((f"<{error}>", None, None))
            column_length = 1
    else:
        rules = sorted(
            current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order)
        )
        for rule in rules:
            rows.append((rule.rule, rule.endpoint, None))
        column_length = 2

    str_template = ""
    table_width = 0

    if column_length >= 1:
        max_rule_length = max(len(r[0]) for r in rows)
        max_rule_length = max_rule_length if max_rule_length > 4 else 4
        str_template += "{:" + str(max_rule_length) + "}"
        table_width += max_rule_length

    if column_length >= 2:
        max_endpoint_length = max(len(str(r[1])) for r in rows)
        # max_endpoint_length = max(rows, key=len)
        max_endpoint_length = max_endpoint_length if max_endpoint_length > 8 else 8
        str_template += "  {:" + str(max_endpoint_length) + "}"
        table_width += 2 + max_endpoint_length

    if column_length >= 3:
        max_arguments_length = max(len(str(r[2])) for r in rows)
        max_arguments_length = max_arguments_length if max_arguments_length > 9 else 9
        str_template += "  {:" + str(max_arguments_length) + "}"
        table_width += 2 + max_arguments_length

    click.echo(str_template.format(*column_headers[:column_length]))
    click.echo("-" * table_width)

    for row in rows:
        click.echo(str_template.format(*row[:column_length]))
Python
def pages(self):
    """The total number of pages."""
    if self.per_page == 0 or self.total is None:
        pages = 0
    else:
        pages = int(ceil(self.total / float(self.per_page)))
    return pages
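A quick worked example of the rounding: 53 items at 10 per page yield 6 pages, since the last, partial page still counts.

Python
from math import ceil

total, per_page = 53, 10
print(int(ceil(total / float(per_page))))  # 6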
Python
def _get_restrictive_rating(requirements):
    # type: (Iterable[Requirement]) -> int
    """Rate how restrictive a set of requirements are.

    ``Requirement.get_candidate_lookup()`` returns a 2-tuple for
    lookup. The first element is ``Optional[Candidate]`` and the second
    ``Optional[InstallRequirement]``.

    * If the requirement is an explicit one, the explicitly-required
      candidate is returned as the first element.
    * If the requirement is based on a PEP 508 specifier, the backing
      ``InstallRequirement`` is returned as the second element.

    We use the first element to check whether there is an explicit
    requirement, and the second to check for equality operators.
    """
    lookups = (r.get_candidate_lookup() for r in requirements)
    cands, ireqs = zip(*lookups)
    if any(cand is not None for cand in cands):
        return 0
    spec_sets = (ireq.specifier for ireq in ireqs if ireq)
    operators = [
        specifier.operator for spec_set in spec_sets for specifier in spec_set
    ]
    if any(op in ("==", "===") for op in operators):
        return 1
    if operators:
        return 2
    # A "bare" requirement without any version requirements.
    return 3
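To see the four ratings in action without pip's internals, here is a sketch with hypothetical stand-in objects implementing just enough of the Requirement interface (all the Fake* classes are inventions for illustration):

Python
class FakeSpecifier:
    def __init__(self, operator):
        self.operator = operator

class FakeIreq:
    def __init__(self, operators):
        self.specifier = [FakeSpecifier(op) for op in operators]

class FakeRequirement:
    def __init__(self, candidate=None, ireq=None):
        self._lookup = (candidate, ireq)

    def get_candidate_lookup(self):
        return self._lookup

print(_get_restrictive_rating([FakeRequirement(candidate=object())]))     # 0: explicit
print(_get_restrictive_rating([FakeRequirement(ireq=FakeIreq(["=="]))]))  # 1: pinned
print(_get_restrictive_rating([FakeRequirement(ireq=FakeIreq([">="]))]))  # 2: bounded
print(_get_restrictive_rating([FakeRequirement(ireq=FakeIreq([]))]))      # 3: bare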
Python
def testEmptyPatch(self):
    """
    The simplest possible patch does not cause an error.
    """
    original = [
        ops.Header(0, 0),
        ops.SourceCRC32(0),
        ops.TargetCRC32(0),
    ]
    self.assertSequenceEqual(original, list(check_stream(original)))
Python
def testUnrecognisedOpcode(self):
    """
    Raise CorruptFile if there's an item with an unknown opcode.
    """
    self.assertRaisesRegex(
        CorruptFile, "unknown opcode", list,
        check_stream([
            ops.Header(0, 1),
            b'sasquatch',
        ]),
    )
Python
def testSourceReadOpcode(self):
    """
    Raise CorruptFile if a SourceRead opcode has any problems.
    """
    # Can read right up to the end of the source file.
    original = [
        ops.Header(5, 5),
        ops.SourceRead(5),
        ops.SourceCRC32(0),
        ops.TargetCRC32(0),
    ]
    self.assertSequenceEqual(original, list(check_stream(original)))

    # Can't read past the end of the source file.
    self.assertRaisesRegex(
        CorruptFile, "end of the source", list,
        check_stream([
            ops.Header(5, 6),
            # Read part of the source file.
            ops.SourceRead(1),
            # Trying to read past the end of the source file.
            ops.SourceRead(5),
        ]),
    )
Python
def testSourceCopyLimits(self):
    """
    Raise CorruptFile if SourceCopy tries to copy from outside the file.
    """
    # offset + length must be at most sourceSize.
    original = [
        ops.Header(2, 2),
        # offset + length = sourceSize, so this should be OK.
        ops.SourceCopy(2, 0),
        ops.SourceCRC32(0),
        ops.TargetCRC32(0),
    ]
    self.assertSequenceEqual(original, list(check_stream(original)))

    # Here we read past the end of the source, which should raise an
    # exception.
    self.assertRaisesRegex(
        CorruptFile, "end of the source", list,
        check_stream([
            ops.Header(2, 3),
            ops.SourceCopy(2, 1),
        ]),
    )
Python
def testTargetCopyLimits(self):
    """
    Raise CorruptFile if TargetCopy tries to copy from outside the file.
    """
    # offset must be less than targetWriteOffset.
    original = [
        ops.Header(0, 2),
        ops.TargetRead(b'A'),
        ops.TargetCopy(1, 0),
        ops.SourceCRC32(0),
        ops.TargetCRC32(0),
    ]
    self.assertSequenceEqual(original, list(check_stream(original)))

    # Trying to read the byte that targetWriteOffset is pointing at is not
    # allowed.
    self.assertRaisesRegex(
        CorruptFile, "end of the written part", list,
        check_stream([
            ops.Header(0, 5),
            ops.TargetRead(b'A'),
            ops.TargetCopy(1, 1),
        ]),
    )

    # But it's OK if the length goes past targetWriteOffset; that's how RLE
    # works.
    original = [
        ops.Header(0, 5),
        ops.TargetRead(b'A'),
        ops.TargetCopy(4, 0),
        ops.SourceCRC32(0),
        ops.TargetCRC32(0),
    ]
    self.assertSequenceEqual(original, list(check_stream(original)))
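The "that's how RLE works" comment deserves a standalone illustration: because TargetCopy is applied byte-at-a-time, a copy whose length runs past the write offset re-reads bytes it has just written, expanding a single seed byte into a run:

Python
buf = bytearray(5)
buf[0:1] = b"A"            # TargetRead(b'A')
write, src = 1, 0
for i in range(4):         # TargetCopy(4, 0)
    buf[write + i] = buf[src + i]
print(buf)                 # bytearray(b'AAAAA')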
Python
def testWritingPastTheEndOfTheTarget(self):
    """
    Raise CorruptFile if the patch writes more than targetsize bytes.
    """
    # SourceRead can't write past the end of the target.
    self.assertRaisesRegex(
        CorruptFile, "end of the target", list,
        check_stream([
            ops.Header(5, 1),
            ops.SourceRead(5),
        ]),
    )

    # TargetRead can't write past the end of the target.
    self.assertRaisesRegex(
        CorruptFile, "end of the target", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'hello'),
        ]),
    )

    # SourceCopy can't write past the end of the target.
    self.assertRaisesRegex(
        CorruptFile, "end of the target", list,
        check_stream([
            ops.Header(5, 1),
            ops.SourceCopy(5, 0),
        ]),
    )

    # TargetCopy can't write past the end of the target.
    self.assertRaisesRegex(
        CorruptFile, "end of the target", list,
        check_stream([
            ops.Header(0, 2),
            ops.TargetRead(b'A'),
            ops.TargetCopy(5, 0),
        ]),
    )
Python
def testTruncatedStream(self):
    """
    Raise CorruptFile if the iterable ends before we have a whole patch.
    """
    # Complain if there's no header.
    self.assertRaisesRegex(
        CorruptFile, "truncated patch", list,
        check_stream([]),
    )

    # Complain if there are no patch hunks when there should be.
    self.assertRaisesRegex(
        CorruptFile, "truncated patch", list,
        check_stream([
            ops.Header(0, 1),
        ]),
    )

    # Complain if there's no source CRC32 opcode.
    self.assertRaisesRegex(
        CorruptFile, "truncated patch", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
        ]),
    )

    # Complain if there's no target CRC32 opcode.
    self.assertRaisesRegex(
        CorruptFile, "truncated patch", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
            ops.SourceCRC32(0),
        ]),
    )
Python
def testStateMachine(self):
    """
    Raise CorruptFile if we get valid opcodes out of order.
    """
    # Complain if we get anything before the header opcode.
    self.assertRaisesRegex(
        CorruptFile, "expected header", list,
        check_stream([
            ops.SourceRead(1),
        ]),
    )

    # Complain if we get a SourceCRC32 before any patch hunks.
    self.assertRaisesRegex(
        CorruptFile, "unknown opcode", list,
        check_stream([
            ops.Header(0, 1),
            ops.SourceCRC32(0),
        ]),
    )

    # Complain if we get a TargetCRC32 before a SourceCRC32 opcode.
    self.assertRaisesRegex(
        CorruptFile, "expected SourceCRC32", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
            ops.TargetCRC32(0),
        ]),
    )

    # Complain if we get anything after SourceCRC32 besides TargetCRC32.
    self.assertRaisesRegex(
        CorruptFile, "expected TargetCRC32", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
            ops.SourceCRC32(0),
            ops.TargetRead(b'A'),
        ]),
    )

    # If we get a completely random operation rather than a CRC32, make
    # sure we complain about the opcode, not the number of arguments (or
    # whatever).
    self.assertRaisesRegex(
        CorruptFile, "expected SourceCRC32", list,
        check_stream([
            ops.Header(0, 0),
            ops.TargetRead(b'A'),
        ]),
    )
    self.assertRaisesRegex(
        CorruptFile, "expected SourceCRC32", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
            ops.TargetCopy(1, 0),
        ]),
    )
Python
def testTrailingGarbage(self):
    """
    Raise CorruptFile if there's anything after TargetCRC32.
    """
    self.assertRaisesRegex(
        CorruptFile, "trailing garbage", list,
        check_stream([
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
            ops.SourceCRC32(0),
            ops.TargetCRC32(0xD3D99E8B),
            ops.TargetRead(b'A'),
        ]),
    )
Python
def apply_to_bytearrays(iterable, source_buf, target_buf):
    """
    Applies the BPS patch from iterable to source_buf, producing target_buf.

    iterable should be an iterable yielding BPS patch opcodes, after the
    header.

    source_buf should be a bytes object, or something impersonating one.

    target_buf should be a bytearray object, or something impersonating one.
    """
    writeOffset = 0

    for item in iterable:

        if isinstance(item, ops.Header):
            # Just the header, nothing for us to do here.
            pass

        elif isinstance(item, ops.SourceRead):
            target_buf[writeOffset:writeOffset+item.bytespan] = \
                source_buf[writeOffset:writeOffset+item.bytespan]

        elif isinstance(item, ops.TargetRead):
            target_buf[writeOffset:writeOffset+item.bytespan] = item.payload

        elif isinstance(item, ops.SourceCopy):
            target_buf[writeOffset:writeOffset+item.bytespan] = \
                source_buf[item.offset:item.offset+item.bytespan]

        elif isinstance(item, ops.TargetCopy):
            # Because TargetCopy can be used to implement RLE-type
            # compression, we have to copy a byte at a time rather than just
            # slicing target_buf.
            for i in range(item.bytespan):
                target_buf[writeOffset+i] = target_buf[item.offset+i]

        elif isinstance(item, ops.SourceCRC32):
            actual = crc32(source_buf)
            expected = item.value

            if actual != expected:
                raise CorruptFile("Source file should have CRC32 {0:08X}, "
                        "got {1:08X}".format(expected, actual))

        elif isinstance(item, ops.TargetCRC32):
            actual = crc32(target_buf)
            expected = item.value

            if actual != expected:
                raise CorruptFile("Target file should have CRC32 {0:08X}, "
                        "got {1:08X}".format(expected, actual))

        writeOffset += item.bytespan
Python
def apply_to_files(patch, source, target):
    """
    Applies the BPS patch to the source file, writing to the target file.

    patch should be a file handle containing BPS patch data.

    source should be a readable, binary file handle containing the source
    data for the BPS patch.

    target should be a writable, binary file handle, which will contain the
    result of applying the given patch to the given source data.
    """
    iterable = check_stream(read_bps(patch))

    sourceData = source.read()

    header = next(iterable)
    if header.sourceSize != len(sourceData):
        raise CorruptFile("Source file must be {sourceSize} bytes, but "
                "{source!r} is {sourceDataLen} bytes.".format(
                    sourceSize=header.sourceSize, source=source,
                    sourceDataLen=len(sourceData)))

    targetData = bytearray(header.targetSize)

    apply_to_bytearrays(iterable, sourceData, targetData)

    assert len(targetData) == header.targetSize, ("Should have written {0} "
            "bytes to target, not {1}".format(
                header.targetSize, len(targetData)))

    target.write(targetData)
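Typical usage just opens the three files in the required binary modes (the file names here are placeholders):

Python
with open("patch.bps", "rb") as patch, \
        open("original.bin", "rb") as source, \
        open("patched.bin", "wb") as target:
    apply_to_files(patch, source, target)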
Python
def measure_op(blocksrc, sourceoffset, target, targetoffset):
    """
    Measure the match between blocksrc and target at these offsets.
    """
    # The various parameters line up something like this:
    #
    #             v-- sourceoffset
    #   ...ABCDExGHI... <-- blocksrc
    #
    #   ...xxxABCDEF... <-- target
    #              ^-- targetoffset
    #
    # result: backspan = 2, forespan = 3

    # Measure how far back the source and target files match from these
    # offsets.
    backspan = 0

    # We need the +1 here because the test inside the loop is actually
    # looking at the byte *before* the one pointed to by
    # (sourceoffset-backspan), so we need our span to stretch that little
    # bit further.
    maxspan = min(sourceoffset, targetoffset) + 1

    for backspan in range(maxspan):
        if blocksrc[sourceoffset-backspan-1] != target[targetoffset-backspan-1]:
            break

    # Measure how far forward the source and target files are aligned.
    forespan = 0
    sourcespan = len(blocksrc) - sourceoffset
    targetspan = len(target) - targetoffset
    maxspan = min(sourcespan, targetspan)

    for forespan in range(maxspan):
        if blocksrc[sourceoffset+forespan] != target[targetoffset+forespan]:
            break
    else:
        # We matched right up to the end of the file.
        forespan += 1

    return backspan, forespan
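A small worked call: both buffers contain "ABCD", with the probe offsets sitting between the B and the C, so the match extends two bytes backwards and two forwards:

Python
blocksrc = b"xxABCDyy"  # 'C' is at offset 4
target = b"zzzABCDw"    # 'C' is at offset 5
print(measure_op(blocksrc, 4, target, 5))  # (2, 2)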
Python
def diff_bytearrays(blocksize, source, target, metadata=""): """ Yield a sequence of patch operations that transform source to target. """ yield ops.Header(len(source), len(target), metadata) # We assume the entire source file will be available when applying this # patch, so load the entire thing into the block map. sourcemap = BlockMap() for block, offset in iter_blocks(source, blocksize): sourcemap.add_block(block, offset) # Points at the next byte of the target buffer that needs to be encoded. targetWriteOffset = 0 # Points at the next byte of the target buffer we're searching for # encodings for. If we can't find an encoding for a particular byte, we'll # leave targetWriteOffset alone and increment this offset, on the off # chance that we find a new encoding that we can extend backwards to # targetWriteOffset. targetEncodingOffset = 0 # Keep track of blocks seen in the part of the target buffer before # targetWriteOffset. Because targetWriteOffset does not always advance by # an even multiple of the blocksize, there can be some lag between when # targetWriteOffset moves past a particular byte, and when that byte's # block is added to targetmap. targetmap = BlockMap() targetblocks = iter_blocks(target, blocksize) # Points to the byte just beyond the most recent block added to targetmap; # the difference between this and targetWriteOffset measures the 'some lag' # described above. nextTargetMapBlockOffset = 0 # A place to store operations before we spit them out. This gives us an # opportunity to replace operations if we later come across a better # alternative encoding. opbuf = ops.OpBuffer(target) while targetEncodingOffset < len(target): # Keeps track of the most efficient operation for encoding this # particular offset that we've found so far. bestOp = None bestOpEfficiency = 0 bestOpBackSpan = 0 bestOpForeSpan = 0 blockend = targetEncodingOffset + blocksize block = target[targetEncodingOffset:blockend] for sourceOffset in sourcemap.get_block(block): backspan, forespan = measure_op( source, sourceOffset, target, targetEncodingOffset, ) if forespan == 0: # This block actually doesn't occur at this sourceOffset after # all. Perhaps it's a hash collision? continue if sourceOffset == targetEncodingOffset: candidate = ops.SourceRead(backspan+forespan) else: candidate = ops.SourceCopy( backspan+forespan, sourceOffset-backspan, ) lastSourceCopyOffset, lastTargetCopyOffset = ( opbuf.copy_offsets(backspan) ) efficiency = candidate.efficiency( lastSourceCopyOffset, lastTargetCopyOffset) if efficiency > bestOpEfficiency: bestOp = candidate bestOpEfficiency = efficiency bestOpBackSpan = backspan bestOpForeSpan = forespan for targetOffset in targetmap.get_block(block): backspan, forespan = measure_op( target, targetOffset, target, targetEncodingOffset, ) if forespan == 0: # This block actually doesn't occur at this sourceOffset after # all. Perhaps it's a hash collision? continue candidate = ops.TargetCopy( backspan+forespan, targetOffset-backspan, ) lastSourceCopyOffset, lastTargetCopyOffset = ( opbuf.copy_offsets(backspan) ) efficiency = candidate.efficiency( lastSourceCopyOffset, lastTargetCopyOffset) if efficiency > bestOpEfficiency: bestOp = candidate bestOpEfficiency = efficiency bestOpBackSpan = backspan bestOpForeSpan = forespan # If we can't find a copy instruction that encodes this block, or the # best one we've found is a net efficiency loss, we'll have to issue # a TargetRead... later. 
if bestOp is None or bestOpEfficiency < 1.0: targetEncodingOffset += 1 continue # We found an encoding for the target block, so issue a TargetRead for # all the bytes from the end of the last block up to now. if targetWriteOffset < targetEncodingOffset: tr = ops.TargetRead(target[targetWriteOffset:targetEncodingOffset]) opbuf.append(tr) targetWriteOffset = targetEncodingOffset opbuf.append(bestOp, rollback=bestOpBackSpan) targetWriteOffset += bestOpForeSpan # The next block we want to encode starts after the bytes we've # just written. targetEncodingOffset = targetWriteOffset # If it's been more than BLOCKSIZE bytes since we added a block to # targetmap, process the backlog. while (targetWriteOffset - nextTargetMapBlockOffset) >= blocksize: newblock, offset = next(targetblocks) targetmap.add_block(newblock, offset) nextTargetMapBlockOffset = offset + len(newblock) for op in opbuf: yield op if targetWriteOffset < len(target): # It's TargetRead all the way up to the end of the file. yield ops.TargetRead(target[targetWriteOffset:]) yield ops.SourceCRC32(crc32(source)) yield ops.TargetCRC32(crc32(target))
def diff_bytearrays(blocksize, source, target, metadata=""): """ Yield a sequence of patch operations that transform source to target. """ yield ops.Header(len(source), len(target), metadata) # We assume the entire source file will be available when applying this # patch, so load the entire thing into the block map. sourcemap = BlockMap() for block, offset in iter_blocks(source, blocksize): sourcemap.add_block(block, offset) # Points at the next byte of the target buffer that needs to be encoded. targetWriteOffset = 0 # Points at the next byte of the target buffer we're searching for # encodings for. If we can't find an encoding for a particular byte, we'll # leave targetWriteOffset alone and increment this offset, on the off # chance that we find a new encoding that we can extend backwards to # targetWriteOffset. targetEncodingOffset = 0 # Keep track of blocks seen in the part of the target buffer before # targetWriteOffset. Because targetWriteOffset does not always advance by # an even multiple of the blocksize, there can be some lag between when # targetWriteOffset moves past a particular byte, and when that byte's # block is added to targetmap. targetmap = BlockMap() targetblocks = iter_blocks(target, blocksize) # Points to the byte just beyond the most recent block added to targetmap; # the difference between this and targetWriteOffset measures the 'some lag' # described above. nextTargetMapBlockOffset = 0 # A place to store operations before we spit them out. This gives us an # opportunity to replace operations if we later come across a better # alternative encoding. opbuf = ops.OpBuffer(target) while targetEncodingOffset < len(target): # Keeps track of the most efficient operation for encoding this # particular offset that we've found so far. bestOp = None bestOpEfficiency = 0 bestOpBackSpan = 0 bestOpForeSpan = 0 blockend = targetEncodingOffset + blocksize block = target[targetEncodingOffset:blockend] for sourceOffset in sourcemap.get_block(block): backspan, forespan = measure_op( source, sourceOffset, target, targetEncodingOffset, ) if forespan == 0: # This block actually doesn't occur at this sourceOffset after # all. Perhaps it's a hash collision? continue if sourceOffset == targetEncodingOffset: candidate = ops.SourceRead(backspan+forespan) else: candidate = ops.SourceCopy( backspan+forespan, sourceOffset-backspan, ) lastSourceCopyOffset, lastTargetCopyOffset = ( opbuf.copy_offsets(backspan) ) efficiency = candidate.efficiency( lastSourceCopyOffset, lastTargetCopyOffset) if efficiency > bestOpEfficiency: bestOp = candidate bestOpEfficiency = efficiency bestOpBackSpan = backspan bestOpForeSpan = forespan for targetOffset in targetmap.get_block(block): backspan, forespan = measure_op( target, targetOffset, target, targetEncodingOffset, ) if forespan == 0: # This block actually doesn't occur at this sourceOffset after # all. Perhaps it's a hash collision? continue candidate = ops.TargetCopy( backspan+forespan, targetOffset-backspan, ) lastSourceCopyOffset, lastTargetCopyOffset = ( opbuf.copy_offsets(backspan) ) efficiency = candidate.efficiency( lastSourceCopyOffset, lastTargetCopyOffset) if efficiency > bestOpEfficiency: bestOp = candidate bestOpEfficiency = efficiency bestOpBackSpan = backspan bestOpForeSpan = forespan # If we can't find a copy instruction that encodes this block, or the # best one we've found is a net efficiency loss, we'll have to issue # a TargetRead... later. 
if bestOp is None or bestOpEfficiency < 1.0: targetEncodingOffset += 1 continue # We found an encoding for the target block, so issue a TargetRead for # all the bytes from the end of the last block up to now. if targetWriteOffset < targetEncodingOffset: tr = ops.TargetRead(target[targetWriteOffset:targetEncodingOffset]) opbuf.append(tr) targetWriteOffset = targetEncodingOffset opbuf.append(bestOp, rollback=bestOpBackSpan) targetWriteOffset += bestOpForeSpan # The next block we want to encode starts after the bytes we've # just written. targetEncodingOffset = targetWriteOffset # If it's been more than BLOCKSIZE bytes since we added a block to # targetmap, process the backlog. while (targetWriteOffset - nextTargetMapBlockOffset) >= blocksize: newblock, offset = next(targetblocks) targetmap.add_block(newblock, offset) nextTargetMapBlockOffset = offset + len(newblock) for op in opbuf: yield op if targetWriteOffset < len(target): # It's TargetRead all the way up to the end of the file. yield ops.TargetRead(target[targetWriteOffset:]) yield ops.SourceCRC32(crc32(source)) yield ops.TargetCRC32(crc32(target))
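A hedged round-trip sketch tying the pieces together: diff two in-memory buffers, then serialize the resulting operations with write_bps (defined below). The blocksize of 8 is an arbitrary choice for such small inputs.

Python
import io

source = b"The quick brown fox jumps over the lazy dog."
target = b"The quick brown fox leaps over the lazy dog."

patch = io.BytesIO()
write_bps(diff_bytearrays(8, source, target), patch)
print(len(patch.getvalue()), "byte patch")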
Python
def read_bps(in_buf):
    """
    Yields BPS patch instructions from the BPS patch in in_buf.

    in_buf should implement io.IOBase, opened in 'rb' mode.
    """
    # Keep track of the input file's CRC32, so we can check it at the end.
    in_buf = util.CRCIOWrapper(in_buf)

    # header
    magic = in_buf.read(4)
    if magic != C.BPS_MAGIC:
        raise CorruptFile("File magic should be {expected!r}, got "
                "{actual!r}".format(expected=C.BPS_MAGIC, actual=magic))

    sourcesize = util.read_var_int(in_buf)
    targetsize = util.read_var_int(in_buf)
    metadatasize = util.read_var_int(in_buf)
    metadata = in_buf.read(metadatasize).decode('utf-8')

    yield ops.Header(sourcesize, targetsize, metadata)

    targetWriteOffset = 0
    sourceRelativeOffset = 0
    targetRelativeOffset = 0
    while targetWriteOffset < targetsize:
        value = util.read_var_int(in_buf)
        opcode = value & C.OPCODEMASK
        length = (value >> C.OPCODESHIFT) + 1

        if opcode == C.OP_SOURCEREAD:
            yield ops.SourceRead(length)

        elif opcode == C.OP_TARGETREAD:
            yield ops.TargetRead(in_buf.read(length))

        elif opcode == C.OP_SOURCECOPY:
            raw_offset = util.read_var_int(in_buf)
            offset = raw_offset >> 1
            if raw_offset & 1:
                offset = -offset
            sourceRelativeOffset += offset
            yield ops.SourceCopy(length, sourceRelativeOffset)
            sourceRelativeOffset += length

        elif opcode == C.OP_TARGETCOPY:
            raw_offset = util.read_var_int(in_buf)
            offset = raw_offset >> 1
            if raw_offset & 1:
                offset = -offset
            targetRelativeOffset += offset
            yield ops.TargetCopy(length, targetRelativeOffset)
            targetRelativeOffset += length

        else:
            raise CorruptFile("Unknown opcode: {opcode:02b}".format(
                opcode=opcode))

        targetWriteOffset += length

    # footer
    yield ops.SourceCRC32(unpack("<I", in_buf.read(4))[0])
    yield ops.TargetCRC32(unpack("<I", in_buf.read(4))[0])

    # Check the patch's CRC32.
    actual = in_buf.crc32
    expected = unpack("<I", in_buf.read(4))[0]

    if expected != actual:
        raise CorruptFile("Patch claims its CRC32 is {expected:08X}, but "
                "it's really {actual:08X}".format(
                    expected=expected, actual=actual))
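The opcode/length packing follows the BPS spec: the low two bits of each var-int select the action and the remaining bits store length minus one. A worked decode, assuming C.OPCODEMASK is 0b11 and C.OPCODESHIFT is 2:

Python
value = 0b1101               # as returned by util.read_var_int
opcode = value & 0b11        # 0b01, i.e. TargetRead in the BPS spec
length = (value >> 2) + 1    # 4 bytes
print(opcode, length)        # 1 4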
Python
def write_bps(iterable, out_buf):
    """
    Encodes BPS patch instructions from the iterable into a patch in out_buf.

    iterable should yield a sequence of BPS patch instructions.

    out_buf should implement io.IOBase, opened in 'wb' mode.
    """
    # Make sure we have a sensible stream to write.
    iterable = check_stream(iterable)

    # Keep track of the patch data's CRC32, so we can write it out at the
    # end.
    out_buf = util.CRCIOWrapper(out_buf)

    sourceRelativeOffset = 0
    targetRelativeOffset = 0

    for item in iterable:
        out_buf.write(item.encode(sourceRelativeOffset, targetRelativeOffset))

        if isinstance(item, ops.SourceCopy):
            sourceRelativeOffset = item.offset + item.bytespan
        elif isinstance(item, ops.TargetCopy):
            targetRelativeOffset = item.offset + item.bytespan

    # Lastly, write out the patch CRC32.
    out_buf.write(pack("<I", out_buf.crc32))
Python
def read_bps_asm(in_buf):
    """
    Yields BPS patch instructions from the BPS patch in in_buf.

    in_buf should implement io.IOBase, opened in 'rt' mode.
    """
    # header
    magic = in_buf.readline()
    if magic != C.BPSASM_MAGIC:
        raise CorruptFile("BPS asm should have magic set to {expected!r}, "
                "not {actual!r}".format(expected=C.BPSASM_MAGIC, actual=magic))

    label, sourcesize = in_buf.readline().split(":")
    _expect_label(C.SOURCESIZE, label)
    sourcesize = int(sourcesize)

    label, targetsize = in_buf.readline().split(":")
    _expect_label(C.TARGETSIZE, label)
    targetsize = int(targetsize)

    label, _ = in_buf.readline().split(":")
    _expect_label(C.METADATA, label)
    metadata = _read_multiline_text(in_buf)

    yield ops.Header(sourcesize, targetsize, metadata)

    targetWriteOffset = 0
    while targetWriteOffset < targetsize:
        label, value = in_buf.readline().split(":")

        item = None
        if label == C.SOURCEREAD:
            length = int(value)
            item = ops.SourceRead(length)

        elif label == C.TARGETREAD:
            data = _read_multiline_text(in_buf)
            data = NON_HEX_DIGIT_RE.sub("", data)
            data = data.encode('ascii')
            data = a2b_hex(data)
            item = ops.TargetRead(data)

        elif label == C.SOURCECOPY:
            length, offset = [int(x) for x in value.split()]
            item = ops.SourceCopy(length, offset)

        elif label == C.TARGETCOPY:
            length, offset = [int(x) for x in value.split()]
            item = ops.TargetCopy(length, offset)

        else:
            raise CorruptFile("Unknown label: {label!r}".format(label=label))

        yield item
        targetWriteOffset += item.bytespan

    label, sourcecrc32 = in_buf.readline().split(":")
    _expect_label(C.SOURCECRC32, label)
    yield ops.SourceCRC32(int(sourcecrc32, 16))

    label, targetcrc32 = in_buf.readline().split(":")
    _expect_label(C.TARGETCRC32, label)
    yield ops.TargetCRC32(int(targetcrc32, 16))
Python
def write_bps_asm(iterable, out_buf):
    """
    Encodes BPS patch instructions into BPS assembler in out_buf.

    iterable should yield a sequence of BPS patch instructions.

    out_buf should implement io.IOBase, opened in 'wt' mode.
    """
    # Make sure we have a sensible stream to write.
    iterable = check_stream(iterable)

    # header
    header = next(iterable)
    out_buf.write(C.BPSASM_MAGIC)
    out_buf.write("{0}: {1:d}\n".format(C.SOURCESIZE, header.sourceSize))
    out_buf.write("{0}: {1:d}\n".format(C.TARGETSIZE, header.targetSize))

    # metadata
    out_buf.write("metadata:\n")
    lines = header.metadata.split("\n")
    if lines[-1] == "":
        lines.pop(-1)
    for line in lines:
        # Because we use a line containing only "." as the delimiter, we
        # need to escape all the lines beginning with dots.
        if line.startswith("."):
            out_buf.write(".")
        out_buf.write(line)
        out_buf.write("\n")
    out_buf.write(".\n")

    for item in iterable:
        if isinstance(item, ops.SourceRead):
            out_buf.write("sourceread: {0.bytespan}\n".format(item))

        elif isinstance(item, ops.TargetRead):
            out_buf.write("targetread:\n")
            data = item.payload
            # Dump the payload as hex, 40 bytes (80 hex digits) per line.
            while len(data) > 40:
                head, data = data[:40], data[40:]
                out_buf.write(b2a_hex(head).decode('ascii'))
                out_buf.write("\n")
            out_buf.write(b2a_hex(data).decode('ascii'))
            out_buf.write("\n.\n")

        elif isinstance(item, ops.SourceCopy):
            out_buf.write("sourcecopy: {0.bytespan} {0.offset}\n".format(item))

        elif isinstance(item, ops.TargetCopy):
            out_buf.write("targetcopy: {0.bytespan} {0.offset}\n".format(item))

        elif isinstance(item, ops.SourceCRC32):
            out_buf.write("sourcecrc32: {0.value:08X}\n".format(item))

        elif isinstance(item, ops.TargetCRC32):
            out_buf.write("targetcrc32: {0.value:08X}\n".format(item))
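For orientation, the assembler format written above is line-oriented: a magic line, sourcesize/targetsize headers, a metadata block closed by a lone '.', one line per opcode, and hex-dumped TargetRead payloads also closed by '.'. Assuming the C label constants match the lowercase strings used in the write path, the sourcecopy test patch appearing later in this collection would disassemble to roughly the following after the magic line (a sketch, not verified output):

sourcesize: 2
targetsize: 2
metadata:
.
sourcecopy: 1 1
sourcecopy: 1 0
sourcecrc32: 30694C07
targetcrc32: 824D4E7E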
Python
def testEmptyPatch(self):
    """
    The simplest possible patch can be processed correctly.
    """
    original = list(read_bps(BytesIO(find_bps("empty"))))
    result = list(test_optimize(original))

    self.assertSequenceEqual(original, result)
Python
def testMergeContiguousSourceCopys(self):
    """
    A SourceCopy is merged if it resumes where the previous one left off.
    """
    original = [
            ops.Header(3, 4),
            # Make sure the SourceCopy offset never matches the
            # targetWriteOffset, so that our SourceCopys won't be converted
            # to SourceReads.
            ops.TargetRead(b'A'),
            ops.SourceCopy(1, 0),
            # This SourceCopy resumes where the previous one left off, so
            # it can be merged with the previous one.
            ops.SourceCopy(1, 1),
            # This SourceCopy copies data from somewhere else in the file,
            # so it can't be merged.
            ops.SourceCopy(1, 0),
            ops.SourceCRC32(0x66A031A7),
            ops.TargetCRC32(0x66A031A7),
        ]
    expected = [
            ops.Header(3, 4),
            ops.TargetRead(b'A'),
            ops.SourceCopy(2, 0),
            ops.SourceCopy(1, 0),
            ops.SourceCRC32(0x66A031A7),
            ops.TargetCRC32(0x66A031A7),
        ]

    actual = list(test_optimize(original))

    self.assertSequenceEqual(expected, actual)
Python
def testMergeContiguousTargetCopys(self):
    """
    A TargetCopy is merged if it resumes where the previous one left off.
    """
    original = [
            ops.Header(0, 4),
            ops.TargetRead(b'A'),
            ops.TargetCopy(1, 0),
            # This TargetCopy resumes where the previous one left off, so
            # it can be merged with the previous one.
            ops.TargetCopy(1, 1),
            # This TargetCopy copies data from somewhere else in the file,
            # so it can't be merged.
            ops.TargetCopy(1, 0),
            ops.SourceCRC32(0x00000000),
            ops.TargetCRC32(0x66A031A7),
        ]
    expected = [
            ops.Header(0, 4),
            ops.TargetRead(b'A'),
            ops.TargetCopy(2, 0),
            ops.TargetCopy(1, 0),
            ops.SourceCRC32(0x00000000),
            ops.TargetCRC32(0x66A031A7),
        ]

    actual = list(test_optimize(original))

    self.assertSequenceEqual(expected, actual)
Python
def testEmpty(self):
    """
    An empty sequence yields no blocks.
    """
    res = list(diff.iter_blocks([], 4))
    self.assertEqual([], res)
Python
def testReturnValues(self):
    """
    Each item contains a block of values and the source offset.
    """
    source = [1,2,3,4,5,6,7,8,9,0]

    self.assertEqual(
            ([1,2,3,4], 0),
            next(diff.iter_blocks(source, 4)),
        )
Python
def testBlockSize(self):
    """
    The blocksize parameter controls the size of the blocks.
    """
    source = [1,2,3,4,5,6,7,8,9,0]

    self.assertEqual([
            # Each item is a block and an offset.
            ([1,2,3,4], 0),
            ([2,3,4,5], 1),
            ([3,4,5,6], 2),
            ([4,5,6,7], 3),
            ([5,6,7,8], 4),
            ([6,7,8,9], 5),
            ([7,8,9,0], 6),
            ([8,9,0], 7),
            ([9,0], 8),
            ([0], 9),
        ], list(diff.iter_blocks(source, 4)))

    self.assertEqual([
            ([1,2,3], 0),
            ([2,3,4], 1),
            ([3,4,5], 2),
            ([4,5,6], 3),
            ([5,6,7], 4),
            ([6,7,8], 5),
            ([7,8,9], 6),
            ([8,9,0], 7),
            ([9,0], 8),
            ([0], 9),
        ], list(diff.iter_blocks(source, 3)))
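The three iter_blocks tests above pin down its contract: a sliding window that advances one element per step and lets the trailing blocks shrink at the end of the sequence. A hypothetical reimplementation that satisfies these tests (the real bps.diff.iter_blocks may differ):

def iter_blocks_sketch(data, blocksize):
    # Slide a window of up to `blocksize` elements across `data`, one
    # offset at a time; blocks near the end are allowed to be shorter.
    for offset in range(len(data)):
        yield (data[offset:offset + blocksize], offset)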
Python
def testExactlyMatchingBlocks(self):
    """
    measure_op yields a matching block.
    """
    result = diff.measure_op(
            b'aAbAa', 1,
            b'xxAx', 2,
        )

    self.assertEqual(
            (0, 1),
            result,
        )
Python
def testExtendBlocksForward(self):
    """
    measure_op extends matches forward as far as possible.
    """
    # Measure up to the first difference.
    result = diff.measure_op(
            b'xABCDE', 1,
            b'xyABCx', 2,
        )

    self.assertEqual(
            (0, 3),
            result,
        )

    # Measure up to the end of either one of the strings.
    result = diff.measure_op(
            b'xABCD', 1,
            b'xyABC', 2,
        )

    self.assertEqual(
            (0, 3),
            result,
        )
Python
def testExtendBlocksBackward(self):
    """
    measure_op extends blocks backward as far as possible.
    """
    # Measure back to the first difference.
    result = diff.measure_op(
            b'yABCDEFGHIJ', 8,
            #         ^
            b'xxABCDEFGHI', 9,
            #          ^
        )

    self.assertEqual((7, 2), result)

    # Measure back to the beginning of the string.
    result = diff.measure_op(
            b'ABCDEFGHIJK', 7,
            #        ^
            b'xxABCDEFGHI', 9,
            #          ^
        )

    self.assertEqual((7, 2), result)
Python
def testNoMatch(self):
    """
    measure_op returns no ops if the source and target don't match.

    This can happen, for example, if there's a hash collision.
    """
    source = b'AAAAAA'
    target = b'BBBBBB'

    result = diff.measure_op(
            source, 4,
            target, 4,
        )

    self.assertEqual((0, 0), result)
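Read together, the four measure_op tests above imply its return convention: given an anchor offset into each buffer, it returns (backspan, forward length), i.e. how many bytes the match extends before the anchors and how many it covers from the anchors onward, with (0, 0) meaning no match. A hypothetical reimplementation passing these tests (the real bps.diff.measure_op may differ):

def measure_op_sketch(source, sourceoffset, target, targetoffset):
    # Extend the match backward from the anchor offsets...
    back = 0
    while (sourceoffset - back > 0 and targetoffset - back > 0 and
            source[sourceoffset - back - 1] == target[targetoffset - back - 1]):
        back += 1

    # ...then forward, stopping at the first difference or either end.
    forward = 0
    while (sourceoffset + forward < len(source) and
            targetoffset + forward < len(target) and
            source[sourceoffset + forward] == target[targetoffset + forward]):
        forward += 1

    return (back, forward)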
Python
def testSwap(self):
    """
    diff_bytearrays produces a working diff for AB -> BA
    """
    self._runtest(b'AB', b'BA')
Python
def testEmptySource(self):
    """
    diff_bytearrays works with an empty source file.
    """
    self._runtest(b'', b'AB')
Python
def testEmptyTarget(self):
    """
    diff_bytearrays works with an empty target file.
    """
    self._runtest(b'AB', b'')
Python
def testTrailingNULs(self):
    """
    diff_bytearrays produces a valid patch even if target ends with NULs.
    """
    self._runtest(b'A', b'A\x00')
Python
def testMetadataSupported(self):
    """
    diff_bytearrays can store metadata if requested.
    """
    ops = diff.diff_bytearrays(2, b'A', b'B', "metadata goes here")

    header = next(ops)

    self.assertEqual(header.metadata, "metadata goes here")
Python
def testVariableBlockSize(self):
    """
    Blocksize affects the generated delta encoding.
    """
    source = b'ABABAB'
    target = b'AAABBB'

    self.assertEqual(
            list(diff.diff_bytearrays(2, source, target)),
            [
                ops.Header(len(source), len(target)),
                ops.TargetRead(b'AA'),
                ops.SourceRead(2),
                ops.TargetRead(b'B'),
                ops.SourceRead(1),
                ops.SourceCRC32(0x76F34B4D),
                ops.TargetCRC32(0x1A7E625E),
            ],
        )

    self.assertEqual(
            list(diff.diff_bytearrays(3, source, target)),
            [
                ops.Header(len(source), len(target)),
                ops.TargetRead(b'AAABB'),
                ops.SourceRead(1),
                ops.SourceCRC32(0x76F34B4D),
                ops.TargetCRC32(0x1A7E625E),
            ],
        )
Python
def testReadStopsAfterHighBitSet(self):
    """
    Reader doesn't read past the byte with the high bit set.
    """
    buf = io.BytesIO(b"\x00\x80\x10")

    self.assertEqual(util.read_var_int(buf), 128)
    self.assertEqual(buf.read(), b"\x10")
Python
def testReadComplainsAboutTruncatedData(self):
    """
    Reader raises an exception if it can't find the end of a varint.
    """
    buf = io.BytesIO(b"\x00\x00")

    self.assertRaises(Exception, util.read_var_int, buf)
Python
def testProgressiveReads(self):
    """
    The CRC32 is updated as reads occur.
    """
    buf = io.BytesIO(b'ab')
    stream = util.CRCIOWrapper(buf)

    self.assertEqual(stream.read(1), b'a')
    self.assertEqual(stream.crc32, crc32(b'a'))

    self.assertEqual(stream.read(1), b'b')
    self.assertEqual(stream.crc32, crc32(b'ab'))
Python
def testProgressiveWrites(self):
    """
    The CRC32 is updated as writes occur.
    """
    buf = io.BytesIO()
    stream = util.CRCIOWrapper(buf)

    stream.write(b'a')
    self.assertEqual(stream.crc32, crc32(b'a'))

    stream.write(b'b')
    self.assertEqual(stream.crc32, crc32(b'ab'))

    self.assertEqual(stream.getvalue(), b'ab')
Python
def testTruncateToCurrentPos(self):
    """
    Truncating to the current position is allowed.
    """
    buf = io.BytesIO()
    stream = util.CRCIOWrapper(buf)

    stream.write(b'abc')
    stream.truncate()

    self.assertEqual(stream.getvalue(), b'abc')
    self.assertEqual(stream.crc32, crc32(b'abc'))
Python
def testTruncateToZero(self):
    """
    Truncating to zero is allowed.
    """
    buf = io.BytesIO()
    stream = util.CRCIOWrapper(buf)

    stream.write(b'abc')
    stream.truncate(0)

    self.assertEqual(stream.getvalue(), b'')
    self.assertEqual(stream.crc32, 0)
Python
def testTruncateToNonZero(self):
    """
    Truncating to any other length is prohibited.
    """
    buf = io.BytesIO()
    stream = util.CRCIOWrapper(buf)

    stream.write(b'abc')

    self.assertRaises(io.UnsupportedOperation, stream.truncate, 5)
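The CRCIOWrapper tests above (progressive reads and writes plus the three truncate cases) effectively specify the wrapper. A hypothetical minimal implementation consistent with those tests (the real bps.util.CRCIOWrapper may differ):

import io
from zlib import crc32


class CRCIOWrapperSketch:
    """Wrap a stream and keep a running CRC32 of all bytes read or
    written through it."""

    def __init__(self, inner):
        self._inner = inner
        self.crc32 = 0

    def read(self, *args):
        data = self._inner.read(*args)
        self.crc32 = crc32(data, self.crc32)
        return data

    def write(self, data):
        self.crc32 = crc32(data, self.crc32)
        return self._inner.write(data)

    def truncate(self, size=None):
        if size is None:
            # Truncating at the current position leaves the CRC intact.
            return self._inner.truncate()
        if size == 0:
            self.crc32 = 0
            return self._inner.truncate(0)
        raise io.UnsupportedOperation(
                "can only truncate to 0 or the current position")

    def __getattr__(self, name):
        # Delegate everything else (getvalue, seek, ...) to the wrapped
        # stream.
        return getattr(self._inner, name)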
Python
def _runtests(self, name, eventlist):
    """
    Test the various interactions for a given patch.
    """
    # Test that we can write the asm version of the patch.
    out_buf = StringIO()
    write_bps_asm(eventlist, out_buf)
    self.assertMultiLineEqual(out_buf.getvalue(), find_bpsa(name))

    # Test that we can read the asm version of the patch.
    in_buf = StringIO(find_bpsa(name))
    items = list(read_bps_asm(in_buf))
    self.assertSequenceEqual(eventlist, items)

    # Test that we can write the binary patch.
    out_buf = BytesIO()
    write_bps(eventlist, out_buf)
    self.assertSequenceEqual(out_buf.getvalue(), find_bps(name))

    # Test that we can read the binary patch.
    in_buf = BytesIO(find_bps(name))
    items = list(read_bps(in_buf))
    self.assertSequenceEqual(eventlist, items)

    # Test that we can roundtrip the binary version through our reader and
    # writer.
    original = BytesIO(find_bps(name))
    events = read_bps(original)
    output = BytesIO()
    write_bps(events, output)
    self.assertSequenceEqual(original.getvalue(), output.getvalue())

    # Test that we can roundtrip the asm version through our reader and
    # writer.
    original = StringIO(find_bpsa(name))
    events = read_bps_asm(original)
    output = StringIO()
    write_bps_asm(events, output)
    self.assertMultiLineEqual(original.getvalue(), output.getvalue())
Python
def testEmptyPatch(self):
    """
    The simplest possible patch can be processed correctly.
    """
    self._runtests("empty", [
            ops.Header(0, 0),
            ops.SourceCRC32(0),
            ops.TargetCRC32(0),
        ])
Python
def testPatchWithMetadata(self):
    """
    We can process a patch with metadata.
    """
    self._runtests("metadata", [
            ops.Header(0, 0, '<test>\n. leading "." is escaped\n</test>\n'),
            ops.SourceCRC32(0),
            ops.TargetCRC32(0),
        ])
Python
def testPatchWithSourceRead(self):
    """
    We can process a patch with a SourceRead opcode.
    """
    self._runtests("sourceread", [
            ops.Header(1, 1),
            ops.SourceRead(1),
            # For the CRC32 to be correct, the one byte must be b'A'
            ops.SourceCRC32(0xD3D99E8B),
            ops.TargetCRC32(0xD3D99E8B),
        ])
Python
def testPatchWithTargetRead(self):
    """
    We can process a patch with a TargetRead opcode.
    """
    self._runtests("targetread", [
            ops.Header(0, 1),
            ops.TargetRead(b'A'),
            ops.SourceCRC32(0x00000000),
            ops.TargetCRC32(0xD3D99E8B),
        ])
Python
def testPatchWithSourceCopy(self):
    """
    We can process a patch with a SourceCopy opcode.
    """
    self._runtests("sourcecopy", [
            ops.Header(2, 2),
            # We copy the second byte in the source file.
            ops.SourceCopy(1, 1),
            # We copy the first byte in the source file.
            ops.SourceCopy(1, 0),
            # This CRC32 represents b'AB'
            ops.SourceCRC32(0x30694C07),
            # This CRC32 represents b'BA'
            ops.TargetCRC32(0x824D4E7E),
        ])
Python
def testPatchWithTargetCopy(self):
    """
    We can process a patch with a TargetCopy opcode.
    """
    self._runtests("targetcopy", [
            ops.Header(0, 4),
            # Add a TargetRead opcode, so TargetCopy has something to copy.
            ops.TargetRead(b'A'),
            # Add a TargetCopy opcode that does the RLE trick of reading
            # more data than is currently written.
            ops.TargetCopy(2, 0),
            # Add a TargetCopy that seeks to an earlier offset, so we make
            # sure negative offsets are handled correctly.
            ops.TargetCopy(1, 0),
            # This CRC32 represents b''
            ops.SourceCRC32(0x00000000),
            # This CRC32 represents b'AAAA'
            ops.TargetCRC32(0x9B0D08F1),
        ])
Python
def encode(self, sourceRelativeOffset, targetRelativeOffset):
    """
    Returns a bytestring representing this operation.

    sourceRelativeOffset is used when encoding SourceCopy operations,
    targetRelativeOffset is used when encoding TargetCopy operations.
    """
    raise NotImplementedError()
Python
def encoded_size(self, sourceRelativeOffset, targetRelativeOffset):
    """
    Estimate the size of the bytestring returned by .encode()
    """
    raise NotImplementedError()
Python
def efficiency(self, sourceRelativeOffset, targetRelativeOffset):
    """
    Returns a float representing the efficiency of this op at this offset.
    """
    return self.bytespan / self.encoded_size(
            sourceRelativeOffset, targetRelativeOffset)
Python
def extend(self, other):
    """
    Concatenate the other operation with this one, if possible.

    Raises TypeError if the other operation is of an incompatible type,
    or ValueError if the other operation isn't contiguous with this one.
    """
Python
def shrink(self, length):
    """
    Reduce the bytespan of this operation by the given amount.

    If length is positive, shrinkage will occur from the front. If length
    is negative, shrinkage will occur from the end (much like Python's
    slicing operators).

    Length should never be 0, and abs(length) should never be greater
    than or equal to the bytespan property.
    """
Python
def _append(self, operation):
    """
    Internal method.

    Append the given operation to the list, maintaining internal caches.
    """
    if self._buf:
        _, writeOffset, lastSourceCopyOffset, lastTargetCopyOffset = \
                self._buf[-1]
    else:
        writeOffset = lastSourceCopyOffset = lastTargetCopyOffset = 0

    writeOffset += operation.bytespan

    if isinstance(operation, SourceCopy):
        lastSourceCopyOffset = operation.offset + operation.bytespan
    elif isinstance(operation, TargetCopy):
        lastTargetCopyOffset = operation.offset + operation.bytespan

    self._buf.append(
            (operation, writeOffset, lastSourceCopyOffset,
                lastTargetCopyOffset)
        )
Python
def _check_next(iterable):
    """
    Internal function.

    Check the iterable does have a next value, and return it.
    """
    try:
        return next(iterable)
    except StopIteration:
        raise CorruptFile("truncated patch: expected more opcodes after this.")
Python
def check_stream(iterable):
    """
    Yields items from iterable if they represent a valid BPS patch.

    Raises CorruptFile if any problems are detected.
    """
    # Make sure we have an iterable.
    iterable = iter(iterable)

    header = _check_next(iterable)

    if not isinstance(header, ops.Header):
        raise CorruptFile("bad hunk: expected header, not "
                "{header!r}".format(header=header))

    yield header

    sourceSize = header.sourceSize
    targetSize = header.targetSize
    targetWriteOffset = 0

    while targetWriteOffset < targetSize:
        item = _check_next(iterable)

        if isinstance(item, ops.SourceRead):
            # This opcode reads from the source file, from targetWriteOffset
            # to targetWriteOffset+length, so we need to be sure that
            # byte-range exists in the source file as well as the target.
            if targetWriteOffset + item.bytespan > sourceSize:
                raise CorruptFile("bad hunk: reads past the end of the "
                        "source file: {item!r}".format(item=item))

        elif isinstance(item, ops.TargetRead):
            # Nothing special we need to check for this operation.
            pass

        elif isinstance(item, ops.SourceCopy):
            # Not allowed to SourceCopy past the end of the source file.
            if item.offset + item.bytespan > sourceSize:
                raise CorruptFile("bad hunk: reads past the end "
                        "of the source file: {item!r}".format(item=item))

        elif isinstance(item, ops.TargetCopy):
            # Not allowed to TargetCopy an offset that points past the part
            # we've written.
            if item.offset >= targetWriteOffset:
                raise CorruptFile("bad hunk: reads past the end of the "
                        "written part of the target file at "
                        "{targetWriteOffset}: {item!r}".format(item=item,
                            targetWriteOffset=targetWriteOffset))

        else:
            raise CorruptFile("bad hunk: unknown opcode {item!r}".format(
                item=item))

        targetWriteOffset += item.bytespan
        if targetWriteOffset > targetSize:
            raise CorruptFile("bad hunk: writes past the end of the target: "
                    "{item!r}".format(item=item))

        yield item

    item = _check_next(iterable)
    if not isinstance(item, ops.SourceCRC32):
        raise CorruptFile("bad hunk: expected SourceCRC32, not "
                "{item!r}".format(item=item))
    yield item

    item = _check_next(iterable)
    if not isinstance(item, ops.TargetCRC32):
        raise CorruptFile("bad hunk: expected TargetCRC32, not "
                "{item!r}".format(item=item))
    yield item

    # Check that the iterable is now empty.
    try:
        garbage = next(iterable)
        raise CorruptFile("trailing garbage in stream: {garbage!r}".format(
            garbage=garbage))
    except StopIteration:
        pass
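Because check_stream is itself a generator, validation happens lazily as the ops are consumed, which is how write_bps and optimize use it. A usage sketch (patch_ops and process are hypothetical stand-ins for any ops iterable and consumer):

try:
    for op in check_stream(patch_ops):
        process(op)
except CorruptFile as err:
    print("invalid patch:", err)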
Python
def find_data(name):
    """
    Retrieves the raw contents of a file in the test data directory.
    """
    return get_data("bps.test", "testdata/{0}".format(name))
Python
def find_bps(name):
    """
    Retrieves the raw contents of a BPS patch from the test data directory.
    """
    return find_data("{0}.bps".format(name))
Python
def find_bpsa(name):
    """
    Retrieves the contents of an assembler file from the test data
    directory.
    """
    rawdata = find_data("{0}.bpsa".format(name))
    return rawdata.decode("utf-8")
Python
def testEmptyPatch(self):
    """
    The simplest possible patch should be disassembled correctly.
    """
    in_buf = BytesIO(find_bps("empty"))
    out_buf = StringIO()

    asm.disassemble(in_buf, out_buf)

    self.assertMultiLineEqual(out_buf.getvalue(), find_bpsa("empty"))
Python
def testPatchWithMetadata(self):
    """
    We correctly disassemble a patch with metadata.
    """
    in_buf = BytesIO(find_bps("metadata"))
    out_buf = StringIO()

    asm.disassemble(in_buf, out_buf)

    self.assertMultiLineEqual(out_buf.getvalue(), find_bpsa("metadata"))
Python
def testEmptyPatch(self):
    """
    The simplest possible patch should be assembled correctly.
    """
    in_buf = StringIO(find_bpsa("empty"))
    out_buf = BytesIO()

    asm.assemble(in_buf, out_buf)

    self.assertSequenceEqual(out_buf.getvalue(), find_bps("empty"))
Python
def testPatchWithMetadata(self):
    """
    We correctly construct a patch with metadata.
    """
    in_buf = StringIO(find_bpsa("metadata"))
    out_buf = BytesIO()

    asm.assemble(in_buf, out_buf)

    self.assertSequenceEqual(out_buf.getvalue(), find_bps("metadata"))
Python
def optimize(iterable):
    """
    Yields a simplified sequence of patch operations from iterable.
    """
    iterable = check_stream(iterable)

    header = next(iterable)
    yield header

    lastItem = next(iterable)

    if isinstance(lastItem, ops.SourceCopy) and lastItem.offset == 0:
        # SourceCopy is copying from the start of the file, so it might as
        # well be a SourceRead.
        lastItem = ops.SourceRead(lastItem.bytespan)

    targetWriteOffset = 0

    for item in iterable:
        if (
                isinstance(lastItem, ops.SourceRead) and
                isinstance(item, ops.SourceRead)
            ):
            # We can merge consecutive SourceRead operations.
            lastItem.extend(item)
            continue

        elif (
                isinstance(lastItem, ops.TargetRead) and
                isinstance(item, ops.TargetRead)
            ):
            # We can merge consecutive TargetRead operations.
            lastItem.extend(item)
            continue

        elif (
                isinstance(lastItem, ops.SourceCopy) and
                isinstance(item, ops.SourceCopy) and
                lastItem.offset + lastItem.bytespan == item.offset
            ):
            # We can merge consecutive SourceCopy operations, as long as the
            # following ones have a relative offset of 0 from the end of the
            # previous one.
            lastItem.extend(item)
            continue

        elif (
                isinstance(lastItem, ops.TargetCopy) and
                isinstance(item, ops.TargetCopy) and
                lastItem.offset + lastItem.bytespan == item.offset
            ):
            # We can merge consecutive TargetCopy operations, as long as the
            # following ones have a relative offset of 0 from the end of the
            # previous one.
            lastItem.extend(item)
            continue

        if (
                isinstance(lastItem, ops.SourceCopy) and
                lastItem.offset == targetWriteOffset
            ):
            # A SourceRead is just a SourceCopy that implicitly has its read
            # offset set to targetWriteOffset.
            lastItem = ops.SourceRead(lastItem.bytespan)

        yield lastItem
        targetWriteOffset += lastItem.bytespan

        lastItem = item

    yield lastItem
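Since read_bps, optimize and write_bps all produce and consume the same stream of ops, they compose into a pipeline. A sketch that rewrites a patch with mergeable ops merged ('in.bps' and 'out.bps' are hypothetical file names):

# Stream a patch through the optimizer end to end.
with open('in.bps', 'rb') as src, open('out.bps', 'wb') as dst:
    write_bps(optimize(read_bps(src)), dst)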
Python
def read_var_int(handle):
    """
    Read a variable-length integer from the given file handle.
    """
    res = 0
    shift = 1

    while True:
        byte = handle.read(1)[0]
        res += (byte & 0x7f) * shift
        if byte & 0x80:
            break
        shift <<= 7
        res += shift

    return res
Python
def encode_var_int(number):
    """
    Returns a bytearray encoding the given number.
    """
    buf = bytearray()
    shift = 1

    while True:
        buf.append(number & 0x7F)
        number -= buf[-1]

        if number == 0:
            buf[-1] |= 0x80
            break

        number -= shift
        number >>= 7
        shift += 7

    return buf
Python
def measure_var_int(number):
    """
    Returns the length of the bytearray returned by encode_var_int().
    """
    length = 0
    shift = 1

    while True:
        length += 1
        number -= (number & 0x7F)

        if number == 0:
            break

        number -= shift
        number >>= 7
        shift += 7

    return length
Python
def write_var_int(number, handle):
    """
    Writes a variable-length integer to the given file handle.
    """
    handle.write(encode_var_int(number))
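The four varint helpers are mutually consistent: encode_var_int and read_var_int are inverses, and measure_var_int predicts the encoded length without building the bytearray. A round-trip sketch:

import io

for n in (0, 1, 127, 128, 16384, 2 ** 31):
    # The measured length matches the actual encoding.
    assert len(encode_var_int(n)) == measure_var_int(n)

    # Writing then reading gives the original number back.
    buf = io.BytesIO()
    write_var_int(n, buf)
    buf.seek(0)
    assert read_var_int(buf) == n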
Python
def help_string(config_dict, config_section):
    """Generate help string for specific configuration"""
    result = "Configuration: " + config_section + "\n"
    if "speed" in config_dict[config_section]:
        result += "Speed: " + config_dict[config_section]["speed"] + "\n" + \
                SPEED.get(config_dict[config_section]["speed"], "")
    if "risk" in config_dict[config_section]:
        result += "Risk: " + config_dict[config_section]["risk"] + "\n" + \
                RISK.get(config_dict[config_section]["risk"], "")
    result += "Subtests:\n " + \
            "\n ".join(get_subtests(config_dict, config_section))
    return result
Python
def load_device(filename: str) -> Device:
    """
    Loads Device object from json file
    :param filename: file name
    :return: Device object
    """
    with open(filename, "r") as f:
        return jsonpickle.decode(f.read())
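The natural counterpart to load_device is the serialization step. A hypothetical save_device assuming the same jsonpickle round-trip convention (not necessarily the project's actual implementation):

def save_device(device: Device, filename: str) -> None:
    """
    Saves Device object to json file (hypothetical counterpart sketch)
    :param device: Device object
    :param filename: file name
    """
    with open(filename, "w") as f:
        f.write(jsonpickle.encode(device))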
Python
def output_single_memory_mismatch(self, ref_name: str, prof_name: str,
                                  dataset: Dict[str, List[SupportResult]]):
    """Output part of memory mismatch section"""
    mismatch = ""
    if dataset is self.memory_mismatch:
        mismatch = "persistent memory allocation"
    elif dataset is self.reset_mismatch:
        mismatch = "memory allocation during reset call"
    elif dataset is self.deselect_mismatch:
        mismatch = "memory allocation during deselect call"

    tags.h4("Differences in " + mismatch + ":",
            style="color:var(--orange-color)")

    header = ["Algorithm", ref_name + " (reference)",
              prof_name + " (profiled)"]
    data = []
    for key in dataset.keys():
        if dataset is self.memory_mismatch:
            ref = str(dataset[key][0].persistent_memory)
            prof = str(dataset[key][1].persistent_memory)
        elif dataset is self.reset_mismatch:
            ref = str(dataset[key][0].ram_reset)
            prof = str(dataset[key][1].ram_reset)
        elif dataset is self.deselect_mismatch:
            ref = str(dataset[key][0].ram_deselect)
            prof = str(dataset[key][1].ram_deselect)
        else:
            raise Exception("Wrong parameter in output_memory_mismatch")
        data.append([key, ref, prof])

    table(data, header)
Python
def show_hide_div(divname: str, hide=False):
    """
    Creates a show/hide button and matching div block
    :param divname: unique name of the div block
    :param hide: the div block is hidden by default if True
    :return: the div block
    """
    if hide:
        hidden.append(divname)
    else:
        shown.append(divname)
    tags.button("Show / Hide", onclick="hideButton('" + divname + "')")
    tags.br()
    if hide:
        return tags.div(id=divname, style="display:none")
    return tags.div(id=divname)
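Because the returned div is a dominate tag, it can be used as a context manager, so anything built inside the with-block ends up in the collapsible section. A usage sketch, assuming a dominate document is already open and "support-table" is a hypothetical div name:

with show_hide_div("support-table", hide=True):
    tags.p("This content starts hidden and toggles with the button.")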
Python
def show_all_button():
    """Creates a Show All button for every show/hide div block created"""
    tags.button("Show All",
                onclick="showAll(" + __get_js_array(__get_all_names()) + ")")
Python
def hide_all_button():
    """Creates a Hide All button for every show/hide div block created"""
    tags.button("Hide All",
                onclick="hideAll(" + __get_js_array(__get_all_names()) + ")")
Python
def default_button():
    """Creates a Default button that restores the initial shown/hidden
    state of every show/hide div block created"""
    tags.button("Default",
                onclick="defaultAll(" + __get_js_array(shown) + ", " +
                        __get_js_array(hidden) + ")")
Python
def download_file(tool_name, tool_url, tool_path):
    """
    Downloads file and saves it to tool_path
    :param tool_name: human-readable tool name used in progress messages
    :param tool_url: URL to download the file from
    :param tool_path: local path to save the downloaded file to
    :return: True on success, otherwise the (falsy) result of errmsg
    """
    print("Downloading " + tool_name + "... ", end="")
    try:
        with urlopen(tool_url) as remote, open(tool_path, "wb") as file:
            copyfileobj(remote, file)
        print("Done.")
        return True
    except Exception as ex:
        return errmsg(tool_name, "downloading", ex)
Python
def download_and_extract(tool_name, tool_url, file_translations):
    """
    Downloads zip archive and extracts its contents
    :param tool_name: human-readable tool name used in progress messages
    :param tool_url: URL of the zip archive to download
    :param file_translations: list of (original, destination) pairs; each
        extracted file `original` is moved to `destination`
    :return: True on success, False otherwise
    """
    archive = tool_name + "_dist.zip"
    directory = tool_name + "_extracted"

    if not download_file(tool_name, tool_url, archive):
        return False

    print("Extracting " + tool_name + "... ", end="")
    try:
        with ZipFile(archive, "r") as zipped:
            zipped.extractall(directory)
        print("Done.")
    except Exception as ex:
        errmsg(tool_name, "extracting", ex)
        try:
            remove(archive)
        except Exception as ex:
            print(archive, " could not be removed, please remove it "
                  "manually.", ex)
        return False

    return_status = True
    print("Finishing " + tool_name + " set-up...", end="")
    try:
        for (original, destination) in file_translations:
            replace(directory + "/" + original, destination)
        print("Done.")
    except Exception as ex:
        errmsg(tool_name + " files", "moving", ex)
        return_status = False

    print("Cleaning up after " + tool_name + " set-up...", end="")
    try:
        remove(archive)
        rmtree(directory)
        print("Done.")
    except Exception as ex:
        errmsg(tool_name + " set-up", "cleaning after", ex)
        print("\tRemove", archive, "and", directory, "directory manually.")
        return_status = False

    return return_status
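A usage sketch with placeholder values (the URL and file paths below are hypothetical):

# Fetch a zip, move two files out of it, and clean up the rest.
ok = download_and_extract(
    "exampletool",
    "https://example.com/exampletool.zip",
    [("dist/exampletool.jar", "./exampletool.jar"),
     ("dist/README.txt", "./exampletool-README.txt")])
if not ok:
    print("Set-up failed, see messages above.")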
Python
def contrast(self, other):
    """
    Produce contrast modules by comparing self to the other module
    :param other: other module of the same type
    :return: list of contrast modules
    """
    if self.module_name != other.module_name:
        raise Exception("Comparing module " + self.module_name +
                        " with " + other.module_name + ".")
    return []
Python
def parse_loop(cls, module, filename) -> None:
    """Performs general parsing loop for JCAlgTest result files"""
    with open(filename, "r") as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        line = line.strip()
        if parse_common_line(module, line):
            continue
        cls.parse_specific_lines(line, module, lines, i)
Python
def create_response_df(response, geo='global'):
    """
    Unpack response and create one dataframe for each ranking and each
    keyword
    """
    assert isinstance(
        response, dict), "Empty response, caught in transform.py. Try again."
    ranking = [*response[[*response][0]]]
    keywords = [*response]
    df_list = []
    for r in ranking:
        for kw in keywords:
            df_list.append(process_response(
                response, kw=kw, ranking=r, geo=geo))
    return pd.concat(df_list)