Dataset fields:
code: string, lengths 75-104k
docstring: string, lengths 1-46.9k
text: string, lengths 164-112k
def _print_foreign_playlist_copy_error(self):
    """ reset previous message """
    self.operation_mode = self.window_mode = NORMAL_MODE
    self.refreshBody()
    txt = '''Foreign playlist copying |failed|!

            Make sure the file is not open with another
            application and try to load it again
            '''
    self._show_help(txt, FOREIGN_PLAYLIST_COPY_ERROR_MODE,
                    caption=' Error ',
                    prompt=' Press any key ',
                    is_message=True)
reset previous message
def extract_from_urllib3():
    """
    Undo monkey-patching by :func:`inject_into_urllib3`.
    """
    util.ssl_.SSLContext = orig_util_SSLContext
    util.HAS_SNI = orig_util_HAS_SNI
    util.ssl_.HAS_SNI = orig_util_HAS_SNI
    util.IS_SECURETRANSPORT = False
    util.ssl_.IS_SECURETRANSPORT = False
Undo monkey-patching by :func:`inject_into_urllib3`.
def cycle(self):
    """
    Cycles through notifications with latest results from data feeds.
    """
    messages = self.poll_datafeeds()
    notifications = self.process_notifications(messages)
    self.draw_notifications(notifications)
Cycles through notifications with latest results from data feeds.
def right(self):
    """Right coordinate."""
    if self._has_real():
        return self._data.real_right
    return self._data.right
Right coordinate.
def build_server_from_argparser(description=None, server_klass=None, handler_klass=None):
    """
    Build a server from command line arguments. If a ServerClass or
    HandlerClass is specified, then the object must inherit from the
    corresponding AdvancedHTTPServer base class.

    :param str description: Description string to be passed to the argument parser.
    :param server_klass: Alternative server class to use.
    :type server_klass: :py:class:`.AdvancedHTTPServer`
    :param handler_klass: Alternative handler class to use.
    :type handler_klass: :py:class:`.RequestHandler`
    :return: A configured server instance.
    :rtype: :py:class:`.AdvancedHTTPServer`
    """
    import argparse

    def _argp_dir_type(arg):
        if not os.path.isdir(arg):
            raise argparse.ArgumentTypeError("{0} is not a valid directory".format(repr(arg)))
        return arg

    def _argp_port_type(arg):
        if not arg.isdigit():
            raise argparse.ArgumentTypeError("{0} is not a valid port".format(repr(arg)))
        arg = int(arg)
        if arg < 0 or arg > 65535:
            raise argparse.ArgumentTypeError("{0} is not a valid port".format(repr(arg)))
        return arg

    description = (description or 'HTTP Server')
    server_klass = (server_klass or AdvancedHTTPServer)
    handler_klass = (handler_klass or RequestHandler)

    parser = argparse.ArgumentParser(conflict_handler='resolve', description=description, fromfile_prefix_chars='@')
    parser.epilog = 'When a config file is specified with --config only the --log, --log-file and --password options will be used.'
    parser.add_argument('-c', '--conf', dest='config', type=argparse.FileType('r'), help='read settings from a config file')
    parser.add_argument('-i', '--ip', dest='ip', default='0.0.0.0', help='the ip address to serve on')
    parser.add_argument('-L', '--log', dest='loglvl', choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'), default='INFO', help='set the logging level')
    parser.add_argument('-p', '--port', dest='port', default=8080, type=_argp_port_type, help='port to serve on')
    parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + __version__)
    parser.add_argument('-w', '--web-root', dest='web_root', default='.', type=_argp_dir_type, help='path to the web root directory')
    parser.add_argument('--log-file', dest='log_file', help='log information to a file')
    parser.add_argument('--no-threads', dest='use_threads', action='store_false', default=True, help='disable threading')
    parser.add_argument('--password', dest='password', help='password to use for basic authentication')
    ssl_group = parser.add_argument_group('ssl options')
    ssl_group.add_argument('--ssl-cert', dest='ssl_cert', help='the ssl cert to use')
    ssl_group.add_argument('--ssl-key', dest='ssl_key', help='the ssl key to use')
    ssl_group.add_argument('--ssl-version', dest='ssl_version', choices=[p[9:] for p in dir(ssl) if p.startswith('PROTOCOL_')], help='the version of ssl to use')
    arguments = parser.parse_args()

    logging.getLogger('').setLevel(logging.DEBUG)
    console_log_handler = logging.StreamHandler()
    console_log_handler.setLevel(getattr(logging, arguments.loglvl))
    console_log_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-8s %(message)s"))
    logging.getLogger('').addHandler(console_log_handler)
    if arguments.log_file:
        main_file_handler = logging.handlers.RotatingFileHandler(arguments.log_file, maxBytes=262144, backupCount=5)
        main_file_handler.setLevel(logging.DEBUG)
        main_file_handler.setFormatter(logging.Formatter("%(asctime)s %(name)-30s %(levelname)-10s %(message)s"))
        logging.getLogger('').setLevel(logging.DEBUG)
        logging.getLogger('').addHandler(main_file_handler)

    if arguments.config:
        config = ConfigParser()
        config.readfp(arguments.config)
        server = build_server_from_config(
            config,
            'server',
            server_klass=server_klass,
            handler_klass=handler_klass
        )
    else:
        server = server_klass(
            handler_klass,
            address=(arguments.ip, arguments.port),
            use_threads=arguments.use_threads,
            ssl_certfile=arguments.ssl_cert,
            ssl_keyfile=arguments.ssl_key,
            ssl_version=arguments.ssl_version
        )
    server.serve_files_root = arguments.web_root
    if arguments.password:
        server.auth_add_creds('', arguments.password)
    return server
Build a server from command line arguments. If a ServerClass or HandlerClass is specified, then the object must inherit from the corresponding AdvancedHTTPServer base class.

:param str description: Description string to be passed to the argument parser.
:param server_klass: Alternative server class to use.
:type server_klass: :py:class:`.AdvancedHTTPServer`
:param handler_klass: Alternative handler class to use.
:type handler_klass: :py:class:`.RequestHandler`
:return: A configured server instance.
:rtype: :py:class:`.AdvancedHTTPServer`
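A hedged invocation sketch, assuming the surrounding advancedhttpserver module is importable and that the returned server exposes the usual serve_forever/shutdown pair (both assumptions, not shown in the snippet):

    if __name__ == '__main__':
        server = build_server_from_argparser(description='Demo file server')
        try:
            server.serve_forever()  # blocks until interrupted
        except KeyboardInterrupt:
            server.shutdown()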
def hexstr(text):
    '''
    Ensure a string is valid hex.

    Args:
        text (str): String to normalize.

    Examples:
        Norm a few strings:

            hexstr('0xff00')
            hexstr('ff00')

    Notes:
        Will accept strings prefixed by '0x' or '0X' and remove them.

    Returns:
        str: Normalized hex string.
    '''
    text = text.strip().lower()
    if text.startswith(('0x', '0X')):
        text = text[2:]
    if not text:
        raise s_exc.BadTypeValu(valu=text, name='hexstr',
                                mesg='No string left after stripping')
    try:
        # checks for valid hex width and does character
        # checking in C without using regex
        s_common.uhex(text)
    except (binascii.Error, ValueError) as e:
        raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg=str(e))
    return text
Ensure a string is valid hex.

Args:
    text (str): String to normalize.

Examples:
    Norm a few strings:

        hexstr('0xff00')
        hexstr('ff00')

Notes:
    Will accept strings prefixed by '0x' or '0X' and remove them.

Returns:
    str: Normalized hex string.
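A minimal standalone sketch of the same normalization, with binascii.unhexlify standing in for the synapse helper s_common.uhex and a plain ValueError for s_exc.BadTypeValu (both substitutions are assumptions):

    import binascii

    def hexstr_demo(text):
        # strip whitespace and an optional 0x/0X prefix, as above
        text = text.strip().lower()
        if text.startswith('0x'):
            text = text[2:]
        if not text:
            raise ValueError('No string left after stripping')
        # raises binascii.Error (a ValueError subclass) on odd width or bad characters
        binascii.unhexlify(text)
        return text

    assert hexstr_demo(' 0xFF00 ') == 'ff00'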
def _create_network_doscript(self, network_file_path):
    """doscript: contains an invokeScript.sh which will do the special work

    The network.doscript contains network configuration files and it will
    be used by zvmguestconfigure to configure zLinux os network when it
    starts up
    """
    # Generate the tar package for punch
    LOG.debug('Creating network doscript in the folder %s' % network_file_path)
    network_doscript = os.path.join(network_file_path, 'network.doscript')
    tar = tarfile.open(network_doscript, "w")
    for file in os.listdir(network_file_path):
        file_name = os.path.join(network_file_path, file)
        tar.add(file_name, arcname=file)
    tar.close()
    return network_doscript
doscript: contains an invokeScript.sh which will do the special work

The network.doscript contains network configuration files and it will be used by zvmguestconfigure to configure zLinux os network when it starts up
def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None, family=None):
    """Outputs a `Summary` protocol buffer with gif animations.

    Args:
        name: Name of the summary.
        tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height,
            width, channels]` where `channels` is 1 or 3.
        max_outputs: Max number of batch elements to generate gifs for.
        fps: frames per second of the animation
        collections: Optional list of tf.GraphKeys. The collections to add the
            summary to. Defaults to [tf.GraphKeys.SUMMARIES]
        family: Optional; if provided, used as the prefix of the summary tag
            name, which controls the tab name used for display on Tensorboard.

    Returns:
        A scalar `Tensor` of type `string`. The serialized `Summary` protocol
        buffer.

    Raises:
        ValueError: if the given tensor has the wrong shape.
    """
    tensor = tf.convert_to_tensor(tensor)
    if len(tensor.get_shape()) != 5:
        raise ValueError("Assuming videos given as tensors in the format "
                         "[batch, time, height, width, channels] but got one "
                         "of shape: %s" % str(tensor.get_shape()))
    tensor = tf.cast(tensor, tf.uint8)
    if distribute_summary_op_util.skip_summary():
        return tf.constant("")
    with summary_op_util.summary_scope(
            name, family, values=[tensor]) as (tag, scope):
        val = tf.py_func(
            py_gif_summary,
            [tag, tensor, max_outputs, fps],
            tf.string,
            stateful=False,
            name=scope)
        summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES])
    return val
Outputs a `Summary` protocol buffer with gif animations.

Args:
    name: Name of the summary.
    tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height, width, channels]` where `channels` is 1 or 3.
    max_outputs: Max number of batch elements to generate gifs for.
    fps: frames per second of the animation
    collections: Optional list of tf.GraphKeys. The collections to add the summary to. Defaults to [tf.GraphKeys.SUMMARIES]
    family: Optional; if provided, used as the prefix of the summary tag name, which controls the tab name used for display on Tensorboard.

Returns:
    A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer.

Raises:
    ValueError: if the given tensor has the wrong shape.
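A hedged usage sketch, assuming TensorFlow 1.x-style graph mode (tf.compat.v1 under TF2) and that gif_summary and its helper modules above are importable:

    import numpy as np
    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    # a fake batch of two 16-frame 32x32 RGB videos
    videos = tf.constant(np.random.randint(0, 256, size=(2, 16, 32, 32, 3), dtype=np.uint8))
    summ = gif_summary('random_videos', videos, max_outputs=2, fps=8)

    with tf.Session() as sess:
        serialized = sess.run(summ)  # a serialized Summary proto, ready for a FileWriter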
def universal_anisotropy(self):
    """
    returns the universal anisotropy value
    """
    return 5. * self.g_voigt / self.g_reuss + \
        self.k_voigt / self.k_reuss - 6.
returns the universal anisotropy value
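For reference, the value computed above is the universal elastic anisotropy index of Ranganathan and Ostoja-Starzewski (2008), built from the Voigt and Reuss bounds on the shear modulus G and bulk modulus K (matching g_voigt, g_reuss, k_voigt, k_reuss in the code):

    A^U = 5\,\frac{G_V}{G_R} + \frac{K_V}{K_R} - 6 \;\ge\; 0

A^U vanishes exactly when the crystal is elastically isotropic.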
def _playsoundWin(sound, block=True):
    '''
    Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
    Windows 7 with Python 2.7. Probably works with more file formats.
    Probably works on Windows XP thru Windows 10. Probably works with all
    versions of Python.

    Inspired by (but not copied from) Michael Gundlach <[email protected]>'s mp3play:
    https://github.com/michaelgundlach/mp3play

    I never would have tried using windll.winmm without seeing his code.
    '''
    from ctypes import c_buffer, windll
    from random import random
    from time import sleep
    from sys import getfilesystemencoding

    def winCommand(*command):
        buf = c_buffer(255)
        command = ' '.join(command).encode(getfilesystemencoding())
        errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
        if errorCode:
            errorBuffer = c_buffer(255)
            windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
            exceptionMessage = ('\n    Error ' + str(errorCode) + ' for command:'
                                '\n        ' + command.decode() +
                                '\n    ' + errorBuffer.value.decode())
            raise PlaysoundException(exceptionMessage)
        return buf.value

    alias = 'playsound_' + str(random())
    winCommand('open "' + sound + '" alias', alias)
    winCommand('set', alias, 'time format milliseconds')
    durationInMS = winCommand('status', alias, 'length')
    winCommand('play', alias, 'from 0 to', durationInMS.decode())

    if block:
        sleep(float(durationInMS) / 1000.0)
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on Windows 7 with Python 2.7. Probably works with more file formats. Probably works on Windows XP thru Windows 10. Probably works with all versions of Python.

Inspired by (but not copied from) Michael Gundlach <[email protected]>'s mp3play: https://github.com/michaelgundlach/mp3play

I never would have tried using windll.winmm without seeing his code.
def p_unit_list(self, p):
    """ unit_list : unit_list unit
                  | unit
    """
    if isinstance(p[1], list):
        if len(p) >= 3:
            if isinstance(p[2], list):
                p[1].extend(p[2])
            else:
                p[1].append(p[2])
    else:
        p[1] = [p[1]]
    p[0] = p[1]
unit_list : unit_list unit
          | unit
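For readers unfamiliar with PLY, the docstring is the grammar and p is the parser's symbol slice: p[1] holds the first right-hand-side value and p[0] receives the rule's result. A standalone sketch simulating the two reductions for the input 'u1 u2':

    # first reduction: unit_list -> unit (p holds the result slot plus one symbol)
    p = [None, 'u1']
    if not isinstance(p[1], list):
        p[1] = [p[1]]
    p[0] = p[1]                 # p[0] == ['u1']

    # second reduction: unit_list -> unit_list unit (p holds three entries)
    p = [None, p[0], 'u2']
    p[1].append(p[2])
    p[0] = p[1]                 # p[0] == ['u1', 'u2']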
def _process_datum(self, data, input_reader, ctx, transient_shard_state):
    """Process a single data piece.

    Call mapper handler on the data.

    Args:
        data: a datum to process.
        input_reader: input reader.
        ctx: mapreduce context
        transient_shard_state: transient shard state.

    Returns:
        True if scan should be continued, False if scan should be stopped.
    """
    if data is not input_readers.ALLOW_CHECKPOINT:
        self.slice_context.incr(context.COUNTER_MAPPER_CALLS)
        handler = transient_shard_state.handler
        if isinstance(handler, map_job.Mapper):
            handler(self.slice_context, data)
        else:
            if input_reader.expand_parameters:
                result = handler(*data)
            else:
                result = handler(data)
            if util.is_generator(result):
                for output in result:
                    if isinstance(output, operation.Operation):
                        output(ctx)
                    else:
                        output_writer = transient_shard_state.output_writer
                        if not output_writer:
                            logging.warning(
                                "Handler yielded %s, but no output writer is set.",
                                output)
                        else:
                            output_writer.write(output)

    if self._time() - self._start_time >= parameters.config._SLICE_DURATION_SEC:
        return False
    return True
Process a single data piece.

Call mapper handler on the data.

Args:
    data: a datum to process.
    input_reader: input reader.
    ctx: mapreduce context
    transient_shard_state: transient shard state.

Returns:
    True if scan should be continued, False if scan should be stopped.
def new_transaction(self, timeout, durability, transaction_type):
    """
    Creates a Transaction object with given timeout, durability and
    transaction type.

    :param timeout: (long), the timeout in seconds determines the maximum
        lifespan of a transaction.
    :param durability: (int), the durability is the number of machines that
        can take over if a member fails during a transaction commit or
        rollback.
    :param transaction_type: (Transaction Type), the transaction type which
        can be :const:`~hazelcast.transaction.TWO_PHASE` or
        :const:`~hazelcast.transaction.ONE_PHASE`.
    :return: (:class:`~hazelcast.transaction.Transaction`), the newly created
        Transaction.
    """
    connection = self._connect()
    return Transaction(self._client, connection, timeout, durability, transaction_type)
Creates a Transaction object with given timeout, durability and transaction type.

:param timeout: (long), the timeout in seconds determines the maximum lifespan of a transaction.
:param durability: (int), the durability is the number of machines that can take over if a member fails during a transaction commit or rollback.
:param transaction_type: (Transaction Type), the transaction type which can be :const:`~hazelcast.transaction.TWO_PHASE` or :const:`~hazelcast.transaction.ONE_PHASE`.
:return: (:class:`~hazelcast.transaction.Transaction`), the newly created Transaction.
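A hedged usage sketch, assuming an already-connected client and the TWO_PHASE constant from hazelcast.transaction; the transactional-map calls follow the client's begin/commit/rollback flow but should be treated as assumptions:

    from hazelcast.transaction import TWO_PHASE

    txn = client.new_transaction(timeout=120, durability=1, transaction_type=TWO_PHASE)
    txn.begin()
    try:
        txn_map = txn.get_map('orders')  # transactional map proxy (assumed API)
        txn_map.put('order-1', 'pending')
        txn.commit()
    except Exception:
        txn.rollback()
        raise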
def fit(sim_mat, D_len, cidx):
    """
    Algorithm maximizes energy between clusters, which is the distinction of
    this algorithm. Distance matrix contains mostly 0, which are overlooked
    due to search of maximal distances. Algorithm does not try to retain k
    clusters.

    D: numpy array - Symmetric distance matrix
    k: int - number of clusters
    """
    min_energy = np.inf
    for j in range(3):
        # select indices in each sample that maximizes its dimension
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]
        cidx = []
        energy = 0  # current energy
        for i in np.unique(inds):
            indsi = np.where(inds == i)[0]  # find indices for every cluster
            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    # value = sum([sim_mat[idy].get(idx, 0) for idx in indsi])
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)
                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new centers
        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx
    return inds_min, cidx_min
Algorithm maximizes energy between clusters, which is the distinction of this algorithm. Distance matrix contains mostly 0, which are overlooked due to search of maximal distances. Algorithm does not try to retain k clusters.

D: numpy array - Symmetric distance matrix
k: int - number of clusters
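fit expects sim_mat as a sparse dict-of-dicts rather than the dense array the docstring mentions. A hypothetical sketch of preparing that input from a small symmetric distance matrix, keeping only nonzero entries (the two initial centers in cidx are chosen arbitrarily):

    import numpy as np

    D = np.array([[0., 2., 0.],
                  [2., 0., 1.],
                  [0., 1., 0.]])
    sim_mat = {i: {j: D[i, j] for j in range(len(D)) if D[i, j] != 0}
               for i in range(len(D))}
    inds, centers = fit(sim_mat, D_len=len(D), cidx=[0, 2])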
def jupyter_notebook_skeleton():
    """Returns a dictionary with the elements of a Jupyter notebook"""
    py_version = sys.version_info
    notebook_skeleton = {
        "cells": [],
        "metadata": {
            "kernelspec": {
                "display_name": "Python " + str(py_version[0]),
                "language": "python",
                "name": "python" + str(py_version[0])
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": py_version[0]
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython" + str(py_version[0]),
                "version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
            }
        },
        "nbformat": 4,
        "nbformat_minor": 0
    }
    return notebook_skeleton
Returns a dictionary with the elements of a Jupyter notebook
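A quick usage sketch: append one nbformat-4 code cell and write the result to disk with the standard json module (the file name is illustrative):

    import json

    nb = jupyter_notebook_skeleton()
    nb['cells'].append({
        'cell_type': 'code',
        'execution_count': None,
        'metadata': {},
        'outputs': [],
        'source': ["print('hello')"]
    })
    with open('demo.ipynb', 'w') as fh:
        json.dump(nb, fh, indent=1)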
def register_plugin(self):
    """Register plugin in Spyder's main window"""
    self.profiler.datatree.sig_edit_goto.connect(self.main.editor.load)
    self.profiler.redirect_stdio.connect(
        self.main.redirect_internalshell_stdio)
    self.main.add_dockwidget(self)

    profiler_act = create_action(self, _("Profile"),
                                 icon=self.get_plugin_icon(),
                                 triggered=self.run_profiler)
    profiler_act.setEnabled(is_profiler_installed())
    self.register_shortcut(profiler_act, context="Profiler",
                           name="Run profiler")

    self.main.run_menu_actions += [profiler_act]
    self.main.editor.pythonfile_dependent_actions += [profiler_act]
Register plugin in Spyder's main window
def build(self):
    """
    Generates the policy document based on the internal lists of allowed and
    denied conditions. This will generate a policy with two main statements
    for the effect: one statement for Allow and one statement for Deny.
    Methods that include conditions will have their own statement in the
    policy.
    """
    if ((self.allowMethods is None or len(self.allowMethods) == 0) and
            (self.denyMethods is None or len(self.denyMethods) == 0)):
        raise NameError('No statements defined for the policy')

    policy = {
        'principalId': self.principal_id,
        'policyDocument': {
            'Version': self.version,
            'Statement': []
        }
    }

    policy['policyDocument']['Statement'].extend(
        self._get_effect_statement('Allow', self.allowMethods))
    policy['policyDocument']['Statement'].extend(
        self._get_effect_statement('Deny', self.denyMethods))

    return policy
Generates the policy document based on the internal lists of allowed and denied conditions. This will generate a policy with two main statements for the effect: one statement for Allow and one statement for Deny. Methods that include conditions will have their own statement in the policy.
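For orientation, a sketch of the document this builds. In the AWS API Gateway authorizer blueprint this code resembles, version defaults to '2012-10-17'; that value and the action/ARN below are illustrative assumptions, since _get_effect_statement is not shown:

    {
        'principalId': 'user|a1b2c3d4',
        'policyDocument': {
            'Version': '2012-10-17',
            'Statement': [{
                'Action': 'execute-api:Invoke',
                'Effect': 'Allow',
                'Resource': ['arn:aws:execute-api:us-east-1:123456789012:abcdef/prod/GET/pets']
            }]
        }
    }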
def update_id(self, sequence_id=None):
    """Alter the sequence id, and all of the names and ids derived from it.

    This often needs to be done after an IntegrityError in a
    multiprocessing run
    """
    if sequence_id:
        self.sequence_id = sequence_id
    self._set_ids(force=True)
    if self.dataset:
        self._update_names()
Alter the sequence id, and all of the names and ids derived from it. This often needs to be done after an IntegrityError in a multiprocessing run
def List(self, *branches, **kwargs):
    """
    While `Seq` is sequential, `phi.dsl.Expression.List` allows you to split the computation and get back a list with the result of each path. While the list literal should be the most common incarnation of this expression, it can actually be any iterable (implements `__iter__`) that is not a tuple and yields a valid expression.

    The expression

        k = List(f, g)

    is equivalent to

        k = lambda x: [ f(x), g(x) ]

    In general, the following rules apply after compilation:

    **General Branching**

        List(f0, f1, ..., fn)

    is equivalent to

        lambda x: [ f0(x), f1(x), ..., fn(x) ]

    **Composing & Branching**

    It is interesting to see how branching interacts with composing. The expression

        Seq(f, List(g, h))

    is *almost* equivalent to

        List( Seq(f, g), Seq(f, h) )

    As you see it's as if `f` were distributed over the List. We say *almost* because their implementation is different

        def _lambda(x):
            x = f(x)
            return [ g(x), h(x) ]

    vs

        lambda x: [ g(f(x)), h(f(x)) ]

    As you see `f` is only executed once in the first one. Both should yield the same result if `f` is a pure function.

    ### Examples

        from phi import P, List

        avg_word_length = P.Pipe(
            "1 22 333",
            lambda s: s.split(' '),  # ['1', '22', '333']
            lambda l: map(len, l),   # [1, 2, 3]
            List(
                sum  # 1 + 2 + 3 == 6
            ,
                len  # len([1, 2, 3]) == 3
            ),
            lambda l: l[0] / l[1]    # sum / len == 6 / 3 == 2
        )

        assert avg_word_length == 2

    The previous could also be done more briefly like this

        from phi import P, Obj, List

        avg_word_length = P.Pipe(
            "1 22 333",
            Obj
            .split(' ')  # ['1', '22', '333']
            .map(len)    # [1, 2, 3]
            .List(
                sum  # sum([1, 2, 3]) == 6
            ,
                len  # len([1, 2, 3]) == 3
            ),
            P[0] / P[1]  # 6 / 3 == 2
        )

        assert avg_word_length == 2

    In the example above the last expression

        P[0] / P[1]

    works for a couple of reasons

    1. The previous expression returns a list
    2. In general the expression `P[x]` compiles to a function with the form `lambda obj: obj[x]`
    3. The class `Expression` (the class from which the object `P` inherits) overrides most operators to create functions easily. For example, the expression

        (P * 2) / (P + 1)

    compiles to a function of the form

        lambda x: (x * 2) / (x + 1)

    Check out the documentation for Phi [lambdas](https://cgarciae.github.io/phi/lambdas.m.html).
    """
    gs = [_parse(code)._f for code in branches]

    def h(x, state):
        ys = []
        for g in gs:
            y, state = g(x, state)
            ys.append(y)
        return (ys, state)

    return self.__then__(h, **kwargs)
While `Seq` is sequential, `phi.dsl.Expression.List` allows you to split the computation and get back a list with the result of each path. While the list literal should be the most common incarnation of this expression, it can actually be any iterable (implements `__iter__`) that is not a tuple and yields a valid expression.

The expression

    k = List(f, g)

is equivalent to

    k = lambda x: [ f(x), g(x) ]

In general, the following rules apply after compilation:

**General Branching**

    List(f0, f1, ..., fn)

is equivalent to

    lambda x: [ f0(x), f1(x), ..., fn(x) ]

**Composing & Branching**

It is interesting to see how branching interacts with composing. The expression

    Seq(f, List(g, h))

is *almost* equivalent to

    List( Seq(f, g), Seq(f, h) )

As you see it's as if `f` were distributed over the List. We say *almost* because their implementation is different

    def _lambda(x):
        x = f(x)
        return [ g(x), h(x) ]

vs

    lambda x: [ g(f(x)), h(f(x)) ]

As you see `f` is only executed once in the first one. Both should yield the same result if `f` is a pure function.

### Examples

    from phi import P, List

    avg_word_length = P.Pipe(
        "1 22 333",
        lambda s: s.split(' '),  # ['1', '22', '333']
        lambda l: map(len, l),   # [1, 2, 3]
        List(
            sum  # 1 + 2 + 3 == 6
        ,
            len  # len([1, 2, 3]) == 3
        ),
        lambda l: l[0] / l[1]    # sum / len == 6 / 3 == 2
    )

    assert avg_word_length == 2

The previous could also be done more briefly like this

    from phi import P, Obj, List

    avg_word_length = P.Pipe(
        "1 22 333",
        Obj
        .split(' ')  # ['1', '22', '333']
        .map(len)    # [1, 2, 3]
        .List(
            sum  # sum([1, 2, 3]) == 6
        ,
            len  # len([1, 2, 3]) == 3
        ),
        P[0] / P[1]  # 6 / 3 == 2
    )

    assert avg_word_length == 2

In the example above the last expression

    P[0] / P[1]

works for a couple of reasons

1. The previous expression returns a list
2. In general the expression `P[x]` compiles to a function with the form `lambda obj: obj[x]`
3. The class `Expression` (the class from which the object `P` inherits) overrides most operators to create functions easily. For example, the expression

    (P * 2) / (P + 1)

compiles to a function of the form

    lambda x: (x * 2) / (x + 1)

Check out the documentation for Phi [lambdas](https://cgarciae.github.io/phi/lambdas.m.html).
async def _clean_shutdown(self):
    """Cleanly shutdown the emulation loop."""
    # Cleanly stop any other outstanding tasks not associated with tiles
    remaining_tasks = []
    for task in self._tasks.get(None, []):
        self._logger.debug("Cancelling task at shutdown %s", task)
        task.cancel()
        remaining_tasks.append(task)

    asyncio.gather(*remaining_tasks, return_exceptions=True)
    if len(remaining_tasks) > 0:
        del self._tasks[None]

    # Shutdown tasks associated with each tile
    remaining_tasks = []
    for address in sorted(self._tasks, reverse=True):
        if address is None:
            continue
        self._logger.debug("Shutting down tasks for tile at %d", address)
        for task in self._tasks.get(address, []):
            task.cancel()
            remaining_tasks.append(task)

    asyncio.gather(*remaining_tasks, return_exceptions=True)

    await self._rpc_queue.stop()
    self._loop.stop()
Cleanly shutdown the emulation loop.
async def open(self):
    """Register with the publisher."""
    self.store.register(self)
    while not self.finished:
        message = await self.messages.get()
        await self.publish(message)
Register with the publisher.
def included_profiles(self):
    """Load all profiles."""
    profiles = []
    for directory in self.tcex_json.get('profile_include_dirs') or []:
        profiles.extend(self._load_config_include(directory))
    return profiles
Load all profiles.
def is_valid_geometry(self):
    """
    It is possible to infer the geometry only if exactly one of
    sites, sites_csv, hazard_curves_csv, gmfs_csv, region is set.
    You did set more than one, or nothing.
    """
    has_sites = (self.sites is not None or 'sites' in self.inputs
                 or 'site_model' in self.inputs)
    if not has_sites and not self.ground_motion_fields:
        # when generating only the ruptures you do not need the sites
        return True
    if ('gmfs' in self.inputs and not has_sites
            and not self.inputs['gmfs'].endswith('.xml')):
        raise ValueError('Missing sites or sites_csv in the .ini file')
    elif ('risk' in self.calculation_mode
          or 'damage' in self.calculation_mode
          or 'bcr' in self.calculation_mode):
        return True  # no check on the sites for risk
    flags = dict(
        sites=bool(self.sites),
        sites_csv=self.inputs.get('sites', 0),
        hazard_curves_csv=self.inputs.get('hazard_curves', 0),
        gmfs_csv=self.inputs.get('gmfs', 0),
        region=bool(self.region and self.region_grid_spacing))
    # NB: below we check that all the flags
    # are mutually exclusive
    return sum(bool(v) for v in flags.values()) == 1 or self.inputs.get(
        'exposure') or self.inputs.get('site_model')
It is possible to infer the geometry only if exactly one of sites, sites_csv, hazard_curves_csv, gmfs_csv, region is set. You did set more than one, or nothing.
def log_decl_method(func):
    """Decorate do_declartion methods for debug logging."""
    from functools import wraps

    @wraps(func)
    def with_logging(*args, **kwargs):
        self = args[0]
        decl = args[2]
        log(DEBUG, u"  {}: {} {}".format(
            self.state['current_step'], decl.name,
            serialize(decl.value).strip()).encode('utf-8'))
        return func(*args, **kwargs)
    return with_logging
Decorate do_declartion methods for debug logging.
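The wrapper reads args[0] as self and args[2] as the declaration, so it fits methods of the form method(self, something, decl). A hypothetical application (class, state value, and argument names invented for illustration):

    class DeclVisitor:
        def __init__(self):
            self.state = {'current_step': 'compile'}

        @log_decl_method
        def do_declaration(self, scope, decl):
            # decl.name and decl.value were already logged by the wrapper
            ...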
def Validate(self):
    """Ensure the pathspec is valid."""
    self.pathspec.Validate()

    if (self.HasField("start_time") and self.HasField("end_time")
            and self.start_time > self.end_time):
        raise ValueError("Start time must be before end time.")

    if not self.path_regex and not self.data_regex and not self.path_glob:
        raise ValueError("A Find specification can not contain both an empty "
                         "path regex and an empty data regex")
Ensure the pathspec is valid.
def titles(self, unique=False):
    """Return a list of contained worksheet titles.

    Args:
        unique (bool): drop duplicates

    Returns:
        list: list of titles/name strings
    """
    if unique:
        return tools.uniqued(s.title for s in self._items)
    return [s.title for s in self._items]
Return a list of contained worksheet titles.

Args:
    unique (bool): drop duplicates

Returns:
    list: list of titles/name strings
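tools.uniqued is not shown; presumably it deduplicates while preserving first-seen order. A minimal sketch of a helper with exactly that contract:

    def uniqued(iterable):
        # keep the first occurrence of each item, preserving order
        seen = set()
        return [x for x in iterable if x not in seen and not seen.add(x)]

    assert uniqued(['a', 'b', 'a', 'c']) == ['a', 'b', 'c']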
def append(self, child, *args, **kwargs):
    """See :meth:`AbstractElement.append`"""
    # if no set is associated with the layer yet, we learn it from
    # span annotation elements that are added
    if self.set is False or self.set is None:
        if inspect.isclass(child):
            if issubclass(child, AbstractSpanAnnotation):
                if 'set' in kwargs:
                    self.set = kwargs['set']
        elif isinstance(child, AbstractSpanAnnotation):
            if child.set:
                self.set = child.set
        elif isinstance(child, Correction):
            # descend into corrections to find the proper set for this layer
            # (derived from span annotation elements)
            for e in itertools.chain(child.new(), child.original(), child.suggestions()):
                if isinstance(e, AbstractSpanAnnotation) and e.set:
                    self.set = e.set
                    break
    return super(AbstractAnnotationLayer, self).append(child, *args, **kwargs)
See :meth:`AbstractElement.append`
Below is the instruction that describes the task: ### Input: See :meth:`AbstractElement.append` ### Response: def append(self, child, *args, **kwargs):
    """See :meth:`AbstractElement.append`"""
    #if no set is associated with the layer yet, we learn it from span annotation elements that are added
    if self.set is False or self.set is None:
        if inspect.isclass(child):
            if issubclass(child,AbstractSpanAnnotation):
                if 'set' in kwargs:
                    self.set = kwargs['set']
        elif isinstance(child, AbstractSpanAnnotation):
            if child.set:
                self.set = child.set
        elif isinstance(child, Correction):
            #descend into corrections to find the proper set for this layer (derived from span annotation elements)
            for e in itertools.chain( child.new(), child.original(), child.suggestions() ):
                if isinstance(e, AbstractSpanAnnotation) and e.set:
                    self.set = e.set
                    break

    return super(AbstractAnnotationLayer, self).append(child, *args, **kwargs)
def _expandDH(self, sampling, lmax, lmax_calc): """Evaluate the coefficients on a Driscoll and Healy (1994) grid.""" if self.normalization == '4pi': norm = 1 elif self.normalization == 'schmidt': norm = 2 elif self.normalization == 'unnorm': norm = 3 elif self.normalization == 'ortho': norm = 4 else: raise ValueError( "Normalization must be '4pi', 'ortho', 'schmidt', or " + "'unnorm'. Input value was {:s}" .format(repr(self.normalization))) data = _shtools.MakeGridDHC(self.coeffs, sampling=sampling, norm=norm, csphase=self.csphase, lmax=lmax, lmax_calc=lmax_calc) gridout = SHGrid.from_array(data, grid='DH', copy=False) return gridout
Evaluate the coefficients on a Driscoll and Healy (1994) grid.
Below is the instruction that describes the task: ### Input: Evaluate the coefficients on a Driscoll and Healy (1994) grid. ### Response: def _expandDH(self, sampling, lmax, lmax_calc):
    """Evaluate the coefficients on a Driscoll and Healy (1994) grid."""
    if self.normalization == '4pi':
        norm = 1
    elif self.normalization == 'schmidt':
        norm = 2
    elif self.normalization == 'unnorm':
        norm = 3
    elif self.normalization == 'ortho':
        norm = 4
    else:
        raise ValueError(
            "Normalization must be '4pi', 'ortho', 'schmidt', or " +
            "'unnorm'. Input value was {:s}"
            .format(repr(self.normalization)))

    data = _shtools.MakeGridDHC(self.coeffs, sampling=sampling,
                                norm=norm, csphase=self.csphase, lmax=lmax,
                                lmax_calc=lmax_calc)
    gridout = SHGrid.from_array(data, grid='DH', copy=False)
    return gridout
def define_example_values(self, http_method, route, values, update=False):
    """Define example values for a given request.

    By default, example values are determined from the example properties
    in the schema. But if you want to change the example used in the
    documentation for a specific route, this method lets you do that.

    :param str http_method: An HTTP method, like "get".
    :param str route: The route to match.
    :param dict values: A dictionary of parameters for the example request.
    :param bool update: If True, the values will be merged into the default
        example values for the request. If False, the values will replace
        the default example values.
    """
    self.defined_example_values[(http_method.lower(), route)] = {
        'update': update,
        'values': values
    }
Define example values for a given request. By default, example values are determined from the example properties in the schema. But if you want to change the example used in the documentation for a specific route, this method lets you do that. :param str http_method: An HTTP method, like "get". :param str route: The route to match. :param dict values: A dictionary of parameters for the example request. :param bool update: If True, the values will be merged into the default example values for the request. If False, the values will replace the default example values.
Below is the instruction that describes the task: ### Input: Define example values for a given request. By default, example values are determined from the example properties in the schema. But if you want to change the example used in the documentation for a specific route, this method lets you do that. :param str http_method: An HTTP method, like "get". :param str route: The route to match. :param dict values: A dictionary of parameters for the example request. :param bool update: If True, the values will be merged into the default example values for the request. If False, the values will replace the default example values. ### Response: def define_example_values(self, http_method, route, values, update=False):
    """Define example values for a given request.

    By default, example values are determined from the example properties
    in the schema. But if you want to change the example used in the
    documentation for a specific route, this method lets you do that.

    :param str http_method: An HTTP method, like "get".
    :param str route: The route to match.
    :param dict values: A dictionary of parameters for the example request.
    :param bool update: If True, the values will be merged into the default
        example values for the request. If False, the values will replace
        the default example values.
    """
    self.defined_example_values[(http_method.lower(), route)] = {
        'update': update,
        'values': values
    }
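A hedged usage sketch; `doc` stands in for an instance of the documenting class above, and the route and field names are invented for illustration.

```python
# replace the schema-derived example for POST /users entirely
doc.define_example_values("post", "/users",
                          values={"name": "Ada", "admin": False})

# or merge a single field into the defaults instead of replacing them
doc.define_example_values("post", "/users", {"admin": True}, update=True)
```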
def unpersist(self, blocking=False): """ Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. """ self.is_cached = False self._jrdd.unpersist(blocking) return self
Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted.
Below is the instruction that describes the task: ### Input: Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. ### Response: def unpersist(self, blocking=False):
    """
    Mark the RDD as non-persistent, and remove all blocks for it from
    memory and disk.

    .. versionchanged:: 3.0.0
       Added optional argument `blocking` to specify whether to block until all
       blocks are deleted.
    """
    self.is_cached = False
    self._jrdd.unpersist(blocking)
    return self
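A small PySpark sketch of the cache/unpersist lifecycle this method manages; per the docstring above, the `blocking` keyword requires Spark 3.0+.

```python
from pyspark import SparkContext

sc = SparkContext("local[2]", "unpersist-demo")
rdd = sc.parallelize(range(1000)).cache()
rdd.count()                   # materializes the cached blocks
rdd.unpersist(blocking=True)  # drop them and wait until deletion completes
```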
def get_process_cmdline(self): """Return process cmdline as a list of arguments.""" if not pid_exists(self.pid): raise NoSuchProcess(self.pid, self._process_name) return _psutil_osx.get_process_cmdline(self.pid)
Return process cmdline as a list of arguments.
Below is the instruction that describes the task: ### Input: Return process cmdline as a list of arguments. ### Response: def get_process_cmdline(self):
    """Return process cmdline as a list of arguments."""
    if not pid_exists(self.pid):
        raise NoSuchProcess(self.pid, self._process_name)
    return _psutil_osx.get_process_cmdline(self.pid)
def extend_variables(raw_variables, override_variables):
    """ extend raw_variables with override_variables.
        override_variables will merge and override raw_variables.

    Args:
        raw_variables (list):
        override_variables (list):

    Returns:
        dict: extended variables mapping

    Examples:
        >>> raw_variables = [{"var1": "val1"}, {"var2": "val2"}]
        >>> override_variables = [{"var1": "val111"}, {"var3": "val3"}]
        >>> extend_variables(raw_variables, override_variables)
            {
                'var1': 'val111',
                'var2': 'val2',
                'var3': 'val3'
            }

    """
    if not raw_variables:
        override_variables_mapping = ensure_mapping_format(override_variables)
        return override_variables_mapping

    elif not override_variables:
        raw_variables_mapping = ensure_mapping_format(raw_variables)
        return raw_variables_mapping

    else:
        raw_variables_mapping = ensure_mapping_format(raw_variables)
        override_variables_mapping = ensure_mapping_format(override_variables)
        raw_variables_mapping.update(override_variables_mapping)
        return raw_variables_mapping
extend raw_variables with override_variables. override_variables will merge and override raw_variables. Args: raw_variables (list): override_variables (list): Returns: dict: extended variables mapping Examples: >>> raw_variables = [{"var1": "val1"}, {"var2": "val2"}] >>> override_variables = [{"var1": "val111"}, {"var3": "val3"}] >>> extend_variables(raw_variables, override_variables) { 'var1': 'val111', 'var2': 'val2', 'var3': 'val3' }
Below is the instruction that describes the task: ### Input: extend raw_variables with override_variables. override_variables will merge and override raw_variables. Args: raw_variables (list): override_variables (list): Returns: dict: extended variables mapping Examples: >>> raw_variables = [{"var1": "val1"}, {"var2": "val2"}] >>> override_variables = [{"var1": "val111"}, {"var3": "val3"}] >>> extend_variables(raw_variables, override_variables) { 'var1': 'val111', 'var2': 'val2', 'var3': 'val3' } ### Response: def extend_variables(raw_variables, override_variables):
    """ extend raw_variables with override_variables.
        override_variables will merge and override raw_variables.

    Args:
        raw_variables (list):
        override_variables (list):

    Returns:
        dict: extended variables mapping

    Examples:
        >>> raw_variables = [{"var1": "val1"}, {"var2": "val2"}]
        >>> override_variables = [{"var1": "val111"}, {"var3": "val3"}]
        >>> extend_variables(raw_variables, override_variables)
            {
                'var1': 'val111',
                'var2': 'val2',
                'var3': 'val3'
            }

    """
    if not raw_variables:
        override_variables_mapping = ensure_mapping_format(override_variables)
        return override_variables_mapping

    elif not override_variables:
        raw_variables_mapping = ensure_mapping_format(raw_variables)
        return raw_variables_mapping

    else:
        raw_variables_mapping = ensure_mapping_format(raw_variables)
        override_variables_mapping = ensure_mapping_format(override_variables)
        raw_variables_mapping.update(override_variables_mapping)
        return raw_variables_mapping
def recognise(self):
    """
    Recognise which OLL case the Cube is in.
    """
    if not isinstance(self.cube, Cube):
        raise ValueError("Use Solver.feed(cube) to feed the cube to solver.")
    result = ""
    for face in "LFRB":
        for square in self.cube.get_face(face)[0]:
            result += str(int(square == self.cube["U"]["U"]))
    if result not in algo_dict:
        raise ValueError("Invalid Cube, probably didn't solve F2L, or wrong input value.\nUse Solver.feed(cube) to reset the cube.")
    self.case = result
    return result
Recognise which OLL case the Cube is in.
Below is the instruction that describes the task: ### Input: Recognise which OLL case the Cube is in. ### Response: def recognise(self):
    """
    Recognise which OLL case the Cube is in.
    """
    if not isinstance(self.cube, Cube):
        raise ValueError("Use Solver.feed(cube) to feed the cube to solver.")
    result = ""
    for face in "LFRB":
        for square in self.cube.get_face(face)[0]:
            result += str(int(square == self.cube["U"]["U"]))
    if result not in algo_dict:
        raise ValueError("Invalid Cube, probably didn't solve F2L, or wrong input value.\nUse Solver.feed(cube) to reset the cube.")
    self.case = result
    return result
def simplify_U(theta, phi, lam): """Return the gate u1, u2, or u3 implementing U with the fewest pulses. The returned gate implements U exactly, not up to a global phase. Args: theta, phi, lam: input Euler rotation angles for a general U gate Returns: Gate: one of IdGate, U1Gate, U2Gate, U3Gate. """ gate = U3Gate(theta, phi, lam) # Y rotation is 0 mod 2*pi, so the gate is a u1 if abs(gate.params[0] % (2.0 * math.pi)) < _CUTOFF_PRECISION: gate = U1Gate(gate.params[0] + gate.params[1] + gate.params[2]) # Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2 if isinstance(gate, U3Gate): # theta = pi/2 + 2*k*pi if abs((gate.params[0] - math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION: gate = U2Gate(gate.params[1], gate.params[2] + (gate.params[0] - math.pi / 2)) # theta = -pi/2 + 2*k*pi if abs((gate.params[0] + math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION: gate = U2Gate(gate.params[1] + math.pi, gate.params[2] - math.pi + (gate.params[0] + math.pi / 2)) # u1 and lambda is 0 mod 4*pi so gate is nop if isinstance(gate, U1Gate) and abs(gate.params[0] % (4.0 * math.pi)) < _CUTOFF_PRECISION: gate = IdGate() return gate
Return the gate u1, u2, or u3 implementing U with the fewest pulses. The returned gate implements U exactly, not up to a global phase. Args: theta, phi, lam: input Euler rotation angles for a general U gate Returns: Gate: one of IdGate, U1Gate, U2Gate, U3Gate.
Below is the instruction that describes the task: ### Input: Return the gate u1, u2, or u3 implementing U with the fewest pulses. The returned gate implements U exactly, not up to a global phase. Args: theta, phi, lam: input Euler rotation angles for a general U gate Returns: Gate: one of IdGate, U1Gate, U2Gate, U3Gate. ### Response: def simplify_U(theta, phi, lam):
    """Return the gate u1, u2, or u3 implementing U with the fewest pulses.

    The returned gate implements U exactly, not up to a global phase.

    Args:
        theta, phi, lam: input Euler rotation angles for a general U gate

    Returns:
        Gate: one of IdGate, U1Gate, U2Gate, U3Gate.
    """
    gate = U3Gate(theta, phi, lam)
    # Y rotation is 0 mod 2*pi, so the gate is a u1
    if abs(gate.params[0] % (2.0 * math.pi)) < _CUTOFF_PRECISION:
        gate = U1Gate(gate.params[0] + gate.params[1] + gate.params[2])
    # Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
    if isinstance(gate, U3Gate):
        # theta = pi/2 + 2*k*pi
        if abs((gate.params[0] - math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION:
            gate = U2Gate(gate.params[1],
                          gate.params[2] + (gate.params[0] - math.pi / 2))
        # theta = -pi/2 + 2*k*pi
        if abs((gate.params[0] + math.pi / 2) % (2.0 * math.pi)) < _CUTOFF_PRECISION:
            gate = U2Gate(gate.params[1] + math.pi,
                          gate.params[2] - math.pi + (gate.params[0] + math.pi / 2))
    # u1 and lambda is 0 mod 4*pi so gate is nop
    if isinstance(gate, U1Gate) and abs(gate.params[0] % (4.0 * math.pi)) < _CUTOFF_PRECISION:
        gate = IdGate()
    return gate
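A quick sanity check of the collapsing behaviour, as a hedged sketch; it assumes `simplify_U` and the gate classes above are importable from the same (older Qiskit-style) module.

```python
import math

gate = simplify_U(theta=math.pi / 2, phi=0.0, lam=math.pi)
# theta == pi/2 exactly, so the u3 collapses to a u2 via the second branch
print(type(gate).__name__)   # expected: U2Gate

gate = simplify_U(theta=0.0, phi=0.0, lam=0.0)
# Y rotation is 0 and the residual phase is 0 mod 4*pi, so it is a no-op
print(type(gate).__name__)   # expected: IdGate
```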
def do_AUTOCOMPLETE(cmd, s): """Shows autocomplete results for a given token.""" s = list(preprocess_query(s))[0] keys = [k.decode() for k in DB.smembers(edge_ngram_key(s))] print(white(keys)) print(magenta('({} elements)'.format(len(keys))))
Shows autocomplete results for a given token.
Below is the instruction that describes the task: ### Input: Shows autocomplete results for a given token. ### Response: def do_AUTOCOMPLETE(cmd, s):
    """Shows autocomplete results for a given token."""
    s = list(preprocess_query(s))[0]
    keys = [k.decode() for k in DB.smembers(edge_ngram_key(s))]
    print(white(keys))
    print(magenta('({} elements)'.format(len(keys))))
def get_rectangle(self, src): """ :param src: a source object :returns: ((min_lon, min_lat), width, height), useful for plotting """ min_lon, min_lat, max_lon, max_lat = ( self.integration_distance.get_affected_box(src)) return (min_lon, min_lat), (max_lon - min_lon) % 360, max_lat - min_lat
:param src: a source object :returns: ((min_lon, min_lat), width, height), useful for plotting
Below is the instruction that describes the task: ### Input: :param src: a source object :returns: ((min_lon, min_lat), width, height), useful for plotting ### Response: def get_rectangle(self, src):
    """
    :param src: a source object
    :returns: ((min_lon, min_lat), width, height), useful for plotting
    """
    min_lon, min_lat, max_lon, max_lat = (
        self.integration_distance.get_affected_box(src))
    return (min_lon, min_lat), (max_lon - min_lon) % 360, max_lat - min_lat
def _get_filehandler_with_formatter(logname, formatter=None):
    """ Return a logging FileHandler for a given logname using a given logging
    formatter

    :param logname: Name of the file where logs will be stored, ".log"
        extension will be added
    :param formatter: An instance of logging.Formatter or None if the default
        should be used
    :return:
    """
    handler = logging.FileHandler(logname)
    if formatter is not None:
        handler.setFormatter(formatter)
    return handler
Return a logging FileHandler for a given logname using a given logging formatter :param logname: Name of the file where logs will be stored, ".log" extension will be added :param formatter: An instance of logging.Formatter or None if the default should be used :return:
Below is the instruction that describes the task: ### Input: Return a logging FileHandler for a given logname using a given logging formatter :param logname: Name of the file where logs will be stored, ".log" extension will be added :param formatter: An instance of logging.Formatter or None if the default should be used :return: ### Response: def _get_filehandler_with_formatter(logname, formatter=None):
    """ Return a logging FileHandler for a given logname using a given logging
    formatter

    :param logname: Name of the file where logs will be stored, ".log"
        extension will be added
    :param formatter: An instance of logging.Formatter or None if the default
        should be used
    :return:
    """
    handler = logging.FileHandler(logname)
    if formatter is not None:
        handler.setFormatter(formatter)
    return handler
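Wiring the returned handler into a logger, as a minimal sketch; the logger name and filename are illustrative. Note the helper passes `logname` straight to `logging.FileHandler`, so the filename is used exactly as given here.

```python
import logging

fmt = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler = _get_filehandler_with_formatter("myapp.log", fmt)

logger = logging.getLogger("myapp")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("this line ends up in myapp.log")
```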
def processFlat(self):
    """Main process.

    Returns
    -------
    est_idxs : np.array(N)
        Estimated indices for the segment boundaries in frames.
    est_labels : np.array(N-1)
        Estimated labels for the segments.
    """
    # C-NMF params
    niter = self.config["niters"]  # Iterations for the MF and clustering

    # Preprocess to obtain features, times, and input boundary indices
    F = self._preprocess()

    # Normalize
    F = U.normalize(F, norm_type=self.config["norm_feats"])

    if F.shape[0] >= self.config["h"]:
        # Median filter
        F = median_filter(F, M=self.config["h"])
        #plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()

        # Find the boundary indices and labels using matrix factorization
        est_idxs, est_labels = get_segmentation(
            F.T, self.config["rank"], self.config["R"],
            self.config["rank_labels"], self.config["R_labels"],
            niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None)

        # Remove empty segments if needed
        est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels)
    else:
        # The track is too short. We will only output the first and last
        # time stamps
        if self.in_bound_idxs is None:
            est_idxs = np.array([0, F.shape[0] - 1])
            est_labels = [1]
        else:
            est_idxs = self.in_bound_idxs
            est_labels = [1] * (len(est_idxs) + 1)

    # Make sure that the first and last boundaries are included
    assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1

    # Post process estimations
    est_idxs, est_labels = self._postprocess(est_idxs, est_labels)

    return est_idxs, est_labels
Main process. Returns ------- est_idxs : np.array(N) Estimated indices for the segment boundaries in frames. est_labels : np.array(N-1) Estimated labels for the segments.
Below is the instruction that describes the task: ### Input: Main process. Returns ------- est_idxs : np.array(N) Estimated indices for the segment boundaries in frames. est_labels : np.array(N-1) Estimated labels for the segments. ### Response: def processFlat(self):
    """Main process.

    Returns
    -------
    est_idxs : np.array(N)
        Estimated indices for the segment boundaries in frames.
    est_labels : np.array(N-1)
        Estimated labels for the segments.
    """
    # C-NMF params
    niter = self.config["niters"]  # Iterations for the MF and clustering

    # Preprocess to obtain features, times, and input boundary indices
    F = self._preprocess()

    # Normalize
    F = U.normalize(F, norm_type=self.config["norm_feats"])

    if F.shape[0] >= self.config["h"]:
        # Median filter
        F = median_filter(F, M=self.config["h"])
        #plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()

        # Find the boundary indices and labels using matrix factorization
        est_idxs, est_labels = get_segmentation(
            F.T, self.config["rank"], self.config["R"],
            self.config["rank_labels"], self.config["R_labels"],
            niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None)

        # Remove empty segments if needed
        est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels)
    else:
        # The track is too short. We will only output the first and last
        # time stamps
        if self.in_bound_idxs is None:
            est_idxs = np.array([0, F.shape[0] - 1])
            est_labels = [1]
        else:
            est_idxs = self.in_bound_idxs
            est_labels = [1] * (len(est_idxs) + 1)

    # Make sure that the first and last boundaries are included
    assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1

    # Post process estimations
    est_idxs, est_labels = self._postprocess(est_idxs, est_labels)

    return est_idxs, est_labels
def load(self, draw_bbox = False, **kwargs): ''' Makes the canvas. This could be far speedier if it copied raw pixels, but that would take far too much time to write vs using Image inbuilts ''' im = Image.new('RGBA', self.img_size) draw = None if draw_bbox: draw = ImageDraw.Draw(im) for sprite in self.images: data = sprite.load() sprite_im = Image.open(BytesIO(data)) size = sprite.imgrect im.paste(sprite_im, (size[0], size[2])) if draw_bbox: draw.rectangle((size[0], size[2], size[1], size[3]), outline='red') del draw b = BytesIO() im.save(b, format = 'PNG') return b.getvalue()
Makes the canvas. This could be far speedier if it copied raw pixels, but that would take far too much time to write vs using Image inbuilts
Below is the instruction that describes the task: ### Input: Makes the canvas. This could be far speedier if it copied raw pixels, but that would take far too much time to write vs using Image inbuilts ### Response: def load(self, draw_bbox = False, **kwargs):
    '''
    Makes the canvas.

    This could be far speedier if it copied raw pixels, but that would
    take far too much time to write vs using Image inbuilts
    '''
    im = Image.new('RGBA', self.img_size)
    draw = None
    if draw_bbox:
        draw = ImageDraw.Draw(im)
    for sprite in self.images:
        data = sprite.load()
        sprite_im = Image.open(BytesIO(data))
        size = sprite.imgrect
        im.paste(sprite_im, (size[0], size[2]))
        if draw_bbox:
            draw.rectangle((size[0], size[2], size[1], size[3]), outline='red')
    del draw
    b = BytesIO()
    im.save(b, format = 'PNG')
    return b.getvalue()
def _check_for_crypto_done(self): # type: (Downloader) -> None """Check queue for crypto done :param Downloader self: this """ cv = self._crypto_offload.done_cv while not self.termination_check: result = None cv.acquire() while True: result = self._crypto_offload.pop_done_queue() if result is None: # use cv timeout due to possible non-wake while running cv.wait(0.1) # check for terminating conditions if self.termination_check: break else: break cv.release() if result is not None: try: final_path, offsets = result with self._transfer_lock: dd = self._dd_map[final_path] self._finalize_chunk(dd, offsets) except KeyError: # this can happen if all of the last integrity # chunks are processed at once pass
Check queue for crypto done :param Downloader self: this
Below is the instruction that describes the task: ### Input: Check queue for crypto done :param Downloader self: this ### Response: def _check_for_crypto_done(self):
    # type: (Downloader) -> None
    """Check queue for crypto done
    :param Downloader self: this
    """
    cv = self._crypto_offload.done_cv
    while not self.termination_check:
        result = None
        cv.acquire()
        while True:
            result = self._crypto_offload.pop_done_queue()
            if result is None:
                # use cv timeout due to possible non-wake while running
                cv.wait(0.1)
                # check for terminating conditions
                if self.termination_check:
                    break
            else:
                break
        cv.release()
        if result is not None:
            try:
                final_path, offsets = result
                with self._transfer_lock:
                    dd = self._dd_map[final_path]
                self._finalize_chunk(dd, offsets)
            except KeyError:
                # this can happen if all of the last integrity
                # chunks are processed at once
                pass
def delete_service_version(self, service_id , service_version='default', mode='production'): ''' delete_service(self, service_id, service_version='default', mode='production') Deletes a Service version from Opereto :Parameters: * *service_id* (`string`) -- Service identifier * *service_version* (`string`) -- Service version. Default is 'default' * *mode* (`string`) -- development/production. Default is production :return: success/failure :Example: .. code-block:: python opereto_client.delete_service('my_service_id') ''' return self._call_rest_api('delete', '/services/'+service_id+'/'+mode+'/'+service_version, error='Failed to delete service')
delete_service(self, service_id, service_version='default', mode='production') Deletes a Service version from Opereto :Parameters: * *service_id* (`string`) -- Service identifier * *service_version* (`string`) -- Service version. Default is 'default' * *mode* (`string`) -- development/production. Default is production :return: success/failure :Example: .. code-block:: python opereto_client.delete_service('my_service_id')
Below is the instruction that describes the task: ### Input: delete_service(self, service_id, service_version='default', mode='production') Deletes a Service version from Opereto :Parameters: * *service_id* (`string`) -- Service identifier * *service_version* (`string`) -- Service version. Default is 'default' * *mode* (`string`) -- development/production. Default is production :return: success/failure :Example: .. code-block:: python opereto_client.delete_service('my_service_id') ### Response: def delete_service_version(self, service_id , service_version='default', mode='production'):
    '''
    delete_service(self, service_id, service_version='default', mode='production')
    Deletes a Service version from Opereto

    :Parameters:
    * *service_id* (`string`) -- Service identifier
    * *service_version* (`string`) -- Service version. Default is 'default'
    * *mode* (`string`) -- development/production. Default is production

    :return: success/failure

    :Example:
    .. code-block:: python

       opereto_client.delete_service('my_service_id')

    '''
    return self._call_rest_api('delete', '/services/'+service_id+'/'+mode+'/'+service_version, error='Failed to delete service')
def find_all_primitives(pad):
    """
    Recursively find all primitives on a pad, even those hiding behind a
    GetListOfFunctions() of a primitive
    """
    result = []
    for primitive in pad.GetListOfPrimitives():
        result.append(primitive)
        if hasattr(primitive, "GetListOfFunctions"):
            result.extend(primitive.GetListOfFunctions())
        if hasattr(primitive, "GetHistogram"):
            p = primitive.GetHistogram()
            if p:
                result.append(p)
        if isinstance(primitive, ROOT.TPad):
            result.extend(find_all_primitives(primitive))
    return result
Recursively find all primitives on a pad, even those hiding behind a GetListOfFunctions() of a primitive
Below is the instruction that describes the task: ### Input: Recursively find all primitives on a pad, even those hiding behind a GetListOfFunctions() of a primitive ### Response: def find_all_primitives(pad):
    """
    Recursively find all primitives on a pad, even those hiding behind a
    GetListOfFunctions() of a primitive
    """
    result = []
    for primitive in pad.GetListOfPrimitives():
        result.append(primitive)
        if hasattr(primitive, "GetListOfFunctions"):
            result.extend(primitive.GetListOfFunctions())
        if hasattr(primitive, "GetHistogram"):
            p = primitive.GetHistogram()
            if p:
                result.append(p)
        if isinstance(primitive, ROOT.TPad):
            result.extend(find_all_primitives(primitive))
    return result
def get_full_path(self, fn_attr):
    '''Returns the full path of a FILENAME.

    The NTFS filesystem allows for things called hardlinks. Hard links are
    saved, internally, as different filename attributes. Because of this,
    an entry can, when dealing with full paths, have multiple full paths.
    As such, this function receives a fn_attr and uses it to compute
    the full path for this particular attribute.

    Also, the MFT entry might still exist, but the file has been deleted,
    depending on the state of the MFT, the path might not be fully
    reconstructable, these entries are called "orphan".

    Args:
        fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content

    Returns:
        tuple(bool, str): A tuple where the first element is a boolean that
            is ``True`` if the file is an orphan and ``False`` if not. The
            second element is a string with the full path
    '''
    if fn_attr.header.attr_type_id is not AttrTypes.FILE_NAME:
        raise MFTError("Need a filename attribute to compute full path.")

    orphan, path = self._compute_full_path(fn_attr.content.parent_ref, fn_attr.content.parent_seq)

    return (orphan, "\\".join([path, fn_attr.content.name]))
Returns the full path of a FILENAME. The NTFS filesystem allows for things called hardlinks. Hard links are saved, internally, as different filename attributes. Because of this, an entry can, when dealing with full paths, have multiple full paths. As such, this function receives a fn_attr and uses it to compute the full path for this particular attribute. Also, the MFT entry might still exist, but the file has been deleted, depending on the state of the MFT, the path might not be fully reconstructable, these entries are called "orphan". Args: fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content Returns: tuple(bool, str): A tuple where the first element is a boolean that is ``True`` if the file is an orphan and ``False`` if not. The second element is a string with the full path
Below is the instruction that describes the task: ### Input: Returns the full path of a FILENAME. The NTFS filesystem allows for things called hardlinks. Hard links are saved, internally, as different filename attributes. Because of this, an entry can, when dealing with full paths, have multiple full paths. As such, this function receives a fn_attr and uses it to compute the full path for this particular attribute. Also, the MFT entry might still exist, but the file has been deleted, depending on the state of the MFT, the path might not be fully reconstructable, these entries are called "orphan". Args: fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content Returns: tuple(bool, str): A tuple where the first element is a boolean that is ``True`` if the file is an orphan and ``False`` if not. The second element is a string with the full path ### Response: def get_full_path(self, fn_attr):
    '''Returns the full path of a FILENAME.

    The NTFS filesystem allows for things called hardlinks. Hard links are
    saved, internally, as different filename attributes. Because of this,
    an entry can, when dealing with full paths, have multiple full paths.
    As such, this function receives a fn_attr and uses it to compute
    the full path for this particular attribute.

    Also, the MFT entry might still exist, but the file has been deleted,
    depending on the state of the MFT, the path might not be fully
    reconstructable, these entries are called "orphan".

    Args:
        fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content

    Returns:
        tuple(bool, str): A tuple where the first element is a boolean that
            is ``True`` if the file is an orphan and ``False`` if not. The
            second element is a string with the full path
    '''
    if fn_attr.header.attr_type_id is not AttrTypes.FILE_NAME:
        raise MFTError("Need a filename attribute to compute full path.")

    orphan, path = self._compute_full_path(fn_attr.content.parent_ref, fn_attr.content.parent_seq)

    return (orphan, "\\".join([path, fn_attr.content.name]))
def parse(type: Type):
    """
    Register a parser for an attribute type.

    Parsers will be used to parse `str` type objects from either the
    commandline arguments or environment variables.

    Args:
        type: the type the decorated function will be responsible for parsing
            an environment variable to.
    """
    def decorator(parser):
        EnvVar.parsers[type] = parser
        return parser

    return decorator
Register a parser for an attribute type. Parsers will be used to parse `str` type objects from either the commandline arguments or environment variables. Args: type: the type the decorated function will be responsible for parsing an environment variable to.
Below is the instruction that describes the task: ### Input: Register a parser for an attribute type. Parsers will be used to parse `str` type objects from either the commandline arguments or environment variables. Args: type: the type the decorated function will be responsible for parsing an environment variable to. ### Response: def parse(type: Type):
    """
    Register a parser for an attribute type.

    Parsers will be used to parse `str` type objects from either the
    commandline arguments or environment variables.

    Args:
        type: the type the decorated function will be responsible for parsing
            an environment variable to.
    """
    def decorator(parser):
        EnvVar.parsers[type] = parser
        return parser

    return decorator
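Registering a custom parser with the decorator above, as a hedged sketch; the list-of-ints target type and the comma-split format are invented for illustration.

```python
@parse(list)
def parse_int_list(raw: str) -> list:
    # turns "1,2,3" from an env var or CLI arg into [1, 2, 3]
    return [int(tok) for tok in raw.split(",") if tok]

# per the decorator body, EnvVar.parsers[list] is now parse_int_list
```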
def do_first(self): """Create PNM file from input image file.""" pid = os.getpid() self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid)) outfile = self.basename + '.pnm' # Convert source file to pnm filetype = self.file_type(self.srcfile) if (filetype == 'png'): if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' + outfile)): raise IIIFError(text="Oops... got error from pngtopnm.") elif (filetype == 'jpg'): if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile + ' > ' + outfile)): raise IIIFError(text="Oops... got error from jpegtopnm.") else: raise IIIFError(code='501', text='bad input file format (only know how to read png/jpeg)') self.tmpfile = outfile # Get size (self.width, self.height) = self.image_size(self.tmpfile)
Create PNM file from input image file.
Below is the instruction that describes the task: ### Input: Create PNM file from input image file. ### Response: def do_first(self):
    """Create PNM file from input image file."""
    pid = os.getpid()
    self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid))
    outfile = self.basename + '.pnm'
    # Convert source file to pnm
    filetype = self.file_type(self.srcfile)
    if (filetype == 'png'):
        if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from pngtopnm.")
    elif (filetype == 'jpg'):
        if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile + ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from jpegtopnm.")
    else:
        raise IIIFError(code='501',
                        text='bad input file format (only know how to read png/jpeg)')
    self.tmpfile = outfile
    # Get size
    (self.width, self.height) = self.image_size(self.tmpfile)
def enable(name, **kwargs): ''' Enable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.enable <service name> ''' if _service_is_upstart(name): return _upstart_enable(name) executable = _get_service_exec() cmd = '{0} -f {1} defaults'.format(executable, name) return not __salt__['cmd.retcode'](cmd, python_shell=False)
Enable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.enable <service name>
Below is the instruction that describes the task: ### Input: Enable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.enable <service name> ### Response: def enable(name, **kwargs):
    '''
    Enable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    '''
    if _service_is_upstart(name):
        return _upstart_enable(name)
    executable = _get_service_exec()
    cmd = '{0} -f {1} defaults'.format(executable, name)
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def get_index_quote(self, code, as_json=False):
    """
    params:
        code : string index code
        as_json: True|False
    returns:
        a dict | json quote for the given index
    """
    url = self.index_url
    if self.is_valid_index(code):
        req = Request(url, None, self.headers)
        # raises HTTPError and URLError
        resp = self.opener.open(req)
        resp = byte_adaptor(resp)
        resp_list = json.load(resp)['data']
        # this is a list of dictionaries
        resp_list = [self.clean_server_response(item)
                     for item in resp_list]
        # search the right list element to return
        search_flag = False
        for item in resp_list:
            if item['name'] == code.upper():
                search_flag = True
                break
        return self.render_response(item, as_json) if search_flag else None
params: code : string index code as_json: True|False returns: a dict | json quote for the given index
Below is the instruction that describes the task: ### Input: params: code : string index code as_json: True|False returns: a dict | json quote for the given index ### Response: def get_index_quote(self, code, as_json=False):
    """
    params:
        code : string index code
        as_json: True|False
    returns:
        a dict | json quote for the given index
    """
    url = self.index_url
    if self.is_valid_index(code):
        req = Request(url, None, self.headers)
        # raises HTTPError and URLError
        resp = self.opener.open(req)
        resp = byte_adaptor(resp)
        resp_list = json.load(resp)['data']
        # this is a list of dictionaries
        resp_list = [self.clean_server_response(item)
                     for item in resp_list]
        # search the right list element to return
        search_flag = False
        for item in resp_list:
            if item['name'] == code.upper():
                search_flag = True
                break
        return self.render_response(item, as_json) if search_flag else None
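A hedged sketch of calling this from the public class it appears to belong to (nsetools' `Nse`); the import path and the index code spelling are assumptions.

```python
from nsetools import Nse  # assumed host package

nse = Nse()
quote = nse.get_index_quote("nifty 50")                   # dict, or None if no match
as_json = nse.get_index_quote("nifty 50", as_json=True)   # same data as a JSON string
```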
def data(self, root): '''Convert etree.Element into a dictionary''' value = self.dict() children = [node for node in root if isinstance(node.tag, basestring)] for attr, attrval in root.attrib.items(): attr = attr if self.attr_prefix is None else self.attr_prefix + attr value[attr] = self._fromstring(attrval) if root.text and self.text_content is not None: text = root.text.strip() if text: if self.simple_text and len(children) == len(root.attrib) == 0: value = self._fromstring(text) else: value[self.text_content] = self._fromstring(text) count = Counter(child.tag for child in children) for child in children: if count[child.tag] == 1: value.update(self.data(child)) else: result = value.setdefault(child.tag, self.list()) result += self.data(child).values() # if simple_text, elements with no children nor attrs become '', not {} if isinstance(value, dict) and not value and self.simple_text: value = '' return self.dict([(root.tag, value)])
Convert etree.Element into a dictionary
Below is the instruction that describes the task: ### Input: Convert etree.Element into a dictionary ### Response: def data(self, root):
    '''Convert etree.Element into a dictionary'''
    value = self.dict()
    children = [node for node in root if isinstance(node.tag, basestring)]
    for attr, attrval in root.attrib.items():
        attr = attr if self.attr_prefix is None else self.attr_prefix + attr
        value[attr] = self._fromstring(attrval)
    if root.text and self.text_content is not None:
        text = root.text.strip()
        if text:
            if self.simple_text and len(children) == len(root.attrib) == 0:
                value = self._fromstring(text)
            else:
                value[self.text_content] = self._fromstring(text)
    count = Counter(child.tag for child in children)
    for child in children:
        if count[child.tag] == 1:
            value.update(self.data(child))
        else:
            result = value.setdefault(child.tag, self.list())
            result += self.data(child).values()
    # if simple_text, elements with no children nor attrs become '', not {}
    if isinstance(value, dict) and not value and self.simple_text:
        value = ''
    return self.dict([(root.tag, value)])
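Feeding an `etree` element through the converter, as a minimal sketch; `conv` stands in for an instance of the converter class (it resembles xmljson's), and the exact output shape depends on its `attr_prefix`/`text_content`/`simple_text` settings.

```python
from xml.etree import ElementTree as ET

root = ET.fromstring('<root x="1"><a>hello</a><b/><b/></root>')
result = conv.data(root)
# roughly: {'root': {'x': 1, 'a': 'hello', 'b': ['', '']}}
# 'a' is unique so it merges in directly; the repeated 'b' tags become a list
```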
def step(self, observations, raw_rewards, processed_rewards, dones, actions):
    """Record the information obtained from taking a step in all envs.

    Records (observation, rewards, done) in a new time-step and actions in the
    current time-step.

    If any trajectory gets done, we move that trajectory to
    completed_trajectories.

    Args:
      observations: ndarray of first dimension self.batch_size, which has the
        observations after we've stepped, i.e. s_{t+1} where t is the current
        state.
      raw_rewards: ndarray of first dimension self.batch_size containing raw
        rewards i.e. r_{t+1}.
      processed_rewards: ndarray of first dimension self.batch_size containing
        processed rewards. i.e. r_{t+1}
      dones: ndarray of first dimension self.batch_size, containing true at an
        index if that env is done, i.e. d_{t+1}
      actions: ndarray of first dimension self.batch_size, containing actions
        applied at the current time-step, which leads to the observations,
        rewards and done at the next time-step, i.e. a_t
    """
    # Pre-conditions
    assert isinstance(observations, np.ndarray)
    assert isinstance(raw_rewards, np.ndarray)
    assert isinstance(processed_rewards, np.ndarray)
    assert isinstance(dones, np.ndarray)
    assert isinstance(actions, np.ndarray)

    # We assume that we step in all envs, i.e. not like reset where we can
    # reset some envs and not others.
    assert self.batch_size == observations.shape[0]
    assert self.batch_size == raw_rewards.shape[0]
    assert self.batch_size == processed_rewards.shape[0]
    assert self.batch_size == dones.shape[0]
    assert self.batch_size == actions.shape[0]

    for index in range(self.batch_size):
      trajectory = self._trajectories[index]

      # NOTE: If the trajectory isn't active, that means it doesn't have any
      # time-steps in it, but we are in step, so the assumption is that it has
      # a prior observation from which we are stepping away.

      # TODO(afrozm): Let's re-visit this if it becomes too restrictive.
      assert trajectory.is_active

      # To this trajectory's last time-step, set actions.
      trajectory.change_last_time_step(action=actions[index])

      # Create a new time-step to add observation, done & rewards (no actions).
      trajectory.add_time_step(
          observation=observations[index],
          done=dones[index],
          raw_reward=raw_rewards[index],
          processed_reward=processed_rewards[index])

      # If the trajectory is completed, i.e. dones[index] == True, then we
      # account for it right-away.
      if dones[index]:
        self._complete_trajectory(trajectory, index)

        # NOTE: The new trajectory at `index` is going to be in-active and
        # `reset` should be called on it.
        assert not self._trajectories[index].is_active
Record the information obtained from taking a step in all envs. Records (observation, rewards, done) in a new time-step and actions in the current time-step. If any trajectory gets done, we move that trajectory to completed_trajectories. Args: observations: ndarray of first dimension self.batch_size, which has the observations after we've stepped, i.e. s_{t+1} where t is the current state. raw_rewards: ndarray of first dimension self.batch_size containing raw rewards i.e. r_{t+1}. processed_rewards: ndarray of first dimension self.batch_size containing processed rewards. i.e. r_{t+1} dones: ndarray of first dimension self.batch_size, containing true at an index if that env is done, i.e. d_{t+1} actions: ndarray of first dimension self.batch_size, containing actions applied at the current time-step, which leads to the observations, rewards and done at the next time-step, i.e. a_t
Below is the instruction that describes the task: ### Input: Record the information obtained from taking a step in all envs. Records (observation, rewards, done) in a new time-step and actions in the current time-step. If any trajectory gets done, we move that trajectory to completed_trajectories. Args: observations: ndarray of first dimension self.batch_size, which has the observations after we've stepped, i.e. s_{t+1} where t is the current state. raw_rewards: ndarray of first dimension self.batch_size containing raw rewards i.e. r_{t+1}. processed_rewards: ndarray of first dimension self.batch_size containing processed rewards. i.e. r_{t+1} dones: ndarray of first dimension self.batch_size, containing true at an index if that env is done, i.e. d_{t+1} actions: ndarray of first dimension self.batch_size, containing actions applied at the current time-step, which leads to the observations, rewards and done at the next time-step, i.e. a_t ### Response: def step(self, observations, raw_rewards, processed_rewards, dones, actions):
    """Record the information obtained from taking a step in all envs.

    Records (observation, rewards, done) in a new time-step and actions in the
    current time-step.

    If any trajectory gets done, we move that trajectory to
    completed_trajectories.

    Args:
      observations: ndarray of first dimension self.batch_size, which has the
        observations after we've stepped, i.e. s_{t+1} where t is the current
        state.
      raw_rewards: ndarray of first dimension self.batch_size containing raw
        rewards i.e. r_{t+1}.
      processed_rewards: ndarray of first dimension self.batch_size containing
        processed rewards. i.e. r_{t+1}
      dones: ndarray of first dimension self.batch_size, containing true at an
        index if that env is done, i.e. d_{t+1}
      actions: ndarray of first dimension self.batch_size, containing actions
        applied at the current time-step, which leads to the observations,
        rewards and done at the next time-step, i.e. a_t
    """
    # Pre-conditions
    assert isinstance(observations, np.ndarray)
    assert isinstance(raw_rewards, np.ndarray)
    assert isinstance(processed_rewards, np.ndarray)
    assert isinstance(dones, np.ndarray)
    assert isinstance(actions, np.ndarray)

    # We assume that we step in all envs, i.e. not like reset where we can
    # reset some envs and not others.
    assert self.batch_size == observations.shape[0]
    assert self.batch_size == raw_rewards.shape[0]
    assert self.batch_size == processed_rewards.shape[0]
    assert self.batch_size == dones.shape[0]
    assert self.batch_size == actions.shape[0]

    for index in range(self.batch_size):
      trajectory = self._trajectories[index]

      # NOTE: If the trajectory isn't active, that means it doesn't have any
      # time-steps in it, but we are in step, so the assumption is that it has
      # a prior observation from which we are stepping away.

      # TODO(afrozm): Let's re-visit this if it becomes too restrictive.
      assert trajectory.is_active

      # To this trajectory's last time-step, set actions.
      trajectory.change_last_time_step(action=actions[index])

      # Create a new time-step to add observation, done & rewards (no actions).
      trajectory.add_time_step(
          observation=observations[index],
          done=dones[index],
          raw_reward=raw_rewards[index],
          processed_reward=processed_rewards[index])

      # If the trajectory is completed, i.e. dones[index] == True, then we
      # account for it right-away.
      if dones[index]:
        self._complete_trajectory(trajectory, index)

        # NOTE: The new trajectory at `index` is going to be in-active and
        # `reset` should be called on it.
        assert not self._trajectories[index].is_active
def log_player_roll(self, player, roll): """ :param player: catan.game.Player :param roll: integer or string, the sum of the dice """ self._logln('{0} rolls {1}{2}'.format(player.color, roll, ' ...DEUCES!' if int(roll) == 2 else ''))
:param player: catan.game.Player :param roll: integer or string, the sum of the dice
Below is the instruction that describes the task: ### Input: :param player: catan.game.Player :param roll: integer or string, the sum of the dice ### Response: def log_player_roll(self, player, roll):
    """
    :param player: catan.game.Player
    :param roll: integer or string, the sum of the dice
    """
    self._logln('{0} rolls {1}{2}'.format(player.color, roll, ' ...DEUCES!' if int(roll) == 2 else ''))
def run_from_argv(self, argv): """ Called when run from the command line. """ try: return self.main(args=argv[2:], standalone_mode=False) except click.ClickException as e: if getattr(e.ctx, 'traceback', False): raise e.show() sys.exit(e.exit_code)
Called when run from the command line.
Below is the instruction that describes the task: ### Input: Called when run from the command line. ### Response: def run_from_argv(self, argv):
    """
    Called when run from the command line.
    """
    try:
        return self.main(args=argv[2:], standalone_mode=False)
    except click.ClickException as e:
        if getattr(e.ctx, 'traceback', False):
            raise
        e.show()
        sys.exit(e.exit_code)
def stft(func=None, **kwparams):
    """
    Short Time Fourier Transform for complex data.

    Same as the default STFT strategy, but with new defaults. This is the same
    as:

    .. code-block:: python

        stft.base(transform=numpy.fft.fft, inverse_transform=numpy.fft.ifft)

    See ``stft.base`` docs for more.
    """
    from numpy.fft import fft, ifft
    return stft.base(transform=fft, inverse_transform=ifft)(func, **kwparams)
Short Time Fourier Transform for complex data. Same as the default STFT strategy, but with new defaults. This is the same as: .. code-block:: python stft.base(transform=numpy.fft.fft, inverse_transform=numpy.fft.ifft) See ``stft.base`` docs for more.
Below is the instruction that describes the task: ### Input: Short Time Fourier Transform for complex data. Same as the default STFT strategy, but with new defaults. This is the same as: .. code-block:: python stft.base(transform=numpy.fft.fft, inverse_transform=numpy.fft.ifft) See ``stft.base`` docs for more. ### Response: def stft(func=None, **kwparams):
    """
    Short Time Fourier Transform for complex data.

    Same as the default STFT strategy, but with new defaults. This is the same
    as:

    .. code-block:: python

        stft.base(transform=numpy.fft.fft, inverse_transform=numpy.fft.ifft)

    See ``stft.base`` docs for more.
    """
    from numpy.fft import fft, ifft
    return stft.base(transform=fft, inverse_transform=ifft)(func, **kwparams)
def to_struct(cls, name=None): """ Convert the TreeModel into a compiled C struct """ if name is None: name = cls.__name__ basic_attrs = dict([(attr_name, value) for attr_name, value in cls.get_attrs() if isinstance(value, Column)]) if not basic_attrs: return None src = 'struct {0} {{'.format(name) for attr_name, value in basic_attrs.items(): src += '{0} {1};'.format(value.type.typename, attr_name) src += '};' if ROOT.gROOT.ProcessLine(src) != 0: return None return getattr(ROOT, name, None)
Convert the TreeModel into a compiled C struct
Below is the instruction that describes the task: ### Input: Convert the TreeModel into a compiled C struct ### Response: def to_struct(cls, name=None):
    """
    Convert the TreeModel into a compiled C struct
    """
    if name is None:
        name = cls.__name__
    basic_attrs = dict([(attr_name, value)
                        for attr_name, value in cls.get_attrs()
                        if isinstance(value, Column)])
    if not basic_attrs:
        return None
    src = 'struct {0} {{'.format(name)
    for attr_name, value in basic_attrs.items():
        src += '{0} {1};'.format(value.type.typename, attr_name)
    src += '};'
    if ROOT.gROOT.ProcessLine(src) != 0:
        return None
    return getattr(ROOT, name, None)
def latsph(radius, lon, lat):
    """
    Convert from latitudinal coordinates to spherical coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsph_c.html

    :param radius: Distance of a point from the origin.
    :param lon: Angle of the point from the XZ plane in radians.
    :param lat: Angle of the point from the XY plane in radians.
    :return: (rho, colat, lons)
    :rtype: tuple
    """
    radius = ctypes.c_double(radius)
    lon = ctypes.c_double(lon)
    lat = ctypes.c_double(lat)
    rho = ctypes.c_double()
    colat = ctypes.c_double()
    lons = ctypes.c_double()
    libspice.latsph_c(radius, lon, lat, ctypes.byref(rho), ctypes.byref(colat),
                      ctypes.byref(lons))
    return rho.value, colat.value, lons.value
Convert from latitudinal coordinates to spherical coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsph_c.html :param radius: Distance of a point from the origin. :param lon: Angle of the point from the XZ plane in radians. :param lat: Angle of the point from the XY plane in radians. :return: (rho, colat, lons) :rtype: tuple
Below is the instruction that describes the task: ### Input: Convert from latitudinal coordinates to spherical coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsph_c.html :param radius: Distance of a point from the origin. :param lon: Angle of the point from the XZ plane in radians. :param lat: Angle of the point from the XY plane in radians. :return: (rho, colat, lons) :rtype: tuple ### Response: def latsph(radius, lon, lat):
    """
    Convert from latitudinal coordinates to spherical coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latsph_c.html

    :param radius: Distance of a point from the origin.
    :param lon: Angle of the point from the XZ plane in radians.
    :param lat: Angle of the point from the XY plane in radians.
    :return: (rho, colat, lons)
    :rtype: tuple
    """
    radius = ctypes.c_double(radius)
    lon = ctypes.c_double(lon)
    lat = ctypes.c_double(lat)
    rho = ctypes.c_double()
    colat = ctypes.c_double()
    lons = ctypes.c_double()
    libspice.latsph_c(radius, lon, lat, ctypes.byref(rho), ctypes.byref(colat),
                      ctypes.byref(lons))
    return rho.value, colat.value, lons.value
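A sanity-check sketch; this wrapper looks like SpiceyPy's `latsph`, and the call below assumes that context. A point at radius 1 with lon = lat = 0 lies on the +X axis in the XY plane, so its colatitude (angle from +Z) is pi/2.

```python
import math

rho, colat, lons = latsph(1.0, 0.0, 0.0)
assert math.isclose(rho, 1.0)
assert math.isclose(colat, math.pi / 2)  # lat 0 -> 90 degrees from +Z
assert math.isclose(lons, 0.0)
```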
def is_logged_in(username=None):
    """Checks if user is logged in.

    If `username` is passed, check if the specified user is logged in.
    `username` can be a list."""
    if username:
        if not isinstance(username, (list, tuple)):
            username = [username]
        return 'simple_logged_in' in session and get_username() in username
    return 'simple_logged_in' in session
Checks if user is logged in. If `username` is passed, check if the specified user is logged in. `username` can be a list.
Below is the instruction that describes the task: ### Input: Checks if user is logged in. If `username` is passed, check if the specified user is logged in. `username` can be a list. ### Response: def is_logged_in(username=None):
    """Checks if user is logged in.

    If `username` is passed, check if the specified user is logged in.
    `username` can be a list."""
    if username:
        if not isinstance(username, (list, tuple)):
            username = [username]
        return 'simple_logged_in' in session and get_username() in username
    return 'simple_logged_in' in session
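Guarding a route with the helper, as a hedged Flask sketch; the route, account names and secret key are invented, and it assumes the surrounding module's `session` and `get_username` machinery is configured.

```python
from flask import Flask

app = Flask(__name__)
app.secret_key = "change-me"

@app.route("/admin")
def admin():
    if not is_logged_in(["admin", "root"]):  # list form: any of these accounts
        return "forbidden", 403
    return "welcome"
```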
def check(self, line_info):
    "Allow ! and !! in multi-line statements if multi_line_specials is on"
    # Note that this is one of the only places we check the first character of
    # ifun and *not* the pre_char.  Also note that the below test matches
    # both ! and !!.
    if line_info.continue_prompt \
        and self.prefilter_manager.multi_line_specials:
        if line_info.esc == ESC_MAGIC:
            return self.prefilter_manager.get_handler_by_name('magic')
        else:
            return None
Allow ! and !! in multi-line statements if multi_line_specials is on
Below is the instruction that describes the task: ### Input: Allow ! and !! in multi-line statements if multi_line_specials is on ### Response: def check(self, line_info):
    "Allow ! and !! in multi-line statements if multi_line_specials is on"
    # Note that this is one of the only places we check the first character of
    # ifun and *not* the pre_char.  Also note that the below test matches
    # both ! and !!.
    if line_info.continue_prompt \
        and self.prefilter_manager.multi_line_specials:
        if line_info.esc == ESC_MAGIC:
            return self.prefilter_manager.get_handler_by_name('magic')
        else:
            return None
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) raise NotThisMethod("no .git directory") GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits return pieces
Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree.
Below is the instruction that describes the task: ### Input: Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. ### Response: def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")

    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                     "--always", "--long",
                                     "--match", "%s*" % tag_prefix],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    return pieces
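Calling the helper directly, as a hedged sketch; this is Versioneer-style code, and the call assumes it runs inside a git checkout whose tags start with "v" and that the module's `run_command` default is available.

```python
pieces = git_pieces_from_vcs(tag_prefix="v", root=".", verbose=False)
print(pieces["closest-tag"], pieces["distance"], pieces["short"],
      "dirty" if pieces["dirty"] else "clean")
```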
def find_all(string, sub, start=None, end=None, ignore_case=False, **kwargs): """ Return all indices in string s where substring sub is found, such that sub is contained in the slice s[start:end]. >>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox')) [16] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain')) [] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'The')) [0] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an')) [44, 51, 70] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an', ... 50, ... 60)) [51] :param string: the input string :type string: str :param sub: the substring :type sub: str :return: all indices in the input string :rtype: __generator[str] """ #pylint: disable=unused-argument if ignore_case: sub = sub.lower() string = string.lower() while True: start = string.find(sub, start, end) if start == -1: return yield start start += len(sub)
Return all indices in string s where substring sub is found, such that sub is contained in the slice s[start:end]. >>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox')) [16] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain')) [] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'The')) [0] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an')) [44, 51, 70] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an', ... 50, ... 60)) [51] :param string: the input string :type string: str :param sub: the substring :type sub: str :return: all indices in the input string :rtype: __generator[int]
Below is the the instruction that describes the task: ### Input: Return all indices in string s where substring sub is found, such that sub is contained in the slice s[start:end]. >>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox')) [16] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain')) [] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'The')) [0] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an')) [44, 51, 70] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an', ... 50, ... 60)) [51] :param string: the input string :type string: str :param sub: the substring :type sub: str :return: all indices in the input string :rtype: __generator[str] ### Response: def find_all(string, sub, start=None, end=None, ignore_case=False, **kwargs): """ Return all indices in string s where substring sub is found, such that sub is contained in the slice s[start:end]. >>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox')) [16] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain')) [] >>> list(find_all('The quick brown fox jumps over the lazy dog', 'The')) [0] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an')) [44, 51, 70] >>> list(find_all( ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', ... 'an', ... 50, ... 60)) [51] :param string: the input string :type string: str :param sub: the substring :type sub: str :return: all indices in the input string :rtype: __generator[str] """ #pylint: disable=unused-argument if ignore_case: sub = sub.lower() string = string.lower() while True: start = string.find(sub, start, end) if start == -1: return yield start start += len(sub)
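One more usage sketch beyond the doctests, assuming the find_all generator above is in scope; it exercises the ignore_case path:

print(list(find_all('AbcAbc', 'abc', ignore_case=True)))  # [0, 3]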
def user_sessions(self, user_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/sessions#list-sessions" api_path = "/api/v2/users/{user_id}/sessions.json" api_path = api_path.format(user_id=user_id) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/sessions#list-sessions
Below is the instruction that describes the task: ### Input: https://developer.zendesk.com/rest_api/docs/core/sessions#list-sessions ### Response: def user_sessions(self, user_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/sessions#list-sessions"
    api_path = "/api/v2/users/{user_id}/sessions.json"
    api_path = api_path.format(user_id=user_id)
    return self.call(api_path, **kwargs)
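The only real work here is the path template; a hypothetical user id shows the result:

api_path = "/api/v2/users/{user_id}/sessions.json".format(user_id=42)  # 42 is an invented id
print(api_path)  # /api/v2/users/42/sessions.json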
def get_keyvault(access_token, subscription_id, rgname, vault_name): '''Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) return do_get(endpoint, access_token)
Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties.
Below is the instruction that describes the task: ### Input: Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties. ### Response: def get_keyvault(access_token, subscription_id, rgname, vault_name):
    '''Gets details about the named key vault.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        vault_name (str): Name of the key vault.

    Returns:
        HTTP response. JSON body of key vault properties.
    '''
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourcegroups/', rgname,
                        '/providers/Microsoft.KeyVault/vaults/', vault_name,
                        '?api-version=', KEYVAULT_API])
    return do_get(endpoint, access_token)
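A sketch of the URL get_keyvault assembles. The management endpoint, API version string, and resource names below are placeholder assumptions, not values taken from the module:

KEYVAULT_API = '2019-09-01'                   # assumed API version
rm_endpoint = 'https://management.azure.com'  # assumed public-cloud endpoint
endpoint = ''.join([rm_endpoint,
                    '/subscriptions/', '00000000-0000-0000-0000-000000000000',
                    '/resourcegroups/', 'my-rg',
                    '/providers/Microsoft.KeyVault/vaults/', 'my-vault',
                    '?api-version=', KEYVAULT_API])
print(endpoint)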
def trigger_installed(connection: connection, table: str, schema: str='public'):
    """Test whether or not a psycopg2-pgevents trigger is installed for a table.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostgreSQL database.
    table: str
        Table whose trigger-existence will be checked.
    schema: str
        Schema to which the table belongs.

    Returns
    -------
    bool
        True if the trigger is installed, otherwise False.

    """
    installed = False

    log('Checking if {}.{} trigger installed...'.format(schema, table), logger_name=_LOGGER_NAME)

    statement = SELECT_TRIGGER_STATEMENT.format(
        table=table,
        schema=schema
    )

    result = execute(connection, statement)
    if result:
        installed = True

    log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME)

    return installed
Test whether or not a psycopg2-pgevents trigger is installed for a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostgreSQL database. table: str Table whose trigger-existence will be checked. schema: str Schema to which the table belongs. Returns ------- bool True if the trigger is installed, otherwise False.
Below is the instruction that describes the task: ### Input: Test whether or not a psycopg2-pgevents trigger is installed for a table. Parameters ---------- connection: psycopg2.extensions.connection Active connection to a PostgreSQL database. table: str Table whose trigger-existence will be checked. schema: str Schema to which the table belongs. Returns ------- bool True if the trigger is installed, otherwise False. ### Response: def trigger_installed(connection: connection, table: str, schema: str='public'):
    """Test whether or not a psycopg2-pgevents trigger is installed for a table.

    Parameters
    ----------
    connection: psycopg2.extensions.connection
        Active connection to a PostgreSQL database.
    table: str
        Table whose trigger-existence will be checked.
    schema: str
        Schema to which the table belongs.

    Returns
    -------
    bool
        True if the trigger is installed, otherwise False.

    """
    installed = False

    log('Checking if {}.{} trigger installed...'.format(schema, table), logger_name=_LOGGER_NAME)

    statement = SELECT_TRIGGER_STATEMENT.format(
        table=table,
        schema=schema
    )

    result = execute(connection, statement)
    if result:
        installed = True

    log('...{}installed'.format('' if installed else 'NOT '), logger_name=_LOGGER_NAME)

    return installed
def _scrape_document(self):
    '''Extract links from the DOM.'''
    mock_response = self._new_mock_response(
        self._response,
        self._get_temp_path('phantom', '.html')
    )

    self._item_session.request = self._request
    self._item_session.response = mock_response

    self._processing_rule.scrape_document(self._item_session)

    if mock_response.body:
        mock_response.body.close()
Extract links from the DOM.
Below is the instruction that describes the task: ### Input: Extract links from the DOM. ### Response: def _scrape_document(self):
    '''Extract links from the DOM.'''
    mock_response = self._new_mock_response(
        self._response,
        self._get_temp_path('phantom', '.html')
    )

    self._item_session.request = self._request
    self._item_session.response = mock_response

    self._processing_rule.scrape_document(self._item_session)

    if mock_response.body:
        mock_response.body.close()
def slack_ver():
    """
    Open file and read Slackware version
    """
    if _meta_.slackware_version in ["off", "OFF"]:
        sv = Utils().read_file("/etc/slackware-version")
        version = re.findall(r"\d+", sv)
        if len(version) > 2:
            return (".".join(version[:2]))
        else:
            return (".".join(version))
    else:
        return _meta_.slackware_version
Open file and read Slackware version
Below is the instruction that describes the task: ### Input: Open file and read Slackware version ### Response: def slack_ver():
    """
    Open file and read Slackware version
    """
    if _meta_.slackware_version in ["off", "OFF"]:
        sv = Utils().read_file("/etc/slackware-version")
        version = re.findall(r"\d+", sv)
        if len(version) > 2:
            return (".".join(version[:2]))
        else:
            return (".".join(version))
    else:
        return _meta_.slackware_version
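The digit extraction in isolation; the file contents below are a made-up stand-in for /etc/slackware-version:

import re

sv = "Slackware 14.2\n"            # assumed file contents
version = re.findall(r"\d+", sv)   # ['14', '2']
print(".".join(version[:2]))       # 14.2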
def get_matching_rules(tweet): """ Retrieves the matching rules for a tweet with a gnip field enrichment. Args: tweet (Tweet): the tweet Returns: list: potential ``[{"tag": "user_tag", "value": "rule_value"}]`` pairs from standard rulesets or None if no rules or no matching_rules field is found. \n More information on this value at: http://support.gnip.com/enrichments/matching_rules.html """ if is_original_format(tweet): rules = tweet.get("matching_rules") else: gnip = tweet.get("gnip") rules = gnip.get("matching_rules") if gnip else None return rules
Retrieves the matching rules for a tweet with a gnip field enrichment. Args: tweet (Tweet): the tweet Returns: list: potential ``[{"tag": "user_tag", "value": "rule_value"}]`` pairs from standard rulesets or None if no rules or no matching_rules field is found. \n More information on this value at: http://support.gnip.com/enrichments/matching_rules.html
Below is the instruction that describes the task: ### Input: Retrieves the matching rules for a tweet with a gnip field enrichment. Args: tweet (Tweet): the tweet Returns: list: potential ``[{"tag": "user_tag", "value": "rule_value"}]`` pairs from standard rulesets or None if no rules or no matching_rules field is found. \n More information on this value at: http://support.gnip.com/enrichments/matching_rules.html ### Response: def get_matching_rules(tweet):
    """ Retrieves the matching rules for a tweet
    with a gnip field enrichment.

    Args:
        tweet (Tweet): the tweet

    Returns:
        list: potential ``[{"tag": "user_tag", "value": "rule_value"}]``
        pairs from standard rulesets or None if no rules or no
        matching_rules field is found. \n
        More information on this value at:
        http://support.gnip.com/enrichments/matching_rules.html
    """
    if is_original_format(tweet):
        rules = tweet.get("matching_rules")
    else:
        gnip = tweet.get("gnip")
        rules = gnip.get("matching_rules") if gnip else None
    return rules
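A minimal fabricated payload showing the two lookup paths; only the field names from the docstring are used:

# original-format payload: rules live at the top level
original = {"matching_rules": [{"tag": "user_tag", "value": "rule_value"}]}
# activity-streams payload: rules live under the gnip enrichment
activity = {"gnip": {"matching_rules": [{"tag": "user_tag", "value": "rule_value"}]}}

print(original.get("matching_rules"))
gnip = activity.get("gnip")
print(gnip.get("matching_rules") if gnip else None)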
def _postprocess_variants(record_file, data, ref_file, out_file): """Post-process variants, converting into standard VCF file. """ if not utils.file_uptodate(out_file, record_file): with file_transaction(data, out_file) as tx_out_file: cmd = ["dv_postprocess_variants.py", "--ref", ref_file, "--infile", record_file, "--outfile", tx_out_file] do.run(cmd, "DeepVariant postprocess_variants %s" % dd.get_sample_name(data)) return out_file
Post-process variants, converting into standard VCF file.
Below is the instruction that describes the task: ### Input: Post-process variants, converting into standard VCF file. ### Response: def _postprocess_variants(record_file, data, ref_file, out_file):
    """Post-process variants, converting into standard VCF file.
    """
    if not utils.file_uptodate(out_file, record_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = ["dv_postprocess_variants.py", "--ref", ref_file,
                   "--infile", record_file, "--outfile", tx_out_file]
            do.run(cmd, "DeepVariant postprocess_variants %s" % dd.get_sample_name(data))
    return out_file
def ns_prefix(self, prefix, ns_uri): """ Set the namespace prefix of the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_ns_prefix`. """ self._element.set_ns_prefix(prefix, ns_uri) return self
Set the namespace prefix of the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_ns_prefix`.
Below is the instruction that describes the task: ### Input: Set the namespace prefix of the :class:`xml4h.nodes.Element` node represented by this Builder. :return: the current Builder. Delegates to :meth:`xml4h.nodes.Element.set_ns_prefix`. ### Response: def ns_prefix(self, prefix, ns_uri):
    """
    Set the namespace prefix of the :class:`xml4h.nodes.Element`
    node represented by this Builder.

    :return: the current Builder.

    Delegates to :meth:`xml4h.nodes.Element.set_ns_prefix`.
    """
    self._element.set_ns_prefix(prefix, ns_uri)
    return self
def element_type(self, type_): """returns reference to the class value\\mapped type declaration""" return self.__find_xxx_type( type_, self.element_type_index, self.element_type_typedef, 'container_element_type')
returns reference to the class value\\mapped type declaration
Below is the instruction that describes the task: ### Input: returns reference to the class value\\mapped type declaration ### Response: def element_type(self, type_):
    """returns reference to the class value\\mapped type declaration"""
    return self.__find_xxx_type(
        type_,
        self.element_type_index,
        self.element_type_typedef,
        'container_element_type')
def scp_data_length(self): """The maximum SCP data field length supported by the machine (bytes). """ # If not known, query the machine if self._scp_data_length is None: data = self.get_software_version(255, 255, 0) self._scp_data_length = data.buffer_size return self._scp_data_length
The maximum SCP data field length supported by the machine (bytes).
Below is the instruction that describes the task: ### Input: The maximum SCP data field length supported by the machine (bytes). ### Response: def scp_data_length(self):
    """The maximum SCP data field length supported by the machine
    (bytes).
    """
    # If not known, query the machine
    if self._scp_data_length is None:
        data = self.get_software_version(255, 255, 0)
        self._scp_data_length = data.buffer_size
    return self._scp_data_length
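The same lazy query-and-cache pattern in isolation; MachineStub and its buffer size are invented stand-ins for the real transceiver and its get_software_version call:

class MachineStub:
    def __init__(self):
        self._scp_data_length = None

    def get_software_version(self, x, y, p):
        class Info:
            buffer_size = 256  # assumed value reported by the machine
        return Info()

    @property
    def scp_data_length(self):
        if self._scp_data_length is None:  # query once, then cache
            self._scp_data_length = self.get_software_version(255, 255, 0).buffer_size
        return self._scp_data_length

print(MachineStub().scp_data_length)  # 256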
def walk(self, where="/"): """ Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path` """ _tables() self._check_if_open() for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves)
Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path`
Below is the the instruction that describes the task: ### Input: Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path` ### Response: def walk(self, where="/"): """ Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path` """ _tables() self._check_if_open() for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves)
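A small round trip with the walk generator above (requires pandas >= 0.24 and PyTables; the file name is arbitrary):

import pandas as pd

with pd.HDFStore('demo_store.h5', mode='w') as store:
    store.put('grp/df', pd.DataFrame({'a': [1, 2]}))   # creates group 'grp' with leaf 'df'
    for path, groups, leaves in store.walk():
        print(path, groups, leaves)   # '' ['grp'] [] then '/grp' [] ['df']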
def get_image(conn, vm_): ''' Return the image object to use ''' images = conn.list_images() vm_image = config.get_cloud_config_value('image', vm_, __opts__) if not six.PY3: vm_image = vm_image.encode('ascii', 'salt-cloud-force-ascii') for img in images: if isinstance(img.id, six.string_types) and not six.PY3: img_id = img.id.encode('ascii', 'salt-cloud-force-ascii') else: img_id = str(img.id) # future lint: disable=blacklisted-function if isinstance(img.name, six.string_types) and not six.PY3: img_name = img.name.encode('ascii', 'salt-cloud-force-ascii') else: img_name = str(img.name) # future lint: disable=blacklisted-function if vm_image and vm_image in (img_id, img_name): return img raise SaltCloudNotFound( 'The specified image, \'{0}\', could not be found.'.format(vm_image) )
Return the image object to use
Below is the instruction that describes the task: ### Input: Return the image object to use ### Response: def get_image(conn, vm_):
    '''
    Return the image object to use
    '''
    images = conn.list_images()

    vm_image = config.get_cloud_config_value('image', vm_, __opts__)

    if not six.PY3:
        vm_image = vm_image.encode('ascii', 'salt-cloud-force-ascii')

    for img in images:
        if isinstance(img.id, six.string_types) and not six.PY3:
            img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_id = str(img.id)  # future lint: disable=blacklisted-function

        if isinstance(img.name, six.string_types) and not six.PY3:
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)  # future lint: disable=blacklisted-function

        if vm_image and vm_image in (img_id, img_name):
            return img

    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(vm_image)
    )
def nodeprep(string, allow_unassigned=False): """ Process the given `string` using the Nodeprep (`RFC 6122`_) profile. In the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError` is raised. """ chars = list(string) _nodeprep_do_mapping(chars) do_normalization(chars) check_prohibited_output( chars, ( stringprep.in_table_c11, stringprep.in_table_c12, stringprep.in_table_c21, stringprep.in_table_c22, stringprep.in_table_c3, stringprep.in_table_c4, stringprep.in_table_c5, stringprep.in_table_c6, stringprep.in_table_c7, stringprep.in_table_c8, stringprep.in_table_c9, lambda x: x in _nodeprep_prohibited )) check_bidi(chars) if not allow_unassigned: check_unassigned( chars, ( stringprep.in_table_a1, ) ) return "".join(chars)
Process the given `string` using the Nodeprep (`RFC 6122`_) profile. In the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError` is raised.
Below is the instruction that describes the task: ### Input: Process the given `string` using the Nodeprep (`RFC 6122`_) profile. In the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError` is raised. ### Response: def nodeprep(string, allow_unassigned=False):
    """
    Process the given `string` using the Nodeprep (`RFC 6122`_) profile.

    In the error cases defined in `RFC 3454`_ (stringprep),
    a :class:`ValueError` is raised.
    """
    chars = list(string)
    _nodeprep_do_mapping(chars)
    do_normalization(chars)
    check_prohibited_output(
        chars,
        (
            stringprep.in_table_c11,
            stringprep.in_table_c12,
            stringprep.in_table_c21,
            stringprep.in_table_c22,
            stringprep.in_table_c3,
            stringprep.in_table_c4,
            stringprep.in_table_c5,
            stringprep.in_table_c6,
            stringprep.in_table_c7,
            stringprep.in_table_c8,
            stringprep.in_table_c9,
            lambda x: x in _nodeprep_prohibited
        ))
    check_bidi(chars)

    if not allow_unassigned:
        check_unassigned(
            chars,
            (
                stringprep.in_table_a1,
            )
        )

    return "".join(chars)
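Illustrative calls, assuming the nodeprep function above is in scope; per RFC 6122 the localpart mapping case-folds, and '@' is among the prohibited characters:

print(nodeprep("Juliet"))          # expected: 'juliet'
try:
    nodeprep("jul@et")             # '@' is prohibited in a localpart
except ValueError as exc:
    print("rejected:", exc)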
def _map_to_cfg(self): """ Map our current slice to CFG. Based on self._statements_per_run and self._exit_statements_per_run, this method will traverse the CFG and check if there is any missing block on the path. If there is, the default exit of that missing block will be included in the slice. This is because Slicecutor cannot skip individual basic blocks along a path. """ exit_statements_per_run = self.chosen_exits new_exit_statements_per_run = defaultdict(list) while len(exit_statements_per_run): for block_address, exits in exit_statements_per_run.items(): for stmt_idx, exit_target in exits: if exit_target not in self.chosen_exits: # Oh we found one! # The default exit should be taken no matter where it leads to # Add it to the new set tpl = (DEFAULT_STATEMENT, None) if tpl not in new_exit_statements_per_run[exit_target]: new_exit_statements_per_run[exit_target].append(tpl) # Add the new ones to our global dict for block_address, exits in new_exit_statements_per_run.items(): for ex in exits: if ex not in self.chosen_exits[block_address]: self.chosen_exits[block_address].append(ex) # Switch them so we can process the new set exit_statements_per_run = new_exit_statements_per_run new_exit_statements_per_run = defaultdict(list)
Map our current slice to CFG. Based on self._statements_per_run and self._exit_statements_per_run, this method will traverse the CFG and check if there is any missing block on the path. If there is, the default exit of that missing block will be included in the slice. This is because Slicecutor cannot skip individual basic blocks along a path.
Below is the the instruction that describes the task: ### Input: Map our current slice to CFG. Based on self._statements_per_run and self._exit_statements_per_run, this method will traverse the CFG and check if there is any missing block on the path. If there is, the default exit of that missing block will be included in the slice. This is because Slicecutor cannot skip individual basic blocks along a path. ### Response: def _map_to_cfg(self): """ Map our current slice to CFG. Based on self._statements_per_run and self._exit_statements_per_run, this method will traverse the CFG and check if there is any missing block on the path. If there is, the default exit of that missing block will be included in the slice. This is because Slicecutor cannot skip individual basic blocks along a path. """ exit_statements_per_run = self.chosen_exits new_exit_statements_per_run = defaultdict(list) while len(exit_statements_per_run): for block_address, exits in exit_statements_per_run.items(): for stmt_idx, exit_target in exits: if exit_target not in self.chosen_exits: # Oh we found one! # The default exit should be taken no matter where it leads to # Add it to the new set tpl = (DEFAULT_STATEMENT, None) if tpl not in new_exit_statements_per_run[exit_target]: new_exit_statements_per_run[exit_target].append(tpl) # Add the new ones to our global dict for block_address, exits in new_exit_statements_per_run.items(): for ex in exits: if ex not in self.chosen_exits[block_address]: self.chosen_exits[block_address].append(ex) # Switch them so we can process the new set exit_statements_per_run = new_exit_statements_per_run new_exit_statements_per_run = defaultdict(list)
def get_tasker2_slabs(self, tol=0.01, same_species_only=True): """ Get a list of slabs that have been Tasker 2 corrected. Args: tol (float): Tolerance to determine if atoms are within same plane. This is a fractional tolerance, not an absolute one. same_species_only (bool): If True, only that are of the exact same species as the atom at the outermost surface are considered for moving. Otherwise, all atoms regardless of species that is within tol are considered for moving. Default is True (usually the desired behavior). Returns: ([Slab]) List of tasker 2 corrected slabs. """ sites = list(self.sites) slabs = [] sortedcsites = sorted(sites, key=lambda site: site.c) # Determine what fraction the slab is of the total cell size in the # c direction. Round to nearest rational number. nlayers_total = int(round(self.lattice.c / self.oriented_unit_cell.lattice.c)) nlayers_slab = int(round((sortedcsites[-1].c - sortedcsites[0].c) * nlayers_total)) slab_ratio = nlayers_slab / nlayers_total a = SpacegroupAnalyzer(self) symm_structure = a.get_symmetrized_structure() def equi_index(site): for i, equi_sites in enumerate(symm_structure.equivalent_sites): if site in equi_sites: return i raise ValueError("Cannot determine equi index!") for surface_site, shift in [(sortedcsites[0], slab_ratio), (sortedcsites[-1], -slab_ratio)]: tomove = [] fixed = [] for site in sites: if abs(site.c - surface_site.c) < tol and ( (not same_species_only) or site.species == surface_site.species): tomove.append(site) else: fixed.append(site) # Sort and group the sites by the species and symmetry equivalence tomove = sorted(tomove, key=lambda s: equi_index(s)) grouped = [list(sites) for k, sites in itertools.groupby( tomove, key=lambda s: equi_index(s))] if len(tomove) == 0 or any([len(g) % 2 != 0 for g in grouped]): warnings.warn("Odd number of sites to divide! Try changing " "the tolerance to ensure even division of " "sites or create supercells in a or b directions " "to allow for atoms to be moved!") continue combinations = [] for g in grouped: combinations.append( [c for c in itertools.combinations(g, int(len(g) / 2))]) for selection in itertools.product(*combinations): species = [site.species for site in fixed] fcoords = [site.frac_coords for site in fixed] for s in tomove: species.append(s.species) for group in selection: if s in group: fcoords.append(s.frac_coords) break else: # Move unselected atom to the opposite surface. fcoords.append(s.frac_coords + [0, 0, shift]) # sort by species to put all similar species together. sp_fcoord = sorted(zip(species, fcoords), key=lambda x: x[0]) species = [x[0] for x in sp_fcoord] fcoords = [x[1] for x in sp_fcoord] slab = Slab(self.lattice, species, fcoords, self.miller_index, self.oriented_unit_cell, self.shift, self.scale_factor, energy=self.energy, reorient_lattice=self.reorient_lattice) slabs.append(slab) s = StructureMatcher() unique = [ss[0] for ss in s.group_structures(slabs)] return unique
Get a list of slabs that have been Tasker 2 corrected. Args: tol (float): Tolerance to determine if atoms are within same plane. This is a fractional tolerance, not an absolute one. same_species_only (bool): If True, only sites that are of the exact same species as the atom at the outermost surface are considered for moving. Otherwise, all atoms regardless of species that are within tol are considered for moving. Default is True (usually the desired behavior). Returns: ([Slab]) List of tasker 2 corrected slabs.
Below is the the instruction that describes the task: ### Input: Get a list of slabs that have been Tasker 2 corrected. Args: tol (float): Tolerance to determine if atoms are within same plane. This is a fractional tolerance, not an absolute one. same_species_only (bool): If True, only that are of the exact same species as the atom at the outermost surface are considered for moving. Otherwise, all atoms regardless of species that is within tol are considered for moving. Default is True (usually the desired behavior). Returns: ([Slab]) List of tasker 2 corrected slabs. ### Response: def get_tasker2_slabs(self, tol=0.01, same_species_only=True): """ Get a list of slabs that have been Tasker 2 corrected. Args: tol (float): Tolerance to determine if atoms are within same plane. This is a fractional tolerance, not an absolute one. same_species_only (bool): If True, only that are of the exact same species as the atom at the outermost surface are considered for moving. Otherwise, all atoms regardless of species that is within tol are considered for moving. Default is True (usually the desired behavior). Returns: ([Slab]) List of tasker 2 corrected slabs. """ sites = list(self.sites) slabs = [] sortedcsites = sorted(sites, key=lambda site: site.c) # Determine what fraction the slab is of the total cell size in the # c direction. Round to nearest rational number. nlayers_total = int(round(self.lattice.c / self.oriented_unit_cell.lattice.c)) nlayers_slab = int(round((sortedcsites[-1].c - sortedcsites[0].c) * nlayers_total)) slab_ratio = nlayers_slab / nlayers_total a = SpacegroupAnalyzer(self) symm_structure = a.get_symmetrized_structure() def equi_index(site): for i, equi_sites in enumerate(symm_structure.equivalent_sites): if site in equi_sites: return i raise ValueError("Cannot determine equi index!") for surface_site, shift in [(sortedcsites[0], slab_ratio), (sortedcsites[-1], -slab_ratio)]: tomove = [] fixed = [] for site in sites: if abs(site.c - surface_site.c) < tol and ( (not same_species_only) or site.species == surface_site.species): tomove.append(site) else: fixed.append(site) # Sort and group the sites by the species and symmetry equivalence tomove = sorted(tomove, key=lambda s: equi_index(s)) grouped = [list(sites) for k, sites in itertools.groupby( tomove, key=lambda s: equi_index(s))] if len(tomove) == 0 or any([len(g) % 2 != 0 for g in grouped]): warnings.warn("Odd number of sites to divide! Try changing " "the tolerance to ensure even division of " "sites or create supercells in a or b directions " "to allow for atoms to be moved!") continue combinations = [] for g in grouped: combinations.append( [c for c in itertools.combinations(g, int(len(g) / 2))]) for selection in itertools.product(*combinations): species = [site.species for site in fixed] fcoords = [site.frac_coords for site in fixed] for s in tomove: species.append(s.species) for group in selection: if s in group: fcoords.append(s.frac_coords) break else: # Move unselected atom to the opposite surface. fcoords.append(s.frac_coords + [0, 0, shift]) # sort by species to put all similar species together. sp_fcoord = sorted(zip(species, fcoords), key=lambda x: x[0]) species = [x[0] for x in sp_fcoord] fcoords = [x[1] for x in sp_fcoord] slab = Slab(self.lattice, species, fcoords, self.miller_index, self.oriented_unit_cell, self.shift, self.scale_factor, energy=self.energy, reorient_lattice=self.reorient_lattice) slabs.append(slab) s = StructureMatcher() unique = [ss[0] for ss in s.group_structures(slabs)] return unique
def schedule(self, callback, *args, **kwargs): """Schedule the callback to be called asynchronously in a thread pool. Args: callback (Callable): The function to call. args: Positional arguments passed to the function. kwargs: Key-word arguments passed to the function. Returns: None """ self._executor.submit(callback, *args, **kwargs)
Schedule the callback to be called asynchronously in a thread pool. Args: callback (Callable): The function to call. args: Positional arguments passed to the function. kwargs: Key-word arguments passed to the function. Returns: None
Below is the instruction that describes the task: ### Input: Schedule the callback to be called asynchronously in a thread pool. Args: callback (Callable): The function to call. args: Positional arguments passed to the function. kwargs: Key-word arguments passed to the function. Returns: None ### Response: def schedule(self, callback, *args, **kwargs):
    """Schedule the callback to be called asynchronously in a thread pool.

    Args:
        callback (Callable): The function to call.
        args: Positional arguments passed to the function.
        kwargs: Key-word arguments passed to the function.

    Returns:
        None
    """
    self._executor.submit(callback, *args, **kwargs)
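A self-contained stand-in with the same shape, to show the call style (the real class presumably wires up its executor elsewhere):

from concurrent.futures import ThreadPoolExecutor

class Scheduler:
    def __init__(self):
        self._executor = ThreadPoolExecutor(max_workers=2)

    def schedule(self, callback, *args, **kwargs):
        self._executor.submit(callback, *args, **kwargs)

s = Scheduler()
s.schedule(print, "ran in the pool")
s._executor.shutdown(wait=True)   # let the queued call finish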
def fgm(self, x, labels, targeted=False): """ TensorFlow Eager implementation of the Fast Gradient Method. :param x: the input variable :param targeted: Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :return: a tensor for the adversarial example """ # Compute loss with tf.GradientTape() as tape: # input should be watched because it may be # combination of trainable and non-trainable variables tape.watch(x) loss_obj = LossCrossEntropy(self.model, smoothing=0.) loss = loss_obj.fprop(x=x, y=labels) if targeted: loss = -loss # Define gradient of loss wrt input grad = tape.gradient(loss, x) optimal_perturbation = attacks.optimize_linear(grad, self.eps, self.ord) # Add perturbation to original example to obtain adversarial example adv_x = x + optimal_perturbation # If clipping is needed # reset all values outside of [clip_min, clip_max] if (self.clip_min is not None) and (self.clip_max is not None): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) return adv_x
TensorFlow Eager implementation of the Fast Gradient Method. :param x: the input variable :param targeted: Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :return: a tensor for the adversarial example
Below is the the instruction that describes the task: ### Input: TensorFlow Eager implementation of the Fast Gradient Method. :param x: the input variable :param targeted: Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :return: a tensor for the adversarial example ### Response: def fgm(self, x, labels, targeted=False): """ TensorFlow Eager implementation of the Fast Gradient Method. :param x: the input variable :param targeted: Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :return: a tensor for the adversarial example """ # Compute loss with tf.GradientTape() as tape: # input should be watched because it may be # combination of trainable and non-trainable variables tape.watch(x) loss_obj = LossCrossEntropy(self.model, smoothing=0.) loss = loss_obj.fprop(x=x, y=labels) if targeted: loss = -loss # Define gradient of loss wrt input grad = tape.gradient(loss, x) optimal_perturbation = attacks.optimize_linear(grad, self.eps, self.ord) # Add perturbation to original example to obtain adversarial example adv_x = x + optimal_perturbation # If clipping is needed # reset all values outside of [clip_min, clip_max] if (self.clip_min is not None) and (self.clip_max is not None): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) return adv_x
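For intuition, the L-infinity case of the perturbation step in plain numpy; eps and the gradient values are arbitrary:

import numpy as np

grad = np.array([0.3, -1.2, 0.0])
eps = 0.1
x = np.zeros(3)
adv_x = x + eps * np.sign(grad)   # optimize_linear reduces to this for ord=inf
print(adv_x)                      # [ 0.1 -0.1  0. ]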
def _iac_sniffer(self, byte):
    """
    Watches incoming data for Telnet IAC sequences.
    Passes the data, if any, with the IAC commands stripped to
    _recv_byte().
    """
    ## Are we not currently in an IAC sequence coming from the DE?
    if self.telnet_got_iac is False:

        if byte == IAC:
            ## Well, we are now
            self.telnet_got_iac = True
            return

        ## Are we currently in a sub-negotiation?
        elif self.telnet_got_sb is True:
            ## Sanity check on length
            if len(self.telnet_sb_buffer) < 64:
                self.telnet_sb_buffer += byte
            else:
                self.telnet_got_sb = False
                self.telnet_sb_buffer = ""
            return

        else:
            ## Just a normal NVT character
            self._recv_byte(byte)
            return

    ## Byte handling when already in an IAC sequence sent from the DE
    else:

        ## Did we get sent a second IAC?
        if byte == IAC and self.telnet_got_sb is True:
            ## Must be an escaped 255 (IAC + IAC)
            self.telnet_sb_buffer += byte
            self.telnet_got_iac = False
            return

        ## Do we already have an IAC + CMD?
        elif self.telnet_got_cmd:
            ## Yes, so handle the option
            self._three_byte_cmd(byte)
            return

        ## We have IAC but no CMD
        else:

            ## Is this the middle byte of a three-byte command?
            if byte == DO:
                self.telnet_got_cmd = DO
                return

            elif byte == DONT:
                self.telnet_got_cmd = DONT
                return

            elif byte == WILL:
                self.telnet_got_cmd = WILL
                return

            elif byte == WONT:
                self.telnet_got_cmd = WONT
                return

            else:
                ## Nope, must be a two-byte command
                self._two_byte_cmd(byte)
Watches incoming data for Telnet IAC sequences. Passes the data, if any, with the IAC commands stripped to _recv_byte().
Below is the instruction that describes the task: ### Input: Watches incoming data for Telnet IAC sequences. Passes the data, if any, with the IAC commands stripped to _recv_byte(). ### Response: def _iac_sniffer(self, byte):
    """
    Watches incoming data for Telnet IAC sequences.
    Passes the data, if any, with the IAC commands stripped to
    _recv_byte().
    """
    ## Are we not currently in an IAC sequence coming from the DE?
    if self.telnet_got_iac is False:

        if byte == IAC:
            ## Well, we are now
            self.telnet_got_iac = True
            return

        ## Are we currently in a sub-negotiation?
        elif self.telnet_got_sb is True:
            ## Sanity check on length
            if len(self.telnet_sb_buffer) < 64:
                self.telnet_sb_buffer += byte
            else:
                self.telnet_got_sb = False
                self.telnet_sb_buffer = ""
            return

        else:
            ## Just a normal NVT character
            self._recv_byte(byte)
            return

    ## Byte handling when already in an IAC sequence sent from the DE
    else:

        ## Did we get sent a second IAC?
        if byte == IAC and self.telnet_got_sb is True:
            ## Must be an escaped 255 (IAC + IAC)
            self.telnet_sb_buffer += byte
            self.telnet_got_iac = False
            return

        ## Do we already have an IAC + CMD?
        elif self.telnet_got_cmd:
            ## Yes, so handle the option
            self._three_byte_cmd(byte)
            return

        ## We have IAC but no CMD
        else:

            ## Is this the middle byte of a three-byte command?
            if byte == DO:
                self.telnet_got_cmd = DO
                return

            elif byte == DONT:
                self.telnet_got_cmd = DONT
                return

            elif byte == WILL:
                self.telnet_got_cmd = WILL
                return

            elif byte == WONT:
                self.telnet_got_cmd = WONT
                return

            else:
                ## Nope, must be a two-byte command
                self._two_byte_cmd(byte)
def tokenize_init(spec): """Initialize a tokenizer. Should only be called by the :func:`~textparser.Parser.tokenize` method in the parser. """ tokens = [Token('__SOF__', '__SOF__', 0)] re_token = '|'.join([ '(?P<{}>{})'.format(name, regex) for name, regex in spec ]) return tokens, re_token
Initialize a tokenizer. Should only be called by the :func:`~textparser.Parser.tokenize` method in the parser.
Below is the instruction that describes the task: ### Input: Initialize a tokenizer. Should only be called by the :func:`~textparser.Parser.tokenize` method in the parser. ### Response: def tokenize_init(spec):
    """Initialize a tokenizer. Should only be called by the
    :func:`~textparser.Parser.tokenize` method in the parser.

    """
    tokens = [Token('__SOF__', '__SOF__', 0)]
    re_token = '|'.join([
        '(?P<{}>{})'.format(name, regex) for name, regex in spec
    ])
    return tokens, re_token
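What the alternation looks like for a tiny invented spec, plus one match against it:

import re

spec = [('NUMBER', r'\d+'), ('WORD', r'[a-z]+')]
re_token = '|'.join('(?P<{}>{})'.format(name, regex) for name, regex in spec)
print(re_token)                    # (?P<NUMBER>\d+)|(?P<WORD>[a-z]+)
mo = re.match(re_token, '42 hello')
print(mo.lastgroup, mo.group())    # NUMBER 42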
def load_agent(self, overall_index: int=None): """ Loads all data for overall_index from the overall config and also loads both presets :param overall_index: the index of the targeted agent :return agent: an Agent (gui_agent) class with all loaded values """ if not self.index_manager.has_free_slots(): return None if overall_index is None: overall_index = self.index_manager.get_new_index() else: self.index_manager.use_index(overall_index) agent = self.add_agent(overall_index=overall_index) path_in_overall_config = agent.get_agent_config_path() if path_in_overall_config is None: # Fall back to the path of the first agent if there's nothing configured. path_in_overall_config = self.overall_config.getpath(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY, 0) agent_preset = self.add_agent_preset(path_in_overall_config) agent.set_agent_preset(agent_preset) agent.set_name(agent_preset.config.get(BOT_CONFIG_MODULE_HEADER, BOT_NAME_KEY)) # Add the preset's loadout as a loadout own_loadout = self.add_loadout_preset(agent_preset.looks_path) # Agent has a loadout defined in overall config, load that if it is not None loadout_file_in_overall_config = self.overall_config.get(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY, overall_index) if loadout_file_in_overall_config is None or loadout_file_in_overall_config == "None": agent.set_loadout_preset(own_loadout) else: directory = get_python_root() file_path = loadout_file_in_overall_config loadout_file_in_overall_config = os.path.realpath(os.path.join(directory, file_path)) loadout_preset = self.add_loadout_preset(loadout_file_in_overall_config) agent.set_loadout_preset(loadout_preset) return agent
Loads all data for overall_index from the overall config and also loads both presets :param overall_index: the index of the targeted agent :return agent: an Agent (gui_agent) class with all loaded values
Below is the the instruction that describes the task: ### Input: Loads all data for overall_index from the overall config and also loads both presets :param overall_index: the index of the targeted agent :return agent: an Agent (gui_agent) class with all loaded values ### Response: def load_agent(self, overall_index: int=None): """ Loads all data for overall_index from the overall config and also loads both presets :param overall_index: the index of the targeted agent :return agent: an Agent (gui_agent) class with all loaded values """ if not self.index_manager.has_free_slots(): return None if overall_index is None: overall_index = self.index_manager.get_new_index() else: self.index_manager.use_index(overall_index) agent = self.add_agent(overall_index=overall_index) path_in_overall_config = agent.get_agent_config_path() if path_in_overall_config is None: # Fall back to the path of the first agent if there's nothing configured. path_in_overall_config = self.overall_config.getpath(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY, 0) agent_preset = self.add_agent_preset(path_in_overall_config) agent.set_agent_preset(agent_preset) agent.set_name(agent_preset.config.get(BOT_CONFIG_MODULE_HEADER, BOT_NAME_KEY)) # Add the preset's loadout as a loadout own_loadout = self.add_loadout_preset(agent_preset.looks_path) # Agent has a loadout defined in overall config, load that if it is not None loadout_file_in_overall_config = self.overall_config.get(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY, overall_index) if loadout_file_in_overall_config is None or loadout_file_in_overall_config == "None": agent.set_loadout_preset(own_loadout) else: directory = get_python_root() file_path = loadout_file_in_overall_config loadout_file_in_overall_config = os.path.realpath(os.path.join(directory, file_path)) loadout_preset = self.add_loadout_preset(loadout_file_in_overall_config) agent.set_loadout_preset(loadout_preset) return agent
def add_port(self, port): """ Add a port object to the definition :param port: port definition :type port: PortDef """ self.ports.append(port) if port.io_type not in self.port_seqs: self.port_seqs[port.io_type] = 0 self.port_seqs[port.io_type] += 1 port.sequence = self.port_seqs[port.io_type] return self
Add a port object to the definition :param port: port definition :type port: PortDef
Below is the instruction that describes the task: ### Input: Add a port object to the definition :param port: port definition :type port: PortDef ### Response: def add_port(self, port):
    """
    Add a port object to the definition

    :param port: port definition
    :type port: PortDef
    """
    self.ports.append(port)
    if port.io_type not in self.port_seqs:
        self.port_seqs[port.io_type] = 0
    self.port_seqs[port.io_type] += 1
    port.sequence = self.port_seqs[port.io_type]
    return self
def run_report_from_console(output_file_name, callback): """ Runs the report from the command line. Receives the book url from the console. """ print("The report uses a read-only access to the book.") print("Now enter the data or ^Z to continue:") #report_method = kwargs["report_method"] result = callback() #output_file_name = kwargs["output_file_name"] output = save_to_temp(result, output_file_name) webbrowser.open(output)
Runs the report from the command line. Receives the book url from the console.
Below is the instruction that describes the task: ### Input: Runs the report from the command line. Receives the book url from the console. ### Response: def run_report_from_console(output_file_name, callback):
    """ Runs the report from the command line. Receives the book url from the console. """
    print("The report uses a read-only access to the book.")
    print("Now enter the data or ^Z to continue:")

    #report_method = kwargs["report_method"]
    result = callback()

    #output_file_name = kwargs["output_file_name"]
    output = save_to_temp(result, output_file_name)

    webbrowser.open(output)
def relation_get(attribute=None, unit=None, rid=None): """Get relation information""" _args = ['relation-get', '--format=json'] if rid: _args.append('-r') _args.append(rid) _args.append(attribute or '-') if unit: _args.append(unit) try: return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None except CalledProcessError as e: if e.returncode == 2: return None raise
Get relation information
Below is the instruction that describes the task: ### Input: Get relation information ### Response: def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
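The CLI invocation the helper builds, with invented relation id, attribute, and unit values:

_args = ['relation-get', '--format=json', '-r', 'db:1', 'password', 'mysql/0']
print(' '.join(_args))   # relation-get --format=json -r db:1 password mysql/0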
def associate_psds_to_multi_ifo_segments(opt, fd_segments, gwstrain, flen, delta_f, flow, ifos, dyn_range_factor=1., precision=None): """ Associate PSDs to segments for all ifos when using the multi-detector CLI """ for ifo in ifos: if gwstrain is not None: strain = gwstrain[ifo] else: strain = None if fd_segments is not None: segments = fd_segments[ifo] else: segments = None associate_psds_to_single_ifo_segments(opt, segments, strain, flen, delta_f, flow, ifo, dyn_range_factor=dyn_range_factor, precision=precision)
Associate PSDs to segments for all ifos when using the multi-detector CLI
Below is the instruction that describes the task: ### Input: Associate PSDs to segments for all ifos when using the multi-detector CLI ### Response: def associate_psds_to_multi_ifo_segments(opt, fd_segments, gwstrain, flen, delta_f, flow, ifos, dyn_range_factor=1., precision=None):
    """
    Associate PSDs to segments for all ifos when using the multi-detector
    CLI
    """
    for ifo in ifos:
        if gwstrain is not None:
            strain = gwstrain[ifo]
        else:
            strain = None

        if fd_segments is not None:
            segments = fd_segments[ifo]
        else:
            segments = None

        associate_psds_to_single_ifo_segments(opt, segments, strain, flen,
                delta_f, flow, ifo, dyn_range_factor=dyn_range_factor,
                precision=precision)
def build(python=PYTHON): """Build the bigfloat library for in-place testing.""" clean() local( "LIBRARY_PATH={library_path} CPATH={include_path} {python} " "setup.py build_ext --inplace".format( library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python, ))
Build the bigfloat library for in-place testing.
Below is the instruction that describes the task: ### Input: Build the bigfloat library for in-place testing. ### Response: def build(python=PYTHON):
    """Build the bigfloat library for in-place testing."""
    clean()
    local(
        "LIBRARY_PATH={library_path} CPATH={include_path} {python} "
        "setup.py build_ext --inplace".format(
            library_path=LIBRARY_PATH,
            include_path=INCLUDE_PATH,
            python=python,
        ))
def _is_mosaic(dicom_input): """ Use this function to detect if a dicom series is a siemens 4d dataset NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory (containing one series) """ # for grouped dicoms if type(dicom_input) is list and type(dicom_input[0]) is list: header = dicom_input[0][0] else: # all the others header = dicom_input[0] # check if image type contains m and mosaic if 'ImageType' not in header or 'MOSAIC' not in header.ImageType: return False if 'AcquisitionMatrix' not in header or header.AcquisitionMatrix is None: return False return True
Use this function to detect if a dicom series is a siemens 4d dataset NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory (containing one series)
Below is the instruction that describes the task: ### Input: Use this function to detect if a dicom series is a siemens 4d dataset NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory (containing one series) ### Response: def _is_mosaic(dicom_input):
    """
    Use this function to detect if a dicom series is a siemens 4d dataset
    NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
    (containing one series)
    """
    # for grouped dicoms
    if type(dicom_input) is list and type(dicom_input[0]) is list:
        header = dicom_input[0][0]
    else:  # all the others
        header = dicom_input[0]

    # check if image type contains m and mosaic
    if 'ImageType' not in header or 'MOSAIC' not in header.ImageType:
        return False

    if 'AcquisitionMatrix' not in header or header.AcquisitionMatrix is None:
        return False

    return True
def remove(self, rel_path, propagate=False): '''Delete the file from the cache, and from the upstream''' key = self._get_boto_key(rel_path) if key: key.delete()
Delete the file from the cache, and from the upstream
Below is the instruction that describes the task: ### Input: Delete the file from the cache, and from the upstream ### Response: def remove(self, rel_path, propagate=False):
    '''Delete the file from the cache, and from the upstream'''
    key = self._get_boto_key(rel_path)
    if key:
        key.delete()
def consume_arguments(self, argument_list): """ Takes arguments from a list while this parameter can accept them """ if len(argument_list) == 0: return [] if argument_list[0] == self.arg_name: argument_list = argument_list[1:] if self.constraint is bool: self.value = not self.value else: try: value = argument_list.pop(0) except IndexError: raise ParameterError('Argument %s expects a value' % self.arg_name) self.value = value return argument_list
Takes arguments from a list while this parameter can accept them
Below is the instruction that describes the task: ### Input: Takes arguments from a list while this parameter can accept them ### Response: def consume_arguments(self, argument_list):
    """ Takes arguments from a list while this parameter can accept them """
    if len(argument_list) == 0:
        return []
    if argument_list[0] == self.arg_name:
        argument_list = argument_list[1:]
        if self.constraint is bool:
            self.value = not self.value
        else:
            try:
                value = argument_list.pop(0)
            except IndexError:
                raise ParameterError('Argument %s expects a value' % self.arg_name)
            self.value = value
    return argument_list
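A trimmed-down flag parameter with the same consumption logic; the class and names are invented for illustration:

class Flag:
    def __init__(self, arg_name):
        self.arg_name = arg_name
        self.value = False

    def consume_arguments(self, argument_list):
        if argument_list and argument_list[0] == self.arg_name:
            self.value = not self.value     # bool constraint: toggle, consume no value token
            return argument_list[1:]
        return argument_list

f = Flag('--verbose')
print(f.consume_arguments(['--verbose', 'run']), f.value)   # ['run'] True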
def asset_create(self, name, items, tag='', description='', atype='static'): '''asset_create_static name, ips, tags, description Create a new asset list with the defined information. UN-DOCUMENTED CALL: This function is not considered stable. :param name: asset list name (must be unique) :type name: string :param items: list of IP Addresses, CIDR, and Network Ranges :type items: list :param tag: The tag associate to the asset list :type tag: string :param description: The Asset List description :type description: string ''' data = { 'name': name, 'description': description, 'type': atype, 'tags': tag } if atype == 'static': data['definedIPs'] = ','.join(items) if atype == 'dns': data['type'] = 'dnsname' data['definedDNSNames'] = ' '.join(items) return self.raw_query('asset', 'add', data=data)
asset_create_static name, ips, tags, description Create a new asset list with the defined information. UN-DOCUMENTED CALL: This function is not considered stable. :param name: asset list name (must be unique) :type name: string :param items: list of IP Addresses, CIDR, and Network Ranges :type items: list :param tag: The tag associate to the asset list :type tag: string :param description: The Asset List description :type description: string
Below is the the instruction that describes the task: ### Input: asset_create_static name, ips, tags, description Create a new asset list with the defined information. UN-DOCUMENTED CALL: This function is not considered stable. :param name: asset list name (must be unique) :type name: string :param items: list of IP Addresses, CIDR, and Network Ranges :type items: list :param tag: The tag associate to the asset list :type tag: string :param description: The Asset List description :type description: string ### Response: def asset_create(self, name, items, tag='', description='', atype='static'): '''asset_create_static name, ips, tags, description Create a new asset list with the defined information. UN-DOCUMENTED CALL: This function is not considered stable. :param name: asset list name (must be unique) :type name: string :param items: list of IP Addresses, CIDR, and Network Ranges :type items: list :param tag: The tag associate to the asset list :type tag: string :param description: The Asset List description :type description: string ''' data = { 'name': name, 'description': description, 'type': atype, 'tags': tag } if atype == 'static': data['definedIPs'] = ','.join(items) if atype == 'dns': data['type'] = 'dnsname' data['definedDNSNames'] = ' '.join(items) return self.raw_query('asset', 'add', data=data)
def qteNextApplet(self, numSkip: int=1, ofsApp: (QtmacsApplet, str)=None,
                  skipInvisible: bool=True, skipVisible: bool=False,
                  skipMiniApplet: bool=True,
                  windowObj: QtmacsWindow=None):
    """
    Return the next applet in cyclic order.

    If ``ofsApp=None`` then start cycling at the currently
    active applet. If ``ofsApp`` does not fit the selection
    criteria, then the cycling starts at the next applet in
    cyclic order that does.

    The returned applet is ``numSkip`` items in cyclic order away
    from the offset applet. If ``numSkip`` is positive traverse
    the applet list forwards, otherwise backwards.

    The method supports the following Boolean selection criteria:

    * ``skipInvisible``: ignore all invisible applets.
    * ``skipVisible``: ignore all visible applets.
    * ``skipMiniApplet``: ignore the mini applet applet.

    The ``ofsApp`` parameter can either be an instance of
    ``QtmacsApplet`` or a string denoting an applet ID. In the
    latter case the ``qteGetAppletHandle`` method is used to fetch
    the respective applet instance.

    |Args|

    * ``numSkip`` (**int**): number of applets to skip.
    * ``ofsApp`` (**QtmacsApplet**, **str**): applet from where to
      start counting.
    * ``skipInvisible`` (**bool**): whether or not to skip currently
      not shown applets.
    * ``skipVisible`` (**bool**): whether or not to skip currently
      shown applets.
    * ``skipMiniApplet`` (**bool**): whether or not to skip the mini
      applet.
    * ``windowObj`` (**QtmacsWindow**): the window to use when looking
      for applets. If **None**, then search in all windows.

    |Returns|

    * **QtmacsApplet**: either the next applet that fits the criteria,
      or **None** if no such applet exists.

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid
      type.
    """
    # If ``applet`` was specified by its ID (ie. a string) then
    # fetch the associated ``QtmacsApplet`` instance. If
    # ``applet`` is already an instance of ``QtmacsApplet`` then
    # use it directly.
    if isinstance(ofsApp, str):
        ofsApp = self.qteGetAppletHandle(ofsApp)

    # Return immediately if the applet list is empty.
    if len(self._qteAppletList) == 0:
        return None

    # Sanity check: if the user requests applets that are neither
    # visible nor invisible then return immediately because no
    # such applet can possibly exist.
    if skipVisible and skipInvisible:
        return None

    # Make a copy of the applet list.
    appList = list(self._qteAppletList)

    # Remove all invisible applets from the list if the
    # skipInvisible flag is set.
    if skipInvisible:
        appList = [app for app in appList if app.qteIsVisible()]

        # From the list of (now guaranteed visible) applets remove
        # all those that are not in the specified window.
        if windowObj is not None:
            appList = [app for app in appList
                       if app.qteParentWindow() == windowObj]

    # Remove all visible applets from the list if the
    # skipInvisible flag is set.
    if skipVisible:
        appList = [app for app in appList if not app.qteIsVisible()]

    # If the mini-buffer is to be skipped remove it (if a custom
    # mini applet even exists).
    if skipMiniApplet:
        if self._qteMiniApplet in appList:
            appList.remove(self._qteMiniApplet)

    # Return immediately if no applet satisfied all criteria.
    if len(appList) == 0:
        return None

    # If no offset applet was given use the currently active one.
    if ofsApp is None:
        ofsApp = self._qteActiveApplet

    if ofsApp in self._qteAppletList:
        # Determine if the offset applet is part of the pruned
        # list.
        if ofsApp in appList:
            # Yes: determine its index in the list.
            ofsIdx = appList.index(ofsApp)
        else:
            # No: traverse all applets until one is found that is
            # also part of the pruned list (start at ofsIdx). Then
            # determine its index in the list.
            ofsIdx = self._qteAppletList.index(ofsApp)
            glob_list = self._qteAppletList[ofsIdx:]
            glob_list += self._qteAppletList[:ofsIdx]

            # Compile the intersection between the global and pruned list.
            ofsIdx = [appList.index(_) for _ in glob_list if _ in appList]
            if len(ofsIdx) == 0:
                msg = ('No match between global and local applet list'
                       ' --> Bug.')
                self.qteLogger.error(msg, stack_info=True)
                return None
            else:
                # Pick the first match.
                ofsIdx = ofsIdx[0]
    else:
        # The offset applet does not exist, eg. because the user
        # supplied a handle that does not point to an applet or
        # we are called from qteKillApplet to replace the just
        # removed (and active) applet.
        ofsIdx = 0

    # Compute the index of the next applet and wrap around the
    # list if necessary.
    ofsIdx = (ofsIdx + numSkip) % len(appList)

    # Return a handle to the applet that meets the specified
    # criteria.
    return appList[ofsIdx]
Return the next applet in cyclic order. If ``ofsApp=None`` then start cycling at the currently active applet. If ``ofsApp`` does not fit the selection criteria, then the cycling starts at the next applet in cyclic order that does. The returned applet is ``numSkip`` items in cyclic order away from the offset applet. If ``numSkip`` is positive, traverse the applet list forwards, otherwise backwards. The method supports the following Boolean selection criteria: * ``skipInvisible``: ignore all invisible applets. * ``skipVisible``: ignore all visible applets. * ``skipMiniApplet``: ignore the mini applet. The ``ofsApp`` parameter can either be an instance of ``QtmacsApplet`` or a string denoting an applet ID. In the latter case the ``qteGetAppletHandle`` method is used to fetch the respective applet instance. |Args| * ``numSkip`` (**int**): number of applets to skip. * ``ofsApp`` (**QtmacsApplet**, **str**): applet from where to start counting. * ``skipInvisible`` (**bool**): whether or not to skip currently not shown applets. * ``skipVisible`` (**bool**): whether or not to skip currently shown applets. * ``skipMiniApplet`` (**bool**): whether or not to skip the mini applet. * ``windowObj`` (**QtmacsWindow**): the window to use when looking for applets. If **None**, then search in all windows. |Returns| * **QtmacsApplet**: either the next applet that fits the criteria, or **None** if no such applet exists. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
Below is the instruction that describes the task: ### Input: Return the next applet in cyclic order. If ``ofsApp=None`` then start cycling at the currently active applet. If ``ofsApp`` does not fit the selection criteria, then the cycling starts at the next applet in cyclic order that does. The returned applet is ``numSkip`` items in cyclic order away from the offset applet. If ``numSkip`` is positive, traverse the applet list forwards, otherwise backwards. The method supports the following Boolean selection criteria: * ``skipInvisible``: ignore all invisible applets. * ``skipVisible``: ignore all visible applets. * ``skipMiniApplet``: ignore the mini applet. The ``ofsApp`` parameter can either be an instance of ``QtmacsApplet`` or a string denoting an applet ID. In the latter case the ``qteGetAppletHandle`` method is used to fetch the respective applet instance. |Args| * ``numSkip`` (**int**): number of applets to skip. * ``ofsApp`` (**QtmacsApplet**, **str**): applet from where to start counting. * ``skipInvisible`` (**bool**): whether or not to skip currently not shown applets. * ``skipVisible`` (**bool**): whether or not to skip currently shown applets. * ``skipMiniApplet`` (**bool**): whether or not to skip the mini applet. * ``windowObj`` (**QtmacsWindow**): the window to use when looking for applets. If **None**, then search in all windows. |Returns| * **QtmacsApplet**: either the next applet that fits the criteria, or **None** if no such applet exists. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. ### Response: def qteNextApplet(self, numSkip: int=1, ofsApp: (QtmacsApplet, str)=None, skipInvisible: bool=True, skipVisible: bool=False, skipMiniApplet: bool=True, windowObj: QtmacsWindow=None): """ Return the next applet in cyclic order. If ``ofsApp=None`` then start cycling at the currently active applet. If ``ofsApp`` does not fit the selection criteria, then the cycling starts at the next applet in cyclic order that does. The returned applet is ``numSkip`` items in cyclic order away from the offset applet. If ``numSkip`` is positive, traverse the applet list forwards, otherwise backwards. The method supports the following Boolean selection criteria: * ``skipInvisible``: ignore all invisible applets. * ``skipVisible``: ignore all visible applets. * ``skipMiniApplet``: ignore the mini applet. The ``ofsApp`` parameter can either be an instance of ``QtmacsApplet`` or a string denoting an applet ID. In the latter case the ``qteGetAppletHandle`` method is used to fetch the respective applet instance. |Args| * ``numSkip`` (**int**): number of applets to skip. * ``ofsApp`` (**QtmacsApplet**, **str**): applet from where to start counting. * ``skipInvisible`` (**bool**): whether or not to skip currently not shown applets. * ``skipVisible`` (**bool**): whether or not to skip currently shown applets. * ``skipMiniApplet`` (**bool**): whether or not to skip the mini applet. * ``windowObj`` (**QtmacsWindow**): the window to use when looking for applets. If **None**, then search in all windows. |Returns| * **QtmacsApplet**: either the next applet that fits the criteria, or **None** if no such applet exists. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. """ # If ``applet`` was specified by its ID (i.e. a string) then # fetch the associated ``QtmacsApplet`` instance. If # ``applet`` is already an instance of ``QtmacsApplet`` then # use it directly. if isinstance(ofsApp, str): ofsApp = self.qteGetAppletHandle(ofsApp) # Return immediately if the applet list is empty. if len(self._qteAppletList) == 0: return None # Sanity check: if the user requests applets that are neither # visible nor invisible then return immediately because no # such applet can possibly exist. if skipVisible and skipInvisible: return None # Make a copy of the applet list. appList = list(self._qteAppletList) # Remove all invisible applets from the list if the # skipInvisible flag is set. if skipInvisible: appList = [app for app in appList if app.qteIsVisible()] # From the list of (now guaranteed visible) applets remove # all those that are not in the specified window. if windowObj is not None: appList = [app for app in appList if app.qteParentWindow() == windowObj] # Remove all visible applets from the list if the # skipVisible flag is set. if skipVisible: appList = [app for app in appList if not app.qteIsVisible()] # If the mini-buffer is to be skipped remove it (if a custom # mini applet even exists). if skipMiniApplet: if self._qteMiniApplet in appList: appList.remove(self._qteMiniApplet) # Return immediately if no applet satisfied all criteria. if len(appList) == 0: return None # If no offset applet was given use the currently active one. if ofsApp is None: ofsApp = self._qteActiveApplet if ofsApp in self._qteAppletList: # Determine if the offset applet is part of the pruned # list. if ofsApp in appList: # Yes: determine its index in the list. ofsIdx = appList.index(ofsApp) else: # No: traverse all applets until one is found that is # also part of the pruned list (start at ofsIdx). Then # determine its index in the list. ofsIdx = self._qteAppletList.index(ofsApp) glob_list = self._qteAppletList[ofsIdx:] glob_list += self._qteAppletList[:ofsIdx] # Compile the intersection between the global and pruned list. ofsIdx = [appList.index(_) for _ in glob_list if _ in appList] if len(ofsIdx) == 0: msg = ('No match between global and local applet list' ' --> Bug.') self.qteLogger.error(msg, stack_info=True) return None else: # Pick the first match. ofsIdx = ofsIdx[0] else: # The offset applet does not exist, e.g. because the user # supplied a handle that does not point to an applet or # we are called from qteKillApplet to replace the just # removed (and active) applet. ofsIdx = 0 # Compute the index of the next applet and wrap around the # list if necessary. ofsIdx = (ofsIdx + numSkip) % len(appList) # Return a handle to the applet that meets the specified # criteria. return appList[ofsIdx]
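The heart of the selection logic is the modular index arithmetic at the end; here is a standalone sketch of that wrap-around behaviour on a plain list (the applet names are made up for illustration):

# Pruned candidate list, as appList would look after filtering.
app_list = ['editor', 'shell', 'help']

def next_index(ofs_idx, num_skip, items):
    # Mirrors ofsIdx = (ofsIdx + numSkip) % len(appList): a positive
    # num_skip walks forwards, a negative one backwards, and Python's
    # modulo keeps the result inside the list in both directions.
    return (ofs_idx + num_skip) % len(items)

print(app_list[next_index(2, 1, app_list)])   # wraps forward to 'editor'
print(app_list[next_index(0, -1, app_list)])  # wraps backward to 'help'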
def AddEventTags(self, event_tags): """Adds event tags. Args: event_tags (list[EventTag]): event tags. Raises: IOError: when the storage file is closed or read-only or if the event tags cannot be serialized. OSError: when the storage file is closed or read-only or if the event tags cannot be serialized. """ self._RaiseIfNotWritable() for event_tag in event_tags: self.AddEventTag(event_tag)
Adds event tags. Args: event_tags (list[EventTag]): event tags. Raises: IOError: when the storage file is closed or read-only or if the event tags cannot be serialized. OSError: when the storage file is closed or read-only or if the event tags cannot be serialized.
Below is the instruction that describes the task: ### Input: Adds event tags. Args: event_tags (list[EventTag]): event tags. Raises: IOError: when the storage file is closed or read-only or if the event tags cannot be serialized. OSError: when the storage file is closed or read-only or if the event tags cannot be serialized. ### Response: def AddEventTags(self, event_tags): """Adds event tags. Args: event_tags (list[EventTag]): event tags. Raises: IOError: when the storage file is closed or read-only or if the event tags cannot be serialized. OSError: when the storage file is closed or read-only or if the event tags cannot be serialized. """ self._RaiseIfNotWritable() for event_tag in event_tags: self.AddEventTag(event_tag)
def _SendStruct(self, fmt, *args): """Pack a struct (without length or checksum) and send it. """ data = struct.pack(fmt, *args) data_len = len(data) + 1 checksum = (data_len + sum(bytearray(data))) % 256 out = struct.pack("B", data_len) + data + struct.pack("B", checksum) self.ser.write(out)
Pack a struct (without length or checksum) and send it.
Below is the instruction that describes the task: ### Input: Pack a struct (without length or checksum) and send it. ### Response: def _SendStruct(self, fmt, *args): """Pack a struct (without length or checksum) and send it. """ data = struct.pack(fmt, *args) data_len = len(data) + 1 checksum = (data_len + sum(bytearray(data))) % 256 out = struct.pack("B", data_len) + data + struct.pack("B", checksum) self.ser.write(out)
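The frame built here is one length byte, the packed payload, and one modulo-256 checksum byte; a self-contained sketch of that framing with the serial write left out:

import struct

def build_frame(fmt, *args):
    # Same framing as _SendStruct: the length byte counts the payload
    # plus the checksum byte, and the checksum is (length + sum of
    # payload bytes) mod 256.
    data = struct.pack(fmt, *args)
    data_len = len(data) + 1
    checksum = (data_len + sum(bytearray(data))) % 256
    return struct.pack("B", data_len) + data + struct.pack("B", checksum)

frame = build_frame("<HB", 0x1234, 7)  # little-endian u16 plus u8
print(frame.hex())  # '0434120751' -> length, 3 payload bytes, checksum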
def _ensure_slack(self, connector: Any, retries: int, backoff: Callable[[int], float]) -> None: """ Ensure we have a SlackClient. """ connector = self._env_var if connector is None else connector slack: SlackClient = _create_slack(connector) self._slack = _SlackClientWrapper( slack=slack, retries=retries, backoff=backoff )
Ensure we have a SlackClient.
Below is the instruction that describes the task: ### Input: Ensure we have a SlackClient. ### Response: def _ensure_slack(self, connector: Any, retries: int, backoff: Callable[[int], float]) -> None: """ Ensure we have a SlackClient. """ connector = self._env_var if connector is None else connector slack: SlackClient = _create_slack(connector) self._slack = _SlackClientWrapper( slack=slack, retries=retries, backoff=backoff )
def _keyboard_access(self, element): """ Provide keyboard access for the element, if it does not already have it. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement """ # pylint: disable=no-self-use if not element.has_attribute('tabindex'): tag = element.get_tag_name() if (tag == 'A') and (not element.has_attribute('href')): element.set_attribute('tabindex', '0') elif ( (tag != 'A') and (tag != 'INPUT') and (tag != 'BUTTON') and (tag != 'SELECT') and (tag != 'TEXTAREA') ): element.set_attribute('tabindex', '0')
Provide keyboard access for the element, if it does not already have it. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
Below is the instruction that describes the task: ### Input: Provide keyboard access for the element, if it does not already have it. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement ### Response: def _keyboard_access(self, element): """ Provide keyboard access for the element, if it does not already have it. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement """ # pylint: disable=no-self-use if not element.has_attribute('tabindex'): tag = element.get_tag_name() if (tag == 'A') and (not element.has_attribute('href')): element.set_attribute('tabindex', '0') elif ( (tag != 'A') and (tag != 'INPUT') and (tag != 'BUTTON') and (tag != 'SELECT') and (tag != 'TEXTAREA') ): element.set_attribute('tabindex', '0')
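A runnable sketch of the tabindex rule using a minimal stand-in element class (FakeElement below is purely illustrative and not part of HaTeMiLe):

class FakeElement:
    # Illustrative stand-in exposing just the methods the rule needs.
    def __init__(self, tag, **attrs):
        self.tag, self.attrs = tag, attrs
    def has_attribute(self, name):
        return name in self.attrs
    def get_tag_name(self):
        return self.tag
    def set_attribute(self, name, value):
        self.attrs[name] = value

def keyboard_access(element):
    # Same decision table as _keyboard_access: anchors without href and
    # non-interactive tags get tabindex="0"; native controls are left alone.
    if not element.has_attribute('tabindex'):
        tag = element.get_tag_name()
        if tag == 'A' and not element.has_attribute('href'):
            element.set_attribute('tabindex', '0')
        elif tag not in ('A', 'INPUT', 'BUTTON', 'SELECT', 'TEXTAREA'):
            element.set_attribute('tabindex', '0')

div = FakeElement('DIV')
keyboard_access(div)
print(div.attrs)  # {'tabindex': '0'}

button = FakeElement('BUTTON')
keyboard_access(button)
print(button.attrs)  # {} -- native controls are already focusable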
def mutate(self, mutation, timeout=None, metadata=None, credentials=None): """Runs mutate operation.""" return self.stub.Mutate(mutation, timeout=timeout, metadata=metadata, credentials=credentials)
Runs mutate operation.
Below is the instruction that describes the task: ### Input: Runs mutate operation. ### Response: def mutate(self, mutation, timeout=None, metadata=None, credentials=None): """Runs mutate operation.""" return self.stub.Mutate(mutation, timeout=timeout, metadata=metadata, credentials=credentials)
def remove_namespace(doc, namespace): """ Takes in an ElementTree object and namespace value. The length of that namespace value is removed from all Element nodes within the document. This effectively removes the namespace from that document. :param doc: lxml.etree :param namespace: Namespace that needs to be removed. :return: Returns the source document with namespaces removed. """ # http://homework.nwsnet.de/products/45be_remove-namespace-in-an-xml-document-using-elementtree # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ns = '{{{}}}'.format(namespace) nsl = len(ns) # print 'DEBUG: removing',ns for elem in doc.getiterator(): if elem.tag.startswith(ns): elem.tag = elem.tag[nsl:] return doc
Takes in an ElementTree object and namespace value. The length of that namespace value is removed from all Element nodes within the document. This effectively removes the namespace from that document. :param doc: lxml.etree :param namespace: Namespace that needs to be removed. :return: Returns the source document with namespaces removed.
Below is the instruction that describes the task: ### Input: Takes in an ElementTree object and namespace value. The length of that namespace value is removed from all Element nodes within the document. This effectively removes the namespace from that document. :param doc: lxml.etree :param namespace: Namespace that needs to be removed. :return: Returns the source document with namespaces removed. ### Response: def remove_namespace(doc, namespace): """ Takes in an ElementTree object and namespace value. The length of that namespace value is removed from all Element nodes within the document. This effectively removes the namespace from that document. :param doc: lxml.etree :param namespace: Namespace that needs to be removed. :return: Returns the source document with namespaces removed. """ # http://homework.nwsnet.de/products/45be_remove-namespace-in-an-xml-document-using-elementtree # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ns = '{{{}}}'.format(namespace) nsl = len(ns) # print 'DEBUG: removing',ns for elem in doc.getiterator(): if elem.tag.startswith(ns): elem.tag = elem.tag[nsl:] return doc
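A short sketch of calling it on a namespaced lxml document; note that getiterator() is deprecated in recent lxml and ElementTree releases (iter() is the replacement), so this assumes a version that still provides it:

from lxml import etree

xml = b'<root xmlns="http://example.com/ns"><child/></root>'
doc = etree.fromstring(xml)
print(doc.tag)  # {http://example.com/ns}root

remove_namespace(doc, 'http://example.com/ns')
print(doc.tag)     # root
print(doc[0].tag)  # child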
def nl_complete_msg(sk, msg): """Finalize Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450 This function finalizes a Netlink message by completing the message with desirable flags and values depending on the socket configuration. - If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the socket. - If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`). - If not yet specified, the protocol field of the message will be set to the protocol field of the socket. - The `NLM_F_REQUEST` Netlink message flag will be set. - The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). msg -- Netlink message (nl_msg class instance). """ nlh = msg.nm_nlh if nlh.nlmsg_pid == NL_AUTO_PORT: nlh.nlmsg_pid = nl_socket_get_local_port(sk) if nlh.nlmsg_seq == NL_AUTO_SEQ: nlh.nlmsg_seq = sk.s_seq_next sk.s_seq_next += 1 if msg.nm_protocol == -1: msg.nm_protocol = sk.s_proto nlh.nlmsg_flags |= NLM_F_REQUEST if not sk.s_flags & NL_NO_AUTO_ACK: nlh.nlmsg_flags |= NLM_F_ACK
Finalize Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450 This function finalizes a Netlink message by completing the message with desirable flags and values depending on the socket configuration. - If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the socket. - If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`). - If not yet specified, the protocol field of the message will be set to the protocol field of the socket. - The `NLM_F_REQUEST` Netlink message flag will be set. - The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). msg -- Netlink message (nl_msg class instance).
Below is the instruction that describes the task: ### Input: Finalize Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450 This function finalizes a Netlink message by completing the message with desirable flags and values depending on the socket configuration. - If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the socket. - If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`). - If not yet specified, the protocol field of the message will be set to the protocol field of the socket. - The `NLM_F_REQUEST` Netlink message flag will be set. - The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). msg -- Netlink message (nl_msg class instance). ### Response: def nl_complete_msg(sk, msg): """Finalize Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450 This function finalizes a Netlink message by completing the message with desirable flags and values depending on the socket configuration. - If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the socket. - If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`). - If not yet specified, the protocol field of the message will be set to the protocol field of the socket. - The `NLM_F_REQUEST` Netlink message flag will be set. - The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). msg -- Netlink message (nl_msg class instance). """ nlh = msg.nm_nlh if nlh.nlmsg_pid == NL_AUTO_PORT: nlh.nlmsg_pid = nl_socket_get_local_port(sk) if nlh.nlmsg_seq == NL_AUTO_SEQ: nlh.nlmsg_seq = sk.s_seq_next sk.s_seq_next += 1 if msg.nm_protocol == -1: msg.nm_protocol = sk.s_proto nlh.nlmsg_flags |= NLM_F_REQUEST if not sk.s_flags & NL_NO_AUTO_ACK: nlh.nlmsg_flags |= NLM_F_ACK
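The flag handling at the end reduces to bitwise ORs over the standard kernel flag values (NLM_F_REQUEST is 1 and NLM_F_ACK is 4 in the netlink headers); a standalone sketch:

NLM_F_REQUEST = 1  # message is a request
NLM_F_ACK = 4      # ask the kernel to confirm with an ACK

nlmsg_flags = 0
auto_ack = True  # i.e. NL_NO_AUTO_ACK is not set in sk.s_flags

nlmsg_flags |= NLM_F_REQUEST
if auto_ack:
    nlmsg_flags |= NLM_F_ACK

print(bin(nlmsg_flags))  # 0b101 -> request + ack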
def clearAndSetComboBoxes(self, axesNames): """ Removes all comboboxes and recreates them for the given axes names. """ logger.debug("Collector clearAndSetComboBoxes: {}".format(axesNames)) check_is_a_sequence(axesNames) row = 0 self._deleteComboBoxes(row) self.clear() self._setAxesNames(axesNames) self._createComboBoxes(row) self._updateWidgets()
Removes all comboboxes and recreates them for the given axes names.
Below is the instruction that describes the task: ### Input: Removes all comboboxes and recreates them for the given axes names. ### Response: def clearAndSetComboBoxes(self, axesNames): """ Removes all comboboxes and recreates them for the given axes names. """ logger.debug("Collector clearAndSetComboBoxes: {}".format(axesNames)) check_is_a_sequence(axesNames) row = 0 self._deleteComboBoxes(row) self.clear() self._setAxesNames(axesNames) self._createComboBoxes(row) self._updateWidgets()
def define_standalone_options(parser, extra_options=None): ''' Adds the options specific to the database connection. Parses the agency configuration files and uses its configuration as the default values. ''' c = config.parse_service_config() parser.add_option('--dbhost', '-H', action='store', dest='db_host', type='str', help='hostname of the database', default=c.db.host) parser.add_option('--dbname', '-n', action='store', dest='db_name', type='str', help='name of database to use', default=c.db.name) parser.add_option('--dbport', '-P', action='store', dest='db_port', type='str', help='port of database to use', default=c.db.port) parser.add_option('--dbusername', dest="db_username", help="username to use for authentication ", metavar="USER", default=c.db.username) parser.add_option('--dbpassword', dest="db_password", help="password to use for authentication ", metavar="PASSWORD", default=c.db.password) parser.add_option('--ssl', '-S', action='store_true', dest='db_https', help='whether to use SSL db connections', default=False) parser.add_option('--log', action='store', dest='log', type='str', help='log level to set', default=os.environ.get('FEAT_DEBUG', '2')) if extra_options: for option in extra_options: parser.add_option(option) return parser
Adds the options specific to the database connection. Parses the agency configuration files and uses its configuration as the default values.
Below is the instruction that describes the task: ### Input: Adds the options specific to the database connection. Parses the agency configuration files and uses its configuration as the default values. ### Response: def define_standalone_options(parser, extra_options=None): ''' Adds the options specific to the database connection. Parses the agency configuration files and uses its configuration as the default values. ''' c = config.parse_service_config() parser.add_option('--dbhost', '-H', action='store', dest='db_host', type='str', help='hostname of the database', default=c.db.host) parser.add_option('--dbname', '-n', action='store', dest='db_name', type='str', help='name of database to use', default=c.db.name) parser.add_option('--dbport', '-P', action='store', dest='db_port', type='str', help='port of database to use', default=c.db.port) parser.add_option('--dbusername', dest="db_username", help="username to use for authentication ", metavar="USER", default=c.db.username) parser.add_option('--dbpassword', dest="db_password", help="password to use for authentication ", metavar="PASSWORD", default=c.db.password) parser.add_option('--ssl', '-S', action='store_true', dest='db_https', help='whether to use SSL db connections', default=False) parser.add_option('--log', action='store', dest='log', type='str', help='log level to set', default=os.environ.get('FEAT_DEBUG', '2')) if extra_options: for option in extra_options: parser.add_option(option) return parser
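A hedged usage sketch, assuming the feat package (which provides the config module used above) is installed and its service configuration is readable; the --cache-dir option is a hypothetical extra:

from optparse import OptionParser, make_option

# Hypothetical extra option appended after the standard database options.
extra = [make_option('--cache-dir', dest='cache_dir', help='cache directory')]

parser = define_standalone_options(OptionParser(), extra_options=extra)
opts, _ = parser.parse_args(['--dbhost', 'db.example.com', '-n', 'feat_test',
                             '--cache-dir', '/tmp/cache'])
print(opts.db_host, opts.db_name, opts.cache_dir)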
def update_coordinates(self, filename, update_port_locations=True): """Update the coordinates of this Compound from a file. Parameters ---------- filename : str Name of file from which to load coordinates. Supported file types are the same as those supported by load() update_port_locations : bool, optional, default=True Update the locations of Ports so that they are shifted along with their anchor particles. Note: This conserves the location of Ports with respect to the anchor Particle, but does not conserve the orientation of Ports with respect to the molecule as a whole. See Also -------- load : Load coordinates from a file """ if update_port_locations: xyz_init = self.xyz self = load(filename, compound=self, coords_only=True) self._update_port_locations(xyz_init) else: self = load(filename, compound=self, coords_only=True)
Update the coordinates of this Compound from a file. Parameters ---------- filename : str Name of file from which to load coordinates. Supported file types are the same as those supported by load() update_port_locations : bool, optional, default=True Update the locations of Ports so that they are shifted along with their anchor particles. Note: This conserves the location of Ports with respect to the anchor Particle, but does not conserve the orientation of Ports with respect to the molecule as a whole. See Also -------- load : Load coordinates from a file
Below is the instruction that describes the task: ### Input: Update the coordinates of this Compound from a file. Parameters ---------- filename : str Name of file from which to load coordinates. Supported file types are the same as those supported by load() update_port_locations : bool, optional, default=True Update the locations of Ports so that they are shifted along with their anchor particles. Note: This conserves the location of Ports with respect to the anchor Particle, but does not conserve the orientation of Ports with respect to the molecule as a whole. See Also -------- load : Load coordinates from a file ### Response: def update_coordinates(self, filename, update_port_locations=True): """Update the coordinates of this Compound from a file. Parameters ---------- filename : str Name of file from which to load coordinates. Supported file types are the same as those supported by load() update_port_locations : bool, optional, default=True Update the locations of Ports so that they are shifted along with their anchor particles. Note: This conserves the location of Ports with respect to the anchor Particle, but does not conserve the orientation of Ports with respect to the molecule as a whole. See Also -------- load : Load coordinates from a file """ if update_port_locations: xyz_init = self.xyz self = load(filename, compound=self, coords_only=True) self._update_port_locations(xyz_init) else: self = load(filename, compound=self, coords_only=True)
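A usage sketch with mBuild; the file names are placeholders and any format accepted by load() works:

import mbuild as mb

# Build a compound from one structure file, then pull in refined
# coordinates, e.g. after an external energy minimization.
compound = mb.load('ethane.pdb')
compound.update_coordinates('ethane_minimized.pdb')

# Ports (if any) move with their anchor particles by default; pass
# update_port_locations=False to skip that bookkeeping.
compound.update_coordinates('ethane_minimized.pdb', update_port_locations=False)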
def feedforward(inputs, num_units, scope="multihead_attention"): '''Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A 3d tensor with the same shape and dtype as inputs ''' with tf.variable_scope(scope): # Inner layer params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, "activation": tf.nn.relu, "use_bias": True} outputs = tf.layers.conv1d(**params) # Readout layer params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, "activation": None, "use_bias": True} outputs = tf.layers.conv1d(**params) # Residual connection outputs += inputs # Normalize outputs = normalize(outputs) return outputs
Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A 3d tensor with the same shape and dtype as inputs
Below is the instruction that describes the task: ### Input: Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A 3d tensor with the same shape and dtype as inputs ### Response: def feedforward(inputs, num_units, scope="multihead_attention"): '''Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A 3d tensor with the same shape and dtype as inputs ''' with tf.variable_scope(scope): # Inner layer params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, "activation": tf.nn.relu, "use_bias": True} outputs = tf.layers.conv1d(**params) # Readout layer params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, "activation": None, "use_bias": True} outputs = tf.layers.conv1d(**params) # Residual connection outputs += inputs # Normalize outputs = normalize(outputs) return outputs
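A usage sketch under TensorFlow 1.x (tf.layers and tf.variable_scope were removed in TF 2); it also assumes the normalize() layer-normalization helper defined elsewhere in the same module:

import tensorflow as tf  # TensorFlow 1.x API assumed

# [batch, time, channels] input, e.g. 512-dim encoder states.
x = tf.placeholder(tf.float32, shape=[None, 10, 512])

# Inner expansion to 2048 units, then projection back to 512 so the
# residual connection inside feedforward() lines up with the input.
y = feedforward(x, num_units=[2048, 512], scope="ffn")

print(y.get_shape().as_list())  # [None, 10, 512]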
def setup_callbacks(self): ''' Assign attributes for pygit2 callbacks ''' if PYGIT2_VERSION >= _LooseVersion('0.23.2'): self.remotecallbacks = pygit2.RemoteCallbacks( credentials=self.credentials) if not self.ssl_verify: # Override the certificate_check function with a lambda that # just returns True, thus skipping the cert check. self.remotecallbacks.certificate_check = \ lambda *args, **kwargs: True else: self.remotecallbacks = None if not self.ssl_verify: warnings.warn( 'pygit2 does not support disabling the SSL certificate ' 'check in versions prior to 0.23.2 (installed: {0}). ' 'Fetches for self-signed certificates will fail.'.format( PYGIT2_VERSION ) )
Assign attributes for pygit2 callbacks
Below is the instruction that describes the task: ### Input: Assign attributes for pygit2 callbacks ### Response: def setup_callbacks(self): ''' Assign attributes for pygit2 callbacks ''' if PYGIT2_VERSION >= _LooseVersion('0.23.2'): self.remotecallbacks = pygit2.RemoteCallbacks( credentials=self.credentials) if not self.ssl_verify: # Override the certificate_check function with a lambda that # just returns True, thus skipping the cert check. self.remotecallbacks.certificate_check = \ lambda *args, **kwargs: True else: self.remotecallbacks = None if not self.ssl_verify: warnings.warn( 'pygit2 does not support disabling the SSL certificate ' 'check in versions prior to 0.23.2 (installed: {0}). ' 'Fetches for self-signed certificates will fail.'.format( PYGIT2_VERSION ) )
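A sketch of the same setup done by hand against pygit2 >= 0.23.2; the SSH key paths and clone URL are placeholders:

import pygit2

# SSH keypair credentials (username, public key, private key, passphrase).
creds = pygit2.Keypair('git', '/home/user/.ssh/id_rsa.pub',
                       '/home/user/.ssh/id_rsa', '')
callbacks = pygit2.RemoteCallbacks(credentials=creds)

# Mirror of the ssl_verify=False branch: accept any certificate.
callbacks.certificate_check = lambda *args, **kwargs: True

repo = pygit2.clone_repository('https://git.example.com/repo.git',
                               '/tmp/repo', callbacks=callbacks)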