Dataset columns and string lengths:
code: 75 to 104k
docstring: 1 to 46.9k
text: 164 to 112k
def closeEvent(self, event): """Send last file signal on close event :param event: The close event :type event: :returns: None :rtype: None :raises: None """ lf = self.browser.get_current_selection() if lf: self.last_file.emit(lf) return super(GenesisWin, self).close()
Send last file signal on close event :param event: The close event :type event: :returns: None :rtype: None :raises: None
def html_single_plot(self,abfID,launch=False,overwrite=False): """create ID_plot.html of just intrinsic properties.""" if type(abfID) is str: abfID=[abfID] for thisABFid in cm.abfSort(abfID): parentID=cm.parent(self.groups,thisABFid) saveAs=os.path.abspath("%s/%s_plot.html"%(self.folder2,parentID)) if overwrite is False and os.path.basename(saveAs) in self.files2: continue filesByType=cm.filesByType(self.groupFiles[parentID]) html="" html+='<div style="background-color: #DDDDFF;">' html+='<span class="title">intrinsic properties for: %s</span></br>'%parentID html+='<code>%s</code>'%os.path.abspath(self.folder1+"/"+parentID+".abf") html+='</div>' for fname in filesByType['plot']: html+=self.htmlFor(fname) print("creating",saveAs,'...') style.save(html,saveAs,launch=launch)
create ID_plot.html of just intrinsic properties.
def ParseOptions(cls, options, configuration_object): """Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: when a configuration parameter fails validation. """ if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') hashers = cls._ParseStringOption( options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING) hasher_file_size_limit = cls._ParseNumericOption( options, 'hasher_file_size_limit', default_value=0) # TODO: validate hasher names. if hasher_file_size_limit < 0: raise errors.BadConfigOption( 'Invalid hasher file size limit value cannot be negative.') setattr(configuration_object, '_hasher_names_string', hashers) setattr( configuration_object, '_hasher_file_size_limit', hasher_file_size_limit)
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
def proj2equidistant(network): """Defines conformal (e.g. WGS84) to ETRS (equidistant) projection Source CRS is loaded from Network's config. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object Returns ------- :py:func:`functools.partial` """ srid = int(network.config['geo']['srid']) return partial(pyproj.transform, pyproj.Proj(init='epsg:{}' .format(str(srid))), # source coordinate system pyproj.Proj(init='epsg:3035') # destination coordinate system )
Defines conformal (e.g. WGS84) to ETRS (equidistant) projection Source CRS is loaded from Network's config. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object Returns ------- :py:func:`functools.partial`
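A hedged usage sketch for the projection helper above: it assumes an eDisGo Network object with a configured geo/srid entry and a shapely geometry in that source CRS (both names are illustrative, not from the source); the partial it returns plugs directly into shapely.ops.transform.

from shapely.ops import transform

project = proj2equidistant(network)                # 'network' assumed to exist
geom_3035 = transform(project, geom_wgs84)         # geometry re-projected to EPSG:3035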
def format_expose(expose): """ Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple. :param: Port numbers, can be as integer, string, or a list/tuple of those. :type expose: int | unicode | str | list | tuple :return: A tuple, to be separated by spaces before inserting in a Dockerfile. :rtype: tuple """ if isinstance(expose, six.string_types): return expose, elif isinstance(expose, collections.Iterable): return map(six.text_type, expose) return six.text_type(expose),
Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple. :param expose: Port numbers; can be an integer, a string, or a list/tuple of those. :type expose: int | unicode | str | list | tuple :return: A tuple, to be separated by spaces before inserting in a Dockerfile. :rtype: tuple
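A minimal usage sketch for format_expose, assuming the six and collections imports used by the function are available. Note that on Python 3 the iterable branch returns a map object rather than a tuple (callers typically join it), and collections.Iterable needs collections.abc.Iterable on Python 3.10+.

format_expose("8080")               # -> ('8080',)
format_expose(8080)                 # -> ('8080',)
" ".join(format_expose([80, 443]))  # -> '80 443', the iterable branch yields strings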
def maybe_check_scalar_distribution(distribution, expected_base_dtype, validate_args): """Helper which checks validity of a scalar `distribution` init arg. Valid here means: * `distribution` has scalar batch and event shapes. * `distribution` is `FULLY_REPARAMETERIZED` * `distribution` has expected dtype. Args: distribution: `Distribution`-like object. expected_base_dtype: `TensorFlow` `dtype`. validate_args: Python `bool`. Whether to do additional checks: (i) check that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add `tf.Assert` ops to the graph to enforce that distribution is scalar in the event that this cannot be determined statically. Returns: List of `tf.Assert` ops to run to enforce validity checks that could not be statically determined. Empty if `not validate_args`. Raises: ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED ValueError: If distribution is statically determined to not have both scalar batch and scalar event shapes. """ if distribution.dtype != expected_base_dtype: raise TypeError("dtype mismatch; " "distribution.dtype=\"{}\" is not \"{}\"".format( dtype_util.name(distribution.dtype), dtype_util.name(expected_base_dtype))) # Although `reparameterization_type` is a static property, we guard it by # `validate_args`. This allows users to use a `distribution` which is not # reparameterized itself. However, we tacitly assume that although the # distribution is not reparameterized, it only depends on non-trainable # variables. if validate_args and (distribution.reparameterization_type != reparameterization.FULLY_REPARAMETERIZED): raise ValueError("Base distribution should be reparameterized or be " "a function of non-trainable variables; " "distribution.reparameterization_type = \"{}\" " "!= \"FULLY_REPARAMETERIZED\".".format( distribution.reparameterization_type)) with tf.name_scope("check_distribution"): assertions = [] def check_is_scalar(is_scalar, name): is_scalar_ = tf.get_static_value(is_scalar) if is_scalar_ is not None: if not is_scalar_: raise ValueError("distribution must be scalar; " "distribution.{}=False is not True".format(name)) elif validate_args: assertions.append( assert_util.assert_equal( is_scalar, True, message=("distribution must be scalar; " "distribution.{}=False is not True".format(name)))) check_is_scalar(distribution.is_scalar_event(), "is_scalar_event") check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch") return assertions
Helper which checks validity of a scalar `distribution` init arg. Valid here means: * `distribution` has scalar batch and event shapes. * `distribution` is `FULLY_REPARAMETERIZED` * `distribution` has expected dtype. Args: distribution: `Distribution`-like object. expected_base_dtype: `TensorFlow` `dtype`. validate_args: Python `bool`. Whether to do additional checks: (i) check that reparameterization_type is `FULLY_REPARAMETERIZED`. (ii) add `tf.Assert` ops to the graph to enforce that distribution is scalar in the event that this cannot be determined statically. Returns: List of `tf.Assert` ops to run to enforce validity checks that could not be statically determined. Empty if `not validate_args`. Raises: ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED ValueError: If distribution is statically determined to not have both scalar batch and scalar event shapes.
def _event_size(event_shape, name=None): """Computes the number of elements in a tensor with shape `event_shape`. Args: event_shape: A tensor shape. name: The name to use for the tensor op to compute the number of elements (if such an op needs to be created). Returns: event_size: The number of elements in `tensor_shape`. Returns a numpy int when the number of elements can be computed immediately. Otherwise, returns a scalar tensor. """ with tf.compat.v1.name_scope(name, 'event_size', [event_shape]): event_shape = tf.convert_to_tensor( value=event_shape, dtype=tf.int32, name='event_shape') event_shape_const = tf.get_static_value(event_shape) if event_shape_const is not None: return np.prod(event_shape_const) else: return tf.reduce_prod(input_tensor=event_shape)
Computes the number of elements in a tensor with shape `event_shape`. Args: event_shape: A tensor shape. name: The name to use for the tensor op to compute the number of elements (if such an op needs to be created). Returns: event_size: The number of elements in `tensor_shape`. Returns a numpy int when the number of elements can be computed immediately. Otherwise, returns a scalar tensor.
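A small sketch of the helper above, assuming a TensorFlow install where the tf.compat.v1 APIs it uses behave as they did when this code was written; with a statically known shape the result is a plain numpy integer.

import tensorflow as tf

_event_size([2, 3])               # -> 6, computed statically as a numpy int
_event_size(tf.constant([2, 3]))  # -> 6 as well, since the constant's value is static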
def update(self, widget, widget_tree): """ for the selected widget are listed the relative signals for each signal there is a dropdown containing all the widgets the user will select the widget that have to listen a specific event """ self.listeners_list = [] self.build_widget_list_from_tree(widget_tree) self.label.set_text('Signal connections: ' + widget.attributes['editor_varname']) #del self.container self.container = gui.VBox(width='100%', height='90%') self.container.style['justify-content'] = 'flex-start' self.container.style['overflow-y'] = 'scroll' self.append(self.container, 'container') ##for all the events of this widget #isclass instead of ismethod because event methods are replaced with ClassEventConnector for (setOnEventListenerFuncname,setOnEventListenerFunc) in inspect.getmembers(widget): #if the member is decorated by decorate_set_on_listener and the function is referred to this event if hasattr(setOnEventListenerFunc, '_event_info'): self.container.append( SignalConnection(widget, self.listeners_list, setOnEventListenerFuncname, setOnEventListenerFunc, width='100%') )
For the selected widget, the relative signals are listed. For each signal there is a dropdown containing all the widgets; the user selects the widget that has to listen for a specific event.
def parse_mixed_delim_str(line): """Turns .obj face index string line into [verts, texcoords, normals] numeric tuples.""" arrs = [[], [], []] for group in line.split(' '): for col, coord in enumerate(group.split('/')): if coord: arrs[col].append(int(coord)) return [tuple(arr) for arr in arrs]
Turns .obj face index string line into [verts, texcoords, normals] numeric tuples.
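A quick usage sketch of the pure helper above on an illustrative .obj face line:

verts, texcoords, normals = parse_mixed_delim_str("1/2/3 4/5/6 7/8/9")
# verts == (1, 4, 7), texcoords == (2, 5, 8), normals == (3, 6, 9)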
def rst_table(data, schema=None): """ Creates a reStructuredText simple table (list of strings) from a list of lists. """ # Process multi-rows (replaced by rows with empty columns when needed) pdata = [] for row in data: prow = [el if isinstance(el, list) else [el] for el in row] pdata.extend(pr for pr in xzip_longest(*prow, fillvalue="")) # Find the columns sizes sizes = [max(len("{0}".format(el)) for el in column) for column in xzip(*pdata)] sizes = [max(size, len(sch)) for size, sch in xzip(sizes, schema)] # Creates the title and border rows if schema is None: schema = pdata[0] pdata = pdata[1:] border = " ".join("=" * size for size in sizes) titles = " ".join("{1:^{0}}".format(*pair) for pair in xzip(sizes, schema)) # Creates the full table and returns rows = [border, titles, border] rows.extend(" ".join("{1:<{0}}".format(*pair) for pair in xzip(sizes, row)) for row in pdata) rows.append(border) return rows
Creates a reStructuredText simple table (list of strings) from a list of lists.
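A usage sketch for rst_table, assuming xzip and xzip_longest are the zip / itertools.zip_longest aliases used elsewhere in this code base (those names appear in the function body but are not defined here):

rows = rst_table([["Alice", 30], ["Bob", 4]], schema=["name", "age"])
print("\n".join(rows))
# ===== ===
# name  age
# ===== ===
# Alice 30
# Bob   4
# ===== ===
# (cells are left-aligned and padded to the column width, so data rows carry trailing spaces)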
def _set_dscp_exp_state(self, v, load=False): """ Setter method for dscp_exp_state, mapped from YANG variable /dscp_exp_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_dscp_exp_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dscp_exp_state() directly. YANG Description: dscp_exp """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=dscp_exp_state.dscp_exp_state, is_container='container', presence=False, yang_name="dscp-exp-state", rest_name="dscp-exp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-dscp-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """dscp_exp_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=dscp_exp_state.dscp_exp_state, is_container='container', presence=False, yang_name="dscp-exp-state", rest_name="dscp-exp-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'qos-dscp-exp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='container', is_config=True)""", }) self.__dscp_exp_state = t if hasattr(self, '_set'): self._set()
Setter method for dscp_exp_state, mapped from YANG variable /dscp_exp_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_dscp_exp_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dscp_exp_state() directly. YANG Description: dscp_exp
def set_dtreat_interp_indch(self, indch=None): """ Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X'] """ lC = [indch is None, type(indch) in [np.ndarray,list], type(indch) is dict] assert any(lC) if lC[2]: lc = [type(k) is int and k<self._ddataRef['nt'] for k in indch.keys()] assert all(lc) for k in indch.keys(): assert hasattr(indch[k],'__iter__') indch[k] = _format_ind(indch[k], n=self._ddataRef['nch']) elif lC[1]: indch = np.asarray(indch) assert indch.ndim==1 indch = _format_ind(indch, n=self._ddataRef['nch']) self._dtreat['interp-indch'] = indch self._ddata['uptodate'] = False
Set the indices of the channels for which to interpolate data The index can be provided as: - A 1d np.ndarray of boolean or int indices of channels => interpolate data at these channels for all times - A dict with: * keys = int indices of times * values = array of int indices of chan. for which to interpolate Time indices refer to self.ddataRef['t'] Channel indices refer to self.ddataRef['X']
def get_storage_pool_by_name(self, name): """ Get ScaleIO StoragePool object by its name :param name: Name of StoragePool :return: ScaleIO StoragePool object :raise KeyError: No StoragePool with specified name found :rtype: StoragePool object """ for storage_pool in self.conn.storage_pools: if storage_pool.name == name: return storage_pool raise KeyError("Storage pool of that name not found")
Get ScaleIO StoragePool object by its name :param name: Name of StoragePool :return: ScaleIO StoragePool object :raise KeyError: No StoragePool with specified name found :rtype: StoragePool object
def inspect_select_calculation(self): """Inspect the result of the CifSelectCalculation, verifying that it produced a CifData output node.""" try: node = self.ctx.cif_select self.ctx.cif = node.outputs.cif except exceptions.NotExistent: self.report('aborting: CifSelectCalculation<{}> did not return the required cif output'.format(node.uuid)) return self.exit_codes.ERROR_CIF_SELECT_FAILED
Inspect the result of the CifSelectCalculation, verifying that it produced a CifData output node.
def _route_message(self, message, data): """ Route message to any handlers on the message namespace """ # route message to handlers if message.namespace in self._handlers: # debug messages if message.namespace != NS_HEARTBEAT: self.logger.debug( "[%s:%s] Received: %s", self.fn or self.host, self.port, _message_to_string(message, data)) # message handlers try: handled = \ self._handlers[message.namespace].receive_message( message, data) if not handled: if data.get(REQUEST_ID) not in self._request_callbacks: self.logger.debug( "[%s:%s] Message unhandled: %s", self.fn or self.host, self.port, _message_to_string(message, data)) except Exception: # pylint: disable=broad-except self.logger.exception( ("[%s:%s] Exception caught while sending message to " "controller %s: %s"), self.fn or self.host, self.port, type(self._handlers[message.namespace]).__name__, _message_to_string(message, data)) else: self.logger.debug( "[%s:%s] Received unknown namespace: %s", self.fn or self.host, self.port, _message_to_string(message, data))
Route message to any handlers on the message namespace
def file_add_tags(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /file-xxxx/addTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FaddTags """ return DXHTTPRequest('/%s/addTags' % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /file-xxxx/addTags API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Tags#API-method%3A-%2Fclass-xxxx%2FaddTags
def destroy(name, conn=None, call=None): ''' Delete a single VM ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if not conn: conn = get_conn() # pylint: disable=E0602 node = get_node(conn, name) profiles = get_configured_provider()['profiles'] # pylint: disable=E0602 if node is None: log.error('Unable to find the VM %s', name) profile = None if 'metadata' in node.extra and 'profile' in node.extra['metadata']: profile = node.extra['metadata']['profile'] flush_mine_on_destroy = False if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]: flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy'] if flush_mine_on_destroy: log.info('Clearing Salt Mine: %s', name) mopts_ = salt.config.DEFAULT_MINION_OPTS conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1]) mopts_.update( salt.config.minion_config(os.path.join(conf_path, 'minion')) ) client = salt.client.get_local_client(mopts_) minions = client.cmd(name, 'mine.flush') log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy) log.info('Destroying VM: %s', name) ret = conn.destroy_node(node) if ret: log.info('Destroyed VM: %s', name) # Fire destroy action __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) if __opts__['delete_sshkeys'] is True: public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips')) if public_ips: salt.utils.cloud.remove_sshkey(public_ips[0]) private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips')) if private_ips: salt.utils.cloud.remove_sshkey(private_ips[0]) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return True log.error('Failed to Destroy VM: %s', name) return False
Delete a single VM
def get_dicts(self): """Gets dicts in file :return: (generator of) of dicts with data from .csv file """ reader = csv.DictReader(open(self.path, "r", encoding=self.encoding)) for row in reader: if row: yield row
Gets dicts in file :return: (generator of) dicts with data from .csv file
def as_local_model(self): """ Makes sure our optimizer is wrapped into the global_optimizer meta. This is only relevant for distributed RL. """ super(MemoryModel, self).as_local_model() self.optimizer_spec = dict( type='global_optimizer', optimizer=self.optimizer_spec )
Makes sure our optimizer is wrapped into the global_optimizer meta. This is only relevant for distributed RL.
def _get_and_start_work(self): "return (async_result, work_unit) or (None, None)" worker_id = nice_identifier() work_unit = self.task_master.get_work(worker_id, available_gb=self.available_gb()) if work_unit is None: return None, None async_result = self.pool.apply_async( run_worker, (HeadlessWorker, self.task_master.registry.config, worker_id, work_unit.work_spec_name, work_unit.key), callback=self._finish_callback) return async_result, work_unit
return (async_result, work_unit) or (None, None)
def render_to_response(self, obj, **response_kwargs): """ Returns an ``HttpResponse`` object instance with Content-Type: application/json. The response body will be the return value of ``self.serialize(obj)`` """ return HttpResponse(self.serialize(obj), content_type='application/json', **response_kwargs)
Returns an ``HttpResponse`` object instance with Content-Type: application/json. The response body will be the return value of ``self.serialize(obj)``
def _assign_new_thread_id(self, recursive=True): """ Assigns a new thread id to the task. :type recursive: bool :param recursive: Whether to assign the id to children recursively. :rtype: bool :returns: The new thread id. """ self.__class__.thread_id_pool += 1 self.thread_id = self.__class__.thread_id_pool if not recursive: return self.thread_id for child in self: child.thread_id = self.thread_id return self.thread_id
Assigns a new thread id to the task. :type recursive: bool :param recursive: Whether to assign the id to children recursively. :rtype: int :returns: The new thread id.
def start_notifications(self): """Start the notifications thread. If an external callback is not set up (using `update_webhook`) then calling this function is mandatory to get or set resource. .. code-block:: python >>> api.start_notifications() >>> print(api.get_resource_value(device, path)) Some value >>> api.stop_notifications() :returns: void """ with self._notifications_lock: if self.has_active_notification_thread: return api = self._get_api(mds.NotificationsApi) self._notifications_thread = NotificationsThread( self._db, self._queues, b64decode=self.b64decode, notifications_api=api, subscription_manager=self.subscribe, ) self._notifications_thread.daemon = True self._notifications_thread.start()
Start the notifications thread. If an external callback is not set up (using `update_webhook`) then calling this function is mandatory to get or set resource. .. code-block:: python >>> api.start_notifications() >>> print(api.get_resource_value(device, path)) Some value >>> api.stop_notifications() :returns: void
def choose_include_text(s, params, source_path): """Given the contents of a file and !inc[these params], return matching lines If there was a problem matching parameters, return empty list. :param s: file's text :param params: string like "start-at=foo&end-at=bar" :param source_path: path to source .md. Useful in error messages """ lines = s.splitlines() start_after = None start_at = None end_before = None end_at = None for term in params.split("&"): if '=' in term: param, value = [p.strip() for p in term.split('=', 1)] else: param, value = term.strip(), '' if not param: continue if param == "start-after": start_after = value elif param == "start-at": start_at = value elif param == "end-before": end_before = value elif param == "end-at": end_at = value else: raise TaskError('Invalid include directive "{0}"' ' in {1}'.format(params, source_path)) chosen_lines = [] # two loops, one waits to "start recording", one "records" for line_ix in range(0, len(lines)): line = lines[line_ix] if (not start_at) and (not start_after): # if we didn't set a start-* param, don't wait to start break if start_at is not None and start_at in line: break if start_after is not None and start_after in line: line_ix += 1 break else: # never started recording: return '' for line_ix in range(line_ix, len(lines)): line = lines[line_ix] if end_before is not None and end_before in line: break chosen_lines.append(line) if end_at is not None and end_at in line: break else: if (end_before or end_at): # we had an end- filter, but never encountered it. return '' return '\n'.join(chosen_lines)
Given the contents of a file and !inc[these params], return the matching lines joined as a single string. If there was a problem matching parameters, return an empty string. :param s: file's text :param params: string like "start-at=foo&end-at=bar" :param source_path: path to source .md. Useful in error messages
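A usage sketch of choose_include_text on an illustrative snippet (the markers and file name are made up for the example):

text = "intro\n# BEGIN\nkeep me\nkeep me too\n# END\noutro"
chosen = choose_include_text(text, "start-after=BEGIN&end-before=END", "example.md")
# chosen == "keep me\nkeep me too"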
def url_for(self, operation, _external=True, **kwargs): """ Construct a URL for an operation against a resource. :param kwargs: additional arguments for URL path expansion, which are passed to flask.url_for. In particular, _external=True produces absolute url. """ return url_for(self.endpoint_for(operation), _external=_external, **kwargs)
Construct a URL for an operation against a resource. :param kwargs: additional arguments for URL path expansion, which are passed to flask.url_for. In particular, _external=True produces absolute url.
def is_newer_b(a, bfiles): """ check that all b files have been modified more recently than a """ if isinstance(bfiles, basestring): bfiles = [bfiles] if not op.exists(a): return False if not all(op.exists(b) for b in bfiles): return False atime = os.stat(a).st_mtime # modification time for b in bfiles: # a has been modified since if atime > os.stat(b).st_mtime: return False return True
check that all b files have been modified more recently than a
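A hedged usage sketch (op is os.path in the module this comes from; the file names and the rebuild step are illustrative, not from the source):

if not is_newer_b("genome.fasta", ["genome.fasta.fai", "genome.sizes"]):
    # at least one derived file is missing or older than the input, so regenerate it
    rebuild_index()  # hypothetical helper, shown only to illustrate the check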
def from_opcode(cls, opcode, arg=_no_arg): """ Create an instruction from an opcode and raw argument. Parameters ---------- opcode : int Opcode for the instruction to create. arg : int, optional The argument for the instruction. Returns ------- intsr : Instruction An instance of the instruction named by ``opcode``. """ return type(cls)(opname[opcode], (cls,), {}, opcode=opcode)(arg)
Create an instruction from an opcode and raw argument. Parameters ---------- opcode : int Opcode for the instruction to create. arg : int, optional The argument for the instruction. Returns ------- instr : Instruction An instance of the instruction named by ``opcode``.
def _parse_errback(self, error): """ Parse an error from an XML-RPC call. raises: ``IOError`` when the Twisted XML-RPC connection times out. raises: ``BugzillaNotFoundException`` raises: ``BugzillaNotAuthorizedException`` raises: ``BugzillaException`` if we got a response from the XML-RPC server but it is not one of the ``xmlrpc.Fault``s above that we know about. raises: ``Exception`` if it is not one of the above. """ if isinstance(error.value, IOError): raise error.value if hasattr(xmlrpc, 'Fault'): # Python 2: fault = xmlrpc.Fault else: fault = xmlrpc.client.Fault if isinstance(error.value, fault): if error.value.faultCode == 101: raise BugzillaNotFoundException(error.value.faultString) if error.value.faultCode == 102: raise BugzillaNotAuthorizedException(error.value.faultString) if error.value.faultCode == 32000: raise BugzillaTokenExpiredException(error.value.faultString) raise BugzillaException(error.value) # We don't know what this is, so just raise it. raise error
Parse an error from an XML-RPC call. raises: ``IOError`` when the Twisted XML-RPC connection times out. raises: ``BugzillaNotFoundException`` raises: ``BugzillaNotAuthorizedException`` raises: ``BugzillaException`` if we got a response from the XML-RPC server but it is not one of the ``xmlrpc.Fault``s above that we know about. raises: ``Exception`` if it is not one of the above.
def add_eager_constraints(self, models): """ Set the constraints for an eager load of the relation. :type models: list """ super(MorphOneOrMany, self).add_eager_constraints(models) self._query.where(self._morph_type, self._morph_class)
Set the constraints for an eager load of the relation. :type models: list
Below is the the instruction that describes the task: ### Input: Set the constraints for an eager load of the relation. :type models: list ### Response: def add_eager_constraints(self, models): """ Set the constraints for an eager load of the relation. :type models: list """ super(MorphOneOrMany, self).add_eager_constraints(models) self._query.where(self._morph_type, self._morph_class)
def parse_description(s): """ Returns a dictionary based on the FASTA header, assuming JCVI data """ s = "".join(s.split()[1:]).replace("/", ";") a = parse_qs(s) return a
Returns a dictionary based on the FASTA header, assuming JCVI data
Below is the the instruction that describes the task: ### Input: Returns a dictionary based on the FASTA header, assuming JCVI data ### Response: def parse_description(s): """ Returns a dictionary based on the FASTA header, assuming JCVI data """ s = "".join(s.split()[1:]).replace("/", ";") a = parse_qs(s) return a
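A self-contained sketch of the same header-to-dict transformation, using a made-up FASTA header; note that the empty-string join strips whitespace from multi-word values, and recent Python versions need an explicit separator argument for semicolon-delimited queries.

from urllib.parse import parse_qs

header = ">Medtr1g004940 /product=hypothetical protein /note=expressed"  # invented example header
payload = "".join(header.split()[1:]).replace("/", ";")
# Whitespace disappears in the join, so 'hypothetical protein' becomes 'hypotheticalprotein'.
print(parse_qs(payload, separator=";"))  # {'product': ['hypotheticalprotein'], 'note': ['expressed']}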
def PHASE(angle, qubit): """Produces the PHASE gate:: PHASE(phi) = [[1, 0], [0, exp(1j * phi)]] This is the same as the RZ gate. :param angle: The angle to rotate around the z-axis on the bloch sphere. :param qubit: The qubit to apply the gate to. :returns: A Gate object. """ return Gate(name="PHASE", params=[angle], qubits=[unpack_qubit(qubit)])
Produces the PHASE gate:: PHASE(phi) = [[1, 0], [0, exp(1j * phi)]] This is the same as the RZ gate. :param angle: The angle to rotate around the z-axis on the bloch sphere. :param qubit: The qubit to apply the gate to. :returns: A Gate object.
Below is the the instruction that describes the task: ### Input: Produces the PHASE gate:: PHASE(phi) = [[1, 0], [0, exp(1j * phi)]] This is the same as the RZ gate. :param angle: The angle to rotate around the z-axis on the bloch sphere. :param qubit: The qubit to apply the gate to. :returns: A Gate object. ### Response: def PHASE(angle, qubit): """Produces the PHASE gate:: PHASE(phi) = [[1, 0], [0, exp(1j * phi)]] This is the same as the RZ gate. :param angle: The angle to rotate around the z-axis on the bloch sphere. :param qubit: The qubit to apply the gate to. :returns: A Gate object. """ return Gate(name="PHASE", params=[angle], qubits=[unpack_qubit(qubit)])
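A short usage sketch, assuming the pyquil package is installed and exposes Program and PHASE as in its public API:

import math
from pyquil import Program
from pyquil.gates import PHASE

prog = Program()
prog += PHASE(math.pi / 2, 0)  # rotate qubit 0 about the z-axis by pi/2
print(prog)                    # prints the Quil text, roughly: PHASE(pi/2) 0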
def update_query(self, query_update, project, query, undelete_descendants=None): """UpdateQuery. [Preview API] Update a query or a folder. This allows you to update, rename and move queries and folders. :param :class:`<QueryHierarchyItem> <azure.devops.v5_1.work_item_tracking.models.QueryHierarchyItem>` query_update: The query to update. :param str project: Project ID or project name :param str query: The ID or path for the query to update. :param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants. :rtype: :class:`<QueryHierarchyItem> <azure.devops.v5_1.work-item-tracking.models.QueryHierarchyItem>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if query is not None: route_values['query'] = self._serialize.url('query', query, 'str') query_parameters = {} if undelete_descendants is not None: query_parameters['$undeleteDescendants'] = self._serialize.query('undelete_descendants', undelete_descendants, 'bool') content = self._serialize.body(query_update, 'QueryHierarchyItem') response = self._send(http_method='PATCH', location_id='a67d190c-c41f-424b-814d-0e906f659301', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('QueryHierarchyItem', response)
UpdateQuery. [Preview API] Update a query or a folder. This allows you to update, rename and move queries and folders. :param :class:`<QueryHierarchyItem> <azure.devops.v5_1.work_item_tracking.models.QueryHierarchyItem>` query_update: The query to update. :param str project: Project ID or project name :param str query: The ID or path for the query to update. :param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants. :rtype: :class:`<QueryHierarchyItem> <azure.devops.v5_1.work-item-tracking.models.QueryHierarchyItem>`
Below is the the instruction that describes the task: ### Input: UpdateQuery. [Preview API] Update a query or a folder. This allows you to update, rename and move queries and folders. :param :class:`<QueryHierarchyItem> <azure.devops.v5_1.work_item_tracking.models.QueryHierarchyItem>` query_update: The query to update. :param str project: Project ID or project name :param str query: The ID or path for the query to update. :param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants. :rtype: :class:`<QueryHierarchyItem> <azure.devops.v5_1.work-item-tracking.models.QueryHierarchyItem>` ### Response: def update_query(self, query_update, project, query, undelete_descendants=None): """UpdateQuery. [Preview API] Update a query or a folder. This allows you to update, rename and move queries and folders. :param :class:`<QueryHierarchyItem> <azure.devops.v5_1.work_item_tracking.models.QueryHierarchyItem>` query_update: The query to update. :param str project: Project ID or project name :param str query: The ID or path for the query to update. :param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants. :rtype: :class:`<QueryHierarchyItem> <azure.devops.v5_1.work-item-tracking.models.QueryHierarchyItem>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if query is not None: route_values['query'] = self._serialize.url('query', query, 'str') query_parameters = {} if undelete_descendants is not None: query_parameters['$undeleteDescendants'] = self._serialize.query('undelete_descendants', undelete_descendants, 'bool') content = self._serialize.body(query_update, 'QueryHierarchyItem') response = self._send(http_method='PATCH', location_id='a67d190c-c41f-424b-814d-0e906f659301', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('QueryHierarchyItem', response)
def set_status_return_level(self, srl_for_id, **kwargs): """ Sets status return level to the specified motors. """ convert = kwargs['convert'] if 'convert' in kwargs else self._convert if convert: srl_for_id = dict(zip(srl_for_id.keys(), [('never', 'read', 'always').index(s) for s in srl_for_id.values()])) self._set_status_return_level(srl_for_id, convert=False)
Sets status return level to the specified motors.
Below is the the instruction that describes the task: ### Input: Sets status return level to the specified motors. ### Response: def set_status_return_level(self, srl_for_id, **kwargs): """ Sets status return level to the specified motors. """ convert = kwargs['convert'] if 'convert' in kwargs else self._convert if convert: srl_for_id = dict(zip(srl_for_id.keys(), [('never', 'read', 'always').index(s) for s in srl_for_id.values()])) self._set_status_return_level(srl_for_id, convert=False)
def set_default_pos(self, defaultPos): """Set the default starting location of our character.""" self.coords = defaultPos self.velocity = r.Vector2() self.desired_position = defaultPos r.Ragnarok.get_world().Camera.pan = self.coords r.Ragnarok.get_world().Camera.desired_pan = self.coords
Set the default starting location of our character.
Below is the the instruction that describes the task: ### Input: Set the default starting location of our character. ### Response: def set_default_pos(self, defaultPos): """Set the default starting location of our character.""" self.coords = defaultPos self.velocity = r.Vector2() self.desired_position = defaultPos r.Ragnarok.get_world().Camera.pan = self.coords r.Ragnarok.get_world().Camera.desired_pan = self.coords
def content_children(self): """ A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld). """ text_types = {CT_RegularTextRun, CT_TextLineBreak, CT_TextField} return tuple(elm for elm in self if type(elm) in text_types)
A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld).
Below is the the instruction that describes the task: ### Input: A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld). ### Response: def content_children(self): """ A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld). """ text_types = {CT_RegularTextRun, CT_TextLineBreak, CT_TextField} return tuple(elm for elm in self if type(elm) in text_types)
def order_book(self, symbol, parameters=None): """ curl "https://api.bitfinex.com/v1/book/btcusd" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]} The 'bids' and 'asks' arrays will have multiple bid and ask dicts. Optional parameters limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50. limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50. eg. curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]} """ data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters)) for type_ in data.keys(): for list_ in data[type_]: for key, value in list_.items(): list_[key] = float(value) return data
curl "https://api.bitfinex.com/v1/book/btcusd" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]} The 'bids' and 'asks' arrays will have multiple bid and ask dicts. Optional parameters limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50. limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50. eg. curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]}
Below is the the instruction that describes the task: ### Input: curl "https://api.bitfinex.com/v1/book/btcusd" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]} The 'bids' and 'asks' arrays will have multiple bid and ask dicts. Optional parameters limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50. limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50. eg. curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]} ### Response: def order_book(self, symbol, parameters=None): """ curl "https://api.bitfinex.com/v1/book/btcusd" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]} The 'bids' and 'asks' arrays will have multiple bid and ask dicts. Optional parameters limit_bids (int): Optional. Limit the number of bids returned. May be 0 in which case the array of bids is empty. Default is 50. limit_asks (int): Optional. Limit the number of asks returned. May be 0 in which case the array of asks is empty. Default is 50. eg. curl "https://api.bitfinex.com/v1/book/btcusd?limit_bids=1&limit_asks=0" {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],"asks":[]} """ data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters)) for type_ in data.keys(): for list_ in data[type_]: for key, value in list_.items(): list_[key] = float(value) return data
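The string-to-float post-processing step can be checked in isolation; here is a self-contained sketch on a hypothetical order-book payload:

data = {
    "bids": [{"price": "561.1101", "amount": "0.985", "timestamp": "1395557729.0"}],
    "asks": [{"price": "562.9999", "amount": "0.985", "timestamp": "1395557711.0"}],
}
for side in data:                      # 'bids' and 'asks'
    for entry in data[side]:
        for key, value in entry.items():
            entry[key] = float(value)  # every field becomes numeric
print(data["bids"][0]["price"] + 1)    # 562.1101, so arithmetic now works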
def format_authors(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `list` of `str` Sequence of author names in the specified output markup format. """ formatted_authors = [] for latex_author in self.authors: formatted_author = convert_lsstdoc_tex( latex_author, format, deparagraph=deparagraph, mathjax=mathjax, smart=smart, extra_args=extra_args) # removes Pandoc's terminal newlines formatted_author = formatted_author.strip() formatted_authors.append(formatted_author) return formatted_authors
Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `list` of `str` Sequence of author names in the specified output markup format.
Below is the the instruction that describes the task: ### Input: Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `list` of `str` Sequence of author names in the specified output markup format. ### Response: def format_authors(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document authors in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `list` of `str` Sequence of author names in the specified output markup format. """ formatted_authors = [] for latex_author in self.authors: formatted_author = convert_lsstdoc_tex( latex_author, format, deparagraph=deparagraph, mathjax=mathjax, smart=smart, extra_args=extra_args) # removes Pandoc's terminal newlines formatted_author = formatted_author.strip() formatted_authors.append(formatted_author) return formatted_authors
def delete_vm(name, datacenter, placement=None, power_off=False, service_instance=None): ''' Deletes a virtual machine defined by name and placement name Name of the virtual machine datacenter Datacenter of the virtual machine placement Placement information of the virtual machine service_instance vCenter service instance for connection and configuration .. code-block:: bash salt '*' vsphere.delete_vm name=my_vm datacenter=my_datacenter ''' results = {} schema = ESXVirtualMachineDeleteSchema.serialize() try: jsonschema.validate({'name': name, 'datacenter': datacenter, 'placement': placement}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) (results, vm_ref) = _remove_vm(name, datacenter, service_instance=service_instance, placement=placement, power_off=power_off) salt.utils.vmware.delete_vm(vm_ref) results['deleted_vm'] = True return results
Deletes a virtual machine defined by name and placement name Name of the virtual machine datacenter Datacenter of the virtual machine placement Placement information of the virtual machine service_instance vCenter service instance for connection and configuration .. code-block:: bash salt '*' vsphere.delete_vm name=my_vm datacenter=my_datacenter
Below is the the instruction that describes the task: ### Input: Deletes a virtual machine defined by name and placement name Name of the virtual machine datacenter Datacenter of the virtual machine placement Placement information of the virtual machine service_instance vCenter service instance for connection and configuration .. code-block:: bash salt '*' vsphere.delete_vm name=my_vm datacenter=my_datacenter ### Response: def delete_vm(name, datacenter, placement=None, power_off=False, service_instance=None): ''' Deletes a virtual machine defined by name and placement name Name of the virtual machine datacenter Datacenter of the virtual machine placement Placement information of the virtual machine service_instance vCenter service instance for connection and configuration .. code-block:: bash salt '*' vsphere.delete_vm name=my_vm datacenter=my_datacenter ''' results = {} schema = ESXVirtualMachineDeleteSchema.serialize() try: jsonschema.validate({'name': name, 'datacenter': datacenter, 'placement': placement}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) (results, vm_ref) = _remove_vm(name, datacenter, service_instance=service_instance, placement=placement, power_off=power_off) salt.utils.vmware.delete_vm(vm_ref) results['deleted_vm'] = True return results
def edit(self, data_src, value): """ Edit data layer. :param data_src: Name of :class:`DataSource` to edit. :type data_src: str :param value: Values to edit. :type value: dict """ # check if opening file if 'filename' in value: items = [k for k, v in self.reg.data_source.iteritems() if v == data_src] self.reg.unregister(items) # remove items from Registry # open file and register new data self.open(data_src, value['filename'], value.get('path')) self.layer[data_src].update(value)
Edit data layer. :param data_src: Name of :class:`DataSource` to edit. :type data_src: str :param value: Values to edit. :type value: dict
Below is the the instruction that describes the task: ### Input: Edit data layer. :param data_src: Name of :class:`DataSource` to edit. :type data_src: str :param value: Values to edit. :type value: dict ### Response: def edit(self, data_src, value): """ Edit data layer. :param data_src: Name of :class:`DataSource` to edit. :type data_src: str :param value: Values to edit. :type value: dict """ # check if opening file if 'filename' in value: items = [k for k, v in self.reg.data_source.iteritems() if v == data_src] self.reg.unregister(items) # remove items from Registry # open file and register new data self.open(data_src, value['filename'], value.get('path')) self.layer[data_src].update(value)
def inclusions_validation(tree:BubbleTree) -> iter: """Yield message about inclusions inconsistancies""" # search for powernode overlapping for one, two in it.combinations(tree.inclusions, 2): assert len(one) == len(one.strip()) assert len(two) == len(two.strip()) one_inc = set(included(one, tree.inclusions)) two_inc = set(included(two, tree.inclusions)) common_inc = one_inc & two_inc if len(common_inc) == one_inc: if not two in one_inc: yield ("ERROR inconsistency in inclusions: {} is both" " included and not included in {}.".format(two, one)) if len(common_inc) == two_inc: if not one in two_inc: yield ("ERROR inconsistency in inclusions: {} is both" " included and not included in {}.".format(one, two)) if len(common_inc) > 0: # one and two are not disjoint if len(common_inc) == len(one_inc) or len(common_inc) == len(two_inc): # one is included in the other pass else: # problem: some nodes are shared, but not all yield ("ERROR overlapping powernodes:" " {} nodes are shared by {} and {}," " which are not in inclusion." " Shared nodes are {}".format( len(common_inc), one, two, common_inc)) for pwn in tree.powernodes(): # search for empty powernodes if len(tree.inclusions[pwn]) == 0: yield ("WARNING empty powernode: {} is defined," " but contains nothing".format(pwn)) # search for singleton powernodes if len(tree.inclusions[pwn]) == 1: yield ("WARNING singleton powernode: {} is defined," " but contains only {}".format(pwn, tree.inclusions[pwn])) # search for cycles nodes_in_cycles = utils.have_cycle(tree.inclusions) if nodes_in_cycles: yield ("ERROR inclusion cycle: the following {}" " nodes are involved: {}".format( len(nodes_in_cycles), set(nodes_in_cycles)))
Yield messages about inclusion inconsistencies
Below is the the instruction that describes the task: ### Input: Yield message about inclusions inconsistancies ### Response: def inclusions_validation(tree:BubbleTree) -> iter: """Yield message about inclusions inconsistancies""" # search for powernode overlapping for one, two in it.combinations(tree.inclusions, 2): assert len(one) == len(one.strip()) assert len(two) == len(two.strip()) one_inc = set(included(one, tree.inclusions)) two_inc = set(included(two, tree.inclusions)) common_inc = one_inc & two_inc if len(common_inc) == one_inc: if not two in one_inc: yield ("ERROR inconsistency in inclusions: {} is both" " included and not included in {}.".format(two, one)) if len(common_inc) == two_inc: if not one in two_inc: yield ("ERROR inconsistency in inclusions: {} is both" " included and not included in {}.".format(one, two)) if len(common_inc) > 0: # one and two are not disjoint if len(common_inc) == len(one_inc) or len(common_inc) == len(two_inc): # one is included in the other pass else: # problem: some nodes are shared, but not all yield ("ERROR overlapping powernodes:" " {} nodes are shared by {} and {}," " which are not in inclusion." " Shared nodes are {}".format( len(common_inc), one, two, common_inc)) for pwn in tree.powernodes(): # search for empty powernodes if len(tree.inclusions[pwn]) == 0: yield ("WARNING empty powernode: {} is defined," " but contains nothing".format(pwn)) # search for singleton powernodes if len(tree.inclusions[pwn]) == 1: yield ("WARNING singleton powernode: {} is defined," " but contains only {}".format(pwn, tree.inclusions[pwn])) # search for cycles nodes_in_cycles = utils.have_cycle(tree.inclusions) if nodes_in_cycles: yield ("ERROR inclusion cycle: the following {}" " nodes are involved: {}".format( len(nodes_in_cycles), set(nodes_in_cycles)))
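The pairwise overlap check at the heart of the validator can be illustrated on toy data; the powernode names and member sets below are invented:

import itertools as it

members = {
    "p1": {"a", "b", "c"},
    "p2": {"b", "c"},      # fully contained in p1, so not reported
    "p3": {"c", "x"},      # shares only "c" with p1 and p2, so reported
}
for one, two in it.combinations(members, 2):
    common = members[one] & members[two]
    # flag pairs that share nodes without one being included in the other
    if common and common not in (members[one], members[two]):
        print("overlapping powernodes:", one, two, "share", common)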
def patch(self, path=None, method='PATCH', **options): """ Equals :meth:`route` with a ``PATCH`` method parameter. """ return self.route(path, method, **options)
Equals :meth:`route` with a ``PATCH`` method parameter.
Below is the the instruction that describes the task: ### Input: Equals :meth:`route` with a ``PATCH`` method parameter. ### Response: def patch(self, path=None, method='PATCH', **options): """ Equals :meth:`route` with a ``PATCH`` method parameter. """ return self.route(path, method, **options)
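A self-contained illustration of the delegation pattern, using a tiny made-up Router class in place of the real application object:

class Router:
    def __init__(self):
        self.rules = []

    def route(self, path=None, method='GET', **options):
        def decorator(callback):
            self.rules.append((method, path, callback))
            return callback
        return decorator

    def patch(self, path=None, method='PATCH', **options):
        # identical to route(), with the HTTP method pinned to PATCH
        return self.route(path, method, **options)

app = Router()

@app.patch('/items/<id>')
def update_item():
    return 'patched'

print(app.rules)  # [('PATCH', '/items/<id>', <function update_item ...>)]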
def soft_threshold(x, threshold, name=None): """Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning """ # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator with tf.compat.v1.name_scope(name, 'soft_threshold', [x, threshold]): x = tf.convert_to_tensor(value=x, name='x') threshold = tf.convert_to_tensor( value=threshold, dtype=x.dtype, name='threshold') return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)
Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning
Below is the the instruction that describes the task: ### Input: Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning ### Response: def soft_threshold(x, threshold, name=None): """Soft Thresholding operator. This operator is defined by the equations ```none { x[i] - gamma, x[i] > gamma SoftThreshold(x, gamma)[i] = { 0, x[i] == gamma { x[i] + gamma, x[i] < -gamma ``` In the context of proximal gradient methods, we have ```none SoftThreshold(x, gamma) = prox_{gamma L1}(x) ``` where `prox` is the proximity operator. Thus the soft thresholding operator is used in proximal gradient descent for optimizing a smooth function with (non-smooth) L1 regularization, as outlined below. The proximity operator is defined as: ```none prox_r(x) = argmin{ r(z) + 0.5 ||x - z||_2**2 : z }, ``` where `r` is a (weakly) convex function, not necessarily differentiable. Because the L2 norm is strictly convex, the above argmin is unique. One important application of the proximity operator is as follows. 
Let `L` be a convex and differentiable function with Lipschitz-continuous gradient. Let `R` be a convex lower semicontinuous function which is possibly nondifferentiable. Let `gamma` be an arbitrary positive real. Then ```none x_star = argmin{ L(x) + R(x) : x } ``` if and only if the fixed-point equation is satisfied: ```none x_star = prox_{gamma R}(x_star - gamma grad L(x_star)) ``` Proximal gradient descent thus typically consists of choosing an initial value `x^{(0)}` and repeatedly applying the update ```none x^{(k+1)} = prox_{gamma^{(k)} R}(x^{(k)} - gamma^{(k)} grad L(x^{(k)})) ``` where `gamma` is allowed to vary from iteration to iteration. Specializing to the case where `R(x) = ||x||_1`, we minimize `L(x) + ||x||_1` by repeatedly applying the update ``` x^{(k+1)} = SoftThreshold(x - gamma grad L(x^{(k)}), gamma) ``` (This idea can also be extended to second-order approximations, although the multivariate case does not have a known closed form like above.) Args: x: `float` `Tensor` representing the input to the SoftThreshold function. threshold: nonnegative scalar, `float` `Tensor` representing the radius of the interval on which each coordinate of SoftThreshold takes the value zero. Denoted `gamma` above. name: Python string indicating the name of the TensorFlow operation. Default value: `'soft_threshold'`. Returns: softthreshold: `float` `Tensor` with the same shape and dtype as `x`, representing the value of the SoftThreshold function. #### References [1]: Yu, Yao-Liang. The Proximity Operator. https://www.cs.cmu.edu/~suvrit/teach/yaoliang_proximity.pdf [2]: Wikipedia Contributors. Proximal gradient methods for learning. _Wikipedia, The Free Encyclopedia_, 2018. https://en.wikipedia.org/wiki/Proximal_gradient_methods_for_learning """ # https://math.stackexchange.com/questions/471339/derivation-of-soft-thresholding-operator with tf.compat.v1.name_scope(name, 'soft_threshold', [x, threshold]): x = tf.convert_to_tensor(value=x, name='x') threshold = tf.convert_to_tensor( value=threshold, dtype=x.dtype, name='threshold') return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)
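A quick numeric sanity check of the operator, written with NumPy so it runs independently of the TensorFlow implementation above:

import numpy as np

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
gamma = 1.0
soft = np.sign(x) * np.maximum(np.abs(x) - gamma, 0.0)
print(soft)  # [-2. -0.  0.  0.  2.] : values within [-gamma, gamma] collapse to zero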
def start_runs( logdir, steps, run_name, thresholds, mask_every_other_prediction=False): """Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1. """ tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(42) # Create a normal distribution layer used to generate true color labels. distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.) # Sample the distribution to generate colors. Lets generate different numbers # of each color. The first dimension is the count of examples. # The calls to sample() are given fixed random seed values that are "magic" # in that they correspond to the default seeds for those ops when the PR # curve test (which depends on this code) was written. We've pinned these # instead of continuing to use the defaults since the defaults are based on # node IDs from the sequence of nodes added to the graph, which can silently # change when this code or any TF op implementations it uses are modified. # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds. # Generate reds. number_of_reds = 100 true_reds = tf.clip_by_value( tf.concat([ 255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)), tf.abs(distribution.sample([number_of_reds, 2], seed=34)) ], axis=1), 0, 255) # Generate greens. number_of_greens = 200 true_greens = tf.clip_by_value( tf.concat([ tf.abs(distribution.sample([number_of_greens, 1], seed=61)), 255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)), tf.abs(distribution.sample([number_of_greens, 1], seed=105)) ], axis=1), 0, 255) # Generate blues. number_of_blues = 150 true_blues = tf.clip_by_value( tf.concat([ tf.abs(distribution.sample([number_of_blues, 2], seed=132)), 255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153)) ], axis=1), 0, 255) # Assign each color a vector of 3 booleans based on its true label. labels = tf.concat([ tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)), tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)), tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)), ], axis=0) # We introduce 3 normal distributions. They are used to predict whether a # color falls under a certain class (based on distances from corners of the # color triangle). The distributions vary per color. We have the distributions # narrow over time. initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)] iteration = tf.compat.v1.placeholder(tf.int32, shape=[]) red_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[0] - iteration, dtype=tf.float32)) green_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[1] - iteration, dtype=tf.float32)) blue_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[2] - iteration, dtype=tf.float32)) # Make predictions (assign 3 probabilities to each color based on each color's # distance to each of the 3 corners). We seek double the area in the right # tail of the normal distribution. 
examples = tf.concat([true_reds, true_greens, true_blues], axis=0) probabilities_colors_are_red = (1 - red_predictor.cdf( tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2 probabilities_colors_are_green = (1 - green_predictor.cdf( tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2 probabilities_colors_are_blue = (1 - blue_predictor.cdf( tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2 predictions = ( probabilities_colors_are_red, probabilities_colors_are_green, probabilities_colors_are_blue ) # This is the crucial piece. We write data required for generating PR curves. # We create 1 summary per class because we create 1 PR curve per class. for i, color in enumerate(('red', 'green', 'blue')): description = ('The probabilities used to create this PR curve are ' 'generated from a normal distribution. Its standard ' 'deviation is initially %0.0f and decreases over time.' % initial_standard_deviations[i]) weights = None if mask_every_other_prediction: # Assign a weight of 0 to every even-indexed prediction. Odd-indexed # predictions are assigned a default weight of 1. consecutive_indices = tf.reshape( tf.range(tf.size(input=predictions[i])), tf.shape(input=predictions[i])) weights = tf.cast(consecutive_indices % 2, dtype=tf.float32) summary.op( name=color, labels=labels[:, i], predictions=predictions[i], num_thresholds=thresholds, weights=weights, display_name='classifying %s' % color, description=description) merged_summary_op = tf.compat.v1.summary.merge_all() events_directory = os.path.join(logdir, run_name) sess = tf.compat.v1.Session() writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph) for step in xrange(steps): feed_dict = { iteration: step, } merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict) writer.add_summary(merged_summary, step) writer.close()
Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1.
Below is the the instruction that describes the task: ### Input: Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1. ### Response: def start_runs( logdir, steps, run_name, thresholds, mask_every_other_prediction=False): """Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1. """ tf.compat.v1.reset_default_graph() tf.compat.v1.set_random_seed(42) # Create a normal distribution layer used to generate true color labels. distribution = tf.compat.v1.distributions.Normal(loc=0., scale=142.) # Sample the distribution to generate colors. Lets generate different numbers # of each color. The first dimension is the count of examples. # The calls to sample() are given fixed random seed values that are "magic" # in that they correspond to the default seeds for those ops when the PR # curve test (which depends on this code) was written. We've pinned these # instead of continuing to use the defaults since the defaults are based on # node IDs from the sequence of nodes added to the graph, which can silently # change when this code or any TF op implementations it uses are modified. # TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds. # Generate reds. number_of_reds = 100 true_reds = tf.clip_by_value( tf.concat([ 255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)), tf.abs(distribution.sample([number_of_reds, 2], seed=34)) ], axis=1), 0, 255) # Generate greens. number_of_greens = 200 true_greens = tf.clip_by_value( tf.concat([ tf.abs(distribution.sample([number_of_greens, 1], seed=61)), 255 - tf.abs(distribution.sample([number_of_greens, 1], seed=82)), tf.abs(distribution.sample([number_of_greens, 1], seed=105)) ], axis=1), 0, 255) # Generate blues. number_of_blues = 150 true_blues = tf.clip_by_value( tf.concat([ tf.abs(distribution.sample([number_of_blues, 2], seed=132)), 255 - tf.abs(distribution.sample([number_of_blues, 1], seed=153)) ], axis=1), 0, 255) # Assign each color a vector of 3 booleans based on its true label. labels = tf.concat([ tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)), tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)), tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)), ], axis=0) # We introduce 3 normal distributions. They are used to predict whether a # color falls under a certain class (based on distances from corners of the # color triangle). The distributions vary per color. We have the distributions # narrow over time. 
initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)] iteration = tf.compat.v1.placeholder(tf.int32, shape=[]) red_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[0] - iteration, dtype=tf.float32)) green_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[1] - iteration, dtype=tf.float32)) blue_predictor = tf.compat.v1.distributions.Normal( loc=0., scale=tf.cast( initial_standard_deviations[2] - iteration, dtype=tf.float32)) # Make predictions (assign 3 probabilities to each color based on each color's # distance to each of the 3 corners). We seek double the area in the right # tail of the normal distribution. examples = tf.concat([true_reds, true_greens, true_blues], axis=0) probabilities_colors_are_red = (1 - red_predictor.cdf( tf.norm(tensor=examples - tf.constant([255., 0, 0]), axis=1))) * 2 probabilities_colors_are_green = (1 - green_predictor.cdf( tf.norm(tensor=examples - tf.constant([0, 255., 0]), axis=1))) * 2 probabilities_colors_are_blue = (1 - blue_predictor.cdf( tf.norm(tensor=examples - tf.constant([0, 0, 255.]), axis=1))) * 2 predictions = ( probabilities_colors_are_red, probabilities_colors_are_green, probabilities_colors_are_blue ) # This is the crucial piece. We write data required for generating PR curves. # We create 1 summary per class because we create 1 PR curve per class. for i, color in enumerate(('red', 'green', 'blue')): description = ('The probabilities used to create this PR curve are ' 'generated from a normal distribution. Its standard ' 'deviation is initially %0.0f and decreases over time.' % initial_standard_deviations[i]) weights = None if mask_every_other_prediction: # Assign a weight of 0 to every even-indexed prediction. Odd-indexed # predictions are assigned a default weight of 1. consecutive_indices = tf.reshape( tf.range(tf.size(input=predictions[i])), tf.shape(input=predictions[i])) weights = tf.cast(consecutive_indices % 2, dtype=tf.float32) summary.op( name=color, labels=labels[:, i], predictions=predictions[i], num_thresholds=thresholds, weights=weights, display_name='classifying %s' % color, description=description) merged_summary_op = tf.compat.v1.summary.merge_all() events_directory = os.path.join(logdir, run_name) sess = tf.compat.v1.Session() writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph) for step in xrange(steps): feed_dict = { iteration: step, } merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict) writer.add_summary(merged_summary, step) writer.close()
def ReportConfiguration(self, f): """Report the boundary configuration details :param f: File (or standard out/err) :return: None """ if BoundaryCheck.chrom != -1: print >> f, BuildReportLine("CHROM", BoundaryCheck.chrom) if len(self.start_bounds) > 0: bounds = ",".join(["%s-%s" % (a[0], a[1]) for a in zip(self.start_bounds, self.end_bounds)]) print >> f, BuildReportLine("SNP BOUNDARY", bounds) if len(self.ignored_rs) > 0: print >> f, BuildReportLine("IGNORED RS", ",".join(self.ignored_rs)) if len(self.target_rs) > 0: print >> f, BuildReportLine("TARGET RS", ",".join(self.target_rs))
Report the boundary configuration details :param f: File (or standard out/err) :return: None
Below is the the instruction that describes the task: ### Input: Report the boundary configuration details :param f: File (or standard out/err) :return: None ### Response: def ReportConfiguration(self, f): """Report the boundary configuration details :param f: File (or standard out/err) :return: None """ if BoundaryCheck.chrom != -1: print >> f, BuildReportLine("CHROM", BoundaryCheck.chrom) if len(self.start_bounds) > 0: bounds = ",".join(["%s-%s" % (a[0], a[1]) for a in zip(self.start_bounds, self.end_bounds)]) print >> f, BuildReportLine("SNP BOUNDARY", bounds) if len(self.ignored_rs) > 0: print >> f, BuildReportLine("IGNORED RS", ",".join(self.ignored_rs)) if len(self.target_rs) > 0: print >> f, BuildReportLine("TARGET RS", ",".join(self.target_rs))
def create_top_level_index_entry(title, max_depth, subtitles): """Function for creating a text entry in index.rst for its content. :param title : Title for the content. :type title: str :param max_depth : Value for max_depth in the top level index content. :type max_depth: int :param subtitles : list of subtitles that is available. :type subtitles: list :return: A text for the content of top level index. :rtype: str """ return_text = title + '\n' dash = '-' * len(title) + '\n' return_text += dash + '\n' return_text += '.. toctree::' + '\n' return_text += ' :maxdepth: ' + str(max_depth) + '\n\n' for subtitle in subtitles: return_text += ' ' + subtitle + '\n\n' return return_text
Function for creating a text entry in index.rst for its content. :param title : Title for the content. :type title: str :param max_depth : Value for max_depth in the top level index content. :type max_depth: int :param subtitles : list of subtitles that is available. :type subtitles: list :return: A text for the content of top level index. :rtype: str
Below is the the instruction that describes the task: ### Input: Function for creating a text entry in index.rst for its content. :param title : Title for the content. :type title: str :param max_depth : Value for max_depth in the top level index content. :type max_depth: int :param subtitles : list of subtitles that is available. :type subtitles: list :return: A text for the content of top level index. :rtype: str ### Response: def create_top_level_index_entry(title, max_depth, subtitles): """Function for creating a text entry in index.rst for its content. :param title : Title for the content. :type title: str :param max_depth : Value for max_depth in the top level index content. :type max_depth: int :param subtitles : list of subtitles that is available. :type subtitles: list :return: A text for the content of top level index. :rtype: str """ return_text = title + '\n' dash = '-' * len(title) + '\n' return_text += dash + '\n' return_text += '.. toctree::' + '\n' return_text += ' :maxdepth: ' + str(max_depth) + '\n\n' for subtitle in subtitles: return_text += ' ' + subtitle + '\n\n' return return_text
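A usage sketch, assuming the function above is in scope; it returns the RST snippet (title, dashed underline, and a toctree listing each subtitle):

entry = create_top_level_index_entry('API Documentation', 2, ['user_guide', 'developer_guide'])
print(entry)  # prints the title, its dashed underline, and a maxdepth-2 toctree with both subtitles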
def pattern_logic_aeidon(): """Return patterns to be used for searching subtitles via aeidon.""" if Config.options.pattern_files: return prep_patterns(Config.options.pattern_files) elif Config.options.regex: return Config.REGEX else: return Config.TERMS
Return patterns to be used for searching subtitles via aeidon.
Below is the the instruction that describes the task: ### Input: Return patterns to be used for searching subtitles via aeidon. ### Response: def pattern_logic_aeidon(): """Return patterns to be used for searching subtitles via aeidon.""" if Config.options.pattern_files: return prep_patterns(Config.options.pattern_files) elif Config.options.regex: return Config.REGEX else: return Config.TERMS
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] catIds = catIds if _isArrayLike(catIds) else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids
Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids
Below is the the instruction that describes the task: ### Input: Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids ### Response: def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if _isArrayLike(imgIds) else [imgIds] catIds = catIds if _isArrayLike(catIds) else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids
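The filter logic can be exercised on a toy annotation list without the COCO data files; the ids, areas and category ids below are invented:

anns = [
    {'id': 1, 'image_id': 10, 'category_id': 3, 'area': 1200.0, 'iscrowd': 0},
    {'id': 2, 'image_id': 10, 'category_id': 5, 'area': 80.0, 'iscrowd': 0},
    {'id': 3, 'image_id': 11, 'category_id': 3, 'area': 640.0, 'iscrowd': 1},
]
cat_ids, area_rng = [3], [100.0, 2000.0]
keep = [a['id'] for a in anns
        if a['category_id'] in cat_ids
        and area_rng[0] < a['area'] < area_rng[1]
        and a['iscrowd'] == 0]
print(keep)  # [1]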
def get(self): """ Get a JSON-ready representation of this Ganalytics. :returns: This Ganalytics, ready for use in a request body. :rtype: dict """ keys = ["enable", "utm_source", "utm_medium", "utm_term", "utm_content", "utm_campaign"] ganalytics = {} for key in keys: value = getattr(self, key, None) if value is not None: if isinstance(value, bool): ganalytics[key] = value else: ganalytics[key] = value.get() return ganalytics
Get a JSON-ready representation of this Ganalytics. :returns: This Ganalytics, ready for use in a request body. :rtype: dict
Below is the the instruction that describes the task: ### Input: Get a JSON-ready representation of this Ganalytics. :returns: This Ganalytics, ready for use in a request body. :rtype: dict ### Response: def get(self): """ Get a JSON-ready representation of this Ganalytics. :returns: This Ganalytics, ready for use in a request body. :rtype: dict """ keys = ["enable", "utm_source", "utm_medium", "utm_term", "utm_content", "utm_campaign"] ganalytics = {} for key in keys: value = getattr(self, key, None) if value is not None: if isinstance(value, bool): ganalytics[key] = value else: ganalytics[key] = value.get() return ganalytics
def evaluate_accuracy(data_iterator, network): """ Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element """ acc = mx.metric.Accuracy() # Iterate through data and label for i, (data, label) in enumerate(data_iterator): # Get the data and label into the GPU data = data.as_in_context(ctx[0]) label = label.as_in_context(ctx[0]) # Get network's output which is a probability distribution # Apply argmax on the probability distribution to get network's classification. output = network(data) predictions = nd.argmax(output, axis=1) # Give network's prediction and the correct label to update the metric acc.update(preds=predictions, labels=label) # Return the accuracy return acc.get()[1]
Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element
Below is the the instruction that describes the task: ### Input: Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element ### Response: def evaluate_accuracy(data_iterator, network): """ Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- tuple of array element """ acc = mx.metric.Accuracy() # Iterate through data and label for i, (data, label) in enumerate(data_iterator): # Get the data and label into the GPU data = data.as_in_context(ctx[0]) label = label.as_in_context(ctx[0]) # Get network's output which is a probability distribution # Apply argmax on the probability distribution to get network's classification. output = network(data) predictions = nd.argmax(output, axis=1) # Give network's prediction and the correct label to update the metric acc.update(preds=predictions, labels=label) # Return the accuracy return acc.get()[1]
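A minimal sketch of the metric bookkeeping, assuming mxnet is installed (runs on CPU; the toy outputs and labels are made up):

import mxnet as mx
from mxnet import nd

acc = mx.metric.Accuracy()
output = nd.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # fake network outputs for 3 examples
labels = nd.array([1, 0, 0])
acc.update(preds=nd.argmax(output, axis=1), labels=labels)
print(acc.get())  # ('accuracy', 0.666...), since 2 of 3 predictions match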
def get_version(self, dependency): """Return the installed version parsing the output of 'pip show'.""" logger.debug("getting installed version for %s", dependency) stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)]) version = [line for line in stdout if line.startswith('Version:')] if len(version) == 1: version = version[0].strip().split()[1] logger.debug("Installed version of %s is: %s", dependency, version) return version else: logger.error('Fades is having problems getting the installed version. ' 'Run with -v or check the logs for details') return ''
Return the installed version parsing the output of 'pip show'.
Below is the the instruction that describes the task: ### Input: Return the installed version parsing the output of 'pip show'. ### Response: def get_version(self, dependency): """Return the installed version parsing the output of 'pip show'.""" logger.debug("getting installed version for %s", dependency) stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)]) version = [line for line in stdout if line.startswith('Version:')] if len(version) == 1: version = version[0].strip().split()[1] logger.debug("Installed version of %s is: %s", dependency, version) return version else: logger.error('Fades is having problems getting the installed version. ' 'Run with -v or check the logs for details') return ''
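The version-extraction step above boils down to scanning the captured lines of `pip show`; a self-contained sketch with made-up output:

# Pretend this is what helpers.logged_exec([pip, "show", "requests"]) returned.
fake_stdout = [
    "Name: requests",
    "Version: 2.31.0",
    "Summary: Python HTTP for Humans.",
]

version_lines = [line for line in fake_stdout if line.startswith("Version:")]
if len(version_lines) == 1:
    version = version_lines[0].strip().split()[1]
    print(version)   # -> 2.31.0
else:
    print("could not determine installed version")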
def id_to_word(self, word_id):
    """Returns the word string of an integer word id."""
    if word_id >= len(self.reverse_vocab):
        return self.reverse_vocab[self.unk_id]
    else:
        return self.reverse_vocab[word_id]

Returns the word string of an integer word id.

Below is the the instruction that describes the task:
### Input:
Returns the word string of an integer word id.
### Response:
def id_to_word(self, word_id):
    """Returns the word string of an integer word id."""
    if word_id >= len(self.reverse_vocab):
        return self.reverse_vocab[self.unk_id]
    else:
        return self.reverse_vocab[word_id]
def set_prefix(self, prefix): """ Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. """ warnings.warn("set_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) self.prefix = prefix
Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly.
Below is the the instruction that describes the task: ### Input: Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. ### Response: def set_prefix(self, prefix): """ Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. """ warnings.warn("set_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) self.prefix = prefix
def extract(self, extractor: Extractor, extractable: Extractable = None, tokenizer: Tokenizer = None, joiner: str = " ", **options) -> List[Extraction]: """ Invoke the extractor on the given extractable, accumulating all the extractions in a list. Args: extractor (Extractor): extractable (extractable): tokenizer: user can pass custom tokenizer if extractor wants token joiner: user can pass joiner if extractor wants text options: user can pass arguments as a dict to the extract() function of different extractors Returns: List of Extraction, containing all the extractions. """ if not extractable: extractable = self if not tokenizer: tokenizer = self.etk.default_tokenizer extracted_results = list() if extractor.input_type == InputType.TOKENS: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string") elif isinstance(extractable.value, dict): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string") tokens = extractable.get_tokens(tokenizer) if tokens: extracted_results = extractor.extract(tokens, **options) else: raise ExtractorValueError( "Extractor needs string, tokenizer needs string to tokenize, got " + str(type(extractable.value))) elif extractor.input_type == InputType.TEXT: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log("Extractor needs string, got extractable value as list, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as list, converting to string") elif isinstance(extractable.value, dict): self.etk.log("Extractor needs string, got extractable value as dict, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as dict, converting to string") text = extractable.get_string(joiner) if text: extracted_results = extractor.extract(text, **options) else: # raise ExtractorValueError("Extractor needs string, got " + str(type(extractable.value))) # TODO: Yixiang - needs to be handled properly pass elif extractor.input_type == InputType.OBJECT: extracted_results = extractor.extract(extractable.value, **options) elif extractor.input_type == InputType.HTML: if bool(BeautifulSoup(extractable.value, "html.parser").find()): extracted_results = extractor.extract(extractable.value, **options) else: # raise ExtractorValueError("Extractor needs HTML, got non HTML string") # TODO: Yixiang - needs to be handled properly pass try: jsonPath = extractable.full_path except AttributeError: jsonPath = None for e in extracted_results: # for the purpose of provenance hierarrchy tracking, a parent's id for next generation. 
e.prov_id = self.provenance_id_index extraction_provenance_record: ExtractionProvenanceRecord = ExtractionProvenanceRecord( e.prov_id, jsonPath, e.provenance["extractor_name"], e.provenance["start_char"], e.provenance["end_char"], e.provenance["confidence"], self, extractable.prov_id) self._provenances[e.prov_id] = extraction_provenance_record # for the purpose of provenance hierarchy tracking self.provenance_id_index_incrementer() self.create_provenance(extraction_provenance_record) return extracted_results
Invoke the extractor on the given extractable, accumulating all the extractions in a list. Args: extractor (Extractor): extractable (extractable): tokenizer: user can pass custom tokenizer if extractor wants token joiner: user can pass joiner if extractor wants text options: user can pass arguments as a dict to the extract() function of different extractors Returns: List of Extraction, containing all the extractions.
Below is the the instruction that describes the task: ### Input: Invoke the extractor on the given extractable, accumulating all the extractions in a list. Args: extractor (Extractor): extractable (extractable): tokenizer: user can pass custom tokenizer if extractor wants token joiner: user can pass joiner if extractor wants text options: user can pass arguments as a dict to the extract() function of different extractors Returns: List of Extraction, containing all the extractions. ### Response: def extract(self, extractor: Extractor, extractable: Extractable = None, tokenizer: Tokenizer = None, joiner: str = " ", **options) -> List[Extraction]: """ Invoke the extractor on the given extractable, accumulating all the extractions in a list. Args: extractor (Extractor): extractable (extractable): tokenizer: user can pass custom tokenizer if extractor wants token joiner: user can pass joiner if extractor wants text options: user can pass arguments as a dict to the extract() function of different extractors Returns: List of Extraction, containing all the extractions. """ if not extractable: extractable = self if not tokenizer: tokenizer = self.etk.default_tokenizer extracted_results = list() if extractor.input_type == InputType.TOKENS: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got list, converting to string") elif isinstance(extractable.value, dict): self.etk.log( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string", "warning", self.doc_id, self.url) warnings.warn( "Extractor needs tokens, tokenizer needs string to tokenize, got dict, converting to string") tokens = extractable.get_tokens(tokenizer) if tokens: extracted_results = extractor.extract(tokens, **options) else: raise ExtractorValueError( "Extractor needs string, tokenizer needs string to tokenize, got " + str(type(extractable.value))) elif extractor.input_type == InputType.TEXT: if self.etk.error_policy == ErrorPolicy.PROCESS: if isinstance(extractable.value, list): self.etk.log("Extractor needs string, got extractable value as list, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as list, converting to string") elif isinstance(extractable.value, dict): self.etk.log("Extractor needs string, got extractable value as dict, converting to string", "warning", self.doc_id, self.url) warnings.warn("Extractor needs string, got extractable value as dict, converting to string") text = extractable.get_string(joiner) if text: extracted_results = extractor.extract(text, **options) else: # raise ExtractorValueError("Extractor needs string, got " + str(type(extractable.value))) # TODO: Yixiang - needs to be handled properly pass elif extractor.input_type == InputType.OBJECT: extracted_results = extractor.extract(extractable.value, **options) elif extractor.input_type == InputType.HTML: if bool(BeautifulSoup(extractable.value, "html.parser").find()): extracted_results = extractor.extract(extractable.value, **options) else: # raise ExtractorValueError("Extractor needs HTML, got non HTML string") # TODO: Yixiang - needs to be handled properly pass try: jsonPath = extractable.full_path except AttributeError: jsonPath = None for e in extracted_results: # for the purpose of provenance 
hierarrchy tracking, a parent's id for next generation. e.prov_id = self.provenance_id_index extraction_provenance_record: ExtractionProvenanceRecord = ExtractionProvenanceRecord( e.prov_id, jsonPath, e.provenance["extractor_name"], e.provenance["start_char"], e.provenance["end_char"], e.provenance["confidence"], self, extractable.prov_id) self._provenances[e.prov_id] = extraction_provenance_record # for the purpose of provenance hierarchy tracking self.provenance_id_index_incrementer() self.create_provenance(extraction_provenance_record) return extracted_results
def unseen_videos_reset(self):
    """Reset the unseen videos counter."""
    url = RESET_CAM_ENDPOINT.format(self.unique_id)
    ret = self._session.query(url).get('success')
    return ret

Reset the unseen videos counter.

Below is the the instruction that describes the task:
### Input:
Reset the unseen videos counter.
### Response:
def unseen_videos_reset(self):
    """Reset the unseen videos counter."""
    url = RESET_CAM_ENDPOINT.format(self.unique_id)
    ret = self._session.query(url).get('success')
    return ret
async def query_firmware(self): """Query the firmware versions.""" _version = await self.request.get(join_path(self._base_path, "/fwversion")) _fw = _version.get("firmware") if _fw: _main = _fw.get("mainProcessor") if _main: self._main_processor_version = self._make_version(_main) _radio = _fw.get("radio") if _radio: self._radio_version = self._make_version(_radio)
Query the firmware versions.
Below is the the instruction that describes the task: ### Input: Query the firmware versions. ### Response: async def query_firmware(self): """Query the firmware versions.""" _version = await self.request.get(join_path(self._base_path, "/fwversion")) _fw = _version.get("firmware") if _fw: _main = _fw.get("mainProcessor") if _main: self._main_processor_version = self._make_version(_main) _radio = _fw.get("radio") if _radio: self._radio_version = self._make_version(_radio)
def pair_has_contradiction(graph: BELGraph, u: BaseEntity, v: BaseEntity) -> bool:
    """Check if a pair of nodes has any contradictions in their causal relationships.

    Assumes both nodes are in the graph.
    """
    relations = {data[RELATION] for data in graph[u][v].values()}
    return relation_set_has_contradictions(relations)

Check if a pair of nodes has any contradictions in their causal relationships.

Assumes both nodes are in the graph.

Below is the the instruction that describes the task:
### Input:
Check if a pair of nodes has any contradictions in their causal relationships.

Assumes both nodes are in the graph.
### Response:
def pair_has_contradiction(graph: BELGraph, u: BaseEntity, v: BaseEntity) -> bool:
    """Check if a pair of nodes has any contradictions in their causal relationships.

    Assumes both nodes are in the graph.
    """
    relations = {data[RELATION] for data in graph[u][v].values()}
    return relation_set_has_contradictions(relations)
def _parse_comma_list(self): """Parse a comma seperated list.""" if self._cur_token['type'] not in self._literals: raise Exception( "Parser failed, _parse_comma_list was called on non-literal" " {} on line {}.".format( repr(self._cur_token['value']), self._cur_token['line'] ) ) array = [] while self._cur_token['type'] in self._literals and not self._finished: array.append(self._cur_token['value']) self._increment() self._skip_whitespace() if self._cur_token['type'] is TT.comma: self._increment() self._skip_whitespace() elif ( not self._finished and self._cur_token['type'] not in (TT.ws, TT.lbreak) ): raise ParseError('comma or newline', self._cur_token) return array
Parse a comma seperated list.
Below is the the instruction that describes the task: ### Input: Parse a comma seperated list. ### Response: def _parse_comma_list(self): """Parse a comma seperated list.""" if self._cur_token['type'] not in self._literals: raise Exception( "Parser failed, _parse_comma_list was called on non-literal" " {} on line {}.".format( repr(self._cur_token['value']), self._cur_token['line'] ) ) array = [] while self._cur_token['type'] in self._literals and not self._finished: array.append(self._cur_token['value']) self._increment() self._skip_whitespace() if self._cur_token['type'] is TT.comma: self._increment() self._skip_whitespace() elif ( not self._finished and self._cur_token['type'] not in (TT.ws, TT.lbreak) ): raise ParseError('comma or newline', self._cur_token) return array
def _aspect_preserving_resize(image, resize_min): """Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image. """ mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING, value={"min": resize_min}) shape = tf.shape(image) height, width = shape[0], shape[1] new_height, new_width = _smallest_size_at_least(height, width, resize_min) return _resize_image(image, new_height, new_width)
Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image.
Below is the the instruction that describes the task: ### Input: Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image. ### Response: def _aspect_preserving_resize(image, resize_min): """Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image. """ mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING, value={"min": resize_min}) shape = tf.shape(image) height, width = shape[0], shape[1] new_height, new_width = _smallest_size_at_least(height, width, resize_min) return _resize_image(image, new_height, new_width)
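The aspect-preserving arithmetic delegated to `_smallest_size_at_least` can be illustrated without TensorFlow; this plain-Python sketch assumes the usual convention of scaling so the smaller side becomes `resize_min`:

def smallest_size_at_least(height, width, resize_min):
    # Scale factor that brings the smaller side up (or down) to resize_min.
    scale = resize_min / min(height, width)
    return int(round(height * scale)), int(round(width * scale))

print(smallest_size_at_least(480, 640, 256))   # -> (256, 341)
print(smallest_size_at_least(640, 480, 256))   # -> (341, 256)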
def resolve_font(name): """Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass """ if os.path.exists(name): return os.path.abspath(name) fonts = get_font_files() if name in fonts: return fonts[name] raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass
Below is the the instruction that describes the task: ### Input: Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass ### Response: def resolve_font(name): """Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass """ if os.path.exists(name): return os.path.abspath(name) fonts = get_font_files() if name in fonts: return fonts[name] raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
def get_sort_function(order): """ Returns a callable similar to the built-in `cmp`, to be used on objects. Takes a list of dictionaries. In each, 'key' must be a string that is used to get an attribute of the objects to compare, and 'reverse' must be a boolean indicating whether the result should be reversed. """ stable = tuple((d['key'], -1 if d['reverse'] else 1) for d in order) def sort_function(a, b): for name, direction in stable: v = cmp(getattr(a, name) if a else a, getattr(b, name) if b else b) if v != 0: return v * direction return 0 return sort_function
Returns a callable similar to the built-in `cmp`, to be used on objects. Takes a list of dictionaries. In each, 'key' must be a string that is used to get an attribute of the objects to compare, and 'reverse' must be a boolean indicating whether the result should be reversed.
Below is the the instruction that describes the task: ### Input: Returns a callable similar to the built-in `cmp`, to be used on objects. Takes a list of dictionaries. In each, 'key' must be a string that is used to get an attribute of the objects to compare, and 'reverse' must be a boolean indicating whether the result should be reversed. ### Response: def get_sort_function(order): """ Returns a callable similar to the built-in `cmp`, to be used on objects. Takes a list of dictionaries. In each, 'key' must be a string that is used to get an attribute of the objects to compare, and 'reverse' must be a boolean indicating whether the result should be reversed. """ stable = tuple((d['key'], -1 if d['reverse'] else 1) for d in order) def sort_function(a, b): for name, direction in stable: v = cmp(getattr(a, name) if a else a, getattr(b, name) if b else b) if v != 0: return v * direction return 0 return sort_function
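Because the comparator above calls the Python 2 built-in `cmp`, using it on Python 3 needs a small shim defined in the same module, plus `functools.cmp_to_key`; the `Row` namedtuple and sample data below are invented for illustration:

from collections import namedtuple
from functools import cmp_to_key

def cmp(a, b):   # Python 3 replacement for the removed built-in
    return (a > b) - (a < b)

Row = namedtuple("Row", ["name", "age"])
rows = [Row("bob", 42), Row("alice", 42), Row("carol", 30)]

order = [{"key": "age", "reverse": True}, {"key": "name", "reverse": False}]
sort_fn = get_sort_function(order)
print(sorted(rows, key=cmp_to_key(sort_fn)))
# [Row(name='alice', age=42), Row(name='bob', age=42), Row(name='carol', age=30)]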
def _remove_live_points(self): """Remove the final set of live points if they were previously added to the current set of dead points.""" if self.added_live: self.added_live = False if self.save_samples: del self.saved_id[-self.nlive:] del self.saved_u[-self.nlive:] del self.saved_v[-self.nlive:] del self.saved_logl[-self.nlive:] del self.saved_logvol[-self.nlive:] del self.saved_logwt[-self.nlive:] del self.saved_logz[-self.nlive:] del self.saved_logzvar[-self.nlive:] del self.saved_h[-self.nlive:] del self.saved_nc[-self.nlive:] del self.saved_boundidx[-self.nlive:] del self.saved_it[-self.nlive:] del self.saved_bounditer[-self.nlive:] del self.saved_scale[-self.nlive:] else: raise ValueError("No live points were added to the " "list of samples!")
Remove the final set of live points if they were previously added to the current set of dead points.
Below is the the instruction that describes the task: ### Input: Remove the final set of live points if they were previously added to the current set of dead points. ### Response: def _remove_live_points(self): """Remove the final set of live points if they were previously added to the current set of dead points.""" if self.added_live: self.added_live = False if self.save_samples: del self.saved_id[-self.nlive:] del self.saved_u[-self.nlive:] del self.saved_v[-self.nlive:] del self.saved_logl[-self.nlive:] del self.saved_logvol[-self.nlive:] del self.saved_logwt[-self.nlive:] del self.saved_logz[-self.nlive:] del self.saved_logzvar[-self.nlive:] del self.saved_h[-self.nlive:] del self.saved_nc[-self.nlive:] del self.saved_boundidx[-self.nlive:] del self.saved_it[-self.nlive:] del self.saved_bounditer[-self.nlive:] del self.saved_scale[-self.nlive:] else: raise ValueError("No live points were added to the " "list of samples!")
def get_all_subs_satellites_by_type(self, sat_type, realms): """Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list """ res = copy.copy(getattr(self, sat_type)) for member in self.all_sub_members: res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms)) return res
Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list
Below is the the instruction that describes the task: ### Input: Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list ### Response: def get_all_subs_satellites_by_type(self, sat_type, realms): """Get all satellites of the wanted type in this realm recursively :param sat_type: satellite type wanted (scheduler, poller ..) :type sat_type: :param realms: all realms :type realms: list of realm object :return: list of satellite in this realm :rtype: list """ res = copy.copy(getattr(self, sat_type)) for member in self.all_sub_members: res.extend(realms[member].get_all_subs_satellites_by_type(sat_type, realms)) return res
def prt_txt(prt, data_nts, prtfmt=None, nt_fields=None, **kws): """Print list of namedtuples into a table using prtfmt.""" lines = get_lines(data_nts, prtfmt, nt_fields, **kws) if lines: for line in lines: prt.write(line) else: sys.stdout.write(" 0 items. NOT WRITING\n")
Print list of namedtuples into a table using prtfmt.
Below is the the instruction that describes the task: ### Input: Print list of namedtuples into a table using prtfmt. ### Response: def prt_txt(prt, data_nts, prtfmt=None, nt_fields=None, **kws): """Print list of namedtuples into a table using prtfmt.""" lines = get_lines(data_nts, prtfmt, nt_fields, **kws) if lines: for line in lines: prt.write(line) else: sys.stdout.write(" 0 items. NOT WRITING\n")
def get_led_register_from_name(self, name): """Parse the name for led number :param name: attribute name, like: led_1 """ res = re.match('^led_([0-9]{1,2})$', name) if res is None: raise AttributeError("Unknown attribute: '%s'" % name) led_num = int(res.group(1)) if led_num < 0 or led_num > 15: raise AttributeError("Unknown attribute: '%s'" % name) return self.calc_led_register(led_num)
Parse the name for led number :param name: attribute name, like: led_1
Below is the the instruction that describes the task: ### Input: Parse the name for led number :param name: attribute name, like: led_1 ### Response: def get_led_register_from_name(self, name): """Parse the name for led number :param name: attribute name, like: led_1 """ res = re.match('^led_([0-9]{1,2})$', name) if res is None: raise AttributeError("Unknown attribute: '%s'" % name) led_num = int(res.group(1)) if led_num < 0 or led_num > 15: raise AttributeError("Unknown attribute: '%s'" % name) return self.calc_led_register(led_num)
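The attribute-name parsing is an ordinary regular expression over strings like 'led_7'; a standalone sketch that keeps only that part (the register math of `calc_led_register` is left out):

import re

def led_number_from_name(name):
    match = re.match(r'^led_([0-9]{1,2})$', name)
    if match is None:
        raise AttributeError("Unknown attribute: '%s'" % name)
    led_num = int(match.group(1))
    if led_num < 0 or led_num > 15:
        raise AttributeError("Unknown attribute: '%s'" % name)
    return led_num

print(led_number_from_name("led_7"))    # -> 7
print(led_number_from_name("led_15"))   # -> 15
# led_number_from_name("led_99") raises AttributeError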
def _to_ctfile(self): """Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string. :return: ``CTfile`` formatted string. :rtype: :py:class:`str`. """ output = io.StringIO() for key in self: if key == 'HeaderBlock': for line in self[key].values(): output.write(line) output.write('\n') elif key == 'Ctab': ctab_str = self[key]._to_ctfile() output.write(ctab_str) else: raise KeyError('Molfile object does not supposed to have any other information: "{}".'.format(key)) return output.getvalue()
Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string. :return: ``CTfile`` formatted string. :rtype: :py:class:`str`.
Below is the the instruction that describes the task: ### Input: Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string. :return: ``CTfile`` formatted string. :rtype: :py:class:`str`. ### Response: def _to_ctfile(self): """Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string. :return: ``CTfile`` formatted string. :rtype: :py:class:`str`. """ output = io.StringIO() for key in self: if key == 'HeaderBlock': for line in self[key].values(): output.write(line) output.write('\n') elif key == 'Ctab': ctab_str = self[key]._to_ctfile() output.write(ctab_str) else: raise KeyError('Molfile object does not supposed to have any other information: "{}".'.format(key)) return output.getvalue()
def _validate_channel_definition(self, jp2h, colr): """Validate the channel definition box.""" cdef_lst = [j for (j, box) in enumerate(jp2h.box) if box.box_id == 'cdef'] if len(cdef_lst) > 1: msg = ("Only one channel definition box is allowed in the " "JP2 header.") raise IOError(msg) elif len(cdef_lst) == 1: cdef = jp2h.box[cdef_lst[0]] if colr.colorspace == core.SRGB: if any([chan + 1 not in cdef.association or cdef.channel_type[chan] != 0 for chan in [0, 1, 2]]): msg = ("All color channels must be defined in the " "channel definition box.") raise IOError(msg) elif colr.colorspace == core.GREYSCALE: if 0 not in cdef.channel_type: msg = ("All color channels must be defined in the " "channel definition box.") raise IOError(msg)
Validate the channel definition box.
Below is the the instruction that describes the task: ### Input: Validate the channel definition box. ### Response: def _validate_channel_definition(self, jp2h, colr): """Validate the channel definition box.""" cdef_lst = [j for (j, box) in enumerate(jp2h.box) if box.box_id == 'cdef'] if len(cdef_lst) > 1: msg = ("Only one channel definition box is allowed in the " "JP2 header.") raise IOError(msg) elif len(cdef_lst) == 1: cdef = jp2h.box[cdef_lst[0]] if colr.colorspace == core.SRGB: if any([chan + 1 not in cdef.association or cdef.channel_type[chan] != 0 for chan in [0, 1, 2]]): msg = ("All color channels must be defined in the " "channel definition box.") raise IOError(msg) elif colr.colorspace == core.GREYSCALE: if 0 not in cdef.channel_type: msg = ("All color channels must be defined in the " "channel definition box.") raise IOError(msg)
def _execute_with_retries(conn, function, **kwargs): ''' Retry if we're rate limited by AWS or blocked by another call. Give up and return error message if resource not found or argument is invalid. conn The connection established by the calling method via _get_conn() function The function to call on conn. i.e. create_stream **kwargs Any kwargs required by the above function, with their keywords i.e. StreamName=stream_name Returns: The result dict with the HTTP response and JSON data if applicable as 'result', or an error as 'error' CLI example:: salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs ''' r = {} max_attempts = 18 max_retry_delay = 10 for attempt in range(max_attempts): log.info("attempt: %s function: %s", attempt, function) try: fn = getattr(conn, function) r['result'] = fn(**kwargs) return r except botocore.exceptions.ClientError as e: error_code = e.response['Error']['Code'] if "LimitExceededException" in error_code or "ResourceInUseException" in error_code: # could be rate limited by AWS or another command is blocking, # retry with exponential backoff log.debug("Retrying due to AWS exception", exc_info=True) time.sleep(_jittered_backoff(attempt, max_retry_delay)) else: # ResourceNotFoundException or InvalidArgumentException r['error'] = e.response['Error'] log.error(r['error']) r['result'] = None return r r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts) log.error(r['error']) return r
Retry if we're rate limited by AWS or blocked by another call. Give up and return error message if resource not found or argument is invalid. conn The connection established by the calling method via _get_conn() function The function to call on conn. i.e. create_stream **kwargs Any kwargs required by the above function, with their keywords i.e. StreamName=stream_name Returns: The result dict with the HTTP response and JSON data if applicable as 'result', or an error as 'error' CLI example:: salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
Below is the the instruction that describes the task: ### Input: Retry if we're rate limited by AWS or blocked by another call. Give up and return error message if resource not found or argument is invalid. conn The connection established by the calling method via _get_conn() function The function to call on conn. i.e. create_stream **kwargs Any kwargs required by the above function, with their keywords i.e. StreamName=stream_name Returns: The result dict with the HTTP response and JSON data if applicable as 'result', or an error as 'error' CLI example:: salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs ### Response: def _execute_with_retries(conn, function, **kwargs): ''' Retry if we're rate limited by AWS or blocked by another call. Give up and return error message if resource not found or argument is invalid. conn The connection established by the calling method via _get_conn() function The function to call on conn. i.e. create_stream **kwargs Any kwargs required by the above function, with their keywords i.e. StreamName=stream_name Returns: The result dict with the HTTP response and JSON data if applicable as 'result', or an error as 'error' CLI example:: salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs ''' r = {} max_attempts = 18 max_retry_delay = 10 for attempt in range(max_attempts): log.info("attempt: %s function: %s", attempt, function) try: fn = getattr(conn, function) r['result'] = fn(**kwargs) return r except botocore.exceptions.ClientError as e: error_code = e.response['Error']['Code'] if "LimitExceededException" in error_code or "ResourceInUseException" in error_code: # could be rate limited by AWS or another command is blocking, # retry with exponential backoff log.debug("Retrying due to AWS exception", exc_info=True) time.sleep(_jittered_backoff(attempt, max_retry_delay)) else: # ResourceNotFoundException or InvalidArgumentException r['error'] = e.response['Error'] log.error(r['error']) r['result'] = None return r r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts) log.error(r['error']) return r
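The retry pattern used above (bounded attempts, jittered exponential backoff, give up with an error dict) can be shown without boto; every name in this generic sketch is made up:

import random
import time

def jittered_backoff(attempt, max_delay):
    # Exponential backoff capped at max_delay, with full jitter.
    return random.uniform(0, min(max_delay, 0.1 * (2 ** attempt)))

def call_with_retries(fn, max_attempts=5, max_delay=10):
    last_error = None
    for attempt in range(max_attempts):
        try:
            return {"result": fn()}
        except ConnectionError as exc:   # stand-in for a throttling/limit error
            last_error = str(exc)
            time.sleep(jittered_backoff(attempt, max_delay))
    return {"error": "gave up after %d attempts: %s" % (max_attempts, last_error)}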
def revoke_vouchers(self, vid_encoded=None, uid_from=None, uid_to=None, gid=None, valid_after=None, valid_before=None, last=None, first=None): """ REVOKES/INVALIDATES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: As with `get_vouchers`, filters are restrictive, narrowing down the set of vouchers, which initially includes complete voucher collection. That means, in turn, that a naive empty-handed `revoke_vouchers()` call shall revoke **all** un-used vouchers (both valid and expired)! :rtype: ``dict`` :returns: A dictionary of successfully revoked vouchers, i.e. a map ``vid_encoded``: ``refund_transfer_id`` for all successfully revoked vouchers. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.revoke``) """ resource = self.kvpath( 'vouchers', ('ident', vid_encoded), **{ 'from': ('int', uid_from), 'to': ('int', uid_to), 'gid': ('ident', gid), 'valid_after': ('isobasic', absdatetime(valid_after)), 'valid_before': ('isobasic', absdatetime(valid_before)), 'first': ('int', first), 'last': ('int', last) } ) return self.request('delete', resource, json.dumps({'revoke': True}))
REVOKES/INVALIDATES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: As with `get_vouchers`, filters are restrictive, narrowing down the set of vouchers, which initially includes complete voucher collection. That means, in turn, that a naive empty-handed `revoke_vouchers()` call shall revoke **all** un-used vouchers (both valid and expired)! :rtype: ``dict`` :returns: A dictionary of successfully revoked vouchers, i.e. a map ``vid_encoded``: ``refund_transfer_id`` for all successfully revoked vouchers. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.revoke``)
Below is the the instruction that describes the task: ### Input: REVOKES/INVALIDATES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: As with `get_vouchers`, filters are restrictive, narrowing down the set of vouchers, which initially includes complete voucher collection. That means, in turn, that a naive empty-handed `revoke_vouchers()` call shall revoke **all** un-used vouchers (both valid and expired)! :rtype: ``dict`` :returns: A dictionary of successfully revoked vouchers, i.e. a map ``vid_encoded``: ``refund_transfer_id`` for all successfully revoked vouchers. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.revoke``) ### Response: def revoke_vouchers(self, vid_encoded=None, uid_from=None, uid_to=None, gid=None, valid_after=None, valid_before=None, last=None, first=None): """ REVOKES/INVALIDATES a filtered list of vouchers. :type vid_encoded: ``alphanumeric(64)`` :param vid_encoded: Voucher ID, as a string with CRC. :type uid_from: ``bigint`` :param uid_from: Filter by source account UID. :type uid_to: ``bigint`` :param uid_to: Filter by destination account UID. :type gid: ``alphanumeric(32)`` :param gid: Filter by voucher Group ID. GID is localized to `uid_from`. :type valid_after: ``datetime``/``dict`` :param valid_after: Voucher has to be valid after this timestamp. Absolute (``datetime``) or relative (``dict``) timestamps are accepted. Valid keys for relative timestamp dictionary are same as keyword arguments for `datetime.timedelta` (``days``, ``seconds``, ``minutes``, ``hours``, ``weeks``). :type valid_before: ``datetime``/``dict`` :param valid_before: Voucher was valid until this timestamp (for format, see the `valid_after` above). :type last: ``bigint`` :param last: The number of newest vouchers (that satisfy all other criteria) to return. :type first: ``bigint`` :param first: The number of oldest vouchers (that satisfy all other criteria) to return. :note: As with `get_vouchers`, filters are restrictive, narrowing down the set of vouchers, which initially includes complete voucher collection. That means, in turn, that a naive empty-handed `revoke_vouchers()` call shall revoke **all** un-used vouchers (both valid and expired)! :rtype: ``dict`` :returns: A dictionary of successfully revoked vouchers, i.e. 
a map ``vid_encoded``: ``refund_transfer_id`` for all successfully revoked vouchers. :raises GeneralException: :resource: ``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]`` ``[/valid_after=<valid_after>][/valid_before=<valid_before>]`` ``[/last=<last>][/first=<first>]`` :access: authorized users (ACL flag: ``voucher.revoke``) """ resource = self.kvpath( 'vouchers', ('ident', vid_encoded), **{ 'from': ('int', uid_from), 'to': ('int', uid_to), 'gid': ('ident', gid), 'valid_after': ('isobasic', absdatetime(valid_after)), 'valid_before': ('isobasic', absdatetime(valid_before)), 'first': ('int', first), 'last': ('int', last) } ) return self.request('delete', resource, json.dumps({'revoke': True}))
def call_at(self, when, callback, *args, **kwargs): """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs)
Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0
Below is the the instruction that describes the task: ### Input: Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 ### Response: def call_at(self, when, callback, *args, **kwargs): """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs)
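A short usage sketch for Tornado's `call_at`, scheduling a callback five seconds ahead on the loop's own clock (the callback body is made up):

from tornado.ioloop import IOLoop

loop = IOLoop.current()

def say_hello():
    print("hello from call_at")
    loop.stop()

# Absolute deadline: the loop's clock plus five seconds.
loop.call_at(loop.time() + 5, say_hello)
loop.start()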
def colRegex(self, colName): """ Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+ """ if not isinstance(colName, basestring): raise ValueError("colName should be provided as string") jc = self._jdf.colRegex(colName) return Column(jc)
Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+
Below is the the instruction that describes the task: ### Input: Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+ ### Response: def colRegex(self, colName): """ Selects column based on the column name specified as a regex and returns it as :class:`Column`. :param colName: string, column name specified as a regex. >>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"]) >>> df.select(df.colRegex("`(Col1)?+.+`")).show() +----+ |Col2| +----+ | 1| | 2| | 3| +----+ """ if not isinstance(colName, basestring): raise ValueError("colName should be provided as string") jc = self._jdf.colRegex(colName) return Column(jc)
def floordiv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied. """ return self._binary_op( "floordiv", other, axis=axis, level=level, fill_value=fill_value )
Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied.
Below is the the instruction that describes the task: ### Input: Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied. ### Response: def floordiv(self, other, axis="columns", level=None, fill_value=None): """Divides this DataFrame against another DataFrame/Series/scalar. Args: other: The object to use to apply the divide against this. axis: The axis to divide over. level: The Multilevel index level to apply divide over. fill_value: The value to fill NaNs with. Returns: A new DataFrame with the Divide applied. """ return self._binary_op( "floordiv", other, axis=axis, level=level, fill_value=fill_value )
def get_spider_list(self, project_name, version=None): """ Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} """ url, method = self.command_set['listspiders'][0], self.command_set['listspiders'][1] data = {} data['project'] = project_name if version is not None: data['_version'] = version response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = SpiderList().__dict__ return response
Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]}
Below is the the instruction that describes the task: ### Input: Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} ### Response: def get_spider_list(self, project_name, version=None): """ Get the list of spiders available in the last (unless overridden) version of some project. :param project_name: the project name :param version: the version of the project to examine :return: a dictionary that spider name list example: {"status": "ok", "spiders": ["spider1", "spider2", "spider3"]} """ url, method = self.command_set['listspiders'][0], self.command_set['listspiders'][1] data = {} data['project'] = project_name if version is not None: data['_version'] = version response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = SpiderList().__dict__ return response
def load_dataframe(self, df_loader_name): """ Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. """ logger.debug("loading dataframe: {}".format(df_loader_name)) # Get the DataFrameLoader object corresponding to this name. df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name] if len(df_loaders) == 0: raise ValueError("No DataFrameLoader with name %s" % df_loader_name) if len(df_loaders) > 1: raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name) return df_loaders[0].load_dataframe()
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame.
Below is the the instruction that describes the task: ### Input: Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. ### Response: def load_dataframe(self, df_loader_name): """ Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. """ logger.debug("loading dataframe: {}".format(df_loader_name)) # Get the DataFrameLoader object corresponding to this name. df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name] if len(df_loaders) == 0: raise ValueError("No DataFrameLoader with name %s" % df_loader_name) if len(df_loaders) > 1: raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name) return df_loaders[0].load_dataframe()
def get_dtype_kinds(l): """ Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays """ typs = set() for arr in l: dtype = arr.dtype if is_categorical_dtype(dtype): typ = 'category' elif is_sparse(arr): typ = 'sparse' elif isinstance(arr, ABCRangeIndex): typ = 'range' elif is_datetime64tz_dtype(arr): # if to_concat contains different tz, # the result must be object dtype typ = str(arr.dtype) elif is_datetime64_dtype(dtype): typ = 'datetime' elif is_timedelta64_dtype(dtype): typ = 'timedelta' elif is_object_dtype(dtype): typ = 'object' elif is_bool_dtype(dtype): typ = 'bool' elif is_extension_array_dtype(dtype): typ = str(arr.dtype) else: typ = dtype.kind typs.add(typ) return typs
Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays
Below is the the instruction that describes the task: ### Input: Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays ### Response: def get_dtype_kinds(l): """ Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays """ typs = set() for arr in l: dtype = arr.dtype if is_categorical_dtype(dtype): typ = 'category' elif is_sparse(arr): typ = 'sparse' elif isinstance(arr, ABCRangeIndex): typ = 'range' elif is_datetime64tz_dtype(arr): # if to_concat contains different tz, # the result must be object dtype typ = str(arr.dtype) elif is_datetime64_dtype(dtype): typ = 'datetime' elif is_timedelta64_dtype(dtype): typ = 'timedelta' elif is_object_dtype(dtype): typ = 'object' elif is_bool_dtype(dtype): typ = 'bool' elif is_extension_array_dtype(dtype): typ = str(arr.dtype) else: typ = dtype.kind typs.add(typ) return typs
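A quick illustration of the kind labels this helper reports, assuming it runs inside pandas where its dtype predicates are importable; the arrays are arbitrary samples:

import numpy as np
import pandas as pd

arrays = [
    np.array([1, 2, 3]),                 # integer -> kind 'i'
    np.array([1.0, 2.0]),                # float   -> kind 'f'
    np.array(["a", "b"], dtype=object),  # object  -> 'object'
    pd.Categorical(["x", "y"]),          #         -> 'category'
]
print(get_dtype_kinds(arrays))   # e.g. {'i', 'f', 'object', 'category'}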
def xloss(logits, labels, ignore=None):
    """ Cross entropy loss """
    return F.cross_entropy(logits, Variable(labels), ignore_index=255)

Cross entropy loss

Below is the the instruction that describes the task:
### Input:
Cross entropy loss
### Response:
def xloss(logits, labels, ignore=None):
    """ Cross entropy loss """
    return F.cross_entropy(logits, Variable(labels), ignore_index=255)
def trade_sell(self, market=None, order_type=None, quantity=None, rate=None, time_in_effect=None, condition_type=None, target=0.0): """ Enter a sell order into the book Endpoint 1.1 NO EQUIVALENT -- see sell_market or sell_limit 2.0 /key/market/tradesell :param market: String literal for the market (ex: BTC-LTC) :type market: str :param order_type: ORDERTYPE_LIMIT = 'LIMIT' or ORDERTYPE_MARKET = 'MARKET' :type order_type: str :param quantity: The amount to purchase :type quantity: float :param rate: The rate at which to place the order. This is not needed for market orders :type rate: float :param time_in_effect: TIMEINEFFECT_GOOD_TIL_CANCELLED = 'GOOD_TIL_CANCELLED', TIMEINEFFECT_IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL', or TIMEINEFFECT_FILL_OR_KILL = 'FILL_OR_KILL' :type time_in_effect: str :param condition_type: CONDITIONTYPE_NONE = 'NONE', CONDITIONTYPE_GREATER_THAN = 'GREATER_THAN', CONDITIONTYPE_LESS_THAN = 'LESS_THAN', CONDITIONTYPE_STOP_LOSS_FIXED = 'STOP_LOSS_FIXED', CONDITIONTYPE_STOP_LOSS_PERCENTAGE = 'STOP_LOSS_PERCENTAGE' :type condition_type: str :param target: used in conjunction with condition_type :type target: float :return: """ return self._api_query(path_dict={ API_V2_0: '/key/market/tradesell' }, options={ 'marketname': market, 'ordertype': order_type, 'quantity': quantity, 'rate': rate, 'timeInEffect': time_in_effect, 'conditiontype': condition_type, 'target': target }, protection=PROTECTION_PRV)
Enter a sell order into the book Endpoint 1.1 NO EQUIVALENT -- see sell_market or sell_limit 2.0 /key/market/tradesell :param market: String literal for the market (ex: BTC-LTC) :type market: str :param order_type: ORDERTYPE_LIMIT = 'LIMIT' or ORDERTYPE_MARKET = 'MARKET' :type order_type: str :param quantity: The amount to purchase :type quantity: float :param rate: The rate at which to place the order. This is not needed for market orders :type rate: float :param time_in_effect: TIMEINEFFECT_GOOD_TIL_CANCELLED = 'GOOD_TIL_CANCELLED', TIMEINEFFECT_IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL', or TIMEINEFFECT_FILL_OR_KILL = 'FILL_OR_KILL' :type time_in_effect: str :param condition_type: CONDITIONTYPE_NONE = 'NONE', CONDITIONTYPE_GREATER_THAN = 'GREATER_THAN', CONDITIONTYPE_LESS_THAN = 'LESS_THAN', CONDITIONTYPE_STOP_LOSS_FIXED = 'STOP_LOSS_FIXED', CONDITIONTYPE_STOP_LOSS_PERCENTAGE = 'STOP_LOSS_PERCENTAGE' :type condition_type: str :param target: used in conjunction with condition_type :type target: float :return:
Below is the the instruction that describes the task: ### Input: Enter a sell order into the book Endpoint 1.1 NO EQUIVALENT -- see sell_market or sell_limit 2.0 /key/market/tradesell :param market: String literal for the market (ex: BTC-LTC) :type market: str :param order_type: ORDERTYPE_LIMIT = 'LIMIT' or ORDERTYPE_MARKET = 'MARKET' :type order_type: str :param quantity: The amount to purchase :type quantity: float :param rate: The rate at which to place the order. This is not needed for market orders :type rate: float :param time_in_effect: TIMEINEFFECT_GOOD_TIL_CANCELLED = 'GOOD_TIL_CANCELLED', TIMEINEFFECT_IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL', or TIMEINEFFECT_FILL_OR_KILL = 'FILL_OR_KILL' :type time_in_effect: str :param condition_type: CONDITIONTYPE_NONE = 'NONE', CONDITIONTYPE_GREATER_THAN = 'GREATER_THAN', CONDITIONTYPE_LESS_THAN = 'LESS_THAN', CONDITIONTYPE_STOP_LOSS_FIXED = 'STOP_LOSS_FIXED', CONDITIONTYPE_STOP_LOSS_PERCENTAGE = 'STOP_LOSS_PERCENTAGE' :type condition_type: str :param target: used in conjunction with condition_type :type target: float :return: ### Response: def trade_sell(self, market=None, order_type=None, quantity=None, rate=None, time_in_effect=None, condition_type=None, target=0.0): """ Enter a sell order into the book Endpoint 1.1 NO EQUIVALENT -- see sell_market or sell_limit 2.0 /key/market/tradesell :param market: String literal for the market (ex: BTC-LTC) :type market: str :param order_type: ORDERTYPE_LIMIT = 'LIMIT' or ORDERTYPE_MARKET = 'MARKET' :type order_type: str :param quantity: The amount to purchase :type quantity: float :param rate: The rate at which to place the order. This is not needed for market orders :type rate: float :param time_in_effect: TIMEINEFFECT_GOOD_TIL_CANCELLED = 'GOOD_TIL_CANCELLED', TIMEINEFFECT_IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL', or TIMEINEFFECT_FILL_OR_KILL = 'FILL_OR_KILL' :type time_in_effect: str :param condition_type: CONDITIONTYPE_NONE = 'NONE', CONDITIONTYPE_GREATER_THAN = 'GREATER_THAN', CONDITIONTYPE_LESS_THAN = 'LESS_THAN', CONDITIONTYPE_STOP_LOSS_FIXED = 'STOP_LOSS_FIXED', CONDITIONTYPE_STOP_LOSS_PERCENTAGE = 'STOP_LOSS_PERCENTAGE' :type condition_type: str :param target: used in conjunction with condition_type :type target: float :return: """ return self._api_query(path_dict={ API_V2_0: '/key/market/tradesell' }, options={ 'marketname': market, 'ordertype': order_type, 'quantity': quantity, 'rate': rate, 'timeInEffect': time_in_effect, 'conditiontype': condition_type, 'target': target }, protection=PROTECTION_PRV)
def lower(option,value): ''' Enforces lower case options and option values where appropriate ''' if type(option) is str: option=option.lower() if type(value) is str: value=value.lower() return (option,value)
Enforces lower case options and option values where appropriate
Below is the the instruction that describes the task: ### Input: Enforces lower case options and option values where appropriate ### Response: def lower(option,value): ''' Enforces lower case options and option values where appropriate ''' if type(option) is str: option=option.lower() if type(value) is str: value=value.lower() return (option,value)
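A quick, self-contained check of the normalizer in the record above; the option/value pairs are made up for illustration, and the function is restated here only so the snippet runs on its own.

def lower(option, value):
    # mirror of the record above: lower-case strings, pass everything else through
    if isinstance(option, str):
        option = option.lower()
    if isinstance(value, str):
        value = value.lower()
    return (option, value)

print(lower('TimeOut', 'Thirty'))   # ('timeout', 'thirty')
print(lower('Retries', 3))          # ('retries', 3) -- non-strings are untouched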
def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False): """ Gets the result of a method call, using the given key and TTL as a cache """ if not recalculate: cached = cache.get(cache_key) if cached is not None: return json.loads(cached) calculated = calculate() cache.set(cache_key, json.dumps(calculated), cache_ttl) return calculated
Gets the result of a method call, using the given key and TTL as a cache
Below is the the instruction that describes the task: ### Input: Gets the result of a method call, using the given key and TTL as a cache ### Response: def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False): """ Gets the result of a method call, using the given key and TTL as a cache """ if not recalculate: cached = cache.get(cache_key) if cached is not None: return json.loads(cached) calculated = calculate() cache.set(cache_key, json.dumps(calculated), cache_ttl) return calculated
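The helper above assumes a Django-style cache backend with get/set. A minimal sketch of the same read-through pattern against an in-memory stand-in follows; the _FakeCache class and the cache key are invented for the demo and are not part of the original module.

import json
import time

class _FakeCache:
    # stand-in for a real cache backend: stores (value, expiry) pairs
    def __init__(self):
        self._store = {}
    def get(self, key):
        entry = self._store.get(key)
        if entry is None:
            return None
        value, expires_at = entry
        return value if time.time() < expires_at else None
    def set(self, key, value, ttl):
        self._store[key] = (value, time.time() + ttl)

cache = _FakeCache()

def get_cacheable(cache_key, cache_ttl, calculate, recalculate=False):
    if not recalculate:
        cached = cache.get(cache_key)
        if cached is not None:
            return json.loads(cached)
    calculated = calculate()
    cache.set(cache_key, json.dumps(calculated), cache_ttl)
    return calculated

print(get_cacheable('org:1:counts', 60, lambda: {'contacts': 42}))  # computed
print(get_cacheable('org:1:counts', 60, lambda: {'contacts': 99}))  # served from cache: {'contacts': 42}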
def update_balances(self, recursive=True): """ Calculate tree balance factor """ if self.node: if recursive: if self.node.left: self.node.left.update_balances() if self.node.right: self.node.right.update_balances() self.balance = self.node.left.height - self.node.right.height else: self.balance = 0
Calculate tree balance factor
Below is the the instruction that describes the task: ### Input: Calculate tree balance factor ### Response: def update_balances(self, recursive=True): """ Calculate tree balance factor """ if self.node: if recursive: if self.node.left: self.node.left.update_balances() if self.node.right: self.node.right.update_balances() self.balance = self.node.left.height - self.node.right.height else: self.balance = 0
def identifier(self): """ A unique identifier for the path. Returns --------- identifier: (5,) float, unique identifier """ if len(self.polygons_full) != 1: raise TypeError('Identifier only valid for single body') return polygons.polygon_hash(self.polygons_full[0])
A unique identifier for the path. Returns --------- identifier: (5,) float, unique identifier
Below is the the instruction that describes the task: ### Input: A unique identifier for the path. Returns --------- identifier: (5,) float, unique identifier ### Response: def identifier(self): """ A unique identifier for the path. Returns --------- identifier: (5,) float, unique identifier """ if len(self.polygons_full) != 1: raise TypeError('Identifier only valid for single body') return polygons.polygon_hash(self.polygons_full[0])
def illegal_doi(self, doi_string): """ DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None """ logger_doi_resolver.info("enter illegal_doi") # Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others) if len(doi_string) > 5: # NOAA string if 'noaa' in doi_string.lower(): self.noaa_citation(doi_string) # Paragraph citation / Manual citation elif doi_string.count(' ') > 3: self.root_dict['pub'][0]['citation'] = doi_string # Strange Links or Other, send to quarantine else: logger_doi_resolver.warn("illegal_doi: bad doi string: {}".format(doi_string)) logger_doi_resolver.info("exit illegal_doi") return
DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None
Below is the the instruction that describes the task: ### Input: DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None ### Response: def illegal_doi(self, doi_string): """ DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None """ logger_doi_resolver.info("enter illegal_doi") # Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others) if len(doi_string) > 5: # NOAA string if 'noaa' in doi_string.lower(): self.noaa_citation(doi_string) # Paragraph citation / Manual citation elif doi_string.count(' ') > 3: self.root_dict['pub'][0]['citation'] = doi_string # Strange Links or Other, send to quarantine else: logger_doi_resolver.warn("illegal_doi: bad doi string: {}".format(doi_string)) logger_doi_resolver.info("exit illegal_doi") return
def _note_remote_option(self, option, state):
    """Record the status of remotely negotiated Telnet options."""
    if option not in self.telnet_opt_dict:
        self.telnet_opt_dict[option] = TelnetOption()
    self.telnet_opt_dict[option].remote_option = state
Record the status of remotely negotiated Telnet options.
Below is the instruction that describes the task:
### Input:
Record the status of remotely negotiated Telnet options.
### Response:
def _note_remote_option(self, option, state):
    """Record the status of remotely negotiated Telnet options."""
    if option not in self.telnet_opt_dict:
        self.telnet_opt_dict[option] = TelnetOption()
    self.telnet_opt_dict[option].remote_option = state
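The membership-test-then-insert pattern above can also be written with dict.setdefault; a standalone sketch of that variant follows. The TelnetOption class here is a stand-in written for the demo, not the library's own class.

class TelnetOption:
    def __init__(self):
        self.local_option = None
        self.remote_option = None

telnet_opt_dict = {}

def note_remote_option(option, state):
    # setdefault inserts a fresh TelnetOption the first time an option is seen
    telnet_opt_dict.setdefault(option, TelnetOption()).remote_option = state

note_remote_option(1, True)                # option 1 (ECHO), for example
print(telnet_opt_dict[1].remote_option)    # True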
def fsqrt(q): ''' given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one. ''' if q == 0: return q, 1 if q < 0: raise ValueError('math domain error %s' % q) a, b = isqrt(q.numerator) c, d = isqrt(q.denominator) # q == (a/c)**2 * (b/d) == (a/(c*d))**2 * b*d return Fraction(a, c * d), b * d
given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one.
Below is the the instruction that describes the task: ### Input: given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one. ### Response: def fsqrt(q): ''' given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one. ''' if q == 0: return q, 1 if q < 0: raise ValueError('math domain error %s' % q) a, b = isqrt(q.numerator) c, d = isqrt(q.denominator) # q == (a/c)**2 * (b/d) == (a/(c*d))**2 * b*d return Fraction(a, c * d), b * d
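fsqrt above leans on an isqrt(n) helper that must return a pair (a, b) with n == a*a*b and b square-free. A small trial-division reconstruction of that helper, plus a check of the identity, is sketched below; these definitions are illustrative guesses, not the original module's code.

from fractions import Fraction

def isqrt(n):
    # write n as a*a*b with b square-free (trial division; fine for small n)
    a, b, p = 1, 1, 2
    while p * p <= n:
        exp = 0
        while n % p == 0:
            n //= p
            exp += 1
        a *= p ** (exp // 2)
        b *= p ** (exp % 2)
        p += 1
    b *= n          # any leftover factor is prime, hence square-free
    return a, b

def fsqrt(q):
    if q == 0:
        return q, 1
    if q < 0:
        raise ValueError('math domain error %s' % q)
    a, b = isqrt(q.numerator)
    c, d = isqrt(q.denominator)
    return Fraction(a, c * d), b * d

root, rest = fsqrt(Fraction(8, 9))
print(root, rest)                             # 2/3 2
print(root * root * rest == Fraction(8, 9))   # True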
def gmtime_adj_notBefore(self, amount): """ Adjust the timestamp on which the certificate starts being valid. :param amount: The number of seconds by which to adjust the timestamp. :return: ``None`` """ if not isinstance(amount, int): raise TypeError("amount must be an integer") notBefore = _lib.X509_get_notBefore(self._x509) _lib.X509_gmtime_adj(notBefore, amount)
Adjust the timestamp on which the certificate starts being valid. :param amount: The number of seconds by which to adjust the timestamp. :return: ``None``
Below is the the instruction that describes the task: ### Input: Adjust the timestamp on which the certificate starts being valid. :param amount: The number of seconds by which to adjust the timestamp. :return: ``None`` ### Response: def gmtime_adj_notBefore(self, amount): """ Adjust the timestamp on which the certificate starts being valid. :param amount: The number of seconds by which to adjust the timestamp. :return: ``None`` """ if not isinstance(amount, int): raise TypeError("amount must be an integer") notBefore = _lib.X509_get_notBefore(self._x509) _lib.X509_gmtime_adj(notBefore, amount)
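For context, a typical use of gmtime_adj_notBefore is while assembling a self-signed certificate. The sketch below uses pyOpenSSL's public API, but the subject name and lifetime are arbitrary example values, not taken from the original project.

from OpenSSL import crypto

key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)

cert = crypto.X509()
cert.get_subject().CN = 'example.test'
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.gmtime_adj_notBefore(0)                    # valid from "now"
cert.gmtime_adj_notAfter(365 * 24 * 60 * 60)    # valid for one year
cert.sign(key, 'sha256')

print(cert.get_notBefore(), cert.get_notAfter())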
def _get_conn(opts, profile=None): ''' Establish a connection to etcd ''' if profile is None: profile = opts.get('etcd.returner') path = opts.get('etcd.returner_root', '/salt/return') return salt.utils.etcd_util.get_conn(opts, profile), path
Establish a connection to etcd
Below is the the instruction that describes the task: ### Input: Establish a connection to etcd ### Response: def _get_conn(opts, profile=None): ''' Establish a connection to etcd ''' if profile is None: profile = opts.get('etcd.returner') path = opts.get('etcd.returner_root', '/salt/return') return salt.utils.etcd_util.get_conn(opts, profile), path
def __impl_read_chain(self, start, read_sector_f, read_fat_f):
    """Returns the entire contents of a chain starting at the given sector."""
    sector = start
    check = [ sector ] # keep a list of sectors we've already read
    buffer = StringIO()
    while sector != ENDOFCHAIN:
        buffer.write(read_sector_f(sector))
        next_sector = read_fat_f(sector)
        if next_sector in check:
            logging.error('infinite loop detected at {0} to {1} starting at {2}'.format(
                sector, next_sector, start))
            return buffer.getvalue()
        check.append(next_sector)
        sector = next_sector
    return buffer.getvalue()
Returns the entire contents of a chain starting at the given sector.
Below is the instruction that describes the task:
### Input:
Returns the entire contents of a chain starting at the given sector.
### Response:
def __impl_read_chain(self, start, read_sector_f, read_fat_f):
    """Returns the entire contents of a chain starting at the given sector."""
    sector = start
    check = [ sector ] # keep a list of sectors we've already read
    buffer = StringIO()
    while sector != ENDOFCHAIN:
        buffer.write(read_sector_f(sector))
        next_sector = read_fat_f(sector)
        if next_sector in check:
            logging.error('infinite loop detected at {0} to {1} starting at {2}'.format(
                sector, next_sector, start))
            return buffer.getvalue()
        check.append(next_sector)
        sector = next_sector
    return buffer.getvalue()
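Stripped of the compound-document details, the chain walk above is "follow the FAT until the end-of-chain marker, refusing to revisit a sector". A toy, dependency-free version is sketched here; the sector contents, FAT table and ENDOFCHAIN value are all invented for the demo.

from io import BytesIO

ENDOFCHAIN = -2
sectors = {0: b'AAAA', 1: b'BBBB', 2: b'CCCC'}
fat = {0: 1, 1: 2, 2: ENDOFCHAIN}    # chain: 0 -> 1 -> 2 -> end

def read_chain(start):
    sector, seen, buf = start, set(), BytesIO()
    while sector != ENDOFCHAIN:
        if sector in seen:           # corrupt, looping FAT -- bail out
            break
        seen.add(sector)
        buf.write(sectors[sector])
        sector = fat[sector]
    return buf.getvalue()

print(read_chain(0))   # b'AAAABBBBCCCC'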
def _iterate_fields_cond(self, pkt, val, use_val): """Internal function used by _find_fld_pkt & _find_fld_pkt_val""" # Iterate through the fields for fld, cond in self.flds: if isinstance(cond, tuple): if use_val: if cond[1](pkt, val): return fld continue else: cond = cond[0] if cond(pkt): return fld return self.dflt
Internal function used by _find_fld_pkt & _find_fld_pkt_val
Below is the the instruction that describes the task: ### Input: Internal function used by _find_fld_pkt & _find_fld_pkt_val ### Response: def _iterate_fields_cond(self, pkt, val, use_val): """Internal function used by _find_fld_pkt & _find_fld_pkt_val""" # Iterate through the fields for fld, cond in self.flds: if isinstance(cond, tuple): if use_val: if cond[1](pkt, val): return fld continue else: cond = cond[0] if cond(pkt): return fld return self.dflt
def wait_for_success(self, interval=1): """ Wait for instance to complete, and check if the instance is successful. :param interval: time interval to check :return: None :raise: :class:`odps.errors.ODPSError` if the instance failed """ self.wait_for_completion(interval=interval) if not self.is_successful(retry=True): for task_name, task in six.iteritems(self.get_task_statuses()): exc = None if task.status == Instance.Task.TaskStatus.FAILED: exc = errors.parse_instance_error(self.get_task_result(task_name)) elif task.status != Instance.Task.TaskStatus.SUCCESS: exc = errors.ODPSError('%s, status=%s' % (task_name, task.status.value)) if exc: exc.instance_id = self.id raise exc
Wait for instance to complete, and check if the instance is successful. :param interval: time interval to check :return: None :raise: :class:`odps.errors.ODPSError` if the instance failed
Below is the the instruction that describes the task: ### Input: Wait for instance to complete, and check if the instance is successful. :param interval: time interval to check :return: None :raise: :class:`odps.errors.ODPSError` if the instance failed ### Response: def wait_for_success(self, interval=1): """ Wait for instance to complete, and check if the instance is successful. :param interval: time interval to check :return: None :raise: :class:`odps.errors.ODPSError` if the instance failed """ self.wait_for_completion(interval=interval) if not self.is_successful(retry=True): for task_name, task in six.iteritems(self.get_task_statuses()): exc = None if task.status == Instance.Task.TaskStatus.FAILED: exc = errors.parse_instance_error(self.get_task_result(task_name)) elif task.status != Instance.Task.TaskStatus.SUCCESS: exc = errors.ODPSError('%s, status=%s' % (task_name, task.status.value)) if exc: exc.instance_id = self.id raise exc
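The wait-then-inspect pattern above reduces to a polling loop followed by a per-task status check. Here is a generic sketch with a fake job object standing in for the ODPS instance; the method names on _FakeJob are hypothetical, chosen only to make the snippet runnable.

import time

def wait_for_success(job, interval=1, timeout=300):
    deadline = time.time() + timeout
    while not job.is_done():                     # poll until completion
        if time.time() > deadline:
            raise TimeoutError('job did not finish within %s seconds' % timeout)
        time.sleep(interval)
    for name, status in job.task_statuses().items():
        if status != 'SUCCESS':                  # surface the first failed task
            raise RuntimeError('%s finished with status %s' % (name, status))

class _FakeJob:
    def __init__(self):
        self._polls = 0
    def is_done(self):
        self._polls += 1
        return self._polls >= 3
    def task_statuses(self):
        return {'sql_task': 'SUCCESS'}

wait_for_success(_FakeJob(), interval=0.01)
print('all tasks succeeded')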
def do(cmdline=None, runas=None): ''' Execute a python command with pyenv's shims from the user or the system. CLI Example: .. code-block:: bash salt '*' pyenv.do 'gem list bundler' salt '*' pyenv.do 'gem list bundler' deploy ''' path = _pyenv_path(runas) cmd_split = cmdline.split() quoted_line = '' for cmd in cmd_split: quoted_line = quoted_line + ' ' + _cmd_quote(cmd) result = __salt__['cmd.run_all']( 'env PATH={0}/shims:$PATH {1}'.format(_cmd_quote(path), quoted_line), runas=runas, python_shell=True ) if result['retcode'] == 0: rehash(runas=runas) return result['stdout'] else: return False
Execute a python command with pyenv's shims from the user or the system. CLI Example: .. code-block:: bash salt '*' pyenv.do 'gem list bundler' salt '*' pyenv.do 'gem list bundler' deploy
Below is the the instruction that describes the task: ### Input: Execute a python command with pyenv's shims from the user or the system. CLI Example: .. code-block:: bash salt '*' pyenv.do 'gem list bundler' salt '*' pyenv.do 'gem list bundler' deploy ### Response: def do(cmdline=None, runas=None): ''' Execute a python command with pyenv's shims from the user or the system. CLI Example: .. code-block:: bash salt '*' pyenv.do 'gem list bundler' salt '*' pyenv.do 'gem list bundler' deploy ''' path = _pyenv_path(runas) cmd_split = cmdline.split() quoted_line = '' for cmd in cmd_split: quoted_line = quoted_line + ' ' + _cmd_quote(cmd) result = __salt__['cmd.run_all']( 'env PATH={0}/shims:$PATH {1}'.format(_cmd_quote(path), quoted_line), runas=runas, python_shell=True ) if result['retcode'] == 0: rehash(runas=runas) return result['stdout'] else: return False
def _handle_response(response):
    """Internal helper for handling API responses from the Kucoin server.
    Raises the appropriate exceptions when necessary; otherwise, returns the
    response.
    """

    if not str(response.status_code).startswith('2'):
        raise KucoinAPIException(response)
    try:
        res = response.json()

        if 'code' in res and res['code'] != "200000":
            raise KucoinAPIException(response)

        if 'success' in res and not res['success']:
            raise KucoinAPIException(response)

        # by default return full response
        # if it's a normal response we have a data attribute, return that
        if 'data' in res:
            res = res['data']
        return res
    except ValueError:
        raise KucoinRequestException('Invalid Response: %s' % response.text)
Internal helper for handling API responses from the Kucoin server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
Below is the instruction that describes the task:
### Input:
Internal helper for handling API responses from the Kucoin server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
### Response:
def _handle_response(response):
    """Internal helper for handling API responses from the Kucoin server.
    Raises the appropriate exceptions when necessary; otherwise, returns the
    response.
    """

    if not str(response.status_code).startswith('2'):
        raise KucoinAPIException(response)
    try:
        res = response.json()

        if 'code' in res and res['code'] != "200000":
            raise KucoinAPIException(response)

        if 'success' in res and not res['success']:
            raise KucoinAPIException(response)

        # by default return full response
        # if it's a normal response we have a data attribute, return that
        if 'data' in res:
            res = res['data']
        return res
    except ValueError:
        raise KucoinRequestException('Invalid Response: %s' % response.text)
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file.
Below is the the instruction that describes the task: ### Input: Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. ### Response: def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
def read_cache(stream):
    """Read a cache file from the given stream
    :return: tuple(version, entries_dict, extension_data, content_sha)
        * version is the integer version number
        * entries dict is a dictionary which maps IndexEntry instances to a path at a stage
        * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
        * content_sha is a 20 byte sha on all cache file contents"""
    version, num_entries = read_header(stream)
    count = 0
    entries = {}

    read = stream.read
    tell = stream.tell
    while count < num_entries:
        beginoffset = tell()
        ctime = unpack(">8s", read(8))[0]
        mtime = unpack(">8s", read(8))[0]
        (dev, ino, mode, uid, gid, size, sha, flags) = \
            unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
        path_size = flags & CE_NAMEMASK
        path = read(path_size).decode(defenc)

        real_size = ((tell() - beginoffset + 8) & ~7)
        read((beginoffset + real_size) - tell())
        entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
        # entry_key would be the method to use, but we save the effort
        entries[(path, entry.stage)] = entry
        count += 1
    # END for each entry

    # the footer contains extension data and a sha on the content so far
    # Keep the extension footer, and verify we have a sha in the end
    # Extension data format is:
    #   4 bytes ID
    #   4 bytes length of chunk
    #   repeated 0 - N times
    extension_data = stream.read(~0)
    assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size"\
        % len(extension_data)

    content_sha = extension_data[-20:]

    # truncate the sha in the end as we will dynamically create it anyway
    extension_data = extension_data[:-20]

    return (version, entries, extension_data, content_sha)
Read a cache file from the given stream :return: tuple(version, entries_dict, extension_data, content_sha) * version is the integer version number * entries dict is a dictionary which maps IndexEntry instances to a path at a stage * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes * content_sha is a 20 byte sha on all cache file contents
Below is the instruction that describes the task:
### Input:
Read a cache file from the given stream
:return: tuple(version, entries_dict, extension_data, content_sha)
    * version is the integer version number
    * entries dict is a dictionary which maps IndexEntry instances to a path at a stage
    * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
    * content_sha is a 20 byte sha on all cache file contents
### Response:
def read_cache(stream):
    """Read a cache file from the given stream
    :return: tuple(version, entries_dict, extension_data, content_sha)
        * version is the integer version number
        * entries dict is a dictionary which maps IndexEntry instances to a path at a stage
        * extension_data is '' or 4 bytes of type + 4 bytes of size + size bytes
        * content_sha is a 20 byte sha on all cache file contents"""
    version, num_entries = read_header(stream)
    count = 0
    entries = {}

    read = stream.read
    tell = stream.tell
    while count < num_entries:
        beginoffset = tell()
        ctime = unpack(">8s", read(8))[0]
        mtime = unpack(">8s", read(8))[0]
        (dev, ino, mode, uid, gid, size, sha, flags) = \
            unpack(">LLLLLL20sH", read(20 + 4 * 6 + 2))
        path_size = flags & CE_NAMEMASK
        path = read(path_size).decode(defenc)

        real_size = ((tell() - beginoffset + 8) & ~7)
        read((beginoffset + real_size) - tell())
        entry = IndexEntry((mode, sha, flags, path, ctime, mtime, dev, ino, uid, gid, size))
        # entry_key would be the method to use, but we save the effort
        entries[(path, entry.stage)] = entry
        count += 1
    # END for each entry

    # the footer contains extension data and a sha on the content so far
    # Keep the extension footer, and verify we have a sha in the end
    # Extension data format is:
    #   4 bytes ID
    #   4 bytes length of chunk
    #   repeated 0 - N times
    extension_data = stream.read(~0)
    assert len(extension_data) > 19, "Index Footer was not at least a sha on content as it was only %i bytes in size"\
        % len(extension_data)

    content_sha = extension_data[-20:]

    # truncate the sha in the end as we will dynamically create it anyway
    extension_data = extension_data[:-20]

    return (version, entries, extension_data, content_sha)
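The fixed-width header that read_header consumes (and that read_cache depends on) is easy to inspect with struct on its own. The bytes below are built by hand for the demo rather than read from a real .git/index file.

import struct

# 12-byte index header: 4-byte signature, 4-byte version, 4-byte entry count
header = b'DIRC' + struct.pack('>LL', 2, 3)

signature, version, num_entries = struct.unpack('>4sLL', header)
print(signature, version, num_entries)   # b'DIRC' 2 3
assert signature == b'DIRC' and version in (1, 2, 3)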
def _nucmer_hits_to_assembly_coords(nucmer_hits): '''Input is hits made by self._parse_nucmer_coords_file. Returns dictionary. key = contig name. Value = list of coords that match to the reference gene''' coords = {} for l in nucmer_hits.values(): for hit in l: if hit.qry_name not in coords: coords[hit.qry_name] = [] coords[hit.qry_name].append(hit.qry_coords()) for scaff in coords: pyfastaq.intervals.merge_overlapping_in_list(coords[scaff]) return coords
Input is hits made by self._parse_nucmer_coords_file. Returns dictionary. key = contig name. Value = list of coords that match to the reference gene
Below is the the instruction that describes the task: ### Input: Input is hits made by self._parse_nucmer_coords_file. Returns dictionary. key = contig name. Value = list of coords that match to the reference gene ### Response: def _nucmer_hits_to_assembly_coords(nucmer_hits): '''Input is hits made by self._parse_nucmer_coords_file. Returns dictionary. key = contig name. Value = list of coords that match to the reference gene''' coords = {} for l in nucmer_hits.values(): for hit in l: if hit.qry_name not in coords: coords[hit.qry_name] = [] coords[hit.qry_name].append(hit.qry_coords()) for scaff in coords: pyfastaq.intervals.merge_overlapping_in_list(coords[scaff]) return coords
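The per-contig clean-up step above is interval merging. A dependency-free version of that merge is sketched here; pyfastaq is not needed for the idea. Intervals are (start, end) tuples with inclusive endpoints, and adjacent runs are merged as well.

def merge_overlapping(intervals):
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1] + 1:
            # overlaps or touches the previous interval: extend it
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

print(merge_overlapping([(10, 20), (15, 30), (40, 50)]))   # [(10, 30), (40, 50)]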
def encode(self, word, terminator='\0'): r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa' """ if word: if terminator in word: raise ValueError( 'Specified terminator, {}, already in word.'.format( terminator if terminator != '\0' else '\\0' ) ) else: word += terminator wordlist = sorted( word[i:] + word[:i] for i in range(len(word)) ) return ''.join([w[-1] for w in wordlist]) else: return terminator
r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa'
Below is the the instruction that describes the task: ### Input: r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa' ### Response: def encode(self, word, terminator='\0'): r"""Return the Burrows-Wheeler transformed form of a word. Parameters ---------- word : str The word to transform using BWT terminator : str A character added to signal the end of the string Returns ------- str Word encoded by BWT Raises ------ ValueError Specified terminator absent from code. Examples -------- >>> bwt = BWT() >>> bwt.encode('align') 'n\x00ilag' >>> bwt.encode('banana') 'annb\x00aa' >>> bwt.encode('banana', '@') 'annb@aa' """ if word: if terminator in word: raise ValueError( 'Specified terminator, {}, already in word.'.format( terminator if terminator != '\0' else '\\0' ) ) else: word += terminator wordlist = sorted( word[i:] + word[:i] for i in range(len(word)) ) return ''.join([w[-1] for w in wordlist]) else: return terminator
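The rotation-and-sort idea behind encode can be checked by hand; this snippet reproduces the 'banana' example from the docstring without the class wrapper.

word = 'banana' + '\0'                                   # terminator appended, as in encode()
rotations = sorted(word[i:] + word[:i] for i in range(len(word)))
bwt = ''.join(r[-1] for r in rotations)                  # last column of the sorted rotations
print(repr(bwt))                                         # 'annb\x00aa'
assert bwt == 'annb\0aa'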
def setup_panel_params(self, coord): """ Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate """ if not self.panel_scales_x: raise PlotnineError('Missing an x scale') if not self.panel_scales_y: raise PlotnineError('Missing a y scale') self.panel_params = [] cols = ['SCALE_X', 'SCALE_Y'] for i, j in self.layout[cols].itertuples(index=False): i, j = i-1, j-1 params = coord.setup_panel_params( self.panel_scales_x[i], self.panel_scales_y[j]) self.panel_params.append(params)
Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate
Below is the the instruction that describes the task: ### Input: Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate ### Response: def setup_panel_params(self, coord): """ Calculate the x & y range & breaks information for each panel Parameters ---------- coord : coord Coordinate """ if not self.panel_scales_x: raise PlotnineError('Missing an x scale') if not self.panel_scales_y: raise PlotnineError('Missing a y scale') self.panel_params = [] cols = ['SCALE_X', 'SCALE_Y'] for i, j in self.layout[cols].itertuples(index=False): i, j = i-1, j-1 params = coord.setup_panel_params( self.panel_scales_x[i], self.panel_scales_y[j]) self.panel_params.append(params)
def fastas(self, download=False): """ Dict of filepaths for all fasta files associated with code. Parameters ---------- download : bool If True, downloads the fasta file from the PDB. If False, uses the ampal Protein.fasta property Defaults to False - this is definitely the recommended behaviour. Notes ----- Calls self.mmols, and so downloads mmol files if not already present. See .fasta property of isambard.ampal.base_ampal.Protein for more information. Returns ------- fastas_dict : dict, or None. Keys : int mmol number Values : str Filepath for the corresponding fasta file. """ fastas_dict = {} fasta_dir = os.path.join(self.parent_dir, 'fasta') if not os.path.exists(fasta_dir): os.makedirs(fasta_dir) for i, mmol_file in self.mmols.items(): mmol_name = os.path.basename(mmol_file) fasta_file_name = '{0}.fasta'.format(mmol_name) fasta_file = os.path.join(fasta_dir, fasta_file_name) if not os.path.exists(fasta_file): if download: pdb_url = "http://www.rcsb.org/pdb/files/fasta.txt?structureIdList={0}".format(self.code.upper()) r = requests.get(pdb_url) if r.status_code == 200: fasta_string = r.text else: fasta_string = None else: a = convert_pdb_to_ampal(mmol_file) # take first object if AmpalContainer (i.e. NMR structure). if type(a) == AmpalContainer: a = a[0] fasta_string = a.fasta with open(fasta_file, 'w') as foo: foo.write(fasta_string) fastas_dict[i] = fasta_file return fastas_dict
Dict of filepaths for all fasta files associated with code. Parameters ---------- download : bool If True, downloads the fasta file from the PDB. If False, uses the ampal Protein.fasta property Defaults to False - this is definitely the recommended behaviour. Notes ----- Calls self.mmols, and so downloads mmol files if not already present. See .fasta property of isambard.ampal.base_ampal.Protein for more information. Returns ------- fastas_dict : dict, or None. Keys : int mmol number Values : str Filepath for the corresponding fasta file.
Below is the the instruction that describes the task: ### Input: Dict of filepaths for all fasta files associated with code. Parameters ---------- download : bool If True, downloads the fasta file from the PDB. If False, uses the ampal Protein.fasta property Defaults to False - this is definitely the recommended behaviour. Notes ----- Calls self.mmols, and so downloads mmol files if not already present. See .fasta property of isambard.ampal.base_ampal.Protein for more information. Returns ------- fastas_dict : dict, or None. Keys : int mmol number Values : str Filepath for the corresponding fasta file. ### Response: def fastas(self, download=False): """ Dict of filepaths for all fasta files associated with code. Parameters ---------- download : bool If True, downloads the fasta file from the PDB. If False, uses the ampal Protein.fasta property Defaults to False - this is definitely the recommended behaviour. Notes ----- Calls self.mmols, and so downloads mmol files if not already present. See .fasta property of isambard.ampal.base_ampal.Protein for more information. Returns ------- fastas_dict : dict, or None. Keys : int mmol number Values : str Filepath for the corresponding fasta file. """ fastas_dict = {} fasta_dir = os.path.join(self.parent_dir, 'fasta') if not os.path.exists(fasta_dir): os.makedirs(fasta_dir) for i, mmol_file in self.mmols.items(): mmol_name = os.path.basename(mmol_file) fasta_file_name = '{0}.fasta'.format(mmol_name) fasta_file = os.path.join(fasta_dir, fasta_file_name) if not os.path.exists(fasta_file): if download: pdb_url = "http://www.rcsb.org/pdb/files/fasta.txt?structureIdList={0}".format(self.code.upper()) r = requests.get(pdb_url) if r.status_code == 200: fasta_string = r.text else: fasta_string = None else: a = convert_pdb_to_ampal(mmol_file) # take first object if AmpalContainer (i.e. NMR structure). if type(a) == AmpalContainer: a = a[0] fasta_string = a.fasta with open(fasta_file, 'w') as foo: foo.write(fasta_string) fastas_dict[i] = fasta_file return fastas_dict
def keyPressEvent( self, event ): """ Overloads the key press event to listen for escape calls to cancel the parts editing. :param event | <QKeyPressEvent> """ if ( self.scrollWidget().isHidden() ): if ( event.key() == Qt.Key_Escape ): self.cancelEdit() return elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ): self.acceptEdit() return elif ( event.key() == Qt.Key_A and event.modifiers() == Qt.ControlModifier ): self.startEdit() super(XNavigationEdit, self).keyPressEvent(event)
Overloads the key press event to listen for escape calls to cancel the parts editing. :param event | <QKeyPressEvent>
Below is the the instruction that describes the task: ### Input: Overloads the key press event to listen for escape calls to cancel the parts editing. :param event | <QKeyPressEvent> ### Response: def keyPressEvent( self, event ): """ Overloads the key press event to listen for escape calls to cancel the parts editing. :param event | <QKeyPressEvent> """ if ( self.scrollWidget().isHidden() ): if ( event.key() == Qt.Key_Escape ): self.cancelEdit() return elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ): self.acceptEdit() return elif ( event.key() == Qt.Key_A and event.modifiers() == Qt.ControlModifier ): self.startEdit() super(XNavigationEdit, self).keyPressEvent(event)
def format(self, pattern='{head}{padding}{tail} [{ranges}]'): '''Return string representation as specified by *pattern*. Pattern can be any format accepted by Python's standard format function and will receive the following keyword arguments as context: * *head* - Common leading part of the collection. * *tail* - Common trailing part of the collection. * *padding* - Padding value in ``%0d`` format. * *range* - Total range in the form ``start-end`` * *ranges* - Comma separated ranges of indexes. * *holes* - Comma separated ranges of missing indexes. ''' data = {} data['head'] = self.head data['tail'] = self.tail if self.padding: data['padding'] = '%0{0}d'.format(self.padding) else: data['padding'] = '%d' if '{holes}' in pattern: data['holes'] = self.holes().format('{ranges}') if '{range}' in pattern or '{ranges}' in pattern: indexes = list(self.indexes) indexes_count = len(indexes) if indexes_count == 0: data['range'] = '' elif indexes_count == 1: data['range'] = '{0}'.format(indexes[0]) else: data['range'] = '{0}-{1}'.format( indexes[0], indexes[-1] ) if '{ranges}' in pattern: separated = self.separate() if len(separated) > 1: ranges = [collection.format('{range}') for collection in separated] else: ranges = [data['range']] data['ranges'] = ', '.join(ranges) return pattern.format(**data)
Return string representation as specified by *pattern*. Pattern can be any format accepted by Python's standard format function and will receive the following keyword arguments as context: * *head* - Common leading part of the collection. * *tail* - Common trailing part of the collection. * *padding* - Padding value in ``%0d`` format. * *range* - Total range in the form ``start-end`` * *ranges* - Comma separated ranges of indexes. * *holes* - Comma separated ranges of missing indexes.
Below is the the instruction that describes the task: ### Input: Return string representation as specified by *pattern*. Pattern can be any format accepted by Python's standard format function and will receive the following keyword arguments as context: * *head* - Common leading part of the collection. * *tail* - Common trailing part of the collection. * *padding* - Padding value in ``%0d`` format. * *range* - Total range in the form ``start-end`` * *ranges* - Comma separated ranges of indexes. * *holes* - Comma separated ranges of missing indexes. ### Response: def format(self, pattern='{head}{padding}{tail} [{ranges}]'): '''Return string representation as specified by *pattern*. Pattern can be any format accepted by Python's standard format function and will receive the following keyword arguments as context: * *head* - Common leading part of the collection. * *tail* - Common trailing part of the collection. * *padding* - Padding value in ``%0d`` format. * *range* - Total range in the form ``start-end`` * *ranges* - Comma separated ranges of indexes. * *holes* - Comma separated ranges of missing indexes. ''' data = {} data['head'] = self.head data['tail'] = self.tail if self.padding: data['padding'] = '%0{0}d'.format(self.padding) else: data['padding'] = '%d' if '{holes}' in pattern: data['holes'] = self.holes().format('{ranges}') if '{range}' in pattern or '{ranges}' in pattern: indexes = list(self.indexes) indexes_count = len(indexes) if indexes_count == 0: data['range'] = '' elif indexes_count == 1: data['range'] = '{0}'.format(indexes[0]) else: data['range'] = '{0}-{1}'.format( indexes[0], indexes[-1] ) if '{ranges}' in pattern: separated = self.separate() if len(separated) > 1: ranges = [collection.format('{range}') for collection in separated] else: ranges = [data['range']] data['ranges'] = ', '.join(ranges) return pattern.format(**data)
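The '{ranges}' part of the pattern amounts to collapsing consecutive runs in the sorted indexes. A standalone sketch of that single step follows; the real library does it via separate(), so this is only illustrative and assumes a non-empty, sorted, duplicate-free list.

def collapse(indexes):
    runs, start = [], None
    for prev, current in zip([None] + indexes, indexes):
        if start is None:
            start = current
        elif current != prev + 1:          # gap: close the current run
            runs.append((start, prev))
            start = current
    runs.append((start, indexes[-1]))
    return ', '.join('{0}'.format(a) if a == b else '{0}-{1}'.format(a, b)
                     for a, b in runs)

print(collapse([1, 2, 3, 5, 8, 9]))   # 1-3, 5, 8-9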
def graph(self, ig): """Specify the node and edge data. :param ig: Graph with node and edge attributes. :type ig: NetworkX graph or an IGraph graph. :returns: Plotter. :rtype: Plotter. """ res = copy.copy(self) res._edges = ig res._nodes = None return res
Specify the node and edge data. :param ig: Graph with node and edge attributes. :type ig: NetworkX graph or an IGraph graph. :returns: Plotter. :rtype: Plotter.
Below is the the instruction that describes the task: ### Input: Specify the node and edge data. :param ig: Graph with node and edge attributes. :type ig: NetworkX graph or an IGraph graph. :returns: Plotter. :rtype: Plotter. ### Response: def graph(self, ig): """Specify the node and edge data. :param ig: Graph with node and edge attributes. :type ig: NetworkX graph or an IGraph graph. :returns: Plotter. :rtype: Plotter. """ res = copy.copy(self) res._edges = ig res._nodes = None return res
def pages_siblings_menu(context, page, url='/'): """Get the parent page of the given page and render a nested list of its child pages. Good for rendering a secondary menu. :param page: the page where to start the menu from. :param url: not used anymore. """ lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE) page = get_page_from_string_or_id(page, lang) if page: siblings = page.get_siblings() context.update({'children': siblings, 'page': page}) return context
Get the parent page of the given page and render a nested list of its child pages. Good for rendering a secondary menu. :param page: the page where to start the menu from. :param url: not used anymore.
Below is the the instruction that describes the task: ### Input: Get the parent page of the given page and render a nested list of its child pages. Good for rendering a secondary menu. :param page: the page where to start the menu from. :param url: not used anymore. ### Response: def pages_siblings_menu(context, page, url='/'): """Get the parent page of the given page and render a nested list of its child pages. Good for rendering a secondary menu. :param page: the page where to start the menu from. :param url: not used anymore. """ lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE) page = get_page_from_string_or_id(page, lang) if page: siblings = page.get_siblings() context.update({'children': siblings, 'page': page}) return context