Dataset schema: three string columns; code (lengths 75 to 104k), docstring (lengths 1 to 46.9k), and text (lengths 164 to 112k), where text wraps each docstring/code pair in an instruction prompt template.
def files(self) -> List[str]:
    """
    Obtain the list of the files (excluding .git directory).

    :return: List[str], the list of the files
    """
    _all = []
    for path, _, files in os.walk(str(self.path)):
        if '.git' in path:
            continue
        for name in files:
            _all.append(os.path.join(path, name))
    return _all
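A note on the filter: `'.git' in path` is a substring test, so it would also skip a directory named, say, `my.git-tools`. A standalone sketch of the same walk with a component-wise check (the stricter test is my assumption, not the original author's):

import os
from typing import List

def list_files(root: str) -> List[str]:
    """Collect every file under root, skipping anything inside a .git directory."""
    collected: List[str] = []
    for path, _, files in os.walk(root):
        # Compare path components rather than raw substrings, so only
        # real .git directories are skipped.
        if '.git' in path.split(os.sep):
            continue
        for name in files:
            collected.append(os.path.join(path, name))
    return collected

print(list_files('.'))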
def run(self):
    """Run command."""
    files = self.__zipped_files_data
    hashes = {}
    icons = {}

    # Read icons.json (from the webfont zip download)
    data = json.loads(files['icons.json'])

    # Group icons by style, since not all icons exist for all styles:
    for icon, info in data.iteritems():
        for style in info['styles']:
            icons.setdefault(str(style), {})
            icons[str(style)][icon] = str(info['unicode'])

    # For every FA "style":
    for style, details in icons.iteritems():
        # Dump a .json charmap file:
        charmapPath = self.__get_charmap_path(style)
        self.__print('Dumping updated "%s" charmap: %s' % (style, charmapPath))
        with open(charmapPath, 'w+') as f:
            json.dump(details, f, indent=4, sort_keys=True)

        # Dump a .ttf font file:
        font_path = self.__get_ttf_path(style)
        data = files[style]
        self.__print('Dumping updated "%s" font: %s' % (style, font_path))
        with open(font_path, 'w+') as f:
            f.write(data)

        # Fix to prevent repeated font names:
        if style in ('regular', 'solid'):
            new_name = str("Font Awesome 5 Free %s") % style.title()
            self.__print('Renaming font to "%s" in: %s' % (new_name, font_path))
            if ttLib is not None:
                rename_font(font_path, new_name)
            else:
                sys.exit(
                    "This special command requires the module 'fonttools': "
                    "https://github.com/fonttools/fonttools/")

            # Reread the data since we just edited the font file:
            with open(font_path, 'rb') as f:
                data = f.read()
                files[style] = data

        # Store hashes for later:
        hashes[style] = hashlib.md5(data).hexdigest()

    # Now it's time to patch "iconic_font.py":
    iconic_path = self.ICONIC_FONT_PY_PATH
    self.__print('Patching new MD5 hashes in: %s' % iconic_path)
    with open(iconic_path, 'r') as iconic_file:
        contents = iconic_file.read()

    # We read it in full, then use regex substitution:
    for style, md5 in hashes.iteritems():
        self.__print('New "%s" hash is: %s' % (style, md5))
        regex = r"('fontawesome5-%s-webfont.ttf':\s+)'(\w+)'" % style
        subst = r"\g<1>'" + md5 + "'"
        contents = re.sub(regex, subst, contents, 1)

    # and finally overwrite with the modified file:
    self.__print('Dumping updated file: %s' % iconic_path)
    with open(iconic_path, 'w') as iconic_file:
        iconic_file.write(contents)

    self.__print(
        '\nFinished!\n'
        'Please check the git diff to make sure everything went okay.\n'
        'You should also edit README.md and '
        'qtawesome/docs/source/usage.rst to reflect the changes.')
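The hash-patching step is the least obvious part: it rewrites a quoted MD5 literal in source code by capturing everything up to the old hash and splicing in the new one. A minimal standalone reproduction of that substitution, with a made-up hash and file snippet:

import re

# A made-up excerpt standing in for iconic_font.py's hash table.
contents = "'fontawesome5-solid-webfont.ttf':  '0123456789abcdef0123456789abcdef',"
style, md5 = 'solid', 'feedfacefeedfacefeedfacefeedface'

# Group 1 keeps the key and whitespace; the quoted hash is replaced.
regex = r"('fontawesome5-%s-webfont.ttf':\s+)'(\w+)'" % style
subst = r"\g<1>'" + md5 + "'"
print(re.sub(regex, subst, contents, 1))
# 'fontawesome5-solid-webfont.ttf':  'feedfacefeedfacefeedfacefeedface',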
def _sortValue_isItalic(font):
    """
    Returns 0 if the font is italic.
    Returns 1 if the font is not italic.
    """
    info = font.info
    styleMapStyleName = info.styleMapStyleName
    if styleMapStyleName is not None and "italic" in styleMapStyleName:
        return 0
    if info.italicAngle not in (None, 0):
        return 0
    return 1
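Returning 0 for the matching case makes this directly usable as a sort key, so italic fonts sort ahead of upright ones while ties keep their original order. A sketch with stand-in font objects (the real ones come from the font library's API):

from types import SimpleNamespace

def _sortValue_isItalic(font):
    info = font.info
    if info.styleMapStyleName is not None and "italic" in info.styleMapStyleName:
        return 0
    if info.italicAngle not in (None, 0):
        return 0
    return 1

# Stand-in fonts: italic by angle, italic by style name, and upright.
fonts = [
    SimpleNamespace(name="Upright", info=SimpleNamespace(styleMapStyleName="regular", italicAngle=0)),
    SimpleNamespace(name="Slanted", info=SimpleNamespace(styleMapStyleName=None, italicAngle=-12)),
    SimpleNamespace(name="Italic", info=SimpleNamespace(styleMapStyleName="italic", italicAngle=None)),
]
print([f.name for f in sorted(fonts, key=_sortValue_isItalic)])
# ['Slanted', 'Italic', 'Upright']: italics first, stable within ties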
def load(self):
    """
    Loads the records from the query set linked with this item.
    """
    if self._loaded:
        return

    rset = self.recordSet()
    QApplication.setOverrideCursor(Qt.WaitCursor)
    self.loadRecords(rset)
    QApplication.restoreOverrideCursor()
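One caveat worth noting: if `loadRecords` raises, `restoreOverrideCursor` is never reached and the application stays stuck on the wait cursor. A defensive variant of the same method (same Qt calls and context as above, just wrapped in try/finally) would be:

def load(self):
    """
    Loads the records from the query set linked with this item.
    """
    if self._loaded:
        return

    rset = self.recordSet()
    # QApplication / Qt come from the same imports as the original method.
    QApplication.setOverrideCursor(Qt.WaitCursor)
    try:
        self.loadRecords(rset)
    finally:
        # Restore even when loadRecords raises, so the UI is never
        # left showing the wait cursor.
        QApplication.restoreOverrideCursor()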
def dump_passes(self):
    """
    Fetches the passes added to this flow controller.

    Returns (dict): {'options': self.options, 'passes': [passes], 'type': type(self)}
    """
    ret = {'options': self.options, 'passes': [], 'type': type(self)}
    for pass_ in self._passes:
        if isinstance(pass_, FlowController):
            ret['passes'].append(pass_.dump_passes())
        else:
            ret['passes'].append(pass_)
    return ret
def collides(self, position, size):
    '''Returns True if the word collides with another plotted word.'''
    word_rect = pygame.Rect(position, self.word_size)
    if word_rect.collidelistall(self.used_pos) == []:
        return False
    else:
        return True
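`Rect.collidelistall` returns the list of indices of colliding rectangles, so the empty-list comparison can collapse to a truthiness test. A self-contained check of the same geometry, with hypothetical sizes:

import pygame  # only Rect is used; no display or init needed

used_pos = [pygame.Rect((0, 0), (50, 20)), pygame.Rect((100, 40), (30, 10))]
word_size = (40, 15)

def collides(position):
    # True if a word placed at `position` overlaps any already-plotted rect.
    return bool(pygame.Rect(position, word_size).collidelistall(used_pos))

print(collides((10, 5)))     # True: overlaps the first rect
print(collides((200, 200)))  # False: clear area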
def highlightByAlternate(self):
    """
    Sets the palette highlighting for this tree widget to use a darker
    version of the alternate color vs. the standard highlighting.
    """
    palette = QtGui.QApplication.palette()
    palette.setColor(palette.HighlightedText, palette.color(palette.Text))

    clr = palette.color(palette.AlternateBase)
    palette.setColor(palette.Highlight, clr.darker(110))
    self.setPalette(palette)
def bm3_big_F(p, v, v0):
    """
    calculate big F for linearized form

    not fully tested

    :param p: pressure
    :param v: unit-cell volume
    :param v0: unit-cell volume at zero pressure
    :return: normalized pressure, big F
    """
    f = bm3_small_f(v, v0)
    return cal_big_F(p, f)
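The helpers `bm3_small_f` and `cal_big_F` are not shown here; in the standard third-order Birch-Murnaghan F-f analysis they would compute the Eulerian strain f = ((V0/V)^(2/3) - 1) / 2 and the normalized pressure F = P / (3 f (1 + 2f)^(5/2)). A sketch under that assumption, not the package's actual code:

def bm3_small_f(v, v0):
    # Eulerian finite strain: f = ((V0/V)^(2/3) - 1) / 2
    return ((v0 / v) ** (2.0 / 3.0) - 1.0) / 2.0

def cal_big_F(p, f):
    # Normalized pressure: F = P / (3 f (1 + 2f)^(5/2))
    return p / (3.0 * f * (1.0 + 2.0 * f) ** 2.5)

# Example: P = 10 GPa at roughly 5% compression
print(cal_big_F(10.0, bm3_small_f(95.0, 100.0)))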
def compute_index_key(self, to_instance):
    '''
    Compute the index key that can be used to identify an instance
    on the link.
    '''
    kwargs = dict()
    for attr in self.key_map.values():
        if _is_null(to_instance, attr):
            return None
        if attr in to_instance.__dict__:
            kwargs[attr] = to_instance.__dict__[attr]
        else:
            kwargs[attr] = getattr(to_instance, attr)
    return frozenset(tuple(kwargs.items()))
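The `frozenset` of `(attribute, value)` pairs is what makes the key both hashable and order-insensitive, so two instances with the same key attributes index to the same slot regardless of iteration order:

a = frozenset((('first_name', 'Ada'), ('last_name', 'Lovelace')))
b = frozenset((('last_name', 'Lovelace'), ('first_name', 'Ada')))

index = {a: 'instance-1'}
print(a == b)    # True: the order of the pairs does not matter
print(index[b])  # 'instance-1': either ordering finds the entry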
def get_aids_by_tag(self):
    """
    :returns: dict tag -> asset ordinals
    """
    aids_by_tag = general.AccumDict(accum=set())
    for aid, ass in enumerate(self):
        for tagname in self.tagnames:
            tag = self.tagcol.get_tag(tagname, ass[tagname])
            aids_by_tag[tag].add(aid)
    return aids_by_tag
def _associate_short_long(notices):
    """
    If a notice is type ${1}Short, associate with its Long notice
    in an attribute called long_notice.
    """
    for notice in notices:
        if notice.notice_type is not None and\
                notice.notice_category == "StudentFinAid" and\
                notice.notice_type.endswith("Short"):
            notice.long_notice = _find_notice_by_type(
                notices, notice.notice_type[:-5])
    return notices
def get_transform_vector(self, resx, resy):
    """ Given resolution it returns a transformation vector

    :param resx: Resolution in x direction
    :type resx: float or int
    :param resy: Resolution in y direction
    :type resy: float or int
    :return: A tuple with 6 numbers representing transformation vector
    :rtype: tuple(float)
    """
    return self.x_min, self._parse_resolution(resx), 0, self.y_max, 0, -self._parse_resolution(resy)
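The 6-tuple follows the GDAL-style affine geotransform layout (origin x, x resolution, row rotation, origin y, column rotation, negative y resolution), so converting a pixel index to world coordinates is a single affine evaluation. A sketch with a hypothetical 10 m grid:

def pixel_to_world(gt, col, row):
    # Standard affine geotransform: gt = (x_min, res_x, 0, y_max, 0, -res_y)
    x = gt[0] + col * gt[1] + row * gt[2]
    y = gt[3] + col * gt[4] + row * gt[5]
    return x, y

gt = (500000.0, 10.0, 0, 4400000.0, 0, -10.0)  # hypothetical values
print(pixel_to_world(gt, 0, 0))  # (500000.0, 4400000.0): top-left corner
print(pixel_to_world(gt, 3, 2))  # (500030.0, 4399980.0)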
def _automatic_dims(cls, dims, size):
    """Check if input dimension corresponds to qubit subsystems."""
    if dims is None:
        dims = size
    elif np.product(dims) != size:
        raise QiskitError("dimensions do not match size.")
    if isinstance(dims, (int, np.integer)):
        num_qubits = int(np.log2(dims))
        if 2 ** num_qubits == size:
            return num_qubits * (2,)
        return (dims,)
    return tuple(dims)
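Tracing the three branches with concrete sizes makes the behavior clear: an integer power of two becomes per-qubit dimensions, any other integer stays a single subsystem, and an explicit list passes through. This standalone copy (using `np.prod` in place of the older `np.product`, and a plain ValueError) reproduces it:

import numpy as np

def automatic_dims(dims, size):
    if dims is None:
        dims = size
    elif np.prod(dims) != size:
        raise ValueError("dimensions do not match size.")
    if isinstance(dims, (int, np.integer)):
        num_qubits = int(np.log2(dims))
        if 2 ** num_qubits == size:
            return num_qubits * (2,)
        return (dims,)
    return tuple(dims)

print(automatic_dims(None, 8))    # (2, 2, 2): three qubits
print(automatic_dims(None, 6))    # (6,): not a power of two
print(automatic_dims([2, 3], 6))  # (2, 3): explicit subsystems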
def DbGetDeviceAttributePropertyHist(self, argin):
    """ Retrieve device attribute property history

    :param argin: Str[0] = Device name
                  Str[1] = Attribute name
                  Str[2] = Property name
    :type: tango.DevVarStringArray
    :return: Str[0] = Attribute name
             Str[1] = Property name
             Str[2] = date
             Str[3] = Property value number (array case)
             Str[4] = Property value 1
             Str[n] = Property value n
    :rtype: tango.DevVarStringArray
    """
    self._log.debug("In DbGetDeviceAttributePropertyHist()")
    dev_name = argin[0]
    attribute = replace_wildcard(argin[1])
    prop_name = replace_wildcard(argin[2])
    return self.db.get_device_attribute_property_hist(dev_name, attribute, prop_name)
def sendUssd(self, ussdString, responseTimeout=15):
    """ Starts a USSD session by dialing the specified USSD string, or \
    sends the specified string in the existing USSD session (if any)

    :param ussdString: The USSD access number to dial
    :param responseTimeout: Maximum time to wait for a response, in seconds

    :raise TimeoutException: if no response is received in time

    :return: The USSD response message/session (as a Ussd object)
    :rtype: gsmmodem.modem.Ussd
    """
    self._ussdSessionEvent = threading.Event()
    try:
        cusdResponse = self.write('AT+CUSD=1,"{0}",15'.format(ussdString), timeout=responseTimeout)  # Should respond with "OK"
    except Exception:
        self._ussdSessionEvent = None  # Cancel the thread sync lock
        raise

    # Some modems issue the +CUSD response before the acknowledgment "OK" - check for that
    if len(cusdResponse) > 1:
        cusdResponseFound = lineStartingWith('+CUSD', cusdResponse) is not None
        if cusdResponseFound:
            self._ussdSessionEvent = None  # Cancel thread sync lock
            return self._parseCusdResponse(cusdResponse)

    # Wait for the +CUSD notification message
    if self._ussdSessionEvent.wait(responseTimeout):
        self._ussdSessionEvent = None
        return self._ussdResponse
    else:  # Response timed out
        self._ussdSessionEvent = None
        raise TimeoutException()
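The control flow hinges on a `threading.Event` shared with the modem's notification-handler thread: that thread stores the parsed `+CUSD` response somewhere visible and calls `set()`, while this method blocks on `wait(timeout)`. Stripped of modem specifics, the synchronization pattern looks like this (all names in the sketch are invented):

import threading
import time

response_event = threading.Event()
response_holder = {}

def notification_handler():
    # Stands in for the reader thread that parses '+CUSD: ...' lines.
    time.sleep(0.1)
    response_holder['ussd'] = 'Your balance is 12.34'
    response_event.set()

threading.Thread(target=notification_handler, daemon=True).start()
if response_event.wait(timeout=15):
    print(response_holder['ussd'])
else:
    raise TimeoutError('no +CUSD notification received')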
def get_host_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
    """Get host node state, simplest case ::

    * Handle not value (revert) for host and consider 1 as 2

    :return: 0, 1 or 2
    :rtype: int
    """
    # Make DOWN look like CRITICAL (2 instead of 1)
    if state == 1:
        state = 2

    # If our node is acknowledged or in downtime, state is ok/up
    if problem_has_been_acknowledged or in_scheduled_downtime:
        state = 0

    # Maybe we are a NOT node, so manage this
    if self.not_value:
        return 0 if state else 2  # Keep the logic of return Down on NOT rules
    return state
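Since every branch is a small pure mapping, the contract is easiest to verify as a table; the expected values below follow directly from the code (input state: 0=UP, 1=DOWN; output: 0 or 2):

def host_node_state(state, acked, downtime, not_value):
    if state == 1:          # DOWN is treated as CRITICAL
        state = 2
    if acked or downtime:   # acknowledged/downtime counts as UP
        state = 0
    if not_value:           # NOT rule inverts, keeping "down on NOT"
        return 0 if state else 2
    return state

for args, expected in [((0, False, False, False), 0),
                       ((1, False, False, False), 2),
                       ((1, True,  False, False), 0),
                       ((1, False, False, True),  0),
                       ((0, False, False, True),  2)]:
    assert host_node_state(*args) == expected
print('all cases match')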
def change_vlan_id(self, original, new):
    """
    Change VLAN ID for a single VLAN, cluster VLAN or inline interface.
    When changing a single or cluster FW vlan, you can specify the
    original VLAN and new VLAN as either single int or str value. If
    modifying an inline interface VLAN when the interface pair has two
    different VLAN identifiers per interface, use a str value in form:
    '10-11' (original), and '20-21' (new).

    Single VLAN id::

        >>> engine = Engine('singlefw')
        >>> itf = engine.interface.get(1)
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=11), PhysicalVlanInterface(vlan_id=10)]
        >>> itf.change_vlan_id(11, 100)
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=100), PhysicalVlanInterface(vlan_id=10)]

    Inline interface with unique VLAN on each interface pair::

        >>> itf = engine.interface.get(2)
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=2-3)]
        >>> itf.change_vlan_id('2-3', '20-30')
        >>> itf.vlan_interfaces()
        [PhysicalVlanInterface(vlan_id=20-30)]

    :param str,int original: original VLAN to change.
    :param str,int new: new VLAN identifier/s.
    :raises InterfaceNotFound: VLAN not found
    :raises UpdateElementFailed: failed updating the VLAN id
    :return: None
    """
    vlan = self.vlan_interface.get_vlan(original)
    newvlan = str(new).split('-')
    splitted = vlan.interface_id.split('.')
    vlan.interface_id = '{}.{}'.format(splitted[0], newvlan[0])
    for interface in vlan.interfaces:
        if isinstance(interface, InlineInterface):
            interface.change_vlan_id(new)
        else:
            interface.change_vlan_id(newvlan[0])
    self.update()
def get(self, **kwargs):
    """
    :param texteRecherche:
    :param numAmend:
    :param idArticle:
    :param idAuteur:
    :param idDossierLegislatif:
    :param idExamen:
    :param idExamens:
    :param periodeParlementaire:
    :param dateDebut:
    :param dateFin:
    :param rows:
    :param start:
    :param sort:
    """
    params = self.default_params.copy()
    params.update(kwargs)

    start = time.time()
    response = requests.get(self.base_url, params=params)
    end = time.time()

    LOGGER.debug(
        'fetched amendements with search params: %s in %0.2f s',
        params,
        end - start
    )

    return parse_amendements_summary(response.url, response.json())
def get_id_for_extra_dim_type(type_str):
    """ Returns the index of the type as defined in the LAS Specification

    Parameters
    ----------
    type_str: str

    Returns
    -------
    int
        index of the type
    """
    try:
        return _type_to_extra_dim_id_style_1[type_str]
    except KeyError:
        try:
            return _type_to_extra_dim_id_style_2[type_str]
        except KeyError:
            raise errors.UnknownExtraType(type_str)
def value(self, obj):
    '''
    Computes the value of this field to update the index.

    :param obj: object instance, as a dictionary or as a model instance.
    '''
    if self.template_name:
        t = loader.select_template([self.template_name])
        return t.render(Context({'object': obj}))

    if self.eval_func:
        try:
            return eval(self.eval_func)
        except Exception as e:
            raise type(e)('Could not compute value of {} field (eval_as=`{}`): {}.'.format(
                unicode(self), self.eval_func, unicode(e)))
    elif self.model_attr:
        if isinstance(obj, dict):
            return obj[self.model_attr]
        current_obj = getattr(obj, self.model_attr)

        if callable(current_obj):
            return current_obj()
        else:
            return current_obj
    else:
        raise KeyError('{0} gets its value via a model attribute, an eval function, a template, or is prepared in a method '
                       'call but none of `model_attr`, `eval_as,` `template,` `prepare_{0}` is provided.'.format(unicode(self)))
def query_image_content(self, image, content_type=""):
    '''**Description**
    Find the image with the tag <image> and return its content.

    **Arguments**
    - image: Input image can be in the following formats: registry/repo:tag
    - content_type: The content type can be one of the following types:
        - os: Operating System Packages
        - npm: Node.JS NPM Module
        - gem: Ruby GEM
        - files: Files

    **Success Return Value**
    A JSON object representing the image content.
    '''
    return self._query_image(image, query_group='content', query_type=content_type)
def stream(self, id, offset, origin, path="/"):
    """ This endpoint streams the contents of a file in an allocation directory.

    https://www.nomadproject.io/api/client.html#stream-file

    arguments:
      - id: (str) allocation_id required
      - offset: (int) required
      - origin: (str) either start|end
      - path: (str) optional
    returns: (str) text
    raises:
      - nomad.api.exceptions.BaseNomadException
      - nomad.api.exceptions.BadRequestNomadException
    """
    params = {
        "path": path,
        "offset": offset,
        "origin": origin
    }
    return self.request(id, params=params, method="get").text
def generate_aead_simple(self, nonce, key_handle, data):
    """
    Generate AEAD block from data for a specific key in a single step
    (without using the YubiHSM internal buffer).

    @param nonce: The nonce to use when creating the AEAD
    @param key_handle: The key handle that can encrypt data into an AEAD
    @param data: Data to put inside the AEAD
    @type nonce: string
    @type key_handle: integer or string
    @type data: string

    @returns: The generated AEAD on success.
    @rtype: L{YHSM_GeneratedAEAD}

    @see: L{pyhsm.aead_cmd.YHSM_Cmd_AEAD_Generate}
    """
    return pyhsm.aead_cmd.YHSM_Cmd_AEAD_Generate(self.stick, nonce, key_handle, data).execute()
def generate_trajectory(group_membership, num_levels=4):
    """Return a single trajectory

    Return a single trajectory of size :math:`(g+1)`-by-:math:`k`
    where :math:`g` is the number of groups,
    and :math:`k` is the number of factors,
    both implied by the dimensions of `group_membership`

    Arguments
    ---------
    group_membership : np.ndarray
        a k-by-g matrix which notes factor membership of groups
    num_levels : int, default=4
        The number of levels in the grid

    Returns
    -------
    np.ndarray
    """
    delta = compute_delta(num_levels)

    # Infer number of groups `g` and number of params `k` from
    # `group_membership` matrix
    num_params = group_membership.shape[0]
    num_groups = group_membership.shape[1]

    # Matrix B - size (g + 1) * g - lower triangular matrix
    B = np.tril(np.ones([num_groups + 1, num_groups], dtype=int), -1)

    P_star = generate_p_star(num_groups)

    # Matrix J - a (g+1)-by-num_params matrix of ones
    J = np.ones((num_groups + 1, num_params))

    # Matrix D* - num_params-by-num_params matrix which describes whether
    # factors move up or down
    D_star = np.diag([rd.choice([-1, 1]) for _ in range(num_params)])

    x_star = generate_x_star(num_params, num_levels)

    # Matrix B* - size (num_groups + 1) * num_params
    B_star = compute_b_star(J, x_star, delta, B,
                            group_membership, P_star, D_star)

    return B_star
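For orientation, the ungrouped Morris construction that this generalizes is usually written :math:`B^* = (J x^* + (\Delta/2)((2B - J)D^* + J))P^*` (see Saltelli et al., Global Sensitivity Analysis: The Primer); the grouped version above routes the factor moves through `group_membership`. A minimal ungrouped sketch, assuming that textbook formula:

import numpy as np

def morris_trajectory(k, num_levels=4, seed=None):
    rng = np.random.default_rng(seed)
    delta = num_levels / (2.0 * (num_levels - 1))  # e.g. 2/3 for 4 levels
    B = np.tril(np.ones((k + 1, k)), -1)           # staircase of factor moves
    D = np.diag(rng.choice([-1, 1], size=k))       # move up or down per factor
    P = np.eye(k)[rng.permutation(k)]              # random order of moves
    # Base point restricted to grid values that keep x + delta inside [0, 1]
    x = rng.choice(np.arange(0, 1 - delta + 1e-9, 1.0 / (num_levels - 1)), size=k)
    J = np.ones((k + 1, k))
    return (J * x + (delta / 2.0) * ((2.0 * B - J) @ D + J)) @ P

t = morris_trajectory(3, seed=42)
print(t)  # 4x3; consecutive rows differ in exactly one column by +/- delta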
def sources_remove(name, ruby=None, user=None):
    '''
    Make sure that a gem source is removed.

    name
        The URL of the gem source to be removed

    ruby: None
        For RVM or rbenv installations: the ruby version and gemset to target.

    user: None
        The user under which to run the ``gem`` command

        .. versionadded:: 0.17.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    if name not in __salt__['gem.sources_list'](ruby, runas=user):
        ret['result'] = True
        ret['comment'] = 'Gem source is already removed.'
        return ret

    if __opts__['test']:
        ret['comment'] = 'The gem source would have been removed.'
        return ret

    if __salt__['gem.sources_remove'](source_uri=name, ruby=ruby, runas=user):
        ret['result'] = True
        ret['changes'][name] = 'Removed'
        ret['comment'] = 'Gem source was successfully removed.'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not remove gem source.'
    return ret
def cortex_plot_2D(the_map,
                   color=None, cmap=None, vmin=None, vmax=None, alpha=None,
                   underlay='curvature', mask=None, axes=None, triangulation=None):
    '''
    cortex_plot_2D(map) yields a plot of the given 2D cortical mesh, map. The following options
    are accepted:
      * color (default: None) specifies the color to plot for each vertex; this argument may
        take a number of forms:
          * None, do not plot a color over the underlay (the default)
          * a matrix of RGB or RGBA values, one per vertex
          * a property vector or a string naming a property, in which case the cmap, vmin, and
            vmax arguments are used to generate colors
          * a function that, when passed a single argument, a dict of the properties of a single
            vertex, yields an RGB or RGBA list for that vertex.
      * cmap (default: 'log_eccentricity') specifies the colormap to use in plotting if the
        color argument provided is a property.
      * vmin (default: None) specifies the minimum value for scaling the property when one is
        passed as the color option. None means to use the min value of the property.
      * vmax (default: None) specifies the maximum value for scaling the property when one is
        passed as the color option. None means to use the max value of the property.
      * underlay (default: 'curvature') specifies the default underlay color to plot for the
        cortical surface; it may be None, 'curvature', or a color.
      * alpha (default None) specifies the alpha values to use for the color plot. If None, then
        leaves the alpha values from color unchanged. If a single number, then all alpha values
        in color are multiplied by that value. If a list of values, one per vertex, then this
        vector is multiplied by the alpha values. Finally, any negative value is set instead of
        multiplied. So, for example, if there were 3 vertices with:
          * color = ((0,0,0,1), (0,0,1,0.5), (0,0,0.75,0.8))
          * alpha = (-0.5, 1, 0.5)
        then the resulting colors plotted will be ((0,0,0,0.5), (0,0,1,0.5), (0,0,0.75,0.4)).
      * mask (default: None) specifies a mask to use for the mesh; this is passed through
        map.mask() to figure out the masking. Those vertices not in the mask are not plotted
        (but they will be plotted in the underlay if it is not None).
      * axes (default: None) specifies a particular set of matplotlib pyplot axes that should be
        used. If axes is Ellipsis, then instead of attempting to render the plot, a tuple of
        (tri, zs, cmap) is returned; in this case, tri is a matplotlib.tri.Triangulation object
        for the given map and zs and cmap are an array and colormap (respectively) that will
        produce the correct colors. Without axes equal to Ellipsis, these would instead be
        rendered as axes.tripcolor(tri, zs, cmap, shading='gouraud'). If axes is None, then uses
        the current axes.
      * triangulation (default: None) specifies the matplotlib triangulation object to use, if
        one already exists; otherwise a new one is made.
    '''
    # parse the axes
    if axes is None:
        axes = matplotlib.pyplot.gca()
    # process the colors
    color = cortex_plot_colors(the_map, color=color, cmap=cmap, vmin=vmin, vmax=vmax,
                               alpha=alpha, underlay=underlay, mask=mask)
    # finally, we can make the plot!
    return cortex_rgba_plot_2D(the_map, color, axes=axes, triangulation=triangulation)
def has_stack(self, s):
    """Tests whether store `s` is a stack, that is, it never moves from position 0."""
    for t in self.transitions:
        if t.lhs[s].position != 0:
            return False
        if t.rhs[s].position != 0:
            return False
    return True
Tests whether store `s` is a stack, that is, it never moves from position 0.
Below is the the instruction that describes the task: ### Input: Tests whether store `s` is a stack, that is, it never moves from position 0. ### Response: def has_stack(self, s): """Tests whether store `s` is a stack, that is, it never moves from position 0.""" for t in self.transitions: if t.lhs[s].position != 0: return False if t.rhs[s].position != 0: return False return True
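A toy illustration of the check above, using namedtuples to stand in for the library's transition and store classes (the real types are assumed to carry more state than a position field):

from collections import namedtuple

Store = namedtuple('Store', 'position')
Transition = namedtuple('Transition', 'lhs rhs')

class Machine:
    def __init__(self, transitions):
        self.transitions = transitions
    def has_stack(self, s):
        # same logic as above: a stack never moves from position 0
        for t in self.transitions:
            if t.lhs[s].position != 0 or t.rhs[s].position != 0:
                return False
        return True

m = Machine([Transition(lhs={'work': Store(0)}, rhs={'work': Store(0)}),
             Transition(lhs={'work': Store(0)}, rhs={'work': Store(2)})])
print(m.has_stack('work'))  # False: the second transition moves the store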
def _import_warnings(self): """ Add custom warnings found in output files. Warnings in output files are searched for using this method; if a warning is found then it will be appended to the warnings list. """ warnings = ( r"Warning: BMDL computation is at best imprecise for these data", r"THE MODEL HAS PROBABLY NOT CONVERGED!!!", "THIS USUALLY MEANS THE MODEL HAS NOT CONVERGED!", r"BMR value is not in the range of the mean function", r"BMD = 100\*\(maximum dose\)", r"BMDL computation failed\.", "Warning: optimum may not have been found. Bad completion code in Optimization routine.", # noqa "Warning: Likelihood for fitted model larger than the Likelihood for model A3.", # noqa ) self.output["warnings"] = [] for warning in warnings: m = re.search(warning, self.output_text) if m: self.output["warnings"].append(m.group())
Add custom warnings found in output files. Warnings in output files are searched for using this method; if a warning is found then it will be appended to the warnings list.
Below is the the instruction that describes the task: ### Input: Add custom warnings found in output files. Warnings in output files are searched for using this method; if a warning is found then it will be appended to the warnings list. ### Response: def _import_warnings(self): """ Add custom warnings found in output files. Warnings in output files are searched for using this method; if a warning is found then it will be appended to the warnings list. """ warnings = ( r"Warning: BMDL computation is at best imprecise for these data", r"THE MODEL HAS PROBABLY NOT CONVERGED!!!", "THIS USUALLY MEANS THE MODEL HAS NOT CONVERGED!", r"BMR value is not in the range of the mean function", r"BMD = 100\*\(maximum dose\)", r"BMDL computation failed\.", "Warning: optimum may not have been found. Bad completion code in Optimization routine.", # noqa "Warning: Likelihood for fitted model larger than the Likelihood for model A3.", # noqa ) self.output["warnings"] = [] for warning in warnings: m = re.search(warning, self.output_text) if m: self.output["warnings"].append(m.group())
def _build(self, inputs, keep_prob=None, is_training=None, test_local_stats=True): """Connects the AlexNet module into the graph. The is_training flag only controls the batch norm settings, if `False` it does not force no dropout by overriding any input `keep_prob`. To avoid any confusion this may cause, if `is_training=False` and `keep_prob` would cause dropout to be applied, an error is thrown. Args: inputs: A Tensor of size [batch_size, input_height, input_width, input_channels], representing a batch of input images. keep_prob: A scalar Tensor representing the dropout keep probability. When `is_training=False` this must be None or 1 to give no dropout. is_training: Boolean to indicate if we are currently training. Must be specified if batch normalization or dropout is used. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. By default `True`. Returns: A Tensor of size [batch_size, output_size], where `output_size` depends on the mode the network was constructed in. Raises: base.IncompatibleShapeError: If any of the input image dimensions (input_height, input_width) are too small for the given network mode. ValueError: If `keep_prob` is not None or 1 when `is_training=False`. ValueError: If `is_training` is not explicitly specified when using batch normalization. """ # Check input shape if (self._use_batch_norm or keep_prob is not None) and is_training is None: raise ValueError("Boolean is_training flag must be explicitly specified " "when using batch normalization or dropout.") input_shape = inputs.get_shape().as_list() if input_shape[1] < self._min_size or input_shape[2] < self._min_size: raise base.IncompatibleShapeError( "Image shape too small: ({:d}, {:d}) < {:d}".format( input_shape[1], input_shape[2], self._min_size)) net = inputs # Check keep prob if keep_prob is not None: valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.)) keep_prob_check = tf.assert_equal( valid_inputs, True, message="Input `keep_prob` must be None or 1 if `is_training=False`.") with tf.control_dependencies([keep_prob_check]): net = tf.identity(net) for i, params in enumerate(self._conv_layers): output_channels, conv_params, max_pooling = params kernel_size, stride = conv_params conv_mod = conv.Conv2D( name="conv_{}".format(i), output_channels=output_channels, kernel_shape=kernel_size, stride=stride, padding=conv.VALID, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers) if not self.is_connected: self._conv_modules.append(conv_mod) net = conv_mod(net) if self._use_batch_norm: bn = batch_norm.BatchNorm(**self._batch_norm_config) net = bn(net, is_training, test_local_stats) net = tf.nn.relu(net) if max_pooling is not None: pooling_kernel_size, pooling_stride = max_pooling net = tf.nn.max_pool( net, ksize=[1, pooling_kernel_size, pooling_kernel_size, 1], strides=[1, pooling_stride, pooling_stride, 1], padding=conv.VALID) net = basic.BatchFlatten(name="flatten")(net) for i, output_size in enumerate(self._fc_layers): linear_mod = basic.Linear( name="fc_{}".format(i), output_size=output_size, initializers=self._initializers, partitioners=self._partitioners) if not self.is_connected: self._linear_modules.append(linear_mod) net = linear_mod(net) if self._use_batch_norm and self._bn_on_fc_layers: bn = batch_norm.BatchNorm(**self._batch_norm_config) net = bn(net, is_training, test_local_stats) net = tf.nn.relu(net) if keep_prob is not None: net = tf.nn.dropout(net, 
keep_prob=keep_prob) return net
Connects the AlexNet module into the graph. The is_training flag only controls the batch norm settings, if `False` it does not force no dropout by overriding any input `keep_prob`. To avoid any confusion this may cause, if `is_training=False` and `keep_prob` would cause dropout to be applied, an error is thrown. Args: inputs: A Tensor of size [batch_size, input_height, input_width, input_channels], representing a batch of input images. keep_prob: A scalar Tensor representing the dropout keep probability. When `is_training=False` this must be None or 1 to give no dropout. is_training: Boolean to indicate if we are currently training. Must be specified if batch normalization or dropout is used. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. By default `True`. Returns: A Tensor of size [batch_size, output_size], where `output_size` depends on the mode the network was constructed in. Raises: base.IncompatibleShapeError: If any of the input image dimensions (input_height, input_width) are too small for the given network mode. ValueError: If `keep_prob` is not None or 1 when `is_training=False`. ValueError: If `is_training` is not explicitly specified when using batch normalization.
Below is the the instruction that describes the task: ### Input: Connects the AlexNet module into the graph. The is_training flag only controls the batch norm settings, if `False` it does not force no dropout by overriding any input `keep_prob`. To avoid any confusion this may cause, if `is_training=False` and `keep_prob` would cause dropout to be applied, an error is thrown. Args: inputs: A Tensor of size [batch_size, input_height, input_width, input_channels], representing a batch of input images. keep_prob: A scalar Tensor representing the dropout keep probability. When `is_training=False` this must be None or 1 to give no dropout. is_training: Boolean to indicate if we are currently training. Must be specified if batch normalization or dropout is used. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. By default `True`. Returns: A Tensor of size [batch_size, output_size], where `output_size` depends on the mode the network was constructed in. Raises: base.IncompatibleShapeError: If any of the input image dimensions (input_height, input_width) are too small for the given network mode. ValueError: If `keep_prob` is not None or 1 when `is_training=False`. ValueError: If `is_training` is not explicitly specified when using batch normalization. ### Response: def _build(self, inputs, keep_prob=None, is_training=None, test_local_stats=True): """Connects the AlexNet module into the graph. The is_training flag only controls the batch norm settings, if `False` it does not force no dropout by overriding any input `keep_prob`. To avoid any confusion this may cause, if `is_training=False` and `keep_prob` would cause dropout to be applied, an error is thrown. Args: inputs: A Tensor of size [batch_size, input_height, input_width, input_channels], representing a batch of input images. keep_prob: A scalar Tensor representing the dropout keep probability. When `is_training=False` this must be None or 1 to give no dropout. is_training: Boolean to indicate if we are currently training. Must be specified if batch normalization or dropout is used. test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch normalization should use local batch statistics at test time. By default `True`. Returns: A Tensor of size [batch_size, output_size], where `output_size` depends on the mode the network was constructed in. Raises: base.IncompatibleShapeError: If any of the input image dimensions (input_height, input_width) are too small for the given network mode. ValueError: If `keep_prob` is not None or 1 when `is_training=False`. ValueError: If `is_training` is not explicitly specified when using batch normalization. 
""" # Check input shape if (self._use_batch_norm or keep_prob is not None) and is_training is None: raise ValueError("Boolean is_training flag must be explicitly specified " "when using batch normalization or dropout.") input_shape = inputs.get_shape().as_list() if input_shape[1] < self._min_size or input_shape[2] < self._min_size: raise base.IncompatibleShapeError( "Image shape too small: ({:d}, {:d}) < {:d}".format( input_shape[1], input_shape[2], self._min_size)) net = inputs # Check keep prob if keep_prob is not None: valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.)) keep_prob_check = tf.assert_equal( valid_inputs, True, message="Input `keep_prob` must be None or 1 if `is_training=False`.") with tf.control_dependencies([keep_prob_check]): net = tf.identity(net) for i, params in enumerate(self._conv_layers): output_channels, conv_params, max_pooling = params kernel_size, stride = conv_params conv_mod = conv.Conv2D( name="conv_{}".format(i), output_channels=output_channels, kernel_shape=kernel_size, stride=stride, padding=conv.VALID, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers) if not self.is_connected: self._conv_modules.append(conv_mod) net = conv_mod(net) if self._use_batch_norm: bn = batch_norm.BatchNorm(**self._batch_norm_config) net = bn(net, is_training, test_local_stats) net = tf.nn.relu(net) if max_pooling is not None: pooling_kernel_size, pooling_stride = max_pooling net = tf.nn.max_pool( net, ksize=[1, pooling_kernel_size, pooling_kernel_size, 1], strides=[1, pooling_stride, pooling_stride, 1], padding=conv.VALID) net = basic.BatchFlatten(name="flatten")(net) for i, output_size in enumerate(self._fc_layers): linear_mod = basic.Linear( name="fc_{}".format(i), output_size=output_size, initializers=self._initializers, partitioners=self._partitioners) if not self.is_connected: self._linear_modules.append(linear_mod) net = linear_mod(net) if self._use_batch_norm and self._bn_on_fc_layers: bn = batch_norm.BatchNorm(**self._batch_norm_config) net = bn(net, is_training, test_local_stats) net = tf.nn.relu(net) if keep_prob is not None: net = tf.nn.dropout(net, keep_prob=keep_prob) return net
def resolve(self, strict=None): """Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). Args: strict: If False (default) no exception is raised if the path does not exist. New in Python 3.6. Raises: IOError: if the path doesn't exist (strict=True or Python < 3.6) """ if sys.version_info >= (3, 6) or pathlib2: if strict is None: strict = False else: if strict is not None: raise TypeError( "resolve() got an unexpected keyword argument 'strict'") strict = True if self._closed: self._raise_closed() path = self._flavour.resolve(self, strict=strict) if path is None: self.stat() path = str(self.absolute()) path = self.filesystem.absnormpath(path) return FakePath(path)
Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). Args: strict: If False (default) no exception is raised if the path does not exist. New in Python 3.6. Raises: IOError: if the path doesn't exist (strict=True or Python < 3.6)
Below is the the instruction that describes the task: ### Input: Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). Args: strict: If False (default) no exception is raised if the path does not exist. New in Python 3.6. Raises: IOError: if the path doesn't exist (strict=True or Python < 3.6) ### Response: def resolve(self, strict=None): """Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). Args: strict: If False (default) no exception is raised if the path does not exist. New in Python 3.6. Raises: IOError: if the path doesn't exist (strict=True or Python < 3.6) """ if sys.version_info >= (3, 6) or pathlib2: if strict is None: strict = False else: if strict is not None: raise TypeError( "resolve() got an unexpected keyword argument 'strict'") strict = True if self._closed: self._raise_closed() path = self._flavour.resolve(self, strict=strict) if path is None: self.stat() path = str(self.absolute()) path = self.filesystem.absnormpath(path) return FakePath(path)
def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: True if 'wake on network' is enabled, False otherwise :rtype: bool CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'wake on network' is on or off if supported :return: True if 'wake on network' is enabled, False otherwise :rtype: bool CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network
Below is the the instruction that describes the task: ### Input: Displays whether 'wake on network' is on or off if supported :return: True if 'wake on network' is enabled, False otherwise :rtype: bool CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ### Response: def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: True if 'wake on network' is enabled, False otherwise :rtype: bool CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
def parse_block_scalar_empty_line(indent_token_class, content_token_class): """Process an empty line in a block scalar.""" def callback(lexer, match, context): text = match.group() if (context.block_scalar_indent is None or len(text) <= context.block_scalar_indent): if text: yield match.start(), indent_token_class, text else: indentation = text[:context.block_scalar_indent] content = text[context.block_scalar_indent:] yield match.start(), indent_token_class, indentation yield (match.start()+context.block_scalar_indent, content_token_class, content) context.pos = match.end() return callback
Process an empty line in a block scalar.
Below is the the instruction that describes the task: ### Input: Process an empty line in a block scalar. ### Response: def parse_block_scalar_empty_line(indent_token_class, content_token_class): """Process an empty line in a block scalar.""" def callback(lexer, match, context): text = match.group() if (context.block_scalar_indent is None or len(text) <= context.block_scalar_indent): if text: yield match.start(), indent_token_class, text else: indentation = text[:context.block_scalar_indent] content = text[context.block_scalar_indent:] yield match.start(), indent_token_class, indentation yield (match.start()+context.block_scalar_indent, content_token_class, content) context.pos = match.end() return callback
def insert(self, packet, **kwargs): ''' Insert a packet into the database Arguments packet The :class:`ait.core.tlm.Packet` instance to insert into the database ''' values = [ ] pd = packet._defn for defn in pd.fields: val = getattr(packet.raw, defn.name) if val is None and defn.name in pd.history: val = getattr(packet.history, defn.name) values.append(val) qmark = ['?'] * len(values) sql = 'INSERT INTO %s VALUES (%s)' % (pd.name, ', '.join(qmark)) self._conn.execute(sql, values)
Insert a packet into the database Arguments packet The :class:`ait.core.tlm.Packet` instance to insert into the database
Below is the the instruction that describes the task: ### Input: Insert a packet into the database Arguments packet The :class:`ait.core.tlm.Packet` instance to insert into the database ### Response: def insert(self, packet, **kwargs): ''' Insert a packet into the database Arguments packet The :class:`ait.core.tlm.Packet` instance to insert into the database ''' values = [ ] pd = packet._defn for defn in pd.fields: val = getattr(packet.raw, defn.name) if val is None and defn.name in pd.history: val = getattr(packet.history, defn.name) values.append(val) qmark = ['?'] * len(values) sql = 'INSERT INTO %s VALUES (%s)' % (pd.name, ', '.join(qmark)) self._conn.execute(sql, values)
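The qmark list keeps the values parameterized (the DB driver escapes them), while only the table name is interpolated into the SQL string. A self-contained sqlite3 sketch of the same pattern, with a made-up three-field packet table:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE Packet_HS (time REAL, voltage REAL, current REAL)')

values = [1.5, 3.3, 0.02]                         # one value per field definition
qmark = ['?'] * len(values)
sql = 'INSERT INTO %s VALUES (%s)' % ('Packet_HS', ', '.join(qmark))
conn.execute(sql, values)                         # INSERT INTO Packet_HS VALUES (?, ?, ?)
print(conn.execute('SELECT * FROM Packet_HS').fetchall())

Note this is only safe because the table name comes from the packet definition, not from user input.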
def nvmlDeviceGetSupportedMemoryClocks(handle): r""" /** * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param count Reference in which to provide the \a clocksMHz array size, and * to return the number of elements * @param clocksMHz Reference in which to return the clock in MHz * * @return * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of * required elements) * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceSetApplicationsClocks * @see nvmlDeviceGetSupportedGraphicsClocks */ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks """ # first call to get the size c_count = c_uint(0) fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedMemoryClocks") ret = fn(handle, byref(c_count), None) if (ret == NVML_SUCCESS): # special case, no clocks return [] elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): # typical case clocks_array = c_uint * c_count.value c_clocks = clocks_array() # make the call again ret = fn(handle, byref(c_count), c_clocks) _nvmlCheckReturn(ret) procs = [] for i in range(c_count.value): procs.append(c_clocks[i]) return procs else: # error case raise NVMLError(ret)
r""" /** * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param count Reference in which to provide the \a clocksMHz array size, and * to return the number of elements * @param clocksMHz Reference in which to return the clock in MHz * * @return * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of * required elements) * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceSetApplicationsClocks * @see nvmlDeviceGetSupportedGraphicsClocks */ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks
Below is the the instruction that describes the task: ### Input: r""" /** * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param count Reference in which to provide the \a clocksMHz array size, and * to return the number of elements * @param clocksMHz Reference in which to return the clock in MHz * * @return * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of * required elements) * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceSetApplicationsClocks * @see nvmlDeviceGetSupportedGraphicsClocks */ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks ### Response: def nvmlDeviceGetSupportedMemoryClocks(handle): r""" /** * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param count Reference in which to provide the \a clocksMHz array size, and * to return the number of elements * @param clocksMHz Reference in which to return the clock in MHz * * @return * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of * required elements) * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceSetApplicationsClocks * @see nvmlDeviceGetSupportedGraphicsClocks */ nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks """ # first call to get the size c_count = c_uint(0) fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedMemoryClocks") ret = fn(handle, byref(c_count), None) if (ret == NVML_SUCCESS): # special case, no clocks return [] elif (ret == NVML_ERROR_INSUFFICIENT_SIZE): # typical case clocks_array = c_uint * c_count.value c_clocks = clocks_array() # make the call again ret = fn(handle, byref(c_count), c_clocks) _nvmlCheckReturn(ret) procs = [] for i in range(c_count.value): procs.append(c_clocks[i]) return procs else: # error case raise NVMLError(ret)
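The body above is the standard two-call sizing idiom for C APIs: call once with a NULL buffer to learn the element count (signalled here by NVML_ERROR_INSUFFICIENT_SIZE), allocate a ctypes array of exactly that size, then call again to fill it. A library-free sketch of the same shape, with a plain Python function standing in for the NVML entry point (byref is omitted because the stand-in mutates the c_uint directly):

from ctypes import c_uint

NVML_SUCCESS, NVML_ERROR_INSUFFICIENT_SIZE = 0, 7
_DATA = [405, 810, 5001]                     # pretend list of clocks, in MHz

def fake_fn(handle, c_count, c_clocks):      # stand-in for the real C call
    if c_clocks is None:
        c_count.value = len(_DATA)
        return NVML_ERROR_INSUFFICIENT_SIZE
    for i, mhz in enumerate(_DATA):
        c_clocks[i] = mhz
    return NVML_SUCCESS

c_count = c_uint(0)
ret = fake_fn(None, c_count, None)           # first call: learn the required size
if ret == NVML_ERROR_INSUFFICIENT_SIZE:
    c_clocks = (c_uint * c_count.value)()    # allocate exactly that many slots
    fake_fn(None, c_count, c_clocks)         # second call: fill the buffer
    print([c_clocks[i] for i in range(c_count.value)])   # [405, 810, 5001]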
def is_transactional(self, state): ''' Decide if a request should be wrapped in a transaction, based upon the state of the request. By default, wraps all but ``GET`` and ``HEAD`` requests in a transaction, along with respecting the ``transactional`` decorator from :mod:`pecan.decorators`. :param state: The Pecan state object for the current request. ''' controller = getattr(state, 'controller', None) if controller: force_transactional = _cfg(controller).get('transactional', False) else: force_transactional = False if state.request.method not in ('GET', 'HEAD') or force_transactional: return True return False
Decide if a request should be wrapped in a transaction, based upon the state of the request. By default, wraps all but ``GET`` and ``HEAD`` requests in a transaction, along with respecting the ``transactional`` decorator from :mod:`pecan.decorators`. :param state: The Pecan state object for the current request.
Below is the the instruction that describes the task: ### Input: Decide if a request should be wrapped in a transaction, based upon the state of the request. By default, wraps all but ``GET`` and ``HEAD`` requests in a transaction, along with respecting the ``transactional`` decorator from :mod:`pecan.decorators`. :param state: The Pecan state object for the current request. ### Response: def is_transactional(self, state): ''' Decide if a request should be wrapped in a transaction, based upon the state of the request. By default, wraps all but ``GET`` and ``HEAD`` requests in a transaction, along with respecting the ``transactional`` decorator from :mod:`pecan.decorators`. :param state: The Pecan state object for the current request. ''' controller = getattr(state, 'controller', None) if controller: force_transactional = _cfg(controller).get('transactional', False) else: force_transactional = False if state.request.method not in ('GET', 'HEAD') or force_transactional: return True return False
def _intercept_dot(w, X): """Computes np.dot(X, w) + c, taking into consideration whether the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. """ c = 0. if w.size == X.shape[1] + 1: c = w[-1] w = w[:-1] z = np.dot(X, w) + c return w, c, z
Computes np.dot(X, w) + c, taking into consideration whether the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data.
Below is the the instruction that describes the task: ### Input: Computes np.dot(X, w) + c, taking into consideration whether the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. ### Response: def _intercept_dot(w, X): """Computes np.dot(X, w) + c, taking into consideration whether the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. """ c = 0. if w.size == X.shape[1] + 1: c = w[-1] w = w[:-1] z = np.dot(X, w) + c return w, c, z
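A quick worked example of the split, using the function above: a weight vector one entry longer than the feature count carries the intercept in its last slot.

import numpy as np

X = np.array([[1., 2.],
              [3., 4.]])
w_aug = np.array([0.5, -1.0, 2.0])    # last entry is the intercept c

w, c, z = _intercept_dot(w_aug, X)
print(w, c)    # [ 0.5 -1. ] 2.0
print(z)       # X @ w + c = [0.5 - 2 + 2, 1.5 - 4 + 2] = [0.5, -0.5]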
def _scan(positions): """get the region inside the vector with more expression""" scores = [] for start in range(0, len(positions) - 17, 5): end = start + 17 scores.append(_enrichment(positions[start:end], positions[:start], positions[end:])) return scores
get the region inside the vector with more expression
Below is the the instruction that describes the task: ### Input: get the region inside the vector with more expression ### Response: def _scan(positions): """get the region inside the vector with more expression""" scores = [] for start in range(0, len(positions) - 17, 5): end = start + 17 scores.append(_enrichment(positions[start:end], positions[:start], positions[end:])) return scores
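A usage sketch of the corrected function (the original used end = start = 17 and scores.add, and never returned; the fix above uses a 17-wide window stepped by 5 and returns the score list). _enrichment is not shown in this record, so a simple stand-in, mean coverage inside the window minus mean coverage outside, is used purely for illustration:

def _enrichment(inside, before, after):   # hypothetical stand-in
    outside = list(before) + list(after)
    mean = lambda xs: sum(xs) / len(xs) if xs else 0.0
    return mean(inside) - mean(outside)

positions = [0, 1, 0, 0, 2, 9, 8, 7, 9, 6, 9, 8, 7, 9, 6, 9, 8, 2, 0, 1, 0, 0, 1, 0]
print(_scan(positions))   # one score per window start (0, 5, ...)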
def fromMessage(klass, message, op_endpoint=UNUSED): """Construct me from an OpenID Message. @param message: The OpenID associate request @type message: openid.message.Message @returntype: L{AssociateRequest} """ if message.isOpenID1(): session_type = message.getArg(OPENID_NS, 'session_type') if session_type == 'no-encryption': logging.warn('Received OpenID 1 request with a no-encryption ' 'association session type. Continuing anyway.') elif not session_type: session_type = 'no-encryption' else: session_type = message.getArg(OPENID2_NS, 'session_type') if session_type is None: raise ProtocolError(message, text="session_type missing from request") try: session_class = klass.session_classes[session_type] except KeyError: raise ProtocolError(message, "Unknown session type %r" % (session_type,)) try: session = session_class.fromMessage(message) except ValueError, why: raise ProtocolError(message, 'Error parsing %s session: %s' % (session_class.session_type, why[0])) assoc_type = message.getArg(OPENID_NS, 'assoc_type', 'HMAC-SHA1') if assoc_type not in session.allowed_assoc_types: fmt = 'Session type %s does not support association type %s' raise ProtocolError(message, fmt % (session_type, assoc_type)) self = klass(session, assoc_type) self.message = message self.namespace = message.getOpenIDNamespace() return self
Construct me from an OpenID Message. @param message: The OpenID associate request @type message: openid.message.Message @returntype: L{AssociateRequest}
Below is the the instruction that describes the task: ### Input: Construct me from an OpenID Message. @param message: The OpenID associate request @type message: openid.message.Message @returntype: L{AssociateRequest} ### Response: def fromMessage(klass, message, op_endpoint=UNUSED): """Construct me from an OpenID Message. @param message: The OpenID associate request @type message: openid.message.Message @returntype: L{AssociateRequest} """ if message.isOpenID1(): session_type = message.getArg(OPENID_NS, 'session_type') if session_type == 'no-encryption': logging.warn('Received OpenID 1 request with a no-encryption ' 'association session type. Continuing anyway.') elif not session_type: session_type = 'no-encryption' else: session_type = message.getArg(OPENID2_NS, 'session_type') if session_type is None: raise ProtocolError(message, text="session_type missing from request") try: session_class = klass.session_classes[session_type] except KeyError: raise ProtocolError(message, "Unknown session type %r" % (session_type,)) try: session = session_class.fromMessage(message) except ValueError, why: raise ProtocolError(message, 'Error parsing %s session: %s' % (session_class.session_type, why[0])) assoc_type = message.getArg(OPENID_NS, 'assoc_type', 'HMAC-SHA1') if assoc_type not in session.allowed_assoc_types: fmt = 'Session type %s does not support association type %s' raise ProtocolError(message, fmt % (session_type, assoc_type)) self = klass(session, assoc_type) self.message = message self.namespace = message.getOpenIDNamespace() return self
def get_correlation_matrix_from_columns(self): """Computes correlation matrix of columns :return: Correlation matrix of columns """ header_to_column = {} # create index of headers for header in self.headers: header_to_column[header] = self.headers.index(header) data_to_test = [] for header in self.headers_to_test: header_column = Matrix(self.data) \ .get_column(header_to_column[header]) for i, value in enumerate(header_column): header_column[i] = float(value) # get float data_to_test.append(header_column) return self.get_correlation_matrix(data_to_test)
Computes correlation matrix of columns :return: Correlation matrix of columns
Below is the the instruction that describes the task: ### Input: Computes correlation matrix of columns :return: Correlation matrix of columns ### Response: def get_correlation_matrix_from_columns(self): """Computes correlation matrix of columns :return: Correlation matrix of columns """ header_to_column = {} # create index of headers for header in self.headers: header_to_column[header] = self.headers.index(header) data_to_test = [] for header in self.headers_to_test: header_column = Matrix(self.data) \ .get_column(header_to_column[header]) for i, value in enumerate(header_column): header_column[i] = float(value) # get float data_to_test.append(header_column) return self.get_correlation_matrix(data_to_test)
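For numeric columns the result can be cross-checked with numpy, which computes the same Pearson matrix when each column is passed as a row (hypothetical data; get_correlation_matrix is assumed here to be Pearson-based):

import numpy as np

data_to_test = [
    [1.0, 2.0, 3.0, 4.0],    # column "height"
    [2.1, 3.9, 6.2, 7.8],    # column "weight"
]
print(np.corrcoef(data_to_test))   # 2x2 matrix; off-diagonal close to 1 here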
def fetch_from_archive(backend_class, backend_args, manager, category, archived_after): """Fetch items from an archive manager. Generator to get the items of a category (previously fetched by the given backend class) from an archive manager. Only those items archived after the given date will be returned. The parameters needed to initialize `backend` and get the items are given using the `backend_args` dict parameter. :param backend_class: backend class to retrieve items :param backend_args: dict of arguments needed to retrieve the items :param manager: archive manager where the items will be retrieved :param category: category of the items to retrieve :param archived_after: return items archived after this date :returns: a generator of archived items """ init_args = find_signature_parameters(backend_class.__init__, backend_args) backend = backend_class(**init_args) filepaths = manager.search(backend.origin, backend.__class__.__name__, category, archived_after) for filepath in filepaths: backend.archive = Archive(filepath) items = backend.fetch_from_archive() try: for item in items: yield item except ArchiveError as e: logger.warning("Ignoring %s archive due to: %s", filepath, str(e))
Fetch items from an archive manager. Generator to get the items of a category (previously fetched by the given backend class) from an archive manager. Only those items archived after the given date will be returned. The parameters needed to initialize `backend` and get the items are given using the `backend_args` dict parameter. :param backend_class: backend class to retrieve items :param backend_args: dict of arguments needed to retrieve the items :param manager: archive manager where the items will be retrieved :param category: category of the items to retrieve :param archived_after: return items archived after this date :returns: a generator of archived items
Below is the the instruction that describes the task: ### Input: Fetch items from an archive manager. Generator to get the items of a category (previously fetched by the given backend class) from an archive manager. Only those items archived after the given date will be returned. The parameters needed to initialize `backend` and get the items are given using the `backend_args` dict parameter. :param backend_class: backend class to retrieve items :param backend_args: dict of arguments needed to retrieve the items :param manager: archive manager where the items will be retrieved :param category: category of the items to retrieve :param archived_after: return items archived after this date :returns: a generator of archived items ### Response: def fetch_from_archive(backend_class, backend_args, manager, category, archived_after): """Fetch items from an archive manager. Generator to get the items of a category (previously fetched by the given backend class) from an archive manager. Only those items archived after the given date will be returned. The parameters needed to initialize `backend` and get the items are given using the `backend_args` dict parameter. :param backend_class: backend class to retrieve items :param backend_args: dict of arguments needed to retrieve the items :param manager: archive manager where the items will be retrieved :param category: category of the items to retrieve :param archived_after: return items archived after this date :returns: a generator of archived items """ init_args = find_signature_parameters(backend_class.__init__, backend_args) backend = backend_class(**init_args) filepaths = manager.search(backend.origin, backend.__class__.__name__, category, archived_after) for filepath in filepaths: backend.archive = Archive(filepath) items = backend.fetch_from_archive() try: for item in items: yield item except ArchiveError as e: logger.warning("Ignoring %s archive due to: %s", filepath, str(e))
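The find_signature_parameters helper is not shown in this record; its assumed job is to keep only the keyword arguments that the backend's __init__ actually declares, which can be sketched with inspect (GitBackend and its arguments are made up for the example):

import inspect

def find_signature_parameters(callable_, candidates):
    # keep only the candidate kwargs named in callable_'s signature
    params = inspect.signature(callable_).parameters
    return {k: v for k, v in candidates.items() if k in params}

class GitBackend:
    def __init__(self, uri, gitpath, tag=None):
        self.uri, self.gitpath, self.tag = uri, gitpath, tag

backend_args = {'uri': 'https://example.com/repo.git',
                'gitpath': '/tmp/repo',
                'unused_option': 42}
init_args = find_signature_parameters(GitBackend.__init__, backend_args)
backend = GitBackend(**init_args)   # 'unused_option' was filtered out
print(sorted(init_args))            # ['gitpath', 'uri']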
def unpack_boolean(self, data): """ Unpack a string value of CIM type 'boolean' and return its CIM data type object, or None. data (unicode string): CIM-XML string value, or None (in which case None is returned). """ if data is None: return None # CIM-XML says "These values MUST be treated as case-insensitive" # (even though the XML definition requires them to be lowercase.) data_ = data.strip().lower() # ignore space if data_ == 'true': return True if data_ == 'false': return False if data_ == '': warnings.warn("WBEM server sent invalid empty boolean value in a " "CIM-XML response.", ToleratedServerIssueWarning, stacklevel=_stacklevel_above_module(__name__)) return None raise CIMXMLParseError( _format("Invalid boolean value {0!A}", data), conn_id=self.conn_id)
Unpack a string value of CIM type 'boolean' and return its CIM data type object, or None. data (unicode string): CIM-XML string value, or None (in which case None is returned).
Below is the the instruction that describes the task: ### Input: Unpack a string value of CIM type 'boolean' and return its CIM data type object, or None. data (unicode string): CIM-XML string value, or None (in which case None is returned). ### Response: def unpack_boolean(self, data): """ Unpack a string value of CIM type 'boolean' and return its CIM data type object, or None. data (unicode string): CIM-XML string value, or None (in which case None is returned). """ if data is None: return None # CIM-XML says "These values MUST be treated as case-insensitive" # (even though the XML definition requires them to be lowercase.) data_ = data.strip().lower() # ignore space if data_ == 'true': return True if data_ == 'false': return False if data_ == '': warnings.warn("WBEM server sent invalid empty boolean value in a " "CIM-XML response.", ToleratedServerIssueWarning, stacklevel=_stacklevel_above_module(__name__)) return None raise CIMXMLParseError( _format("Invalid boolean value {0!A}", data), conn_id=self.conn_id)
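Stripped of the class plumbing and warning machinery, the tolerant parsing rules above reduce to a few lines; a standalone sketch (the real method also records a ToleratedServerIssueWarning for the empty-string case):

def parse_cim_boolean(data):
    if data is None:
        return None
    data_ = data.strip().lower()   # case- and whitespace-insensitive
    if data_ == 'true':
        return True
    if data_ == 'false':
        return False
    if data_ == '':
        return None                # tolerated as a server issue
    raise ValueError('Invalid boolean value %r' % (data,))

print(parse_cim_boolean(' TRUE '), parse_cim_boolean('false'), parse_cim_boolean(None))
# True False None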
def clean_text(value, topic=False): """ Replaces "profane" words with more suitable ones. Uses bleach to strip all but whitelisted html. Converts bbcode to Markdown """ for x in PROFANITY_REPLACEMENTS: value = value.replace(x[0], x[1]) for bbset in BBCODE_REPLACEMENTS: p = re.compile(bbset[0], re.DOTALL) value = p.sub(bbset[1], value) bleached = bleach.clean(value, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True) # We want to retain markdown quotes and we'll be running bleach again in format_post. bleached = bleached.replace('&gt;', '>').replace('&amp;', '&') return bleached
Replaces "profane" words with more suitable ones. Uses bleach to strip all but whitelisted html. Converts bbcode to Markdown
Below is the the instruction that describes the task: ### Input: Replaces "profane" words with more suitable ones. Uses bleach to strip all but whitelisted html. Converts bbcode to Markdown ### Response: def clean_text(value, topic=False): """ Replaces "profane" words with more suitable ones. Uses bleach to strip all but whitelisted html. Converts bbcode to Markdown """ for x in PROFANITY_REPLACEMENTS: value = value.replace(x[0], x[1]) for bbset in BBCODE_REPLACEMENTS: p = re.compile(bbset[0], re.DOTALL) value = p.sub(bbset[1], value) bleached = bleach.clean(value, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True) # We want to retain markdown quotes and we'll be running bleach again in format_post. bleached = bleached.replace('&gt;', '>').replace('&amp;', '&') return bleached
def resolve(self, pid): """Get Object Locations for Object.""" client = d1_cli.impl.client.CLICNClient( **self._cn_client_connect_params_from_session() ) object_location_list_pyxb = client.resolve(pid) for location in object_location_list_pyxb.objectLocation: d1_cli.impl.util.print_info(location.url)
Get Object Locations for Object.
Below is the the instruction that describes the task: ### Input: Get Object Locations for Object. ### Response: def resolve(self, pid): """Get Object Locations for Object.""" client = d1_cli.impl.client.CLICNClient( **self._cn_client_connect_params_from_session() ) object_location_list_pyxb = client.resolve(pid) for location in object_location_list_pyxb.objectLocation: d1_cli.impl.util.print_info(location.url)
def matching_fpaths(dpath_list, include_patterns, exclude_dirs=[], greater_exclude_dirs=[], exclude_patterns=[], recursive=True): r""" walks dpath lists returning all file paths that match the requested pattern. Args: dpath_list (list): include_patterns (str): exclude_dirs (None): recursive (bool): References: # TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> dpath_list = [dirname(dirname(ut.__file__))] >>> include_patterns = get_standard_include_patterns() >>> exclude_dirs = ['_page'] >>> greater_exclude_dirs = get_standard_exclude_dnames() >>> recursive = True >>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs, >>> greater_exclude_dirs, recursive) >>> result = list(fpath_gen) >>> print('\n'.join(result)) """ if isinstance(dpath_list, six.string_types): dpath_list = [dpath_list] for dpath in dpath_list: for root, dname_list, fname_list in os.walk(dpath): # Look at all subdirs subdirs = pathsplit_full(relpath(root, dpath)) # HACK: if any([dir_ in greater_exclude_dirs for dir_ in subdirs]): continue # Look at one subdir if basename(root) in exclude_dirs: continue _match = fnmatch.fnmatch for name in fname_list: # yield filepaths that are included if any(_match(name, pat) for pat in include_patterns): # ... and not excluded if not any(_match(name, pat) for pat in exclude_patterns): fpath = join(root, name) yield fpath if not recursive: break
r""" walks dpath lists returning all file paths that match the requested pattern. Args: dpath_list (list): include_patterns (str): exclude_dirs (None): recursive (bool): References: # TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> dpath_list = [dirname(dirname(ut.__file__))] >>> include_patterns = get_standard_include_patterns() >>> exclude_dirs = ['_page'] >>> greater_exclude_dirs = get_standard_exclude_dnames() >>> recursive = True >>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs, >>> greater_exclude_dirs, recursive) >>> result = list(fpath_gen) >>> print('\n'.join(result))
Below is the the instruction that describes the task: ### Input: r""" walks dpath lists returning all file paths that match the requested pattern. Args: dpath_list (list): include_patterns (str): exclude_dirs (None): recursive (bool): References: # TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> dpath_list = [dirname(dirname(ut.__file__))] >>> include_patterns = get_standard_include_patterns() >>> exclude_dirs = ['_page'] >>> greater_exclude_dirs = get_standard_exclude_dnames() >>> recursive = True >>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs, >>> greater_exclude_dirs, recursive) >>> result = list(fpath_gen) >>> print('\n'.join(result)) ### Response: def matching_fpaths(dpath_list, include_patterns, exclude_dirs=[], greater_exclude_dirs=[], exclude_patterns=[], recursive=True): r""" walks dpath lists returning all file paths that match the requested pattern. Args: dpath_list (list): include_patterns (str): exclude_dirs (None): recursive (bool): References: # TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> dpath_list = [dirname(dirname(ut.__file__))] >>> include_patterns = get_standard_include_patterns() >>> exclude_dirs = ['_page'] >>> greater_exclude_dirs = get_standard_exclude_dnames() >>> recursive = True >>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs, >>> greater_exclude_dirs, recursive) >>> result = list(fpath_gen) >>> print('\n'.join(result)) """ if isinstance(dpath_list, six.string_types): dpath_list = [dpath_list] for dpath in dpath_list: for root, dname_list, fname_list in os.walk(dpath): # Look at all subdirs subdirs = pathsplit_full(relpath(root, dpath)) # HACK: if any([dir_ in greater_exclude_dirs for dir_ in subdirs]): continue # Look at one subdir if basename(root) in exclude_dirs: continue _match = fnmatch.fnmatch for name in fname_list: # yield filepaths that are included if any(_match(name, pat) for pat in include_patterns): # ... and not excluded if not any(_match(name, pat) for pat in exclude_patterns): fpath = join(root, name) yield fpath if not recursive: break
def _merge_fastqc(samples): """ merge all fastqc samples into one by module """ fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
merge all fastqc samples into one by module
Below is the the instruction that describes the task: ### Input: merge all fastqc samples into one by module ### Response: def _merge_fastqc(samples): """ merge all fastqc samples into one by module """ fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
def execute(self, container: Container, test: TestCase, verbose: bool = False ) -> TestOutcome: """ Runs a specified test inside a given container. Returns: the outcome of the test execution. """ bug = self.__installation.bugs[container.bug] # type: Bug response = self.command(container, cmd=test.command, context=test.context, stderr=True, time_limit=test.time_limit, kill_after=test.kill_after, verbose=verbose) passed = test.oracle.check(response) return TestOutcome(response, passed)
Runs a specified test inside a given container. Returns: the outcome of the test execution.
Below is the the instruction that describes the task: ### Input: Runs a specified test inside a given container. Returns: the outcome of the test execution. ### Response: def execute(self, container: Container, test: TestCase, verbose: bool = False ) -> TestOutcome: """ Runs a specified test inside a given container. Returns: the outcome of the test execution. """ bug = self.__installation.bugs[container.bug] # type: Bug response = self.command(container, cmd=test.command, context=test.context, stderr=True, time_limit=test.time_limit, kill_after=test.kill_after, verbose=verbose) passed = test.oracle.check(response) return TestOutcome(response, passed)
def scale(self, width: int, height: int) -> None: """Scale this Image to the new width and height. Args: width (int): The new width of the Image after scaling. height (int): The new height of the Image after scaling. """ lib.TCOD_image_scale(self.image_c, width, height) self.width, self.height = width, height
Scale this Image to the new width and height. Args: width (int): The new width of the Image after scaling. height (int): The new height of the Image after scaling.
Below is the the instruction that describes the task: ### Input: Scale this Image to the new width and height. Args: width (int): The new width of the Image after scaling. height (int): The new height of the Image after scaling. ### Response: def scale(self, width: int, height: int) -> None: """Scale this Image to the new width and height. Args: width (int): The new width of the Image after scaling. height (int): The new height of the Image after scaling. """ lib.TCOD_image_scale(self.image_c, width, height) self.width, self.height = width, height
def resolve_domain(self, pipeline): """Resolve a concrete domain for ``pipeline``. """ domain = pipeline.domain(default=self._default_domain) if domain is GENERIC: raise ValueError( "Unable to determine domain for Pipeline.\n" "Pass domain=<desired domain> to your Pipeline to set a " "domain." ) return domain
Resolve a concrete domain for ``pipeline``.
Below is the the instruction that describes the task: ### Input: Resolve a concrete domain for ``pipeline``. ### Response: def resolve_domain(self, pipeline): """Resolve a concrete domain for ``pipeline``. """ domain = pipeline.domain(default=self._default_domain) if domain is GENERIC: raise ValueError( "Unable to determine domain for Pipeline.\n" "Pass domain=<desired domain> to your Pipeline to set a " "domain." ) return domain
def descriptor_factory(self, type_name, shard=u'lobby', **kwargs): """ Creates and returns a descriptor to pass later when starting the agent. First parameter is a type_name representing the descriptor. Second parameter is optional (default lobby). Usage: > descriptor_factory('shard_descriptor', 'some shard') """ desc = factories.build(type_name, shard=unicode(shard), **kwargs) return self._database_connection.save_document(desc)
Creates and returns a descriptor to pass later when starting the agent. First parameter is a type_name representing the descriptor. Second parameter is optional (default lobby). Usage: > descriptor_factory('shard_descriptor', 'some shard')
Below is the the instruction that describes the task: ### Input: Creates and returns a descriptor to pass later when starting the agent. First parameter is a type_name representing the descriptor. Second parameter is optional (default lobby). Usage: > descriptor_factory('shard_descriptor', 'some shard') ### Response: def descriptor_factory(self, type_name, shard=u'lobby', **kwargs): """ Creates and returns a descriptor to pass later when starting the agent. First parameter is a type_name representing the descriptor. Second parameter is optional (default lobby). Usage: > descriptor_factory('shard_descriptor', 'some shard') """ desc = factories.build(type_name, shard=unicode(shard), **kwargs) return self._database_connection.save_document(desc)
def _set_vlan(self, v, load=False): """ Setter method for vlan, mapped from YANG variable /interface_vlan/interface/vlan (list) If this variable is read-only (config: false) in the source YANG file, then _set_vlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan() directly. YANG Description: The list of vlans in the managed device. Each row represents a vlan. User can create/delete an entry in to this list. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",vlan.vlan, yang_name="vlan", rest_name="Vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}), is_container='list', yang_name="vlan", rest_name="Vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vlan must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",vlan.vlan, yang_name="vlan", rest_name="Vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}), is_container='list', yang_name="vlan", rest_name="Vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""", }) self.__vlan = t if hasattr(self, '_set'): self._set()
Setter method for vlan, mapped from YANG variable /interface_vlan/interface/vlan (list) If this variable is read-only (config: false) in the source YANG file, then _set_vlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan() directly. YANG Description: The list of vlans in the managed device. Each row represents a vlan. User can create/delete an entry in to this list.
Below is the the instruction that describes the task: ### Input: Setter method for vlan, mapped from YANG variable /interface_vlan/interface/vlan (list) If this variable is read-only (config: false) in the source YANG file, then _set_vlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan() directly. YANG Description: The list of vlans in the managed device. Each row represents a vlan. User can create/delete an entry in to this list. ### Response: def _set_vlan(self, v, load=False): """ Setter method for vlan, mapped from YANG variable /interface_vlan/interface/vlan (list) If this variable is read-only (config: false) in the source YANG file, then _set_vlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan() directly. YANG Description: The list of vlans in the managed device. Each row represents a vlan. User can create/delete an entry in to this list. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",vlan.vlan, yang_name="vlan", rest_name="Vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}), is_container='list', yang_name="vlan", rest_name="Vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vlan must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",vlan.vlan, yang_name="vlan", rest_name="Vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}), is_container='list', yang_name="vlan", rest_name="Vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of vlans.', u'cli-no-key-completion': None, u'alt-name': u'Vlan', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': 
u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""", }) self.__vlan = t if hasattr(self, '_set'): self._set()
def _repack(h5file):
    """
    Repack archive to remove freespace.

    Returns
    -------
    file : h5py File or None
        If the input is a h5py.File then a h5py File instance of the
        repacked archive is returned. The input File instance will no
        longer be usable.

    """
    f1, opened = _openfile(h5file)
    filename1 = f1.filename
    filename2 = filename1 + '_repack_tmp'
    f2 = h5py.File(filename2, 'w')
    for key in f1.keys():
        # print 'copying', key
        f1.copy(key, f2)
    f1.close()
    f2.close()
    filename_tmp = filename1 + '_repack_rename_tmp'
    os.rename(filename1, filename_tmp)
    os.rename(filename2, filename1)
    if opened:
        f = None
    else:
        f = h5py.File(filename1)
    os.remove(filename_tmp)
    return f
Repack archive to remove freespace.

Returns
-------
file : h5py File or None
    If the input is a h5py.File then a h5py File instance of the
    repacked archive is returned. The input File instance will no
    longer be usable.
Below is the the instruction that describes the task:
### Input:
Repack archive to remove freespace.

Returns
-------
file : h5py File or None
    If the input is a h5py.File then a h5py File instance of the
    repacked archive is returned. The input File instance will no
    longer be usable.
### Response:
def _repack(h5file):
    """
    Repack archive to remove freespace.

    Returns
    -------
    file : h5py File or None
        If the input is a h5py.File then a h5py File instance of the
        repacked archive is returned. The input File instance will no
        longer be usable.

    """
    f1, opened = _openfile(h5file)
    filename1 = f1.filename
    filename2 = filename1 + '_repack_tmp'
    f2 = h5py.File(filename2, 'w')
    for key in f1.keys():
        # print 'copying', key
        f1.copy(key, f2)
    f1.close()
    f2.close()
    filename_tmp = filename1 + '_repack_rename_tmp'
    os.rename(filename1, filename_tmp)
    os.rename(filename2, filename1)
    if opened:
        f = None
    else:
        f = h5py.File(filename1)
    os.remove(filename_tmp)
    return f
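A quick usage sketch for the repack helper above; the archive filename is an assumption for illustration:

import h5py

f1 = h5py.File('archive.h5')   # pass an already-open File...
f2 = _repack(f1)               # ...and receive a handle to the repacked archive
# Per the docstring, f1 is no longer usable after the call.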
def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None, mount_point='aws-ec2'): """POST /auth/<mount_point>/config/client Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs. The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The credentials configured using this endpoint will be used to query the status of the instances via DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the credentials are still not found and if the method is configured on an EC2 instance with metadata querying capabilities, the credentials are fetched automatically :param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the specific configurations. If using the iam auth method without inferencing, then no credentials are necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured role, and that role must have the permissions described here. :type access_key: str|unicode :param secret_key: AWS Secret key with permissions to query AWS APIs. :type secret_key: str|unicode :param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls. :type endpoint: str|unicode :param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2" is the default argument for backwards comparability within this module. :type mount_point: str|unicode :return: The response of the request. :rtype: requests.Response """ params = { 'access_key': access_key, 'secret_key': secret_key } if endpoint is not None: params['endpoint'] = endpoint return self._adapter.post('/v1/auth/{0}/config/client'.format(mount_point), json=params)
POST /auth/<mount_point>/config/client

Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs.
The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The
credentials configured using this endpoint will be used to query the status of the instances via
DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be
retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the
credentials are still not found and if the method is configured on an EC2 instance with metadata querying
capabilities, the credentials are fetched automatically.

:param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the
    specific configurations. If using the iam auth method without inferencing, then no credentials are
    necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these
    credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these
    credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set
    for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured
    role, and that role must have the permissions described here.
:type access_key: str|unicode
:param secret_key: AWS Secret key with permissions to query AWS APIs.
:type secret_key: str|unicode
:param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls.
:type endpoint: str|unicode
:param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2"
    is the default argument for backwards compatibility within this module.
:type mount_point: str|unicode
:return: The response of the request.
:rtype: requests.Response
Below is the the instruction that describes the task: ### Input: POST /auth/<mount_point>/config/client Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs. The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The credentials configured using this endpoint will be used to query the status of the instances via DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the credentials are still not found and if the method is configured on an EC2 instance with metadata querying capabilities, the credentials are fetched automatically :param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the specific configurations. If using the iam auth method without inferencing, then no credentials are necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured role, and that role must have the permissions described here. :type access_key: str|unicode :param secret_key: AWS Secret key with permissions to query AWS APIs. :type secret_key: str|unicode :param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls. :type endpoint: str|unicode :param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2" is the default argument for backwards comparability within this module. :type mount_point: str|unicode :return: The response of the request. :rtype: requests.Response ### Response: def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None, mount_point='aws-ec2'): """POST /auth/<mount_point>/config/client Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs. The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The credentials configured using this endpoint will be used to query the status of the instances via DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the credentials are still not found and if the method is configured on an EC2 instance with metadata querying capabilities, the credentials are fetched automatically :param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the specific configurations. If using the iam auth method without inferencing, then no credentials are necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these credentials also need access to iam:GetInstanceProfile. 
If, however, an alternate sts configuration is set for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured role, and that role must have the permissions described here. :type access_key: str|unicode :param secret_key: AWS Secret key with permissions to query AWS APIs. :type secret_key: str|unicode :param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls. :type endpoint: str|unicode :param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2" is the default argument for backwards comparability within this module. :type mount_point: str|unicode :return: The response of the request. :rtype: requests.Response """ params = { 'access_key': access_key, 'secret_key': secret_key } if endpoint is not None: params['endpoint'] = endpoint return self._adapter.post('/v1/auth/{0}/config/client'.format(mount_point), json=params)
def create_page(self, space, title, body, parent_id=None, type='page'):
    """
    Create page from scratch

    :param space:
    :param title:
    :param body:
    :param parent_id:
    :param type:
    :return:
    """
    log.info('Creating {type} "{space}" -> "{title}"'.format(space=space, title=title, type=type))
    url = 'rest/api/content/'
    data = {
        'type': type,
        'title': title,
        'space': {'key': space},
        'body': {'storage': {
            'value': body,
            'representation': 'storage'}}}
    if parent_id:
        data['ancestors'] = [{'type': type, 'id': parent_id}]
    return self.post(url, data=data)
Create page from scratch

:param space:
:param title:
:param body:
:param parent_id:
:param type:
:return:
Below is the the instruction that describes the task:
### Input:
Create page from scratch

:param space:
:param title:
:param body:
:param parent_id:
:param type:
:return:
### Response:
def create_page(self, space, title, body, parent_id=None, type='page'):
    """
    Create page from scratch

    :param space:
    :param title:
    :param body:
    :param parent_id:
    :param type:
    :return:
    """
    log.info('Creating {type} "{space}" -> "{title}"'.format(space=space, title=title, type=type))
    url = 'rest/api/content/'
    data = {
        'type': type,
        'title': title,
        'space': {'key': space},
        'body': {'storage': {
            'value': body,
            'representation': 'storage'}}}
    if parent_id:
        data['ancestors'] = [{'type': type, 'id': parent_id}]
    return self.post(url, data=data)
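A brief usage sketch, assuming this method belongs to a Confluence REST client exposing a post() helper; the client name, URL, credentials, and parent_id are placeholders:

client = Confluence(url='https://wiki.example.com',   # hypothetical client setup
                    username='bot', password='secret')
client.create_page(space='DOC', title='Release notes',
                   body='<p>Generated by CI.</p>', parent_id=123456)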
def _get_config_generator(filename):
    """
    A generator which populates and returns a dict.

    :param filename: A string containing the path to a YAML file.
    :return: dict
    """
    for d in _get_config(filename):
        repo = d['git']
        parsedrepo = giturlparse.parse(repo)
        name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
        src_dir = os.path.join(_get_clone_dir(), name)
        files = d.get('files')
        post_commands = d.get('post_commands', [])
        dst_dir = None
        if not files:
            dst_dir = _get_dst_dir(d['dst'])
        yield {
            'git': repo,
            'lock_file': _get_lock_file(name),
            'version': d['version'],
            'name': name,
            'src': src_dir,
            'dst': dst_dir,
            'files': _get_files_config(src_dir, files),
            'post_commands': post_commands,
        }
A generator which populates and returns a dict.

:param filename: A string containing the path to a YAML file.
:return: dict
Below is the the instruction that describes the task:
### Input:
A generator which populates and returns a dict.

:param filename: A string containing the path to a YAML file.
:return: dict
### Response:
def _get_config_generator(filename):
    """
    A generator which populates and returns a dict.

    :param filename: A string containing the path to a YAML file.
    :return: dict
    """
    for d in _get_config(filename):
        repo = d['git']
        parsedrepo = giturlparse.parse(repo)
        name = '{}.{}'.format(parsedrepo.owner, parsedrepo.name)
        src_dir = os.path.join(_get_clone_dir(), name)
        files = d.get('files')
        post_commands = d.get('post_commands', [])
        dst_dir = None
        if not files:
            dst_dir = _get_dst_dir(d['dst'])
        yield {
            'git': repo,
            'lock_file': _get_lock_file(name),
            'version': d['version'],
            'name': name,
            'src': src_dir,
            'dst': dst_dir,
            'files': _get_files_config(src_dir, files),
            'post_commands': post_commands,
        }
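A hedged consumption sketch for the generator above; the filename is an assumption:

for cfg in _get_config_generator('gilt.yml'):
    # each yielded dict describes one repository overlay
    print(cfg['name'], cfg['version'], cfg['dst'])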
def get_zcta_metadata(zcta):
    """Get metadata about a ZIP Code Tabulation Area (ZCTA).

    Parameters
    ----------
    zcta : str
        ID of ZIP Code Tabulation Area

    Returns
    -------
    metadata : dict
        Dict of data about the ZCTA, including lat/long coordinates.
    """
    conn = metadata_db_connection_proxy.get_connection()
    cur = conn.cursor()
    cur.execute(
        """
        select *
        from zcta_metadata
        where zcta_id = ?
        """,
        (zcta,),
    )
    row = cur.fetchone()
    if row is None:
        raise UnrecognizedZCTAError(zcta)
    return {col[0]: row[i] for i, col in enumerate(cur.description)}
Get metadata about a ZIP Code Tabulation Area (ZCTA).

Parameters
----------
zcta : str
    ID of ZIP Code Tabulation Area

Returns
-------
metadata : dict
    Dict of data about the ZCTA, including lat/long coordinates.
Below is the the instruction that describes the task:
### Input:
Get metadata about a ZIP Code Tabulation Area (ZCTA).

Parameters
----------
zcta : str
    ID of ZIP Code Tabulation Area

Returns
-------
metadata : dict
    Dict of data about the ZCTA, including lat/long coordinates.
### Response:
def get_zcta_metadata(zcta):
    """Get metadata about a ZIP Code Tabulation Area (ZCTA).

    Parameters
    ----------
    zcta : str
        ID of ZIP Code Tabulation Area

    Returns
    -------
    metadata : dict
        Dict of data about the ZCTA, including lat/long coordinates.
    """
    conn = metadata_db_connection_proxy.get_connection()
    cur = conn.cursor()
    cur.execute(
        """
        select *
        from zcta_metadata
        where zcta_id = ?
        """,
        (zcta,),
    )
    row = cur.fetchone()
    if row is None:
        raise UnrecognizedZCTAError(zcta)
    return {col[0]: row[i] for i, col in enumerate(cur.description)}
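A brief usage sketch for the ZCTA lookup; the ZCTA ID and the result keys are assumptions:

try:
    meta = get_zcta_metadata('02138')
    print(meta.get('latitude'), meta.get('longitude'))
except UnrecognizedZCTAError:
    print('not a recognized ZCTA')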
def get_as_nullable_datetime(self, key):
    """
    Converts a map element into a Date or returns None if conversion is not possible.

    :param key: an index of the element to get.

    :return: Date value of the element or None if conversion is not supported.
    """
    value = self.get(key)
    return DateTimeConverter.to_nullable_datetime(value)
Converts a map element into a Date or returns None if conversion is not possible.

:param key: an index of the element to get.

:return: Date value of the element or None if conversion is not supported.
Below is the the instruction that describes the task:
### Input:
Converts a map element into a Date or returns None if conversion is not possible.

:param key: an index of the element to get.

:return: Date value of the element or None if conversion is not supported.
### Response:
def get_as_nullable_datetime(self, key):
    """
    Converts a map element into a Date or returns None if conversion is not possible.

    :param key: an index of the element to get.

    :return: Date value of the element or None if conversion is not supported.
    """
    value = self.get(key)
    return DateTimeConverter.to_nullable_datetime(value)
def rename(self, columns):
    """Returns a new DataFrame with renamed columns. Currently a simplified
    version of Pandas' rename.

    Parameters
    ----------
    columns : dict
        Old names to new names.

    Returns
    -------
    DataFrame
        With columns renamed, if found.
    """
    new_data = OrderedDict()
    for column_name in self:
        if column_name in columns.keys():
            column = self._data[column_name]
            new_name = columns[column_name]
            new_data[new_name] = Series(column.values, column.index,
                                        column.dtype, new_name)
        else:
            new_data[column_name] = self._data[column_name]
    return DataFrame(new_data, self.index)
Returns a new DataFrame with renamed columns. Currently a simplified
version of Pandas' rename.

Parameters
----------
columns : dict
    Old names to new names.

Returns
-------
DataFrame
    With columns renamed, if found.
Below is the the instruction that describes the task:
### Input:
Returns a new DataFrame with renamed columns. Currently a simplified
version of Pandas' rename.

Parameters
----------
columns : dict
    Old names to new names.

Returns
-------
DataFrame
    With columns renamed, if found.
### Response:
def rename(self, columns):
    """Returns a new DataFrame with renamed columns. Currently a simplified
    version of Pandas' rename.

    Parameters
    ----------
    columns : dict
        Old names to new names.

    Returns
    -------
    DataFrame
        With columns renamed, if found.
    """
    new_data = OrderedDict()
    for column_name in self:
        if column_name in columns.keys():
            column = self._data[column_name]
            new_name = columns[column_name]
            new_data[new_name] = Series(column.values, column.index,
                                        column.dtype, new_name)
        else:
            new_data[column_name] = self._data[column_name]
    return DataFrame(new_data, self.index)
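A hedged usage sketch for the simplified rename; the constructor shapes here are assumptions inferred from the method body, not a documented API:

df = DataFrame(OrderedDict(a=Series([1, 2, 3])))  # hypothetical construction
df2 = df.rename(columns={'a': 'alpha'})           # names not found are kept as-is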
def _create_graph(self, return_target_sources=None):
    """
    Create a DiGraph out of the existing edge map.

    :param return_target_sources: Used for making up those missing returns
    :returns: A networkx.DiGraph() object
    """
    if return_target_sources is None:
        # We set it to a defaultdict in order to be consistent with the
        # actual parameter.
        return_target_sources = defaultdict(list)

    cfg = networkx.DiGraph()
    # The corner case: add a node to the graph if there is only one block
    if len(self._nodes) == 1:
        cfg.add_node(self._nodes[next(iter(self._nodes.keys()))])

    # Adding edges
    for tpl, targets in self._exit_targets.items():
        basic_block = self._nodes[tpl]  # Cannot fail :)
        for ex, jumpkind in targets:
            if ex in self._nodes:
                target_bbl = self._nodes[ex]
                cfg.add_edge(basic_block, target_bbl, jumpkind=jumpkind)

                # Add edges for possibly missing returns
                if basic_block.addr in return_target_sources:
                    for src_irsb_key in return_target_sources[basic_block.addr]:
                        cfg.add_edge(self._nodes[src_irsb_key],
                                     basic_block, jumpkind="Ijk_Ret")
            else:
                # Debugging output
                def addr_formalize(addr):
                    if addr is None:
                        return "None"
                    else:
                        return "%#08x" % addr

                s = "(["
                for addr in ex[:-1]:
                    s += addr_formalize(addr) + ", "
                s += "] %s)" % addr_formalize(ex[-1])
                l.warning("Key %s does not exist.", s)

    return cfg
Create a DiGraph out of the existing edge map.

:param return_target_sources: Used for making up those missing returns
:returns: A networkx.DiGraph() object
Below is the the instruction that describes the task:
### Input:
Create a DiGraph out of the existing edge map.

:param return_target_sources: Used for making up those missing returns
:returns: A networkx.DiGraph() object
### Response:
def _create_graph(self, return_target_sources=None):
    """
    Create a DiGraph out of the existing edge map.

    :param return_target_sources: Used for making up those missing returns
    :returns: A networkx.DiGraph() object
    """
    if return_target_sources is None:
        # We set it to a defaultdict in order to be consistent with the
        # actual parameter.
        return_target_sources = defaultdict(list)

    cfg = networkx.DiGraph()
    # The corner case: add a node to the graph if there is only one block
    if len(self._nodes) == 1:
        cfg.add_node(self._nodes[next(iter(self._nodes.keys()))])

    # Adding edges
    for tpl, targets in self._exit_targets.items():
        basic_block = self._nodes[tpl]  # Cannot fail :)
        for ex, jumpkind in targets:
            if ex in self._nodes:
                target_bbl = self._nodes[ex]
                cfg.add_edge(basic_block, target_bbl, jumpkind=jumpkind)

                # Add edges for possibly missing returns
                if basic_block.addr in return_target_sources:
                    for src_irsb_key in return_target_sources[basic_block.addr]:
                        cfg.add_edge(self._nodes[src_irsb_key],
                                     basic_block, jumpkind="Ijk_Ret")
            else:
                # Debugging output
                def addr_formalize(addr):
                    if addr is None:
                        return "None"
                    else:
                        return "%#08x" % addr

                s = "(["
                for addr in ex[:-1]:
                    s += addr_formalize(addr) + ", "
                s += "] %s)" % addr_formalize(ex[-1])
                l.warning("Key %s does not exist.", s)

    return cfg
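The method above relies on networkx's attributed edges; a minimal standalone sketch of that pattern:

import networkx

g = networkx.DiGraph()
g.add_edge('block_a', 'block_b', jumpkind='Ijk_Boring')  # edge with an attribute
g.add_edge('caller', 'block_a', jumpkind='Ijk_Ret')
print(g['block_a']['block_b']['jumpkind'])               # -> 'Ijk_Boring'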
def commands2tree(self, adapter, session, commands): '''Consumes state.Command commands and converts them to an ET protocol tree''' # todo: trap errors... hdrcmd = commands[0] commands = commands[1:] if hdrcmd.name != constants.CMD_SYNCHDR: raise common.InternalError('unexpected first command "%s" (expected "%s")' % (hdrcmd.name, constants.CMD_SYNCHDR)) if hdrcmd.version != constants.SYNCML_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML version "%s"' % (hdrcmd.version,)) xsync = ET.Element(constants.NODE_SYNCML) xhdr = ET.SubElement(xsync, hdrcmd.name) if hdrcmd.version == constants.SYNCML_VERSION_1_2: ET.SubElement(xhdr, 'VerDTD').text = constants.SYNCML_DTD_VERSION_1_2 ET.SubElement(xhdr, 'VerProto').text = hdrcmd.version ET.SubElement(xhdr, 'SessionID').text = hdrcmd.sessionID ET.SubElement(xhdr, 'MsgID').text = hdrcmd.msgID xsrc = ET.SubElement(xhdr, 'Source') ET.SubElement(xsrc, 'LocURI').text = hdrcmd.source if hdrcmd.sourceName is not None: ET.SubElement(xsrc, 'LocName').text = hdrcmd.sourceName xtgt = ET.SubElement(xhdr, 'Target') ET.SubElement(xtgt, 'LocURI').text = hdrcmd.target if hdrcmd.targetName is not None: ET.SubElement(xtgt, 'LocName').text = hdrcmd.targetName if hdrcmd.respUri is not None: ET.SubElement(xhdr, 'RespURI').text = hdrcmd.respUri if hdrcmd.auth is not None and not session.authAccepted: if hdrcmd.auth != constants.NAMESPACE_AUTH_BASIC: raise NotImplementedError('auth method "%s"' % (common.auth2string(hdrcmd.auth),)) if hdrcmd.auth == constants.NAMESPACE_AUTH_BASIC: xcred = ET.SubElement(xhdr, 'Cred') xmeta = ET.SubElement(xcred, 'Meta') ET.SubElement(xmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = 'b64' ET.SubElement(xmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.auth ET.SubElement(xcred, 'Data').text = base64.b64encode( '%s:%s' % (adapter.peer.username, adapter.peer.password)) if hdrcmd.maxMsgSize is not None or hdrcmd.maxObjSize is not None: xmeta = ET.SubElement(xhdr, 'Meta') if hdrcmd.maxMsgSize is not None: ET.SubElement(xmeta, 'MaxMsgSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxMsgSize if hdrcmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxObjSize xbody = ET.SubElement(xsync, constants.NODE_SYNCBODY) for cmdidx, cmd in enumerate(commands): xcmd = ET.SubElement(xbody, cmd.name) if cmd.cmdID is not None: ET.SubElement(xcmd, 'CmdID').text = cmd.cmdID if cmd.name == constants.CMD_ALERT: ET.SubElement(xcmd, 'Data').text = str(cmd.data) xitem = ET.SubElement(xcmd, 'Item') ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.target if cmd.lastAnchor is not None \ or cmd.nextAnchor is not None \ or cmd.maxObjSize is not None: xmeta = ET.SubElement(xitem, 'Meta') xanch = ET.SubElement(xmeta, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = cmd.nextAnchor if cmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.maxObjSize continue if cmd.name == constants.CMD_STATUS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(xcmd, 'Cmd').text = cmd.statusOf if cmd.sourceRef is not None: ET.SubElement(xcmd, 'SourceRef').text = cmd.sourceRef if cmd.targetRef is not None: 
ET.SubElement(xcmd, 'TargetRef').text = cmd.targetRef ET.SubElement(xcmd, 'Data').text = cmd.statusCode if cmd.nextAnchor is not None or cmd.lastAnchor is not None: xdata = ET.SubElement(ET.SubElement(xcmd, 'Item'), 'Data') xanch = ET.SubElement(xdata, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = cmd.nextAnchor # NOTE: this is NOT standard SyncML... if cmd.errorCode is not None or cmd.errorMsg is not None: xerr = ET.SubElement(xcmd, 'Error') if cmd.errorCode is not None: ET.SubElement(xerr, 'Code').text = cmd.errorCode if cmd.errorMsg is not None: ET.SubElement(xerr, 'Message').text = cmd.errorMsg if cmd.errorTrace is not None: ET.SubElement(xerr, 'Trace').text = cmd.errorTrace continue if cmd.name in [constants.CMD_GET, constants.CMD_PUT]: ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type if cmd.source is not None or cmd.target is not None or cmd.data: xitem = ET.SubElement(xcmd, 'Item') if cmd.source is not None: xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.target is not None: xtgt = ET.SubElement(xitem, 'Target') ET.SubElement(xtgt, 'LocURI').text = cmd.target ET.SubElement(xtgt, 'LocName').text = cmd.target if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_RESULTS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type xitem = ET.SubElement(xcmd, 'Item') xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_SYNC: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.noc is not None: ET.SubElement(xcmd, 'NumberOfChanges').text = cmd.noc if cmd.data is not None: for scmd in cmd.data: xscmd = ET.SubElement(xcmd, scmd.name) if scmd.cmdID is not None: ET.SubElement(xscmd, 'CmdID').text = scmd.cmdID if scmd.type is not None or \ ( scmd.format is not None and scmd.format != constants.FORMAT_AUTO ): xsmeta = ET.SubElement(xscmd, 'Meta') # todo: implement auto encoding determination... # (the current implementation just lets XML encoding do it, # which is for most things good enough, but not so good # for sequences that need a large amount escaping such as # binary data...) 
if scmd.format is not None and scmd.format != constants.FORMAT_AUTO: ET.SubElement(xsmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.format if scmd.type is not None: ET.SubElement(xsmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.type xsitem = ET.SubElement(xscmd, 'Item') if scmd.source is not None: ET.SubElement(ET.SubElement(xsitem, 'Source'), 'LocURI').text = scmd.source if scmd.sourceParent is not None: ET.SubElement(ET.SubElement(xsitem, 'SourceParent'), 'LocURI').text = scmd.sourceParent if scmd.target is not None: ET.SubElement(ET.SubElement(xsitem, 'Target'), 'LocURI').text = scmd.target if scmd.targetParent is not None: ET.SubElement(ET.SubElement(xsitem, 'TargetParent'), 'LocURI').text = scmd.targetParent if scmd.data is not None: if isinstance(scmd.data, basestring): ET.SubElement(xsitem, 'Data').text = scmd.data else: ET.SubElement(xsitem, 'Data').append(scmd.data) continue if cmd.name == constants.CMD_MAP: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.sourceItem is not None or cmd.targetItem is not None: xitem = ET.SubElement(xcmd, constants.CMD_MAPITEM) if cmd.sourceItem is not None: ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.sourceItem if cmd.targetItem is not None: ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.targetItem continue if cmd.name == constants.CMD_FINAL: if cmdidx + 1 < len(commands): raise common.InternalError('command "%s" not at tail end of commands' % (cmd.name,)) continue raise common.InternalError('unexpected command "%s"' % (cmd.name,)) return xsync
Consumes state.Command commands and converts them to an ET protocol tree
Below is the the instruction that describes the task: ### Input: Consumes state.Command commands and converts them to an ET protocol tree ### Response: def commands2tree(self, adapter, session, commands): '''Consumes state.Command commands and converts them to an ET protocol tree''' # todo: trap errors... hdrcmd = commands[0] commands = commands[1:] if hdrcmd.name != constants.CMD_SYNCHDR: raise common.InternalError('unexpected first command "%s" (expected "%s")' % (hdrcmd.name, constants.CMD_SYNCHDR)) if hdrcmd.version != constants.SYNCML_VERSION_1_2: raise common.FeatureNotSupported('unsupported SyncML version "%s"' % (hdrcmd.version,)) xsync = ET.Element(constants.NODE_SYNCML) xhdr = ET.SubElement(xsync, hdrcmd.name) if hdrcmd.version == constants.SYNCML_VERSION_1_2: ET.SubElement(xhdr, 'VerDTD').text = constants.SYNCML_DTD_VERSION_1_2 ET.SubElement(xhdr, 'VerProto').text = hdrcmd.version ET.SubElement(xhdr, 'SessionID').text = hdrcmd.sessionID ET.SubElement(xhdr, 'MsgID').text = hdrcmd.msgID xsrc = ET.SubElement(xhdr, 'Source') ET.SubElement(xsrc, 'LocURI').text = hdrcmd.source if hdrcmd.sourceName is not None: ET.SubElement(xsrc, 'LocName').text = hdrcmd.sourceName xtgt = ET.SubElement(xhdr, 'Target') ET.SubElement(xtgt, 'LocURI').text = hdrcmd.target if hdrcmd.targetName is not None: ET.SubElement(xtgt, 'LocName').text = hdrcmd.targetName if hdrcmd.respUri is not None: ET.SubElement(xhdr, 'RespURI').text = hdrcmd.respUri if hdrcmd.auth is not None and not session.authAccepted: if hdrcmd.auth != constants.NAMESPACE_AUTH_BASIC: raise NotImplementedError('auth method "%s"' % (common.auth2string(hdrcmd.auth),)) if hdrcmd.auth == constants.NAMESPACE_AUTH_BASIC: xcred = ET.SubElement(xhdr, 'Cred') xmeta = ET.SubElement(xcred, 'Meta') ET.SubElement(xmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = 'b64' ET.SubElement(xmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.auth ET.SubElement(xcred, 'Data').text = base64.b64encode( '%s:%s' % (adapter.peer.username, adapter.peer.password)) if hdrcmd.maxMsgSize is not None or hdrcmd.maxObjSize is not None: xmeta = ET.SubElement(xhdr, 'Meta') if hdrcmd.maxMsgSize is not None: ET.SubElement(xmeta, 'MaxMsgSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxMsgSize if hdrcmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = hdrcmd.maxObjSize xbody = ET.SubElement(xsync, constants.NODE_SYNCBODY) for cmdidx, cmd in enumerate(commands): xcmd = ET.SubElement(xbody, cmd.name) if cmd.cmdID is not None: ET.SubElement(xcmd, 'CmdID').text = cmd.cmdID if cmd.name == constants.CMD_ALERT: ET.SubElement(xcmd, 'Data').text = str(cmd.data) xitem = ET.SubElement(xcmd, 'Item') ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.target if cmd.lastAnchor is not None \ or cmd.nextAnchor is not None \ or cmd.maxObjSize is not None: xmeta = ET.SubElement(xitem, 'Meta') xanch = ET.SubElement(xmeta, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = cmd.nextAnchor if cmd.maxObjSize is not None: ET.SubElement(xmeta, 'MaxObjSize', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.maxObjSize continue if cmd.name == constants.CMD_STATUS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(xcmd, 
'Cmd').text = cmd.statusOf if cmd.sourceRef is not None: ET.SubElement(xcmd, 'SourceRef').text = cmd.sourceRef if cmd.targetRef is not None: ET.SubElement(xcmd, 'TargetRef').text = cmd.targetRef ET.SubElement(xcmd, 'Data').text = cmd.statusCode if cmd.nextAnchor is not None or cmd.lastAnchor is not None: xdata = ET.SubElement(ET.SubElement(xcmd, 'Item'), 'Data') xanch = ET.SubElement(xdata, 'Anchor', {'xmlns': constants.NAMESPACE_METINF}) if cmd.lastAnchor is not None: ET.SubElement(xanch, 'Last').text = cmd.lastAnchor if cmd.nextAnchor is not None: ET.SubElement(xanch, 'Next').text = cmd.nextAnchor # NOTE: this is NOT standard SyncML... if cmd.errorCode is not None or cmd.errorMsg is not None: xerr = ET.SubElement(xcmd, 'Error') if cmd.errorCode is not None: ET.SubElement(xerr, 'Code').text = cmd.errorCode if cmd.errorMsg is not None: ET.SubElement(xerr, 'Message').text = cmd.errorMsg if cmd.errorTrace is not None: ET.SubElement(xerr, 'Trace').text = cmd.errorTrace continue if cmd.name in [constants.CMD_GET, constants.CMD_PUT]: ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type if cmd.source is not None or cmd.target is not None or cmd.data: xitem = ET.SubElement(xcmd, 'Item') if cmd.source is not None: xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.target is not None: xtgt = ET.SubElement(xitem, 'Target') ET.SubElement(xtgt, 'LocURI').text = cmd.target ET.SubElement(xtgt, 'LocName').text = cmd.target if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_RESULTS: ET.SubElement(xcmd, 'MsgRef').text = cmd.msgRef ET.SubElement(xcmd, 'CmdRef').text = cmd.cmdRef ET.SubElement(ET.SubElement(xcmd, 'Meta'), 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = cmd.type xitem = ET.SubElement(xcmd, 'Item') xsrc = ET.SubElement(xitem, 'Source') ET.SubElement(xsrc, 'LocURI').text = cmd.source ET.SubElement(xsrc, 'LocName').text = cmd.source if cmd.data is not None: if isinstance(cmd.data, basestring): ET.SubElement(xitem, 'Data').text = cmd.data else: ET.SubElement(xitem, 'Data').append(cmd.data) continue if cmd.name == constants.CMD_SYNC: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.noc is not None: ET.SubElement(xcmd, 'NumberOfChanges').text = cmd.noc if cmd.data is not None: for scmd in cmd.data: xscmd = ET.SubElement(xcmd, scmd.name) if scmd.cmdID is not None: ET.SubElement(xscmd, 'CmdID').text = scmd.cmdID if scmd.type is not None or \ ( scmd.format is not None and scmd.format != constants.FORMAT_AUTO ): xsmeta = ET.SubElement(xscmd, 'Meta') # todo: implement auto encoding determination... # (the current implementation just lets XML encoding do it, # which is for most things good enough, but not so good # for sequences that need a large amount escaping such as # binary data...) 
if scmd.format is not None and scmd.format != constants.FORMAT_AUTO: ET.SubElement(xsmeta, 'Format', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.format if scmd.type is not None: ET.SubElement(xsmeta, 'Type', {'xmlns': constants.NAMESPACE_METINF}).text = scmd.type xsitem = ET.SubElement(xscmd, 'Item') if scmd.source is not None: ET.SubElement(ET.SubElement(xsitem, 'Source'), 'LocURI').text = scmd.source if scmd.sourceParent is not None: ET.SubElement(ET.SubElement(xsitem, 'SourceParent'), 'LocURI').text = scmd.sourceParent if scmd.target is not None: ET.SubElement(ET.SubElement(xsitem, 'Target'), 'LocURI').text = scmd.target if scmd.targetParent is not None: ET.SubElement(ET.SubElement(xsitem, 'TargetParent'), 'LocURI').text = scmd.targetParent if scmd.data is not None: if isinstance(scmd.data, basestring): ET.SubElement(xsitem, 'Data').text = scmd.data else: ET.SubElement(xsitem, 'Data').append(scmd.data) continue if cmd.name == constants.CMD_MAP: ET.SubElement(ET.SubElement(xcmd, 'Source'), 'LocURI').text = cmd.source ET.SubElement(ET.SubElement(xcmd, 'Target'), 'LocURI').text = cmd.target if cmd.sourceItem is not None or cmd.targetItem is not None: xitem = ET.SubElement(xcmd, constants.CMD_MAPITEM) if cmd.sourceItem is not None: ET.SubElement(ET.SubElement(xitem, 'Source'), 'LocURI').text = cmd.sourceItem if cmd.targetItem is not None: ET.SubElement(ET.SubElement(xitem, 'Target'), 'LocURI').text = cmd.targetItem continue if cmd.name == constants.CMD_FINAL: if cmdidx + 1 < len(commands): raise common.InternalError('command "%s" not at tail end of commands' % (cmd.name,)) continue raise common.InternalError('unexpected command "%s"' % (cmd.name,)) return xsync
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    Y : array, shape [n_samples,]
        Index of the closest center each sample belongs to.
    """
    if isinstance(X, np.ndarray):
        if not (X.dtype == 'float32' or X.dtype == 'float64'):
            X = X.astype('float64')
    labels, inertia = libdistance.assign_nearest(
        X, self.cluster_centers_, metric=self.metric)
    return labels
Predict the closest cluster each sample in X belongs to.

In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    New data to predict.

Returns
-------
Y : array, shape [n_samples,]
    Index of the closest center each sample belongs to.
Below is the the instruction that describes the task:
### Input:
Predict the closest cluster each sample in X belongs to.

In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    New data to predict.

Returns
-------
Y : array, shape [n_samples,]
    Index of the closest center each sample belongs to.
### Response:
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    Y : array, shape [n_samples,]
        Index of the closest center each sample belongs to.
    """
    if isinstance(X, np.ndarray):
        if not (X.dtype == 'float32' or X.dtype == 'float64'):
            X = X.astype('float64')
    labels, inertia = libdistance.assign_nearest(
        X, self.cluster_centers_, metric=self.metric)
    return labels
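A hedged usage sketch; the estimator name and the data are assumptions (any clusterer from the same library that exposes cluster_centers_ and this predict would do):

import numpy as np

X = np.random.rand(200, 3).astype('float64')
est = KMeans(n_clusters=5).fit(X)   # hypothetical estimator construction
print(est.predict(X)[:10])          # index of the nearest center per sample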
def read_header(self):
    """ Read header and return a Python dictionary of key:value pairs
    """
    self.header = {}

    for key, val in self.h5['data'].attrs.items():
        if six.PY3:
            key = bytes(key, 'ascii')
        if key == b'src_raj':
            self.header[key] = Angle(val, unit='hr')
        elif key == b'src_dej':
            self.header[key] = Angle(val, unit='deg')
        else:
            self.header[key] = val

    return self.header
Read header and return a Python dictionary of key:value pairs
Below is the the instruction that describes the task:
### Input:
Read header and return a Python dictionary of key:value pairs
### Response:
def read_header(self):
    """ Read header and return a Python dictionary of key:value pairs
    """
    self.header = {}

    for key, val in self.h5['data'].attrs.items():
        if six.PY3:
            key = bytes(key, 'ascii')
        if key == b'src_raj':
            self.header[key] = Angle(val, unit='hr')
        elif key == b'src_dej':
            self.header[key] = Angle(val, unit='deg')
        else:
            self.header[key] = val

    return self.header
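The header reader above wraps right ascension and declination in astropy Angle objects; a standalone sketch of that conversion (the coordinate values are arbitrary):

from astropy.coordinates import Angle

ra = Angle(17.761122, unit='hr')     # right ascension in hours
dec = Angle(-28.936778, unit='deg')  # declination in degrees
print(ra, dec)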
def instance_norm(x):
    """Instance normalization layer."""
    with tf.variable_scope("instance_norm"):
        epsilon = 1e-5
        mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
        scale = tf.get_variable(
            "scale", [x.get_shape()[-1]],
            initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable(
            "offset", [x.get_shape()[-1]],
            initializer=tf.constant_initializer(0.0))
        out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

        return out
Instance normalization layer.
Below is the the instruction that describes the task:
### Input:
Instance normalization layer.
### Response:
def instance_norm(x):
    """Instance normalization layer."""
    with tf.variable_scope("instance_norm"):
        epsilon = 1e-5
        mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
        scale = tf.get_variable(
            "scale", [x.get_shape()[-1]],
            initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable(
            "offset", [x.get_shape()[-1]],
            initializer=tf.constant_initializer(0.0))
        out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

        return out
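A minimal graph-mode usage sketch for the layer above (TF1-style API, matching the tf.variable_scope/tf.get_variable calls it uses; the input shape is an assumption):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64, 64, 3])  # NHWC feature maps
y = instance_norm(x)  # each sample normalized over its two spatial axes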
def _on_connection_open(self, connection):
    """
    Callback invoked when the connection is successfully established.

    Args:
        connection (pika.connection.SelectConnection): The newly-established
            connection.
    """
    _log.info("Successfully opened connection to %s", connection.params.host)
    self._channel = connection.channel(on_open_callback=self._on_channel_open)
Callback invoked when the connection is successfully established.

Args:
    connection (pika.connection.SelectConnection): The newly-established connection.
Below is the the instruction that describes the task:
### Input:
Callback invoked when the connection is successfully established.

Args:
    connection (pika.connection.SelectConnection): The newly-established connection.
### Response:
def _on_connection_open(self, connection):
    """
    Callback invoked when the connection is successfully established.

    Args:
        connection (pika.connection.SelectConnection): The newly-established
            connection.
    """
    _log.info("Successfully opened connection to %s", connection.params.host)
    self._channel = connection.channel(on_open_callback=self._on_channel_open)
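A sketch of wiring this callback into pika's asynchronous connection; the consumer instance and host are assumptions:

import pika

params = pika.ConnectionParameters(host='localhost')
connection = pika.SelectConnection(
    params, on_open_callback=consumer._on_connection_open)  # hypothetical consumer object
connection.ioloop.start()  # run the event loop so callbacks fire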
def copy(self, name=None):
    r"""
    Creates a deep copy of the current project

    A deep copy means that new, unique versions of all the objects are
    created but with identical data and properties.

    Parameters
    ----------
    name : string
        The name to give to the new project.  If not supplied, a name
        is automatically generated.

    Returns
    -------
    A new Project object containing copies of all objects

    """
    if name is None:
        name = ws._gen_name()
    proj = deepcopy(self)
    ws[name] = proj
    return proj
r""" Creates a deep copy of the current project A deep copy means that new, unique versions of all the objects are created but with identical data and properties. Parameters ---------- name : string The name to give to the new project. If not supplied, a name is automatically generated. Returns ------- A new Project object containing copies of all objects
Below is the the instruction that describes the task:
### Input:
Creates a deep copy of the current project

A deep copy means that new, unique versions of all the objects are
created but with identical data and properties.

Parameters
----------
name : string
    The name to give to the new project.  If not supplied, a name
    is automatically generated.

Returns
-------
A new Project object containing copies of all objects
### Response:
def copy(self, name=None):
    r"""
    Creates a deep copy of the current project

    A deep copy means that new, unique versions of all the objects are
    created but with identical data and properties.

    Parameters
    ----------
    name : string
        The name to give to the new project.  If not supplied, a name
        is automatically generated.

    Returns
    -------
    A new Project object containing copies of all objects

    """
    if name is None:
        name = ws._gen_name()
    proj = deepcopy(self)
    ws[name] = proj
    return proj
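A short usage sketch; proj stands in for an existing Project instance and the name is arbitrary:

proj2 = proj.copy(name='backup')  # deep copy, registered in the workspace under 'backup'
assert proj2 is not proj          # distinct objects with identical data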
def features_for_rank(self, proc, results):
    """Compute features for ranking results from ES/geonames

    Parameters
    ----------
    proc : dict
        One dictionary from the list that comes back from geoparse or from
        make_country_features (doesn't matter)
    results : dict
        the response from a geonames query

    Returns
    -------
    X : numpy matrix
        holding the computed features
    meta : list of dicts
        including feature information
    """
    feature_list = []
    meta = []
    results = results['hits']['hits']
    search_name = proc['word']
    code_mention = proc['features']['code_mention']
    class_mention = proc['features']['class_mention']

    for rank, entry in enumerate(results):
        # go through the results and calculate some features
        # get population number and exists
        try:
            pop = int(entry['population'])
            has_pop = 1
        except Exception as e:
            pop = 0
            has_pop = 0
        if pop > 0:
            logp = np.log(pop)
        else:
            logp = 0
        ### order the results came back
        adj_rank = 1 / np.log(rank + 2)
        # alternative names
        len_alt = len(entry['alternativenames'])
        adj_alt = np.log(len_alt)
        ### feature class (just boost the good ones)
        if entry['feature_class'] == "A" or entry['feature_class'] == "P":
            good_type = 1
        else:
            good_type = 0
        #fc_score = 3
        ### feature class/code matching
        if entry['feature_class'] == class_mention:
            good_class_mention = 1
        else:
            good_class_mention = 0
        if entry['feature_code'] == code_mention:
            good_code_mention = 1
        else:
            good_code_mention = 0
        ### edit distance
        ed = editdistance.eval(search_name, entry['name'])
        ed = ed  # shrug
        # maybe also get min edit distance to alternative names...

        features = [has_pop, pop, logp, adj_rank, len_alt, adj_alt,
                    good_type, good_class_mention, good_code_mention, ed]
        m = self.format_geonames(entry)
        feature_list.append(features)
        meta.append(m)

    #meta = geo.format_geonames(results)
    X = np.asmatrix(feature_list)
    return (X, meta)
Compute features for ranking results from ES/geonames

Parameters
----------
proc : dict
    One dictionary from the list that comes back from geoparse or from
    make_country_features (doesn't matter)
results : dict
    the response from a geonames query

Returns
-------
X : numpy matrix
    holding the computed features
meta : list of dicts
    including feature information
Below is the the instruction that describes the task:
### Input:
Compute features for ranking results from ES/geonames

Parameters
----------
proc : dict
    One dictionary from the list that comes back from geoparse or from
    make_country_features (doesn't matter)
results : dict
    the response from a geonames query

Returns
-------
X : numpy matrix
    holding the computed features
meta : list of dicts
    including feature information
### Response:
def features_for_rank(self, proc, results):
    """Compute features for ranking results from ES/geonames

    Parameters
    ----------
    proc : dict
        One dictionary from the list that comes back from geoparse or from
        make_country_features (doesn't matter)
    results : dict
        the response from a geonames query

    Returns
    -------
    X : numpy matrix
        holding the computed features
    meta : list of dicts
        including feature information
    """
    feature_list = []
    meta = []
    results = results['hits']['hits']
    search_name = proc['word']
    code_mention = proc['features']['code_mention']
    class_mention = proc['features']['class_mention']

    for rank, entry in enumerate(results):
        # go through the results and calculate some features
        # get population number and exists
        try:
            pop = int(entry['population'])
            has_pop = 1
        except Exception as e:
            pop = 0
            has_pop = 0
        if pop > 0:
            logp = np.log(pop)
        else:
            logp = 0
        ### order the results came back
        adj_rank = 1 / np.log(rank + 2)
        # alternative names
        len_alt = len(entry['alternativenames'])
        adj_alt = np.log(len_alt)
        ### feature class (just boost the good ones)
        if entry['feature_class'] == "A" or entry['feature_class'] == "P":
            good_type = 1
        else:
            good_type = 0
        #fc_score = 3
        ### feature class/code matching
        if entry['feature_class'] == class_mention:
            good_class_mention = 1
        else:
            good_class_mention = 0
        if entry['feature_code'] == code_mention:
            good_code_mention = 1
        else:
            good_code_mention = 0
        ### edit distance
        ed = editdistance.eval(search_name, entry['name'])
        ed = ed  # shrug
        # maybe also get min edit distance to alternative names...

        features = [has_pop, pop, logp, adj_rank, len_alt, adj_alt,
                    good_type, good_class_mention, good_code_mention, ed]
        m = self.format_geonames(entry)
        feature_list.append(features)
        meta.append(m)

    #meta = geo.format_geonames(results)
    X = np.asmatrix(feature_list)
    return (X, meta)
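The edit-distance feature above comes from the editdistance package, which exposes a single eval() function; a standalone sketch:

import editdistance

print(editdistance.eval('Aleppo', 'Alepo'))  # Levenshtein distance -> 1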
def BuscarCertConSaldoDisponible(self, cuit_depositante=None,
                                 cod_grano=2, campania=1314,
                                 coe=None,
                                 fecha_emision_des=None,
                                 fecha_emision_has=None,
                                 ):
    """Returns the deposit certificates in which a producer has a balance
    available to settle/withdraw/transfer (Liquidar/Retirar/Transferir)"""
    ret = self.client.cgBuscarCertConSaldoDisponible(
        auth={
            'token': self.Token, 'sign': self.Sign,
            'cuit': self.Cuit, },
        cuitDepositante=cuit_depositante or self.Cuit,
        codGrano=cod_grano, campania=campania,
        coe=coe,
        fechaEmisionDes=fecha_emision_des,
        fechaEmisionHas=fecha_emision_has,
    )['oReturn']
    self.__analizar_errores(ret)
    array = ret.get('certificado', [])
    self.Excepcion = self.Traceback = ""
    self.params_out['certificados'] = []
    for cert in array:
        self.params_out['certificados'].append(dict(
            coe=cert['coe'],
            tipo_certificado=cert['tipoCertificado'],
            campania=cert['campania'],
            cuit_depositante=cert['cuitDepositante'],
            cuit_depositario=cert['cuitDepositario'],
            nro_planta=cert['nroPlanta'],
            kilos_disponibles=cert['kilosDisponibles'],
            cod_grano=cert['codGrano'],
        ))
    return True
Returns the deposit certificates in which a producer has a balance available to settle/withdraw/transfer (Liquidar/Retirar/Transferir)
Below is the the instruction that describes the task:
### Input:
Returns the deposit certificates in which a producer has a balance available to settle/withdraw/transfer (Liquidar/Retirar/Transferir)
### Response:
def BuscarCertConSaldoDisponible(self, cuit_depositante=None,
                                 cod_grano=2, campania=1314,
                                 coe=None,
                                 fecha_emision_des=None,
                                 fecha_emision_has=None,
                                 ):
    """Returns the deposit certificates in which a producer has a balance
    available to settle/withdraw/transfer (Liquidar/Retirar/Transferir)"""
    ret = self.client.cgBuscarCertConSaldoDisponible(
        auth={
            'token': self.Token, 'sign': self.Sign,
            'cuit': self.Cuit, },
        cuitDepositante=cuit_depositante or self.Cuit,
        codGrano=cod_grano, campania=campania,
        coe=coe,
        fechaEmisionDes=fecha_emision_des,
        fechaEmisionHas=fecha_emision_has,
    )['oReturn']
    self.__analizar_errores(ret)
    array = ret.get('certificado', [])
    self.Excepcion = self.Traceback = ""
    self.params_out['certificados'] = []
    for cert in array:
        self.params_out['certificados'].append(dict(
            coe=cert['coe'],
            tipo_certificado=cert['tipoCertificado'],
            campania=cert['campania'],
            cuit_depositante=cert['cuitDepositante'],
            cuit_depositario=cert['cuitDepositario'],
            nro_planta=cert['nroPlanta'],
            kilos_disponibles=cert['kilosDisponibles'],
            cod_grano=cert['codGrano'],
        ))
    return True
def plot_sampler( sampler, suptitle=None, labels=None, bins=50, plot_samples=False, plot_hist=True, plot_chains=True, burn=0, chain_mask=None, temp_idx=0, weights=None, cutoff_weight=None, cmap='gray_r', hist_color='k', chain_alpha=0.1, points=None, covs=None, colors=None, ci=[0.95], max_hist_ticks=None, max_chain_ticks=6, label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0, label_fontsize=None, ticklabel_fontsize=None, chain_label_fontsize=None, chain_ticklabel_fontsize=None, xticklabel_angle=90.0, bottom_sep=0.075, suptitle_space=0.1, fixed_height=None, fixed_width=None, l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1, ax_space=0.1 ): """Plot the results of MCMC sampler (posterior and chains). Loosely based on triangle.py. Provides extensive options to format the plot. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. suptitle : str, optional The figure title to place at the top. Default is no title. labels : list of str, optional The labels to use for each of the free parameters. Default is to leave the axes unlabeled. bins : int, optional Number of bins to use for the histograms. Default is 50. plot_samples : bool, optional If True, the samples are plotted as individual points. Default is False. plot_hist : bool, optional If True, histograms are plotted. Default is True. plot_chains : bool, optional If True, plot the sampler chains at the bottom. Default is True. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional The weight for each sample. This is useful for post-processing the output from MultiNest sampling, for instance. Default is to not weight the samples. cutoff_weight : float, optional If `weights` and `cutoff_weight` are present, points with `weights < cutoff_weight * weights.max()` will be excluded. Default is to plot all points. cmap : str, optional The colormap to use for the histograms. Default is 'gray_r'. hist_color : str, optional The color to use for the univariate histograms. Default is 'k'. chain_alpha : float, optional The transparency to use for the plots of the individual chains. Setting this to something low lets you better visualize what is going on. Default is 0.1. points : array, (`D`,) or (`N`, `D`), optional Array of point(s) to plot onto each marginal and chain. Default is None. covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional Covariance matrix or array of covariance matrices to plot onto each marginal. If you do not want to plot a covariance matrix for a specific point, set its corresponding entry to `None`. Default is to not plot confidence ellipses for any points. colors : array of str, (`N`,), optional The colors to use for the points in `points`. Default is to use the standard matplotlib RGBCMYK cycle. 
ci : array, (`num_ci`,), optional List of confidence intervals to plot for each non-`None` entry in `covs`. Default is 0.95 (just plot the 95 percent confidence interval). max_hist_ticks : int, optional The maximum number of ticks for the histogram plots. Default is None (no limit). max_chain_ticks : int, optional The maximum number of y-axis ticks for the chain plots. Default is 6. label_chain_y : bool, optional If True, the chain plots will have y axis labels. Default is False. hide_chain_yticklabels : bool, optional If True, hide the y axis tick labels for the chain plots. Default is False (show y tick labels). chain_ytick_pad : float, optional The padding (in points) between the y-axis tick labels and the axis for the chain plots. Default is 2.0. label_fontsize : float, optional The font size (in points) to use for the axis labels. Default is `axes.labelsize`. ticklabel_fontsize : float, optional The font size (in points) to use for the axis tick labels. Default is `xtick.labelsize`. chain_label_fontsize : float, optional The font size (in points) to use for the labels of the chain axes. Default is `axes.labelsize`. chain_ticklabel_fontsize : float, optional The font size (in points) to use for the chain axis tick labels. Default is `xtick.labelsize`. xticklabel_angle : float, optional The angle to rotate the x tick labels, in degrees. Default is 90. bottom_sep : float, optional The separation (in relative figure units) between the chains and the marginals. Default is 0.075. suptitle_space : float, optional The amount of space (in relative figure units) to leave for a figure title. Default is 0.1. fixed_height : float, optional The desired figure height (in inches). Default is to automatically adjust based on `fixed_width` to make the subplots square. fixed_width : float, optional The desired figure width (in inches). Default is `figure.figsize[0]`. l : float, optional The location (in relative figure units) of the left margin. Default is 0.1. r : float, optional The location (in relative figure units) of the right margin. Default is 0.9. t1 : float, optional The location (in relative figure units) of the top of the grid of histograms. Overrides `suptitle_space` if present. b1 : float, optional The location (in relative figure units) of the bottom of the grid of histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if `plot_chains` is False. t2 : float, optional The location (in relative figure units) of the top of the grid of chain plots. Default is 0.2. b2 : float, optional The location (in relative figure units) of the bottom of the grid of chain plots. Default is 0.1. ax_space : float, optional The `w_space` and `h_space` to use (in relative figure units). Default is 0.1. """ masked_weights = None if points is not None: points = scipy.atleast_2d(points) if covs is not None and len(covs) != len(points): raise ValueError( "If covariance matrices are provided, len(covs) must equal len(points)!" ) elif covs is None: covs = [None,] * len(points) if colors is None: c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k']) colors = [c_cycle.next() for p in points] # Create axes: try: k = sampler.flatchain.shape[-1] except AttributeError: # Assumes array input is only case where there is no "flatchain" attribute. 
k = sampler.shape[-1] if labels is None: labels = [''] * k # Set up geometry: # plot_chains = # True: False: # +-----------+ +-----------+ # | +-------+ | | +-------+ | # | | | | | | | | # | | | | | | | | # | | | | | | | | # | +-------+ | | +-------+ | # | +-------+ | +-----------+ # | | | | # | +-------+ | # +-----------+ # We retain support for the original suptitle_space keyword, but can # override with t1 as needed: if t1 is None: t1 = 1 - suptitle_space # We retain support for the original bottom_sep keyword, but can override # with b1 as needed: if b1 is None: if plot_chains: b1 = t2 + bottom_sep else: b1 = 0.1 if fixed_height is None and fixed_width is None: # Default: use matplotlib's default width, handle remaining parameters # with the fixed width case below: fixed_width = matplotlib.rcParams['figure.figsize'][0] if fixed_height is None and fixed_width is not None: # Only width specified, compute height to yield square histograms: fixed_height = fixed_width * (r - l) / (t1 - b1) elif fixed_height is not None and fixed_width is None: # Only height specified, compute width to yield square histograms fixed_width = fixed_height * (t1 - b1) / (r - l) # Otherwise width and height are fixed, and we may not have square # histograms, at the user's discretion. wspace = ax_space hspace = ax_space # gs1 is the histograms, gs2 is the chains: f = plt.figure(figsize=(fixed_width, fixed_height)) gs1 = mplgs.GridSpec(k, k) gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace) if plot_chains: gs2 = mplgs.GridSpec(1, k) gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace) axes = [] # j is the row, i is the column. for j in xrange(0, k + int(plot_chains)): row = [] for i in xrange(0, k): if i > j: row.append(None) else: sharey = row[-1] if i > 0 and i < j and j < k else None sharex = axes[-1][i] if j > i and j < k else \ (row[-1] if i > 0 and j == k else None) gs = gs1[j, i] if j < k else gs2[:, i] row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex)) if j < k and ticklabel_fontsize is not None: row[-1].tick_params(labelsize=ticklabel_fontsize) elif j >= k and chain_ticklabel_fontsize is not None: row[-1].tick_params(labelsize=chain_ticklabel_fontsize) axes.append(row) axes = scipy.asarray(axes) # Update axes with the data: if isinstance(sampler, emcee.EnsembleSampler): if chain_mask is None: chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool) flat_trace = sampler.chain[chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) elif isinstance(sampler, emcee.PTSampler): if chain_mask is None: chain_mask = scipy.ones(sampler.nwalkers, dtype=bool) flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) elif isinstance(sampler, scipy.ndarray): if sampler.ndim == 4: if chain_mask is None: chain_mask = scipy.ones(sampler.shape[1], dtype=bool) flat_trace = sampler[temp_idx, chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[temp_idx, chain_mask, burn:] weights = weights.ravel() elif sampler.ndim == 3: if chain_mask is None: chain_mask = scipy.ones(sampler.shape[0], dtype=bool) flat_trace = sampler[chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[chain_mask, burn:] weights = weights.ravel() elif sampler.ndim == 2: flat_trace = sampler[burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[burn:] weights = weights.ravel() if cutoff_weight is not None and 
weights is not None: mask = weights >= cutoff_weight * weights.max() flat_trace = flat_trace[mask, :] masked_weights = weights[mask] else: masked_weights = weights else: raise ValueError("Unknown sampler class: %s" % (type(sampler),)) # j is the row, i is the column. for i in xrange(0, k): axes[i, i].clear() if plot_hist: axes[i, i].hist(flat_trace[:, i], bins=bins, color=hist_color, weights=masked_weights, normed=True, histtype='stepfilled') if plot_samples: axes[i, i].plot(flat_trace[:, i], scipy.zeros_like(flat_trace[:, i]), ',', alpha=0.1) if points is not None: # axvline can only take a scalar x, so we have to loop: for p, c, cov in zip(points, colors, covs): axes[i, i].axvline(x=p[i], linewidth=3, color=c) if cov is not None: xlim = axes[i, i].get_xlim() i_grid = scipy.linspace(xlim[0], xlim[1], 100) axes[i, i].plot( i_grid, scipy.stats.norm.pdf( i_grid, loc=p[i], scale=scipy.sqrt(cov[i, i]) ), c, linewidth=3.0 ) axes[i, i].set_xlim(xlim) if i == k - 1: axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize) plt.setp(axes[i, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle) if i < k - 1: plt.setp(axes[i, i].get_xticklabels(), visible=False) plt.setp(axes[i, i].get_yticklabels(), visible=False) for j in xrange(i + 1, k): axes[j, i].clear() if plot_hist: ct, x, y, im = axes[j, i].hist2d( flat_trace[:, i], flat_trace[:, j], bins=bins, cmap=cmap, weights=masked_weights ) if plot_samples: axes[j, i].plot(flat_trace[:, i], flat_trace[:, j], ',', alpha=0.1) if points is not None: for p, c, cov in zip(points, colors, covs): axes[j, i].plot(p[i], p[j], 'o', color=c) if cov is not None: Sigma = scipy.asarray([[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]], dtype=float) lam, v = scipy.linalg.eigh(Sigma) chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci] a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2] b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2] ang = scipy.arctan2(v[1, -1], v[0, -1]) for aval, bval in zip(a, b): ell = mplp.Ellipse( [p[i], p[j]], aval, bval, angle=scipy.degrees(ang), facecolor='none', edgecolor=c, linewidth=3 ) axes[j, i].add_artist(ell) # axes[j, i].plot(points[i], points[j], 'o') # xmid = 0.5 * (x[1:] + x[:-1]) # ymid = 0.5 * (y[1:] + y[:-1]) # axes[j, i].contour(xmid, ymid, ct.T, colors='k') if j < k - 1: plt.setp(axes[j, i].get_xticklabels(), visible=False) if i != 0: plt.setp(axes[j, i].get_yticklabels(), visible=False) if i == 0: axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize) if j == k - 1: axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize) plt.setp(axes[j, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle) if plot_chains: axes[-1, i].clear() if isinstance(sampler, emcee.EnsembleSampler): axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha) elif isinstance(sampler, emcee.PTSampler): axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T, alpha=chain_alpha) else: if sampler.ndim == 4: axes[-1, i].plot(sampler[temp_idx, :, :, i].T, alpha=chain_alpha) elif sampler.ndim == 3: axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha) elif sampler.ndim == 2: axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha) # Plot the weights on top of the chains: if weights is not None: a_wt = axes[-1, i].twinx() a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r') plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False) a_wt.yaxis.set_ticks_position('none') # Plot the cutoff weight as a horizontal line and the first sample # which is included as a vertical bar. 
Note that this won't be quite # the right behavior if the weights are not roughly monotonic. if cutoff_weight is not None: a_wt.axhline(cutoff_weight * weights.max(), linestyle='-', color='r') wi, = scipy.where(weights >= cutoff_weight * weights.max()) a_wt.axvline(wi[0], linestyle='-', color='r') if burn > 0: axes[-1, i].axvline(burn, color='r', linewidth=3) if points is not None: for p, c in zip(points, colors): axes[-1, i].axhline(y=p[i], linewidth=3, color=c) # Reset the xlim since it seems to get messed up: axes[-1, i].set_xlim(left=0) # try: # [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]] # except TypeError: # axes[-1, i].axhline(y=points[i], linewidth=3) if label_chain_y: axes[-1, i].set_ylabel(labels[i], fontsize=chain_label_fontsize) axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize) plt.setp(axes[-1, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle) for tick in axes[-1, i].get_yaxis().get_major_ticks(): tick.set_pad(chain_ytick_pad) tick.label1 = tick._get_text1() for i in xrange(0, k): if max_hist_ticks is not None: axes[k - 1, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1)) axes[i, 0].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1)) if plot_chains and max_chain_ticks is not None: axes[k, i].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1)) axes[k, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1)) if plot_chains and hide_chain_yticklabels: plt.setp(axes[k, i].get_yticklabels(), visible=False) if suptitle is not None: f.suptitle(suptitle) f.canvas.draw() return f
Plot the results of MCMC sampler (posterior and chains). Loosely based on triangle.py. Provides extensive options to format the plot. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. suptitle : str, optional The figure title to place at the top. Default is no title. labels : list of str, optional The labels to use for each of the free parameters. Default is to leave the axes unlabeled. bins : int, optional Number of bins to use for the histograms. Default is 50. plot_samples : bool, optional If True, the samples are plotted as individual points. Default is False. plot_hist : bool, optional If True, histograms are plotted. Default is True. plot_chains : bool, optional If True, plot the sampler chains at the bottom. Default is True. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional The weight for each sample. This is useful for post-processing the output from MultiNest sampling, for instance. Default is to not weight the samples. cutoff_weight : float, optional If `weights` and `cutoff_weight` are present, points with `weights < cutoff_weight * weights.max()` will be excluded. Default is to plot all points. cmap : str, optional The colormap to use for the histograms. Default is 'gray_r'. hist_color : str, optional The color to use for the univariate histograms. Default is 'k'. chain_alpha : float, optional The transparency to use for the plots of the individual chains. Setting this to something low lets you better visualize what is going on. Default is 0.1. points : array, (`D`,) or (`N`, `D`), optional Array of point(s) to plot onto each marginal and chain. Default is None. covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional Covariance matrix or array of covariance matrices to plot onto each marginal. If you do not want to plot a covariance matrix for a specific point, set its corresponding entry to `None`. Default is to not plot confidence ellipses for any points. colors : array of str, (`N`,), optional The colors to use for the points in `points`. Default is to use the standard matplotlib RGBCMYK cycle. ci : array, (`num_ci`,), optional List of confidence intervals to plot for each non-`None` entry in `covs`. Default is 0.95 (just plot the 95 percent confidence interval). max_hist_ticks : int, optional The maximum number of ticks for the histogram plots. Default is None (no limit). max_chain_ticks : int, optional The maximum number of y-axis ticks for the chain plots. Default is 6. label_chain_y : bool, optional If True, the chain plots will have y axis labels. Default is False. hide_chain_yticklabels : bool, optional If True, hide the y axis tick labels for the chain plots. Default is False (show y tick labels). 
chain_ytick_pad : float, optional The padding (in points) between the y-axis tick labels and the axis for the chain plots. Default is 2.0. label_fontsize : float, optional The font size (in points) to use for the axis labels. Default is `axes.labelsize`. ticklabel_fontsize : float, optional The font size (in points) to use for the axis tick labels. Default is `xtick.labelsize`. chain_label_fontsize : float, optional The font size (in points) to use for the labels of the chain axes. Default is `axes.labelsize`. chain_ticklabel_fontsize : float, optional The font size (in points) to use for the chain axis tick labels. Default is `xtick.labelsize`. xticklabel_angle : float, optional The angle to rotate the x tick labels, in degrees. Default is 90. bottom_sep : float, optional The separation (in relative figure units) between the chains and the marginals. Default is 0.075. suptitle_space : float, optional The amount of space (in relative figure units) to leave for a figure title. Default is 0.1. fixed_height : float, optional The desired figure height (in inches). Default is to automatically adjust based on `fixed_width` to make the subplots square. fixed_width : float, optional The desired figure width (in inches). Default is `figure.figsize[0]`. l : float, optional The location (in relative figure units) of the left margin. Default is 0.1. r : float, optional The location (in relative figure units) of the right margin. Default is 0.9. t1 : float, optional The location (in relative figure units) of the top of the grid of histograms. Overrides `suptitle_space` if present. b1 : float, optional The location (in relative figure units) of the bottom of the grid of histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if `plot_chains` is False. t2 : float, optional The location (in relative figure units) of the top of the grid of chain plots. Default is 0.2. b2 : float, optional The location (in relative figure units) of the bottom of the grid of chain plots. Default is 0.1. ax_space : float, optional The `w_space` and `h_space` to use (in relative figure units). Default is 0.1.
Below is the the instruction that describes the task: ### Input: Plot the results of MCMC sampler (posterior and chains). Loosely based on triangle.py. Provides extensive options to format the plot. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. suptitle : str, optional The figure title to place at the top. Default is no title. labels : list of str, optional The labels to use for each of the free parameters. Default is to leave the axes unlabeled. bins : int, optional Number of bins to use for the histograms. Default is 50. plot_samples : bool, optional If True, the samples are plotted as individual points. Default is False. plot_hist : bool, optional If True, histograms are plotted. Default is True. plot_chains : bool, optional If True, plot the sampler chains at the bottom. Default is True. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional The weight for each sample. This is useful for post-processing the output from MultiNest sampling, for instance. Default is to not weight the samples. cutoff_weight : float, optional If `weights` and `cutoff_weight` are present, points with `weights < cutoff_weight * weights.max()` will be excluded. Default is to plot all points. cmap : str, optional The colormap to use for the histograms. Default is 'gray_r'. hist_color : str, optional The color to use for the univariate histograms. Default is 'k'. chain_alpha : float, optional The transparency to use for the plots of the individual chains. Setting this to something low lets you better visualize what is going on. Default is 0.1. points : array, (`D`,) or (`N`, `D`), optional Array of point(s) to plot onto each marginal and chain. Default is None. covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional Covariance matrix or array of covariance matrices to plot onto each marginal. If you do not want to plot a covariance matrix for a specific point, set its corresponding entry to `None`. Default is to not plot confidence ellipses for any points. colors : array of str, (`N`,), optional The colors to use for the points in `points`. Default is to use the standard matplotlib RGBCMYK cycle. ci : array, (`num_ci`,), optional List of confidence intervals to plot for each non-`None` entry in `covs`. Default is 0.95 (just plot the 95 percent confidence interval). max_hist_ticks : int, optional The maximum number of ticks for the histogram plots. Default is None (no limit). max_chain_ticks : int, optional The maximum number of y-axis ticks for the chain plots. Default is 6. label_chain_y : bool, optional If True, the chain plots will have y axis labels. Default is False. hide_chain_yticklabels : bool, optional If True, hide the y axis tick labels for the chain plots. Default is False (show y tick labels). 
chain_ytick_pad : float, optional The padding (in points) between the y-axis tick labels and the axis for the chain plots. Default is 2.0. label_fontsize : float, optional The font size (in points) to use for the axis labels. Default is `axes.labelsize`. ticklabel_fontsize : float, optional The font size (in points) to use for the axis tick labels. Default is `xtick.labelsize`. chain_label_fontsize : float, optional The font size (in points) to use for the labels of the chain axes. Default is `axes.labelsize`. chain_ticklabel_fontsize : float, optional The font size (in points) to use for the chain axis tick labels. Default is `xtick.labelsize`. xticklabel_angle : float, optional The angle to rotate the x tick labels, in degrees. Default is 90. bottom_sep : float, optional The separation (in relative figure units) between the chains and the marginals. Default is 0.075. suptitle_space : float, optional The amount of space (in relative figure units) to leave for a figure title. Default is 0.1. fixed_height : float, optional The desired figure height (in inches). Default is to automatically adjust based on `fixed_width` to make the subplots square. fixed_width : float, optional The desired figure width (in inches). Default is `figure.figsize[0]`. l : float, optional The location (in relative figure units) of the left margin. Default is 0.1. r : float, optional The location (in relative figure units) of the right margin. Default is 0.9. t1 : float, optional The location (in relative figure units) of the top of the grid of histograms. Overrides `suptitle_space` if present. b1 : float, optional The location (in relative figure units) of the bottom of the grid of histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if `plot_chains` is False. t2 : float, optional The location (in relative figure units) of the top of the grid of chain plots. Default is 0.2. b2 : float, optional The location (in relative figure units) of the bottom of the grid of chain plots. Default is 0.1. ax_space : float, optional The `w_space` and `h_space` to use (in relative figure units). Default is 0.1. ### Response: def plot_sampler( sampler, suptitle=None, labels=None, bins=50, plot_samples=False, plot_hist=True, plot_chains=True, burn=0, chain_mask=None, temp_idx=0, weights=None, cutoff_weight=None, cmap='gray_r', hist_color='k', chain_alpha=0.1, points=None, covs=None, colors=None, ci=[0.95], max_hist_ticks=None, max_chain_ticks=6, label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0, label_fontsize=None, ticklabel_fontsize=None, chain_label_fontsize=None, chain_ticklabel_fontsize=None, xticklabel_angle=90.0, bottom_sep=0.075, suptitle_space=0.1, fixed_height=None, fixed_width=None, l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1, ax_space=0.1 ): """Plot the results of MCMC sampler (posterior and chains). Loosely based on triangle.py. Provides extensive options to format the plot. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. suptitle : str, optional The figure title to place at the top. Default is no title. labels : list of str, optional The labels to use for each of the free parameters. Default is to leave the axes unlabeled. 
bins : int, optional Number of bins to use for the histograms. Default is 50. plot_samples : bool, optional If True, the samples are plotted as individual points. Default is False. plot_hist : bool, optional If True, histograms are plotted. Default is True. plot_chains : bool, optional If True, plot the sampler chains at the bottom. Default is True. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional The weight for each sample. This is useful for post-processing the output from MultiNest sampling, for instance. Default is to not weight the samples. cutoff_weight : float, optional If `weights` and `cutoff_weight` are present, points with `weights < cutoff_weight * weights.max()` will be excluded. Default is to plot all points. cmap : str, optional The colormap to use for the histograms. Default is 'gray_r'. hist_color : str, optional The color to use for the univariate histograms. Default is 'k'. chain_alpha : float, optional The transparency to use for the plots of the individual chains. Setting this to something low lets you better visualize what is going on. Default is 0.1. points : array, (`D`,) or (`N`, `D`), optional Array of point(s) to plot onto each marginal and chain. Default is None. covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional Covariance matrix or array of covariance matrices to plot onto each marginal. If you do not want to plot a covariance matrix for a specific point, set its corresponding entry to `None`. Default is to not plot confidence ellipses for any points. colors : array of str, (`N`,), optional The colors to use for the points in `points`. Default is to use the standard matplotlib RGBCMYK cycle. ci : array, (`num_ci`,), optional List of confidence intervals to plot for each non-`None` entry in `covs`. Default is 0.95 (just plot the 95 percent confidence interval). max_hist_ticks : int, optional The maximum number of ticks for the histogram plots. Default is None (no limit). max_chain_ticks : int, optional The maximum number of y-axis ticks for the chain plots. Default is 6. label_chain_y : bool, optional If True, the chain plots will have y axis labels. Default is False. hide_chain_yticklabels : bool, optional If True, hide the y axis tick labels for the chain plots. Default is False (show y tick labels). chain_ytick_pad : float, optional The padding (in points) between the y-axis tick labels and the axis for the chain plots. Default is 2.0. label_fontsize : float, optional The font size (in points) to use for the axis labels. Default is `axes.labelsize`. ticklabel_fontsize : float, optional The font size (in points) to use for the axis tick labels. Default is `xtick.labelsize`. chain_label_fontsize : float, optional The font size (in points) to use for the labels of the chain axes. Default is `axes.labelsize`. chain_ticklabel_fontsize : float, optional The font size (in points) to use for the chain axis tick labels. Default is `xtick.labelsize`. xticklabel_angle : float, optional The angle to rotate the x tick labels, in degrees. Default is 90. 
bottom_sep : float, optional The separation (in relative figure units) between the chains and the marginals. Default is 0.075. suptitle_space : float, optional The amount of space (in relative figure units) to leave for a figure title. Default is 0.1. fixed_height : float, optional The desired figure height (in inches). Default is to automatically adjust based on `fixed_width` to make the subplots square. fixed_width : float, optional The desired figure width (in inches). Default is `figure.figsize[0]`. l : float, optional The location (in relative figure units) of the left margin. Default is 0.1. r : float, optional The location (in relative figure units) of the right margin. Default is 0.9. t1 : float, optional The location (in relative figure units) of the top of the grid of histograms. Overrides `suptitle_space` if present. b1 : float, optional The location (in relative figure units) of the bottom of the grid of histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if `plot_chains` is False. t2 : float, optional The location (in relative figure units) of the top of the grid of chain plots. Default is 0.2. b2 : float, optional The location (in relative figure units) of the bottom of the grid of chain plots. Default is 0.1. ax_space : float, optional The `w_space` and `h_space` to use (in relative figure units). Default is 0.1. """ masked_weights = None if points is not None: points = scipy.atleast_2d(points) if covs is not None and len(covs) != len(points): raise ValueError( "If covariance matrices are provided, len(covs) must equal len(points)!" ) elif covs is None: covs = [None,] * len(points) if colors is None: c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k']) colors = [c_cycle.next() for p in points] # Create axes: try: k = sampler.flatchain.shape[-1] except AttributeError: # Assumes array input is only case where there is no "flatchain" attribute. k = sampler.shape[-1] if labels is None: labels = [''] * k # Set up geometry: # plot_chains = # True: False: # +-----------+ +-----------+ # | +-------+ | | +-------+ | # | | | | | | | | # | | | | | | | | # | | | | | | | | # | +-------+ | | +-------+ | # | +-------+ | +-----------+ # | | | | # | +-------+ | # +-----------+ # We retain support for the original suptitle_space keyword, but can # override with t1 as needed: if t1 is None: t1 = 1 - suptitle_space # We retain support for the original bottom_sep keyword, but can override # with b1 as needed: if b1 is None: if plot_chains: b1 = t2 + bottom_sep else: b1 = 0.1 if fixed_height is None and fixed_width is None: # Default: use matplotlib's default width, handle remaining parameters # with the fixed width case below: fixed_width = matplotlib.rcParams['figure.figsize'][0] if fixed_height is None and fixed_width is not None: # Only width specified, compute height to yield square histograms: fixed_height = fixed_width * (r - l) / (t1 - b1) elif fixed_height is not None and fixed_width is None: # Only height specified, compute width to yield square histograms fixed_width = fixed_height * (t1 - b1) / (r - l) # Otherwise width and height are fixed, and we may not have square # histograms, at the user's discretion. 
wspace = ax_space hspace = ax_space # gs1 is the histograms, gs2 is the chains: f = plt.figure(figsize=(fixed_width, fixed_height)) gs1 = mplgs.GridSpec(k, k) gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace) if plot_chains: gs2 = mplgs.GridSpec(1, k) gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace) axes = [] # j is the row, i is the column. for j in xrange(0, k + int(plot_chains)): row = [] for i in xrange(0, k): if i > j: row.append(None) else: sharey = row[-1] if i > 0 and i < j and j < k else None sharex = axes[-1][i] if j > i and j < k else \ (row[-1] if i > 0 and j == k else None) gs = gs1[j, i] if j < k else gs2[:, i] row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex)) if j < k and ticklabel_fontsize is not None: row[-1].tick_params(labelsize=ticklabel_fontsize) elif j >= k and chain_ticklabel_fontsize is not None: row[-1].tick_params(labelsize=chain_ticklabel_fontsize) axes.append(row) axes = scipy.asarray(axes) # Update axes with the data: if isinstance(sampler, emcee.EnsembleSampler): if chain_mask is None: chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool) flat_trace = sampler.chain[chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) elif isinstance(sampler, emcee.PTSampler): if chain_mask is None: chain_mask = scipy.ones(sampler.nwalkers, dtype=bool) flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) elif isinstance(sampler, scipy.ndarray): if sampler.ndim == 4: if chain_mask is None: chain_mask = scipy.ones(sampler.shape[1], dtype=bool) flat_trace = sampler[temp_idx, chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[temp_idx, chain_mask, burn:] weights = weights.ravel() elif sampler.ndim == 3: if chain_mask is None: chain_mask = scipy.ones(sampler.shape[0], dtype=bool) flat_trace = sampler[chain_mask, burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[chain_mask, burn:] weights = weights.ravel() elif sampler.ndim == 2: flat_trace = sampler[burn:, :] flat_trace = flat_trace.reshape((-1, k)) if weights is not None: weights = weights[burn:] weights = weights.ravel() if cutoff_weight is not None and weights is not None: mask = weights >= cutoff_weight * weights.max() flat_trace = flat_trace[mask, :] masked_weights = weights[mask] else: masked_weights = weights else: raise ValueError("Unknown sampler class: %s" % (type(sampler),)) # j is the row, i is the column. 
for i in xrange(0, k): axes[i, i].clear() if plot_hist: axes[i, i].hist(flat_trace[:, i], bins=bins, color=hist_color, weights=masked_weights, normed=True, histtype='stepfilled') if plot_samples: axes[i, i].plot(flat_trace[:, i], scipy.zeros_like(flat_trace[:, i]), ',', alpha=0.1) if points is not None: # axvline can only take a scalar x, so we have to loop: for p, c, cov in zip(points, colors, covs): axes[i, i].axvline(x=p[i], linewidth=3, color=c) if cov is not None: xlim = axes[i, i].get_xlim() i_grid = scipy.linspace(xlim[0], xlim[1], 100) axes[i, i].plot( i_grid, scipy.stats.norm.pdf( i_grid, loc=p[i], scale=scipy.sqrt(cov[i, i]) ), c, linewidth=3.0 ) axes[i, i].set_xlim(xlim) if i == k - 1: axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize) plt.setp(axes[i, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle) if i < k - 1: plt.setp(axes[i, i].get_xticklabels(), visible=False) plt.setp(axes[i, i].get_yticklabels(), visible=False) for j in xrange(i + 1, k): axes[j, i].clear() if plot_hist: ct, x, y, im = axes[j, i].hist2d( flat_trace[:, i], flat_trace[:, j], bins=bins, cmap=cmap, weights=masked_weights ) if plot_samples: axes[j, i].plot(flat_trace[:, i], flat_trace[:, j], ',', alpha=0.1) if points is not None: for p, c, cov in zip(points, colors, covs): axes[j, i].plot(p[i], p[j], 'o', color=c) if cov is not None: Sigma = scipy.asarray([[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]], dtype=float) lam, v = scipy.linalg.eigh(Sigma) chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci] a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2] b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2] ang = scipy.arctan2(v[1, -1], v[0, -1]) for aval, bval in zip(a, b): ell = mplp.Ellipse( [p[i], p[j]], aval, bval, angle=scipy.degrees(ang), facecolor='none', edgecolor=c, linewidth=3 ) axes[j, i].add_artist(ell) # axes[j, i].plot(points[i], points[j], 'o') # xmid = 0.5 * (x[1:] + x[:-1]) # ymid = 0.5 * (y[1:] + y[:-1]) # axes[j, i].contour(xmid, ymid, ct.T, colors='k') if j < k - 1: plt.setp(axes[j, i].get_xticklabels(), visible=False) if i != 0: plt.setp(axes[j, i].get_yticklabels(), visible=False) if i == 0: axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize) if j == k - 1: axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize) plt.setp(axes[j, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle) if plot_chains: axes[-1, i].clear() if isinstance(sampler, emcee.EnsembleSampler): axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha) elif isinstance(sampler, emcee.PTSampler): axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T, alpha=chain_alpha) else: if sampler.ndim == 4: axes[-1, i].plot(sampler[temp_idx, :, :, i].T, alpha=chain_alpha) elif sampler.ndim == 3: axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha) elif sampler.ndim == 2: axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha) # Plot the weights on top of the chains: if weights is not None: a_wt = axes[-1, i].twinx() a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r') plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False) a_wt.yaxis.set_ticks_position('none') # Plot the cutoff weight as a horizontal line and the first sample # which is included as a vertical bar. Note that this won't be quite # the right behavior if the weights are not roughly monotonic. 
if cutoff_weight is not None: a_wt.axhline(cutoff_weight * weights.max(), linestyle='-', color='r') wi, = scipy.where(weights >= cutoff_weight * weights.max()) a_wt.axvline(wi[0], linestyle='-', color='r') if burn > 0: axes[-1, i].axvline(burn, color='r', linewidth=3) if points is not None: for p, c in zip(points, colors): axes[-1, i].axhline(y=p[i], linewidth=3, color=c) # Reset the xlim since it seems to get messed up: axes[-1, i].set_xlim(left=0) # try: # [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]] # except TypeError: # axes[-1, i].axhline(y=points[i], linewidth=3) if label_chain_y: axes[-1, i].set_ylabel(labels[i], fontsize=chain_label_fontsize) axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize) plt.setp(axes[-1, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle) for tick in axes[-1, i].get_yaxis().get_major_ticks(): tick.set_pad(chain_ytick_pad) tick.label1 = tick._get_text1() for i in xrange(0, k): if max_hist_ticks is not None: axes[k - 1, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1)) axes[i, 0].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1)) if plot_chains and max_chain_ticks is not None: axes[k, i].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1)) axes[k, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1)) if plot_chains and hide_chain_yticklabels: plt.setp(axes[k, i].get_yticklabels(), visible=False) if suptitle is not None: f.suptitle(suptitle) f.canvas.draw() return f
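A minimal usage sketch for plot_sampler, assuming emcee (2.x, which still ships PTSampler) and matplotlib are available; the toy log-probability, walker counts, and labels are illustrative placeholders rather than anything from the source:

import emcee
import numpy as np

def log_prob(theta):
    # Toy posterior: standard 2D Gaussian.
    return -0.5 * np.sum(theta ** 2)

ndim, nwalkers = 2, 50
p0 = np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, 1000)

# Marginal histograms plus chain traces, discarding the first 200 steps:
f = plot_sampler(sampler, labels=['$x$', '$y$'], burn=200, suptitle='demo')
f.savefig('sampler_demo.png')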
def compute_residuals(self): """Compute residuals and stopping thresholds.""" r = self.rsdl() adapt_tol = self.opt['RelStopTol'] if self.opt['AutoStop', 'Enabled']: adapt_tol = self.tau0 / (1. + self.k) return r, adapt_tol
Compute residuals and stopping thresholds.
Below is the the instruction that describes the task: ### Input: Compute residuals and stopping thresholds. ### Response: def compute_residuals(self): """Compute residuals and stopping thresholds.""" r = self.rsdl() adapt_tol = self.opt['RelStopTol'] if self.opt['AutoStop', 'Enabled']: adapt_tol = self.tau0 / (1. + self.k) return r, adapt_tol
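A sketch of how an iterative solver loop might consume compute_residuals; the `solver` object, its `step` method, and `max_iter` are assumptions inferred from the method body (which reads self.rsdl(), self.opt, self.tau0 and self.k), not part of the source:

for k in range(max_iter):
    solver.k = k                     # iteration counter used by the AutoStop schedule
    solver.step()                    # hypothetical single-iteration update
    r, adapt_tol = solver.compute_residuals()
    if r < adapt_tol:                # stop once the residual clears the
        break                        # (possibly auto-adapted) threshold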
def get_api_references(self, api_url=None): """Get the set of HATEOAS references for the given SCO-API. Use the default SCO-API if none is given. References are cached as they are not expected to change. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Defaults to the client's default API Url. Returns ------- dict Dictionary of HATEOAS references (reference keys to Urls). """ # Determine the Url of the SCO-API to query if not api_url is None: url = api_url else: url = self.api_url # Check if API references are in local cache. If not send GET request # and add the result to the local cache if not url in self.apis: self.apis[url] = sco.references_to_dict( sco.JsonResource(url).json[sco.REF_LINKS] ) return self.apis[url]
Get the set of HATEOAS references for the given SCO-API. Use the default SCO-API if none is given. References are cached as they are not expected to change. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Defaults to the client's default API Url. Returns ------- dict Dictionary of HATEOAS references (reference keys to Urls).
Below is the the instruction that describes the task: ### Input: Get the set of HATEOAS references for the given SCO-API. Use the default SCO-API if none is given. References are cached as they are not expected to change. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Defaults to the client's default API Url. Returns ------- dict Dictionary of HATEOAS references (reference keys to Urls). ### Response: def get_api_references(self, api_url=None): """Get the set of HATEOAS references for the given SCO-API. Use the default SCO-API if none is given. References are cached as they are not expected to change. Parameters ---------- api_url : string, optional Base Url of the SCO-API. Defaults to the client's default API Url. Returns ------- dict Dictionary of HATEOAS references (reference keys to Urls). """ # Determine the Url of the SCO-API to query if not api_url is None: url = api_url else: url = self.api_url # Check if API references are in local cache. If not send GET request # and add the result to the local cache if not url in self.apis: self.apis[url] = sco.references_to_dict( sco.JsonResource(url).json[sco.REF_LINKS] ) return self.apis[url]
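A usage sketch, assuming `client` is an instance of the (unnamed here) class that defines get_api_references; the alternate API Url is a placeholder:

refs = client.get_api_references()        # default API; fetched over HTTP once
refs = client.get_api_references()        # second call served from the local cache
other = client.get_api_references('http://sco.example.org/api')  # placeholder Url
print(refs.keys())                        # available HATEOAS reference keys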
def patch_namespaced_endpoints(self, name, namespace, body, **kwargs): """ partially update the specified Endpoints This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Endpoints (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1Endpoints If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) return data
partially update the specified Endpoints This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Endpoints (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1Endpoints If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: partially update the specified Endpoints This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Endpoints (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1Endpoints If the method is called asynchronously, returns the request thread. ### Response: def patch_namespaced_endpoints(self, name, namespace, body, **kwargs): """ partially update the specified Endpoints This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_endpoints(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Endpoints (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1Endpoints If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) else: (data) = self.patch_namespaced_endpoints_with_http_info(name, namespace, body, **kwargs) return data
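A usage sketch with the official kubernetes Python client, where this generated method lives on CoreV1Api; the object name, namespace, and patch body are placeholders:

from kubernetes import client, config

config.load_kube_config()            # or load_incluster_config() inside a pod
v1 = client.CoreV1Api()

# Strategic-merge patch that adds a label to an existing Endpoints object:
body = {'metadata': {'labels': {'patched': 'true'}}}
endpoints = v1.patch_namespaced_endpoints('my-service', 'default', body)
print(endpoints.metadata.labels)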
def check_longitude(self, ds): ''' Check variable(s) that define longitude and are defined correctly according to CF. CF §4.2 Variables representing longitude must always explicitly include the units attribute; there is no default value. The recommended unit of longitude is degrees_east. Also acceptable are degree_east, degree_E, degrees_E, degreeE, and degreesE. Optionally, the longitude type may be indicated additionally by providing the standard_name attribute with the value longitude, and/or the axis attribute with the value X. - Four checks per longitude variable - (H) longitude has units attribute - (M) longitude has an allowed units attribute - (L) longitude uses degrees_east (if not in rotated pole) - (M) longitude defines either standard_name or axis :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' # TODO we already have a check_latitude... I'm sure we can make DRYer ret_val = [] allowed_lon_units = [ 'degrees_east', 'degree_east', 'degree_e', 'degrees_e', 'degreee', 'degreese' ] # Determine the grid mappings in this dataset grid_mapping = [] grid_mapping_variables = cfutil.get_grid_mapping_variables(ds) for name in grid_mapping_variables: variable = ds.variables[name] grid_mapping_name = getattr(variable, 'grid_mapping_name', None) if grid_mapping_name: grid_mapping.append(grid_mapping_name) longitude_variables = cfutil.get_longitude_variables(ds) for longitude in longitude_variables: variable = ds.variables[longitude] units = getattr(variable, 'units', None) units_is_string = isinstance(units, basestring) standard_name = getattr(variable, 'standard_name', None) axis = getattr(variable, 'axis', None) # NOTE see docstring--should below be 4.1 or 4.2? # Check that longitude defines units valid_longitude = TestCtx(BaseCheck.HIGH, self.section_titles['4.1']) valid_longitude.assert_true(units is not None, "longitude variable '{}' must define units".format(longitude)) ret_val.append(valid_longitude.to_result()) # Check that longitude uses allowed units allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles['4.1']) if standard_name == 'grid_longitude': e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS # check that the units aren't in east and north degrees units, # but are convertible to angular units allowed_units.assert_true(units not in e_n_units and Unit(units) == Unit('degree'), "Grid longitude variable '{}' should use degree equivalent units without east or north components. " "Current units are {}".format(longitude, units)) else: allowed_units.assert_true(units_is_string and units.lower() in allowed_lon_units, "longitude variable '{}' should define valid units for longitude" "".format(longitude)) ret_val.append(allowed_units.to_result()) # Check that longitude uses degrees_east if standard_name == 'longitude' and units != 'degrees_east': # This is only a recommendation and we won't penalize but we # will include a recommended action. 
msg = ("CF recommends longitude variable '{}' to use units degrees_east" "".format(longitude)) recommended_units = Result(BaseCheck.LOW, (1, 1), self.section_titles['4.1'], [msg]) ret_val.append(recommended_units) x_variables = ds.get_variables_by_attributes(axis='X') # Check that longitude defines either standard_name or axis definition = TestCtx(BaseCheck.MEDIUM, self.section_titles['4.1']) definition.assert_true(standard_name == 'longitude' or axis == 'Y' or x_variables != [], "longitude variable '{}' should define standard_name='longitude' or axis='X'" "".format(longitude)) ret_val.append(definition.to_result()) return ret_val
Check variable(s) that define longitude and are defined correctly according to CF. CF §4.2 Variables representing longitude must always explicitly include the units attribute; there is no default value. The recommended unit of longitude is degrees_east. Also acceptable are degree_east, degree_E, degrees_E, degreeE, and degreesE. Optionally, the longitude type may be indicated additionally by providing the standard_name attribute with the value longitude, and/or the axis attribute with the value X. - Four checks per longitude variable - (H) longitude has units attribute - (M) longitude has an allowed units attribute - (L) longitude uses degrees_east (if not in rotated pole) - (M) longitude defines either standard_name or axis :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
Below is the the instruction that describes the task: ### Input: Check variable(s) that define longitude and are defined correctly according to CF. CF §4.2 Variables representing longitude must always explicitly include the units attribute; there is no default value. The recommended unit of longitude is degrees_east. Also acceptable are degree_east, degree_E, degrees_E, degreeE, and degreesE. Optionally, the longitude type may be indicated additionally by providing the standard_name attribute with the value longitude, and/or the axis attribute with the value X. - Four checks per longitude variable - (H) longitude has units attribute - (M) longitude has an allowed units attribute - (L) longitude uses degrees_east (if not in rotated pole) - (M) longitude defines either standard_name or axis :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ### Response: def check_longitude(self, ds): ''' Check variable(s) that define longitude and are defined correctly according to CF. CF §4.2 Variables representing longitude must always explicitly include the units attribute; there is no default value. The recommended unit of longitude is degrees_east. Also acceptable are degree_east, degree_E, degrees_E, degreeE, and degreesE. Optionally, the longitude type may be indicated additionally by providing the standard_name attribute with the value longitude, and/or the axis attribute with the value X. - Four checks per longitude variable - (H) longitude has units attribute - (M) longitude has an allowed units attribute - (L) longitude uses degrees_east (if not in rotated pole) - (M) longitude defines either standard_name or axis :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' # TODO we already have a check_latitude... I'm sure we can make DRYer ret_val = [] allowed_lon_units = [ 'degrees_east', 'degree_east', 'degree_e', 'degrees_e', 'degreee', 'degreese' ] # Determine the grid mappings in this dataset grid_mapping = [] grid_mapping_variables = cfutil.get_grid_mapping_variables(ds) for name in grid_mapping_variables: variable = ds.variables[name] grid_mapping_name = getattr(variable, 'grid_mapping_name', None) if grid_mapping_name: grid_mapping.append(grid_mapping_name) longitude_variables = cfutil.get_longitude_variables(ds) for longitude in longitude_variables: variable = ds.variables[longitude] units = getattr(variable, 'units', None) units_is_string = isinstance(units, basestring) standard_name = getattr(variable, 'standard_name', None) axis = getattr(variable, 'axis', None) # NOTE see docstring--should below be 4.1 or 4.2? # Check that longitude defines units valid_longitude = TestCtx(BaseCheck.HIGH, self.section_titles['4.1']) valid_longitude.assert_true(units is not None, "longitude variable '{}' must define units".format(longitude)) ret_val.append(valid_longitude.to_result()) # Check that longitude uses allowed units allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles['4.1']) if standard_name == 'grid_longitude': e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS # check that the units aren't in east and north degrees units, # but are convertible to angular units allowed_units.assert_true(units not in e_n_units and Unit(units) == Unit('degree'), "Grid longitude variable '{}' should use degree equivalent units without east or north components. 
" "Current units are {}".format(longitude, units)) else: allowed_units.assert_true(units_is_string and units.lower() in allowed_lon_units, "longitude variable '{}' should define valid units for longitude" "".format(longitude)) ret_val.append(allowed_units.to_result()) # Check that longitude uses degrees_east if standard_name == 'longitude' and units != 'degrees_east': # This is only a recommendation and we won't penalize but we # will include a recommended action. msg = ("CF recommends longitude variable '{}' to use units degrees_east" "".format(longitude)) recommended_units = Result(BaseCheck.LOW, (1, 1), self.section_titles['4.1'], [msg]) ret_val.append(recommended_units) x_variables = ds.get_variables_by_attributes(axis='X') # Check that longitude defines either standard_name or axis definition = TestCtx(BaseCheck.MEDIUM, self.section_titles['4.1']) definition.assert_true(standard_name == 'longitude' or axis == 'Y' or x_variables != [], "longitude variable '{}' should define standard_name='longitude' or axis='X'" "".format(longitude)) ret_val.append(definition.to_result()) return ret_val
def function(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0): """ returns the 2D elliptical Gaussian profile at (x, y); the 1/(2*pi*sigma_x*sigma_y) prefactor makes the profile integrate to amp """ c = amp/(2*np.pi*sigma_x*sigma_y) delta_x = x - center_x delta_y = y - center_y exponent = -((delta_x/sigma_x)**2+(delta_y/sigma_y)**2)/2. return c * np.exp(exponent)
returns the 2D elliptical Gaussian profile at (x, y); the 1/(2*pi*sigma_x*sigma_y) prefactor makes the profile integrate to amp
Below is the the instruction that describes the task: ### Input: returns the 2D elliptical Gaussian profile at (x, y); the 1/(2*pi*sigma_x*sigma_y) prefactor makes the profile integrate to amp ### Response: def function(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0): """ returns the 2D elliptical Gaussian profile at (x, y); the 1/(2*pi*sigma_x*sigma_y) prefactor makes the profile integrate to amp """ c = amp/(2*np.pi*sigma_x*sigma_y) delta_x = x - center_x delta_y = y - center_y exponent = -((delta_x/sigma_x)**2+(delta_y/sigma_y)**2)/2. return c * np.exp(exponent)
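A quick numerical check, assuming `profile` is an instance of the class defining this method; thanks to the prefactor, the grid sum times the pixel area should recover the requested amplitude:

import numpy as np

x, y = np.meshgrid(np.linspace(-10, 10, 401), np.linspace(-10, 10, 401))
g = profile.function(x, y, amp=2.0, sigma_x=1.0, sigma_y=2.0)
dx = 20.0 / 400                      # grid spacing
print(g.sum() * dx * dx)             # ~2.0, i.e. the requested amp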
def emailclients(self, tag=None, fromdate=None, todate=None): """ Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email. """ return self.call("GET", "/stats/outbound/opens/emailclients", tag=tag, fromdate=fromdate, todate=todate)
Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email.
Below is the the instruction that describes the task: ### Input: Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email. ### Response: def emailclients(self, tag=None, fromdate=None, todate=None): """ Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email. """ return self.call("GET", "/stats/outbound/opens/emailclients", tag=tag, fromdate=fromdate, todate=todate)
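A usage sketch, assuming `postmark` is an instance of the Postmark client class that defines this stats wrapper; the tag and dates are placeholders:

# Email-client breakdown of opens for one tag over a date window:
stats = postmark.emailclients(tag='newsletter', fromdate='2015-01-01', todate='2015-01-31')
print(stats)   # parsed response from /stats/outbound/opens/emailclients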
def tanh(x, context=None): """ Return the hyperbolic tangent of x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_tanh, (BigFloat._implicit_convert(x),), context, )
Return the hyperbolic tangent of x.
Below is the the instruction that describes the task: ### Input: Return the hyperbolic tangent of x. ### Response: def tanh(x, context=None): """ Return the hyperbolic tangent of x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_tanh, (BigFloat._implicit_convert(x),), context, )
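A usage sketch with the bigfloat package, whose contexts this wrapper consumes:

from bigfloat import BigFloat, precision, tanh

print(tanh(0.5))                          # evaluated in the current context
with precision(200):                      # 200-bit significand inside this block
    print(tanh(BigFloat(1)))
print(tanh(1, context=precision(100)))    # context passed explicitly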
def namedb_get_names_with_value_hash( cur, value_hash, block_number ): """ Get the names with the given value hash. Only includes current, non-revoked names. Return None if there are no names. """ unexpired_query, unexpired_args = namedb_select_where_unexpired_names( block_number ) select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \ "WHERE value_hash = ? AND revoked = 0 AND " + unexpired_query + ";" args = (value_hash,) + unexpired_args name_rows = namedb_query_execute( cur, select_query, args ) names = [] for name_row in name_rows: names.append( name_row['name'] ) if len(names) == 0: return None else: return names
Get the names with the given value hash. Only includes current, non-revoked names. Return None if there are no names.
Below is the the instruction that describes the task: ### Input: Get the names with the given value hash. Only includes current, non-revoked names. Return None if there are no names. ### Response: def namedb_get_names_with_value_hash( cur, value_hash, block_number ): """ Get the names with the given value hash. Only includes current, non-revoked names. Return None if there are no names. """ unexpired_query, unexpired_args = namedb_select_where_unexpired_names( block_number ) select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \ "WHERE value_hash = ? AND revoked = 0 AND " + unexpired_query + ";" args = (value_hash,) + unexpired_args name_rows = namedb_query_execute( cur, select_query, args ) names = [] for name_row in name_rows: names.append( name_row['name'] ) if len(names) == 0: return None else: return names
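A calling sketch, assuming a sqlite3 cursor over the name database; the database path, value hash, and block height are placeholders:

import sqlite3

con = sqlite3.connect('blockstack-server.db')    # placeholder path
cur = con.cursor()
names = namedb_get_names_with_value_hash(cur, 'deadbeef' * 5, 600000)
if names is None:
    print('no current, non-revoked names carry that value hash')
else:
    print(names)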
def _tokenize_wordpiece(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BERTBasicTokenizer. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in self.basic_tokenizer._whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.vocab.unknown_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.vocab.unknown_token) else: output_tokens.extend(sub_tokens) return output_tokens
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BERTBasicTokenizer. Returns: A list of wordpiece tokens.
Below is the the instruction that describes the task: ### Input: Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BERTBasicTokenizer. Returns: A list of wordpiece tokens. ### Response: def _tokenize_wordpiece(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BERTBasicTokenizer. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in self.basic_tokenizer._whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.vocab.unknown_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.vocab.unknown_token) else: output_tokens.extend(sub_tokens) return output_tokens
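The greedy longest-match-first loop is easy to exercise in isolation. A minimal standalone sketch with a toy vocabulary (the '[UNK]' token string is an assumption, not taken from the class above):

def wordpiece(token, vocab, unk='[UNK]'):
    # Greedy longest-match-first over the characters of one token.
    pieces, start = [], 0
    while start < len(token):
        end, cur = len(token), None
        while start < end:
            sub = token[start:end]
            if start > 0:
                sub = '##' + sub  # continuation pieces carry the '##' prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole token is unknown
        pieces.append(cur)
        start = end
    return pieces

print(wordpiece('unaffable', {'un', '##aff', '##able'}))  # ['un', '##aff', '##able']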
def validate_email_with_name(value): """ Validate email address. Both "Recipient Name <[email protected]>" and "[email protected]" are valid. """ value = force_text(value) recipient = value if '<' in value and '>' in value: start = value.find('<') + 1 end = value.find('>') if start < end: recipient = value[start:end] validate_email(recipient)
Validate email address. Both "Recipient Name <[email protected]>" and "[email protected]" are valid.
Below is the the instruction that describes the task: ### Input: Validate email address. Both "Recipient Name <[email protected]>" and "[email protected]" are valid. ### Response: def validate_email_with_name(value): """ Validate email address. Both "Recipient Name <[email protected]>" and "[email protected]" are valid. """ value = force_text(value) recipient = value if '<' in value and '>' in value: start = value.find('<') + 1 end = value.find('>') if start < end: recipient = value[start:end] validate_email(recipient)
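The slicing logic is plain string work and can be previewed standalone (the address below is a placeholder, matching the redacted ones in the docstring):

value = 'Recipient Name <[email protected]>'
start, end = value.find('<') + 1, value.find('>')
recipient = value[start:end] if start < end else value
print(recipient)  # [email protected]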
def bam2fastq(bamfile, univ_options, picard_options): """ Split an input bam to paired fastqs. :param str bamfile: Path to a bam file :param dict univ_options: Dict of universal options used by almost all tools :param dict picard_options: Dict of options specific to Picard :return: Path to the _1.fastq file :rtype: str """ work_dir = os.path.split(bamfile)[0] base_name = os.path.split(os.path.splitext(bamfile)[0])[1] parameters = ['SamToFastq', ''.join(['I=', docker_path(bamfile)]), ''.join(['F=/data/', base_name, '_1.fastq']), ''.join(['F2=/data/', base_name, '_2.fastq']), ''.join(['FU=/data/', base_name, '_UP.fastq'])] docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'], tool_version=picard_options['version']) first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq']) assert os.path.exists(first_fastq) return first_fastq
Split an input bam to paired fastqs. :param str bamfile: Path to a bam file :param dict univ_options: Dict of universal options used by almost all tools :param dict picard_options: Dict of options specific to Picard :return: Path to the _1.fastq file :rtype: str
Below is the the instruction that describes the task: ### Input: Split an input bam to paired fastqs. :param str bamfile: Path to a bam file :param dict univ_options: Dict of universal options used by almost all tools :param dict picard_options: Dict of options specific to Picard :return: Path to the _1.fastq file :rtype: str ### Response: def bam2fastq(bamfile, univ_options, picard_options): """ Split an input bam to paired fastqs. :param str bamfile: Path to a bam file :param dict univ_options: Dict of universal options used by almost all tools :param dict picard_options: Dict of options specific to Picard :return: Path to the _1.fastq file :rtype: str """ work_dir = os.path.split(bamfile)[0] base_name = os.path.split(os.path.splitext(bamfile)[0])[1] parameters = ['SamToFastq', ''.join(['I=', docker_path(bamfile)]), ''.join(['F=/data/', base_name, '_1.fastq']), ''.join(['F2=/data/', base_name, '_2.fastq']), ''.join(['FU=/data/', base_name, '_UP.fastq'])] docker_call(tool='picard', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], java_xmx=univ_options['java_Xmx'], tool_version=picard_options['version']) first_fastq = ''.join([work_dir, '/', base_name, '_1.fastq']) assert os.path.exists(first_fastq) return first_fastq
def get_romfile_path(game, inttype=Integrations.DEFAULT): """ Return the path to a given game's romfile """ for extension in EMU_EXTENSIONS.keys(): possible_path = get_file_path(game, "rom" + extension, inttype) if possible_path: return possible_path raise FileNotFoundError("No romfiles found for game: %s" % game)
Return the path to a given game's romfile
Below is the the instruction that describes the task: ### Input: Return the path to a given game's romfile ### Response: def get_romfile_path(game, inttype=Integrations.DEFAULT): """ Return the path to a given game's romfile """ for extension in EMU_EXTENSIONS.keys(): possible_path = get_file_path(game, "rom" + extension, inttype) if possible_path: return possible_path raise FileNotFoundError("No romfiles found for game: %s" % game)
def compile(self, compass): """ Calls the compass script specified in the compass extension with the paths provided by the config.rb. """ try: output = subprocess.check_output( [compass.compass_path, 'compile', '-q'], cwd=self.base_dir) os.utime(self.dest, None) compass.log.debug(output) except OSError, e: if e.errno == errno.ENOENT: compass.log.error("Compass could not be found in the PATH " + "and/or in the COMPASS_PATH setting! " + "Disabling compilation.") compass.disabled = True else: raise e
Calls the compass script specified in the compass extension with the paths provided by the config.rb.
Below is the the instruction that describes the task: ### Input: Calls the compass script specified in the compass extension with the paths provided by the config.rb. ### Response: def compile(self, compass): """ Calls the compass script specified in the compass extension with the paths provided by the config.rb. """ try: output = subprocess.check_output( [compass.compass_path, 'compile', '-q'], cwd=self.base_dir) os.utime(self.dest, None) compass.log.debug(output) except OSError, e: if e.errno == errno.ENOENT: compass.log.error("Compass could not be found in the PATH " + "and/or in the COMPASS_PATH setting! " + "Disabling compilation.") compass.disabled = True else: raise e
def new_linsolver(name,prop): """ Creates a linear solver. Parameters ---------- name : string prop : string Returns ------- solver : :class:`LinSolver <optalg.lin_solver.LinSolver>` """ if name == 'mumps': return LinSolverMUMPS(prop) elif name == 'superlu': return LinSolverSUPERLU(prop) elif name == 'umfpack': return LinSolverUMFPACK(prop) elif name == 'default': try: return new_linsolver('mumps',prop) except ImportError: return new_linsolver('superlu',prop) else: raise ValueError('invalid linear solver name')
Creates a linear solver. Parameters ---------- name : string prop : string Returns ------- solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
Below is the the instruction that describes the task: ### Input: Creates a linear solver. Parameters ---------- name : string prop : string Returns ------- solver : :class:`LinSolver <optalg.lin_solver.LinSolver>` ### Response: def new_linsolver(name,prop): """ Creates a linear solver. Parameters ---------- name : string prop : string Returns ------- solver : :class:`LinSolver <optalg.lin_solver.LinSolver>` """ if name == 'mumps': return LinSolverMUMPS(prop) elif name == 'superlu': return LinSolverSUPERLU(prop) elif name == 'umfpack': return LinSolverUMFPACK(prop) elif name == 'default': try: return new_linsolver('mumps',prop) except ImportError: return new_linsolver('superlu',prop) else: raise ValueError('invalid linear solver name')
def interfaces(self): """list[dict]: A list of dictionary items describing the operational state of interfaces. This method currently only lists the Physical Interfaces ( Gigabitethernet, tengigabitethernet, fortygigabitethernet, hundredgigabitethernet) and Loopback interfaces. It currently excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre Channel ports. """ urn = "{urn:brocade.com:mgmt:brocade-interface-ext}" int_ns = 'urn:brocade.com:mgmt:brocade-interface-ext' result = [] has_more = '' last_interface_name = '' last_interface_type = '' while (has_more == '') or (has_more == 'true'): request_interface = self.get_interface_detail_request( last_interface_name, last_interface_type) interface_result = self._callback(request_interface, 'get') has_more = interface_result.find('%shas-more' % urn).text for item in interface_result.findall('%sinterface' % urn): interface_type = item.find('%sinterface-type' % urn).text interface_name = item.find('%sinterface-name' % urn).text last_interface_type = interface_type last_interface_name = interface_name if "gigabitethernet" in interface_type: interface_role = item.find('%sport-role' % urn).text if_name = item.find('%sif-name' % urn).text interface_state = item.find('%sif-state' % urn).text interface_proto_state = item.find('%sline-protocol-state' % urn).text interface_mac = item.find( '%scurrent-hardware-address' % urn).text item_results = {'interface-type': interface_type, 'interface-name': interface_name, 'interface-role': interface_role, 'if-name': if_name, 'interface-state': interface_state, 'interface-proto-state': interface_proto_state, 'interface-mac': interface_mac} result.append(item_results) # Loopback interfaces. Probably for other non-physical interfaces, too. ip_result = [] request_interface = ET.Element('get-ip-interface', xmlns=int_ns) interface_result = self._callback(request_interface, 'get') for interface in interface_result.findall('%sinterface' % urn): int_type = interface.find('%sinterface-type' % urn).text int_name = interface.find('%sinterface-name' % urn).text if int_type == 'unknown': continue int_state = interface.find('%sif-state' % urn).text int_proto_state = interface.find('%sline-protocol-state' % urn).text ip_address = interface.find('.//%sipv4' % urn).text results = {'interface-type': int_type, 'interface-name': int_name, 'interface-role': None, 'if-name': None, 'interface-state': int_state, 'interface-proto-state': int_proto_state, 'interface-mac': None, 'ip-address': ip_address} x = next((x for x in result if int_type == x['interface-type'] and int_name == x['interface-name']), None) if x is not None: results.update(x) ip_result.append(results) return ip_result
list[dict]: A list of dictionary items describing the operational state of interfaces. This method currently only lists the Physical Interfaces ( Gigabitethernet, tengigabitethernet, fortygigabitethernet, hundredgigabitethernet) and Loopback interfaces. It currently excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre Channel ports.
Below is the the instruction that describes the task: ### Input: list[dict]: A list of dictionary items describing the operational state of interfaces. This method currently only lists the Physical Interfaces ( Gigabitethernet, tengigabitethernet, fortygigabitethernet, hundredgigabitethernet) and Loopback interfaces. It currently excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre Channel ports. ### Response: def interfaces(self): """list[dict]: A list of dictionary items describing the operational state of interfaces. This method currently only lists the Physical Interfaces ( Gigabitethernet, tengigabitethernet, fortygigabitethernet, hundredgigabitethernet) and Loopback interfaces. It currently excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre Channel ports. """ urn = "{urn:brocade.com:mgmt:brocade-interface-ext}" int_ns = 'urn:brocade.com:mgmt:brocade-interface-ext' result = [] has_more = '' last_interface_name = '' last_interface_type = '' while (has_more == '') or (has_more == 'true'): request_interface = self.get_interface_detail_request( last_interface_name, last_interface_type) interface_result = self._callback(request_interface, 'get') has_more = interface_result.find('%shas-more' % urn).text for item in interface_result.findall('%sinterface' % urn): interface_type = item.find('%sinterface-type' % urn).text interface_name = item.find('%sinterface-name' % urn).text last_interface_type = interface_type last_interface_name = interface_name if "gigabitethernet" in interface_type: interface_role = item.find('%sport-role' % urn).text if_name = item.find('%sif-name' % urn).text interface_state = item.find('%sif-state' % urn).text interface_proto_state = item.find('%sline-protocol-state' % urn).text interface_mac = item.find( '%scurrent-hardware-address' % urn).text item_results = {'interface-type': interface_type, 'interface-name': interface_name, 'interface-role': interface_role, 'if-name': if_name, 'interface-state': interface_state, 'interface-proto-state': interface_proto_state, 'interface-mac': interface_mac} result.append(item_results) # Loopback interfaces. Probably for other non-physical interfaces, too. ip_result = [] request_interface = ET.Element('get-ip-interface', xmlns=int_ns) interface_result = self._callback(request_interface, 'get') for interface in interface_result.findall('%sinterface' % urn): int_type = interface.find('%sinterface-type' % urn).text int_name = interface.find('%sinterface-name' % urn).text if int_type == 'unknown': continue int_state = interface.find('%sif-state' % urn).text int_proto_state = interface.find('%sline-protocol-state' % urn).text ip_address = interface.find('.//%sipv4' % urn).text results = {'interface-type': int_type, 'interface-name': int_name, 'interface-role': None, 'if-name': None, 'interface-state': int_state, 'interface-proto-state': int_proto_state, 'interface-mac': None, 'ip-address': ip_address} x = next((x for x in result if int_type == x['interface-type'] and int_name == x['interface-name']), None) if x is not None: results.update(x) ip_result.append(results) return ip_result
def new_connection(self, remote_ip, remote_port): """This method is called when a new SMTP session is opened. [PUBLIC API] """ self.state.set_state('new') self._message = Message(Peer(remote_ip, remote_port)) decision, response_sent = self.is_allowed('accept_new_connection', self._message.peer) if decision: if not response_sent: self.handle_input('greet') self._set_size_restrictions() else: if not response_sent: self.reply(554, 'SMTP service not available') self.close_connection()
This method is called when a new SMTP session is opened. [PUBLIC API]
Below is the the instruction that describes the task: ### Input: This method is called when a new SMTP session is opened. [PUBLIC API] ### Response: def new_connection(self, remote_ip, remote_port): """This method is called when a new SMTP session is opened. [PUBLIC API] """ self.state.set_state('new') self._message = Message(Peer(remote_ip, remote_port)) decision, response_sent = self.is_allowed('accept_new_connection', self._message.peer) if decision: if not response_sent: self.handle_input('greet') self._set_size_restrictions() else: if not response_sent: self.reply(554, 'SMTP service not available') self.close_connection()
def save_bed(cls, query, filename=sys.stdout): """ write a bed12 file of the query. Parameters ---------- query : query a table or query to save to file filename : file string or filehandle to write output """ out = _open(filename, 'w') for o in query: out.write(o.bed() + '\n')
write a bed12 file of the query. Parameters ---------- query : query a table or query to save to file filename : file string or filehandle to write output
Below is the the instruction that describes the task: ### Input: write a bed12 file of the query. Parameters ---------- query : query a table or query to save to file filename : file string or filehandle to write output ### Response: def save_bed(cls, query, filename=sys.stdout): """ write a bed12 file of the query. Parameters ---------- query : query a table or query to save to file filename : file string or filehandle to write output """ out = _open(filename, 'w') for o in query: out.write(o.bed() + '\n')
def AAM(cpu, imm=None): """ ASCII adjust AX after multiply. Adjusts the result of the multiplication of two unpacked BCD values to create a pair of unpacked (base 10) BCD values. The AX register is the implied source and destination operand for this instruction. The AAM instruction is only useful when it follows a MUL instruction that multiplies (binary multiplication) two unpacked BCD values and stores a word result in the AX register. The AAM instruction then adjusts the contents of the AX register to contain the correct 2-digit unpacked (base 10) BCD result. The SF, ZF, and PF flags are set according to the resulting binary value in the AL register. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: tempAL = AL; AH = tempAL / 10; AL = tempAL MOD 10; :param cpu: current CPU. """ if imm is None: imm = 10 else: imm = imm.read() cpu.AH = Operators.UDIV(cpu.AL, imm) cpu.AL = Operators.UREM(cpu.AL, imm) # Defined flags: ...sz.p. cpu._calculate_logic_flags(8, cpu.AL)
ASCII adjust AX after multiply. Adjusts the result of the multiplication of two unpacked BCD values to create a pair of unpacked (base 10) BCD values. The AX register is the implied source and destination operand for this instruction. The AAM instruction is only useful when it follows a MUL instruction that multiplies (binary multiplication) two unpacked BCD values and stores a word result in the AX register. The AAM instruction then adjusts the contents of the AX register to contain the correct 2-digit unpacked (base 10) BCD result. The SF, ZF, and PF flags are set according to the resulting binary value in the AL register. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: tempAL = AL; AH = tempAL / 10; AL = tempAL MOD 10; :param cpu: current CPU.
Below is the the instruction that describes the task: ### Input: ASCII adjust AX after multiply. Adjusts the result of the multiplication of two unpacked BCD values to create a pair of unpacked (base 10) BCD values. The AX register is the implied source and destination operand for this instruction. The AAM instruction is only useful when it follows a MUL instruction that multiplies (binary multiplication) two unpacked BCD values and stores a word result in the AX register. The AAM instruction then adjusts the contents of the AX register to contain the correct 2-digit unpacked (base 10) BCD result. The SF, ZF, and PF flags are set according to the resulting binary value in the AL register. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: tempAL = AL; AH = tempAL / 10; AL = tempAL MOD 10; :param cpu: current CPU. ### Response: def AAM(cpu, imm=None): """ ASCII adjust AX after multiply. Adjusts the result of the multiplication of two unpacked BCD values to create a pair of unpacked (base 10) BCD values. The AX register is the implied source and destination operand for this instruction. The AAM instruction is only useful when it follows a MUL instruction that multiplies (binary multiplication) two unpacked BCD values and stores a word result in the AX register. The AAM instruction then adjusts the contents of the AX register to contain the correct 2-digit unpacked (base 10) BCD result. The SF, ZF, and PF flags are set according to the resulting binary value in the AL register. This instruction executes as described in compatibility mode and legacy mode. It is not valid in 64-bit mode.:: tempAL = AL; AH = tempAL / 10; AL = tempAL MOD 10; :param cpu: current CPU. """ if imm is None: imm = 10 else: imm = imm.read() cpu.AH = Operators.UDIV(cpu.AL, imm) cpu.AL = Operators.UREM(cpu.AL, imm) # Defined flags: ...sz.p. cpu._calculate_logic_flags(8, cpu.AL)
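The adjustment itself is just a base-10 divmod of AL; a plain-Python sketch of the semantics (operand values are made up):

# After MUL of two unpacked BCD digits, AL holds a binary product (0..81).
al = 7 * 9  # 63
ah, al = divmod(al, 10)  # AAM with the default immediate of 10
assert (ah, al) == (6, 3)  # AX now holds the unpacked BCD digits 6 and 3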
def plot_cumulative_density(self, **kwargs): """ Plots a pretty figure of {0}.{1} Matplotlib plot arguments can be passed in inside the kwargs, plus Parameters ----------- show_censors: bool place markers at censorship events. Default: False censor_styles: bool If show_censors, this dictionary will be passed into the plot call. ci_alpha: bool the transparency level of the confidence interval. Default: 0.3 ci_force_lines: bool force the confidence intervals to be line plots (versus default shaded areas). Default: False ci_show: bool show confidence intervals. Default: True ci_legend: bool if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False at_risk_counts: bool show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False loc: slice specify a time-based subsection of the curves to plot, ex: >>> model.plot(loc=slice(0.,10.)) will plot the time values between t=0. and t=10. iloc: slice specify a location-based subsection of the curves to plot, ex: >>> model.plot(iloc=slice(0,10)) will plot the first 10 time points. invert_y_axis: bool boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``) Returns ------- ax: a pyplot axis object """ return _plot_estimate( self, estimate=self.cumulative_density_, confidence_intervals=self.confidence_interval_cumulative_density_, **kwargs )
Plots a pretty figure of {0}.{1} Matplotlib plot arguments can be passed in inside the kwargs, plus Parameters ----------- show_censors: bool place markers at censorship events. Default: False censor_styles: bool If show_censors, this dictionary will be passed into the plot call. ci_alpha: bool the transparency level of the confidence interval. Default: 0.3 ci_force_lines: bool force the confidence intervals to be line plots (versus default shaded areas). Default: False ci_show: bool show confidence intervals. Default: True ci_legend: bool if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False at_risk_counts: bool show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False loc: slice specify a time-based subsection of the curves to plot, ex: >>> model.plot(loc=slice(0.,10.)) will plot the time values between t=0. and t=10. iloc: slice specify a location-based subsection of the curves to plot, ex: >>> model.plot(iloc=slice(0,10)) will plot the first 10 time points. invert_y_axis: bool boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``) Returns ------- ax: a pyplot axis object
Below is the the instruction that describes the task: ### Input: Plots a pretty figure of {0}.{1} Matplotlib plot arguments can be passed in inside the kwargs, plus Parameters ----------- show_censors: bool place markers at censorship events. Default: False censor_styles: bool If show_censors, this dictionary will be passed into the plot call. ci_alpha: bool the transparency level of the confidence interval. Default: 0.3 ci_force_lines: bool force the confidence intervals to be line plots (versus default shaded areas). Default: False ci_show: bool show confidence intervals. Default: True ci_legend: bool if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False at_risk_counts: bool show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False loc: slice specify a time-based subsection of the curves to plot, ex: >>> model.plot(loc=slice(0.,10.)) will plot the time values between t=0. and t=10. iloc: slice specify a location-based subsection of the curves to plot, ex: >>> model.plot(iloc=slice(0,10)) will plot the first 10 time points. invert_y_axis: bool boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``) Returns ------- ax: a pyplot axis object ### Response: def plot_cumulative_density(self, **kwargs): """ Plots a pretty figure of {0}.{1} Matplotlib plot arguments can be passed in inside the kwargs, plus Parameters ----------- show_censors: bool place markers at censorship events. Default: False censor_styles: bool If show_censors, this dictionary will be passed into the plot call. ci_alpha: bool the transparency level of the confidence interval. Default: 0.3 ci_force_lines: bool force the confidence intervals to be line plots (versus default shaded areas). Default: False ci_show: bool show confidence intervals. Default: True ci_legend: bool if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False at_risk_counts: bool show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False loc: slice specify a time-based subsection of the curves to plot, ex: >>> model.plot(loc=slice(0.,10.)) will plot the time values between t=0. and t=10. iloc: slice specify a location-based subsection of the curves to plot, ex: >>> model.plot(iloc=slice(0,10)) will plot the first 10 time points. invert_y_axis: bool boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``) Returns ------- ax: a pyplot axis object """ return _plot_estimate( self, estimate=self.cumulative_density_, confidence_intervals=self.confidence_interval_cumulative_density_, **kwargs )
def from_cli(cls, opt): """Create an InjFilterRejector instance from command-line options.""" injection_file = opt.injection_file chirp_time_window = \ opt.injection_filter_rejector_chirp_time_window match_threshold = opt.injection_filter_rejector_match_threshold coarsematch_deltaf = opt.injection_filter_rejector_coarsematch_deltaf coarsematch_fmax = opt.injection_filter_rejector_coarsematch_fmax seg_buffer = opt.injection_filter_rejector_seg_buffer if opt.injection_filter_rejector_f_lower is not None: f_lower = opt.injection_filter_rejector_f_lower else: # NOTE: Uses main low-frequency cutoff as default option. This may # need some editing if using this in multi_inspiral, which I # leave for future work, or if this is being used in another # code which doesn't have --low-frequency-cutoff f_lower = opt.low_frequency_cutoff return cls(injection_file, chirp_time_window, match_threshold, f_lower, coarsematch_deltaf=coarsematch_deltaf, coarsematch_fmax=coarsematch_fmax, seg_buffer=seg_buffer)
Create an InjFilterRejector instance from command-line options.
Below is the the instruction that describes the task: ### Input: Create an InjFilterRejector instance from command-line options. ### Response: def from_cli(cls, opt): """Create an InjFilterRejector instance from command-line options.""" injection_file = opt.injection_file chirp_time_window = \ opt.injection_filter_rejector_chirp_time_window match_threshold = opt.injection_filter_rejector_match_threshold coarsematch_deltaf = opt.injection_filter_rejector_coarsematch_deltaf coarsematch_fmax = opt.injection_filter_rejector_coarsematch_fmax seg_buffer = opt.injection_filter_rejector_seg_buffer if opt.injection_filter_rejector_f_lower is not None: f_lower = opt.injection_filter_rejector_f_lower else: # NOTE: Uses main low-frequency cutoff as default option. This may # need some editing if using this in multi_inspiral, which I # leave for future work, or if this is being used in another # code which doesn't have --low-frequency-cutoff f_lower = opt.low_frequency_cutoff return cls(injection_file, chirp_time_window, match_threshold, f_lower, coarsematch_deltaf=coarsematch_deltaf, coarsematch_fmax=coarsematch_fmax, seg_buffer=seg_buffer)
def root(self, pattern, current): """Start parsing the pattern.""" self.set_after_start() i = util.StringIter(pattern) iter(i) root_specified = False if self.win_drive_detect: m = RE_WIN_PATH.match(pattern) if m: slash = False drive = m.group(0).replace('\\\\', '\\') if drive.endswith('\\'): slash = True drive = drive[:-1] current.append(re.escape(drive)) if slash: current.append(self.get_path_sep() + _ONE_OR_MORE) i.advance(m.end(0)) self.consume_path_sep(i) root_specified = True elif pattern.startswith('\\\\'): root_specified = True elif not self.win_drive_detect and self.pathname and pattern.startswith('/'): root_specified = True if not root_specified and self.realpath: current.append(_NO_WIN_ROOT if self.win_drive_detect else _NO_ROOT) current.append('') for c in i: index = i.index if self.extend and c in EXT_TYPES and self.parse_extend(c, i, current, True): # Nothing to do pass elif c == '*': self._handle_star(i, current) elif c == '?': current.append(self._restrict_sequence() + _QMARK) elif c == '/': if self.pathname: self.set_start_dir() self.clean_up_inverse(current) current.append(self.get_path_sep() + _ONE_OR_MORE) self.consume_path_sep(i) else: current.append(re.escape(c)) elif c == '\\': index = i.index try: value = self._references(i) if self.dir_start: self.clean_up_inverse(current) self.consume_path_sep(i) current.append(value) except StopIteration: i.rewind(i.index - index) current.append(re.escape(c)) elif c == '[': index = i.index try: current.append(self._sequence(i)) except StopIteration: i.rewind(i.index - index) current.append(re.escape(c)) else: current.append(re.escape(c)) self.update_dir_state() self.clean_up_inverse(current) if self.pathname: current.append(_PATH_TRAIL % self.get_path_sep())
Start parsing the pattern.
Below is the the instruction that describes the task: ### Input: Start parsing the pattern. ### Response: def root(self, pattern, current): """Start parsing the pattern.""" self.set_after_start() i = util.StringIter(pattern) iter(i) root_specified = False if self.win_drive_detect: m = RE_WIN_PATH.match(pattern) if m: slash = False drive = m.group(0).replace('\\\\', '\\') if drive.endswith('\\'): slash = True drive = drive[:-1] current.append(re.escape(drive)) if slash: current.append(self.get_path_sep() + _ONE_OR_MORE) i.advance(m.end(0)) self.consume_path_sep(i) root_specified = True elif pattern.startswith('\\\\'): root_specified = True elif not self.win_drive_detect and self.pathname and pattern.startswith('/'): root_specified = True if not root_specified and self.realpath: current.append(_NO_WIN_ROOT if self.win_drive_detect else _NO_ROOT) current.append('') for c in i: index = i.index if self.extend and c in EXT_TYPES and self.parse_extend(c, i, current, True): # Nothing to do pass elif c == '*': self._handle_star(i, current) elif c == '?': current.append(self._restrict_sequence() + _QMARK) elif c == '/': if self.pathname: self.set_start_dir() self.clean_up_inverse(current) current.append(self.get_path_sep() + _ONE_OR_MORE) self.consume_path_sep(i) else: current.append(re.escape(c)) elif c == '\\': index = i.index try: value = self._references(i) if self.dir_start: self.clean_up_inverse(current) self.consume_path_sep(i) current.append(value) except StopIteration: i.rewind(i.index - index) current.append(re.escape(c)) elif c == '[': index = i.index try: current.append(self._sequence(i)) except StopIteration: i.rewind(i.index - index) current.append(re.escape(c)) else: current.append(re.escape(c)) self.update_dir_state() self.clean_up_inverse(current) if self.pathname: current.append(_PATH_TRAIL % self.get_path_sep())
def __default(self, ast_token): """Handle tokens inside the list or outside the list.""" if self.list_level == 1: if self.list_entry is None: self.list_entry = ast_token elif not isinstance(ast_token, type(self.list_entry)): self.final_ast_tokens.append(ast_token) elif self.list_level == 0: self.final_ast_tokens.append(ast_token)
Handle tokens inside the list or outside the list.
Below is the the instruction that describes the task: ### Input: Handle tokens inside the list or outside the list. ### Response: def __default(self, ast_token): """Handle tokens inside the list or outside the list.""" if self.list_level == 1: if self.list_entry is None: self.list_entry = ast_token elif not isinstance(ast_token, type(self.list_entry)): self.final_ast_tokens.append(ast_token) elif self.list_level == 0: self.final_ast_tokens.append(ast_token)
def add_transaction_clause(self, clause): """ Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: TransactionClause """ if not isinstance(clause, TransactionClause): raise StatementException('only instances of TransactionClause can be added to statements') clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.transactions.append(clause)
Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: TransactionClause
Below is the the instruction that describes the task: ### Input: Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: TransactionClause ### Response: def add_transaction_clause(self, clause): """ Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: TransactionClause """ if not isinstance(clause, TransactionClause): raise StatementException('only instances of TransactionClause can be added to statements') clause.set_context_id(self.context_counter) self.context_counter += clause.get_context_size() self.transactions.append(clause)
def while_stmt(self, while_loc, test, while_colon_loc, body, else_opt): """while_stmt: 'while' test ':' suite ['else' ':' suite]""" stmt = ast.While(test=test, body=body, orelse=[], keyword_loc=while_loc, while_colon_loc=while_colon_loc, else_loc=None, else_colon_loc=None, loc=while_loc.join(body[-1].loc)) if else_opt: stmt.else_loc, stmt.else_colon_loc, stmt.orelse = else_opt stmt.loc = stmt.loc.join(stmt.orelse[-1].loc) return stmt
while_stmt: 'while' test ':' suite ['else' ':' suite]
Below is the the instruction that describes the task: ### Input: while_stmt: 'while' test ':' suite ['else' ':' suite] ### Response: def while_stmt(self, while_loc, test, while_colon_loc, body, else_opt): """while_stmt: 'while' test ':' suite ['else' ':' suite]""" stmt = ast.While(test=test, body=body, orelse=[], keyword_loc=while_loc, while_colon_loc=while_colon_loc, else_loc=None, else_colon_loc=None, loc=while_loc.join(body[-1].loc)) if else_opt: stmt.else_loc, stmt.else_colon_loc, stmt.orelse = else_opt stmt.loc = stmt.loc.join(stmt.orelse[-1].loc) return stmt
def filter_clades(self): "Remove conflicting clades and those < cutoff to get majority rule" passed = [] carrs = np.array([list(i[0]) for i in self.clade_counts], dtype=int) freqs = np.array([i[1] for i in self.clade_counts]) for idx in range(carrs.shape[0]): conflict = False if freqs[idx] < self.cutoff: continue for pidx in passed: intersect = np.max(carrs[idx] + carrs[pidx]) > 1 # is either one a subset of the other? subset_test0 = np.all(carrs[idx] - carrs[pidx] >= 0) subset_test1 = np.all(carrs[pidx] - carrs[idx] >= 0) if intersect: if (not subset_test0) and (not subset_test1): conflict = True if not conflict: passed.append(idx) rclades = [] for idx in passed: rclades.append((carrs[idx], freqs[idx])) self.fclade_counts = rclades
Remove conflicting clades and those < cutoff to get majority rule
Below is the the instruction that describes the task: ### Input: Remove conflicting clades and those < cutoff to get majority rule ### Response: def filter_clades(self): "Remove conflicting clades and those < cutoff to get majority rule" passed = [] carrs = np.array([list(i[0]) for i in self.clade_counts], dtype=int) freqs = np.array([i[1] for i in self.clade_counts]) for idx in range(carrs.shape[0]): conflict = False if freqs[idx] < self.cutoff: continue for pidx in passed: intersect = np.max(carrs[idx] + carrs[pidx]) > 1 # is either one a subset of the other? subset_test0 = np.all(carrs[idx] - carrs[pidx] >= 0) subset_test1 = np.all(carrs[pidx] - carrs[idx] >= 0) if intersect: if (not subset_test0) and (not subset_test1): conflict = True if not conflict: passed.append(idx) rclades = [] for idx in passed: rclades.append((carrs[idx], freqs[idx])) self.fclade_counts = rclades
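The two NumPy tests inside the loop read more clearly on a toy example, where clades are 0/1 membership vectors over taxa (values made up):

import numpy as np

a = np.array([1, 1, 0, 1])
b = np.array([1, 0, 0, 1])
assert np.max(a + b) > 1       # the clades share at least one taxon
assert np.all(a - b >= 0)      # every taxon of b is in a, so b is a subset of a
assert not np.all(b - a >= 0)  # a is not a subset of b: still no conflict here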
def exec_cmd(self, command, **kwargs): """Normal save and load only need the command. To merge, just supply the merge and file arguments as kwargs like so: exec_cmd('load', merge=True, file='/path/to/file.txt') """ if command == 'load': if kwargs: kwargs = dict(options=[kwargs]) return self._exec_cmd(command, **kwargs)
Normal save and load only need the command. To merge, just supply the merge and file arguments as kwargs like so: exec_cmd('load', merge=True, file='/path/to/file.txt')
Below is the the instruction that describes the task: ### Input: Normal save and load only need the command. To merge, just supply the merge and file arguments as kwargs like so: exec_cmd('load', merge=True, file='/path/to/file.txt') ### Response: def exec_cmd(self, command, **kwargs): """Normal save and load only need the command. To merge, just supply the merge and file arguments as kwargs like so: exec_cmd('load', merge=True, file='/path/to/file.txt') """ if command == 'load': if kwargs: kwargs = dict(options=[kwargs]) return self._exec_cmd(command, **kwargs)
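The kwargs rewrite is just a one-level wrap; a quick trace of what the underlying call receives (paths are placeholders from the docstring):

kwargs = {'merge': True, 'file': '/path/to/file.txt'}
kwargs = dict(options=[kwargs])
print(kwargs)  # {'options': [{'merge': True, 'file': '/path/to/file.txt'}]}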
def wait_for_focus(self, title, timeOut=5): """ Wait for window with the given title to have focus Usage: C{window.wait_for_focus(title, timeOut=5)} If the window becomes active, returns True. Otherwise, returns False if the window has not become active by the time the timeout has elapsed. @param title: title to match against (as a regular expression) @param timeOut: period (seconds) to wait before giving up @rtype: boolean """ regex = re.compile(title) waited = 0 while waited <= timeOut: if regex.match(self.mediator.interface.get_window_title()): return True if timeOut == 0: break # zero length timeout, if not matched go straight to end time.sleep(0.3) waited += 0.3 return False
Wait for window with the given title to have focus Usage: C{window.wait_for_focus(title, timeOut=5)} If the window becomes active, returns True. Otherwise, returns False if the window has not become active by the time the timeout has elapsed. @param title: title to match against (as a regular expression) @param timeOut: period (seconds) to wait before giving up @rtype: boolean
Below is the the instruction that describes the task: ### Input: Wait for window with the given title to have focus Usage: C{window.wait_for_focus(title, timeOut=5)} If the window becomes active, returns True. Otherwise, returns False if the window has not become active by the time the timeout has elapsed. @param title: title to match against (as a regular expression) @param timeOut: period (seconds) to wait before giving up @rtype: boolean ### Response: def wait_for_focus(self, title, timeOut=5): """ Wait for window with the given title to have focus Usage: C{window.wait_for_focus(title, timeOut=5)} If the window becomes active, returns True. Otherwise, returns False if the window has not become active by the time the timeout has elapsed. @param title: title to match against (as a regular expression) @param timeOut: period (seconds) to wait before giving up @rtype: boolean """ regex = re.compile(title) waited = 0 while waited <= timeOut: if regex.match(self.mediator.interface.get_window_title()): return True if timeOut == 0: break # zero length timeout, if not matched go straight to end time.sleep(0.3) waited += 0.3 return False
def make_python_identifier(string, namespace=None, reserved_words=None, convert='drop', handle='force'): """ Takes an arbitrary string and creates a valid Python identifier. If the input string is in the namespace, return its value. If the python identifier created is already in the namespace, but the input string is not (i.e., two similar strings resolve to the same python identifier) or if the identifier is a reserved word in the reserved_words list, or is a python default reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- string : <basestring> The text to be converted into a valid python identifier namespace : <dictionary> Map of existing translations into python safe identifiers. This is to ensure that two strings are not translated into the same python identifier reserved_words : <list of strings> List of words that are reserved (because they have other meanings in this particular program, such as also being the names of libraries, etc.) convert : <string> Tells the function what to do with characters that are not valid in python identifiers - 'hex' implies that they will be converted to their hexadecimal representation. This is handy if you have variables that have a lot of reserved characters, or you don't want the name to be dependent on when things were added to the namespace - 'drop' implies that they will just be dropped altogether handle : <string> Tells the function how to deal with namespace conflicts - 'force' will create a representation which is not in conflict by appending _n to the resulting variable where n is the lowest number necessary to avoid a conflict - 'throw' will raise an exception Returns ------- identifier : <string> A valid python identifier based on the input string namespace : <dictionary> An updated map of the translations of words to python identifiers, including the passed in 'string'. Examples -------- >>> make_python_identifier('Capital') ('capital', {'Capital': 'capital'}) >>> make_python_identifier('multiple words') ('multiple_words', {'multiple words': 'multiple_words'}) >>> make_python_identifier('multiple spaces') ('multiple_spaces', {'multiple spaces': 'multiple_spaces'}) When the name is a python keyword, add '_1' to differentiate it >>> make_python_identifier('for') ('for_1', {'for': 'for_1'}) Remove leading and trailing whitespace >>> make_python_identifier(' whitespace ') ('whitespace', {' whitespace ': 'whitespace'}) Remove most special characters outright: >>> make_python_identifier('H@t tr!ck') ('ht_trck', {'H@t tr!ck': 'ht_trck'}) Replace special characters with their hex representations >>> make_python_identifier('H@t tr!ck', convert='hex') ('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'}) remove leading digits >>> make_python_identifier('123abc') ('abc', {'123abc': 'abc'}) already in namespace >>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'}) ('variable', {'Variable$': 'variable'}) namespace conflicts >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}) ('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'}) >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable', >>> 'Variable%': 'variable_1'}) ('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'}) throw exception instead >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw') Traceback (most recent call last): ... NameError: variable already exists in namespace or is a reserved word References ---------- Identifiers must follow the convention outlined here: https://docs.python.org/2/reference/lexical_analysis.html#identifiers """ if namespace is None: namespace = dict() if reserved_words is None: reserved_words = list() if string in namespace: return namespace[string], namespace # create a working copy (and make it lowercase, while we're at it) s = string.lower() # remove leading and trailing whitespace s = s.strip() # Make spaces into underscores s = re.sub('[\\s\\t\\n]+', '_', s) if convert == 'hex': # Convert invalid characters to hex. Note: \p{l} designates all Unicode letter characters (any language), # \p{m} designates all mark symbols (e.g., vowel marks in Indian scripts, such as the final) # and \p{n} designates all numbers. We allow any of these to be present in the regex. s = ''.join([c.encode("hex") if re.findall('[^\p{l}\p{m}\p{n}_]', c) else c for c in s]) elif convert == 'drop': # Remove invalid characters s = re.sub('[^\p{l}\p{m}\p{n}_]', '', s) # Remove leading characters until we find a letter or underscore. Only letters can be leading characters. s = re.sub('^[^\p{l}_]+', '', s) # Check that the identifier is not a python keyword, already in the namespace, or reserved while (s in keyword.kwlist or s in namespace.values() or s in reserved_words): if handle == 'throw': raise NameError(s + ' already exists in namespace or is a reserved word') if handle == 'force': if re.match(".*?_\d+$", s): i = re.match(".*?_(\d+)$", s).groups()[0] s = s[:s.rfind('_')] + '_' + str(int(i) + 1) else: s += '_1' namespace[string] = s return s, namespace
Takes an arbitrary string and creates a valid Python identifier. If the input string is in the namespace, return its value. If the python identifier created is already in the namespace, but the input string is not (i.e., two similar strings resolve to the same python identifier) or if the identifier is a reserved word in the reserved_words list, or is a python default reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- string : <basestring> The text to be converted into a valid python identifier namespace : <dictionary> Map of existing translations into python safe identifiers. This is to ensure that two strings are not translated into the same python identifier reserved_words : <list of strings> List of words that are reserved (because they have other meanings in this particular program, such as also being the names of libraries, etc.) convert : <string> Tells the function what to do with characters that are not valid in python identifiers - 'hex' implies that they will be converted to their hexadecimal representation. This is handy if you have variables that have a lot of reserved characters, or you don't want the name to be dependent on when things were added to the namespace - 'drop' implies that they will just be dropped altogether handle : <string> Tells the function how to deal with namespace conflicts - 'force' will create a representation which is not in conflict by appending _n to the resulting variable where n is the lowest number necessary to avoid a conflict - 'throw' will raise an exception Returns ------- identifier : <string> A valid python identifier based on the input string namespace : <dictionary> An updated map of the translations of words to python identifiers, including the passed in 'string'. Examples -------- >>> make_python_identifier('Capital') ('capital', {'Capital': 'capital'}) >>> make_python_identifier('multiple words') ('multiple_words', {'multiple words': 'multiple_words'}) >>> make_python_identifier('multiple spaces') ('multiple_spaces', {'multiple spaces': 'multiple_spaces'}) When the name is a python keyword, add '_1' to differentiate it >>> make_python_identifier('for') ('for_1', {'for': 'for_1'}) Remove leading and trailing whitespace >>> make_python_identifier(' whitespace ') ('whitespace', {' whitespace ': 'whitespace'}) Remove most special characters outright: >>> make_python_identifier('H@t tr!ck') ('ht_trck', {'H@t tr!ck': 'ht_trck'}) Replace special characters with their hex representations >>> make_python_identifier('H@t tr!ck', convert='hex') ('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'}) remove leading digits >>> make_python_identifier('123abc') ('abc', {'123abc': 'abc'}) already in namespace >>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'}) ('variable', {'Variable$': 'variable'}) namespace conflicts >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}) ('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'}) >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable', >>> 'Variable%': 'variable_1'}) ('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'}) throw exception instead >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw') Traceback (most recent call last): ... NameError: variable already exists in namespace or is a reserved word References ---------- Identifiers must follow the convention outlined here: https://docs.python.org/2/reference/lexical_analysis.html#identifiers
Below is the the instruction that describes the task: ### Input: Takes an arbitrary string and creates a valid Python identifier. If the input string is in the namespace, return its value. If the python identifier created is already in the namespace, but the input string is not (i.e., two similar strings resolve to the same python identifier) or if the identifier is a reserved word in the reserved_words list, or is a python default reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- string : <basestring> The text to be converted into a valid python identifier namespace : <dictionary> Map of existing translations into python safe identifiers. This is to ensure that two strings are not translated into the same python identifier reserved_words : <list of strings> List of words that are reserved (because they have other meanings in this particular program, such as also being the names of libraries, etc.) convert : <string> Tells the function what to do with characters that are not valid in python identifiers - 'hex' implies that they will be converted to their hexadecimal representation. This is handy if you have variables that have a lot of reserved characters, or you don't want the name to be dependent on when things were added to the namespace - 'drop' implies that they will just be dropped altogether handle : <string> Tells the function how to deal with namespace conflicts - 'force' will create a representation which is not in conflict by appending _n to the resulting variable where n is the lowest number necessary to avoid a conflict - 'throw' will raise an exception Returns ------- identifier : <string> A valid python identifier based on the input string namespace : <dictionary> An updated map of the translations of words to python identifiers, including the passed in 'string'. Examples -------- >>> make_python_identifier('Capital') ('capital', {'Capital': 'capital'}) >>> make_python_identifier('multiple words') ('multiple_words', {'multiple words': 'multiple_words'}) >>> make_python_identifier('multiple spaces') ('multiple_spaces', {'multiple spaces': 'multiple_spaces'}) When the name is a python keyword, add '_1' to differentiate it >>> make_python_identifier('for') ('for_1', {'for': 'for_1'}) Remove leading and trailing whitespace >>> make_python_identifier(' whitespace ') ('whitespace', {' whitespace ': 'whitespace'}) Remove most special characters outright: >>> make_python_identifier('H@t tr!ck') ('ht_trck', {'H@t tr!ck': 'ht_trck'}) Replace special characters with their hex representations >>> make_python_identifier('H@t tr!ck', convert='hex') ('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'}) remove leading digits >>> make_python_identifier('123abc') ('abc', {'123abc': 'abc'}) already in namespace >>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'}) ('variable', {'Variable$': 'variable'}) namespace conflicts >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}) ('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'}) >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable', >>> 'Variable%': 'variable_1'}) ('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'}) throw exception instead >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw') Traceback (most recent call last): ... NameError: variable already exists in namespace or is a reserved word References ---------- Identifiers must follow the convention outlined here: https://docs.python.org/2/reference/lexical_analysis.html#identifiers ### Response: def make_python_identifier(string, namespace=None, reserved_words=None, convert='drop', handle='force'): """ Takes an arbitrary string and creates a valid Python identifier. If the input string is in the namespace, return its value. If the python identifier created is already in the namespace, but the input string is not (i.e., two similar strings resolve to the same python identifier) or if the identifier is a reserved word in the reserved_words list, or is a python default reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- string : <basestring> The text to be converted into a valid python identifier namespace : <dictionary> Map of existing translations into python safe identifiers. This is to ensure that two strings are not translated into the same python identifier reserved_words : <list of strings> List of words that are reserved (because they have other meanings in this particular program, such as also being the names of libraries, etc.) convert : <string> Tells the function what to do with characters that are not valid in python identifiers - 'hex' implies that they will be converted to their hexadecimal representation. This is handy if you have variables that have a lot of reserved characters, or you don't want the name to be dependent on when things were added to the namespace - 'drop' implies that they will just be dropped altogether handle : <string> Tells the function how to deal with namespace conflicts - 'force' will create a representation which is not in conflict by appending _n to the resulting variable where n is the lowest number necessary to avoid a conflict - 'throw' will raise an exception Returns ------- identifier : <string> A valid python identifier based on the input string namespace : <dictionary> An updated map of the translations of words to python identifiers, including the passed in 'string'. Examples -------- >>> make_python_identifier('Capital') ('capital', {'Capital': 'capital'}) >>> make_python_identifier('multiple words') ('multiple_words', {'multiple words': 'multiple_words'}) >>> make_python_identifier('multiple spaces') ('multiple_spaces', {'multiple spaces': 'multiple_spaces'}) When the name is a python keyword, add '_1' to differentiate it >>> make_python_identifier('for') ('for_1', {'for': 'for_1'}) Remove leading and trailing whitespace >>> make_python_identifier(' whitespace ') ('whitespace', {' whitespace ': 'whitespace'}) Remove most special characters outright: >>> make_python_identifier('H@t tr!ck') ('ht_trck', {'H@t tr!ck': 'ht_trck'}) Replace special characters with their hex representations >>> make_python_identifier('H@t tr!ck', convert='hex') ('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'}) remove leading digits >>> make_python_identifier('123abc') ('abc', {'123abc': 'abc'}) already in namespace >>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'}) ('variable', {'Variable$': 'variable'}) namespace conflicts >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}) ('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'}) >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable', >>> 'Variable%': 'variable_1'}) ('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'}) throw exception instead >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw') Traceback (most recent call last): ... NameError: variable already exists in namespace or is a reserved word References ---------- Identifiers must follow the convention outlined here: https://docs.python.org/2/reference/lexical_analysis.html#identifiers """ if namespace is None: namespace = dict() if reserved_words is None: reserved_words = list() if string in namespace: return namespace[string], namespace # create a working copy (and make it lowercase, while we're at it) s = string.lower() # remove leading and trailing whitespace s = s.strip() # Make spaces into underscores s = re.sub('[\\s\\t\\n]+', '_', s) if convert == 'hex': # Convert invalid characters to hex. Note: \p{l} designates all Unicode letter characters (any language), # \p{m} designates all mark symbols (e.g., vowel marks in Indian scripts, such as the final) # and \p{n} designates all numbers. We allow any of these to be present in the regex. s = ''.join([c.encode("hex") if re.findall('[^\p{l}\p{m}\p{n}_]', c) else c for c in s]) elif convert == 'drop': # Remove invalid characters s = re.sub('[^\p{l}\p{m}\p{n}_]', '', s) # Remove leading characters until we find a letter or underscore. Only letters can be leading characters. s = re.sub('^[^\p{l}_]+', '', s) # Check that the identifier is not a python keyword, already in the namespace, or reserved while (s in keyword.kwlist or s in namespace.values() or s in reserved_words): if handle == 'throw': raise NameError(s + ' already exists in namespace or is a reserved word') if handle == 'force': if re.match(".*?_\d+$", s): i = re.match(".*?_(\d+)$", s).groups()[0] s = s[:s.rfind('_')] + '_' + str(int(i) + 1) else: s += '_1' namespace[string] = s return s, namespace
def Reset(self):
    """Reset the camera back to its defaults."""
    self.pan = self.world_center
    self.desired_pan = self.pos
Reset the camera back to its defaults.
Below is the instruction that describes the task:
### Input:
Reset the camera back to its defaults.
### Response:
def Reset(self):
    """Reset the camera back to its defaults."""
    self.pan = self.world_center
    self.desired_pan = self.pos
def median_kneighbour_distance(X, k=5):
    """
    Calculate the median kneighbor distance.

    Find the distance between a set of random datapoints and their kth
    nearest neighbours. This is a heuristic for setting the kernel
    length scale.
    """
    N_all = X.shape[0]
    k = min(k, N_all)
    N_subset = min(N_all, 2000)
    sample_idx_train = np.random.permutation(N_all)[:N_subset]
    nn = neighbors.NearestNeighbors(k)
    nn.fit(X[sample_idx_train, :])
    d, idx = nn.kneighbors(X[sample_idx_train, :])
    return np.median(d[:, -1])
Calculate the median kneighbor distance. Find the distance between a set of random datapoints and their kth nearest neighbours. This is a heuristic for setting the kernel length scale.
Below is the instruction that describes the task:
### Input:
Calculate the median kneighbor distance.

Find the distance between a set of random datapoints and their kth
nearest neighbours. This is a heuristic for setting the kernel
length scale.
### Response:
def median_kneighbour_distance(X, k=5):
    """
    Calculate the median kneighbor distance.

    Find the distance between a set of random datapoints and their kth
    nearest neighbours. This is a heuristic for setting the kernel
    length scale.
    """
    N_all = X.shape[0]
    k = min(k, N_all)
    N_subset = min(N_all, 2000)
    sample_idx_train = np.random.permutation(N_all)[:N_subset]
    nn = neighbors.NearestNeighbors(k)
    nn.fit(X[sample_idx_train, :])
    d, idx = nn.kneighbors(X[sample_idx_train, :])
    return np.median(d[:, -1])
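This heuristic is commonly used to set the bandwidth of an RBF kernel. A
hypothetical usage sketch follows; the imports are assumptions inferred
from the names np and neighbors (numpy and sklearn.neighbors), and note
that newer scikit-learn releases require n_neighbors to be passed as a
keyword rather than positionally as in the snippet above.

# Hypothetical usage sketch; assumes numpy and scikit-learn are installed.
import numpy as np
from sklearn import neighbors

X = np.random.randn(500, 3)          # 500 random points in 3 dimensions
scale = median_kneighbour_distance(X, k=5)
gamma = 1.0 / (2.0 * scale ** 2)     # one common RBF bandwidth convention
print('length scale %.3f -> gamma %.3f' % (scale, gamma))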
def get_color_names(self, format_string):
    """
    Parses the format_string and returns a set of color names.
    """
    names = set()
    # Tokenize the format string and process them
    for token in self.tokens(format_string):
        if token.group("command"):
            name = dict(parse_qsl(token.group("command"))).get("color")
            if (
                not name
                or name in COLOR_NAMES_EXCLUDED
                or name in COLOR_NAMES
                or name[0] == "#"
            ):
                continue
            names.add(name)
    return names
Parses the format_string and returns a set of color names.
Below is the instruction that describes the task:
### Input:
Parses the format_string and returns a set of color names.
### Response:
def get_color_names(self, format_string):
    """
    Parses the format_string and returns a set of color names.
    """
    names = set()
    # Tokenize the format string and process them
    for token in self.tokens(format_string):
        if token.group("command"):
            name = dict(parse_qsl(token.group("command"))).get("color")
            if (
                not name
                or name in COLOR_NAMES_EXCLUDED
                or name in COLOR_NAMES
                or name[0] == "#"
            ):
                continue
            names.add(name)
    return names
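The color extraction above hinges on parse_qsl treating each format
command as a query string. A standalone illustration of just that step
(the surrounding class, self.tokens, and the COLOR_NAMES sets are
omitted; on Python 2 the same function lives in urlparse):

# Minimal sketch of the parse_qsl step in isolation.
from urllib.parse import parse_qsl

command = "color=degraded&show=1"   # hypothetical command string
params = dict(parse_qsl(command))
print(params.get("color"))          # -> degraded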
def setCurrentRegItem(self, regItem):
    """ Sets the current item to the regItem
    """
    check_class(regItem, ClassRegItem, allow_none=True)
    self.tableView.setCurrentRegItem(regItem)
Sets the current item to the regItem
Below is the instruction that describes the task:
### Input:
Sets the current item to the regItem
### Response:
def setCurrentRegItem(self, regItem):
    """ Sets the current item to the regItem
    """
    check_class(regItem, ClassRegItem, allow_none=True)
    self.tableView.setCurrentRegItem(regItem)
def _set_dense_defaults_and_eval(kwargs):
    """
    Sets default values in kwargs if kwargs are not already given.
    Evaluates all values using eval

    Parameters
    ----------
    kwargs : dict
        Dictionary of dense specific keyword args

    Returns
    -------
    : dict
        Default, evaluated dictionary
    """
    kwargs['delimiter'] = kwargs.get('delimiter', ',')
    kwargs['na_values'] = kwargs.get('na_values', '')
    kwargs['nan_to_zero'] = kwargs.get('nan_to_zero', False)
    kwargs['drop_na'] = kwargs.get('drop_na', False)
    kwargs['label_col'] = kwargs.get('label_col', 'label')
    kwargs['count_col'] = kwargs.get('count_col', 'count')

    for key, val in kwargs.iteritems():
        try:
            kwargs[key] = eval(val)
        except:
            kwargs[key] = val

    return kwargs
Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval

Parameters
----------
kwargs : dict
    Dictionary of dense specific keyword args

Returns
-------
: dict
    Default, evaluated dictionary
Below is the instruction that describes the task:
### Input:
Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval

Parameters
----------
kwargs : dict
    Dictionary of dense specific keyword args

Returns
-------
: dict
    Default, evaluated dictionary
### Response:
def _set_dense_defaults_and_eval(kwargs):
    """
    Sets default values in kwargs if kwargs are not already given.
    Evaluates all values using eval

    Parameters
    ----------
    kwargs : dict
        Dictionary of dense specific keyword args

    Returns
    -------
    : dict
        Default, evaluated dictionary
    """
    kwargs['delimiter'] = kwargs.get('delimiter', ',')
    kwargs['na_values'] = kwargs.get('na_values', '')
    kwargs['nan_to_zero'] = kwargs.get('nan_to_zero', False)
    kwargs['drop_na'] = kwargs.get('drop_na', False)
    kwargs['label_col'] = kwargs.get('label_col', 'label')
    kwargs['count_col'] = kwargs.get('count_col', 'count')

    for key, val in kwargs.iteritems():
        try:
            kwargs[key] = eval(val)
        except:
            kwargs[key] = val

    return kwargs
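A hypothetical call showing the eval step: values that parse as Python
expressions are converted, and anything that raises (including the
non-string defaults) is kept verbatim. The bare except and the use of
eval are as in the source; new code would more safely use
ast.literal_eval.

# Hypothetical usage (Python 2, to match the iteritems call above).
opts = _set_dense_defaults_and_eval({'nan_to_zero': 'True'})
print(opts['nan_to_zero'])  # True: the string 'True' was eval'ed to a bool
print(opts['label_col'])    # label: default; eval raises NameError, string kept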