def _normpath(self, pathname):
    """Return path normalized for duscan usage: no trailing slash."""
    if pathname == '/':
        pathname = ''
    elif pathname.endswith('/'):
        pathname = pathname[:-1]
    assert not pathname.endswith('/'), pathname
    return pathname

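A minimal sketch of the normalization behavior; since the method never touches self, a None receiver works for a standalone check:

_norm = lambda p: _normpath(None, p)
assert _norm('/') == ''
assert _norm('/var/log/') == '/var/log'
assert _norm('/var/log') == '/var/log'   # already normalized, unchanged
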
def _displayattrs(attrib, expandattrs):
    """
    Helper function to display the attributes of a Node object in
    lexicographic order.

    :param attrib: dictionary with the attributes
    :param expandattrs: if True also displays the value of the attributes
    """
    if not attrib:
        return ''
    if expandattrs:
        alist = ['%s=%r' % item for item in sorted(attrib.items())]
    else:
        alist = list(attrib)
    return '{%s}' % ', '.join(alist)

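A quick illustration of both branches; note that only the expanded form sorts the attributes, while the compact form keeps the dict's insertion order:

>>> _displayattrs({'b': 1, 'a': 2}, expandattrs=True)
'{a=2, b=1}'
>>> _displayattrs({'b': 1, 'a': 2}, expandattrs=False)
'{b, a}'
>>> _displayattrs({}, expandattrs=True)
''
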
def parse_placeholder(parser, token):
    """Parse the `PlaceholderNode` parameters.

    Return a tuple with the name and parameters."""
    bits = token.split_contents()
    count = len(bits)
    error_string = '%r tag requires at least one argument' % bits[0]
    if count <= 1:
        raise TemplateSyntaxError(error_string)
    name = bits[1]
    remaining = bits[2:]
    params = {}
    simple_options = ['parsed', 'inherited', 'untranslated']
    param_options = ['as', 'on', 'with']
    all_options = simple_options + param_options
    while remaining:
        bit = remaining[0]
        if bit not in all_options:
            raise TemplateSyntaxError(
                "%r is not a correct option for a placeholder" % bit)
        if bit in param_options:
            if len(remaining) < 2:
                raise TemplateSyntaxError(
                    "Placeholder option '%s' needs a parameter" % bit)
            if bit == 'as':
                params['as_varname'] = remaining[1]
            if bit == 'with':
                params['widget'] = remaining[1]
            if bit == 'on':
                params['page'] = remaining[1]
            remaining = remaining[2:]
        elif bit == 'parsed':
            params['parsed'] = True
            remaining = remaining[1:]
        elif bit == 'inherited':
            params['inherited'] = True
            remaining = remaining[1:]
        elif bit == 'untranslated':
            params['untranslated'] = True
            remaining = remaining[1:]
    return name, params

def write_fits(self, data, outfile, extname="SKYMAP", clobber=True):
    """Write input data to a FITS file.

    data    : The data being stored
    outfile : The name of the output file
    extname : The HDU extension name
    clobber : True -> overwrite existing files
    """
    hdu_prim = fits.PrimaryHDU()
    hdu_hpx = self.make_hdu(data, extname=extname)
    hl = [hdu_prim, hdu_hpx]
    if self.conv.energy_hdu == 'EBOUNDS':
        hdu_energy = self.make_energy_bounds_hdu()
    elif self.conv.energy_hdu == 'ENERGIES':
        hdu_energy = self.make_energies_hdu()
    else:
        # Guard against a NameError when the convention defines no energy HDU
        hdu_energy = None
    if hdu_energy is not None:
        hl.append(hdu_energy)
    hdulist = fits.HDUList(hl)
    hdulist.writeto(outfile, overwrite=clobber)

def _fullqualname_builtin_py2(obj):
    """Fully qualified name for 'builtin_function_or_method' objects
    in Python 2.
    """
    if obj.__self__ is None:
        # built-in functions
        module = obj.__module__
        qualname = obj.__name__
    else:
        # built-in methods
        if inspect.isclass(obj.__self__):
            cls = obj.__self__
        else:
            cls = obj.__self__.__class__
        module = cls.__module__
        qualname = cls.__name__ + '.' + obj.__name__
    return module + '.' + qualname

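A hypothetical Python 2 session illustrating the two branches, plain built-in functions versus built-in methods bound to an instance:

# _fullqualname_builtin_py2(len)        -> '__builtin__.len'           (function branch)
# _fullqualname_builtin_py2([].append)  -> '__builtin__.list.append'   (method branch)
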
def reply(
    self,
    text: str,
    quote: bool = None,
    parse_mode: str = "",
    disable_web_page_preview: bool = None,
    disable_notification: bool = None,
    reply_to_message_id: int = None,
    reply_markup=None
) -> "Message":
    """Bound method *reply* of :obj:`Message <pyrogram.Message>`.

    Use as a shortcut for:

    .. code-block:: python

        client.send_message(
            chat_id=message.chat.id,
            text="hello",
            reply_to_message_id=message.message_id
        )

    Example:
        .. code-block:: python

            message.reply("hello", quote=True)

    Args:
        text (``str``):
            Text of the message to be sent.

        quote (``bool``, *optional*):
            If ``True``, the message will be sent as a reply to this message.
            If *reply_to_message_id* is passed, this parameter will be ignored.
            Defaults to ``True`` in group chats and ``False`` in private chats.

        parse_mode (``str``, *optional*):
            Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or
            :obj:`HTML <pyrogram.ParseMode.HTML>` if you want Telegram apps
            to show bold, italic, fixed-width text or inline URLs in your
            message. Defaults to Markdown.

        disable_web_page_preview (``bool``, *optional*):
            Disables link previews for links in this message.

        disable_notification (``bool``, *optional*):
            Sends the message silently.
            Users will receive a notification with no sound.

        reply_to_message_id (``int``, *optional*):
            If the message is a reply, ID of the original message.

        reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
            Additional interface options. An object for an inline keyboard,
            custom reply keyboard, instructions to remove reply keyboard or
            to force a reply from the user.

    Returns:
        On success, the sent Message is returned.

    Raises:
        :class:`RPCError <pyrogram.RPCError>`
    """
    if quote is None:
        quote = self.chat.type != "private"

    if reply_to_message_id is None and quote:
        reply_to_message_id = self.message_id

    return self._client.send_message(
        chat_id=self.chat.id,
        text=text,
        parse_mode=parse_mode,
        disable_web_page_preview=disable_web_page_preview,
        disable_notification=disable_notification,
        reply_to_message_id=reply_to_message_id,
        reply_markup=reply_markup
    )

def sendmail_proxy(subject, email, template, **context):
    """Cast the lazy_gettext'ed subject to string before passing to Celery."""
    sendmail.delay(subject.value, email, template, **context)

def extract_references_from_fulltext(fulltext):
    """Locate and extract the reference section from a fulltext document.

    Return the extracted reference section as a list of strings, whereby each
    string in the list is considered to be a single reference line.

    E.g. a string could be something like:
    '[19] Wilson, A. Unpublished (1986).'

    @param fulltext: (list) of strings, whereby each string is a line of the
        document.
    @return: (list) of strings, where each string is an extracted reference
        line.
    """
    # Try to remove pagebreaks, headers, footers
    fulltext = remove_page_boundary_lines(fulltext)
    status = 0
    # How ref section found flag
    how_found_start = 0
    # Find start of refs section
    ref_sect_start = get_reference_section_beginning(fulltext)
    if ref_sect_start is None:
        # No References
        refs = []
        status = 4
        LOGGER.debug(u"extract_references_from_fulltext: ref_sect_start is None")
    else:
        # A reference section was found, however weak
        ref_sect_end = find_end_of_reference_section(
            fulltext,
            ref_sect_start["start_line"],
            ref_sect_start["marker"],
            ref_sect_start["marker_pattern"])
        if ref_sect_end is None:
            # No end to refs? Not safe to extract
            refs = []
            status = 5
            LOGGER.debug(u"extract_references_from_fulltext: no end to refs!")
        else:
            # The end of the reference section was found: start extraction
            refs = get_reference_lines(
                fulltext,
                ref_sect_start["start_line"],
                ref_sect_end,
                ref_sect_start["title_string"],
                ref_sect_start["marker_pattern"],
                ref_sect_start["title_marker_same_line"])
    return refs, status, how_found_start

def pvpc_calc_tcu_cp_feu_d(df, verbose=True, convert_kwh=True):
    """Process daily TCU, CP and FEU values.

    :param df:
    :param verbose:
    :param convert_kwh:
    :return:
    """
    if 'TCU' + TARIFAS[0] not in df.columns:
        # Convert from €/MWh to €/kWh:
        if convert_kwh:
            cols_mwh = [c + t for c in COLS_PVPC for t in TARIFAS if c != 'COF']
            df[cols_mwh] = df[cols_mwh].applymap(lambda x: x / 1000.)
        # Build the TCU, CP and daily-price columns
        gb_t = df.groupby(
            lambda x: TARIFAS[np.argmax([t in x for t in TARIFAS])], axis=1)
        for k, g in gb_t:
            if verbose:
                print('TARIFA {}'.format(k))
                print(g.head())
            # Compute TCU
            df['TCU{}'.format(k)] = g[k] - g['TEU{}'.format(k)]
            # Compute CP
            # cols_cp = [c + k for c in ['FOS', 'FOM', 'INT', 'PCAP', 'PMH', 'SAH']]
            cols_cp = [c + k for c in COLS_PVPC if c not in ['', 'COF', 'TEU']]
            df['CP{}'.format(k)] = g[cols_cp].sum(axis=1)
            # Computing PERD this way is not possible, since the base values
            # already include PERD:
            # dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['TCU{}'.format(k)] / dfs_pvpc[k]['CP{}'.format(k)]
            # dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['INT{}'.format(k)] / 1.92
            # Compute the daily FEU
            cols_k = ['TEU' + k, 'TCU' + k, 'COF' + k]
            g = df[cols_k].groupby('TEU' + k)
            pr = g.apply(lambda x: x['TCU' + k].dot(x['COF' + k]) / x['COF' + k].sum())
            pr.name = 'PD_' + k
            df = df.join(pr, on='TEU' + k, rsuffix='_r')
            df['PD_' + k] += df['TEU' + k]
    return df

async def async_set_config(self, data):
    """Set config of thermostat.

    {
        "mode": "auto",
        "heatsetpoint": 180,
    }
    """
    field = self.deconz_id + '/config'
    await self._async_set_state_callback(field, data)

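A minimal usage sketch; `thermostat` is a hypothetical device object of this class, and the call must run inside a running event loop:

# await thermostat.async_set_config({"mode": "auto", "heatsetpoint": 180})
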
def get_workspaces(self):
    """
    Get a list of workspaces. Returns JSON-like data, not a Con instance.

    You might want to try the :meth:`Con.workspaces` instead if the info
    contained here is too little.

    :rtype: List of :class:`WorkspaceReply`.
    """
    data = self.message(MessageType.GET_WORKSPACES, '')
    return json.loads(data, object_hook=WorkspaceReply)

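A hypothetical call site, assuming `conn` is an instance of the connection class this method belongs to; the field names are assumed to follow the i3 IPC GET_WORKSPACES reply format:

# for ws in conn.get_workspaces():
#     print(ws.name, ws.focused)
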
def restore_region(self, region, bbox=None, xy=None):
    """
    Restore the saved region. If bbox (instance of BboxBase, or
    its extents) is given, only the region specified by the bbox
    will be restored. *xy* (a tuple of two floats) optionally
    specifies the new position (the LLC of the original region,
    not the LLC of the bbox) where the region will be restored.

    >>> region = renderer.copy_from_bbox()
    >>> x1, y1, x2, y2 = region.get_extents()
    >>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
    ...                         xy=(x1-dx, y1))
    """
    if bbox is not None or xy is not None:
        if bbox is None:
            x1, y1, x2, y2 = region.get_extents()
        elif isinstance(bbox, BboxBase):
            x1, y1, x2, y2 = bbox.extents
        else:
            x1, y1, x2, y2 = bbox

        if xy is None:
            ox, oy = x1, y1
        else:
            ox, oy = xy

        self._renderer.restore_region2(region, x1, y1, x2, y2, ox, oy)
    else:
        self._renderer.restore_region(region)

def _pfp__build(self, stream=None, save_offset=False):
    """Build the union and write the result into the stream.

    :stream: stream to write to, or None to build into a fresh byte buffer
    :returns: the built bytes if ``stream`` is None, else the union's size
    """
    max_size = -1

    if stream is None:
        core_stream = six.BytesIO()
        new_stream = bitwrap.BitwrappedStream(core_stream)
    else:
        new_stream = stream

    for child in self._pfp__children:
        curr_pos = new_stream.tell()
        child._pfp__build(new_stream, save_offset)
        size = new_stream.tell() - curr_pos
        # Rewind so every union member is written over the same offset
        new_stream.seek(-size, 1)

        if size > max_size:
            max_size = size

    # Advance past the largest member, which defines the union's size
    new_stream.seek(max_size, 1)

    if stream is None:
        return core_stream.getvalue()
    else:
        return max_size

def degrade_to_order(self, new_order):
    """
    Degrades the MOC instance to a new, less precise, MOC.

    The maximum depth (i.e. the depth of the smallest HEALPix cells that can
    be found in the MOC) of the degraded MOC is set to ``new_order``.

    Parameters
    ----------
    new_order : int
        Maximum depth of the output degraded MOC.

    Returns
    -------
    moc : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
        The degraded MOC.
    """
    shift = 2 * (AbstractMOC.HPY_MAX_NORDER - new_order)
    ofs = (int(1) << shift) - 1
    mask = ~ofs

    adda = int(0)
    addb = ofs
    iv_set = []

    for iv in self._interval_set._intervals:
        a = (iv[0] + adda) & mask
        b = (iv[1] + addb) & mask
        if b > a:
            iv_set.append((a, b))

    return self.__class__(IntervalSet(np.asarray(iv_set)))

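A worked check of the rounding arithmetic, with hypothetical small orders so the numbers stay readable; both interval bounds snap to multiples of 2**shift, and intervals whose snapped bounds collapse are dropped:

shift = 2                      # e.g. if HPY_MAX_NORDER were 29 and new_order 28
ofs = (1 << shift) - 1         # 0b11
mask = ~ofs
assert ((5 + 0) & mask, (14 + ofs) & mask) == (4, 16)   # (5, 14) kept as (4, 16)
assert ((4 + 0) & mask) == ((4 + ofs) & mask) == 4      # (4, 4) dropped, since b <= a
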
def parse(self, xml_file):
    """Get a list of parsed recipes from BeerXML input."""
    recipes = []

    with open(xml_file, "rt") as f:
        tree = ElementTree.parse(f)

    for recipeNode in tree.iter():
        if self.to_lower(recipeNode.tag) != "recipe":
            continue

        recipe = Recipe()
        recipes.append(recipe)

        for recipeProperty in list(recipeNode):
            tag_name = self.to_lower(recipeProperty.tag)

            if tag_name == "fermentables":
                for fermentable_node in list(recipeProperty):
                    fermentable = Fermentable()
                    self.nodes_to_object(fermentable_node, fermentable)
                    recipe.fermentables.append(fermentable)
            elif tag_name == "yeasts":
                for yeast_node in list(recipeProperty):
                    yeast = Yeast()
                    self.nodes_to_object(yeast_node, yeast)
                    recipe.yeasts.append(yeast)
            elif tag_name == "hops":
                for hop_node in list(recipeProperty):
                    hop = Hop()
                    self.nodes_to_object(hop_node, hop)
                    recipe.hops.append(hop)
            elif tag_name == "miscs":
                for misc_node in list(recipeProperty):
                    misc = Misc()
                    self.nodes_to_object(misc_node, misc)
                    recipe.miscs.append(misc)
            elif tag_name == "style":
                style = Style()
                recipe.style = style
                self.nodes_to_object(recipeProperty, style)
            elif tag_name == "mash":
                # Create the mash once, then fill it from its child nodes;
                # re-creating it per child would discard earlier steps.
                mash = Mash()
                recipe.mash = mash
                for mash_node in list(recipeProperty):
                    if self.to_lower(mash_node.tag) == "mash_steps":
                        for mash_step_node in list(mash_node):
                            mash_step = MashStep()
                            self.nodes_to_object(mash_step_node, mash_step)
                            mash.steps.append(mash_step)
                    else:
                        self.nodes_to_object(mash_node, mash)
            else:
                self.node_to_object(recipeProperty, recipe)

    return recipes

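A hypothetical usage sketch; the parser class name and the input filename are illustrative assumptions:

# parser = Parser()                          # hypothetical class name
# for recipe in parser.parse('pale_ale.xml'):
#     print(len(recipe.hops), 'hop additions')
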
def getAllExportsAsDict(self, plugin_list=None):
    """
    Return all the stats to be exported (dict of plugin name to export data).

    Default behavior is to export all the stats.
    If plugin_list is provided, only export the stats of the given
    plugins (list).
    """
    if plugin_list is None:
        # All plugins should be exported
        plugin_list = self._plugins
    return {p: self._plugins[p].get_export() for p in plugin_list}

def insert_permission(
        self,
        file_id,
        value,
        perm_type,
        role,
        notify=True,
        email_message=None,
        with_link=False
):
    """Creates a new permission for a file.

    :param file_id: a spreadsheet ID (aka file ID).
    :type file_id: str
    :param value: user or group e-mail address, domain name
        or None for 'default' type.
    :type value: str, None
    :param perm_type: (optional) The account type.
        Allowed values are: ``user``, ``group``, ``domain``, ``anyone``
    :type perm_type: str
    :param role: (optional) The primary role for this user.
        Allowed values are: ``owner``, ``writer``, ``reader``
    :type role: str
    :param notify: (optional) Whether to send an email to the target
        user/domain.
    :type notify: str
    :param email_message: (optional) An email message to be sent
        if notify=True.
    :type email_message: str
    :param with_link: (optional) Whether the link is required for this
        permission to be active.
    :type with_link: bool

    Examples::

        # Give write permissions to [email protected]
        gc.insert_permission(
            '0BmgG6nO_6dprnRRUWl1UFE',
            '[email protected]',
            perm_type='user',
            role='writer'
        )

        # Make the spreadsheet publicly readable
        gc.insert_permission(
            '0BmgG6nO_6dprnRRUWl1UFE',
            None,
            perm_type='anyone',
            role='reader'
        )
    """
    url = '{0}/{1}/permissions'.format(DRIVE_FILES_API_V2_URL, file_id)

    payload = {
        'value': value,
        'type': perm_type,
        'role': role,
        'withLink': with_link
    }
    params = {
        'sendNotificationEmails': notify,
        'emailMessage': email_message
    }

    self.request('post', url, json=payload, params=params)

def write_bus_data(self, file):
    """ Writes bus data to a ReST table.
    """
    report = CaseReport(self.case)
    buses = self.case.buses

    col_width = 8
    col_width_2 = col_width * 2 + 1
    col1_width = 6

    sep = "=" * 6 + " " + ("=" * col_width + " ") * 6 + "\n"

    file.write(sep)

    # Line one of column headers
    file.write("Name".center(col1_width) + " ")
    file.write("Voltage (pu)".center(col_width_2) + " ")
    file.write("Generation".center(col_width_2) + " ")
    file.write("Load".center(col_width_2) + " ")
    file.write("\n")

    file.write("-" * col1_width + " " + ("-" * col_width_2 + " ") * 3 + "\n")

    # Line two of column header
    file.write("..".ljust(col1_width) + " ")
    file.write("Amp".center(col_width) + " ")
    file.write("Phase".center(col_width) + " ")
    file.write("P (MW)".center(col_width) + " ")
    file.write("Q (MVAr)".center(col_width) + " ")
    file.write("P (MW)".center(col_width) + " ")
    file.write("Q (MVAr)".center(col_width) + " ")
    file.write("\n")

    file.write(sep)

    # Bus rows
    for bus in buses:
        file.write(bus.name[:col1_width].ljust(col1_width))
        file.write(" %8.3f" % bus.v_magnitude)
        file.write(" %8.3f" % bus.v_angle)
        file.write(" %8.2f" % self.case.s_supply(bus).real)
        file.write(" %8.2f" % self.case.s_supply(bus).imag)
        file.write(" %8.2f" % self.case.s_demand(bus).real)
        file.write(" %8.2f" % self.case.s_demand(bus).imag)
        file.write("\n")

    # Totals
    # file.write("..".ljust(col1_width) + " ")
    # file.write(("..".ljust(col_width) + " ")*2)
    # file.write(("_"*col_width + " ")*4 + "\n")
    file.write("..".ljust(col1_width) + " " + "..".ljust(col_width) + " ")
    file.write("*Total:*".rjust(col_width) + " ")
    ptot = report.actual_pgen
    qtot = report.actual_qgen
    file.write("%8.2f " % ptot)
    file.write("%8.2f " % qtot)
    file.write("%8.2f " % report.p_demand)
    file.write("%8.2f " % report.q_demand)
    file.write("\n")
    file.write(sep)
    del report

def http_post(self, path, query_data={}, post_data={}, files=None, **kwargs):
    """Make a POST request to the Gitlab server.

    Args:
        path (str): Path or full URL to query ('/projects' or
            'http://whatever/v4/api/projects')
        query_data (dict): Data to send as query parameters
        post_data (dict): Data to send in the body (will be converted to
            json)
        files (dict): The files to send to the server
        **kwargs: Extra options to send to the server (e.g. sudo)

    Returns:
        The parsed json returned by the server if json is returned, else the
        raw content

    Raises:
        GitlabHttpError: When the return code is not 2xx
        GitlabParsingError: If the json data could not be parsed
    """
    result = self.http_request('post', path, query_data=query_data,
                               post_data=post_data, files=files, **kwargs)
    try:
        if result.headers.get('Content-Type', None) == 'application/json':
            return result.json()
    except Exception:
        raise GitlabParsingError(
            error_message="Failed to parse the server message")
    return result

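A minimal usage sketch in the style of the docstring; the server URL, token, and project name are illustrative:

# gl = Gitlab('https://gitlab.example.com', private_token='...')  # illustrative
# project = gl.http_post('/projects', post_data={'name': 'my-project'})
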
def apply(self, configuration, schema, args):
    """
    Apply the plugin to the configuration. Inheriting plugins should
    implement this method to add additional functionality.

    Parameters
    ----------
    configuration : dict
        configuration
    schema : dict
        JSON schema
    args : argparse.Namespace
        parsed command line arguments

    Returns
    -------
    configuration : dict
        updated configuration after applying the plugin
    """
    # Set values from the command line
    for name, path in self.arguments.items():
        value = getattr(args, name.replace('-', '_'))
        if value is not None:
            util.set_value(configuration, path, value)
    return configuration

def chunkify(iterable, chunksize):
    """
    Splits an iterable into chunks of size ``chunksize``. The last chunk may
    be smaller than ``chunksize``.
    """
    from .queryset import QuerySet
    if hasattr(iterable, '__getitem__') and not isinstance(iterable, QuerySet):
        # tuple, list. QuerySet has __getitem__ but that evaluates the entire
        # query greedily. We don't want that here.
        for i in range(0, len(iterable), chunksize):
            yield iterable[i:i + chunksize]
    else:
        # generator, set, map, QuerySet
        chunk = []
        for i in iterable:
            chunk.append(i)
            if len(chunk) == chunksize:
                yield chunk
                chunk = []
        if chunk:
            yield chunk

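Both code paths in action; sliceable inputs are sliced per chunk, while generators are buffered into lists:

>>> list(chunkify([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]
>>> list(chunkify((i * i for i in range(5)), 2))
[[0, 1], [4, 9], [16]]
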
def find_hosts_that_use_template(self, tpl_name):
    """Find hosts that use the template defined in argument tpl_name.

    :param tpl_name: the template name we filter on
    :type tpl_name: str
    :return: list of the host_name of the hosts that have the template
        tpl_name in their tags
    :rtype: list[str]
    """
    return [h.host_name for h in self
            if tpl_name in h.tags and hasattr(h, "host_name")]

def _execute(self, connection, query, fetch=True):
    """ Executes given query using given connection.

    Args:
        connection (apsw.Connection): connection to the sqlite db that
            stores mpr data.
        query (str): sql query
        fetch (boolean, optional): if True, fetch query result and return it.
            If False, do not fetch.

    Returns:
        iterable with query result.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query)
    except Exception as e:
        from ambry.mprlib.exceptions import BadSQLError
        raise BadSQLError("Failed to execute query: {}; {}".format(query, e))
    if fetch:
        return cursor.fetchall()
    else:
        return cursor

def screenshot(self, viewID, filename):
    """screenshot(string, string) -> None

    Save a screenshot for the given view to the given filename.
    The file format is guessed from the extension; the available formats
    differ from platform to platform but should at least include ps, svg
    and pdf, and on Linux probably gif, png and jpg as well.
    """
    self._connection._sendStringCmd(
        tc.CMD_SET_GUI_VARIABLE, tc.VAR_SCREENSHOT, viewID, filename)

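A hypothetical TraCI session using this method through the gui domain; the config file, view ID, and output name are illustrative:

# import traci
# traci.start(["sumo-gui", "-c", "scenario.sumocfg"])   # illustrative scenario
# traci.gui.screenshot("View #0", "snapshot.png")
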
def get_period_seconds(period):
    """
    return the number of seconds in the specified period

    >>> get_period_seconds('day')
    86400
    >>> get_period_seconds(86400)
    86400
    >>> get_period_seconds(datetime.timedelta(hours=24))
    86400
    >>> get_period_seconds('day + os.system("rm -Rf *")')
    Traceback (most recent call last):
    ...
    ValueError: period not in (second, minute, hour, day, month, year)
    """
    if isinstance(period, six.string_types):
        try:
            name = 'seconds_per_' + period.lower()
            result = globals()[name]
        except KeyError:
            msg = "period not in (second, minute, hour, day, month, year)"
            raise ValueError(msg)
    elif isinstance(period, numbers.Number):
        result = period
    elif isinstance(period, datetime.timedelta):
        result = period.days * get_period_seconds('day') + period.seconds
    else:
        raise TypeError('period must be a string or integer')
    return result

def parse_json_feed_bytes(data: bytes) -> JSONFeed: """Parse a JSON feed from a byte-string containing JSON data.""" try: root = json.loads(data) except json.decoder.JSONDecodeError: raise FeedJSONError('Not a valid JSON document') return parse_json_feed(root)
Parse a JSON feed from a byte-string containing JSON data.
Below is the instruction that describes the task:
### Input:
Parse a JSON feed from a byte-string containing JSON data.
### Response:
def parse_json_feed_bytes(data: bytes) -> JSONFeed:
    """Parse a JSON feed from a byte-string containing JSON data."""
    try:
        root = json.loads(data)
    except json.decoder.JSONDecodeError:
        raise FeedJSONError('Not a valid JSON document')

    return parse_json_feed(root)
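A usage sketch for the parser above. It assumes parse_json_feed and FeedJSONError are importable from the same module, and the feed payload is purely illustrative:

# Illustrative payload; a real feed would carry populated items.
raw = b'{"version": "https://jsonfeed.org/version/1", "title": "Example", "items": []}'
try:
    feed = parse_json_feed_bytes(raw)
except FeedJSONError as exc:
    print('invalid feed:', exc)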
def receive_trial_result(self, parameter_id, parameters, value):
    """Tuner receives a result from a trial.

    Parameters
    ----------
    parameter_id : int
    parameters : dict
    value : dict/float
        if value is dict, it should have "default" key.
    """
    value = extract_scalar_reward(value)
    if self.optimize_mode == OptimizeMode.Maximize:
        value = -value

    logger.info("Received trial result.")
    logger.info("value is :" + str(value))
    logger.info("parameter is : " + str(parameters))

    # parse parameter to sample_x
    sample_x = [0 for i in range(len(self.key_order))]
    for key in parameters:
        idx = self.key_order.index(key)
        sample_x[idx] = parameters[key]

    # parse value to sample_y
    temp_y = []
    if sample_x in self.samples_x:
        idx = self.samples_x.index(sample_x)
        temp_y = self.samples_y[idx]
        temp_y.append(value)
        self.samples_y[idx] = temp_y

        # calculate y aggregation
        median = get_median(temp_y)
        self.samples_y_aggregation[idx] = [median]
    else:
        self.samples_x.append(sample_x)
        self.samples_y.append([value])

        # calculate y aggregation
        self.samples_y_aggregation.append([value])
Tuner receives a result from a trial.

Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
    if value is dict, it should have "default" key.
Below is the instruction that describes the task:
### Input:
Tuner receives a result from a trial.

Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
    if value is dict, it should have "default" key.
### Response:
def receive_trial_result(self, parameter_id, parameters, value):
    """Tuner receives a result from a trial.

    Parameters
    ----------
    parameter_id : int
    parameters : dict
    value : dict/float
        if value is dict, it should have "default" key.
    """
    value = extract_scalar_reward(value)
    if self.optimize_mode == OptimizeMode.Maximize:
        value = -value

    logger.info("Received trial result.")
    logger.info("value is :" + str(value))
    logger.info("parameter is : " + str(parameters))

    # parse parameter to sample_x
    sample_x = [0 for i in range(len(self.key_order))]
    for key in parameters:
        idx = self.key_order.index(key)
        sample_x[idx] = parameters[key]

    # parse value to sample_y
    temp_y = []
    if sample_x in self.samples_x:
        idx = self.samples_x.index(sample_x)
        temp_y = self.samples_y[idx]
        temp_y.append(value)
        self.samples_y[idx] = temp_y

        # calculate y aggregation
        median = get_median(temp_y)
        self.samples_y_aggregation[idx] = [median]
    else:
        self.samples_x.append(sample_x)
        self.samples_y.append([value])

        # calculate y aggregation
        self.samples_y_aggregation.append([value])
def get_config(self, config_filename):
    """
    Collect all config files in all available apps and merge them into
    an ini object.

    :return: ini object
    """
    x = pyini.Ini(lazy=True, basepath=os.path.join(self.project_dir, 'apps'))
    for p in reversed(self.apps):
        app_path = get_app_dir(p)
        filename = os.path.join(app_path, config_filename)
        if os.path.exists(filename):
            x.read(filename)
    x.freeze()
    return x
Collect all config files in all available apps and merge them into
an ini object.

:return: ini object
Below is the instruction that describes the task:
### Input:
Collect all config files in all available apps and merge them into
an ini object.

:return: ini object
### Response:
def get_config(self, config_filename):
    """
    Collect all config files in all available apps and merge them into
    an ini object.

    :return: ini object
    """
    x = pyini.Ini(lazy=True, basepath=os.path.join(self.project_dir, 'apps'))
    for p in reversed(self.apps):
        app_path = get_app_dir(p)
        filename = os.path.join(app_path, config_filename)
        if os.path.exists(filename):
            x.read(filename)
    x.freeze()
    return x
def parse_opml_bytes(data: bytes) -> OPML: """Parse an OPML document from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_opml(root)
Parse an OPML document from a byte-string containing XML data.
Below is the instruction that describes the task:
### Input:
Parse an OPML document from a byte-string containing XML data.
### Response:
def parse_opml_bytes(data: bytes) -> OPML:
    """Parse an OPML document from a byte-string containing XML data."""
    root = parse_xml(BytesIO(data)).getroot()
    return _parse_opml(root)
def transformer_ffn_layer(x,
                          hparams,
                          pad_remover=None,
                          conv_padding="LEFT",
                          nonpadding_mask=None,
                          losses=None,
                          cache=None,
                          decode_loop_step=None,
                          readout_filter_size=0,
                          layer_collection=None):
  """Feed-forward layer in the transformer.

  Args:
    x: a Tensor of shape [batch_size, length, hparams.hidden_size]
    hparams: hyperparameters for model
    pad_remover: an expert_utils.PadRemover object tracking the padding
      positions. If provided, when using convolutional settings, the padding
      is removed before applying the convolution, and restored afterward. This
      can give a significant speedup.
    conv_padding: a string - either "LEFT" or "SAME".
    nonpadding_mask: an optional Tensor with shape [batch_size, length],
      needed for convolutional layers with "SAME" padding. Contains 1.0 in
      positions corresponding to nonpadding.
    losses: optional list onto which to append extra training losses
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    readout_filter_size: if it's greater than 0, then it will be used instead
      of filter_size
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.

  Returns:
    a Tensor of shape [batch_size, length, hparams.hidden_size]

  Raises:
    ValueError: If losses arg is None, but layer generates extra losses.
  """
  ffn_layer = hparams.ffn_layer
  relu_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "relu_dropout_broadcast_dims", "")))
  if ffn_layer == "conv_hidden_relu":
    # Backwards compatibility
    ffn_layer = "dense_relu_dense"
  if ffn_layer == "dense_relu_dense":
    # In simple convolution mode, use `pad_remover` to speed up processing.
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE,
        value={
            "filter_size": hparams.filter_size,
            "use_bias": "True",
            "activation": mlperf_log.RELU
        })
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE,
        value={
            "hidden_size": hparams.hidden_size,
            "use_bias": "True",
        })
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout)
    if pad_remover:
      original_shape = common_layers.shape_list(x)
      # Collapse `x` across examples, and remove padding positions.
      x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
      x = tf.expand_dims(pad_remover.remove(x), axis=0)
    conv_output = common_layers.dense_relu_dense(
        x,
        hparams.filter_size,
        hparams.hidden_size,
        dropout=hparams.relu_dropout,
        dropout_broadcast_dims=relu_dropout_broadcast_dims,
        layer_collection=layer_collection)
    if pad_remover:
      # Restore `conv_output` to the original shape of `x`, including padding.
conv_output = tf.reshape( pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape) return conv_output elif ffn_layer == "conv_relu_conv": return common_layers.conv_relu_conv( x, readout_filter_size or hparams.filter_size, hparams.hidden_size, first_kernel_size=hparams.conv_first_kernel, second_kernel_size=1, padding=conv_padding, nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout, cache=cache, decode_loop_step=decode_loop_step) elif ffn_layer == "parameter_attention": return common_attention.parameter_attention( x, hparams.parameter_attention_key_channels or hparams.hidden_size, hparams.parameter_attention_value_channels or hparams.hidden_size, hparams.hidden_size, readout_filter_size or hparams.filter_size, hparams.num_heads, hparams.attention_dropout) elif ffn_layer == "conv_hidden_relu_with_sepconv": return common_layers.conv_hidden_relu( x, readout_filter_size or hparams.filter_size, hparams.hidden_size, kernel_size=(3, 1), second_kernel_size=(31, 1), padding="LEFT", dropout=hparams.relu_dropout) elif ffn_layer == "sru": return common_layers.sru(x) elif ffn_layer == "local_moe_tpu": overhead = hparams.moe_overhead_eval if hparams.mode == tf.estimator.ModeKeys.TRAIN: overhead = hparams.moe_overhead_train ret, loss = expert_utils.local_moe_tpu( x, hparams.filter_size // 2, hparams.hidden_size, hparams.moe_num_experts, overhead=overhead, loss_coef=hparams.moe_loss_coef) elif ffn_layer == "local_moe": overhead = hparams.moe_overhead_eval if hparams.mode == tf.estimator.ModeKeys.TRAIN: overhead = hparams.moe_overhead_train ret, loss = expert_utils.local_moe( x, True, expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size], hparams.hidden_size), hparams.moe_num_experts, k=hparams.moe_k, hparams=hparams) losses.append(loss) return ret else: assert ffn_layer == "none" return x
Feed-forward layer in the transformer.

Args:
  x: a Tensor of shape [batch_size, length, hparams.hidden_size]
  hparams: hyperparameters for model
  pad_remover: an expert_utils.PadRemover object tracking the padding
    positions. If provided, when using convolutional settings, the padding
    is removed before applying the convolution, and restored afterward. This
    can give a significant speedup.
  conv_padding: a string - either "LEFT" or "SAME".
  nonpadding_mask: an optional Tensor with shape [batch_size, length],
    needed for convolutional layers with "SAME" padding. Contains 1.0 in
    positions corresponding to nonpadding.
  losses: optional list onto which to append extra training losses
  cache: dict, containing tensors which are the results of previous
    attentions, used for fast decoding.
  decode_loop_step: An integer, step number of the decoding loop. Only used
    for inference on TPU.
  readout_filter_size: if it's greater than 0, then it will be used instead
    of filter_size
  layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
    KFAC optimizer. Default is None.

Returns:
  a Tensor of shape [batch_size, length, hparams.hidden_size]

Raises:
  ValueError: If losses arg is None, but layer generates extra losses.
Below is the instruction that describes the task:
### Input:
Feed-forward layer in the transformer.

Args:
  x: a Tensor of shape [batch_size, length, hparams.hidden_size]
  hparams: hyperparameters for model
  pad_remover: an expert_utils.PadRemover object tracking the padding
    positions. If provided, when using convolutional settings, the padding
    is removed before applying the convolution, and restored afterward. This
    can give a significant speedup.
  conv_padding: a string - either "LEFT" or "SAME".
  nonpadding_mask: an optional Tensor with shape [batch_size, length],
    needed for convolutional layers with "SAME" padding. Contains 1.0 in
    positions corresponding to nonpadding.
  losses: optional list onto which to append extra training losses
  cache: dict, containing tensors which are the results of previous
    attentions, used for fast decoding.
  decode_loop_step: An integer, step number of the decoding loop. Only used
    for inference on TPU.
  readout_filter_size: if it's greater than 0, then it will be used instead
    of filter_size
  layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
    KFAC optimizer. Default is None.

Returns:
  a Tensor of shape [batch_size, length, hparams.hidden_size]

Raises:
  ValueError: If losses arg is None, but layer generates extra losses.
### Response:
def transformer_ffn_layer(x,
                          hparams,
                          pad_remover=None,
                          conv_padding="LEFT",
                          nonpadding_mask=None,
                          losses=None,
                          cache=None,
                          decode_loop_step=None,
                          readout_filter_size=0,
                          layer_collection=None):
  """Feed-forward layer in the transformer.

  Args:
    x: a Tensor of shape [batch_size, length, hparams.hidden_size]
    hparams: hyperparameters for model
    pad_remover: an expert_utils.PadRemover object tracking the padding
      positions. If provided, when using convolutional settings, the padding
      is removed before applying the convolution, and restored afterward. This
      can give a significant speedup.
    conv_padding: a string - either "LEFT" or "SAME".
    nonpadding_mask: an optional Tensor with shape [batch_size, length],
      needed for convolutional layers with "SAME" padding. Contains 1.0 in
      positions corresponding to nonpadding.
    losses: optional list onto which to append extra training losses
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    readout_filter_size: if it's greater than 0, then it will be used instead
      of filter_size
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.

  Returns:
    a Tensor of shape [batch_size, length, hparams.hidden_size]

  Raises:
    ValueError: If losses arg is None, but layer generates extra losses.
  """
  ffn_layer = hparams.ffn_layer
  relu_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "relu_dropout_broadcast_dims", "")))
  if ffn_layer == "conv_hidden_relu":
    # Backwards compatibility
    ffn_layer = "dense_relu_dense"
  if ffn_layer == "dense_relu_dense":
    # In simple convolution mode, use `pad_remover` to speed up processing.
mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE, value={ "filter_size": hparams.filter_size, "use_bias": "True", "activation": mlperf_log.RELU }) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE, value={ "hidden_size": hparams.hidden_size, "use_bias": "True", }) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout) if pad_remover: original_shape = common_layers.shape_list(x) # Collapse `x` across examples, and remove padding positions. x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) x = tf.expand_dims(pad_remover.remove(x), axis=0) conv_output = common_layers.dense_relu_dense( x, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, dropout_broadcast_dims=relu_dropout_broadcast_dims, layer_collection=layer_collection) if pad_remover: # Restore `conv_output` to the original shape of `x`, including padding. conv_output = tf.reshape( pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape) return conv_output elif ffn_layer == "conv_relu_conv": return common_layers.conv_relu_conv( x, readout_filter_size or hparams.filter_size, hparams.hidden_size, first_kernel_size=hparams.conv_first_kernel, second_kernel_size=1, padding=conv_padding, nonpadding_mask=nonpadding_mask, dropout=hparams.relu_dropout, cache=cache, decode_loop_step=decode_loop_step) elif ffn_layer == "parameter_attention": return common_attention.parameter_attention( x, hparams.parameter_attention_key_channels or hparams.hidden_size, hparams.parameter_attention_value_channels or hparams.hidden_size, hparams.hidden_size, readout_filter_size or hparams.filter_size, hparams.num_heads, hparams.attention_dropout) elif ffn_layer == "conv_hidden_relu_with_sepconv": return common_layers.conv_hidden_relu( x, readout_filter_size or hparams.filter_size, hparams.hidden_size, kernel_size=(3, 1), second_kernel_size=(31, 1), padding="LEFT", dropout=hparams.relu_dropout) elif ffn_layer == "sru": return common_layers.sru(x) elif ffn_layer == "local_moe_tpu": overhead = hparams.moe_overhead_eval if hparams.mode == tf.estimator.ModeKeys.TRAIN: overhead = hparams.moe_overhead_train ret, loss = expert_utils.local_moe_tpu( x, hparams.filter_size // 2, hparams.hidden_size, hparams.moe_num_experts, overhead=overhead, loss_coef=hparams.moe_loss_coef) elif ffn_layer == "local_moe": overhead = hparams.moe_overhead_eval if hparams.mode == tf.estimator.ModeKeys.TRAIN: overhead = hparams.moe_overhead_train ret, loss = expert_utils.local_moe( x, True, expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size], hparams.hidden_size), hparams.moe_num_experts, k=hparams.moe_k, hparams=hparams) losses.append(loss) return ret else: assert ffn_layer == "none" return x
def is_changed(self, start, end):
    """Tell whether any of the lines from start till end have changed.

    The end points are inclusive and indices start from 1.
    """
    left, right = self._get_changed(start, end)
    if left < right:
        return True
    return False
Tell whether any of the lines from start till end have changed.

The end points are inclusive and indices start from 1.
Below is the instruction that describes the task:
### Input:
Tell whether any of the lines from start till end have changed.

The end points are inclusive and indices start from 1.
### Response:
def is_changed(self, start, end):
    """Tell whether any of the lines from start till end have changed.

    The end points are inclusive and indices start from 1.
    """
    left, right = self._get_changed(start, end)
    if left < right:
        return True
    return False
def get_best_fit_parameters_translated_grouped(self): """Returns the parameters as a dictionary of the 'real units' for the best fit.""" result_dict = dict() result_dict['ocv'] = [parameters['ocv'] for parameters in self.best_fit_parameters_translated] result_dict['ir'] = [parameters['ir'] for parameters in self.best_fit_parameters_translated] for i in range(self.circuits): result_dict['r' + str(i)] = [parameters['r' + str(i)] for parameters in self.best_fit_parameters_translated] result_dict['c' + str(i)] = [parameters['c' + str(i)] for parameters in self.best_fit_parameters_translated] return result_dict
Returns the parameters as a dictionary of the 'real units' for the best fit.
Below is the instruction that describes the task:
### Input:
Returns the parameters as a dictionary of the 'real units' for the best fit.
### Response:
def get_best_fit_parameters_translated_grouped(self):
    """Returns the parameters as a dictionary of the 'real units' for the best fit."""
    result_dict = dict()
    result_dict['ocv'] = [parameters['ocv'] for parameters in
                          self.best_fit_parameters_translated]
    result_dict['ir'] = [parameters['ir'] for parameters in
                         self.best_fit_parameters_translated]
    for i in range(self.circuits):
        result_dict['r' + str(i)] = [parameters['r' + str(i)] for parameters in
                                     self.best_fit_parameters_translated]
        result_dict['c' + str(i)] = [parameters['c' + str(i)] for parameters in
                                     self.best_fit_parameters_translated]

    return result_dict
def to_dict(self): """Convert this object into a dictionary. Returns: dict: A dict with the same information as this object. """ out_dict = {} out_dict['commands'] = self.commands out_dict['configs'] = self.configs out_dict['short_name'] = self.name out_dict['versions'] = { 'module': self.module_version, 'api': self.api_version } return out_dict
Convert this object into a dictionary. Returns: dict: A dict with the same information as this object.
Below is the instruction that describes the task:
### Input:
Convert this object into a dictionary.

Returns:
    dict: A dict with the same information as this object.
### Response:
def to_dict(self):
    """Convert this object into a dictionary.

    Returns:
        dict: A dict with the same information as this object.
    """
    out_dict = {}
    out_dict['commands'] = self.commands
    out_dict['configs'] = self.configs
    out_dict['short_name'] = self.name
    out_dict['versions'] = {
        'module': self.module_version,
        'api': self.api_version
    }

    return out_dict
def powerset(iterable):
    """Calculate the powerset of any iterable.

    For a range of integers up to the length of the given list, make all
    possible combinations and chain them together as one object.
    From https://docs.python.org/3/library/itertools.html#itertools-recipes
    """
    # list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
Calculate the powerset of any iterable. For a range of integers up to the length of the given list, make all possible combinations and chain them together as one object. From https://docs.python.org/3/library/itertools.html#itertools-recipes
Below is the instruction that describes the task:
### Input:
Calculate the powerset of any iterable.

For a range of integers up to the length of the given list, make all
possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes
### Response:
def powerset(iterable):
    """Calculate the powerset of any iterable.

    For a range of integers up to the length of the given list, make all
    possible combinations and chain them together as one object.
    From https://docs.python.org/3/library/itertools.html#itertools-recipes
    """
    # list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
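A self-contained demonstration of the recipe above, using only the standard library:

from itertools import chain, combinations

def powerset(iterable):
    # Itertools-recipes powerset: all combinations of every length.
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

print(list(powerset([1, 2, 3])))
# [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]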
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] imean = (self._compute_magnitude(rup, C) + self._compute_distance(rup, dists, C) + self._get_site_amplification(sites, C) + self._compute_forearc_backarc_term(C, sites, dists, rup)) istddevs = self._get_stddevs(C, stddev_types, num_sites=len(sites.vs30)) # Convert units to g, # but only for PGA and SA (not PGV): if imt.name in "SA PGA": mean = np.log((10.0 ** (imean - 2.0)) / g) else: # PGV: mean = np.log(10.0 ** imean) # Return stddevs in terms of natural log scaling stddevs = np.log(10.0 ** np.array(istddevs)) # mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g) return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
Below is the instruction that describes the task:
### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS[imt]

    imean = (self._compute_magnitude(rup, C) +
             self._compute_distance(rup, dists, C) +
             self._get_site_amplification(sites, C) +
             self._compute_forearc_backarc_term(C, sites, dists, rup))

    istddevs = self._get_stddevs(C, stddev_types, num_sites=len(sites.vs30))

    # Convert units to g,
    # but only for PGA and SA (not PGV):
    if imt.name in "SA PGA":
        mean = np.log((10.0 ** (imean - 2.0)) / g)
    else:
        # PGV:
        mean = np.log(10.0 ** imean)

    # Return stddevs in terms of natural log scaling
    stddevs = np.log(10.0 ** np.array(istddevs))
    # mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g)
    return mean, stddevs
def update_state_changes(self, state_changes_data: List[Tuple[str, int]]) -> None:
    """Given a list of identifier/data state tuples, update them in the DB"""
    cursor = self.conn.cursor()
    cursor.executemany(
        'UPDATE state_changes SET data=? WHERE identifier=?',
        state_changes_data,
    )
    self.maybe_commit()
Given a list of identifier/data state tuples, update them in the DB
Below is the instruction that describes the task:
### Input:
Given a list of identifier/data state tuples, update them in the DB
### Response:
def update_state_changes(self, state_changes_data: List[Tuple[str, int]]) -> None:
    """Given a list of identifier/data state tuples, update them in the DB"""
    cursor = self.conn.cursor()
    cursor.executemany(
        'UPDATE state_changes SET data=? WHERE identifier=?',
        state_changes_data,
    )
    self.maybe_commit()
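A standalone sqlite3 sketch of the executemany pattern the method relies on; the table and rows here are illustrative, not the real schema. Note that each tuple supplies (data, identifier), matching the placeholder order in the UPDATE statement:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE state_changes (identifier INTEGER, data TEXT)')
conn.executemany('INSERT INTO state_changes VALUES (?, ?)',
                 [(1, 'old'), (2, 'old')])
# One parameter tuple per row to update: (data, identifier).
conn.executemany('UPDATE state_changes SET data=? WHERE identifier=?',
                 [('new-1', 1), ('new-2', 2)])
print(conn.execute('SELECT * FROM state_changes').fetchall())
# [(1, 'new-1'), (2, 'new-2')]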
def get_form_errors(form):
    """
    Django form errors do not obey natural field order; this template tag
    returns non-field and field-specific errors.

    :param form: the form instance
    """
    return {
        'non_field': form.non_field_errors(),
        'field_specific': OrderedDict(
            (field, form.errors[field.name])
            for field in form
            if field.name in form.errors
        )
    }
Django form errors do not obey natural field order; this template tag
returns non-field and field-specific errors.

:param form: the form instance
Below is the instruction that describes the task:
### Input:
Django form errors do not obey natural field order; this template tag
returns non-field and field-specific errors.

:param form: the form instance
### Response:
def get_form_errors(form):
    """
    Django form errors do not obey natural field order; this template tag
    returns non-field and field-specific errors.

    :param form: the form instance
    """
    return {
        'non_field': form.non_field_errors(),
        'field_specific': OrderedDict(
            (field, form.errors[field.name])
            for field in form
            if field.name in form.errors
        )
    }
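A minimal sketch exercising the tag's return value; the form and its field are hypothetical, and Django settings must be configured before any form machinery is used:

import django
from django.conf import settings

settings.configure()  # default settings are enough for plain forms
django.setup()

from django import forms

class SignupForm(forms.Form):
    email = forms.EmailField()

form = SignupForm(data={'email': 'not-an-email'})
form.is_valid()  # triggers validation and populates form.errors
errors = get_form_errors(form)
# errors['non_field'] -> form-wide errors
# errors['field_specific'] -> OrderedDict keyed by bound field, in field order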
def target_query(plugin, port, location): """ prepared ReQL for target """ return ((r.row[PLUGIN_NAME_KEY] == plugin) & (r.row[PORT_FIELD] == port) & (r.row[LOCATION_FIELD] == location))
prepared ReQL for target
Below is the instruction that describes the task:
### Input:
prepared ReQL for target
### Response:
def target_query(plugin, port, location):
    """ prepared ReQL for target """
    return ((r.row[PLUGIN_NAME_KEY] == plugin) &
            (r.row[PORT_FIELD] == port) &
            (r.row[LOCATION_FIELD] == location))
def _Open(self, path_spec=None, mode='rb'): """Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid. """ if not path_spec: raise ValueError('Missing path specification.') if path_spec.HasParent(): raise errors.PathSpecError('Unsupported path specification with parent.') location = getattr(path_spec, 'location', None) if location is None: raise errors.PathSpecError('Path specification missing location.') self._current_offset = 0 self._size = len(self._file_data)
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
Below is the instruction that describes the task:
### Input:
Opens the file-like object defined by path specification.

Args:
  path_spec (PathSpec): path specification.
  mode (Optional[str]): file access mode.

Raises:
  AccessError: if the access to open the file was denied.
  IOError: if the file-like object could not be opened.
  OSError: if the file-like object could not be opened.
  PathSpecError: if the path specification is incorrect.
  ValueError: if the path specification is invalid.
### Response:
def _Open(self, path_spec=None, mode='rb'):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.
      mode (Optional[str]): file access mode.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file-like object could not be opened.
      OSError: if the file-like object could not be opened.
      PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid.
    """
    if not path_spec:
        raise ValueError('Missing path specification.')

    if path_spec.HasParent():
        raise errors.PathSpecError('Unsupported path specification with parent.')

    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')

    self._current_offset = 0
    self._size = len(self._file_data)
def _auto_positive_symbol(tokens, local_dict, global_dict):
    """
    Inserts calls to ``Symbol`` for undefined variables.
    Passes in positive=True as a keyword argument.
    Adapted from sympy.parsing.sympy_parser.auto_symbol
    """
    result = []

    tokens.append((None, None))  # so zip traverses all tokens
    for tok, nextTok in zip(tokens, tokens[1:]):
        tokNum, tokVal = tok
        nextTokNum, nextTokVal = nextTok
        if tokNum == token.NAME:
            name = tokVal

            if name in global_dict:
                obj = global_dict[name]
                if isinstance(obj, (Basic, type)) or callable(obj):
                    result.append((token.NAME, name))
                    continue

            # try to resolve known alternative unit name
            try:
                used_name = inv_name_alternatives[str(name)]
            except KeyError:
                # if we don't know this name it's a user-defined unit name
                # so we should create a new symbol for it
                used_name = str(name)

            result.extend(
                [
                    (token.NAME, "Symbol"),
                    (token.OP, "("),
                    (token.NAME, repr(used_name)),
                    (token.OP, ","),
                    (token.NAME, "positive"),
                    (token.OP, "="),
                    (token.NAME, "True"),
                    (token.OP, ")"),
                ]
            )
        else:
            result.append((tokNum, tokVal))

    return result
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.parsing.sympy_parser.auto_symbol
Below is the instruction that describes the task:
### Input:
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.parsing.sympy_parser.auto_symbol
### Response:
def _auto_positive_symbol(tokens, local_dict, global_dict):
    """
    Inserts calls to ``Symbol`` for undefined variables.
    Passes in positive=True as a keyword argument.
    Adapted from sympy.parsing.sympy_parser.auto_symbol
    """
    result = []

    tokens.append((None, None))  # so zip traverses all tokens
    for tok, nextTok in zip(tokens, tokens[1:]):
        tokNum, tokVal = tok
        nextTokNum, nextTokVal = nextTok
        if tokNum == token.NAME:
            name = tokVal

            if name in global_dict:
                obj = global_dict[name]
                if isinstance(obj, (Basic, type)) or callable(obj):
                    result.append((token.NAME, name))
                    continue

            # try to resolve known alternative unit name
            try:
                used_name = inv_name_alternatives[str(name)]
            except KeyError:
                # if we don't know this name it's a user-defined unit name
                # so we should create a new symbol for it
                used_name = str(name)

            result.extend(
                [
                    (token.NAME, "Symbol"),
                    (token.OP, "("),
                    (token.NAME, repr(used_name)),
                    (token.OP, ","),
                    (token.NAME, "positive"),
                    (token.OP, "="),
                    (token.NAME, "True"),
                    (token.OP, ")"),
                ]
            )
        else:
            result.append((tokNum, tokVal))

    return result
def normalize_to_range(values, minimum=0.0, maximum=1.0):
    """
    This function normalizes the values of a list to a specified range and
    returns the original object if the values are not of type integer or
    float.
    """
    normalized_values = []
    minimum_value = min(values)
    maximum_value = max(values)
    for value in values:
        numerator = value - minimum_value
        denominator = maximum_value - minimum_value
        value_normalized = (maximum - minimum) * numerator / denominator + minimum
        normalized_values.append(value_normalized)
    return normalized_values
This function normalizes the values of a list to a specified range and
returns the original object if the values are not of type integer or float.
Below is the instruction that describes the task:
### Input:
This function normalizes the values of a list to a specified range and
returns the original object if the values are not of type integer or float.
### Response:
def normalize_to_range(values, minimum=0.0, maximum=1.0):
    """
    This function normalizes the values of a list to a specified range and
    returns the original object if the values are not of type integer or
    float.
    """
    normalized_values = []
    minimum_value = min(values)
    maximum_value = max(values)
    for value in values:
        numerator = value - minimum_value
        denominator = maximum_value - minimum_value
        value_normalized = (maximum - minimum) * numerator / denominator + minimum
        normalized_values.append(value_normalized)
    return normalized_values
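A worked example for the rescaling above: the input minimum maps to the target minimum, the maximum to the target maximum, and interior values keep their relative position.

# min is 2.0 and max is 6.0, so 4.0 maps to the midpoint of the target range.
print(normalize_to_range([2.0, 4.0, 6.0]))          # [0.0, 0.5, 1.0]
print(normalize_to_range([2.0, 4.0, 6.0], 10, 20))  # [10.0, 15.0, 20.0]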
def _parse(self): '''parse is the base function for parsing the Dockerfile, and extracting elements into the correct data structures. Everything is parsed into lists or dictionaries that can be assembled again on demand. Environment: Since Docker also exports environment as we go, we add environment to the environment section and install Labels: include anything that is a LABEL, ARG, or (deprecated) maintainer. Add/Copy: are treated the same ''' parser = None previous = None for line in self.lines: parser = self._get_mapping(line, parser, previous) # Parse it, if appropriate if parser: parser(line) previous = line
parse is the base function for parsing the Dockerfile, and extracting elements into the correct data structures. Everything is parsed into lists or dictionaries that can be assembled again on demand. Environment: Since Docker also exports environment as we go, we add environment to the environment section and install Labels: include anything that is a LABEL, ARG, or (deprecated) maintainer. Add/Copy: are treated the same
Below is the instruction that describes the task:
### Input:
parse is the base function for parsing the Dockerfile,
and extracting elements into the correct data structures.
Everything is parsed into lists or dictionaries that can be assembled
again on demand.

Environment: Since Docker also exports environment as we go,
             we add environment to the environment section and install

Labels: include anything that is a LABEL, ARG, or (deprecated) maintainer.

Add/Copy: are treated the same
### Response:
def _parse(self):
    '''parse is the base function for parsing the Dockerfile,
    and extracting elements into the correct data structures.
    Everything is parsed into lists or dictionaries that can be assembled
    again on demand.

    Environment: Since Docker also exports environment as we go,
                 we add environment to the environment section and install

    Labels: include anything that is a LABEL, ARG, or (deprecated) maintainer.

    Add/Copy: are treated the same
    '''
    parser = None
    previous = None

    for line in self.lines:
        parser = self._get_mapping(line, parser, previous)

        # Parse it, if appropriate
        if parser:
            parser(line)

        previous = line
def share_file(comm, path): """ Copies the file from rank 0 to all other ranks Puts it in the same place on all machines """ localrank, _ = get_local_rank_size(comm) if comm.Get_rank() == 0: with open(path, 'rb') as fh: data = fh.read() comm.bcast(data) else: data = comm.bcast(None) if localrank == 0: os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'wb') as fh: fh.write(data) comm.Barrier()
Copies the file from rank 0 to all other ranks Puts it in the same place on all machines
Below is the instruction that describes the task:
### Input:
Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines
### Response:
def share_file(comm, path):
    """
    Copies the file from rank 0 to all other ranks
    Puts it in the same place on all machines
    """
    localrank, _ = get_local_rank_size(comm)
    if comm.Get_rank() == 0:
        with open(path, 'rb') as fh:
            data = fh.read()
        comm.bcast(data)
    else:
        data = comm.bcast(None)
        if localrank == 0:
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, 'wb') as fh:
                fh.write(data)
    comm.Barrier()
def update_model_cache(table_name): """ Updates model cache by generating a new key for the model """ model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex) model_cache_backend.share_model_cache_info(model_cache_info)
Updates model cache by generating a new key for the model
Below is the instruction that describes the task:
### Input:
Updates model cache by generating a new key for the model
### Response:
def update_model_cache(table_name):
    """
    Updates model cache by generating a new key for the model
    """
    model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
    model_cache_backend.share_model_cache_info(model_cache_info)
def plot(self, series, series_diff=None, label='', color=None, style=None): ''' :param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param pandas.Series series_diff: The series representing the diff that will be plotted in the bottom part. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot. ''' color = self.get_color(color) if series_diff is None and self.autodiffs: series_diff = series.diff() if self.stacked: series += self.running_sum self.ax1.fill_between(series.index, self.running_sum, series, facecolor=ALPHAS[color]) self.running_sum = series self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05)) series.plot(label=label, c=COLORS[color], linewidth=2, style=style, ax=self.ax1) if series_diff is not None: series_diff.plot(label=label, c=COLORS[color], linewidth=2, style=style, ax=self.ax2)
:param pandas.Series series: The series to be plotted, all values must be positive if stacked is True. :param pandas.Series series_diff: The series representing the diff that will be plotted in the bottom part. :param string label: The label for the series. :param integer/string color: Color for the plot. Can be an index for the color from COLORS or a key(string) from CNAMES. :param string style: Style forwarded to the plt.plot.
Below is the instruction that describes the task:
### Input:
:param pandas.Series series: The series to be plotted, all values must
    be positive if stacked is True.
:param pandas.Series series_diff: The series representing the diff that
    will be plotted in the bottom part.
:param string label: The label for the series.
:param integer/string color: Color for the plot. Can be an index for
    the color from COLORS or a key(string) from CNAMES.
:param string style: Style forwarded to the plt.plot.
### Response:
def plot(self, series, series_diff=None, label='', color=None, style=None):
    '''
    :param pandas.Series series: The series to be plotted, all values must
        be positive if stacked is True.
    :param pandas.Series series_diff: The series representing the diff that
        will be plotted in the bottom part.
    :param string label: The label for the series.
    :param integer/string color: Color for the plot. Can be an index for
        the color from COLORS or a key(string) from CNAMES.
    :param string style: Style forwarded to the plt.plot.
    '''
    color = self.get_color(color)
    if series_diff is None and self.autodiffs:
        series_diff = series.diff()
    if self.stacked:
        series += self.running_sum
        self.ax1.fill_between(series.index, self.running_sum, series,
                              facecolor=ALPHAS[color])
        self.running_sum = series
        self.ax1.set_ylim(bottom=0, top=int(series.max() * 1.05))
    series.plot(label=label, c=COLORS[color], linewidth=2, style=style,
                ax=self.ax1)
    if series_diff is not None:
        series_diff.plot(label=label, c=COLORS[color], linewidth=2,
                         style=style, ax=self.ax2)
def apply_cmap(zs, cmap, vmin=None, vmax=None):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
    and/or vmax are passed, they are used to scale z.
    '''
    if vmin is None:
        vmin = np.min(zs)
    if vmax is None:
        vmax = np.max(zs)
    if pimms.is_str(cmap):
        cmap = matplotlib.cm.get_cmap(cmap)
    return cmap((zs - vmin) / (vmax - vmin))
apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
and/or vmax are passed, they are used to scale z.
Below is the instruction that describes the task:
### Input:
apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
and/or vmax are passed, they are used to scale z.
### Response:
def apply_cmap(zs, cmap, vmin=None, vmax=None):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
    and/or vmax are passed, they are used to scale z.
    '''
    if vmin is None:
        vmin = np.min(zs)
    if vmax is None:
        vmax = np.max(zs)
    if pimms.is_str(cmap):
        cmap = matplotlib.cm.get_cmap(cmap)
    return cmap((zs - vmin) / (vmax - vmin))
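A usage sketch, assuming the module's own imports (numpy, matplotlib, pimms) are available; the colormap name is illustrative:

import numpy as np

# Map three scalar values onto the 'hot' colormap; the result has
# one RGBA row per input value.
rgba = apply_cmap(np.array([0.0, 0.5, 1.0]), 'hot')
print(rgba.shape)  # (3, 4)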
def add_log_file(logger, log_file, global_log_file=False):
    """
    Add a log file to this logger. If global_log_file is true, log_file
    will be handed to the root logger, otherwise it will only be used by
    this particular logger.

    Parameters
    ----------
    logger :obj:`logging.Logger`
        The logger.
    log_file :obj:`str`
        The path to the log file to log to.
    global_log_file :obj:`bool`
        Whether or not to use the given log_file for this particular logger
        or for the root logger.
    """
    if global_log_file:
        add_root_log_file(log_file)
    else:
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter(
            '%(asctime)s %(name)-10s %(levelname)-8s %(message)s',
            datefmt='%m-%d %H:%M:%S')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
Add a log file to this logger. If global_log_file is true, log_file
will be handed to the root logger, otherwise it will only be used by
this particular logger.

Parameters
----------
logger :obj:`logging.Logger`
    The logger.
log_file :obj:`str`
    The path to the log file to log to.
global_log_file :obj:`bool`
    Whether or not to use the given log_file for this particular logger
    or for the root logger.
Below is the instruction that describes the task:
### Input:
Add a log file to this logger. If global_log_file is true, log_file
will be handed to the root logger, otherwise it will only be used by
this particular logger.

Parameters
----------
logger :obj:`logging.Logger`
    The logger.
log_file :obj:`str`
    The path to the log file to log to.
global_log_file :obj:`bool`
    Whether or not to use the given log_file for this particular logger
    or for the root logger.
### Response:
def add_log_file(logger, log_file, global_log_file=False):
    """
    Add a log file to this logger. If global_log_file is true, log_file
    will be handed to the root logger, otherwise it will only be used by
    this particular logger.

    Parameters
    ----------
    logger :obj:`logging.Logger`
        The logger.
    log_file :obj:`str`
        The path to the log file to log to.
    global_log_file :obj:`bool`
        Whether or not to use the given log_file for this particular logger
        or for the root logger.
    """
    if global_log_file:
        add_root_log_file(log_file)
    else:
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter(
            '%(asctime)s %(name)-10s %(levelname)-8s %(message)s',
            datefmt='%m-%d %H:%M:%S')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
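A usage sketch for the non-global branch; the logger name and path are illustrative:

import logging
import os
import tempfile

logger = logging.getLogger('demo')
log_path = os.path.join(tempfile.gettempdir(), 'demo.log')
add_log_file(logger, log_path)  # file handler attached to this logger only
logger.warning('this line goes to %s', log_path)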
def run_hooks(self, name, event=None, context=None):
    """
    Runs plugin hooks for each registered plugin.
    """
    hooks = {
        "pre:setup": lambda p: p.pre_setup(self),
        "post:setup": lambda p: p.post_setup(self),
        "pre:invoke": lambda p: p.pre_invoke(event, context),
        "post:invoke": lambda p: p.post_invoke(event, context),
        "pre:report": lambda p: p.pre_report(self.report),
        "post:report": lambda p: p.post_report(self.report),
    }

    if name in hooks:
        for p in self.plugins:
            if p.enabled:
                try:
                    hooks[name](p)
                except Exception as e:
                    logger.error(
                        "IOpipe plugin %s hook raised error: %s" % (name, str(e))
                    )
                    logger.exception(e)
Runs plugin hooks for each registered plugin.
Below is the instruction that describes the task:
### Input:
Runs plugin hooks for each registered plugin.
### Response:
def run_hooks(self, name, event=None, context=None):
    """
    Runs plugin hooks for each registered plugin.
    """
    hooks = {
        "pre:setup": lambda p: p.pre_setup(self),
        "post:setup": lambda p: p.post_setup(self),
        "pre:invoke": lambda p: p.pre_invoke(event, context),
        "post:invoke": lambda p: p.post_invoke(event, context),
        "pre:report": lambda p: p.pre_report(self.report),
        "post:report": lambda p: p.post_report(self.report),
    }

    if name in hooks:
        for p in self.plugins:
            if p.enabled:
                try:
                    hooks[name](p)
                except Exception as e:
                    logger.error(
                        "IOpipe plugin %s hook raised error: %s" % (name, str(e))
                    )
                    logger.exception(e)
def get_next_rngruns(self): """ Yield the next RngRun values that can be used in this campaign. """ available_runs = [result['params']['RngRun'] for result in self.get_results()] yield from DatabaseManager.get_next_values(available_runs)
Yield the next RngRun values that can be used in this campaign.
Below is the instruction that describes the task:
### Input:
Yield the next RngRun values that can be used in this campaign.
### Response:
def get_next_rngruns(self):
    """
    Yield the next RngRun values that can be used in this campaign.
    """
    available_runs = [result['params']['RngRun'] for result in
                      self.get_results()]
    yield from DatabaseManager.get_next_values(available_runs)
def _set_vlan_and_bd(self, v, load=False): """ Setter method for vlan_and_bd, mapped from YANG variable /overlay_gateway/map/vlan_and_bd (container) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_and_bd is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_and_bd() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name="vlan-and-bd", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vlan_and_bd must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name="vlan-and-bd", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""", }) self.__vlan_and_bd = t if hasattr(self, '_set'): self._set()
Setter method for vlan_and_bd, mapped from YANG variable /overlay_gateway/map/vlan_and_bd (container) If this variable is read-only (config: false) in the source YANG file, then _set_vlan_and_bd is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vlan_and_bd() directly.
Below is the instruction that describes the task:
### Input:
Setter method for vlan_and_bd, mapped from YANG variable /overlay_gateway/map/vlan_and_bd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan_and_bd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan_and_bd() directly.
### Response:
def _set_vlan_and_bd(self, v, load=False):
    """
    Setter method for vlan_and_bd, mapped from YANG variable /overlay_gateway/map/vlan_and_bd (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vlan_and_bd is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vlan_and_bd() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name="vlan-and-bd", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """vlan_and_bd must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=vlan_and_bd.vlan_and_bd, is_container='container', presence=False, yang_name="vlan-and-bd", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify VLAN/BD to VNI mappings for the Overlay Gateway.', u'cli-drop-node-name': None, u'callpoint': u'autoVlanToVNIMappingCallPoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
        })

    self.__vlan_and_bd = t
    if hasattr(self, '_set'):
        self._set()
def _cleanRecursive(self, subSelf):
    """
    Delete all NestedOrderedDict that do not have any entries.
    """
    for key, item in list(subSelf.items()):
        if self.isNestedDict(item):
            if not item:
                subSelf.pop(key)
            else:
                self._cleanRecursive(item)
Delete all NestedOrderedDict that do not have any entries.
Below is the instruction that describes the task:
### Input:
Delete all NestedOrderedDict that do not have any entries.
### Response:
def _cleanRecursive(self, subSelf):
    """
    Delete all NestedOrderedDict that do not have any entries.
    """
    for key, item in list(subSelf.items()):
        if self.isNestedDict(item):
            if not item:
                subSelf.pop(key)
            else:
                self._cleanRecursive(item)
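A standalone sketch of the same pruning pass over plain OrderedDicts; the original method instead dispatches on self.isNestedDict(item):

from collections import OrderedDict

def clean_recursive(tree):
    # Pop empty nested dicts; recurse into non-empty ones.
    for key, item in list(tree.items()):
        if isinstance(item, OrderedDict):
            if not item:
                tree.pop(key)
            else:
                clean_recursive(item)

tree = OrderedDict(a=OrderedDict(), b=OrderedDict(c=1))
clean_recursive(tree)
print(tree)  # OrderedDict([('b', OrderedDict([('c', 1)]))])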
def resize_pty(self, width=80, height=24): """ Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous L{get_pty} call. @param width: new width (in characters) of the terminal screen @type width: int @param height: new height (in characters) of the terminal screen @type height: int @raise SSHException: if the request was rejected or the channel was closed """ if self.closed or self.eof_received or self.eof_sent or not self.active: raise SSHException('Channel is not open') m = Message() m.add_byte(chr(MSG_CHANNEL_REQUEST)) m.add_int(self.remote_chanid) m.add_string('window-change') m.add_boolean(True) m.add_int(width) m.add_int(height) m.add_int(0).add_int(0) self._event_pending() self.transport._send_user_message(m) self._wait_for_event()
Resize the pseudo-terminal. This can be used to change the width and height of the terminal emulation created in a previous L{get_pty} call. @param width: new width (in characters) of the terminal screen @type width: int @param height: new height (in characters) of the terminal screen @type height: int @raise SSHException: if the request was rejected or the channel was closed
Below is the instruction that describes the task:
### Input:
Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.

@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int

@raise SSHException: if the request was rejected or the channel was
    closed
### Response:
def resize_pty(self, width=80, height=24):
    """
    Resize the pseudo-terminal. This can be used to change the width and
    height of the terminal emulation created in a previous L{get_pty} call.

    @param width: new width (in characters) of the terminal screen
    @type width: int
    @param height: new height (in characters) of the terminal screen
    @type height: int

    @raise SSHException: if the request was rejected or the channel was
        closed
    """
    if self.closed or self.eof_received or self.eof_sent or not self.active:
        raise SSHException('Channel is not open')
    m = Message()
    m.add_byte(chr(MSG_CHANNEL_REQUEST))
    m.add_int(self.remote_chanid)
    m.add_string('window-change')
    m.add_boolean(True)
    m.add_int(width)
    m.add_int(height)
    m.add_int(0).add_int(0)
    self._event_pending()
    self.transport._send_user_message(m)
    self._wait_for_event()
def region_est_hull(self, level=0.95, modelparam_slice=None):
    """
    Estimates a credible region over models by taking the convex hull of
    a credible subset of particles.

    :param float level: The desired credibility level (see
        :meth:`SMCUpdater.est_credible_region`).
    :param slice modelparam_slice: Slice over which model parameters
        to consider.

    :return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
        vertices of all of the faces on the exterior of the convex hull, and
        ``vertices`` is a list of all vertices on the exterior of the
        convex hull.
    :rtype: ``faces`` is a ``numpy.ndarray`` with shape
        ``(n_face, n_mps, n_mps)`` and indices
        ``(idx_face, idx_vertex, idx_mps)`` where ``n_mps`` corresponds to
        the size of ``modelparam_slice``.
        ``vertices`` is a ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.
    """
    points = self.est_credible_region(
        level=level,
        modelparam_slice=modelparam_slice
    )
    hull = ConvexHull(points)

    return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())]
Estimates a credible region over models by taking the convex hull of
a credible subset of particles.

:param float level: The desired credibility level (see
    :meth:`SMCUpdater.est_credible_region`).
:param slice modelparam_slice: Slice over which model parameters
    to consider.

:return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
    vertices of all of the faces on the exterior of the convex hull, and
    ``vertices`` is a list of all vertices on the exterior of the
    convex hull.
:rtype: ``faces`` is a ``numpy.ndarray`` with shape
    ``(n_face, n_mps, n_mps)`` and indices
    ``(idx_face, idx_vertex, idx_mps)`` where ``n_mps`` corresponds to
    the size of ``modelparam_slice``.
    ``vertices`` is a ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.
Below is the instruction that describes the task:
### Input:
Estimates a credible region over models by taking the convex hull of
a credible subset of particles.

:param float level: The desired credibility level (see
    :meth:`SMCUpdater.est_credible_region`).
:param slice modelparam_slice: Slice over which model parameters
    to consider.

:return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
    vertices of all of the faces on the exterior of the convex hull, and
    ``vertices`` is a list of all vertices on the exterior of the
    convex hull.
:rtype: ``faces`` is a ``numpy.ndarray`` with shape
    ``(n_face, n_mps, n_mps)`` and indices
    ``(idx_face, idx_vertex, idx_mps)`` where ``n_mps`` corresponds to
    the size of ``modelparam_slice``.
    ``vertices`` is a ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.
### Response:
def region_est_hull(self, level=0.95, modelparam_slice=None):
    """
    Estimates a credible region over models by taking the convex hull of
    a credible subset of particles.

    :param float level: The desired credibility level (see
        :meth:`SMCUpdater.est_credible_region`).
    :param slice modelparam_slice: Slice over which model parameters
        to consider.

    :return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
        vertices of all of the faces on the exterior of the convex hull, and
        ``vertices`` is a list of all vertices on the exterior of the
        convex hull.
    :rtype: ``faces`` is a ``numpy.ndarray`` with shape
        ``(n_face, n_mps, n_mps)`` and indices
        ``(idx_face, idx_vertex, idx_mps)`` where ``n_mps`` corresponds to
        the size of ``modelparam_slice``.
        ``vertices`` is a ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.
    """
    points = self.est_credible_region(
        level=level,
        modelparam_slice=modelparam_slice
    )
    hull = ConvexHull(points)

    return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())]
def _split_path(xj_path):
    """Extract the last piece of an XJPath.

    :param str xj_path: An XJPath expression.
    :rtype: tuple[str|None, str]
    :return: A tuple where the first element is a root XJPath and the
        second is the last piece of the key.
    """
    res = xj_path.rsplit('.', 1)
    root_key = res[0]
    if len(res) > 1:
        return root_key, res[1]
    else:
        if root_key and root_key != '.':
            return None, root_key
        else:
            raise XJPathError('Path cannot be empty', (xj_path,))
Extract the last piece of an XJPath.

:param str xj_path: An XJPath expression.
:rtype: tuple[str|None, str]
:return: A tuple where the first element is a root XJPath and the
    second is the last piece of the key.
Below is the instruction that describes the task:
### Input:
Extract the last piece of an XJPath.

:param str xj_path: An XJPath expression.
:rtype: tuple[str|None, str]
:return: A tuple where the first element is a root XJPath and the
    second is the last piece of the key.
### Response:
def _split_path(xj_path):
    """Extract the last piece of an XJPath.

    :param str xj_path: An XJPath expression.
    :rtype: tuple[str|None, str]
    :return: A tuple where the first element is a root XJPath and the
        second is the last piece of the key.
    """
    res = xj_path.rsplit('.', 1)
    root_key = res[0]
    if len(res) > 1:
        return root_key, res[1]
    else:
        if root_key and root_key != '.':
            return None, root_key
        else:
            raise XJPathError('Path cannot be empty', (xj_path,))
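A behavior sketch for the splitter above, assuming _split_path and XJPathError are in scope:

print(_split_path('a.b.c'))  # ('a.b', 'c')
print(_split_path('key'))    # (None, 'key')
# _split_path('') raises XJPathError('Path cannot be empty', ('',))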
def _configure_pool(kwargs): """Configures the pool and keeps the storage service""" _pool_single_run.storage_service = kwargs['storage_service'] _configure_niceness(kwargs) _configure_logging(kwargs, extract=False)
Configures the pool and keeps the storage service
Below is the the instruction that describes the task: ### Input: Configures the pool and keeps the storage service ### Response: def _configure_pool(kwargs): """Configures the pool and keeps the storage service""" _pool_single_run.storage_service = kwargs['storage_service'] _configure_niceness(kwargs) _configure_logging(kwargs, extract=False)
def _numeric_coercion(value,
                      coercion_function = None,
                      allow_empty = False,
                      minimum = None,
                      maximum = None):
    """Validate that ``value`` is numeric and coerce using ``coercion_function``.

    :param value: The value to validate.

    :param coercion_function: The function to use to coerce ``value`` to the
      desired type.
    :type coercion_function: callable

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is :obj:`None <python:None>`. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is :obj:`None <python:None>`. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: the type returned by ``coercion_function``

    :raises CoercionFunctionEmptyError: if ``coercion_function`` is empty
    :raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
      ``allow_empty`` is ``False``
    :raises CannotCoerceError: if ``coercion_function`` raises a
      :class:`ValueError <python:ValueError>`,
      :class:`TypeError <python:TypeError>`,
      :class:`AttributeError <python:AttributeError>`,
      :class:`IndexError <python:IndexError>`, or
      :class:`SyntaxError <python:SyntaxError>`

    """
    if coercion_function is None:
        raise errors.CoercionFunctionEmptyError('coercion_function cannot be empty')
    elif not hasattr(coercion_function, '__call__'):
        raise errors.NotCallableError('coercion_function must be callable')

    value = numeric(value,                                                    # pylint: disable=E1123
                    allow_empty = allow_empty,
                    minimum = minimum,
                    maximum = maximum,
                    force_run = True)

    if value is not None:
        try:
            value = coercion_function(value)
        except (ValueError, TypeError, AttributeError, IndexError, SyntaxError):
            raise errors.CannotCoerceError(
                'cannot coerce value (%s) to desired type' % value
            )

    return value
Validate that ``value`` is numeric and coerce using ``coercion_function``.

:param value: The value to validate.

:param coercion_function: The function to use to coerce ``value`` to the
  desired type.
:type coercion_function: callable

:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
  ``value`` is :obj:`None <python:None>`. If ``False``, raises a
  :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
  if ``value`` is :obj:`None <python:None>`. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`

:returns: ``value`` / :obj:`None <python:None>`
:rtype: the type returned by ``coercion_function``

:raises CoercionFunctionEmptyError: if ``coercion_function`` is empty
:raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
  ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``coercion_function`` raises a
  :class:`ValueError <python:ValueError>`,
  :class:`TypeError <python:TypeError>`,
  :class:`AttributeError <python:AttributeError>`,
  :class:`IndexError <python:IndexError>`, or
  :class:`SyntaxError <python:SyntaxError>`
Below is the instruction that describes the task:
### Input:
Validate that ``value`` is numeric and coerce using ``coercion_function``.

:param value: The value to validate.

:param coercion_function: The function to use to coerce ``value`` to the
  desired type.
:type coercion_function: callable

:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
  ``value`` is :obj:`None <python:None>`. If ``False``, raises a
  :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
  if ``value`` is :obj:`None <python:None>`. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`

:returns: ``value`` / :obj:`None <python:None>`
:rtype: the type returned by ``coercion_function``

:raises CoercionFunctionEmptyError: if ``coercion_function`` is empty
:raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
  ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``coercion_function`` raises a
  :class:`ValueError <python:ValueError>`,
  :class:`TypeError <python:TypeError>`,
  :class:`AttributeError <python:AttributeError>`,
  :class:`IndexError <python:IndexError>`, or
  :class:`SyntaxError <python:SyntaxError>`
### Response:
def _numeric_coercion(value,
                      coercion_function = None,
                      allow_empty = False,
                      minimum = None,
                      maximum = None):
    """Validate that ``value`` is numeric and coerce using ``coercion_function``.

    :param value: The value to validate.

    :param coercion_function: The function to use to coerce ``value`` to the
      desired type.
    :type coercion_function: callable

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is :obj:`None <python:None>`. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is :obj:`None <python:None>`. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: the type returned by ``coercion_function``

    :raises CoercionFunctionEmptyError: if ``coercion_function`` is empty
    :raises EmptyValueError: if ``value`` is :obj:`None <python:None>` and
      ``allow_empty`` is ``False``
    :raises CannotCoerceError: if ``coercion_function`` raises a
      :class:`ValueError <python:ValueError>`,
      :class:`TypeError <python:TypeError>`,
      :class:`AttributeError <python:AttributeError>`,
      :class:`IndexError <python:IndexError>`, or
      :class:`SyntaxError <python:SyntaxError>`

    """
    if coercion_function is None:
        raise errors.CoercionFunctionEmptyError('coercion_function cannot be empty')
    elif not hasattr(coercion_function, '__call__'):
        raise errors.NotCallableError('coercion_function must be callable')

    value = numeric(value,                                                    # pylint: disable=E1123
                    allow_empty = allow_empty,
                    minimum = minimum,
                    maximum = maximum,
                    force_run = True)

    if value is not None:
        try:
            value = coercion_function(value)
        except (ValueError, TypeError, AttributeError, IndexError, SyntaxError):
            raise errors.CannotCoerceError(
                'cannot coerce value (%s) to desired type' % value
            )

    return value
def removeUnreferencedIDs(referencedIDs, identifiedElements): """ Removes the unreferenced ID attributes. Returns the number of ID attributes removed """ global _num_ids_removed keepTags = ['font'] num = 0 for id in identifiedElements: node = identifiedElements[id] if id not in referencedIDs and node.nodeName not in keepTags: node.removeAttribute('id') _num_ids_removed += 1 num += 1 return num
Removes the unreferenced ID attributes. Returns the number of ID attributes removed
Below is the the instruction that describes the task: ### Input: Removes the unreferenced ID attributes. Returns the number of ID attributes removed ### Response: def removeUnreferencedIDs(referencedIDs, identifiedElements): """ Removes the unreferenced ID attributes. Returns the number of ID attributes removed """ global _num_ids_removed keepTags = ['font'] num = 0 for id in identifiedElements: node = identifiedElements[id] if id not in referencedIDs and node.nodeName not in keepTags: node.removeAttribute('id') _num_ids_removed += 1 num += 1 return num
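A self-contained way to exercise this with the standard library's `xml.dom.minidom`, assuming the function above (and the module-level `_num_ids_removed` counter it updates) lives in the same module as the snippet:

```python
from xml.dom import minidom

_num_ids_removed = 0  # counter the function above expects at module level

doc = minidom.parseString(
    '<svg><rect id="a"/><font id="f"/><circle id="b"/></svg>')
identified = {el.getAttribute('id'): el
              for el in doc.getElementsByTagName('*')
              if el.getAttribute('id')}

removed = removeUnreferencedIDs({'b'}, identified)
print(removed)  # 1 -- 'a' is dropped; 'f' survives (font) and 'b' is referenced
```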
def _load(self):
    """
    Load and return the file contents.

    :return: file contents
    :raises: NotFoundError if file not found
    """
    if self.is_exists():
        with open(self._ref, "rb") as handle:
            return handle.read()
    raise NotFoundError("File %s not found" % self._ref)
Load and return the file contents.

:return: file contents
:raises: NotFoundError if file not found
Below is the instruction that describes the task:
### Input:
Load and return the file contents.

:return: file contents
:raises: NotFoundError if file not found
### Response:
def _load(self):
    """
    Load and return the file contents.

    :return: file contents
    :raises: NotFoundError if file not found
    """
    if self.is_exists():
        with open(self._ref, "rb") as handle:
            return handle.read()
    raise NotFoundError("File %s not found" % self._ref)
def do_your_job(self): """ the goal of the explore agent is to move to the target while avoiding blockages on the grid. This function is messy and needs to be looked at. It currently has a bug in that the backtrack oscillates so need a new method of doing this - probably checking if previously backtracked in that direction for those coords, ie keep track of cells visited and number of times visited? """ y,x = self.get_intended_direction() # first find out where we should go if self.target_x == self.current_x and self.target_y == self.current_y: #print(self.name + " : TARGET ACQUIRED") if len(self.results) == 0: self.results.append("TARGET ACQUIRED") self.lg_mv(2, self.name + ": TARGET ACQUIRED" ) return self.num_steps += 1 # first try is to move on the x axis in a simple greedy search accessible = ['\\', '-', '|', '/', '.'] # randomly move in Y direction instead of X if all paths clear if y != 0 and x != 0 and self.backtrack == [0,0]: if random.randint(1,10) > 6: if self.grd.get_tile(self.current_y + y, self.current_x) in accessible: self.current_y += y self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps) ) return if x == 1: if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible: self.current_x += 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West" ) return elif x == -1: if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible: self.current_x -= 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East" ) return elif y == 1: if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible: self.current_y += 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South" ) return elif y == -1: if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible: self.current_y -= 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North") return self.grd.set_tile(self.start_y, self.start_x, 'A') self.grd.save(os.path.join(os.getcwd(), 'agent.txt'))
the goal of the explore agent is to move to the target while avoiding blockages on the grid. This function is messy and needs to be looked at. It currently has a bug in that the backtrack oscillates so need a new method of doing this - probably checking if previously backtracked in that direction for those coords, ie keep track of cells visited and number of times visited?
Below is the the instruction that describes the task: ### Input: the goal of the explore agent is to move to the target while avoiding blockages on the grid. This function is messy and needs to be looked at. It currently has a bug in that the backtrack oscillates so need a new method of doing this - probably checking if previously backtracked in that direction for those coords, ie keep track of cells visited and number of times visited? ### Response: def do_your_job(self): """ the goal of the explore agent is to move to the target while avoiding blockages on the grid. This function is messy and needs to be looked at. It currently has a bug in that the backtrack oscillates so need a new method of doing this - probably checking if previously backtracked in that direction for those coords, ie keep track of cells visited and number of times visited? """ y,x = self.get_intended_direction() # first find out where we should go if self.target_x == self.current_x and self.target_y == self.current_y: #print(self.name + " : TARGET ACQUIRED") if len(self.results) == 0: self.results.append("TARGET ACQUIRED") self.lg_mv(2, self.name + ": TARGET ACQUIRED" ) return self.num_steps += 1 # first try is to move on the x axis in a simple greedy search accessible = ['\\', '-', '|', '/', '.'] # randomly move in Y direction instead of X if all paths clear if y != 0 and x != 0 and self.backtrack == [0,0]: if random.randint(1,10) > 6: if self.grd.get_tile(self.current_y + y, self.current_x) in accessible: self.current_y += y self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps) ) return if x == 1: if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible: self.current_x += 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West" ) return elif x == -1: if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible: self.current_x -= 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East" ) return elif y == 1: if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible: self.current_y += 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South" ) return elif y == -1: if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible: self.current_y -= 1 self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North") return self.grd.set_tile(self.start_y, self.start_x, 'A') self.grd.save(os.path.join(os.getcwd(), 'agent.txt'))
def delete_payment_card_by_id(cls, payment_card_id, **kwargs): """Delete PaymentCard Delete an instance of PaymentCard by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_card_by_id(payment_card_id, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs) else: (data) = cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs) return data
Delete PaymentCard Delete an instance of PaymentCard by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_card_by_id(payment_card_id, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Delete PaymentCard Delete an instance of PaymentCard by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_card_by_id(payment_card_id, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to delete. (required) :return: None If the method is called asynchronously, returns the request thread. ### Response: def delete_payment_card_by_id(cls, payment_card_id, **kwargs): """Delete PaymentCard Delete an instance of PaymentCard by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_card_by_id(payment_card_id, async=True) >>> result = thread.get() :param async bool :param str payment_card_id: ID of paymentCard to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs) else: (data) = cls._delete_payment_card_by_id_with_http_info(payment_card_id, **kwargs) return data
def set_account_username(self, account, old_username, new_username): """ Account's username was changed. """ self._delete_account(account, old_username) self._save_account(account, new_username)
Account's username was changed.
Below is the the instruction that describes the task: ### Input: Account's username was changed. ### Response: def set_account_username(self, account, old_username, new_username): """ Account's username was changed. """ self._delete_account(account, old_username) self._save_account(account, new_username)
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(FileOutputSink, self).fix_config(options) opt = "output" if opt not in options: options[opt] = "." if opt not in self.help: self.help[opt] = "The file to write to (string)." return options
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
Below is the the instruction that describes the task: ### Input: Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict ### Response: def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(FileOutputSink, self).fix_config(options) opt = "output" if opt not in options: options[opt] = "." if opt not in self.help: self.help[opt] = "The file to write to (string)." return options
def fix_path(path):
    """
    Fix Windows paths. Linux paths will remain unaltered

    :param str path: The path to be fixed
    :return: The fixed path
    :rtype: str
    """
    if '\\' in path:
        path = path.replace('\\', '/')
    path = os.path.normpath(path)
    return path
Fix Windows paths. Linux paths will remain unaltered

:param str path: The path to be fixed
:return: The fixed path
:rtype: str
Below is the instruction that describes the task:
### Input:
Fix Windows paths. Linux paths will remain unaltered

:param str path: The path to be fixed
:return: The fixed path
:rtype: str
### Response:
def fix_path(path):
    """
    Fix Windows paths. Linux paths will remain unaltered

    :param str path: The path to be fixed
    :return: The fixed path
    :rtype: str
    """
    if '\\' in path:
        path = path.replace('\\', '/')
    path = os.path.normpath(path)
    return path
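For example, on a POSIX system (where `os.path.normpath` leaves forward slashes alone), assuming `fix_path` above is in scope:

```python
import os  # already required by fix_path

print(fix_path('C:\\Users\\demo\\data.txt'))  # C:/Users/demo/data.txt
print(fix_path('already/posix/./style'))      # already/posix/style
```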
def getPluginActions(self, index): """Return actions from plug-in at `index` Arguments: index (int): Index at which item is located in model """ index = self.data["proxies"]["plugin"].mapToSource( self.data["proxies"]["plugin"].index( index, 0, QtCore.QModelIndex())).row() item = self.data["models"]["item"].items[index] # Inject reference to the original index actions = [ dict(action, **{"index": index}) for action in item.actions ] # Context specific actions for action in list(actions): if action["on"] == "failed" and not item.hasError: actions.remove(action) if action["on"] == "succeeded" and not item.succeeded: actions.remove(action) if action["on"] == "processed" and not item.processed: actions.remove(action) if action["on"] == "notProcessed" and item.processed: actions.remove(action) # Discard empty categories, separators remaining_actions = list() index = 0 try: action = actions[index] except IndexError: pass else: while action: try: action = actions[index] except IndexError: break isempty = False if action["__type__"] in ("category", "separator"): try: next_ = actions[index + 1] if next_["__type__"] != "action": isempty = True except IndexError: isempty = True if not isempty: remaining_actions.append(action) index += 1 return remaining_actions
Return actions from plug-in at `index` Arguments: index (int): Index at which item is located in model
Below is the the instruction that describes the task: ### Input: Return actions from plug-in at `index` Arguments: index (int): Index at which item is located in model ### Response: def getPluginActions(self, index): """Return actions from plug-in at `index` Arguments: index (int): Index at which item is located in model """ index = self.data["proxies"]["plugin"].mapToSource( self.data["proxies"]["plugin"].index( index, 0, QtCore.QModelIndex())).row() item = self.data["models"]["item"].items[index] # Inject reference to the original index actions = [ dict(action, **{"index": index}) for action in item.actions ] # Context specific actions for action in list(actions): if action["on"] == "failed" and not item.hasError: actions.remove(action) if action["on"] == "succeeded" and not item.succeeded: actions.remove(action) if action["on"] == "processed" and not item.processed: actions.remove(action) if action["on"] == "notProcessed" and item.processed: actions.remove(action) # Discard empty categories, separators remaining_actions = list() index = 0 try: action = actions[index] except IndexError: pass else: while action: try: action = actions[index] except IndexError: break isempty = False if action["__type__"] in ("category", "separator"): try: next_ = actions[index + 1] if next_["__type__"] != "action": isempty = True except IndexError: isempty = True if not isempty: remaining_actions.append(action) index += 1 return remaining_actions
def transitivity_wu(W):
    '''
    Transitivity is the ratio of 'triangles to triplets' in the network.
    (A classical version of the clustering coefficient).

    Parameters
    ----------
    W : NxN np.ndarray
        weighted undirected connection matrix

    Returns
    -------
    T : float
        transitivity scalar
    '''
    K = np.sum(np.logical_not(W == 0), axis=1)
    ws = cuberoot(W)
    cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
    return np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0)
Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).

Parameters
----------
W : NxN np.ndarray
    weighted undirected connection matrix

Returns
-------
T : float
    transitivity scalar
Below is the instruction that describes the task:
### Input:
Transitivity is the ratio of 'triangles to triplets' in the network.
(A classical version of the clustering coefficient).

Parameters
----------
W : NxN np.ndarray
    weighted undirected connection matrix

Returns
-------
T : float
    transitivity scalar
### Response:
def transitivity_wu(W):
    '''
    Transitivity is the ratio of 'triangles to triplets' in the network.
    (A classical version of the clustering coefficient).

    Parameters
    ----------
    W : NxN np.ndarray
        weighted undirected connection matrix

    Returns
    -------
    T : float
        transitivity scalar
    '''
    K = np.sum(np.logical_not(W == 0), axis=1)
    ws = cuberoot(W)
    cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
    return np.sum(cyc3, axis=0) / np.sum(K * (K - 1), axis=0)
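The `cuberoot` helper belongs to the surrounding module and is not shown here; the sign-preserving stand-in below is an assumption. With it defined alongside `transitivity_wu`, a fully connected triangle gives the expected transitivity of 1:

```python
import numpy as np

def cuberoot(x):
    # sign-preserving cube root, standing in for the module's helper
    return np.sign(x) * np.abs(x) ** (1.0 / 3.0)

W = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
print(transitivity_wu(W))  # 1.0
```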
def pressure_series(self):
    """Returns the atmospheric pressure time series relative to the
    meteostation, in the form of a list of tuples, each one containing a
    timestamp-value pair

    :returns: a list of tuples
    """
    return [(tstamp, \
             self._station_history.get_measurements()[tstamp]['pressure']) \
            for tstamp in self._station_history.get_measurements()]
Returns the atmospheric pressure time series relative to the
meteostation, in the form of a list of tuples, each one containing a
timestamp-value pair

:returns: a list of tuples
Below is the instruction that describes the task:
### Input:
Returns the atmospheric pressure time series relative to the
meteostation, in the form of a list of tuples, each one containing a
timestamp-value pair

:returns: a list of tuples
### Response:
def pressure_series(self):
    """Returns the atmospheric pressure time series relative to the
    meteostation, in the form of a list of tuples, each one containing a
    timestamp-value pair

    :returns: a list of tuples
    """
    return [(tstamp, \
             self._station_history.get_measurements()[tstamp]['pressure']) \
            for tstamp in self._station_history.get_measurements()]
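A runnable harness, assuming the `pressure_series` function above is defined at module level; it only needs an object exposing a `_station_history` with `get_measurements()` (both fakes below are assumptions for illustration):

```python
class FakeHistory:
    def __init__(self, measurements):
        self._measurements = measurements

    def get_measurements(self):
        return self._measurements


class Historian:
    pressure_series = pressure_series  # reuse the function above as a method

    def __init__(self, history):
        self._station_history = history


h = Historian(FakeHistory({1000: {'pressure': 1013.2},
                           2000: {'pressure': 1009.8}}))
print(sorted(h.pressure_series()))  # [(1000, 1013.2), (2000, 1009.8)]
```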
def string_to_datetime(self, obj): """ Decode a datetime string to a datetime object """ if isinstance(obj, six.string_types) and len(obj) == 19: try: return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S") except ValueError: pass if isinstance(obj, six.string_types) and len(obj) > 19: try: return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S.%f") except ValueError: pass if isinstance(obj, six.string_types) and len(obj) == 10: try: return datetime.strptime(obj, "%Y-%m-%d") except ValueError: pass return obj
Decode a datetime string to a datetime object
Below is the the instruction that describes the task: ### Input: Decode a datetime string to a datetime object ### Response: def string_to_datetime(self, obj): """ Decode a datetime string to a datetime object """ if isinstance(obj, six.string_types) and len(obj) == 19: try: return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S") except ValueError: pass if isinstance(obj, six.string_types) and len(obj) > 19: try: return datetime.strptime(obj, "%Y-%m-%dT%H:%M:%S.%f") except ValueError: pass if isinstance(obj, six.string_types) and len(obj) == 10: try: return datetime.strptime(obj, "%Y-%m-%d") except ValueError: pass return obj
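The method ignores `self` beyond binding, so any host class works. A minimal harness, assuming `six` is installed and the method above is defined at module level with its `datetime` import:

```python
class Decoder:
    string_to_datetime = string_to_datetime  # reuse the method above

d = Decoder()
print(d.string_to_datetime('2021-06-01T12:30:00'))         # 2021-06-01 12:30:00
print(d.string_to_datetime('2021-06-01T12:30:00.250000'))  # with microseconds
print(d.string_to_datetime('2021-06-01'))                  # 2021-06-01 00:00:00
print(d.string_to_datetime('not a date'))                  # returned unchanged
```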
def union_q(token):
    """
    Combines all the Q() objects.
    """
    query = Q()
    operation = 'and'
    negation = False

    for t in token:
        if type(t) is ParseResults:  # See tokens recursively
            query &= union_q(t)
        else:
            if t in ('or', 'and'):
                # Set the new op and go to next token
                operation = t
            elif t == '-':
                # Next token needs to be negated
                negation = True
            else:
                # Append the token to the query
                if negation:
                    t = ~t
                if operation == 'or':
                    query |= t
                else:
                    query &= t
    return query
Combines all the Q() objects.
Below is the instruction that describes the task:
### Input:
Combines all the Q() objects.
### Response:
def union_q(token):
    """
    Combines all the Q() objects.
    """
    query = Q()
    operation = 'and'
    negation = False

    for t in token:
        if type(t) is ParseResults:  # See tokens recursively
            query &= union_q(t)
        else:
            if t in ('or', 'and'):
                # Set the new op and go to next token
                operation = t
            elif t == '-':
                # Next token needs to be negated
                negation = True
            else:
                # Append the token to the query
                if negation:
                    t = ~t
                if operation == 'or':
                    query |= t
                else:
                    query &= t
    return query
def is_closed(self):
    """
    Check if the observation table is closed.

    Args:
        None
    Returns:
        tuple (bool, str): True if the observation table is closed and
        False otherwise. If the table is not closed the escaping string
        is returned.
    """
    old_training_data = self.training_data
    self.training_data = {x: [] for x in self.sm_vector}
    for t in self.smi_vector:
        src_state = t[:-1]
        symbol = t[-1:]
        found = False
        for dst_state in self.sm_vector:
            if self.observation_table[dst_state] == self.observation_table[t]:
                self._add_training_data(src_state, dst_state, symbol)
                found = True
                break
        if not found:
            return False, t

    assert self.training_data != old_training_data, \
        "No update happened from previous round. The algo will loop infinitely"
    return True, None
Check if the observation table is closed.

Args:
    None
Returns:
    tuple (bool, str): True if the observation table is closed and
    False otherwise. If the table is not closed the escaping string
    is returned.
Below is the instruction that describes the task:
### Input:
Check if the observation table is closed.

Args:
    None
Returns:
    tuple (bool, str): True if the observation table is closed and
    False otherwise. If the table is not closed the escaping string
    is returned.
### Response:
def is_closed(self):
    """
    Check if the observation table is closed.

    Args:
        None
    Returns:
        tuple (bool, str): True if the observation table is closed and
        False otherwise. If the table is not closed the escaping string
        is returned.
    """
    old_training_data = self.training_data
    self.training_data = {x: [] for x in self.sm_vector}
    for t in self.smi_vector:
        src_state = t[:-1]
        symbol = t[-1:]
        found = False
        for dst_state in self.sm_vector:
            if self.observation_table[dst_state] == self.observation_table[t]:
                self._add_training_data(src_state, dst_state, symbol)
                found = True
                break
        if not found:
            return False, t

    assert self.training_data != old_training_data, \
        "No update happened from previous round. The algo will loop infinitely"
    return True, None
def add_answer_for_student(student_item, vote, rationale):
    """
    Add an answer for a student to the backend

    Args:
        student_item (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
    """
    answers = get_answers_for_student(student_item)
    answers.add_answer(vote, rationale)
    sub_api.create_submission(student_item, {
        ANSWER_LIST_KEY: answers.get_answers_as_list()
    })
Add an answer for a student to the backend

Args:
    student_item (dict): The location of the problem this submission is
        associated with, as defined by a course, student, and item.
    vote (int): the option that the student voted for
    rationale (str): the reason why the student voted for the option
Below is the instruction that describes the task:
### Input:
Add an answer for a student to the backend

Args:
    student_item (dict): The location of the problem this submission is
        associated with, as defined by a course, student, and item.
    vote (int): the option that the student voted for
    rationale (str): the reason why the student voted for the option
### Response:
def add_answer_for_student(student_item, vote, rationale):
    """
    Add an answer for a student to the backend

    Args:
        student_item (dict): The location of the problem this submission is
            associated with, as defined by a course, student, and item.
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
    """
    answers = get_answers_for_student(student_item)
    answers.add_answer(vote, rationale)
    sub_api.create_submission(student_item, {
        ANSWER_LIST_KEY: answers.get_answers_as_list()
    })
def handle_flush_error(cls, exception): """Handle flush error exceptions.""" trace = exception.args[0] m = re.match(cls.MYSQL_FLUSH_ERROR_REGEX, trace) if not m: raise exception entity = m.group('entity') eid = m.group('eid') raise AlreadyExistsError(entity=entity, eid=eid)
Handle flush error exceptions.
Below is the the instruction that describes the task: ### Input: Handle flush error exceptions. ### Response: def handle_flush_error(cls, exception): """Handle flush error exceptions.""" trace = exception.args[0] m = re.match(cls.MYSQL_FLUSH_ERROR_REGEX, trace) if not m: raise exception entity = m.group('entity') eid = m.group('eid') raise AlreadyExistsError(entity=entity, eid=eid)
def _decade_ranges_in_date_range(self, begin_date, end_date):
    """Return a list of decades covered by the date range."""
    begin_dated = begin_date.year // 10
    end_dated = end_date.year // 10
    decades = []
    for d in range(begin_dated, end_dated + 1):
        decades.append('{}-{}'.format(d * 10, d * 10 + 9))
    return decades
Return a list of decades covered by the date range.
Below is the instruction that describes the task:
### Input:
Return a list of decades covered by the date range.
### Response:
def _decade_ranges_in_date_range(self, begin_date, end_date):
    """Return a list of decades covered by the date range."""
    begin_dated = begin_date.year // 10
    end_dated = end_date.year // 10
    decades = []
    for d in range(begin_dated, end_dated + 1):
        decades.append('{}-{}'.format(d * 10, d * 10 + 9))
    return decades
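A quick check, assuming the function above is defined at module level (the `Indexer` host class is a throwaway harness, not part of the original code):

```python
from datetime import date

class Indexer:
    _decade_ranges_in_date_range = _decade_ranges_in_date_range  # reuse above

spans = Indexer()._decade_ranges_in_date_range(date(1987, 5, 1),
                                               date(2003, 9, 30))
print(spans)  # ['1980-1989', '1990-1999', '2000-2009']
```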
def cv_precompute(self, mask, b): ''' Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`. ''' # Get current chunk and mask outliers m1 = self.get_masked_chunk(b) flux = self.fraw[m1] K = GetCovariance(self.kernel, self.kernel_params, self.time[m1], self.fraw_err[m1]) med = np.nanmedian(flux) # Now mask the validation set M = lambda x, axis = 0: np.delete(x, mask, axis=axis) m2 = M(m1) mK = M(M(K, axis=0), axis=1) f = M(flux) - med # Pre-compute the matrices A = [None for i in range(self.pld_order)] B = [None for i in range(self.pld_order)] for n in range(self.pld_order): # Only compute up to the current PLD order if self.lam_idx >= n: X2 = self.X(n, m2) X1 = self.X(n, m1) A[n] = np.dot(X2, X2.T) B[n] = np.dot(X1, X2.T) del X1, X2 if self.transit_model is None: C = 0 else: C = np.zeros((len(m2), len(m2))) mean_transit_model = med * \ np.sum([tm.depth * tm(self.time[m2]) for tm in self.transit_model], axis=0) f -= mean_transit_model for tm in self.transit_model: X2 = tm(self.time[m2]).reshape(-1, 1) C += tm.var_depth * np.dot(X2, X2.T) del X2 return A, B, C, mK, f, m1, m2
Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`.
Below is the the instruction that describes the task: ### Input: Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`. ### Response: def cv_precompute(self, mask, b): ''' Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`. ''' # Get current chunk and mask outliers m1 = self.get_masked_chunk(b) flux = self.fraw[m1] K = GetCovariance(self.kernel, self.kernel_params, self.time[m1], self.fraw_err[m1]) med = np.nanmedian(flux) # Now mask the validation set M = lambda x, axis = 0: np.delete(x, mask, axis=axis) m2 = M(m1) mK = M(M(K, axis=0), axis=1) f = M(flux) - med # Pre-compute the matrices A = [None for i in range(self.pld_order)] B = [None for i in range(self.pld_order)] for n in range(self.pld_order): # Only compute up to the current PLD order if self.lam_idx >= n: X2 = self.X(n, m2) X1 = self.X(n, m1) A[n] = np.dot(X2, X2.T) B[n] = np.dot(X1, X2.T) del X1, X2 if self.transit_model is None: C = 0 else: C = np.zeros((len(m2), len(m2))) mean_transit_model = med * \ np.sum([tm.depth * tm(self.time[m2]) for tm in self.transit_model], axis=0) f -= mean_transit_model for tm in self.transit_model: X2 = tm(self.time[m2]).reshape(-1, 1) C += tm.var_depth * np.dot(X2, X2.T) del X2 return A, B, C, mK, f, m1, m2
def _get_output_nodes(self, output_path, error_path):
    """
    Extracts output nodes from the standard output and standard error
    files.
    """
    from pymatgen.io.nwchem import NwOutput
    from aiida.orm.data.structure import StructureData
    from aiida.orm.data.array.trajectory import TrajectoryData

    ret_dict = []
    nwo = NwOutput(output_path)
    for out in nwo.data:
        molecules = out.pop('molecules', None)
        structures = out.pop('structures', None)
        if molecules:
            structlist = [StructureData(pymatgen_molecule=m) for m in molecules]
            ret_dict.append(('trajectory',
                             TrajectoryData(structurelist=structlist)))
        if structures:
            structlist = [StructureData(pymatgen_structure=s) for s in structures]
            ret_dict.append(('trajectory',
                             TrajectoryData(structurelist=structlist)))
        ret_dict.append(('output', ParameterData(dict=out)))

    # Since ParameterData rewrites its properties (using _set_attr())
    # with keys from the supplied dictionary, ``source`` has to be
    # moved to another key. See issue #9 for details:
    # (https://bitbucket.org/epfl_theos/aiida_epfl/issues/9)
    nwo.job_info['program_source'] = nwo.job_info.pop('source', None)
    ret_dict.append(('job_info', ParameterData(dict=nwo.job_info)))

    return ret_dict
Extracts output nodes from the standard output and standard error files.
Below is the instruction that describes the task:
### Input:
Extracts output nodes from the standard output and standard error
files.
### Response:
def _get_output_nodes(self, output_path, error_path):
    """
    Extracts output nodes from the standard output and standard error
    files.
    """
    from pymatgen.io.nwchem import NwOutput
    from aiida.orm.data.structure import StructureData
    from aiida.orm.data.array.trajectory import TrajectoryData

    ret_dict = []
    nwo = NwOutput(output_path)
    for out in nwo.data:
        molecules = out.pop('molecules', None)
        structures = out.pop('structures', None)
        if molecules:
            structlist = [StructureData(pymatgen_molecule=m) for m in molecules]
            ret_dict.append(('trajectory',
                             TrajectoryData(structurelist=structlist)))
        if structures:
            structlist = [StructureData(pymatgen_structure=s) for s in structures]
            ret_dict.append(('trajectory',
                             TrajectoryData(structurelist=structlist)))
        ret_dict.append(('output', ParameterData(dict=out)))

    # Since ParameterData rewrites its properties (using _set_attr())
    # with keys from the supplied dictionary, ``source`` has to be
    # moved to another key. See issue #9 for details:
    # (https://bitbucket.org/epfl_theos/aiida_epfl/issues/9)
    nwo.job_info['program_source'] = nwo.job_info.pop('source', None)
    ret_dict.append(('job_info', ParameterData(dict=nwo.job_info)))

    return ret_dict
def Negative(other_param, mode="invert", reroll_count_max=2):
    """
    Converts another parameter's results to negative values.

    Parameters
    ----------
    other_param : imgaug.parameters.StochasticParameter
        Other parameter whose sampled values are to be modified.

    mode : {'invert', 'reroll'}, optional
        How to change the signs. Valid values are ``invert`` and ``reroll``.
        ``invert`` means that wrong signs are simply flipped.
        ``reroll`` means that all samples with wrong signs are sampled again,
        optionally many times, until they randomly end up having the correct
        sign.

    reroll_count_max : int, optional
        If `mode` is set to ``reroll``, this determines how often values may
        be rerolled before giving up and simply flipping the sign (as in
        ``mode="invert"``). This shouldn't be set too high, as rerolling is
        expensive.

    Examples
    --------
    >>> param = Negative(Normal(0, 1), mode="reroll")

    Generates a normal distribution that has only negative values.

    """
    return ForceSign(
        other_param=other_param,
        positive=False,
        mode=mode,
        reroll_count_max=reroll_count_max
    )
Converts another parameter's results to negative values.

Parameters
----------
other_param : imgaug.parameters.StochasticParameter
    Other parameter whose sampled values are to be modified.

mode : {'invert', 'reroll'}, optional
    How to change the signs. Valid values are ``invert`` and ``reroll``.
    ``invert`` means that wrong signs are simply flipped.
    ``reroll`` means that all samples with wrong signs are sampled again,
    optionally many times, until they randomly end up having the correct
    sign.

reroll_count_max : int, optional
    If `mode` is set to ``reroll``, this determines how often values may
    be rerolled before giving up and simply flipping the sign (as in
    ``mode="invert"``). This shouldn't be set too high, as rerolling is
    expensive.

Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")

Generates a normal distribution that has only negative values.
Below is the instruction that describes the task:
### Input:
Converts another parameter's results to negative values.

Parameters
----------
other_param : imgaug.parameters.StochasticParameter
    Other parameter whose sampled values are to be modified.

mode : {'invert', 'reroll'}, optional
    How to change the signs. Valid values are ``invert`` and ``reroll``.
    ``invert`` means that wrong signs are simply flipped.
    ``reroll`` means that all samples with wrong signs are sampled again,
    optionally many times, until they randomly end up having the correct
    sign.

reroll_count_max : int, optional
    If `mode` is set to ``reroll``, this determines how often values may
    be rerolled before giving up and simply flipping the sign (as in
    ``mode="invert"``). This shouldn't be set too high, as rerolling is
    expensive.

Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")

Generates a normal distribution that has only negative values.
### Response:
def Negative(other_param, mode="invert", reroll_count_max=2):
    """
    Converts another parameter's results to negative values.

    Parameters
    ----------
    other_param : imgaug.parameters.StochasticParameter
        Other parameter whose sampled values are to be modified.

    mode : {'invert', 'reroll'}, optional
        How to change the signs. Valid values are ``invert`` and ``reroll``.
        ``invert`` means that wrong signs are simply flipped.
        ``reroll`` means that all samples with wrong signs are sampled again,
        optionally many times, until they randomly end up having the correct
        sign.

    reroll_count_max : int, optional
        If `mode` is set to ``reroll``, this determines how often values may
        be rerolled before giving up and simply flipping the sign (as in
        ``mode="invert"``). This shouldn't be set too high, as rerolling is
        expensive.

    Examples
    --------
    >>> param = Negative(Normal(0, 1), mode="reroll")

    Generates a normal distribution that has only negative values.

    """
    return ForceSign(
        other_param=other_param,
        positive=False,
        mode=mode,
        reroll_count_max=reroll_count_max
    )
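If imgaug is installed, a quick sanity check of the reroll mode might look like the sketch below; the sample count is arbitrary, and this assumes the public `imgaug.parameters` API exposes `Negative`, `Normal`, and `draw_samples` as in recent releases:

```python
import numpy as np
from imgaug import parameters as iap

param = iap.Negative(iap.Normal(0, 1), mode="reroll")
samples = param.draw_samples(1000)
print(bool(np.all(samples <= 0)))  # True: every sampled value is non-positive
```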
def step(self): # type: () -> bool """ Decreases the internal counter. Raises an error if the counter goes below 0 :return: True if this step was the final one, else False :raise ValueError: The counter has gone below 0 """ with self.__lock: self.__value -= 1 if self.__value == 0: # All done self.__event.set() return True elif self.__value < 0: # Gone too far raise ValueError("The counter has gone below 0") return False
Decreases the internal counter. Raises an error if the counter goes below 0 :return: True if this step was the final one, else False :raise ValueError: The counter has gone below 0
Below is the the instruction that describes the task: ### Input: Decreases the internal counter. Raises an error if the counter goes below 0 :return: True if this step was the final one, else False :raise ValueError: The counter has gone below 0 ### Response: def step(self): # type: () -> bool """ Decreases the internal counter. Raises an error if the counter goes below 0 :return: True if this step was the final one, else False :raise ValueError: The counter has gone below 0 """ with self.__lock: self.__value -= 1 if self.__value == 0: # All done self.__event.set() return True elif self.__value < 0: # Gone too far raise ValueError("The counter has gone below 0") return False
def write(self, filename): """Save detx file.""" with open(filename, 'w') as f: f.write(self.ascii) self.print("Detector file saved as '{0}'".format(filename))
Save detx file.
Below is the the instruction that describes the task: ### Input: Save detx file. ### Response: def write(self, filename): """Save detx file.""" with open(filename, 'w') as f: f.write(self.ascii) self.print("Detector file saved as '{0}'".format(filename))
def follower_ids(self, user):
    """
    Returns Twitter user ids for the specified user's followers.
    A user can be specified using their screen_name or user_id
    """
    user = str(user)
    user = user.lstrip('@')
    url = 'https://api.twitter.com/1.1/followers/ids.json'

    if re.match(r'^\d+$', user):
        params = {'user_id': user, 'cursor': -1}
    else:
        params = {'screen_name': user, 'cursor': -1}

    while params['cursor'] != 0:
        try:
            resp = self.get(url, params=params, allow_404=True)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                log.info("no users matching %s", user)
            raise e
        user_ids = resp.json()
        for user_id in user_ids['ids']:
            yield str_type(user_id)
        params['cursor'] = user_ids['next_cursor']
Returns Twitter user ids for the specified user's followers.
A user can be specified using their screen_name or user_id
Below is the instruction that describes the task:
### Input:
Returns Twitter user ids for the specified user's followers.
A user can be specified using their screen_name or user_id
### Response:
def follower_ids(self, user):
    """
    Returns Twitter user ids for the specified user's followers.
    A user can be specified using their screen_name or user_id
    """
    user = str(user)
    user = user.lstrip('@')
    url = 'https://api.twitter.com/1.1/followers/ids.json'

    if re.match(r'^\d+$', user):
        params = {'user_id': user, 'cursor': -1}
    else:
        params = {'screen_name': user, 'cursor': -1}

    while params['cursor'] != 0:
        try:
            resp = self.get(url, params=params, allow_404=True)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                log.info("no users matching %s", user)
            raise e
        user_ids = resp.json()
        for user_id in user_ids['ids']:
            yield str_type(user_id)
        params['cursor'] = user_ids['next_cursor']
def main(): """Create an organization, print out its attributes and delete it.""" org = Organization(name='junk org').create() pprint(org.get_values()) # e.g. {'name': 'junk org', …} org.delete()
Create an organization, print out its attributes and delete it.
Below is the the instruction that describes the task: ### Input: Create an organization, print out its attributes and delete it. ### Response: def main(): """Create an organization, print out its attributes and delete it.""" org = Organization(name='junk org').create() pprint(org.get_values()) # e.g. {'name': 'junk org', …} org.delete()
def from_pdb(cls, path, forcefield=None, loader=PDBFile, strict=True, **kwargs):
    """
    Loads topology, positions and, potentially, velocities and vectors,
    from a PDB or PDBx file

    Parameters
    ----------
    path : str
        Path to PDB/PDBx file
    forcefield : list of str
        Paths to FFXML and/or FRCMOD forcefields. REQUIRED.

    Returns
    -------
    pdb : SystemHandler
        SystemHandler with topology, positions, and, potentially, velocities and
        box vectors. Forcefields are embedded in the `master` attribute.
    """
    pdb = loader(path)
    box = kwargs.pop('box', pdb.topology.getPeriodicBoxVectors())
    positions = kwargs.pop('positions', pdb.positions)
    velocities = kwargs.pop('velocities', getattr(pdb, 'velocities', None))

    if strict and not forcefield:
        from .md import FORCEFIELDS as forcefield
        logger.info('! Forcefields for PDB not specified. Using default: %s',
                    ', '.join(forcefield))
    pdb.forcefield = ForceField(*list(process_forcefield(*forcefield)))

    return cls(master=pdb.forcefield, topology=pdb.topology, positions=positions,
               velocities=velocities, box=box, path=path, **kwargs)
Loads topology, positions and, potentially, velocities and vectors,
from a PDB or PDBx file

Parameters
----------
path : str
    Path to PDB/PDBx file
forcefield : list of str
    Paths to FFXML and/or FRCMOD forcefields. REQUIRED.

Returns
-------
pdb : SystemHandler
    SystemHandler with topology, positions, and, potentially, velocities and
    box vectors. Forcefields are embedded in the `master` attribute.
Below is the instruction that describes the task:
### Input:
Loads topology, positions and, potentially, velocities and vectors,
from a PDB or PDBx file

Parameters
----------
path : str
    Path to PDB/PDBx file
forcefield : list of str
    Paths to FFXML and/or FRCMOD forcefields. REQUIRED.

Returns
-------
pdb : SystemHandler
    SystemHandler with topology, positions, and, potentially, velocities and
    box vectors. Forcefields are embedded in the `master` attribute.
### Response:
def from_pdb(cls, path, forcefield=None, loader=PDBFile, strict=True, **kwargs):
    """
    Loads topology, positions and, potentially, velocities and vectors,
    from a PDB or PDBx file

    Parameters
    ----------
    path : str
        Path to PDB/PDBx file
    forcefield : list of str
        Paths to FFXML and/or FRCMOD forcefields. REQUIRED.

    Returns
    -------
    pdb : SystemHandler
        SystemHandler with topology, positions, and, potentially, velocities and
        box vectors. Forcefields are embedded in the `master` attribute.
    """
    pdb = loader(path)
    box = kwargs.pop('box', pdb.topology.getPeriodicBoxVectors())
    positions = kwargs.pop('positions', pdb.positions)
    velocities = kwargs.pop('velocities', getattr(pdb, 'velocities', None))

    if strict and not forcefield:
        from .md import FORCEFIELDS as forcefield
        logger.info('! Forcefields for PDB not specified. Using default: %s',
                    ', '.join(forcefield))
    pdb.forcefield = ForceField(*list(process_forcefield(*forcefield)))

    return cls(master=pdb.forcefield, topology=pdb.topology, positions=positions,
               velocities=velocities, box=box, path=path, **kwargs)
def normalize_scientific_notation(s, ignore_commas=True, verbosity=1):
    """Produce a string convertible with float(s), if possible, fixing some common scientific notations

    Deletes commas and allows addition.

    >>> normalize_scientific_notation(' -123 x 10^-45 ')
    '-123e-45'
    >>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
    '1233e-5678'
    >>> normalize_scientific_notation('$42.42')
    '42.42'
    """
    s = s.lstrip(charlist.not_digits_nor_sign)
    s = s.rstrip(charlist.not_digits)
    # print s
    # TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation
    num_strings = rex.scientific_notation_exponent.split(s, maxsplit=2)
    # print num_strings
    # get rid of commas
    s = rex.re.sub(r"[^.0-9-+" + "," * int(not ignore_commas) + r"]+", '', num_strings[0])
    # print s
    # if this value gets so large that it requires an exponential notation, this will break the conversion
    if not s:
        return None
    try:
        s = str(eval(s.strip().lstrip('0')))
    except (IndexError, ValueError, AttributeError, TypeError):
        if verbosity > 1:
            print('Unable to evaluate %s' % repr(s))
        try:
            s = str(float(s))
        except (IndexError, ValueError, AttributeError, TypeError):
            print('Unable to float %s' % repr(s))
            s = ''
    # print s
    if len(num_strings) > 1:
        if not s:
            s = '1'
        s += 'e' + rex.re.sub(r'[^.0-9-+]+', '', num_strings[1])
    if s:
        return s
    return None
Produce a string convertible with float(s), if possible, fixing some common scientific notations

Deletes commas and allows addition.

>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42'
Below is the instruction that describes the task:
### Input:
Produce a string convertible with float(s), if possible, fixing some common scientific notations

Deletes commas and allows addition.

>>> normalize_scientific_notation(' -123 x 10^-45 ')
'-123e-45'
>>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
'1233e-5678'
>>> normalize_scientific_notation('$42.42')
'42.42'
### Response:
def normalize_scientific_notation(s, ignore_commas=True, verbosity=1):
    """Produce a string convertible with float(s), if possible, fixing some common scientific notations

    Deletes commas and allows addition.

    >>> normalize_scientific_notation(' -123 x 10^-45 ')
    '-123e-45'
    >>> normalize_scientific_notation(' -1+1,234 x 10^-5,678 ')
    '1233e-5678'
    >>> normalize_scientific_notation('$42.42')
    '42.42'
    """
    s = s.lstrip(charlist.not_digits_nor_sign)
    s = s.rstrip(charlist.not_digits)
    # print s
    # TODO: substitute ** for ^ and just eval the expression rather than insisting on a base-10 representation
    num_strings = rex.scientific_notation_exponent.split(s, maxsplit=2)
    # print num_strings
    # get rid of commas
    s = rex.re.sub(r"[^.0-9-+" + "," * int(not ignore_commas) + r"]+", '', num_strings[0])
    # print s
    # if this value gets so large that it requires an exponential notation, this will break the conversion
    if not s:
        return None
    try:
        s = str(eval(s.strip().lstrip('0')))
    except (IndexError, ValueError, AttributeError, TypeError):
        if verbosity > 1:
            print('Unable to evaluate %s' % repr(s))
        try:
            s = str(float(s))
        except (IndexError, ValueError, AttributeError, TypeError):
            print('Unable to float %s' % repr(s))
            s = ''
    # print s
    if len(num_strings) > 1:
        if not s:
            s = '1'
        s += 'e' + rex.re.sub(r'[^.0-9-+]+', '', num_strings[1])
    if s:
        return s
    return None
def mutable(function): '''Add the instance internal state as the second parameter of the decorated function.''' def wrapper(self, *args, **kwargs): state = self._get_state() return function(self, state, *args, **kwargs) return wrapper
Add the instance internal state as the second parameter of the decorated function.
Below is the the instruction that describes the task: ### Input: Add the instance internal state as the second parameter of the decorated function. ### Response: def mutable(function): '''Add the instance internal state as the second parameter of the decorated function.''' def wrapper(self, *args, **kwargs): state = self._get_state() return function(self, state, *args, **kwargs) return wrapper
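A self-contained sketch of a host class for the decorator above, assuming `_get_state` is the convention the decorator relies on (the `Counter` class is illustrative only):

```python
class Counter:
    def __init__(self):
        self._state = {'n': 0}

    def _get_state(self):
        return self._state

    @mutable
    def bump(self, state, by=1):
        # 'state' is injected by the decorator as the second parameter
        state['n'] += by
        return state['n']


c = Counter()
print(c.bump())      # 1
print(c.bump(by=4))  # 5
```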
def dict_to_serialized_dict(ref, the_dict): """Serialize the list of elements to a dictionary Used for the retention store :param ref: Not used :type ref: :param the_dict: dictionary to convert :type the_dict: dict :return: dict of serialized :rtype: dict """ result = {} for elt in list(the_dict.values()): if not getattr(elt, 'serialize', None): continue result[elt.uuid] = elt.serialize() return result
Serialize the list of elements to a dictionary Used for the retention store :param ref: Not used :type ref: :param the_dict: dictionary to convert :type the_dict: dict :return: dict of serialized :rtype: dict
Below is the the instruction that describes the task: ### Input: Serialize the list of elements to a dictionary Used for the retention store :param ref: Not used :type ref: :param the_dict: dictionary to convert :type the_dict: dict :return: dict of serialized :rtype: dict ### Response: def dict_to_serialized_dict(ref, the_dict): """Serialize the list of elements to a dictionary Used for the retention store :param ref: Not used :type ref: :param the_dict: dictionary to convert :type the_dict: dict :return: dict of serialized :rtype: dict """ result = {} for elt in list(the_dict.values()): if not getattr(elt, 'serialize', None): continue result[elt.uuid] = elt.serialize() return result
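A self-contained check with stand-in objects carrying the `uuid` and `serialize()` members the function expects (the `Item` class is a hypothetical example, not part of the original codebase):

```python
import uuid

class Item:
    def __init__(self, name):
        self.uuid = str(uuid.uuid4())
        self.name = name

    def serialize(self):
        return {'name': self.name}


items = {1: Item('host'), 2: Item('service'), 3: object()}  # object() lacks serialize()
result = dict_to_serialized_dict(None, items)
print(result)  # maps each item's uuid to {'name': ...}; the bare object is skipped
```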
def loads(cls, pickle_string):
    """
    Equivalent to pickle.loads except that the HoloViews tree
    is restored appropriately.
    """
    cls.load_counter_offset = StoreOptions.id_offset()
    val = pickle.loads(pickle_string)
    cls.load_counter_offset = None
    return val
Equivalent to pickle.loads except that the HoloViews tree
is restored appropriately.
Below is the instruction that describes the task:
### Input:
Equivalent to pickle.loads except that the HoloViews tree
is restored appropriately.
### Response:
def loads(cls, pickle_string):
    """
    Equivalent to pickle.loads except that the HoloViews tree
    is restored appropriately.
    """
    cls.load_counter_offset = StoreOptions.id_offset()
    val = pickle.loads(pickle_string)
    cls.load_counter_offset = None
    return val
def start(self, request: Request) -> Response: '''Start a file or directory listing download. Args: request: Request. Returns: A Response populated with the initial data connection reply. Once the response is received, call :meth:`download`. Coroutine. ''' if self._session_state != SessionState.ready: raise RuntimeError('Session not ready') response = Response() yield from self._prepare_fetch(request, response) response.file_transfer_size = yield from self._fetch_size(request) if request.restart_value: try: yield from self._commander.restart(request.restart_value) response.restart_value = request.restart_value except FTPServerError: _logger.debug('Could not restart file.', exc_info=1) yield from self._open_data_stream() command = Command('RETR', request.file_path) yield from self._begin_stream(command) self._session_state = SessionState.file_request_sent return response
Start a file or directory listing download. Args: request: Request. Returns: A Response populated with the initial data connection reply. Once the response is received, call :meth:`download`. Coroutine.
Below is the the instruction that describes the task: ### Input: Start a file or directory listing download. Args: request: Request. Returns: A Response populated with the initial data connection reply. Once the response is received, call :meth:`download`. Coroutine. ### Response: def start(self, request: Request) -> Response: '''Start a file or directory listing download. Args: request: Request. Returns: A Response populated with the initial data connection reply. Once the response is received, call :meth:`download`. Coroutine. ''' if self._session_state != SessionState.ready: raise RuntimeError('Session not ready') response = Response() yield from self._prepare_fetch(request, response) response.file_transfer_size = yield from self._fetch_size(request) if request.restart_value: try: yield from self._commander.restart(request.restart_value) response.restart_value = request.restart_value except FTPServerError: _logger.debug('Could not restart file.', exc_info=1) yield from self._open_data_stream() command = Command('RETR', request.file_path) yield from self._begin_stream(command) self._session_state = SessionState.file_request_sent return response
def _parse_status(self, status): """Parses the house's state description and applies the corresponding values Parameters ---------- status: :class:`str` Plain text string containing the current renting state of the house. """ m = rented_regex.search(status) if m: self.status = HouseStatus.RENTED self.owner = m.group("owner") self.owner_sex = Sex.MALE if m.group("pronoun") == "He" else Sex.FEMALE self.paid_until = parse_tibia_datetime(m.group("paid_until")) else: self.status = HouseStatus.AUCTIONED m = transfer_regex.search(status) if m: self.transfer_date = parse_tibia_datetime(m.group("transfer_date")) self.transfer_accepted = m.group("verb") == "will" self.transferee = m.group("transferee") price = m.group("transfer_price") self.transfer_price = int(price) if price is not None else 0 m = auction_regex.search(status) if m: self.auction_end = parse_tibia_datetime(m.group("auction_end")) m = bid_regex.search(status) if m: self.highest_bid = int(m.group("highest_bid")) self.highest_bidder = m.group("bidder")
Parses the house's state description and applies the corresponding values Parameters ---------- status: :class:`str` Plain text string containing the current renting state of the house.
Below is the instruction that describes the task:
### Input:
Parses the house's state description and applies the corresponding values

    Parameters
    ----------
    status: :class:`str`
        Plain text string containing the current renting state of the house.
### Response:
def _parse_status(self, status):
    """Parses the house's state description and applies the corresponding values

    Parameters
    ----------
    status: :class:`str`
        Plain text string containing the current renting state of the house.
    """
    m = rented_regex.search(status)
    if m:
        self.status = HouseStatus.RENTED
        self.owner = m.group("owner")
        self.owner_sex = Sex.MALE if m.group("pronoun") == "He" else Sex.FEMALE
        self.paid_until = parse_tibia_datetime(m.group("paid_until"))
    else:
        self.status = HouseStatus.AUCTIONED
    m = transfer_regex.search(status)
    if m:
        self.transfer_date = parse_tibia_datetime(m.group("transfer_date"))
        self.transfer_accepted = m.group("verb") == "will"
        self.transferee = m.group("transferee")
        price = m.group("transfer_price")
        self.transfer_price = int(price) if price is not None else 0
    m = auction_regex.search(status)
    if m:
        self.auction_end = parse_tibia_datetime(m.group("auction_end"))
    m = bid_regex.search(status)
    if m:
        self.highest_bid = int(m.group("highest_bid"))
        self.highest_bidder = m.group("bidder")
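The module-level patterns (rented_regex, transfer_regex, auction_regex, bid_regex) are defined elsewhere in tibia.py and are not shown here. The snippet below is an illustrative named-group regex of the same shape, not the real pattern, to show how groups such as owner, pronoun and paid_until are pulled out of the status text.

import re

# Illustrative stand-in for rented_regex; the real pattern is more elaborate.
demo_rented_regex = re.compile(
    r"rented by (?P<owner>[^.]+)\. (?P<pronoun>He|She) has paid the rent "
    r"until (?P<paid_until>[^.]+)\.")

status = ("The house has been rented by Bob. "
          "He has paid the rent until Oct 10 2023, 10:00:00 CEST.")
m = demo_rented_regex.search(status)
print(m.group("owner"), m.group("pronoun"), m.group("paid_until"))
# Bob He Oct 10 2023, 10:00:00 CEST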
def getmembers_runtime(self): """Gets members (vars) from all scopes using ONLY runtime information. You most likely want to use ScopeStack.getmembers instead. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'. """ names = set() for scope in self.scopes: names.update(structured.getmembers_runtime(scope)) return names
Gets members (vars) from all scopes using ONLY runtime information. You most likely want to use ScopeStack.getmembers instead. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'.
Below is the instruction that describes the task:
### Input:
Gets members (vars) from all scopes using ONLY runtime information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
### Response:
def getmembers_runtime(self):
    """Gets members (vars) from all scopes using ONLY runtime information.

    You most likely want to use ScopeStack.getmembers instead.

    Returns:
        Set of available vars.

    Raises:
        NotImplementedError if any scope fails to implement 'getmembers'.
    """
    names = set()
    for scope in self.scopes:
        names.update(structured.getmembers_runtime(scope))
    return names
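A minimal standalone sketch of the aggregation pattern, with plain objects in place of the real scopes and vars() standing in for structured.getmembers_runtime; DemoScope and the member names are illustrative only.

class DemoScope:
    def __init__(self, **members):
        self.__dict__.update(members)

scopes = [DemoScope(pid=1, name='init'), DemoScope(name='bash', ppid=1)]

names = set()
for scope in scopes:
    names.update(vars(scope))  # stand-in for structured.getmembers_runtime
print(sorted(names))  # ['name', 'pid', 'ppid']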
def findAttr(self, svgNode, name):
    """Search an attribute with some name in some node or above.

    First the node is searched, then its style attribute, then the
    search continues in the node's parent node. If no such attribute
    is found, '' is returned.
    """
    # This also needs to look up values like "url(#SomeName)"...
    if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
        if isinstance(svgNode, NodeTracker):
            svgNode.apply_rules(self.css_rules)
        else:
            ElementWrapper(svgNode).apply_rules(self.css_rules)
    attr_value = svgNode.attrib.get(name, '').strip()
    if attr_value and attr_value != "inherit":
        return attr_value
    elif svgNode.attrib.get("style"):
        # Avoid shadowing the built-in 'dict' with the parsed style mapping.
        styles = self.parseMultiAttributes(svgNode.attrib.get("style"))
        if name in styles:
            return styles[name]
    if svgNode.getparent() is not None:
        return self.findAttr(svgNode.getparent(), name)
    return ''
Search an attribute with some name in some node or above. First the node is searched, then its style attribute, then the search continues in the node's parent node. If no such attribute is found, '' is returned.
Below is the instruction that describes the task:
### Input:
Search an attribute with some name in some node or above.

    First the node is searched, then its style attribute, then the
    search continues in the node's parent node. If no such attribute
    is found, '' is returned.
### Response:
def findAttr(self, svgNode, name):
    """Search an attribute with some name in some node or above.

    First the node is searched, then its style attribute, then the
    search continues in the node's parent node. If no such attribute
    is found, '' is returned.
    """
    # This also needs to look up values like "url(#SomeName)"...
    if self.css_rules is not None and not svgNode.attrib.get('__rules_applied', False):
        if isinstance(svgNode, NodeTracker):
            svgNode.apply_rules(self.css_rules)
        else:
            ElementWrapper(svgNode).apply_rules(self.css_rules)
    attr_value = svgNode.attrib.get(name, '').strip()
    if attr_value and attr_value != "inherit":
        return attr_value
    elif svgNode.attrib.get("style"):
        # Avoid shadowing the built-in 'dict' with the parsed style mapping.
        styles = self.parseMultiAttributes(svgNode.attrib.get("style"))
        if name in styles:
            return styles[name]
    if svgNode.getparent() is not None:
        return self.findAttr(svgNode.getparent(), name)
    return ''
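A simplified standalone sketch of the same cascade (plain attribute, then the style attribute, then the parent), written against lxml and skipping the CSS-rules step; find_attr here is a hypothetical helper for illustration, not svglib's API.

from lxml import etree

def find_attr(node, name):
    # 1) plain attribute, 2) the style attribute, 3) recurse into the parent
    value = node.get(name, '').strip()
    if value and value != 'inherit':
        return value
    style = node.get('style')
    if style:
        styles = dict(item.split(':', 1) for item in style.split(';') if ':' in item)
        if name in styles:
            return styles[name].strip()
    parent = node.getparent()
    return find_attr(parent, name) if parent is not None else ''

svg = etree.fromstring('<g fill="red" style="stroke: blue"><rect width="10"/></g>')
rect = svg[0]
print(find_attr(rect, 'fill'))    # 'red'  (inherited from <g>)
print(find_attr(rect, 'stroke'))  # 'blue' (from the parent's style attribute)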
def _consolidate_elemental_array_(elemental_array): """ Accounts for non-empirical chemical formulas by taking in the compositional array generated by _create_compositional_array_() and returning a consolidated array of dictionaries with no repeating elements :param elemental_array: an elemental array generated from _create_compositional_array_() :return: an array of element dictionaries """ condensed_array = [] for e in elemental_array: exists = False for k in condensed_array: if k["symbol"] == e["symbol"]: exists = True k["occurances"] += e["occurances"] break if not exists: condensed_array.append(e) return condensed_array
Accounts for non-empirical chemical formulas by taking in the compositional array generated by _create_compositional_array_() and returning a consolidated array of dictionaries with no repeating elements :param elemental_array: an elemental array generated from _create_compositional_array_() :return: an array of element dictionaries
Below is the instruction that describes the task:
### Input:
Accounts for non-empirical chemical formulas by taking in the compositional array generated by _create_compositional_array_()
    and returning a consolidated array of dictionaries with no repeating elements

    :param elemental_array: an elemental array generated from _create_compositional_array_()
    :return: an array of element dictionaries
### Response:
def _consolidate_elemental_array_(elemental_array):
    """
    Accounts for non-empirical chemical formulas by taking in the compositional array generated by _create_compositional_array_()
    and returning a consolidated array of dictionaries with no repeating elements

    :param elemental_array: an elemental array generated from _create_compositional_array_()
    :return: an array of element dictionaries
    """
    condensed_array = []
    for e in elemental_array:
        exists = False
        for k in condensed_array:
            if k["symbol"] == e["symbol"]:
                exists = True
                k["occurances"] += e["occurances"]
                break
        if not exists:
            condensed_array.append(e)
    return condensed_array
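The function is self-contained, so a usage example runs as-is; the 'occurances' key spelling comes from the upstream code. A repeated symbol is folded into the first matching entry:

elemental_array = [
    {"symbol": "C", "occurances": 6},
    {"symbol": "H", "occurances": 6},
    {"symbol": "C", "occurances": 6},  # repeat from a non-empirical formula
]
print(_consolidate_elemental_array_(elemental_array))
# [{'symbol': 'C', 'occurances': 12}, {'symbol': 'H', 'occurances': 6}]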
def parse_annotation(obj: dict) -> BioCAnnotation: """Deserialize a dict obj to a BioCAnnotation object""" ann = BioCAnnotation() ann.id = obj['id'] ann.infons = obj['infons'] ann.text = obj['text'] for loc in obj['locations']: ann.add_location(BioCLocation(loc['offset'], loc['length'])) return ann
Deserialize a dict obj to a BioCAnnotation object
Below is the instruction that describes the task:
### Input:
Deserialize a dict obj to a BioCAnnotation object
### Response:
def parse_annotation(obj: dict) -> BioCAnnotation:
    """Deserialize a dict obj to a BioCAnnotation object"""
    ann = BioCAnnotation()
    ann.id = obj['id']
    ann.infons = obj['infons']
    ann.text = obj['text']
    for loc in obj['locations']:
        ann.add_location(BioCLocation(loc['offset'], loc['length']))
    return ann
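A hedged usage sketch: the dict below mirrors exactly the keys the function reads (id, infons, text, and locations entries with offset and length); the concrete values are invented.

obj = {
    'id': 'T1',
    'infons': {'type': 'Gene'},
    'text': 'BRCA1',
    'locations': [{'offset': 17, 'length': 5}],
}
ann = parse_annotation(obj)
print(ann.id, ann.text)  # T1 BRCA1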
def update_experiments(self):
    """Experiment mapping."""
    # 693 Remove if 'not applicable'
    for field in record_get_field_instances(self.record, '693'):
        subs = field_get_subfields(field)
        acc_experiment = subs.get("e", [])
        if not acc_experiment:
            acc_experiment = subs.get("a", [])
            if not acc_experiment:
                continue
        experiment = acc_experiment[-1]
        # Handle the special case of leading zeros in experiment numbers: NA-050 -> NA 50
        e_suffix = ""
        if "-NA-" in experiment or \
                "-RD-" in experiment or \
                "-WA-" in experiment:
            splitted_experiment = experiment.split("-")
            e_suffix = "-".join(splitted_experiment[2:])
            if e_suffix.startswith("0"):
                e_suffix = e_suffix[1:]
            experiment = "-".join(splitted_experiment[:2])  # only CERN-NA
        translated_experiment = self.get_config_item(experiment,
                                                     "experiments")
        if not translated_experiment:
            continue
        new_subs = []
        if "---" in translated_experiment:
            experiment_a, experiment_e = translated_experiment.split("---")
            new_subs.append(("a", experiment_a.replace("-", " ")))
        else:
            experiment_e = translated_experiment
        new_subs.append(("e", experiment_e.replace("-", " ") + e_suffix))
        record_delete_field(self.record, tag="693",
                            field_position_global=field[4])
        record_add_field(self.record, "693", subfields=new_subs)
Experiment mapping.
Below is the instruction that describes the task:
### Input:
Experiment mapping.
### Response:
def update_experiments(self):
    """Experiment mapping."""
    # 693 Remove if 'not applicable'
    for field in record_get_field_instances(self.record, '693'):
        subs = field_get_subfields(field)
        acc_experiment = subs.get("e", [])
        if not acc_experiment:
            acc_experiment = subs.get("a", [])
            if not acc_experiment:
                continue
        experiment = acc_experiment[-1]
        # Handle the special case of leading zeros in experiment numbers: NA-050 -> NA 50
        e_suffix = ""
        if "-NA-" in experiment or \
                "-RD-" in experiment or \
                "-WA-" in experiment:
            splitted_experiment = experiment.split("-")
            e_suffix = "-".join(splitted_experiment[2:])
            if e_suffix.startswith("0"):
                e_suffix = e_suffix[1:]
            experiment = "-".join(splitted_experiment[:2])  # only CERN-NA
        translated_experiment = self.get_config_item(experiment,
                                                     "experiments")
        if not translated_experiment:
            continue
        new_subs = []
        if "---" in translated_experiment:
            experiment_a, experiment_e = translated_experiment.split("---")
            new_subs.append(("a", experiment_a.replace("-", " ")))
        else:
            experiment_e = translated_experiment
        new_subs.append(("e", experiment_e.replace("-", " ") + e_suffix))
        record_delete_field(self.record, tag="693",
                            field_position_global=field[4])
        record_add_field(self.record, "693", subfields=new_subs)
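The leading-zero surgery is the least obvious step, so here is that fragment isolated with an assumed input value, copied from the function body; the experiments lookup table consulted by get_config_item is configuration that is not shown here.

experiment = "CERN-NA-050"  # assumed raw value from the subfield
parts = experiment.split("-")
e_suffix = "-".join(parts[2:])
if e_suffix.startswith("0"):
    e_suffix = e_suffix[1:]
experiment = "-".join(parts[:2])
print(experiment, e_suffix)  # CERN-NA 50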
def lines(self): """ :return: list containing lines (unicode) from Dockerfile """ if self.cache_content and self.cached_content: return self.cached_content.splitlines(True) try: with self._open_dockerfile('rb') as dockerfile: lines = [b2u(l) for l in dockerfile.readlines()] if self.cache_content: self.cached_content = ''.join(lines) return lines except (IOError, OSError) as ex: logger.error("Couldn't retrieve lines from dockerfile: %r", ex) raise
:return: list containing lines (unicode) from Dockerfile
Below is the instruction that describes the task:
### Input:
:return: list containing lines (unicode) from Dockerfile
### Response:
def lines(self):
    """
    :return: list containing lines (unicode) from Dockerfile
    """
    if self.cache_content and self.cached_content:
        return self.cached_content.splitlines(True)
    try:
        with self._open_dockerfile('rb') as dockerfile:
            lines = [b2u(l) for l in dockerfile.readlines()]
            if self.cache_content:
                self.cached_content = ''.join(lines)
            return lines
    except (IOError, OSError) as ex:
        logger.error("Couldn't retrieve lines from dockerfile: %r", ex)
        raise
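A hedged usage sketch, assuming this method belongs to dockerfile-parse's DockerfileParser, where lines is exposed as a property; the path argument and its value are assumptions.

from dockerfile_parse import DockerfileParser

parser = DockerfileParser(path='/tmp/build')  # assumes a Dockerfile under /tmp/build
for line in parser.lines:  # 'lines' is a property in dockerfile-parse
    print(line, end='')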
def set_chat_description(self, *args, **kwargs): """See :func:`set_chat_description`""" return set_chat_description(*args, **self._merge_overrides(**kwargs)).run()
See :func:`set_chat_description`
Below is the instruction that describes the task:
### Input:
See :func:`set_chat_description`
### Response:
def set_chat_description(self, *args, **kwargs):
    """See :func:`set_chat_description`"""
    return set_chat_description(*args, **self._merge_overrides(**kwargs)).run()
def dispatch(self, request, environ):
    """
    Checks which Grant supports the current request and dispatches to it.

    :param request: The incoming request.
    :type request: :class:`oauth2.web.Request`
    :param environ: Dict containing variables of the environment.
    :type environ: dict

    :return: An instance of ``oauth2.web.Response``.
    """
    try:
        grant_type = self._determine_grant_type(request)
        response = self.response_class()
        grant_type.read_validate_params(request)
        return grant_type.process(request, response, environ)
    except OAuthInvalidNoRedirectError:
        response = self.response_class()
        response.add_header("Content-Type", "application/json")
        response.status_code = 400
        response.body = json.dumps({
            "error": "invalid_redirect_uri",
            "error_description": "Invalid redirect URI"
        })
        return response
    except OAuthInvalidError as err:
        response = self.response_class()
        return grant_type.handle_error(error=err, response=response)
    except UnsupportedGrantError:
        response = self.response_class()
        response.add_header("Content-Type", "application/json")
        response.status_code = 400
        response.body = json.dumps({
            "error": "unsupported_response_type",
            "error_description": "Grant not supported"
        })
        return response
    except Exception:
        # Catch Exception rather than a bare except so that SystemExit and
        # KeyboardInterrupt are not swallowed.
        app_log.error("Uncaught Exception", exc_info=True)
        response = self.response_class()
        return grant_type.handle_error(
            error=OAuthInvalidError(error="server_error",
                                    explanation="Internal server error"),
            response=response)
Checks which Grant supports the current request and dispatches to it. :param request: The incoming request. :type request: :class:`oauth2.web.Request` :param environ: Dict containing variables of the environment. :type environ: dict :return: An instance of ``oauth2.web.Response``.
Below is the instruction that describes the task:
### Input:
Checks which Grant supports the current request and dispatches to it.

    :param request: The incoming request.
    :type request: :class:`oauth2.web.Request`
    :param environ: Dict containing variables of the environment.
    :type environ: dict

    :return: An instance of ``oauth2.web.Response``.
### Response:
def dispatch(self, request, environ):
    """
    Checks which Grant supports the current request and dispatches to it.

    :param request: The incoming request.
    :type request: :class:`oauth2.web.Request`
    :param environ: Dict containing variables of the environment.
    :type environ: dict

    :return: An instance of ``oauth2.web.Response``.
    """
    try:
        grant_type = self._determine_grant_type(request)
        response = self.response_class()
        grant_type.read_validate_params(request)
        return grant_type.process(request, response, environ)
    except OAuthInvalidNoRedirectError:
        response = self.response_class()
        response.add_header("Content-Type", "application/json")
        response.status_code = 400
        response.body = json.dumps({
            "error": "invalid_redirect_uri",
            "error_description": "Invalid redirect URI"
        })
        return response
    except OAuthInvalidError as err:
        response = self.response_class()
        return grant_type.handle_error(error=err, response=response)
    except UnsupportedGrantError:
        response = self.response_class()
        response.add_header("Content-Type", "application/json")
        response.status_code = 400
        response.body = json.dumps({
            "error": "unsupported_response_type",
            "error_description": "Grant not supported"
        })
        return response
    except Exception:
        # Catch Exception rather than a bare except so that SystemExit and
        # KeyboardInterrupt are not swallowed.
        app_log.error("Uncaught Exception", exc_info=True)
        response = self.response_class()
        return grant_type.handle_error(
            error=OAuthInvalidError(error="server_error",
                                    explanation="Internal server error"),
            response=response)
def get_position(self): ''' Read chuck position (x, y, z)''' reply = self._intf.query('ReadChuckPosition Y H')[2:] return [float(i) for i in reply.split()]
Read chuck position (x, y, z)
Below is the instruction that describes the task:
### Input:
Read chuck position (x, y, z)
### Response:
def get_position(self):
    ''' Read chuck position (x, y, z)'''
    reply = self._intf.query('ReadChuckPosition Y H')[2:]
    return [float(i) for i in reply.split()]
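A sketch of the parsing step in isolation. The reply string is an assumed example of the prober's answer; the [2:] slice in the method drops a two-character status prefix before the three coordinates are split out.

reply = "0: 12345.67 2345.10 -110.25"[2:]  # assumed raw reply; strip the status prefix
x, y, z = [float(i) for i in reply.split()]
print(x, y, z)  # 12345.67 2345.1 -110.25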
def get_connections(app):
    """
    Return all Heroku Connect connections set up with the given application.

    For more details check the link -
    https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id

    Sample response from the API call is below::

        {
            "count": 1,
            "results":[{
                "id": "<connection_id>",
                "name": "<app_name>",
                "resource_name": "<resource_name>",
                …
            }],
            …
        }

    Args:
        app (str): Heroku application name.

    Returns:
        List[dict]: List of all Heroku Connect connections associated with the Heroku application.

    Raises:
        requests.HTTPError: If an error occurred when accessing the connections API.
        ValueError: If response is not a valid JSON.
    """
    payload = {'app': app}
    url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections')
    response = requests.get(url, params=payload, headers=_get_authorization_headers())
    response.raise_for_status()
    return response.json()['results']
Return all Heroku Connect connections set up with the given application.

    For more details check the link -
    https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id

    Sample response from the API call is below::

        {
            "count": 1,
            "results":[{
                "id": "<connection_id>",
                "name": "<app_name>",
                "resource_name": "<resource_name>",
                …
            }],
            …
        }

    Args:
        app (str): Heroku application name.

    Returns:
        List[dict]: List of all Heroku Connect connections associated with the Heroku application.

    Raises:
        requests.HTTPError: If an error occurred when accessing the connections API.
        ValueError: If response is not a valid JSON.
Below is the instruction that describes the task:
### Input:
Return all Heroku Connect connections set up with the given application.

    For more details check the link -
    https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id

    Sample response from the API call is below::

        {
            "count": 1,
            "results":[{
                "id": "<connection_id>",
                "name": "<app_name>",
                "resource_name": "<resource_name>",
                …
            }],
            …
        }

    Args:
        app (str): Heroku application name.

    Returns:
        List[dict]: List of all Heroku Connect connections associated with the Heroku application.

    Raises:
        requests.HTTPError: If an error occurred when accessing the connections API.
        ValueError: If response is not a valid JSON.
### Response:
def get_connections(app):
    """
    Return all Heroku Connect connections set up with the given application.

    For more details check the link -
    https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id

    Sample response from the API call is below::

        {
            "count": 1,
            "results":[{
                "id": "<connection_id>",
                "name": "<app_name>",
                "resource_name": "<resource_name>",
                …
            }],
            …
        }

    Args:
        app (str): Heroku application name.

    Returns:
        List[dict]: List of all Heroku Connect connections associated with the Heroku application.

    Raises:
        requests.HTTPError: If an error occurred when accessing the connections API.
        ValueError: If response is not a valid JSON.
    """
    payload = {'app': app}
    url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections')
    response = requests.get(url, params=payload, headers=_get_authorization_headers())
    response.raise_for_status()
    return response.json()['results']
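A hedged usage sketch: it assumes Django settings provide HEROKU_CONNECT_API_ENDPOINT and the token used by _get_authorization_headers(); the app name is hypothetical, and only the id and name keys shown in the sample response are read.

import requests

try:
    connections = get_connections('my-heroku-app')  # hypothetical app name
except requests.HTTPError as err:
    print('Connections API error:', err)
else:
    for connection in connections:
        print(connection['id'], connection['name'])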
def _on_new_data_received(self, data: bytes): """ Gets called whenever we get a whole new XML element from kik's servers. :param data: The data received (bytes) """ if data == b' ': # Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back. self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ') return xml_element = BeautifulSoup(data.decode(), features='xml') xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element # choose the handler based on the XML tag name if xml_element.name == "k": self._handle_received_k_element(xml_element) if xml_element.name == "iq": self._handle_received_iq_element(xml_element) elif xml_element.name == "message": self._handle_xmpp_message(xml_element) elif xml_element.name == 'stc': self.callback.on_captcha_received(login.CaptchaElement(xml_element))
Gets called whenever we get a whole new XML element from kik's servers. :param data: The data received (bytes)
Below is the instruction that describes the task:
### Input:
Gets called whenever we get a whole new XML element from kik's servers.
    :param data: The data received (bytes)
### Response:
def _on_new_data_received(self, data: bytes):
    """
    Gets called whenever we get a whole new XML element from kik's servers.
    :param data: The data received (bytes)
    """
    if data == b' ':
        # Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.
        self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
        return
    xml_element = BeautifulSoup(data.decode(), features='xml')
    xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
    # choose the handler based on the XML tag name
    if xml_element.name == "k":
        self._handle_received_k_element(xml_element)
    if xml_element.name == "iq":
        self._handle_received_iq_element(xml_element)
    elif xml_element.name == "message":
        self._handle_xmpp_message(xml_element)
    elif xml_element.name == 'stc':
        self.callback.on_captcha_received(login.CaptchaElement(xml_element))
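The tag-name dispatch can be checked with a standalone snippet (features='xml' selects bs4's lxml-based XML parser, so lxml must be installed); the payload is invented.

from bs4 import BeautifulSoup

data = b'<message type="chat"><body>hi</body></message>'  # assumed payload
xml_element = BeautifulSoup(data.decode(), features='xml')
xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
print(xml_element.name)  # 'message' -> routed to _handle_xmpp_message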
def dragTo(x=None, y=None, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True):
    """Performs a mouse drag (mouse movement while a button is held down) to a point on the screen.

    The x and y parameters detail where the mouse event happens. If None, the
    current mouse position is used. If a float value, it is rounded down. If
    outside the boundaries of the screen, the event happens at the edge of the
    screen.

    Args:
      x (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.
        If x is a str, it's considered a filename of an image to find on
        the screen with locateOnScreen() and click the center of.
      y (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
      duration (float, optional): The amount of time it takes to move the mouse
        cursor to the new xy coordinates. If 0, then the mouse cursor is moved
        instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration is not
        0. A linear tween is used by default. See the tweens.py file for details.
      button (str, int, optional): The mouse button clicked. Must be one of
        'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default.
      mouseDownUp (True, False): When true, the mouseUp/Down actions are not
        performed, which allows dragging over multiple (small) actions.
        'True' by default.

    Returns:
      None
    """
    _failSafeCheck()
    x, y = _unpackXY(x, y)
    if mouseDownUp:
        mouseDown(button=button, _pause=False)
    _mouseMoveDrag('drag', x, y, 0, 0, duration, tween, button)
    if mouseDownUp:
        mouseUp(button=button, _pause=False)
    _autoPause(pause, _pause)
Performs a mouse drag (mouse movement while a button is held down) to a point on the screen.

    The x and y parameters detail where the mouse event happens. If None, the
    current mouse position is used. If a float value, it is rounded down. If
    outside the boundaries of the screen, the event happens at the edge of the
    screen.

    Args:
      x (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.
        If x is a str, it's considered a filename of an image to find on
        the screen with locateOnScreen() and click the center of.
      y (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
      duration (float, optional): The amount of time it takes to move the mouse
        cursor to the new xy coordinates. If 0, then the mouse cursor is moved
        instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration is not
        0. A linear tween is used by default. See the tweens.py file for details.
      button (str, int, optional): The mouse button clicked. Must be one of
        'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default.
      mouseDownUp (True, False): When true, the mouseUp/Down actions are not
        performed, which allows dragging over multiple (small) actions.
        'True' by default.

    Returns:
      None
Below is the instruction that describes the task:
### Input:
Performs a mouse drag (mouse movement while a button is held down) to a point on the screen.

    The x and y parameters detail where the mouse event happens. If None, the
    current mouse position is used. If a float value, it is rounded down. If
    outside the boundaries of the screen, the event happens at the edge of the
    screen.

    Args:
      x (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.
        If x is a str, it's considered a filename of an image to find on
        the screen with locateOnScreen() and click the center of.
      y (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
      duration (float, optional): The amount of time it takes to move the mouse
        cursor to the new xy coordinates. If 0, then the mouse cursor is moved
        instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration is not
        0. A linear tween is used by default. See the tweens.py file for details.
      button (str, int, optional): The mouse button clicked. Must be one of
        'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default.
      mouseDownUp (True, False): When true, the mouseUp/Down actions are not
        performed, which allows dragging over multiple (small) actions.
        'True' by default.

    Returns:
      None
### Response:
def dragTo(x=None, y=None, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True):
    """Performs a mouse drag (mouse movement while a button is held down) to a point on the screen.

    The x and y parameters detail where the mouse event happens. If None, the
    current mouse position is used. If a float value, it is rounded down. If
    outside the boundaries of the screen, the event happens at the edge of the
    screen.

    Args:
      x (int, float, None, tuple, optional): How far left (for negative values) or
        right (for positive values) to move the cursor. 0 by default. If tuple, this is used for x and y.
        If x is a str, it's considered a filename of an image to find on
        the screen with locateOnScreen() and click the center of.
      y (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
      duration (float, optional): The amount of time it takes to move the mouse
        cursor to the new xy coordinates. If 0, then the mouse cursor is moved
        instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration is not
        0. A linear tween is used by default. See the tweens.py file for details.
      button (str, int, optional): The mouse button clicked. Must be one of
        'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default.
      mouseDownUp (True, False): When true, the mouseUp/Down actions are not
        performed, which allows dragging over multiple (small) actions.
        'True' by default.

    Returns:
      None
    """
    _failSafeCheck()
    x, y = _unpackXY(x, y)
    if mouseDownUp:
        mouseDown(button=button, _pause=False)
    _mouseMoveDrag('drag', x, y, 0, 0, duration, tween, button)
    if mouseDownUp:
        mouseUp(button=button, _pause=False)
    _autoPause(pause, _pause)
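Usage matching the signature above; the second half exercises the mouseDownUp flag by holding the button across several small drags, as the docstring describes. Coordinates are arbitrary and a real GUI session is required.

import pyautogui

pyautogui.dragTo(400, 300, duration=1.0, button='left')  # press, drag, release

# Chain small drags into one gesture: hold the button across calls.
pyautogui.mouseDown(button='left')
pyautogui.dragTo(450, 300, duration=0.2, mouseDownUp=False)
pyautogui.dragTo(450, 350, duration=0.2, mouseDownUp=False)
pyautogui.mouseUp(button='left')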
def CopyToDateTimeString(self): """Copies the FILETIME timestamp to a date and time string. Returns: str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#######" or None if the timestamp is missing or invalid. """ if (self._timestamp is None or self._timestamp < 0 or self._timestamp > self._UINT64_MAX): return None timestamp, remainder = divmod(self._timestamp, self._100NS_PER_SECOND) number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp) year, month, day_of_month = self._GetDateValuesWithEpoch( number_of_days, self._EPOCH) return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format( year, month, day_of_month, hours, minutes, seconds, remainder)
Copies the FILETIME timestamp to a date and time string. Returns: str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#######" or None if the timestamp is missing or invalid.
Below is the instruction that describes the task:
### Input:
Copies the FILETIME timestamp to a date and time string.

    Returns:
      str: date and time value formatted as:
          "YYYY-MM-DD hh:mm:ss.#######" or
          None if the timestamp is missing or invalid.
### Response:
def CopyToDateTimeString(self):
    """Copies the FILETIME timestamp to a date and time string.

    Returns:
      str: date and time value formatted as:
          "YYYY-MM-DD hh:mm:ss.#######" or
          None if the timestamp is missing or invalid.
    """
    if (self._timestamp is None or self._timestamp < 0 or
            self._timestamp > self._UINT64_MAX):
        return None
    timestamp, remainder = divmod(self._timestamp, self._100NS_PER_SECOND)
    number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp)
    year, month, day_of_month = self._GetDateValuesWithEpoch(
        number_of_days, self._EPOCH)
    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format(
        year, month, day_of_month, hours, minutes, seconds, remainder)
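The same arithmetic checked with only the standard library (a FILETIME is a count of 100-nanosecond ticks since 1601-01-01). This is an equivalence check, not this class's API; the sample tick count is chosen to land on a round date.

from datetime import datetime, timedelta

filetime = 132223104000000000  # 100ns ticks since 1601-01-01
dt = datetime(1601, 1, 1) + timedelta(microseconds=filetime // 10)
print(dt)  # 2020-01-01 00:00:00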
def add_service(self, service):
    """Add a service description.

    Handles the transition from self.service=None, to self.service=dict
    for a single service, and then to self.service=[dict,dict,...] for
    multiple services.
    """
    if (self.service is None):
        self.service = service
    elif (isinstance(self.service, dict)):
        self.service = [self.service, service]
    else:
        self.service.append(service)
Add a service description.

    Handles the transition from self.service=None, to self.service=dict
    for a single service, and then to self.service=[dict,dict,...] for
    multiple services.
Below is the instruction that describes the task:
### Input:
Add a service description.

    Handles the transition from self.service=None, to self.service=dict
    for a single service, and then to self.service=[dict,dict,...] for
    multiple services.
### Response:
def add_service(self, service):
    """Add a service description.

    Handles the transition from self.service=None, to self.service=dict
    for a single service, and then to self.service=[dict,dict,...] for
    multiple services.
    """
    if (self.service is None):
        self.service = service
    elif (isinstance(self.service, dict)):
        self.service = [self.service, service]
    else:
        self.service.append(service)
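The None -> dict -> list transition can be exercised with a plain namespace object standing in for the real host class, since the method only touches self.service:

from types import SimpleNamespace

doc = SimpleNamespace(service=None)
add_service(doc, {'id': 'a'})
print(doc.service)  # {'id': 'a'}
add_service(doc, {'id': 'b'})
print(doc.service)  # [{'id': 'a'}, {'id': 'b'}]
add_service(doc, {'id': 'c'})
print(doc.service)  # [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}]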