Dataset columns:
  code: string, lengths 75 to 104k
  docstring: string, lengths 1 to 46.9k
  text: string, lengths 164 to 112k
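A minimal sketch of how rows with this schema might be loaded and inspected, assuming the dump comes from a Hugging Face-style dataset; the dataset path used below is a hypothetical placeholder, not the actual source.

from datasets import load_dataset  # assumes the `datasets` library is available

# "username/python-code-docstrings" is a placeholder path, not the real dataset name.
ds = load_dataset("username/python-code-docstrings", split="train")
row = ds[0]
print(row["docstring"])   # natural-language summary of the function
print(row["code"])        # the full function definition
print(row["text"][:200])  # instruction-formatted prompt/response text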
def _parse_status(self, output):
    '''
    Unit testing is so much easier when Vagrant is removed from the equation.
    '''
    parsed = self._parse_machine_readable_output(output)
    statuses = []
    # group tuples by target name
    # assuming tuples are sorted by target name, this should group all
    # the tuples with info for each target.
    for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
        # transform tuples into a dict mapping "type" to "data"
        info = {kind: data for timestamp, _, kind, data in tuples}
        status = Status(name=target,
                        state=info.get('state'),
                        provider=info.get('provider-name'))
        statuses.append(status)
    return statuses
Unit testing is so much easier when Vagrant is removed from the equation.
def ref2names2commdct(ref2names, commdct):
    """embed ref2names into commdct"""
    for comm in commdct:
        for cdct in comm:
            try:
                refs = cdct['object-list'][0]
                validobjects = ref2names[refs]
                cdct.update({'validobjects': validobjects})
            except KeyError as e:
                continue
    return commdct
embed ref2names into commdct
def _flush(self, close=False):
    """Flushes remaining output records in the output queues to plasma.

    None is used as special type of record that is propagated from sources
    to sink to notify that the end of data in a stream.

    Attributes:
        close (bool): A flag denoting whether the channel should be
            also marked as 'closed' (True) or not (False) after flushing.
    """
    for channel in self.forward_channels:
        if close is True:
            channel.queue.put_next(None)
        channel.queue._flush_writes()
    for channels in self.shuffle_channels:
        for channel in channels:
            if close is True:
                channel.queue.put_next(None)
            channel.queue._flush_writes()
    for channels in self.shuffle_key_channels:
        for channel in channels:
            if close is True:
                channel.queue.put_next(None)
            channel.queue._flush_writes()
    for channels in self.round_robin_channels:
        for channel in channels:
            if close is True:
                channel.queue.put_next(None)
            channel.queue._flush_writes()
Flushes remaining output records in the output queues to plasma. None is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. Attributes: close (bool): A flag denoting whether the channel should be also marked as 'closed' (True) or not (False) after flushing.
def _find_files(directory):
    """ Find XML files in the directory """
    pattern = "{directory}/*.xml".format(
        directory=directory,
    )
    files = glob(pattern)
    return files
Find XML files in the directory
def get_brightness(self):
    """ Return the average brightness of the image. """
    # Only download the image if it has changed
    if not self.connection.has_changed():
        return self.image_brightness

    image_path = self.connection.download_image()
    converted_image = Image.open(image_path).convert('L')
    statistics = ImageStat.Stat(converted_image)
    self.image_brightness = statistics.mean[0]
    return self.image_brightness
Return the average brightness of the image.
def get_page_square_dpi(pageinfo, options):
    "Get the DPI when we require xres == yres, scaled to physical units"
    xres = pageinfo.xres or 0
    yres = pageinfo.yres or 0
    userunit = pageinfo.userunit or 1
    return float(
        max(
            (xres * userunit) or VECTOR_PAGE_DPI,
            (yres * userunit) or VECTOR_PAGE_DPI,
            VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
            options.oversample or 0,
        )
    )
Get the DPI when we require xres == yres, scaled to physical units
def get_child_directories(path):
    """Return names of immediate child directories"""
    if not _is_valid_directory(path):
        raise exceptions.InvalidDirectory
    entries = os.listdir(path)
    directory_names = []
    for entry in entries:
        abs_entry_path = os.path.join(path, entry)
        if _is_valid_directory(abs_entry_path):
            directory_names.append(entry)
    return directory_names
Return names of immediate child directories
def _sort_lows_and_highs(func):
    "Decorator for extract_cycles"
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        for low, high, mult in func(*args, **kwargs):
            if low < high:
                yield low, high, mult
            else:
                yield high, low, mult
    return wrapper
Decorator for extract_cycles
def send_terrain_data_bit(self, bit):
    '''send some terrain data'''
    lat = self.current_request.lat * 1.0e-7
    lon = self.current_request.lon * 1.0e-7
    bit_spacing = self.current_request.grid_spacing * 4
    (lat, lon) = mp_util.gps_offset(lat, lon,
                                    east=bit_spacing * (bit % 8),
                                    north=bit_spacing * (bit // 8))
    data = []
    for i in range(4*4):
        y = i % 4
        x = i // 4
        (lat2, lon2) = mp_util.gps_offset(lat, lon,
                                          east=self.current_request.grid_spacing * y,
                                          north=self.current_request.grid_spacing * x)
        alt = self.ElevationModel.GetElevation(lat2, lon2)
        if alt is None:
            if self.terrain_settings.debug:
                print("no alt ", lat2, lon2)
            return
        data.append(int(alt))
    self.master.mav.terrain_data_send(self.current_request.lat,
                                      self.current_request.lon,
                                      self.current_request.grid_spacing,
                                      bit,
                                      data)
    self.blocks_sent += 1
    self.last_send_time = time.time()
    self.sent_mask |= 1 << bit

    if self.terrain_settings.debug and bit == 55:
        lat = self.current_request.lat * 1.0e-7
        lon = self.current_request.lon * 1.0e-7
        print("--lat=%f --lon=%f %.1f" % (
            lat, lon, self.ElevationModel.GetElevation(lat, lon)))
        (lat2, lon2) = mp_util.gps_offset(lat, lon,
                                          east=32*self.current_request.grid_spacing,
                                          north=28*self.current_request.grid_spacing)
        print("--lat=%f --lon=%f %.1f" % (
            lat2, lon2, self.ElevationModel.GetElevation(lat2, lon2)))
send some terrain data
def addresses(self):
    """
    Access the addresses

    :returns: twilio.rest.api.v2010.account.address.AddressList
    :rtype: twilio.rest.api.v2010.account.address.AddressList
    """
    if self._addresses is None:
        self._addresses = AddressList(self._version, account_sid=self._solution['sid'], )
    return self._addresses
Access the addresses :returns: twilio.rest.api.v2010.account.address.AddressList :rtype: twilio.rest.api.v2010.account.address.AddressList
def ddl_target_table(self):
    """Optional[TableReference]: Return the DDL target table, present
    for CREATE/DROP TABLE/VIEW queries.

    See:
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.ddlTargetTable
    """
    prop = self._job_statistics().get("ddlTargetTable")
    if prop is not None:
        prop = TableReference.from_api_repr(prop)
    return prop
Optional[TableReference]: Return the DDL target table, present for CREATE/DROP TABLE/VIEW queries. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.ddlTargetTable
def prepare_question_encoder(inputs, hparams):
    """Prepare question encoder.

    Args:
        inputs: a Tensor.
        hparams: run hyperparameters

    Returns:
        encoder_input: a Tensor, bottom of encoder stack
        encoder_self_attention_bias: a bias tensor for use in encoder
            self-attention
    """
    encoder_input = inputs
    # Usual case - not a packed dataset.
    encoder_padding = common_attention.embedding_to_padding(encoder_input)
    ignore_padding = common_attention.attention_bias_ignore_padding(
        encoder_padding)
    encoder_self_attention_bias = ignore_padding
    if hparams.pos == "timing":
        encoder_input = common_attention.add_timing_signal_1d(encoder_input)
    elif hparams.pos == "emb":
        encoder_input = common_attention.add_positional_embedding(
            encoder_input, hparams.max_length, "inputs_positional_embedding",
            None)
    return (encoder_input, encoder_self_attention_bias)
Prepare question encoder. Args: inputs: a Tensor. hparams: run hyperparameters Returns: encoder_input: a Tensor, bottom of encoder stack encoder_self_attention_bias: a bias tensor for use in encoder self-attention
def promote_owner(self, stream_id, user_id):
    ''' promote user to owner in stream '''
    req_hook = 'pod/v1/room/' + stream_id + '/membership/promoteOwner'
    req_args = '{ "id": %s }' % user_id
    status_code, response = self.__rest__.POST_query(req_hook, req_args)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
promote user to owner in stream
def list(cls, session, first_name=None, last_name=None, email=None,
         modified_since=None):
    """List the customers.

    Customers can be filtered on any combination of first name, last name,
    email, and modifiedSince.

    Args:
        session (requests.sessions.Session): Authenticated session.
        first_name (str, optional): First name of customer.
        last_name (str, optional): Last name of customer.
        email (str, optional): Email address of customer.
        modified_since (datetime.datetime, optional): If modified after
            this date.

    Returns:
        RequestPaginator(output_type=helpscout.models.Customer): Customers
            iterator.
    """
    return super(Customers, cls).list(
        session,
        data=cls.__object__.get_non_empty_vals({
            'firstName': first_name,
            'lastName': last_name,
            'email': email,
            'modifiedSince': modified_since,
        })
    )
List the customers. Customers can be filtered on any combination of first name, last name, email, and modifiedSince. Args: session (requests.sessions.Session): Authenticated session. first_name (str, optional): First name of customer. last_name (str, optional): Last name of customer. email (str, optional): Email address of customer. modified_since (datetime.datetime, optional): If modified after this date. Returns: RequestPaginator(output_type=helpscout.models.Customer): Customers iterator.
def process_target(self):
    """Return target with transformations, if any"""
    if isinstance(self.target, str):
        # Replace single and double quotes with escaped single-quote
        self.target = self.target.replace("'", "\'").replace('"', "\'")
        return "\"{target}\"".format(target=self.target)
    return self.target
Return target with transformations, if any
def set_ids(self, set_image_id, image_id, set_parent_id, parent_id):
    """Changes the UUID and parent UUID for a hard disk medium.

    in set_image_id of type bool
        Select whether a new image UUID is set or not.

    in image_id of type str
        New UUID for the image. If an empty string is passed, then a
        new UUID is automatically created, provided that @a setImageId
        is @c true. Specifying a zero UUID is not allowed.

    in set_parent_id of type bool
        Select whether a new parent UUID is set or not.

    in parent_id of type str
        New parent UUID for the image. If an empty string is passed,
        then a new UUID is automatically created, provided @a setParentId
        is @c true. A zero UUID is valid.

    raises :class:`OleErrorInvalidarg`
        Invalid parameter combination.

    raises :class:`VBoxErrorNotSupported`
        Medium is not a hard disk medium.
    """
    if not isinstance(set_image_id, bool):
        raise TypeError("set_image_id can only be an instance of type bool")
    if not isinstance(image_id, basestring):
        raise TypeError("image_id can only be an instance of type basestring")
    if not isinstance(set_parent_id, bool):
        raise TypeError("set_parent_id can only be an instance of type bool")
    if not isinstance(parent_id, basestring):
        raise TypeError("parent_id can only be an instance of type basestring")
    self._call("setIds",
               in_p=[set_image_id, image_id, set_parent_id, parent_id])
Changes the UUID and parent UUID for a hard disk medium. in set_image_id of type bool Select whether a new image UUID is set or not. in image_id of type str New UUID for the image. If an empty string is passed, then a new UUID is automatically created, provided that @a setImageId is @c true. Specifying a zero UUID is not allowed. in set_parent_id of type bool Select whether a new parent UUID is set or not. in parent_id of type str New parent UUID for the image. If an empty string is passed, then a new UUID is automatically created, provided @a setParentId is @c true. A zero UUID is valid. raises :class:`OleErrorInvalidarg` Invalid parameter combination. raises :class:`VBoxErrorNotSupported` Medium is not a hard disk medium.
def randint(nbits: int) -> int:
    """Generate an int with nbits random bits.

    Raises ValueError if nbits <= 0, and TypeError if it's not an integer.

    >>> randint(16)  #doctest:+SKIP
    1871
    """
    if not isinstance(nbits, int):
        raise TypeError('number of bits should be an integer')
    if nbits <= 0:
        raise ValueError('number of bits must be greater than zero')
    # https://github.com/python/cpython/blob/3.6/Lib/random.py#L676
    nbytes = (nbits + 7) // 8  # bits / 8 and rounded up
    num = int.from_bytes(randbytes(nbytes), 'big')
    return num >> (nbytes * 8 - nbits)
Generate an int with nbits random bits. Raises ValueError if nbits <= 0, and TypeError if it's not an integer. >>> randint(16) #doctest:+SKIP 1871
def threshold_monitor_hidden_threshold_monitor_security_pause(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden",
                                             xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
    security = ET.SubElement(threshold_monitor, "security")
    pause = ET.SubElement(security, "pause")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def has_ist_trigger(self):
    """Return True if this BuildConfig has ImageStreamTag trigger."""
    triggers = self.template['spec'].get('triggers', [])
    if not triggers:
        return False
    for trigger in triggers:
        if trigger['type'] == 'ImageChange' and \
                trigger['imageChange']['from']['kind'] == 'ImageStreamTag':
            return True
    return False
Return True if this BuildConfig has ImageStreamTag trigger.
def parse_args():
    """
    Parse the command line arguments
    """
    parser = argparse.ArgumentParser(description="Add random data from YubiHSM to Linux entropy",
                                     add_help=True,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     )
    parser.add_argument('-D', '--device',
                        dest='device',
                        default=default_device,
                        required=False,
                        help='YubiHSM device',
                        )
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='store_true', default=False,
                        help='Enable verbose operation'
                        )
    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true', default=False,
                        help='Enable debug operation'
                        )
    parser.add_argument('-r', '--ratio',
                        dest='ratio',
                        type=int, default=default_entropy_ratio,
                        help='Bits per byte of data read to use as entropy',
                        )
    parser.add_argument('-c', '--count',
                        dest='iterations',
                        type=int, default=default_iterations,
                        help='Number of iterations to run',
                        )
    args = parser.parse_args()
    return args
Parse the command line arguments
def predict_withGradients(self, X):
    """
    Returns the mean, standard deviation, mean gradient and standard
    deviation gradient at X for all the MCMC samples.
    """
    if X.ndim == 1:
        X = X[None, :]
    ps = self.model.param_array.copy()
    means = []
    stds = []
    dmdxs = []
    dsdxs = []
    for s in self.hmc_samples:
        if self.model._fixes_ is None:
            self.model[:] = s
        else:
            self.model[self.model._fixes_] = s
        self.model._trigger_params_changed()
        m, v = self.model.predict(X)
        std = np.sqrt(np.clip(v, 1e-10, np.inf))
        dmdx, dvdx = self.model.predictive_gradients(X)
        dmdx = dmdx[:, :, 0]
        dsdx = dvdx / (2 * std)
        means.append(m)
        stds.append(std)
        dmdxs.append(dmdx)
        dsdxs.append(dsdx)
    self.model.param_array[:] = ps
    self.model._trigger_params_changed()
    return means, stds, dmdxs, dsdxs
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X for all the MCMC samples.
def _wrap_response(request, data=None, metadata=None, status=200):
    """Creates the JSON response envelope to be sent back to the client.
    """
    envelope = metadata or {}

    if data is not None:
        envelope['data'] = data

    return web.Response(
        status=status,
        content_type='application/json',
        text=json.dumps(
            envelope,
            indent=2,
            separators=(',', ': '),
            sort_keys=True))
Creates the JSON response envelope to be sent back to the client.
def certify_enum(value, kind=None, required=True):
    """
    Certifier for enum.

    :param value:
        The value to be certified.
    :param kind:
        The enum type that value should be an instance of.
    :param bool required:
        Whether the value can be `None`. Defaults to True.
    :raises CertifierTypeError:
        The type is invalid
    """
    if certify_required(
        value=value,
        required=required,
    ):
        return

    if not isinstance(value, kind):
        raise CertifierTypeError(
            message="expected {expected!r}, but value is of type {actual!r}".format(
                expected=kind.__name__, actual=value.__class__.__name__),
            value=value,
            required=required,
        )
Certifier for enum. :param value: The value to be certified. :param kind: The enum type that value should be an instance of. :param bool required: Whether the value can be `None`. Defaults to True. :raises CertifierTypeError: The type is invalid
def rubberstamp(rest):
    "Approve something"
    parts = ["Bad credit? No credit? Slow credit?"]
    rest = rest.strip()
    if rest:
        parts.append("%s is" % rest)
        karma.Karma.store.change(rest, 1)
    parts.append("APPROVED!")
    return " ".join(parts)
Approve something
async def merge_imports_tree(cache, imports, target_trees, base_tree=None):
    '''Take an Imports struct and a dictionary of resolved trees and merge
    the unified imports tree. If base_tree is supplied, merge that too.

    There are a couple reasons for structuring this function the way it is:
    - We want to cache merged trees, so that we don't have to do expensive
      git operations just to check whether a module is in cache.
    - We want tree merging to know about target names, so that it can write
      good error messages when there are conflicts.
    - We need to use this for both toplevel imports and recursive module
      imports.
    '''
    key = _cache_key(imports, target_trees, base_tree)
    if key in cache.keyval:
        return cache.keyval[key]
    # We always want to merge imports in the same order, so that any conflicts
    # we run into will be deterministic. Sort the imports alphabetically by
    # target name.
    unified_tree = base_tree or (await cache.get_empty_tree())
    for target, paths in imports.items():
        for path in paths:
            try:
                unified_tree = await cache.merge_trees(
                    unified_tree, target_trees[target], path)
            except MergeConflictError as e:
                message = 'Merge conflict in import "{}" at "{}":\n\n{}'
                e.message = message.format(
                    target, path, textwrap.indent(e.message, ' '))
                raise
    cache.keyval[key] = unified_tree
    return unified_tree
Take an Imports struct and a dictionary of resolved trees and merge the unified imports tree. If base_tree is supplied, merge that too. There are a couple reasons for structuring this function the way it is: - We want to cache merged trees, so that we don't have to do expensive git operations just to check whether a module is in cache. - We want tree merging to know about target names, so that it can write good error messages when there are conflicts. - We need to use this for both toplevel imports and recursive module imports.
def properties(self):
    """
    Property for accessing property (doh!) manager of the current job.

    :return: instance of :class:`yagocd.resources.property.PropertyManager`
    :rtype: yagocd.resources.property.PropertyManager
    """
    return PropertyManager(
        session=self._session,
        pipeline_name=self.pipeline_name,
        pipeline_counter=self.pipeline_counter,
        stage_name=self.stage_name,
        stage_counter=self.stage_counter,
        job_name=self.data.name
    )
Property for accessing property (doh!) manager of the current job. :return: instance of :class:`yagocd.resources.property.PropertyManager` :rtype: yagocd.resources.property.PropertyManager
def visit_subscript(self, node, parent):
    """visit a Subscript node by returning a fresh instance of it"""
    context = self._get_context(node)
    newnode = nodes.Subscript(
        ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent
    )
    newnode.postinit(
        self.visit(node.value, newnode), self.visit(node.slice, newnode)
    )
    return newnode
visit a Subscript node by returning a fresh instance of it
def add_sticker_to_set(self, user_id, name, png_sticker, emojis, mask_position=None):
    """
    Use this method to add a new sticker to a set created by the bot.
    Returns True on success.
    :param user_id:
    :param name:
    :param png_sticker:
    :param emojis:
    :param mask_position:
    :return:
    """
    return apihelper.add_sticker_to_set(self.token, user_id, name, png_sticker,
                                        emojis, mask_position)
Use this method to add a new sticker to a set created by the bot. Returns True on success. :param user_id: :param name: :param png_sticker: :param emojis: :param mask_position: :return:
def assign(self, other):
    """Assign ``other`` to ``self``.

    This is implemented without `FunctionSpace.lincomb` to ensure that
    ``self == other`` evaluates to True after ``self.assign(other)``.
    """
    if other not in self.space:
        raise TypeError('`other` {!r} is not an element of the space '
                        '{} of this function'
                        ''.format(other, self.space))
    self._call_in_place = other._call_in_place
    self._call_out_of_place = other._call_out_of_place
    self._call_has_out = other._call_has_out
    self._call_out_optional = other._call_out_optional
Assign ``other`` to ``self``. This is implemented without `FunctionSpace.lincomb` to ensure that ``self == other`` evaluates to True after ``self.assign(other)``.
def approve(self, peer_jid):
    """
    Approve a subscription request from jid

    Args:
        peer_jid (str): the JID to approve
    """
    self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare())
Approve a subscription request from jid Args: peer_jid (str): the JID to approve
def group_join(self, inner_iterable, outer_key_selector=identity,
               inner_key_selector=identity,
               result_selector=lambda outer, grouping: grouping):
    '''Match elements of two sequences using keys and group the results.

    The group_join() query produces a hierarchical result, with all of the
    inner elements in the result grouped against the matching outer
    element.

    The order of elements from outer is maintained. For each of these the
    order of elements from inner is also preserved.

    Note: This method uses deferred execution.

    Args:
        inner_iterable: The sequence to join with the outer sequence.

        outer_key_selector: An optional unary function to extract keys from
            elements of the outer (source) sequence. The first positional
            argument of the function should accept outer elements and the
            result value should be the key. If omitted, the identity
            function is used.

        inner_key_selector: An optional unary function to extract keys
            from elements of the inner_iterable. The first positional
            argument of the function should accept outer elements and the
            result value should be the key. If omitted, the identity
            function is used.

        result_selector: An optional binary function to create a result
            element from an outer element and the Grouping of matching
            inner elements. The first positional argument is the outer
            elements and the second in the Grouping of inner elements
            which match the outer element according to the key selectors
            used. If omitted, the result elements will be the Groupings
            directly.

    Returns:
        A Queryable over a sequence with one element for each group in the
        result as returned by the result_selector. If the default result
        selector is used, the result is a sequence of Grouping objects.

    Raises:
        ValueError: If the Queryable has been closed.
        TypeError: If the inner_iterable is not in fact iterable.
        TypeError: If the outer_key_selector is not callable.
        TypeError: If the inner_key_selector is not callable.
        TypeError: If the result_selector is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call group_join() on a closed Queryable.")

    if not is_iterable(inner_iterable):
        raise TypeError("Cannot compute group_join() with inner_iterable of non-iterable {type}".format(
            type=str(type(inner_iterable))[7: -1]))

    if not is_callable(outer_key_selector):
        raise TypeError("group_join() parameter outer_key_selector={outer_key_selector} is not callable".format(
            outer_key_selector=repr(outer_key_selector)))

    if not is_callable(inner_key_selector):
        raise TypeError("group_join() parameter inner_key_selector={inner_key_selector} is not callable".format(
            inner_key_selector=repr(inner_key_selector)))

    if not is_callable(result_selector):
        raise TypeError("group_join() parameter result_selector={result_selector} is not callable".format(
            result_selector=repr(result_selector)))

    return self._create(self._generate_group_join_result(inner_iterable, outer_key_selector,
                                                          inner_key_selector, result_selector))
Match elements of two sequences using keys and group the results. The group_join() query produces a hierarchical result, with all of the inner elements in the result grouped against the matching outer element. The order of elements from outer is maintained. For each of these the order of elements from inner is also preserved. Note: This method uses deferred execution. Args: inner_iterable: The sequence to join with the outer sequence. outer_key_selector: An optional unary function to extract keys from elements of the outer (source) sequence. The first positional argument of the function should accept outer elements and the result value should be the key. If omitted, the identity function is used. inner_key_selector: An optional unary function to extract keys from elements of the inner_iterable. The first positional argument of the function should accept outer elements and the result value should be the key. If omitted, the identity function is used. result_selector: An optional binary function to create a result element from an outer element and the Grouping of matching inner elements. The first positional argument is the outer elements and the second in the Grouping of inner elements which match the outer element according to the key selectors used. If omitted, the result elements will be the Groupings directly. Returns: A Queryable over a sequence with one element for each group in the result as returned by the result_selector. If the default result selector is used, the result is a sequence of Grouping objects. Raises: ValueError: If the Queryable has been closed. TypeError: If the inner_iterable is not in fact iterable. TypeError: If the outer_key_selector is not callable. TypeError: If the inner_key_selector is not callable. TypeError: If the result_selector is not callable.
def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):
    """
    Advanced search of track, album or artist.

    See `Search section of Deezer API
    <https://developers.deezer.com/api/search>`_ for search terms.

    :returns: a list of :class:`~deezer.resources.Resource` objects.

    >>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"})
    >>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"},
    ...                        relation="track")
    """
    assert isinstance(terms, dict), "terms must be a dict"
    # terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)
    query = " ".join(sorted(['{}:"{}"'.format(k, v) for (k, v) in terms.items()]))
    return self.get_object(
        "search", relation=relation, q=query, index=index, limit=limit, **kwargs
    )
Advanced search of track, album or artist. See `Search section of Deezer API <https://developers.deezer.com/api/search>`_ for search terms. :returns: a list of :class:`~deezer.resources.Resource` objects. >>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"}) >>> client.advanced_search({"artist": "Daft Punk", "album": "Homework"}, ... relation="track")
def round(self, decimals=0, *args, **kwargs):
    """
    Round each value in Panel to a specified number of decimal places.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    decimals : int
        Number of decimal places to round to (default: 0).
        If decimals is negative, it specifies the number of
        positions to the left of the decimal point.

    Returns
    -------
    Panel object

    See Also
    --------
    numpy.around
    """
    nv.validate_round(args, kwargs)

    if is_integer(decimals):
        result = np.apply_along_axis(np.round, 0, self.values)
        return self._wrap_result(result, axis=0)
    raise TypeError("decimals must be an integer")
Round each value in Panel to a specified number of decimal places. .. versionadded:: 0.18.0 Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Panel object See Also -------- numpy.around
def set_vocabulary(self, peer_id, from_dialogue=None, update=False): """ Gets the vocabulary from the get_vocabulary function and makes it active. """ self.tokens_array = self.get_vocabulary( peer_id, from_dialogue, update ) self.create_base()
Gets the vocabulary from the get_vocabulary function and makes it active.
Below is the the instruction that describes the task: ### Input: Gets the vocabulary from the get_vocabulary function and makes it active. ### Response: def set_vocabulary(self, peer_id, from_dialogue=None, update=False): """ Gets the vocabulary from the get_vocabulary function and makes it active. """ self.tokens_array = self.get_vocabulary( peer_id, from_dialogue, update ) self.create_base()
def nodeChunk(lines): """ Parse NODE Method """ # Constants KEYWORDS = ('NODE', 'X_Y', 'ELEV') result = {'node': None, 'x': None, 'y': None, 'elev': None} chunks = pt.chunk(KEYWORDS, lines) # Parse chunks associated with each key for key, chunkList in iteritems(chunks): # Parse each chunk in the chunk list for chunk in chunkList: schunk = chunk[0].strip().split() if key == 'X_Y': result['x'] = schunk[1] result['y'] = schunk[2] else: result[key.lower()] = schunk[1] return result
Parse NODE Method
Below is the the instruction that describes the task: ### Input: Parse NODE Method ### Response: def nodeChunk(lines): """ Parse NODE Method """ # Constants KEYWORDS = ('NODE', 'X_Y', 'ELEV') result = {'node': None, 'x': None, 'y': None, 'elev': None} chunks = pt.chunk(KEYWORDS, lines) # Parse chunks associated with each key for key, chunkList in iteritems(chunks): # Parse each chunk in the chunk list for chunk in chunkList: schunk = chunk[0].strip().split() if key == 'X_Y': result['x'] = schunk[1] result['y'] = schunk[2] else: result[key.lower()] = schunk[1] return result
def location_nearbystops(self, origin_coord_lat, origin_coord_long): """ location.nearbystops """ response = self._request( 'location.nearbystops', originCoordLat=origin_coord_lat, originCoordLong=origin_coord_long) return _get_node(response, 'LocationList', 'StopLocation')
location.nearbystops
Below is the the instruction that describes the task: ### Input: location.nearbystops ### Response: def location_nearbystops(self, origin_coord_lat, origin_coord_long): """ location.nearbystops """ response = self._request( 'location.nearbystops', originCoordLat=origin_coord_lat, originCoordLong=origin_coord_long) return _get_node(response, 'LocationList', 'StopLocation')
def destroy( self, request, pk=None, parent_lookup_seedteam=None, parent_lookup_seedteam__organization=None): '''Remove a permission from a team.''' self.check_team_permissions( request, parent_lookup_seedteam, parent_lookup_seedteam__organization) return super(TeamPermissionViewSet, self).destroy( request, pk, parent_lookup_seedteam, parent_lookup_seedteam__organization)
Remove a permission from a team.
Below is the the instruction that describes the task: ### Input: Remove a permission from a team. ### Response: def destroy( self, request, pk=None, parent_lookup_seedteam=None, parent_lookup_seedteam__organization=None): '''Remove a permission from a team.''' self.check_team_permissions( request, parent_lookup_seedteam, parent_lookup_seedteam__organization) return super(TeamPermissionViewSet, self).destroy( request, pk, parent_lookup_seedteam, parent_lookup_seedteam__organization)
def tocimxmlstr(self, indent=None, ignore_path=False): """ Return the CIM-XML representation of this CIM instance, as a :term:`unicode string`. *New in pywbem 0.9.* For the returned CIM-XML representation, see :meth:`~pywbem.CIMInstance.tocimxml`. Parameters: indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation of the object, as a :term:`unicode string`. """ xml_elem = self.tocimxml(ignore_path) return tocimxmlstr(xml_elem, indent)
Return the CIM-XML representation of this CIM instance, as a :term:`unicode string`. *New in pywbem 0.9.* For the returned CIM-XML representation, see :meth:`~pywbem.CIMInstance.tocimxml`. Parameters: indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation of the object, as a :term:`unicode string`.
Below is the the instruction that describes the task: ### Input: Return the CIM-XML representation of this CIM instance, as a :term:`unicode string`. *New in pywbem 0.9.* For the returned CIM-XML representation, see :meth:`~pywbem.CIMInstance.tocimxml`. Parameters: indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation of the object, as a :term:`unicode string`. ### Response: def tocimxmlstr(self, indent=None, ignore_path=False): """ Return the CIM-XML representation of this CIM instance, as a :term:`unicode string`. *New in pywbem 0.9.* For the returned CIM-XML representation, see :meth:`~pywbem.CIMInstance.tocimxml`. Parameters: indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. ignore_path (:class:`py:bool`): Ignore the path of the instance, even if a path is specified. Returns: The CIM-XML representation of the object, as a :term:`unicode string`. """ xml_elem = self.tocimxml(ignore_path) return tocimxmlstr(xml_elem, indent)
def send_message(target, data, auth=None, debug=False): """Send a single message to AMQP endpoint. :param target: The target AMQP endpoint. :type target: str, bytes or ~uamqp.address.Target :param data: The contents of the message to send. :type data: str, bytes or ~uamqp.message.Message :param auth: The authentication credentials for the endpoint. This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently this includes: - uamqp.authentication.SASLAnonymous - uamqp.authentication.SASLPlain - uamqp.authentication.SASTokenAuth If no authentication is supplied, SASLAnnoymous will be used by default. :type auth: ~uamqp.authentication.common.AMQPAuth :param debug: Whether to turn on network trace logs. If `True`, trace logs will be logged at INFO level. Default is `False`. :type debug: bool :return: A list of states for each message sent. :rtype: list[~uamqp.constants.MessageState] """ message = data if isinstance(data, Message) else Message(body=data) with SendClient(target, auth=auth, debug=debug) as send_client: send_client.queue_message(message) return send_client.send_all_messages()
Send a single message to AMQP endpoint. :param target: The target AMQP endpoint. :type target: str, bytes or ~uamqp.address.Target :param data: The contents of the message to send. :type data: str, bytes or ~uamqp.message.Message :param auth: The authentication credentials for the endpoint. This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently this includes: - uamqp.authentication.SASLAnonymous - uamqp.authentication.SASLPlain - uamqp.authentication.SASTokenAuth If no authentication is supplied, SASLAnnoymous will be used by default. :type auth: ~uamqp.authentication.common.AMQPAuth :param debug: Whether to turn on network trace logs. If `True`, trace logs will be logged at INFO level. Default is `False`. :type debug: bool :return: A list of states for each message sent. :rtype: list[~uamqp.constants.MessageState]
Below is the the instruction that describes the task: ### Input: Send a single message to AMQP endpoint. :param target: The target AMQP endpoint. :type target: str, bytes or ~uamqp.address.Target :param data: The contents of the message to send. :type data: str, bytes or ~uamqp.message.Message :param auth: The authentication credentials for the endpoint. This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently this includes: - uamqp.authentication.SASLAnonymous - uamqp.authentication.SASLPlain - uamqp.authentication.SASTokenAuth If no authentication is supplied, SASLAnnoymous will be used by default. :type auth: ~uamqp.authentication.common.AMQPAuth :param debug: Whether to turn on network trace logs. If `True`, trace logs will be logged at INFO level. Default is `False`. :type debug: bool :return: A list of states for each message sent. :rtype: list[~uamqp.constants.MessageState] ### Response: def send_message(target, data, auth=None, debug=False): """Send a single message to AMQP endpoint. :param target: The target AMQP endpoint. :type target: str, bytes or ~uamqp.address.Target :param data: The contents of the message to send. :type data: str, bytes or ~uamqp.message.Message :param auth: The authentication credentials for the endpoint. This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently this includes: - uamqp.authentication.SASLAnonymous - uamqp.authentication.SASLPlain - uamqp.authentication.SASTokenAuth If no authentication is supplied, SASLAnnoymous will be used by default. :type auth: ~uamqp.authentication.common.AMQPAuth :param debug: Whether to turn on network trace logs. If `True`, trace logs will be logged at INFO level. Default is `False`. :type debug: bool :return: A list of states for each message sent. :rtype: list[~uamqp.constants.MessageState] """ message = data if isinstance(data, Message) else Message(body=data) with SendClient(target, auth=auth, debug=debug) as send_client: send_client.queue_message(message) return send_client.send_all_messages()
def attribute(self, keys): """ TODO: document this method :param keys: """ if self['meta']['prefix'] is None: k = keys else: k = self['meta']['prefix'] + "." + keys return self.get(k)
TODO: document this method :param keys:
Below is the the instruction that describes the task: ### Input: TODO: document this method :param keys: ### Response: def attribute(self, keys): """ TODO: document this method :param keys: """ if self['meta']['prefix'] is None: k = keys else: k = self['meta']['prefix'] + "." + keys return self.get(k)
def argument(*args, **kwargs): """Decorator to define an argparse option or argument. The arguments to this decorator are the same as the `ArgumentParser.add_argument <https://docs.python.org/3/library/\ argparse.html#the-add-argument-method>`_ method. """ def decorator(f): if not hasattr(f, '_arguments'): f._arguments = [] if not hasattr(f, '_argnames'): f._argnames = [] f._arguments.append((args, kwargs)) f._argnames.append(_get_dest(*args, **kwargs)) return f return decorator
Decorator to define an argparse option or argument. The arguments to this decorator are the same as the `ArgumentParser.add_argument <https://docs.python.org/3/library/\ argparse.html#the-add-argument-method>`_ method.
Below is the the instruction that describes the task: ### Input: Decorator to define an argparse option or argument. The arguments to this decorator are the same as the `ArgumentParser.add_argument <https://docs.python.org/3/library/\ argparse.html#the-add-argument-method>`_ method. ### Response: def argument(*args, **kwargs): """Decorator to define an argparse option or argument. The arguments to this decorator are the same as the `ArgumentParser.add_argument <https://docs.python.org/3/library/\ argparse.html#the-add-argument-method>`_ method. """ def decorator(f): if not hasattr(f, '_arguments'): f._arguments = [] if not hasattr(f, '_argnames'): f._argnames = [] f._arguments.append((args, kwargs)) f._argnames.append(_get_dest(*args, **kwargs)) return f return decorator
def read_float_matrix(rx_specifier): """ Return float matrix as np array for the given rx specifier. """ path, offset = rx_specifier.strip().split(':', maxsplit=1) offset = int(offset) sample_format = 4 with open(path, 'rb') as f: # move to offset f.seek(offset) # assert binary ark binary = f.read(2) assert (binary == b'\x00B') # assert type float 32 format = f.read(3) assert (format == b'FM ') # get number of mfcc features f.read(1) num_frames = struct.unpack('<i', f.read(4))[0] # get size of mfcc features f.read(1) feature_size = struct.unpack('<i', f.read(4))[0] # read feature data data = f.read(num_frames * feature_size * sample_format) feature_vector = np.frombuffer(data, dtype='float32') feature_matrix = np.reshape(feature_vector, (num_frames, feature_size)) return feature_matrix
Return float matrix as np array for the given rx specifier.
Below is the the instruction that describes the task: ### Input: Return float matrix as np array for the given rx specifier. ### Response: def read_float_matrix(rx_specifier): """ Return float matrix as np array for the given rx specifier. """ path, offset = rx_specifier.strip().split(':', maxsplit=1) offset = int(offset) sample_format = 4 with open(path, 'rb') as f: # move to offset f.seek(offset) # assert binary ark binary = f.read(2) assert (binary == b'\x00B') # assert type float 32 format = f.read(3) assert (format == b'FM ') # get number of mfcc features f.read(1) num_frames = struct.unpack('<i', f.read(4))[0] # get size of mfcc features f.read(1) feature_size = struct.unpack('<i', f.read(4))[0] # read feature data data = f.read(num_frames * feature_size * sample_format) feature_vector = np.frombuffer(data, dtype='float32') feature_matrix = np.reshape(feature_vector, (num_frames, feature_size)) return feature_matrix
def get_tree_info(decision_tree, feature_names=None, **export_graphviz_kwargs): # type: (...) -> TreeInfo """ Convert DecisionTreeClassifier or DecisionTreeRegressor to an inspectable object. """ return TreeInfo( criterion=decision_tree.criterion, tree=_get_root_node_info(decision_tree, feature_names), graphviz=tree2dot(decision_tree, feature_names=feature_names, **export_graphviz_kwargs), is_classification=isinstance(decision_tree, ClassifierMixin), )
Convert DecisionTreeClassifier or DecisionTreeRegressor to an inspectable object.
Below is the the instruction that describes the task: ### Input: Convert DecisionTreeClassifier or DecisionTreeRegressor to an inspectable object. ### Response: def get_tree_info(decision_tree, feature_names=None, **export_graphviz_kwargs): # type: (...) -> TreeInfo """ Convert DecisionTreeClassifier or DecisionTreeRegressor to an inspectable object. """ return TreeInfo( criterion=decision_tree.criterion, tree=_get_root_node_info(decision_tree, feature_names), graphviz=tree2dot(decision_tree, feature_names=feature_names, **export_graphviz_kwargs), is_classification=isinstance(decision_tree, ClassifierMixin), )
def setup(self): ''' Make sure the monitor is ready for fuzzing ''' super(BaseMonitor, self).setup() self.monitor_thread = LoopFuncThread(self._monitor_func) self.monitor_thread.start()
Make sure the monitor is ready for fuzzing
Below is the the instruction that describes the task: ### Input: Make sure the monitor is ready for fuzzing ### Response: def setup(self): ''' Make sure the monitor is ready for fuzzing ''' super(BaseMonitor, self).setup() self.monitor_thread = LoopFuncThread(self._monitor_func) self.monitor_thread.start()
def get_help_datapacks(module_name, server_prefix): """ Get the help datapacks for a module Args: module_name (str): The module to get help data for server_prefix (str): The command prefix for this server Returns: datapacks (list): The help datapacks for the module """ _dir = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) module_dir = "{}/../{}".format(_dir, module_name, "_help.json") if os.path.isdir(module_dir): module_help_path = "{}/{}".format(module_dir, "_help.json") if os.path.isfile(module_help_path): return helptools.get_help_datapacks(module_help_path, server_prefix) else: return [("Help", "{} does not have a help.json file".format(module_name), False)] else: return [("Help", "No module found called {}".format(module_name), False)]
Get the help datapacks for a module Args: module_name (str): The module to get help data for server_prefix (str): The command prefix for this server Returns: datapacks (list): The help datapacks for the module
Below is the the instruction that describes the task: ### Input: Get the help datapacks for a module Args: module_name (str): The module to get help data for server_prefix (str): The command prefix for this server Returns: datapacks (list): The help datapacks for the module ### Response: def get_help_datapacks(module_name, server_prefix): """ Get the help datapacks for a module Args: module_name (str): The module to get help data for server_prefix (str): The command prefix for this server Returns: datapacks (list): The help datapacks for the module """ _dir = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) module_dir = "{}/../{}".format(_dir, module_name, "_help.json") if os.path.isdir(module_dir): module_help_path = "{}/{}".format(module_dir, "_help.json") if os.path.isfile(module_help_path): return helptools.get_help_datapacks(module_help_path, server_prefix) else: return [("Help", "{} does not have a help.json file".format(module_name), False)] else: return [("Help", "No module found called {}".format(module_name), False)]
def _get_ncbi_db_url(): """returns NCBI DB URL based on environment variables and code version * if NCBI_DB_URL is set, use that * Otherwise, if _NCBI_URL_KEY is set, use that as the name of a config file entry and use the corresponding URL * Otherwise, """ if "NCBI_DB_URL" in os.environ: return os.environ["NCBI_DB_URL"] if "_NCBI_URL_KEY" in os.environ: url_key = os.environ["_NCBI_URL_KEY"] else: sdlc = _stage_from_version(hgvs.__version__) url_key = "public_{sdlc}".format(sdlc=sdlc) return hgvs.global_config['NCBI'][url_key]
returns NCBI DB URL based on environment variables and code version * if NCBI_DB_URL is set, use that * Otherwise, if _NCBI_URL_KEY is set, use that as the name of a config file entry and use the corresponding URL * Otherwise,
Below is the the instruction that describes the task: ### Input: returns NCBI DB URL based on environment variables and code version * if NCBI_DB_URL is set, use that * Otherwise, if _NCBI_URL_KEY is set, use that as the name of a config file entry and use the corresponding URL * Otherwise, ### Response: def _get_ncbi_db_url(): """returns NCBI DB URL based on environment variables and code version * if NCBI_DB_URL is set, use that * Otherwise, if _NCBI_URL_KEY is set, use that as the name of a config file entry and use the corresponding URL * Otherwise, """ if "NCBI_DB_URL" in os.environ: return os.environ["NCBI_DB_URL"] if "_NCBI_URL_KEY" in os.environ: url_key = os.environ["_NCBI_URL_KEY"] else: sdlc = _stage_from_version(hgvs.__version__) url_key = "public_{sdlc}".format(sdlc=sdlc) return hgvs.global_config['NCBI'][url_key]
def assets2s3(): """ Upload assets files to S3 """ import flask_s3 header("Assets2S3...") print("") print("Building assets files..." ) print("") build_assets(application.app) print("") print("Uploading assets files to S3 ...") flask_s3.create_all(application.app) print("")
Upload assets files to S3
Below is the the instruction that describes the task: ### Input: Upload assets files to S3 ### Response: def assets2s3(): """ Upload assets files to S3 """ import flask_s3 header("Assets2S3...") print("") print("Building assets files..." ) print("") build_assets(application.app) print("") print("Uploading assets files to S3 ...") flask_s3.create_all(application.app) print("")
def mac_address(self): """Returns the MAC address of the network interface. If multiple interfaces are provided, the address of the first found is returned. """ if self._mac_address is None: self._mac_address = self._get_mac_address() return self._mac_address
Returns the MAC address of the network interface. If multiple interfaces are provided, the address of the first found is returned.
Below is the the instruction that describes the task: ### Input: Returns the MAC address of the network interface. If multiple interfaces are provided, the address of the first found is returned. ### Response: def mac_address(self): """Returns the MAC address of the network interface. If multiple interfaces are provided, the address of the first found is returned. """ if self._mac_address is None: self._mac_address = self._get_mac_address() return self._mac_address
def instrs_to_body(instrs, context): """ Convert a list of Instruction objects to a list of AST body nodes. """ stack = [] body = [] process_instrs(instrs, stack, body, context) if stack: raise DecompilationError( "Non-empty stack at the end of instrs_to_body(): %s." % stack ) return body
Convert a list of Instruction objects to a list of AST body nodes.
Below is the the instruction that describes the task: ### Input: Convert a list of Instruction objects to a list of AST body nodes. ### Response: def instrs_to_body(instrs, context): """ Convert a list of Instruction objects to a list of AST body nodes. """ stack = [] body = [] process_instrs(instrs, stack, body, context) if stack: raise DecompilationError( "Non-empty stack at the end of instrs_to_body(): %s." % stack ) return body
def add_pending_model_content(cursor, publication_id, model): """Updates the pending model's content. This is a secondary step not in ``add_pending_model, because content reference resolution requires the identifiers as they will appear in the end publication. """ cursor.execute("""\ SELECT id, ident_hash(uuid, major_version, minor_version) FROM pending_documents WHERE publication_id = %s AND uuid = %s""", (publication_id, model.id,)) document_info = cursor.fetchone() def attach_info_to_exception(exc): """Small cached function to grab the pending document id and hash to attach to the exception, which is useful when reading the json data on a response. """ exc.publication_id = publication_id exc.pending_document_id, exc.pending_ident_hash = document_info def mark_invalid_reference(reference): """Set the publication to failure and attach invalid reference to the publication. """ exc = exceptions.InvalidReference(reference) attach_info_to_exception(exc) set_publication_failure(cursor, exc) for resource in getattr(model, 'resources', []): add_pending_resource(cursor, resource, document=model) if isinstance(model, cnxepub.Document): for reference in model.references: if reference.is_bound: reference.bind(reference.bound_model, '/resources/{}') elif reference.remote_type == cnxepub.INTERNAL_REFERENCE_TYPE: if reference.uri.startswith('#'): pass elif reference.uri.startswith('/contents'): ident_hash = parse_archive_uri(reference.uri) try: doc_pointer = lookup_document_pointer( ident_hash, cursor) except DocumentLookupError: mark_invalid_reference(reference) else: reference.bind(doc_pointer, "/contents/{}") else: mark_invalid_reference(reference) # else, it's a remote or cnx.org reference ...Do nothing. args = (psycopg2.Binary(model.content.encode('utf-8')), publication_id, model.id,) stmt = """\ UPDATE "pending_documents" SET ("content") = (%s) WHERE "publication_id" = %s AND "uuid" = %s""" else: metadata = model.metadata.copy() # All document pointers in the tree are valid? document_pointers = [m for m in cnxepub.flatten_model(model) if isinstance(m, cnxepub.DocumentPointer)] document_pointer_ident_hashes = [ (split_ident_hash(dp.ident_hash)[0], split_ident_hash(dp.ident_hash, split_version=True)[1][0], split_ident_hash(dp.ident_hash, split_version=True)[1][1],) # split_ident_hash(dp.ident_hash, split_version=True)[1][0],) for dp in document_pointers] document_pointer_ident_hashes = zip(*document_pointer_ident_hashes) if document_pointers: uuids, major_vers, minor_vers = document_pointer_ident_hashes cursor.execute("""\ SELECT dp.uuid, module_version(dp.maj_ver, dp.min_ver) AS version, dp.uuid = m.uuid AS exists, m.portal_type = 'Module' AS is_document FROM (SELECT unnest(%s::uuid[]), unnest(%s::integer[]), unnest(%s::integer[]))\ AS dp(uuid, maj_ver, min_ver) LEFT JOIN modules AS m ON dp.uuid = m.uuid AND \ (dp.maj_ver = m.major_version OR dp.maj_ver is null)""", (list(uuids), list(major_vers), list(minor_vers),)) valid_pointer_results = cursor.fetchall() for result_row in valid_pointer_results: uuid, version, exists, is_document = result_row if not (exists and is_document): dp = [dp for dp in document_pointers if dp.ident_hash == join_ident_hash(uuid, version) ][0] exc = exceptions.InvalidDocumentPointer( dp, exists=exists, is_document=is_document) attach_info_to_exception(exc) set_publication_failure(cursor, exc) # Insert the tree into the metadata. metadata['_tree'] = cnxepub.model_to_tree(model) args = (json.dumps(metadata), None, # TODO Render the HTML tree at ``model.content``. publication_id, model.id,) # Must pave over metadata because postgresql lacks built-in # json update functions. stmt = """\ UPDATE "pending_documents" SET ("metadata", "content") = (%s, %s) WHERE "publication_id" = %s AND "uuid" = %s""" cursor.execute(stmt, args)
Updates the pending model's content. This is a secondary step not in ``add_pending_model, because content reference resolution requires the identifiers as they will appear in the end publication.
Below is the the instruction that describes the task: ### Input: Updates the pending model's content. This is a secondary step not in ``add_pending_model, because content reference resolution requires the identifiers as they will appear in the end publication. ### Response: def add_pending_model_content(cursor, publication_id, model): """Updates the pending model's content. This is a secondary step not in ``add_pending_model, because content reference resolution requires the identifiers as they will appear in the end publication. """ cursor.execute("""\ SELECT id, ident_hash(uuid, major_version, minor_version) FROM pending_documents WHERE publication_id = %s AND uuid = %s""", (publication_id, model.id,)) document_info = cursor.fetchone() def attach_info_to_exception(exc): """Small cached function to grab the pending document id and hash to attach to the exception, which is useful when reading the json data on a response. """ exc.publication_id = publication_id exc.pending_document_id, exc.pending_ident_hash = document_info def mark_invalid_reference(reference): """Set the publication to failure and attach invalid reference to the publication. """ exc = exceptions.InvalidReference(reference) attach_info_to_exception(exc) set_publication_failure(cursor, exc) for resource in getattr(model, 'resources', []): add_pending_resource(cursor, resource, document=model) if isinstance(model, cnxepub.Document): for reference in model.references: if reference.is_bound: reference.bind(reference.bound_model, '/resources/{}') elif reference.remote_type == cnxepub.INTERNAL_REFERENCE_TYPE: if reference.uri.startswith('#'): pass elif reference.uri.startswith('/contents'): ident_hash = parse_archive_uri(reference.uri) try: doc_pointer = lookup_document_pointer( ident_hash, cursor) except DocumentLookupError: mark_invalid_reference(reference) else: reference.bind(doc_pointer, "/contents/{}") else: mark_invalid_reference(reference) # else, it's a remote or cnx.org reference ...Do nothing. args = (psycopg2.Binary(model.content.encode('utf-8')), publication_id, model.id,) stmt = """\ UPDATE "pending_documents" SET ("content") = (%s) WHERE "publication_id" = %s AND "uuid" = %s""" else: metadata = model.metadata.copy() # All document pointers in the tree are valid? document_pointers = [m for m in cnxepub.flatten_model(model) if isinstance(m, cnxepub.DocumentPointer)] document_pointer_ident_hashes = [ (split_ident_hash(dp.ident_hash)[0], split_ident_hash(dp.ident_hash, split_version=True)[1][0], split_ident_hash(dp.ident_hash, split_version=True)[1][1],) # split_ident_hash(dp.ident_hash, split_version=True)[1][0],) for dp in document_pointers] document_pointer_ident_hashes = zip(*document_pointer_ident_hashes) if document_pointers: uuids, major_vers, minor_vers = document_pointer_ident_hashes cursor.execute("""\ SELECT dp.uuid, module_version(dp.maj_ver, dp.min_ver) AS version, dp.uuid = m.uuid AS exists, m.portal_type = 'Module' AS is_document FROM (SELECT unnest(%s::uuid[]), unnest(%s::integer[]), unnest(%s::integer[]))\ AS dp(uuid, maj_ver, min_ver) LEFT JOIN modules AS m ON dp.uuid = m.uuid AND \ (dp.maj_ver = m.major_version OR dp.maj_ver is null)""", (list(uuids), list(major_vers), list(minor_vers),)) valid_pointer_results = cursor.fetchall() for result_row in valid_pointer_results: uuid, version, exists, is_document = result_row if not (exists and is_document): dp = [dp for dp in document_pointers if dp.ident_hash == join_ident_hash(uuid, version) ][0] exc = exceptions.InvalidDocumentPointer( dp, exists=exists, is_document=is_document) attach_info_to_exception(exc) set_publication_failure(cursor, exc) # Insert the tree into the metadata. metadata['_tree'] = cnxepub.model_to_tree(model) args = (json.dumps(metadata), None, # TODO Render the HTML tree at ``model.content``. publication_id, model.id,) # Must pave over metadata because postgresql lacks built-in # json update functions. stmt = """\ UPDATE "pending_documents" SET ("metadata", "content") = (%s, %s) WHERE "publication_id" = %s AND "uuid" = %s""" cursor.execute(stmt, args)
def getData(self): """Get data of the GitHub user.""" url = self.server + self.name data = GitHubUser.__getDataFromURL(url) web = BeautifulSoup(data, "lxml") self.__getContributions(web) self.__getLocation(web) self.__getAvatar(web) self.__getNumberOfRepositories(web) self.__getNumberOfFollowers(web) self.__getBio(web) self.__getJoin(web) self.__getOrganizations(web)
Get data of the GitHub user.
Below is the the instruction that describes the task: ### Input: Get data of the GitHub user. ### Response: def getData(self): """Get data of the GitHub user.""" url = self.server + self.name data = GitHubUser.__getDataFromURL(url) web = BeautifulSoup(data, "lxml") self.__getContributions(web) self.__getLocation(web) self.__getAvatar(web) self.__getNumberOfRepositories(web) self.__getNumberOfFollowers(web) self.__getBio(web) self.__getJoin(web) self.__getOrganizations(web)
def import_identity(self, label: str, encrypted_pri_key: str, pwd: str, salt: str, b58_address: str) -> Identity: """ This interface is used to import identity by providing encrypted private key, password, salt and base58 encode address which should be correspond to the encrypted private key provided. :param label: a label for identity. :param encrypted_pri_key: an encrypted private key in base64 encoding from. :param pwd: a password which is used to encrypt and decrypt the private key. :param salt: a salt value which will be used in the process of encrypt private key. :param b58_address: a base58 encode address which correspond with the encrypted private key provided. :return: if succeed, an Identity object will be returned. """ scrypt_n = Scrypt().n pri_key = Account.get_gcm_decoded_private_key(encrypted_pri_key, pwd, b58_address, salt, scrypt_n, self.scheme) info = self.__create_identity(label, pwd, salt, pri_key) for identity in self.wallet_in_mem.identities: if identity.ont_id == info.ont_id: return identity raise SDKException(ErrorCode.other_error('Import identity failed.'))
This interface is used to import identity by providing encrypted private key, password, salt and base58 encode address which should be correspond to the encrypted private key provided. :param label: a label for identity. :param encrypted_pri_key: an encrypted private key in base64 encoding from. :param pwd: a password which is used to encrypt and decrypt the private key. :param salt: a salt value which will be used in the process of encrypt private key. :param b58_address: a base58 encode address which correspond with the encrypted private key provided. :return: if succeed, an Identity object will be returned.
Below is the the instruction that describes the task: ### Input: This interface is used to import identity by providing encrypted private key, password, salt and base58 encode address which should be correspond to the encrypted private key provided. :param label: a label for identity. :param encrypted_pri_key: an encrypted private key in base64 encoding from. :param pwd: a password which is used to encrypt and decrypt the private key. :param salt: a salt value which will be used in the process of encrypt private key. :param b58_address: a base58 encode address which correspond with the encrypted private key provided. :return: if succeed, an Identity object will be returned. ### Response: def import_identity(self, label: str, encrypted_pri_key: str, pwd: str, salt: str, b58_address: str) -> Identity: """ This interface is used to import identity by providing encrypted private key, password, salt and base58 encode address which should be correspond to the encrypted private key provided. :param label: a label for identity. :param encrypted_pri_key: an encrypted private key in base64 encoding from. :param pwd: a password which is used to encrypt and decrypt the private key. :param salt: a salt value which will be used in the process of encrypt private key. :param b58_address: a base58 encode address which correspond with the encrypted private key provided. :return: if succeed, an Identity object will be returned. """ scrypt_n = Scrypt().n pri_key = Account.get_gcm_decoded_private_key(encrypted_pri_key, pwd, b58_address, salt, scrypt_n, self.scheme) info = self.__create_identity(label, pwd, salt, pri_key) for identity in self.wallet_in_mem.identities: if identity.ont_id == info.ont_id: return identity raise SDKException(ErrorCode.other_error('Import identity failed.'))
def ensure_active_group(self): """Ensure that the group is active (i.e. joined and synced)""" with self._client._lock, self._lock: if self._heartbeat_thread is None: self._start_heartbeat_thread() while self.need_rejoin() or self._rejoin_incomplete(): self.ensure_coordinator_ready() # call on_join_prepare if needed. We set a flag # to make sure that we do not call it a second # time if the client is woken up before a pending # rebalance completes. This must be called on each # iteration of the loop because an event requiring # a rebalance (such as a metadata refresh which # changes the matched subscription set) can occur # while another rebalance is still in progress. if not self.rejoining: self._on_join_prepare(self._generation.generation_id, self._generation.member_id) self.rejoining = True # ensure that there are no pending requests to the coordinator. # This is important in particular to avoid resending a pending # JoinGroup request. while not self.coordinator_unknown(): if not self._client.in_flight_request_count(self.coordinator_id): break self._client.poll() else: continue # we store the join future in case we are woken up by the user # after beginning the rebalance in the call to poll below. # This ensures that we do not mistakenly attempt to rejoin # before the pending rebalance has completed. if self.join_future is None: # Fence off the heartbeat thread explicitly so that it cannot # interfere with the join group. Note that this must come after # the call to _on_join_prepare since we must be able to continue # sending heartbeats if that callback takes some time. self._heartbeat_thread.disable() self.state = MemberState.REBALANCING future = self._send_join_group_request() self.join_future = future # this should happen before adding callbacks # handle join completion in the callback so that the # callback will be invoked even if the consumer is woken up # before finishing the rebalance future.add_callback(self._handle_join_success) # we handle failures below after the request finishes. # If the join completes after having been woken up, the # exception is ignored and we will rejoin future.add_errback(self._handle_join_failure) else: future = self.join_future self._client.poll(future=future) if future.succeeded(): self._on_join_complete(self._generation.generation_id, self._generation.member_id, self._generation.protocol, future.value) self.join_future = None self.rejoining = False else: self.join_future = None exception = future.exception if isinstance(exception, (Errors.UnknownMemberIdError, Errors.RebalanceInProgressError, Errors.IllegalGenerationError)): continue elif not future.retriable(): raise exception # pylint: disable-msg=raising-bad-type time.sleep(self.config['retry_backoff_ms'] / 1000)
Ensure that the group is active (i.e. joined and synced)
Below is the the instruction that describes the task: ### Input: Ensure that the group is active (i.e. joined and synced) ### Response: def ensure_active_group(self): """Ensure that the group is active (i.e. joined and synced)""" with self._client._lock, self._lock: if self._heartbeat_thread is None: self._start_heartbeat_thread() while self.need_rejoin() or self._rejoin_incomplete(): self.ensure_coordinator_ready() # call on_join_prepare if needed. We set a flag # to make sure that we do not call it a second # time if the client is woken up before a pending # rebalance completes. This must be called on each # iteration of the loop because an event requiring # a rebalance (such as a metadata refresh which # changes the matched subscription set) can occur # while another rebalance is still in progress. if not self.rejoining: self._on_join_prepare(self._generation.generation_id, self._generation.member_id) self.rejoining = True # ensure that there are no pending requests to the coordinator. # This is important in particular to avoid resending a pending # JoinGroup request. while not self.coordinator_unknown(): if not self._client.in_flight_request_count(self.coordinator_id): break self._client.poll() else: continue # we store the join future in case we are woken up by the user # after beginning the rebalance in the call to poll below. # This ensures that we do not mistakenly attempt to rejoin # before the pending rebalance has completed. if self.join_future is None: # Fence off the heartbeat thread explicitly so that it cannot # interfere with the join group. Note that this must come after # the call to _on_join_prepare since we must be able to continue # sending heartbeats if that callback takes some time. self._heartbeat_thread.disable() self.state = MemberState.REBALANCING future = self._send_join_group_request() self.join_future = future # this should happen before adding callbacks # handle join completion in the callback so that the # callback will be invoked even if the consumer is woken up # before finishing the rebalance future.add_callback(self._handle_join_success) # we handle failures below after the request finishes. # If the join completes after having been woken up, the # exception is ignored and we will rejoin future.add_errback(self._handle_join_failure) else: future = self.join_future self._client.poll(future=future) if future.succeeded(): self._on_join_complete(self._generation.generation_id, self._generation.member_id, self._generation.protocol, future.value) self.join_future = None self.rejoining = False else: self.join_future = None exception = future.exception if isinstance(exception, (Errors.UnknownMemberIdError, Errors.RebalanceInProgressError, Errors.IllegalGenerationError)): continue elif not future.retriable(): raise exception # pylint: disable-msg=raising-bad-type time.sleep(self.config['retry_backoff_ms'] / 1000)
def load_local_dataset(self, ds_str): ''' Returns a dataset instance for the local resource :param ds_str: Path to the resource ''' if cdl.is_cdl(ds_str): ds_str = self.generate_dataset(ds_str) if netcdf.is_netcdf(ds_str): return MemoizedDataset(ds_str) # Assume this is just a Generic File if it exists if os.path.isfile(ds_str): return GenericFile(ds_str) raise ValueError("File is an unknown format")
Returns a dataset instance for the local resource :param ds_str: Path to the resource
Below is the the instruction that describes the task: ### Input: Returns a dataset instance for the local resource :param ds_str: Path to the resource ### Response: def load_local_dataset(self, ds_str): ''' Returns a dataset instance for the local resource :param ds_str: Path to the resource ''' if cdl.is_cdl(ds_str): ds_str = self.generate_dataset(ds_str) if netcdf.is_netcdf(ds_str): return MemoizedDataset(ds_str) # Assume this is just a Generic File if it exists if os.path.isfile(ds_str): return GenericFile(ds_str) raise ValueError("File is an unknown format")
def is_parent_of_repository(self, id_, repository_id): """Tests if an ``Id`` is a direct parent of a repository. arg: id (osid.id.Id): an ``Id`` arg: repository_id (osid.id.Id): the ``Id`` of a repository return: (boolean) - ``true`` if this ``id`` is a parent of ``repository_id,`` ``false`` otherwise raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``id`` or ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_parent_of_bin if self._catalog_session is not None: return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=repository_id) return self._hierarchy_session.is_parent(id_=repository_id, parent_id=id_)
Tests if an ``Id`` is a direct parent of a repository. arg: id (osid.id.Id): an ``Id`` arg: repository_id (osid.id.Id): the ``Id`` of a repository return: (boolean) - ``true`` if this ``id`` is a parent of ``repository_id,`` ``false`` otherwise raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``id`` or ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
Below is the the instruction that describes the task: ### Input: Tests if an ``Id`` is a direct parent of a repository. arg: id (osid.id.Id): an ``Id`` arg: repository_id (osid.id.Id): the ``Id`` of a repository return: (boolean) - ``true`` if this ``id`` is a parent of ``repository_id,`` ``false`` otherwise raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``id`` or ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. ### Response: def is_parent_of_repository(self, id_, repository_id): """Tests if an ``Id`` is a direct parent of a repository. arg: id (osid.id.Id): an ``Id`` arg: repository_id (osid.id.Id): the ``Id`` of a repository return: (boolean) - ``true`` if this ``id`` is a parent of ``repository_id,`` ``false`` otherwise raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``id`` or ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_parent_of_bin if self._catalog_session is not None: return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=repository_id) return self._hierarchy_session.is_parent(id_=repository_id, parent_id=id_)
def intr_write(self, dev_handle, ep, intf, data, timeout): r"""Perform an interrupt write. dev_handle is the value returned by the open_device() method. The ep parameter is the bEndpointAddress field whose endpoint the data will be sent to. intf is the bInterfaceNumber field of the interface containing the endpoint. The data parameter is the data to be sent. It must be an instance of the array.array class. The timeout parameter specifies a time limit to the operation in miliseconds. The method returns the number of bytes written. """ _not_implemented(self.intr_write)
r"""Perform an interrupt write. dev_handle is the value returned by the open_device() method. The ep parameter is the bEndpointAddress field whose endpoint the data will be sent to. intf is the bInterfaceNumber field of the interface containing the endpoint. The data parameter is the data to be sent. It must be an instance of the array.array class. The timeout parameter specifies a time limit to the operation in miliseconds. The method returns the number of bytes written.
Below is the the instruction that describes the task: ### Input: r"""Perform an interrupt write. dev_handle is the value returned by the open_device() method. The ep parameter is the bEndpointAddress field whose endpoint the data will be sent to. intf is the bInterfaceNumber field of the interface containing the endpoint. The data parameter is the data to be sent. It must be an instance of the array.array class. The timeout parameter specifies a time limit to the operation in miliseconds. The method returns the number of bytes written. ### Response: def intr_write(self, dev_handle, ep, intf, data, timeout): r"""Perform an interrupt write. dev_handle is the value returned by the open_device() method. The ep parameter is the bEndpointAddress field whose endpoint the data will be sent to. intf is the bInterfaceNumber field of the interface containing the endpoint. The data parameter is the data to be sent. It must be an instance of the array.array class. The timeout parameter specifies a time limit to the operation in miliseconds. The method returns the number of bytes written. """ _not_implemented(self.intr_write)
def trans_new(name, transform, inverse, breaks=None, minor_breaks=None, _format=None, domain=(-np.inf, np.inf), doc='', **kwargs): """ Create a transformation class object Parameters ---------- name : str Name of the transformation transform : callable ``f(x)`` A function (preferably a `ufunc`) that computes the transformation. inverse : callable ``f(x)`` A function (preferably a `ufunc`) that computes the inverse of the transformation. breaks : callable ``f(limits)`` Function to compute the breaks for this transform. If None, then a default good enough for a linear domain is used. minor_breaks : callable ``f(major, limits)`` Function to compute the minor breaks for this transform. If None, then a default good enough for a linear domain is used. _format : callable ``f(breaks)`` Function to format the generated breaks. domain : array_like Domain over which the transformation is valid. It should be of length 2. doc : str Docstring for the class. **kwargs : dict Attributes of the transform, e.g if base is passed in kwargs, then `t.base` would be a valied attribute. Returns ------- out : trans Transform class """ def _get(func): if isinstance(func, (classmethod, staticmethod, MethodType)): return func else: return staticmethod(func) klass_name = '{}_trans'.format(name) d = {'transform': _get(transform), 'inverse': _get(inverse), 'domain': domain, '__doc__': doc, **kwargs} if breaks: d['breaks_'] = _get(breaks) if minor_breaks: d['minor_breaks'] = _get(minor_breaks) if _format: d['format'] = _get(_format) return type(klass_name, (trans,), d)
Create a transformation class object Parameters ---------- name : str Name of the transformation transform : callable ``f(x)`` A function (preferably a `ufunc`) that computes the transformation. inverse : callable ``f(x)`` A function (preferably a `ufunc`) that computes the inverse of the transformation. breaks : callable ``f(limits)`` Function to compute the breaks for this transform. If None, then a default good enough for a linear domain is used. minor_breaks : callable ``f(major, limits)`` Function to compute the minor breaks for this transform. If None, then a default good enough for a linear domain is used. _format : callable ``f(breaks)`` Function to format the generated breaks. domain : array_like Domain over which the transformation is valid. It should be of length 2. doc : str Docstring for the class. **kwargs : dict Attributes of the transform, e.g if base is passed in kwargs, then `t.base` would be a valied attribute. Returns ------- out : trans Transform class
Below is the the instruction that describes the task: ### Input: Create a transformation class object Parameters ---------- name : str Name of the transformation transform : callable ``f(x)`` A function (preferably a `ufunc`) that computes the transformation. inverse : callable ``f(x)`` A function (preferably a `ufunc`) that computes the inverse of the transformation. breaks : callable ``f(limits)`` Function to compute the breaks for this transform. If None, then a default good enough for a linear domain is used. minor_breaks : callable ``f(major, limits)`` Function to compute the minor breaks for this transform. If None, then a default good enough for a linear domain is used. _format : callable ``f(breaks)`` Function to format the generated breaks. domain : array_like Domain over which the transformation is valid. It should be of length 2. doc : str Docstring for the class. **kwargs : dict Attributes of the transform, e.g if base is passed in kwargs, then `t.base` would be a valied attribute. Returns ------- out : trans Transform class ### Response: def trans_new(name, transform, inverse, breaks=None, minor_breaks=None, _format=None, domain=(-np.inf, np.inf), doc='', **kwargs): """ Create a transformation class object Parameters ---------- name : str Name of the transformation transform : callable ``f(x)`` A function (preferably a `ufunc`) that computes the transformation. inverse : callable ``f(x)`` A function (preferably a `ufunc`) that computes the inverse of the transformation. breaks : callable ``f(limits)`` Function to compute the breaks for this transform. If None, then a default good enough for a linear domain is used. minor_breaks : callable ``f(major, limits)`` Function to compute the minor breaks for this transform. If None, then a default good enough for a linear domain is used. _format : callable ``f(breaks)`` Function to format the generated breaks. domain : array_like Domain over which the transformation is valid. It should be of length 2. doc : str Docstring for the class. **kwargs : dict Attributes of the transform, e.g if base is passed in kwargs, then `t.base` would be a valied attribute. Returns ------- out : trans Transform class """ def _get(func): if isinstance(func, (classmethod, staticmethod, MethodType)): return func else: return staticmethod(func) klass_name = '{}_trans'.format(name) d = {'transform': _get(transform), 'inverse': _get(inverse), 'domain': domain, '__doc__': doc, **kwargs} if breaks: d['breaks_'] = _get(breaks) if minor_breaks: d['minor_breaks'] = _get(minor_breaks) if _format: d['format'] = _get(_format) return type(klass_name, (trans,), d)
def to_minutes(days=0, hours=0, minutes=0, seconds=0, milliseconds=0, round_to_int=True): """Returns the computed total number of minutes """ total = float(days)*24*60 + float(hours)*60 + float(minutes) + \ float(seconds)/60 + float(milliseconds)/1000/60 return int(round(total)) if round_to_int else total
Returns the computed total number of minutes
Below is the the instruction that describes the task: ### Input: Returns the computed total number of minutes ### Response: def to_minutes(days=0, hours=0, minutes=0, seconds=0, milliseconds=0, round_to_int=True): """Returns the computed total number of minutes """ total = float(days)*24*60 + float(hours)*60 + float(minutes) + \ float(seconds)/60 + float(milliseconds)/1000/60 return int(round(total)) if round_to_int else total
def destroy(cls, url): """ This operation deletes an existing endpoint from the list of all endpoints, and makes the server stop listening on the endpoint. *Note*: deleting and disconnecting an endpoint is allowed in the system database only. Calling this action in any other database will make the server return an error. Futhermore, the last remaining endpoint cannot be deleted as this would make the server kaput. :param url The endpoint to delete, e.g. tcp://127.0.0.1:8529. """ api = Client.instance().api api.endpoint(url).delete()
This operation deletes an existing endpoint from the list of all endpoints, and makes the server stop listening on the endpoint. *Note*: deleting and disconnecting an endpoint is allowed in the system database only. Calling this action in any other database will make the server return an error. Futhermore, the last remaining endpoint cannot be deleted as this would make the server kaput. :param url The endpoint to delete, e.g. tcp://127.0.0.1:8529.
Below is the the instruction that describes the task: ### Input: This operation deletes an existing endpoint from the list of all endpoints, and makes the server stop listening on the endpoint. *Note*: deleting and disconnecting an endpoint is allowed in the system database only. Calling this action in any other database will make the server return an error. Futhermore, the last remaining endpoint cannot be deleted as this would make the server kaput. :param url The endpoint to delete, e.g. tcp://127.0.0.1:8529. ### Response: def destroy(cls, url): """ This operation deletes an existing endpoint from the list of all endpoints, and makes the server stop listening on the endpoint. *Note*: deleting and disconnecting an endpoint is allowed in the system database only. Calling this action in any other database will make the server return an error. Futhermore, the last remaining endpoint cannot be deleted as this would make the server kaput. :param url The endpoint to delete, e.g. tcp://127.0.0.1:8529. """ api = Client.instance().api api.endpoint(url).delete()
def reset(self): """Reset stacks and instruction pointer.""" self.data_stack = stack.Stack() self.return_stack = stack.Stack() self.instruction_pointer = 0 return self
Reset stacks and instruction pointer.
Below is the the instruction that describes the task: ### Input: Reset stacks and instruction pointer. ### Response: def reset(self): """Reset stacks and instruction pointer.""" self.data_stack = stack.Stack() self.return_stack = stack.Stack() self.instruction_pointer = 0 return self
def calculate_clock_skew(self): """ Computer average and standard deviation using all the data points. """ n = self.statx_n(self.data_points) """ Required to be able to compute the standard deviation. """ if n < 1: return Decimal("0") avg = self.statx_avg(self.data_points) sdev = self.statx_sdev(self.data_points) """ Incrementally remove aberration points. """ for k in range(0, self.clean_steps): """ Remove aberration points: keep only the sigma range around the average. """ min_val = avg - sdev max_val = avg + sdev cleaned_data_points = [] for i in range(0, n): v = self.data_points[i] if v < min_val or v > max_val: continue cleaned_data_points.append(v) self.data_points = cleaned_data_points[:] """ Recompute the new average using the "sound" points we kept. """ n = self.statx_n(self.data_points) """ Not enough data to compute standard deviation. """ if n < 2: break avg = self.statx_avg(self.data_points) sdev = self.statx_sdev(self.data_points) if sdev <= self.max_sdev or n < self.min_data: break """ If standard deviation is too large still, we cannot update our clock. Collect more points. If we don't have a minimum amount of data, don't attempt the update yet, continue collecting. """ if sdev > self.max_sdev or n < self.min_data: return Decimal("0") return avg
Computer average and standard deviation using all the data points.
Below is the the instruction that describes the task: ### Input: Computer average and standard deviation using all the data points. ### Response: def calculate_clock_skew(self): """ Computer average and standard deviation using all the data points. """ n = self.statx_n(self.data_points) """ Required to be able to compute the standard deviation. """ if n < 1: return Decimal("0") avg = self.statx_avg(self.data_points) sdev = self.statx_sdev(self.data_points) """ Incrementally remove aberration points. """ for k in range(0, self.clean_steps): """ Remove aberration points: keep only the sigma range around the average. """ min_val = avg - sdev max_val = avg + sdev cleaned_data_points = [] for i in range(0, n): v = self.data_points[i] if v < min_val or v > max_val: continue cleaned_data_points.append(v) self.data_points = cleaned_data_points[:] """ Recompute the new average using the "sound" points we kept. """ n = self.statx_n(self.data_points) """ Not enough data to compute standard deviation. """ if n < 2: break avg = self.statx_avg(self.data_points) sdev = self.statx_sdev(self.data_points) if sdev <= self.max_sdev or n < self.min_data: break """ If standard deviation is too large still, we cannot update our clock. Collect more points. If we don't have a minimum amount of data, don't attempt the update yet, continue collecting. """ if sdev > self.max_sdev or n < self.min_data: return Decimal("0") return avg
def get_loss_ratios(self): """ :returns: a 1-dimensional composite array with loss ratios by loss type """ lst = [('user_provided', numpy.bool)] for cp in self.curve_params: lst.append((cp.loss_type, F32, len(cp.ratios))) loss_ratios = numpy.zeros(1, numpy.dtype(lst)) for cp in self.curve_params: loss_ratios['user_provided'] = cp.user_provided loss_ratios[cp.loss_type] = tuple(cp.ratios) return loss_ratios
:returns: a 1-dimensional composite array with loss ratios by loss type
Below is the the instruction that describes the task: ### Input: :returns: a 1-dimensional composite array with loss ratios by loss type ### Response: def get_loss_ratios(self): """ :returns: a 1-dimensional composite array with loss ratios by loss type """ lst = [('user_provided', numpy.bool)] for cp in self.curve_params: lst.append((cp.loss_type, F32, len(cp.ratios))) loss_ratios = numpy.zeros(1, numpy.dtype(lst)) for cp in self.curve_params: loss_ratios['user_provided'] = cp.user_provided loss_ratios[cp.loss_type] = tuple(cp.ratios) return loss_ratios
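To make the composite dtype built by get_loss_ratios easier to picture, here is a small standalone sketch of the same numpy construction; the loss-type names and ratio values below are invented placeholders, not data from the source.

import numpy as np

# Hypothetical curve parameters: (loss_type, ratios)
curve_params = [("structural", (0.05, 0.25, 1.0)), ("nonstructural", (0.1, 0.5))]

# One bool flag plus one float32 sub-array per loss type, mirroring the dtype above
fields = [("user_provided", np.bool_)]
fields.extend((name, np.float32, len(ratios)) for name, ratios in curve_params)

loss_ratios = np.zeros(1, np.dtype(fields))
loss_ratios["user_provided"] = True
for name, ratios in curve_params:
    loss_ratios[name] = ratios

print(loss_ratios.dtype)
print(loss_ratios["structural"])  # -> [[0.05 0.25 1.  ]]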
def login(self, login, password): """ Login to the remote telnet server. :param login: Username to use for logging in :param password: Password to use for logging in :raise: `InvalidLogin` on failed login """ self.client.read_until('Username: ') self.client.write(login + '\r\n') self.client.read_until('Password: ') self.client.write(password + '\r\n') current_data = self.client.read_until('$ ', 10) if not current_data.endswith('$ '): raise InvalidLogin
Login to the remote telnet server. :param login: Username to use for logging in :param password: Password to use for logging in :raise: `InvalidLogin` on failed login
Below is the the instruction that describes the task: ### Input: Login to the remote telnet server. :param login: Username to use for logging in :param password: Password to use for logging in :raise: `InvalidLogin` on failed login ### Response: def login(self, login, password): """ Login to the remote telnet server. :param login: Username to use for logging in :param password: Password to use for logging in :raise: `InvalidLogin` on failed login """ self.client.read_until('Username: ') self.client.write(login + '\r\n') self.client.read_until('Password: ') self.client.write(password + '\r\n') current_data = self.client.read_until('$ ', 10) if not current_data.endswith('$ '): raise InvalidLogin
def draw_step(self): """iterator that computes all vertices coordinates and edge routing after just one step (one layer after the other from top to bottom to top). Purely inefficient ! Use it only for "animation" or debugging purpose. """ ostep = self.ordering_step() for s in ostep: self.setxy() self.draw_edges() yield s
iterator that computes all vertex coordinates and edge routing after just one step (one layer after the other, from top to bottom and back to top). Purely inefficient! Use it only for "animation" or debugging purposes.
Below is the the instruction that describes the task: ### Input: iterator that computes all vertices coordinates and edge routing after just one step (one layer after the other from top to bottom to top). Purely inefficient ! Use it only for "animation" or debugging purpose. ### Response: def draw_step(self): """iterator that computes all vertices coordinates and edge routing after just one step (one layer after the other from top to bottom to top). Purely inefficient ! Use it only for "animation" or debugging purpose. """ ostep = self.ordering_step() for s in ostep: self.setxy() self.draw_edges() yield s
def getBucketIndices(self, input): """ See method description in base.py """ if input == SENTINEL_VALUE_FOR_MISSING_DATA: # Encoder each sub-field return [None] * len(self.encoders) else: assert isinstance(input, datetime.datetime) # Get the scalar values for each sub-field scalars = self.getScalars(input) # Encoder each sub-field result = [] for i in xrange(len(self.encoders)): (name, encoder, offset) = self.encoders[i] result.extend(encoder.getBucketIndices(scalars[i])) return result
See method description in base.py
Below is the the instruction that describes the task: ### Input: See method description in base.py ### Response: def getBucketIndices(self, input): """ See method description in base.py """ if input == SENTINEL_VALUE_FOR_MISSING_DATA: # Encoder each sub-field return [None] * len(self.encoders) else: assert isinstance(input, datetime.datetime) # Get the scalar values for each sub-field scalars = self.getScalars(input) # Encoder each sub-field result = [] for i in xrange(len(self.encoders)): (name, encoder, offset) = self.encoders[i] result.extend(encoder.getBucketIndices(scalars[i])) return result
def validate_range(low=None, high=None): """ Validate the range of a field with either low, high, or equal. Should work with anything that supports '>' and '<' operators. :param low: Smallest value required. :param high: Longest value required. :raises: ``ValidationError('range_low')`` :raises: ``ValidationError('range_high')`` :raises: ``ValidationError('range_between')`` """ def range_validator(field, data): if field.value is None: return if low is not None and field.value < low: key = 'range_low' if high is None else 'range_between' raise ValidationError(key, low=low, high=high) if high is not None and field.value > high: key = 'range_high' if high is None else 'range_between' raise ValidationError(key, low=low, high=high) return range_validator
Validate the range of a field with either low, high, or equal. Should work with anything that supports '>' and '<' operators. :param low: Smallest value required. :param high: Largest value required. :raises: ``ValidationError('range_low')`` :raises: ``ValidationError('range_high')`` :raises: ``ValidationError('range_between')``
Below is the the instruction that describes the task: ### Input: Validate the range of a field with either low, high, or equal. Should work with anything that supports '>' and '<' operators. :param low: Smallest value required. :param high: Longest value required. :raises: ``ValidationError('range_low')`` :raises: ``ValidationError('range_high')`` :raises: ``ValidationError('range_between')`` ### Response: def validate_range(low=None, high=None): """ Validate the range of a field with either low, high, or equal. Should work with anything that supports '>' and '<' operators. :param low: Smallest value required. :param high: Longest value required. :raises: ``ValidationError('range_low')`` :raises: ``ValidationError('range_high')`` :raises: ``ValidationError('range_between')`` """ def range_validator(field, data): if field.value is None: return if low is not None and field.value < low: key = 'range_low' if high is None else 'range_between' raise ValidationError(key, low=low, high=high) if high is not None and field.value > high: key = 'range_high' if high is None else 'range_between' raise ValidationError(key, low=low, high=high) return range_validator
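As a usage illustration, the sketch below wires a condensed version of the validator to throwaway Field and ValidationError stubs; the stub classes, the check_age name, and the exact key-selection logic are simplifications for demonstration, not the library's real API.

class ValidationError(Exception):
    def __init__(self, key, **context):
        super().__init__(key)
        self.key, self.context = key, context

class Field:
    def __init__(self, value):
        self.value = value

def validate_range(low=None, high=None):
    def range_validator(field, data):
        if field.value is None:
            return
        if low is not None and field.value < low:
            raise ValidationError('range_low' if high is None else 'range_between', low=low, high=high)
        if high is not None and field.value > high:
            raise ValidationError('range_high' if low is None else 'range_between', low=low, high=high)
    return range_validator

check_age = validate_range(low=0, high=130)
check_age(Field(42), data={})       # in range: returns None, no exception raised
try:
    check_age(Field(200), data={})
except ValidationError as err:
    print(err.key, err.context)     # -> range_between {'low': 0, 'high': 130}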
def upload(self, picture, resize=None, rotation=None, noexif=None, callback=None): """ wraps upload function :param str/tuple/list picture: Path to picture as str or picture data. \ If data a tuple or list with the file name as str \ and data as byte object in that order. :param str resize: Aresolution in the folowing format: \ '80x80'(optional) :param str|degree rotation: The picture will be rotated by this Value.\ Allowed values are 00, 90, 180, 270.(optional) :param boolean noexif: set to True when exif data should be purged.\ (optional) :param function callback: function will be called after every read. \ Need to take one argument. you can use the len function to \ determine the body length and call bytes_read(). """ if not resize: resize = self._resize if not rotation: rotation = self._rotation if not noexif: noexif = self._noexif if not callback: callback = self._callback return upload(self._apikey, picture, resize, rotation, noexif, callback)
wraps upload function :param str/tuple/list picture: Path to picture as str or picture data. \ If data, a tuple or list with the file name as str \ and data as byte object in that order. :param str resize: A resolution in the following format: \ '80x80' (optional) :param str|degree rotation: The picture will be rotated by this value.\ Allowed values are 00, 90, 180, 270. (optional) :param boolean noexif: set to True when exif data should be purged.\ (optional) :param function callback: function will be called after every read. \ It must take one argument. You can use the len function to \ determine the body length and call bytes_read().
Below is the the instruction that describes the task: ### Input: wraps upload function :param str/tuple/list picture: Path to picture as str or picture data. \ If data a tuple or list with the file name as str \ and data as byte object in that order. :param str resize: Aresolution in the folowing format: \ '80x80'(optional) :param str|degree rotation: The picture will be rotated by this Value.\ Allowed values are 00, 90, 180, 270.(optional) :param boolean noexif: set to True when exif data should be purged.\ (optional) :param function callback: function will be called after every read. \ Need to take one argument. you can use the len function to \ determine the body length and call bytes_read(). ### Response: def upload(self, picture, resize=None, rotation=None, noexif=None, callback=None): """ wraps upload function :param str/tuple/list picture: Path to picture as str or picture data. \ If data a tuple or list with the file name as str \ and data as byte object in that order. :param str resize: Aresolution in the folowing format: \ '80x80'(optional) :param str|degree rotation: The picture will be rotated by this Value.\ Allowed values are 00, 90, 180, 270.(optional) :param boolean noexif: set to True when exif data should be purged.\ (optional) :param function callback: function will be called after every read. \ Need to take one argument. you can use the len function to \ determine the body length and call bytes_read(). """ if not resize: resize = self._resize if not rotation: rotation = self._rotation if not noexif: noexif = self._noexif if not callback: callback = self._callback return upload(self._apikey, picture, resize, rotation, noexif, callback)
def round(self, eps=1e-14, rmax=1000000): """Applies TT rounding procedure to the TT-vector and **returns rounded tensor**. :param eps: Rounding accuracy. :type eps: float :param rmax: Maximal rank :type rmax: int :returns: tensor -- rounded TT-vector. Usage example: >>> a = tt.ones(2, 10) >>> b = a + a >>> print b.r array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32) >>> b = b.round(1E-14) >>> print b.r array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) """ c = vector() c.n = _np.copy(self.n) c.d = self.d c.r = _np.copy(self.r) c.ps = _np.copy(self.ps) if (_np.iscomplex(self.core).any()): _tt_f90.tt_f90.ztt_compr2(c.n, c.r, c.ps, self.core, eps, rmax) c.core = _tt_f90.tt_f90.zcore.copy() else: _tt_f90.tt_f90.dtt_compr2(c.n, c.r, c.ps, self.core, eps, rmax) c.core = _tt_f90.tt_f90.core.copy() _tt_f90.tt_f90.tt_dealloc() return c
Applies TT rounding procedure to the TT-vector and **returns rounded tensor**. :param eps: Rounding accuracy. :type eps: float :param rmax: Maximal rank :type rmax: int :returns: tensor -- rounded TT-vector. Usage example: >>> a = tt.ones(2, 10) >>> b = a + a >>> print b.r array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32) >>> b = b.round(1E-14) >>> print b.r array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Below is the the instruction that describes the task: ### Input: Applies TT rounding procedure to the TT-vector and **returns rounded tensor**. :param eps: Rounding accuracy. :type eps: float :param rmax: Maximal rank :type rmax: int :returns: tensor -- rounded TT-vector. Usage example: >>> a = tt.ones(2, 10) >>> b = a + a >>> print b.r array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32) >>> b = b.round(1E-14) >>> print b.r array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) ### Response: def round(self, eps=1e-14, rmax=1000000): """Applies TT rounding procedure to the TT-vector and **returns rounded tensor**. :param eps: Rounding accuracy. :type eps: float :param rmax: Maximal rank :type rmax: int :returns: tensor -- rounded TT-vector. Usage example: >>> a = tt.ones(2, 10) >>> b = a + a >>> print b.r array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32) >>> b = b.round(1E-14) >>> print b.r array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) """ c = vector() c.n = _np.copy(self.n) c.d = self.d c.r = _np.copy(self.r) c.ps = _np.copy(self.ps) if (_np.iscomplex(self.core).any()): _tt_f90.tt_f90.ztt_compr2(c.n, c.r, c.ps, self.core, eps, rmax) c.core = _tt_f90.tt_f90.zcore.copy() else: _tt_f90.tt_f90.dtt_compr2(c.n, c.r, c.ps, self.core, eps, rmax) c.core = _tt_f90.tt_f90.core.copy() _tt_f90.tt_f90.tt_dealloc() return c
def calc_worklog(stdout=Ellipsis, stderr=Ellipsis, verbose=False): ''' calc_worklog constructs the worklog from the stdout, stderr, stdin, and verbose arguments. ''' try: cols = int(os.environ['COLUMNS']) except Exception: cols = 80 return pimms.worklog(columns=cols, stdout=stdout, stderr=stderr, verbose=verbose)
calc_worklog constructs the worklog from the stdout, stderr, stdin, and verbose arguments.
Below is the the instruction that describes the task: ### Input: calc_worklog constructs the worklog from the stdout, stderr, stdin, and verbose arguments. ### Response: def calc_worklog(stdout=Ellipsis, stderr=Ellipsis, verbose=False): ''' calc_worklog constructs the worklog from the stdout, stderr, stdin, and verbose arguments. ''' try: cols = int(os.environ['COLUMNS']) except Exception: cols = 80 return pimms.worklog(columns=cols, stdout=stdout, stderr=stderr, verbose=verbose)
def quadrant(xcoord, ycoord): """ Find the quadrant a pair of coordinates are located in :type xcoord: integer :param xcoord: The x coordinate to find the quadrant for :type ycoord: integer :param ycoord: The y coordinate to find the quadrant for """ xneg = bool(xcoord < 0) yneg = bool(ycoord < 0) if xneg is True: if yneg is False: return 2 return 3 if yneg is False: return 1 return 4
Find the quadrant a pair of coordinates are located in :type xcoord: integer :param xcoord: The x coordinate to find the quadrant for :type ycoord: integer :param ycoord: The y coordinate to find the quadrant for
Below is the the instruction that describes the task: ### Input: Find the quadrant a pair of coordinates are located in :type xcoord: integer :param xcoord: The x coordinate to find the quadrant for :type ycoord: integer :param ycoord: The y coordinate to find the quadrant for ### Response: def quadrant(xcoord, ycoord): """ Find the quadrant a pair of coordinates are located in :type xcoord: integer :param xcoord: The x coordinate to find the quadrant for :type ycoord: integer :param ycoord: The y coordinate to find the quadrant for """ xneg = bool(xcoord < 0) yneg = bool(ycoord < 0) if xneg is True: if yneg is False: return 2 return 3 if yneg is False: return 1 return 4
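A few spot checks make the branch logic of quadrant easy to verify; this assumes the quadrant function defined above is in scope, and note that zero coordinates are treated as non-negative.

assert quadrant(3, 2) == 1    # x >= 0, y >= 0
assert quadrant(-3, 2) == 2   # x < 0,  y >= 0
assert quadrant(-3, -2) == 3  # x < 0,  y < 0
assert quadrant(3, -2) == 4   # x >= 0, y < 0
assert quadrant(0, 0) == 1    # points on the axes land in quadrant 1 under this scheme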
def key(self, id_num): """Get the specified deploy key. :param int id_num: (required), id of the key :returns: :class:`Key <github3.users.Key>` if successful, else None """ json = None if int(id_num) > 0: url = self._build_url('keys', str(id_num), base_url=self._api) json = self._json(self._get(url), 200) return Key(json, self) if json else None
Get the specified deploy key. :param int id_num: (required), id of the key :returns: :class:`Key <github3.users.Key>` if successful, else None
Below is the the instruction that describes the task: ### Input: Get the specified deploy key. :param int id_num: (required), id of the key :returns: :class:`Key <github3.users.Key>` if successful, else None ### Response: def key(self, id_num): """Get the specified deploy key. :param int id_num: (required), id of the key :returns: :class:`Key <github3.users.Key>` if successful, else None """ json = None if int(id_num) > 0: url = self._build_url('keys', str(id_num), base_url=self._api) json = self._json(self._get(url), 200) return Key(json, self) if json else None
def delete(self): """Delete this file""" if not self.remote: if not os.path.exists(self.projectpath + self.basedir + '/' + self.filename): return False else: os.unlink(self.projectpath + self.basedir + '/' + self.filename) #Remove metadata metafile = self.projectpath + self.basedir + '/' + self.metafilename() if os.path.exists(metafile): os.unlink(metafile) #also remove any .*.INPUTTEMPLATE.* links that pointed to this file: simply remove all dead links for linkf,realf in clam.common.util.globsymlinks(self.projectpath + self.basedir + '/.*.INPUTTEMPLATE.*'): if not os.path.exists(realf): os.unlink(linkf) return True else: if self.client: requestparams = self.client.initrequest() else: requestparams = {} requests.delete( self.projectpath + self.basedir + '/' + self.filename, **requestparams) return True
Delete this file
Below is the the instruction that describes the task: ### Input: Delete this file ### Response: def delete(self): """Delete this file""" if not self.remote: if not os.path.exists(self.projectpath + self.basedir + '/' + self.filename): return False else: os.unlink(self.projectpath + self.basedir + '/' + self.filename) #Remove metadata metafile = self.projectpath + self.basedir + '/' + self.metafilename() if os.path.exists(metafile): os.unlink(metafile) #also remove any .*.INPUTTEMPLATE.* links that pointed to this file: simply remove all dead links for linkf,realf in clam.common.util.globsymlinks(self.projectpath + self.basedir + '/.*.INPUTTEMPLATE.*'): if not os.path.exists(realf): os.unlink(linkf) return True else: if self.client: requestparams = self.client.initrequest() else: requestparams = {} requests.delete( self.projectpath + self.basedir + '/' + self.filename, **requestparams) return True
def is_complete(self): """Do all required parameters have values?""" return all(p.name in self.values for p in self.parameters if p.required)
Do all required parameters have values?
Below is the the instruction that describes the task: ### Input: Do all required parameters have values? ### Response: def is_complete(self): """Do all required parameters have values?""" return all(p.name in self.values for p in self.parameters if p.required)
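A minimal sketch of the same completeness predicate, using an illustrative Parameter namedtuple and a plain dict of values in place of the real objects (both stand-ins are assumptions, not the source's types).

from collections import namedtuple

Parameter = namedtuple("Parameter", "name required")

parameters = [Parameter("host", True), Parameter("port", True), Parameter("note", False)]
values = {"host": "localhost", "port": 8080}

# Same check as is_complete(): every required parameter must have a value
print(all(p.name in values for p in parameters if p.required))  # -> True
del values["port"]
print(all(p.name in values for p in parameters if p.required))  # -> False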
def get_configuration(): """ get a configuration (snapshot) that can be used to call configure snapshot = get_configuration() configure(**snapshot) """ root = getLogger() name_levels = [('', logging.getLevelName(root.level))] name_levels.extend( (name, logging.getLevelName(logger.level)) for name, logger in root.manager.loggerDict.items() if hasattr(logger, 'level') ) config_string = ','.join('%s:%s' % x for x in name_levels) return dict(config_string=config_string, log_json=SLogger.manager.log_json)
get a configuration (snapshot) that can be used to call configure snapshot = get_configuration() configure(**snapshot)
Below is the the instruction that describes the task: ### Input: get a configuration (snapshot) that can be used to call configure snapshot = get_configuration() configure(**snapshot) ### Response: def get_configuration(): """ get a configuration (snapshot) that can be used to call configure snapshot = get_configuration() configure(**snapshot) """ root = getLogger() name_levels = [('', logging.getLevelName(root.level))] name_levels.extend( (name, logging.getLevelName(logger.level)) for name, logger in root.manager.loggerDict.items() if hasattr(logger, 'level') ) config_string = ','.join('%s:%s' % x for x in name_levels) return dict(config_string=config_string, log_json=SLogger.manager.log_json)
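The config_string produced here is just 'name:LEVEL' pairs joined by commas; the standalone sketch below builds the same string with only the stdlib logging module (the logger names are made up, and the exact output depends on which loggers exist in the process).

import logging

logging.getLogger("eth.vm").setLevel(logging.DEBUG)
logging.getLogger("p2p").setLevel(logging.WARNING)

root = logging.getLogger()
name_levels = [("", logging.getLevelName(root.level))]
name_levels.extend(
    (name, logging.getLevelName(logger.level))
    for name, logger in root.manager.loggerDict.items()
    if hasattr(logger, "level")
)
print(",".join("%s:%s" % pair for pair in name_levels))
# e.g. ":WARNING,eth.vm:DEBUG,p2p:WARNING"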
def _check_init(self, node): """check that the __init__ method call super or ancestors'__init__ method """ if not self.linter.is_message_enabled( "super-init-not-called" ) and not self.linter.is_message_enabled("non-parent-init-called"): return klass_node = node.parent.frame() to_call = _ancestors_to_call(klass_node) not_called_yet = dict(to_call) for stmt in node.nodes_of_class(astroid.Call): expr = stmt.func if not isinstance(expr, astroid.Attribute) or expr.attrname != "__init__": continue # skip the test if using super if ( isinstance(expr.expr, astroid.Call) and isinstance(expr.expr.func, astroid.Name) and expr.expr.func.name == "super" ): return try: for klass in expr.expr.infer(): if klass is astroid.Uninferable: continue # The infered klass can be super(), which was # assigned to a variable and the `__init__` # was called later. # # base = super() # base.__init__(...) if ( isinstance(klass, astroid.Instance) and isinstance(klass._proxied, astroid.ClassDef) and is_builtin_object(klass._proxied) and klass._proxied.name == "super" ): return if isinstance(klass, objects.Super): return try: del not_called_yet[klass] except KeyError: if klass not in to_call: self.add_message( "non-parent-init-called", node=expr, args=klass.name ) except astroid.InferenceError: continue for klass, method in not_called_yet.items(): cls = node_frame_class(method) if klass.name == "object" or (cls and cls.name == "object"): continue self.add_message("super-init-not-called", args=klass.name, node=node)
check that the __init__ method calls super or an ancestor's __init__ method
Below is the the instruction that describes the task: ### Input: check that the __init__ method call super or ancestors'__init__ method ### Response: def _check_init(self, node): """check that the __init__ method call super or ancestors'__init__ method """ if not self.linter.is_message_enabled( "super-init-not-called" ) and not self.linter.is_message_enabled("non-parent-init-called"): return klass_node = node.parent.frame() to_call = _ancestors_to_call(klass_node) not_called_yet = dict(to_call) for stmt in node.nodes_of_class(astroid.Call): expr = stmt.func if not isinstance(expr, astroid.Attribute) or expr.attrname != "__init__": continue # skip the test if using super if ( isinstance(expr.expr, astroid.Call) and isinstance(expr.expr.func, astroid.Name) and expr.expr.func.name == "super" ): return try: for klass in expr.expr.infer(): if klass is astroid.Uninferable: continue # The infered klass can be super(), which was # assigned to a variable and the `__init__` # was called later. # # base = super() # base.__init__(...) if ( isinstance(klass, astroid.Instance) and isinstance(klass._proxied, astroid.ClassDef) and is_builtin_object(klass._proxied) and klass._proxied.name == "super" ): return if isinstance(klass, objects.Super): return try: del not_called_yet[klass] except KeyError: if klass not in to_call: self.add_message( "non-parent-init-called", node=expr, args=klass.name ) except astroid.InferenceError: continue for klass, method in not_called_yet.items(): cls = node_frame_class(method) if klass.name == "object" or (cls and cls.name == "object"): continue self.add_message("super-init-not-called", args=klass.name, node=node)
def get_function_descriptor_list(self): """Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes. """ descriptor_list = [] if self.is_for_driver_task: # Driver task returns an empty list. return descriptor_list else: descriptor_list.append(self.module_name.encode("ascii")) descriptor_list.append(self.class_name.encode("ascii")) descriptor_list.append(self.function_name.encode("ascii")) if len(self._function_source_hash) != 0: descriptor_list.append(self._function_source_hash) return descriptor_list
Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes.
Below is the the instruction that describes the task: ### Input: Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes. ### Response: def get_function_descriptor_list(self): """Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes. """ descriptor_list = [] if self.is_for_driver_task: # Driver task returns an empty list. return descriptor_list else: descriptor_list.append(self.module_name.encode("ascii")) descriptor_list.append(self.class_name.encode("ascii")) descriptor_list.append(self.function_name.encode("ascii")) if len(self._function_source_hash) != 0: descriptor_list.append(self._function_source_hash) return descriptor_list
def _create_window_function(name, doc=''): """ Create a window function by name """ def _(): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)() return Column(jc) _.__name__ = name _.__doc__ = 'Window function: ' + doc return _
Create a window function by name
Below is the the instruction that describes the task: ### Input: Create a window function by name ### Response: def _create_window_function(name, doc=''): """ Create a window function by name """ def _(): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)() return Column(jc) _.__name__ = name _.__doc__ = 'Window function: ' + doc return _
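The pattern worth noting here is the closure factory that stamps out many near-identical zero-argument wrappers and fixes up __name__ and __doc__; a Spark-free sketch of the same idea follows (the greeting functions and their spec table are invented for illustration).

def _create_greeting_function(name, doc=""):
    """Create a zero-argument function by name (same closure-factory pattern, no Spark)."""
    def _():
        return "called %s()" % name
    _.__name__ = name
    _.__doc__ = "Greeting function: " + doc
    return _

# Stamp out several near-identical functions from a small spec table
functions = {name: _create_greeting_function(name, doc)
             for name, doc in [("hello", "says hello"), ("goodbye", "says goodbye")]}

print(functions["hello"]())           # -> called hello()
print(functions["goodbye"].__doc__)   # -> Greeting function: says goodbye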
def to_json(self, filename): """ Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file """ with open(filename, 'w') as fp: json.dump(dict(stimuli=self.stimuli, inhibitors=self.inhibitors, readouts=self.readouts), fp)
Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file
Below is the the instruction that describes the task: ### Input: Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file ### Response: def to_json(self, filename): """ Writes the experimental setup to a JSON file Parameters ---------- filename : str Absolute path where to write the JSON file """ with open(filename, 'w') as fp: json.dump(dict(stimuli=self.stimuli, inhibitors=self.inhibitors, readouts=self.readouts), fp)
def InitUI(self): """ Initialize interface for drop down menu """ if self.data_type in ['orient', 'ages']: belongs_to = [] else: parent_table_name = self.parent_type + "s" if parent_table_name in self.contribution.tables: belongs_to = sorted(self.contribution.tables[parent_table_name].df.index.unique()) else: belongs_to = [] self.choices = {} if self.data_type in ['specimens', 'samples', 'sites']: self.choices = {1: (belongs_to, False)} if self.data_type == 'orient': self.choices = {1: (['g', 'b'], False)} if self.data_type == 'ages': for level in ['specimen', 'sample', 'site', 'location']: if level in self.grid.col_labels: level_names = [] if level + "s" in self.contribution.tables: level_names = list(self.contribution.tables[level+"s"].df.index.unique()) num = self.grid.col_labels.index(level) self.choices[num] = (level_names, False) # Bind left click to drop-down menu popping out self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, lambda event: self.on_left_click(event, self.grid, self.choices)) cols = self.grid.GetNumberCols() col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)] # check if any additional columns have controlled vocabularies # if so, get the vocabulary list for col_number, label in enumerate(col_labels): self.add_drop_down(col_number, label)
Initialize interface for drop down menu
Below is the the instruction that describes the task: ### Input: Initialize interface for drop down menu ### Response: def InitUI(self): """ Initialize interface for drop down menu """ if self.data_type in ['orient', 'ages']: belongs_to = [] else: parent_table_name = self.parent_type + "s" if parent_table_name in self.contribution.tables: belongs_to = sorted(self.contribution.tables[parent_table_name].df.index.unique()) else: belongs_to = [] self.choices = {} if self.data_type in ['specimens', 'samples', 'sites']: self.choices = {1: (belongs_to, False)} if self.data_type == 'orient': self.choices = {1: (['g', 'b'], False)} if self.data_type == 'ages': for level in ['specimen', 'sample', 'site', 'location']: if level in self.grid.col_labels: level_names = [] if level + "s" in self.contribution.tables: level_names = list(self.contribution.tables[level+"s"].df.index.unique()) num = self.grid.col_labels.index(level) self.choices[num] = (level_names, False) # Bind left click to drop-down menu popping out self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, lambda event: self.on_left_click(event, self.grid, self.choices)) cols = self.grid.GetNumberCols() col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)] # check if any additional columns have controlled vocabularies # if so, get the vocabulary list for col_number, label in enumerate(col_labels): self.add_drop_down(col_number, label)
def remove(self, membership_id): """Remove a member from the group. :param str membership_id: the ID of a member in this group :return: ``True`` if the member was successfully removed :rtype: bool """ path = '{}/remove'.format(membership_id) url = utils.urljoin(self.url, path) payload = {'membership_id': membership_id} response = self.session.post(url, json=payload) return response.ok
Remove a member from the group. :param str membership_id: the ID of a member in this group :return: ``True`` if the member was successfully removed :rtype: bool
Below is the the instruction that describes the task: ### Input: Remove a member from the group. :param str membership_id: the ID of a member in this group :return: ``True`` if the member was successfully removed :rtype: bool ### Response: def remove(self, membership_id): """Remove a member from the group. :param str membership_id: the ID of a member in this group :return: ``True`` if the member was successfully removed :rtype: bool """ path = '{}/remove'.format(membership_id) url = utils.urljoin(self.url, path) payload = {'membership_id': membership_id} response = self.session.post(url, json=payload) return response.ok
def find_project_config_path(path=None): """Find project config path.""" path = Path(path) if path else Path.cwd() abspath = path.absolute() project_path = get_project_config_path(abspath) if project_path: return project_path for parent in abspath.parents: project_path = get_project_config_path(parent) if project_path: return project_path
Find project config path.
Below is the the instruction that describes the task: ### Input: Find project config path. ### Response: def find_project_config_path(path=None): """Find project config path.""" path = Path(path) if path else Path.cwd() abspath = path.absolute() project_path = get_project_config_path(abspath) if project_path: return project_path for parent in abspath.parents: project_path = get_project_config_path(parent) if project_path: return project_path
def read_sudoers(): """ Read the sudoers entry for the specified user. args: username (str): username. returns:`r str: sudoers entry for the specified user. """ sudoers_path = '/etc/sudoers' rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars) sudoers_entries = list() copy_result = execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path)))) result_message = copy_result[0][1].decode('UTF-8') if 'No such file or directory' not in result_message: execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_sudoers_path)))) with open(tmp_sudoers_path) as tmp_sudoers_file: for line in tmp_sudoers_file: stripped = line.strip().replace(os.linesep, '') if stripped and not stripped.startswith('#'): sudoers_entries.append(stripped) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path)))) return sudoers_entries
Read the sudoers entry for the specified user. args: username (str): username. returns: str: sudoers entry for the specified user.
Below is the the instruction that describes the task: ### Input: Read the sudoers entry for the specified user. args: username (str): username. returns:`r str: sudoers entry for the specified user. ### Response: def read_sudoers(): """ Read the sudoers entry for the specified user. args: username (str): username. returns:`r str: sudoers entry for the specified user. """ sudoers_path = '/etc/sudoers' rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars) sudoers_entries = list() copy_result = execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path)))) result_message = copy_result[0][1].decode('UTF-8') if 'No such file or directory' not in result_message: execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_sudoers_path)))) with open(tmp_sudoers_path) as tmp_sudoers_file: for line in tmp_sudoers_file: stripped = line.strip().replace(os.linesep, '') if stripped and not stripped.startswith('#'): sudoers_entries.append(stripped) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path)))) return sudoers_entries
def loadHistory(self): """ Loads the shop sale history Raises parseException """ pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=sales") try: rows = pg.find("b", text = "Date").parent.parent.parent.find_all("tr") # First and last row do not contain entries rows.pop(0) rows.pop(-1) self.history = [] for row in rows: parts = row.find_all("td") dets = {} dets['date'] = parts[0].text dets['item'] = parts[1].text dets['buyer'] = parts[2].text dets['price'] = parts[3].text self.history.append(dets) # Reverse the list to put it in order by date self.history.reverse() except Exception: logging.getLogger("neolib.shop").exception("Could not parse sales history.", {'pg': pg}) raise parseException
Loads the shop sale history Raises parseException
Below is the instruction that describes the task: ### Input: Loads the shop sale history Raises parseException ### Response: def loadHistory(self): """ Loads the shop sale history Raises parseException """ pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=sales") try: rows = pg.find("b", text = "Date").parent.parent.parent.find_all("tr") # First and last row do not contain entries rows.pop(0) rows.pop(-1) self.history = [] for row in rows: parts = row.find_all("td") dets = {} dets['date'] = parts[0].text dets['item'] = parts[1].text dets['buyer'] = parts[2].text dets['price'] = parts[3].text self.history.append(dets) # Reverse the list to put it in order by date self.history.reverse() except Exception: logging.getLogger("neolib.shop").exception("Could not parse sales history.", {'pg': pg}) raise parseException
def _microcanonical_average_moments(moments, alpha): """ Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics """ ret = dict() runs = moments.shape[0] sqrt_n = np.sqrt(runs) moments_sample_mean = moments.mean(axis=0) ret['moments'] = moments_sample_mean moments_sample_std = moments.std(axis=0, ddof=1) ret['moments_ci'] = np.empty((5, 2)) for k in range(5): if moments_sample_std[k]: old_settings = np.seterr(all='raise') ret['moments_ci'][k] = scipy.stats.t.interval( 1 - alpha, df=runs - 1, loc=moments_sample_mean[k], scale=moments_sample_std[k] / sqrt_n ) np.seterr(**old_settings) else: ret['moments_ci'][k] = ( moments_sample_mean[k] * np.ones(2) ) return ret
Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics
Below is the the instruction that describes the task: ### Input: Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics ### Response: def _microcanonical_average_moments(moments, alpha): """ Compute the average moments of the cluster size distributions Helper function for :func:`microcanonical_averages` Parameters ---------- moments : 2-D :py:class:`numpy.ndarray` of int ``moments.shape[1] == 5`. Each array ``moments[r, :]`` is the ``moments`` field of the output of :func:`sample_states`: The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster size distribution. alpha: float Significance level. Returns ------- ret : dict Moment statistics ret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5 The ``k``-th entry is the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. ret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2) ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal confidence interval of the average ``k``-th raw moment of the (absolute) cluster size distribution, with ``k`` ranging from ``0`` to ``4``. See Also -------- sample_states : computation of moments microcanonical_averages : moment statistics """ ret = dict() runs = moments.shape[0] sqrt_n = np.sqrt(runs) moments_sample_mean = moments.mean(axis=0) ret['moments'] = moments_sample_mean moments_sample_std = moments.std(axis=0, ddof=1) ret['moments_ci'] = np.empty((5, 2)) for k in range(5): if moments_sample_std[k]: old_settings = np.seterr(all='raise') ret['moments_ci'][k] = scipy.stats.t.interval( 1 - alpha, df=runs - 1, loc=moments_sample_mean[k], scale=moments_sample_std[k] / sqrt_n ) np.seterr(**old_settings) else: ret['moments_ci'][k] = ( moments_sample_mean[k] * np.ones(2) ) return ret
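The statistical core of the helper is a per-moment Student-t confidence interval around the sample mean; the sketch below applies the same scipy call to one fabricated column of data (requires numpy and scipy; the sample values are random, not from the source).

import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
samples = rng.normal(loc=5.0, scale=0.4, size=40)   # one moment observed across 40 runs

mean = samples.mean()
std = samples.std(ddof=1)
ci_low, ci_high = scipy.stats.t.interval(
    0.95, df=len(samples) - 1, loc=mean, scale=std / np.sqrt(len(samples))
)
print(mean, (ci_low, ci_high))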
def validate(self, *args, **kwargs): """ Step 4 (6 for invariant). Process contract (validator) """ # Schemes validation interface if is_scheme(self.validator): params = getcallargs(self.function, *args, **kwargs) params.update(kwargs) validator = self.validator(data=params, request=None) if validator.is_valid(): return raise self.exception(validator.errors) # Simple validation interface if hasattr(self.validator, 'is_valid'): validator = self.validator(*args, **kwargs) # is valid if validator.is_valid(): return # is invalid if hasattr(validator, 'errors'): raise self.exception(validator.errors) if hasattr(validator, '_errors'): raise self.exception(validator._errors) raise self.exception validation_result = self.validator(*args, **kwargs) # is invalid (validator return error message) if isinstance(validation_result, string_types): raise self.exception(validation_result) # is valid (truely result) if validation_result: return # is invalid (falsy result) raise self.exception
Step 4 (6 for invariant). Process contract (validator)
Below is the the instruction that describes the task: ### Input: Step 4 (6 for invariant). Process contract (validator) ### Response: def validate(self, *args, **kwargs): """ Step 4 (6 for invariant). Process contract (validator) """ # Schemes validation interface if is_scheme(self.validator): params = getcallargs(self.function, *args, **kwargs) params.update(kwargs) validator = self.validator(data=params, request=None) if validator.is_valid(): return raise self.exception(validator.errors) # Simple validation interface if hasattr(self.validator, 'is_valid'): validator = self.validator(*args, **kwargs) # is valid if validator.is_valid(): return # is invalid if hasattr(validator, 'errors'): raise self.exception(validator.errors) if hasattr(validator, '_errors'): raise self.exception(validator._errors) raise self.exception validation_result = self.validator(*args, **kwargs) # is invalid (validator return error message) if isinstance(validation_result, string_types): raise self.exception(validation_result) # is valid (truely result) if validation_result: return # is invalid (falsy result) raise self.exception
def update_job(self, job_record, uow, new_state): """ method updates job record with a new unit_of_work and new state""" original_job_state = job_record.state job_record.state = new_state job_record.related_unit_of_work = uow.db_id self.job_dao.update(job_record) msg = 'Updated Job {0} for {1}@{2}: state transfer {3} -> {4};' \ .format(job_record.db_id, job_record.process_name, job_record.timeperiod, original_job_state, new_state) self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
method updates job record with a new unit_of_work and new state
Below is the the instruction that describes the task: ### Input: method updates job record with a new unit_of_work and new state ### Response: def update_job(self, job_record, uow, new_state): """ method updates job record with a new unit_of_work and new state""" original_job_state = job_record.state job_record.state = new_state job_record.related_unit_of_work = uow.db_id self.job_dao.update(job_record) msg = 'Updated Job {0} for {1}@{2}: state transfer {3} -> {4};' \ .format(job_record.db_id, job_record.process_name, job_record.timeperiod, original_job_state, new_state) self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
def pisaPreLoop(node, context, collect=False): """ Collect all CSS definitions """ data = u"" if node.nodeType == Node.TEXT_NODE and collect: data = node.data elif node.nodeType == Node.ELEMENT_NODE: name = node.tagName.lower() if name in ("style", "link"): attr = pisaGetAttributes(context, name, node.attributes) media = [x.strip() for x in attr.media.lower().split(",") if x.strip()] if attr.get("type", "").lower() in ("", "text/css") and \ (not media or "all" in media or "print" in media or "pdf" in media): if name == "style": for node in node.childNodes: data += pisaPreLoop(node, context, collect=True) context.addCSS(data) return u"" if name == "link" and attr.href and attr.rel.lower() == "stylesheet": # print "CSS LINK", attr context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media))) for node in node.childNodes: result = pisaPreLoop(node, context, collect=collect) if collect: data += result return data
Collect all CSS definitions
Below is the the instruction that describes the task: ### Input: Collect all CSS definitions ### Response: def pisaPreLoop(node, context, collect=False): """ Collect all CSS definitions """ data = u"" if node.nodeType == Node.TEXT_NODE and collect: data = node.data elif node.nodeType == Node.ELEMENT_NODE: name = node.tagName.lower() if name in ("style", "link"): attr = pisaGetAttributes(context, name, node.attributes) media = [x.strip() for x in attr.media.lower().split(",") if x.strip()] if attr.get("type", "").lower() in ("", "text/css") and \ (not media or "all" in media or "print" in media or "pdf" in media): if name == "style": for node in node.childNodes: data += pisaPreLoop(node, context, collect=True) context.addCSS(data) return u"" if name == "link" and attr.href and attr.rel.lower() == "stylesheet": # print "CSS LINK", attr context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media))) for node in node.childNodes: result = pisaPreLoop(node, context, collect=collect) if collect: data += result return data
def _fill(self): """Advance the iterator without returning the old head.""" try: self._head = next(self._iterable) except StopIteration: self._head = None
Advance the iterator without returning the old head.
Below is the instruction that describes the task: ### Input: Advance the iterator without returning the old head. ### Response: def _fill(self): """Advance the iterator without returning the old head.""" try: self._head = next(self._iterable) except StopIteration: self._head = None
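The _fill/_head pair is the usual one-item look-ahead trick; here is a self-contained Python 3 sketch of a peekable wrapper built the same way (the Peekable class name and its peek/pop methods are illustrative, not part of the source).

class Peekable:
    """Wrap an iterator and keep one item of look-ahead in self._head."""
    def __init__(self, iterable):
        self._iterable = iter(iterable)
        self._fill()

    def _fill(self):
        """Advance the underlying iterator without returning the old head."""
        try:
            self._head = next(self._iterable)
        except StopIteration:
            self._head = None   # None doubles as the exhausted sentinel here

    def peek(self):
        return self._head

    def pop(self):
        head = self._head
        self._fill()
        return head

p = Peekable([1, 2, 3])
print(p.peek())  # -> 1 (nothing consumed yet)
print(p.pop())   # -> 1
print(p.peek())  # -> 2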
def list(self, list_folders=False, list_files=False): """ A simple generator that yields a File or Folder object based on the arguments. """ a_files = os.listdir(self.folder.path) for a_file in a_files: path = self.folder.child(a_file) if os.path.isdir(path): if list_folders: yield Folder(path) elif list_files: if not self.pattern or fnmatch.fnmatch(a_file, self.pattern): yield File(path)
A simple generator that yields a File or Folder object based on the arguments.
Below is the the instruction that describes the task: ### Input: A simple generator that yields a File or Folder object based on the arguments. ### Response: def list(self, list_folders=False, list_files=False): """ A simple generator that yields a File or Folder object based on the arguments. """ a_files = os.listdir(self.folder.path) for a_file in a_files: path = self.folder.child(a_file) if os.path.isdir(path): if list_folders: yield Folder(path) elif list_files: if not self.pattern or fnmatch.fnmatch(a_file, self.pattern): yield File(path)
def add_xenon_worker(self, worker_config): """Adds a worker to the pool; sets gears in motion.""" c = XenonInteractiveWorker(self.machine, worker_config) w = RemoteWorker( worker_config.name, threading.Lock(), worker_config.n_threads, [], *c.setup()) with self.wlock: self.workers[worker_config.name] = w def populate(job_source): """Populate the worker with jobs, if jobs are available.""" with w.lock, self.plock: # Worker lock ~~~~~~~~~~~~~~~~~~ while len(w.jobs) < w.max and not self.job_queue.empty(): msg = next(job_source) w.sink.send(msg) if msg is not EndOfQueue: key, job = msg w.jobs.append(key) else: break # lock end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def activate(_): """Activate the worker.""" job_source = self.job_queue.source() populate(job_source) sink = self.result_queue.sink() for result in w.source: sink.send(result) # do bookkeeping and submit a new job to the worker with w.lock: # Worker lock ~~~~~~~~~~~~~~~~~~~~~ w.jobs.remove(result.key) populate(job_source) # lock end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ for key in w.jobs: sink.send(ResultMessage( key, 'aborted', None, 'connection to remote worker lost.')) # Start the `activate` function when the worker goes online. t = threading.Thread( target=self.count(c.wait_until_running), args=(activate,), daemon=True) t.start()
Adds a worker to the pool; sets gears in motion.
Below is the the instruction that describes the task: ### Input: Adds a worker to the pool; sets gears in motion. ### Response: def add_xenon_worker(self, worker_config): """Adds a worker to the pool; sets gears in motion.""" c = XenonInteractiveWorker(self.machine, worker_config) w = RemoteWorker( worker_config.name, threading.Lock(), worker_config.n_threads, [], *c.setup()) with self.wlock: self.workers[worker_config.name] = w def populate(job_source): """Populate the worker with jobs, if jobs are available.""" with w.lock, self.plock: # Worker lock ~~~~~~~~~~~~~~~~~~ while len(w.jobs) < w.max and not self.job_queue.empty(): msg = next(job_source) w.sink.send(msg) if msg is not EndOfQueue: key, job = msg w.jobs.append(key) else: break # lock end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def activate(_): """Activate the worker.""" job_source = self.job_queue.source() populate(job_source) sink = self.result_queue.sink() for result in w.source: sink.send(result) # do bookkeeping and submit a new job to the worker with w.lock: # Worker lock ~~~~~~~~~~~~~~~~~~~~~ w.jobs.remove(result.key) populate(job_source) # lock end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ for key in w.jobs: sink.send(ResultMessage( key, 'aborted', None, 'connection to remote worker lost.')) # Start the `activate` function when the worker goes online. t = threading.Thread( target=self.count(c.wait_until_running), args=(activate,), daemon=True) t.start()
def get_value(self, variant_line=None, variant_dict=None, entry=None, raw_entry=None, vcf_header=None, csq_format=None, dict_key=None, individual_id=None): """ Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A splitted entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value """ value = None raw_entry = self.get_raw_entry( variant_line = variant_line, variant_dict = variant_dict, vcf_header=vcf_header, individual_id=individual_id, dict_key=dict_key ) # If data type is flag we only need to check if any entry exists if self.data_type == 'flag': if self.field == 'INFO': if variant_line: for info_entry in variant_line.split()[7].split(';'): if self.info_key == info_entry.split('=')[0]: value = True elif variant_dict: if self.info_key in variant_dict.get('info_dict',{}): value = True else: if raw_entry != '.': value = True # If we have a record rule we need to return the correct value elif raw_entry: # If there was no raw entry we will return None if self.record_rule: if self.data_type == 'string': if self.record_rule == 'max': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1), reverse=True ) if self.record_rule == 'min': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1) ) for string_rule in sorted_strings: if string_rule[0].lower() in raw_entry.lower(): value = string_rule[0] break else: typed_annotations = [] for value in self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id): if self.data_type == 'float': try: typed_annotations.append(float(value)) except ValueError: pass elif self.data_type == 'integer': try: typed_annotations.append(int(value)) except ValueError: pass if typed_annotations: if self.record_rule == 'max': value = max(typed_annotations) elif self.record_rule == 'min': value = min(typed_annotations) else: value = None # If no record rule is given we return the raw annotation # Here the data_type is not flag, and there is no record rule # We know that there exists a raw annotation else: # We will just return the first annotation found value = self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id)[0] if self.data_type == 'float': try: value = float(value) except ValueError: pass elif self.data_type == 'integer': try: value = int(value) except ValueError: pass return value
Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A split entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value
Below is the the instruction that describes the task: ### Input: Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A splitted entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value ### Response: def get_value(self, variant_line=None, variant_dict=None, entry=None, raw_entry=None, vcf_header=None, csq_format=None, dict_key=None, individual_id=None): """ Return the value as specified by plugin Get value will return one value or None if no correct value is found. Arguments: variant_line (str): A vcf variant line variant_dict (dict): A variant dictionary entry (list): A splitted entry raw_entry (str): The raw entry from the vcf file vcf_header (list): The vcf header line with sample ids csq_format (list): The CSQ format family_id (str): The family id individual_id (str): The individual id Returns: value (str): A string that represents the correct value """ value = None raw_entry = self.get_raw_entry( variant_line = variant_line, variant_dict = variant_dict, vcf_header=vcf_header, individual_id=individual_id, dict_key=dict_key ) # If data type is flag we only need to check if any entry exists if self.data_type == 'flag': if self.field == 'INFO': if variant_line: for info_entry in variant_line.split()[7].split(';'): if self.info_key == info_entry.split('=')[0]: value = True elif variant_dict: if self.info_key in variant_dict.get('info_dict',{}): value = True else: if raw_entry != '.': value = True # If we have a record rule we need to return the correct value elif raw_entry: # If there was no raw entry we will return None if self.record_rule: if self.data_type == 'string': if self.record_rule == 'max': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1), reverse=True ) if self.record_rule == 'min': sorted_strings = sorted( self.string_rules.items(), key=operator.itemgetter(1) ) for string_rule in sorted_strings: if string_rule[0].lower() in raw_entry.lower(): value = string_rule[0] break else: typed_annotations = [] for value in self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id): if self.data_type == 'float': try: typed_annotations.append(float(value)) except ValueError: pass elif self.data_type == 'integer': try: typed_annotations.append(int(value)) except ValueError: pass if typed_annotations: if self.record_rule == 'max': value = max(typed_annotations) elif self.record_rule == 'min': value = min(typed_annotations) else: value = None # If no record rule is given we return the raw annotation # Here the data_type is not flag, and there is no record rule # We know that there exists a raw annotation else: # We will just return the first annotation found value = self.get_entry( raw_entry=raw_entry, vcf_header=vcf_header, csq_format=csq_format, dict_key=dict_key, individual_id=individual_id)[0] if self.data_type == 'float': try: value = float(value) except ValueError: pass elif self.data_type == 'integer': try: value = int(value) except ValueError: pass return value
def opened(self): """Handler called when the WebSocket connection is opened. The first thing to do then is to authenticate ourselves.""" request = { 'type': 'authenticate', 'token': self._token, 'userAgent': '{} ws4py/{}'.format(version.user_agent, ws4py.__version__), } self.send(json.dumps(request))
Handler called when the WebSocket connection is opened. The first thing to do then is to authenticate ourselves.
Below is the instruction that describes the task:
### Input:
Handler called when the WebSocket connection is opened. The first thing
to do then is to authenticate ourselves.
### Response:
def opened(self):
    """Handler called when the WebSocket connection is opened. The first thing
    to do then is to authenticate ourselves."""
    request = {
        'type': 'authenticate',
        'token': self._token,
        'userAgent': '{} ws4py/{}'.format(version.user_agent, ws4py.__version__),
    }
    self.send(json.dumps(request))
def get_account_authToken(self, account=None, account_name=''):
    """ Use the DelegateAuthRequest to provide a token and its lifetime
        for the provided account.

        If account is provided we use it, else we retrieve the account from the
        provided account_name.
    """
    if account is None:
        account = self.get_account(zobjects.Account(name=account_name))
    selector = account.to_selector()
    resp = self.request('DelegateAuth', {'account': selector})

    authToken = resp['authToken']
    lifetime = int(resp['lifetime'])
    return authToken, lifetime
Use the DelegateAuthRequest to provide a token and its lifetime for the provided account. If account is provided we use it, else we retrieve the account from the provided account_name.
Below is the instruction that describes the task:
### Input:
Use the DelegateAuthRequest to provide a token and its lifetime
for the provided account.

If account is provided we use it, else we retrieve the account from the
provided account_name.
### Response:
def get_account_authToken(self, account=None, account_name=''):
    """ Use the DelegateAuthRequest to provide a token and its lifetime
        for the provided account.

        If account is provided we use it, else we retrieve the account from the
        provided account_name.
    """
    if account is None:
        account = self.get_account(zobjects.Account(name=account_name))
    selector = account.to_selector()
    resp = self.request('DelegateAuth', {'account': selector})

    authToken = resp['authToken']
    lifetime = int(resp['lifetime'])
    return authToken, lifetime
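A minimal usage sketch for the record above. It is illustrative only: `zc` stands in for an already-authenticated admin client object that exposes the get_account_authToken method, and the account name is a placeholder.

# Illustrative only: `zc` is assumed to be an authenticated admin client
# exposing the get_account_authToken method shown above.
auth_token, lifetime = zc.get_account_authToken(account_name='user@example.com')
print('delegated token:', auth_token, '(lifetime reported by server:', lifetime, ')')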
def on_update_page_links(self, evt): """ Perform PDF update of changed links.""" if not self.update_links: # skip if unsupported links evt.Skip() return pg = self.doc[getint(self.TextToPage.Value) -1] for i in range(len(self.page_links)): l = self.page_links[i] if l.get("update", False): # "update" must be True if l["xref"] == 0: # no xref => new link pg.insertLink(l) elif l["kind"] < 1 or l["kind"] > len(self.linkTypeStrings): pg.deleteLink(l) # delete invalid link else: pg.updateLink(l) # else link update l["update"] = False # reset update indicator self.page_links[i] = l # update list of page links self.btn_Update.Disable() # disable update button self.t_Update.Label = "" # and its message self.btn_Save.Enable() self.t_Save.Label = "There are changes. Press to save them to file." evt.Skip() return
Perform PDF update of changed links.
Below is the instruction that describes the task:
### Input:
Perform PDF update of changed links.
### Response:
def on_update_page_links(self, evt):
    """ Perform PDF update of changed links."""
    if not self.update_links:               # skip if unsupported links
        evt.Skip()
        return
    pg = self.doc[getint(self.TextToPage.Value) -1]
    for i in range(len(self.page_links)):
        l = self.page_links[i]
        if l.get("update", False):          # "update" must be True
            if l["xref"] == 0:              # no xref => new link
                pg.insertLink(l)
            elif l["kind"] < 1 or l["kind"] > len(self.linkTypeStrings):
                pg.deleteLink(l)            # delete invalid link
            else:
                pg.updateLink(l)            # else link update
            l["update"] = False             # reset update indicator
            self.page_links[i] = l          # update list of page links
    self.btn_Update.Disable()               # disable update button
    self.t_Update.Label = ""                # and its message
    self.btn_Save.Enable()
    self.t_Save.Label = "There are changes. Press to save them to file."
    evt.Skip()
    return
def folders(self, mountPoint): """Get an iterator of JFSFolder() from the given mountPoint. "mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name. """ if isinstance(mountPoint, six.string_types): # shortcut: pass a mountpoint name mountPoint = self.mountPoints[mountPoint] try: return [JFSFolder(f, self, parentpath='%s/%s' % (self.path, mountPoint.name)) for f in self.contents(mountPoint).folders.iterchildren()] except AttributeError as err: # no files at all return [x for x in []]
Get an iterator of JFSFolder() from the given mountPoint. "mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name.
Below is the instruction that describes the task:
### Input:
Get an iterator of JFSFolder() from the given mountPoint.

"mountPoint" may be either an actual mountPoint element from
JFSDevice.mountPoints{} or its .name.
### Response:
def folders(self, mountPoint):
    """Get an iterator of JFSFolder() from the given mountPoint.

    "mountPoint" may be either an actual mountPoint element from
    JFSDevice.mountPoints{} or its .name. """
    if isinstance(mountPoint, six.string_types): # shortcut: pass a mountpoint name
        mountPoint = self.mountPoints[mountPoint]
    try:
        return [JFSFolder(f, self, parentpath='%s/%s' % (self.path, mountPoint.name))
                for f in self.contents(mountPoint).folders.iterchildren()]
    except AttributeError as err:
        # no files at all
        return [x for x in []]
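A short sketch of how the method above might be called. Illustrative only: `device` is assumed to be a JFSDevice instance from an authenticated session, and 'Archive' is a placeholder mount point name.

# Illustrative only: `device` is assumed to be a JFSDevice whose mountPoints
# mapping contains an entry named 'Archive'.
for folder in device.folders('Archive'):
    print(folder)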
def index_feature(self, feature_name, tokenize=lambda x: x, structured=False): """ Creates a new :class:`.FeatureSet` from the attribute ``feature_name`` in each :class:`.Paper`\. New :class:`.FeatureSet`\s are added to :attr:`.features`\. Parameters ---------- feature_name : str The name of a :class:`.Paper` attribute. """ self._init_featureset(feature_name, structured=structured) for paper in self.papers: self.index_paper_by_feature(paper, feature_name, tokenize, structured)
Creates a new :class:`.FeatureSet` from the attribute ``feature_name`` in each :class:`.Paper`\. New :class:`.FeatureSet`\s are added to :attr:`.features`\. Parameters ---------- feature_name : str The name of a :class:`.Paper` attribute.
Below is the instruction that describes the task:
### Input:
Creates a new :class:`.FeatureSet` from the attribute ``feature_name``
in each :class:`.Paper`\.

New :class:`.FeatureSet`\s are added to :attr:`.features`\.

Parameters
----------
feature_name : str
    The name of a :class:`.Paper` attribute.
### Response:
def index_feature(self, feature_name, tokenize=lambda x: x, structured=False):
    """
    Creates a new :class:`.FeatureSet` from the attribute ``feature_name``
    in each :class:`.Paper`\.

    New :class:`.FeatureSet`\s are added to :attr:`.features`\.

    Parameters
    ----------
    feature_name : str
        The name of a :class:`.Paper` attribute.
    """
    self._init_featureset(feature_name, structured=structured)
    for paper in self.papers:
        self.index_paper_by_feature(paper, feature_name, tokenize, structured)
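An illustrative call for the record above, assuming `corpus` is an instance of the class that defines index_feature with its `papers` list already populated; 'abstract' is a placeholder attribute name.

# Illustrative only: index a hypothetical 'abstract' attribute, splitting the
# text into lowercase tokens. The resulting FeatureSet should then be reachable
# through corpus.features (presumably keyed by the feature name).
corpus.index_feature('abstract', tokenize=lambda text: text.lower().split())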
def _set(self, name, value): """ Args: name (str | unicode): Name of slot to set. value: Associated value """ if value is not UNSET: if isinstance(value, Slotted): current = getattr(self, name, UNSET) if current is None or current is UNSET: current = value.__class__() current.set(value) setattr(self, name, current) return if isinstance(current, Slotted): current.set(value) return setattr(self, name, value)
Args: name (str | unicode): Name of slot to set. value: Associated value
Below is the instruction that describes the task:
### Input:
Args:
    name (str | unicode): Name of slot to set.
    value: Associated value
### Response:
def _set(self, name, value):
    """
    Args:
        name (str | unicode): Name of slot to set.
        value: Associated value
    """
    if value is not UNSET:
        if isinstance(value, Slotted):
            current = getattr(self, name, UNSET)
            if current is None or current is UNSET:
                current = value.__class__()
                current.set(value)
                setattr(self, name, current)
                return

            if isinstance(current, Slotted):
                current.set(value)
                return

        setattr(self, name, value)
def _validate_indexers( self, indexers: Mapping, ) -> List[Tuple[Any, Union[slice, Variable]]]: """ Here we make sure + indexer has a valid keys + indexer is in a valid data type + string indexers are cast to the appropriate date type if the associated index is a DatetimeIndex or CFTimeIndex """ from .dataarray import DataArray invalid = [k for k in indexers if k not in self.dims] if invalid: raise ValueError("dimensions %r do not exist" % invalid) # all indexers should be int, slice, np.ndarrays, or Variable indexers_list = [] # type: List[Tuple[Any, Union[slice, Variable]]] for k, v in indexers.items(): if isinstance(v, slice): indexers_list.append((k, v)) continue if isinstance(v, Variable): pass elif isinstance(v, DataArray): v = v.variable elif isinstance(v, tuple): v = as_variable(v) elif isinstance(v, Dataset): raise TypeError('cannot use a Dataset as an indexer') elif isinstance(v, Sequence) and len(v) == 0: v = IndexVariable((k, ), np.zeros((0,), dtype='int64')) else: v = np.asarray(v) if v.dtype.kind == 'U' or v.dtype.kind == 'S': index = self.indexes[k] if isinstance(index, pd.DatetimeIndex): v = v.astype('datetime64[ns]') elif isinstance(index, xr.CFTimeIndex): v = _parse_array_of_cftime_strings(v, index.date_type) if v.ndim == 0: v = Variable((), v) elif v.ndim == 1: v = IndexVariable((k,), v) else: raise IndexError( "Unlabeled multi-dimensional array cannot be " "used for indexing: {}".format(k)) if v.ndim == 1: v = v.to_index_variable() indexers_list.append((k, v)) return indexers_list
Here we make sure + indexer has a valid keys + indexer is in a valid data type + string indexers are cast to the appropriate date type if the associated index is a DatetimeIndex or CFTimeIndex
Below is the instruction that describes the task:
### Input:
Here we make sure
+ indexer has a valid keys
+ indexer is in a valid data type
+ string indexers are cast to the appropriate date type
  if the associated index is a DatetimeIndex or CFTimeIndex
### Response:
def _validate_indexers(
    self, indexers: Mapping,
) -> List[Tuple[Any, Union[slice, Variable]]]:
    """ Here we make sure
    + indexer has a valid keys
    + indexer is in a valid data type
    + string indexers are cast to the appropriate date type
      if the associated index is a DatetimeIndex or CFTimeIndex
    """
    from .dataarray import DataArray

    invalid = [k for k in indexers if k not in self.dims]
    if invalid:
        raise ValueError("dimensions %r do not exist" % invalid)

    # all indexers should be int, slice, np.ndarrays, or Variable
    indexers_list = []  # type: List[Tuple[Any, Union[slice, Variable]]]
    for k, v in indexers.items():
        if isinstance(v, slice):
            indexers_list.append((k, v))
            continue

        if isinstance(v, Variable):
            pass
        elif isinstance(v, DataArray):
            v = v.variable
        elif isinstance(v, tuple):
            v = as_variable(v)
        elif isinstance(v, Dataset):
            raise TypeError('cannot use a Dataset as an indexer')
        elif isinstance(v, Sequence) and len(v) == 0:
            v = IndexVariable((k, ), np.zeros((0,), dtype='int64'))
        else:
            v = np.asarray(v)

            if v.dtype.kind == 'U' or v.dtype.kind == 'S':
                index = self.indexes[k]
                if isinstance(index, pd.DatetimeIndex):
                    v = v.astype('datetime64[ns]')
                elif isinstance(index, xr.CFTimeIndex):
                    v = _parse_array_of_cftime_strings(v, index.date_type)

            if v.ndim == 0:
                v = Variable((), v)
            elif v.ndim == 1:
                v = IndexVariable((k,), v)
            else:
                raise IndexError(
                    "Unlabeled multi-dimensional array cannot be "
                    "used for indexing: {}".format(k))

        if v.ndim == 1:
            v = v.to_index_variable()

        indexers_list.append((k, v))

    return indexers_list
def _track(self, state, live_defs, statements): """ Given all live definitions prior to this program point, track the changes, and return a new list of live definitions. We scan through the action list of the new state to track the changes. :param state: The input state at that program point. :param live_defs: All live definitions prior to reaching this program point. :param list statements: A list of VEX statements. :returns: A list of new live definitions. :rtype: angr.analyses.ddg.LiveDefinitions """ # Make a copy of live_defs self._live_defs = live_defs.copy() action_list = list(state.history.recent_actions) # Since all temporary variables are local, we simply track them in a dict self._temp_variables = { } self._temp_register_symbols = { } # All dependence edges are added to the graph either at the end of this method, or when they are going to be # overwritten by a new edge. This is because we sometimes have to modify a previous edge (e.g. add new labels # to the edge) self._temp_edges = defaultdict(list) self._register_edges = defaultdict(list) last_statement_id = None self._variables_per_statement = None # program variables read out in the same statement. we keep a copy of those variables here so # we can link it to the tmp_write action right afterwards self._custom_data_per_statement = None for a in action_list: if last_statement_id is None or last_statement_id != a.stmt_idx: # update statement ID last_statement_id = a.stmt_idx statement = statements[last_statement_id] if statements and last_statement_id < len(statements) else None # initialize all per-statement data structures self._variables_per_statement = [ ] self._custom_data_per_statement = None if a.sim_procedure is None: current_code_location = CodeLocation(a.bbl_addr, a.stmt_idx, ins_addr=a.ins_addr) else: current_code_location = CodeLocation(None, None, sim_procedure=a.sim_procedure) if a.type == 'exit': self._handle_exit(a, current_code_location, state, statement) elif a.type == 'operation': self._handle_operation(a, current_code_location, state, statement) elif a.type == 'constraint': pass else: handler_name = "_handle_%s_%s" % (a.type, a.action) if hasattr(self, handler_name): getattr(self, handler_name)(a, current_code_location, state, statement) else: l.debug("Skip an unsupported action %s.", a) return self._live_defs
Given all live definitions prior to this program point, track the changes, and return a new list of live definitions. We scan through the action list of the new state to track the changes. :param state: The input state at that program point. :param live_defs: All live definitions prior to reaching this program point. :param list statements: A list of VEX statements. :returns: A list of new live definitions. :rtype: angr.analyses.ddg.LiveDefinitions
Below is the instruction that describes the task:
### Input:
Given all live definitions prior to this program point, track the changes, and return a new list of live
definitions. We scan through the action list of the new state to track the changes.

:param state:            The input state at that program point.
:param live_defs:        All live definitions prior to reaching this program point.
:param list statements:  A list of VEX statements.
:returns:                A list of new live definitions.
:rtype:                  angr.analyses.ddg.LiveDefinitions
### Response:
def _track(self, state, live_defs, statements):
    """
    Given all live definitions prior to this program point, track the changes, and return a new list of live
    definitions. We scan through the action list of the new state to track the changes.

    :param state:            The input state at that program point.
    :param live_defs:        All live definitions prior to reaching this program point.
    :param list statements:  A list of VEX statements.
    :returns:                A list of new live definitions.
    :rtype:                  angr.analyses.ddg.LiveDefinitions
    """

    # Make a copy of live_defs
    self._live_defs = live_defs.copy()

    action_list = list(state.history.recent_actions)

    # Since all temporary variables are local, we simply track them in a dict
    self._temp_variables = { }
    self._temp_register_symbols = { }

    # All dependence edges are added to the graph either at the end of this method, or when they are going to be
    # overwritten by a new edge. This is because we sometimes have to modify a previous edge (e.g. add new labels
    # to the edge)
    self._temp_edges = defaultdict(list)
    self._register_edges = defaultdict(list)

    last_statement_id = None
    self._variables_per_statement = None  # program variables read out in the same statement. we keep a copy of those variables here so
                                          # we can link it to the tmp_write action right afterwards
    self._custom_data_per_statement = None

    for a in action_list:

        if last_statement_id is None or last_statement_id != a.stmt_idx:
            # update statement ID
            last_statement_id = a.stmt_idx
            statement = statements[last_statement_id] if statements and last_statement_id < len(statements) else None

            # initialize all per-statement data structures
            self._variables_per_statement = [ ]
            self._custom_data_per_statement = None

        if a.sim_procedure is None:
            current_code_location = CodeLocation(a.bbl_addr, a.stmt_idx, ins_addr=a.ins_addr)
        else:
            current_code_location = CodeLocation(None, None, sim_procedure=a.sim_procedure)

        if a.type == 'exit':
            self._handle_exit(a, current_code_location, state, statement)
        elif a.type == 'operation':
            self._handle_operation(a, current_code_location, state, statement)
        elif a.type == 'constraint':
            pass
        else:
            handler_name = "_handle_%s_%s" % (a.type, a.action)
            if hasattr(self, handler_name):
                getattr(self, handler_name)(a, current_code_location, state, statement)
            else:
                l.debug("Skip an unsupported action %s.", a)

    return self._live_defs
def content_type(self): """ Returns the content type of the request in all cases where it is different than a submitted form - application/x-www-form-urlencoded """ type_formencoded = "application/x-www-form-urlencoded" ctype = self.request.META.get('CONTENT_TYPE', type_formencoded) if type_formencoded in ctype: return None return ctype
Returns the content type of the request in all cases where it is different than a submitted form - application/x-www-form-urlencoded
Below is the instruction that describes the task:
### Input:
Returns the content type of the request in all cases where it is
different than a submitted form - application/x-www-form-urlencoded
### Response:
def content_type(self):
    """
    Returns the content type of the request in all cases where it is
    different than a submitted form - application/x-www-form-urlencoded
    """
    type_formencoded = "application/x-www-form-urlencoded"

    ctype = self.request.META.get('CONTENT_TYPE', type_formencoded)

    if type_formencoded in ctype:
        return None

    return ctype
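A self-contained sketch of the behaviour encoded in the record above, re-expressing the same check as a plain function to show that form-encoded requests yield None while other content types are passed through.

# Stand-in demonstration of the content_type logic above.
def _content_type(meta):
    type_formencoded = "application/x-www-form-urlencoded"
    ctype = meta.get('CONTENT_TYPE', type_formencoded)
    return None if type_formencoded in ctype else ctype

print(_content_type({'CONTENT_TYPE': 'application/json'}))                   # application/json
print(_content_type({'CONTENT_TYPE': 'application/x-www-form-urlencoded'}))  # None
print(_content_type({}))                                                     # None (defaults to form-encoded)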