Dataset columns (string lengths):
  code: 75 to 104k characters
  docstring: 1 to 46.9k characters
  text: 164 to 112k characters
def start_worker_thread(self, sleep_interval=1.0):
    """start_worker_thread

    Start the helper worker thread to publish queued messages to Splunk

    :param sleep_interval: sleep in seconds before reading from the queue again
    """
    # Start a worker thread responsible for sending logs
    if self.sleep_interval > 0:
        self.debug_log('starting worker thread')
        self.timer = Timer(sleep_interval, self.perform_work)
        self.timer.daemon = True  # Auto-kill thread if main process exits
        self.timer.start()
start_worker_thread Start the helper worker thread to publish queued messages to Splunk :param sleep_interval: sleep in seconds before reading from the queue again
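For context, here is a minimal, self-contained sketch of the same pattern: a daemon threading.Timer that periodically drains an in-memory queue. The QueuedPublisher class and its method names are illustrative assumptions, not the actual Splunk handler API.

import queue
import threading
import time

class QueuedPublisher(object):
    """Illustrative only: periodically drains an in-memory queue."""

    def __init__(self):
        self.queue = queue.Queue()
        self.timer = None

    def perform_work(self):
        # Drain whatever has been queued, then schedule the next run.
        while not self.queue.empty():
            print('publishing:', self.queue.get())
        self.start_worker_thread(sleep_interval=1.0)

    def start_worker_thread(self, sleep_interval=1.0):
        self.timer = threading.Timer(sleep_interval, self.perform_work)
        self.timer.daemon = True  # do not keep the interpreter alive on exit
        self.timer.start()

publisher = QueuedPublisher()
publisher.queue.put('hello splunk')
publisher.start_worker_thread(sleep_interval=0.5)
time.sleep(2)  # give the daemon timer a chance to fire in this demo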
def put(self, pid, record, **kwargs):
    """Replace a record.

    Permissions: ``update_permission_factory``

    The body should be a JSON object, which will fully replace the current
    record metadata.

    Procedure description:

    #. The ETag is checked.
    #. The record is updated by calling the record API `clear()`,
       `update()` and then `commit()`.
    #. The HTTP response is built with the help of the link factory.

    :param pid: Persistent identifier for record.
    :param record: Record object.
    :returns: The modified record.
    """
    if request.mimetype not in self.loaders:
        raise UnsupportedMediaRESTError(request.mimetype)

    data = self.loaders[request.mimetype]()
    if data is None:
        raise InvalidDataRESTError()

    self.check_etag(str(record.revision_id))

    record.clear()
    record.update(data)
    record.commit()
    db.session.commit()

    if self.indexer_class:
        self.indexer_class().index(record)

    return self.make_response(
        pid, record, links_factory=self.links_factory)
Replace a record. Permissions: ``update_permission_factory`` The body should be a JSON object, which will fully replace the current record metadata. Procedure description: #. The ETag is checked. #. The record is updated by calling the record API `clear()`, `update()` and then `commit()`. #. The HTTP response is built with the help of the link factory. :param pid: Persistent identifier for record. :param record: Record object. :returns: The modified record.
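As a hedged usage sketch, a client replacing a record over HTTP might look like the following, assuming the common convention that check_etag() compares the record revision_id against the request's If-Match header; the URL and revision value are placeholders.

import json
import requests  # third-party HTTP client, used here only for illustration

url = 'https://example.org/api/records/1'  # placeholder record URL
headers = {
    'Content-Type': 'application/json',
    'If-Match': '"3"',  # assumed to carry the current record revision_id
}
new_metadata = {'title': 'Replacement title'}  # full replacement of the metadata

resp = requests.put(url, data=json.dumps(new_metadata), headers=headers)
print(resp.status_code)  # expect 200 on success, 412 if the revision has moved on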
def error_check(options):
    """
    Error check
    :rtype : object
    """
    compare = options.compare
    ensemble_paths = options.ensemble_paths
    if compare and len(ensemble_paths) > 2:
        print("\n Only 2 ensembles can be compared, {d} were specified\n".format(d=len(ensemble_paths)))
        sys.exit(1)
Error check :rtype : object
def divide(cls, jobs, count):
    '''Divide up the provided jobs into count evenly-sized groups'''
    jobs = list(zip(*zip_longest(*[iter(jobs)] * count)))
    # If we had no jobs to resume, then we get an empty list
    jobs = jobs or [()] * count
    for index in range(count):
        # Filter out the items in jobs that are Nones
        jobs[index] = [j for j in jobs[index] if j != None]
    return jobs
Divide up the provided jobs into count evenly-sized groups
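To make the zip_longest trick concrete, here is a standalone version of the same logic (without the cls parameter) plus a usage example; note the groups are filled round-robin rather than contiguously.

from itertools import zip_longest

def divide(jobs, count):
    '''Divide the provided jobs into count evenly-sized groups (round-robin).'''
    groups = list(zip(*zip_longest(*[iter(jobs)] * count)))
    groups = groups or [()] * count
    # zip_longest pads the last chunk with None; strip the padding back out.
    return [[j for j in group if j is not None] for group in groups]

print(divide([1, 2, 3, 4, 5, 6, 7], 3))
# [[1, 4, 7], [2, 5], [3, 6]]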
def _set_global_isis_info(self, v, load=False):
    """
    Setter method for global_isis_info, mapped from YANG variable
    /isis_state/global_isis_info (container)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_global_isis_info is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_global_isis_info() directly.

    YANG Description: ISIS Global
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name="global-isis-info", rest_name="global-isis-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """global_isis_info must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=global_isis_info.global_isis_info, is_container='container', presence=False, yang_name="global-isis-info", rest_name="global-isis-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-global', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })
    self.__global_isis_info = t
    if hasattr(self, '_set'):
        self._set()
Setter method for global_isis_info, mapped from YANG variable /isis_state/global_isis_info (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_isis_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_isis_info() directly. YANG Description: ISIS Global
def _ParseStringOption(cls, options, argument_name, default_value=None):
    """Parses a string command line argument.

    Args:
      options (argparse.Namespace): parser options.
      argument_name (str): name of the command line argument.
      default_value (Optional[str]): default value of the command line
          argument.

    Returns:
      str: command line argument value or the default value if the command
          line argument is not set

    Raises:
      BadConfigOption: if the command line argument value cannot be converted
          to a Unicode string.
    """
    argument_value = getattr(options, argument_name, None)
    if argument_value is None:
        return default_value

    if isinstance(argument_value, py2to3.BYTES_TYPE):
        encoding = sys.stdin.encoding

        # Note that sys.stdin.encoding can be None.
        if not encoding:
            encoding = locale.getpreferredencoding()
        if not encoding:
            encoding = cls._PREFERRED_ENCODING

        try:
            argument_value = argument_value.decode(encoding)
        except UnicodeDecodeError as exception:
            raise errors.BadConfigOption((
                'Unable to convert option: {0:s} to Unicode with error: '
                '{1!s}.').format(argument_name, exception))

    elif not isinstance(argument_value, py2to3.UNICODE_TYPE):
        raise errors.BadConfigOption(
            'Unsupported option: {0:s} string type required.'.format(
                argument_name))

    return argument_value
Parses a string command line argument. Args: options (argparse.Namespace): parser options. argument_name (str): name of the command line argument. default_value (Optional[str]): default value of the command line argument. Returns: str: command line argument value or the default value if the command line argument is not set Raises: BadConfigOption: if the command line argument value cannot be converted to a Unicode string.
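The interesting part is the encoding fallback chain. A simplified standalone sketch of the same idea, replacing plaso's py2to3 and errors helpers with plain Python 3 equivalents (the names below are assumptions, not the original module's API):

import argparse
import locale
import sys

def parse_string_option(options, argument_name, default_value=None,
                        preferred_encoding='utf-8'):
    """Return a text value for an argparse option, decoding bytes if needed."""
    argument_value = getattr(options, argument_name, None)
    if argument_value is None:
        return default_value

    if isinstance(argument_value, bytes):
        # Fall back through stdin encoding, then locale, then a preferred default.
        encoding = (sys.stdin.encoding or locale.getpreferredencoding()
                    or preferred_encoding)
        try:
            argument_value = argument_value.decode(encoding)
        except UnicodeDecodeError as exception:
            raise ValueError('Unable to convert option: {0:s} to Unicode with '
                             'error: {1!s}.'.format(argument_name, exception))
    elif not isinstance(argument_value, str):
        raise ValueError('Unsupported option: {0:s} string type required.'.format(
            argument_name))

    return argument_value

options = argparse.Namespace(source=b'/cases/evidence.img')
print(parse_string_option(options, 'source'))  # '/cases/evidence.img'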
def _get_webpages(self):
    """
    :rtype: list(list(str))
    """
    urls = []
    for child in self.vcard.getChildren():
        if child.name == "URL":
            urls.append(child.value)
    return sorted(urls)
:rtype: list(list(str))
def assign_param_names(cls=None, param_class=None):
    """Class decorator to assign parameter name to instances of :class:`Param`.

    .. sourcecode::

        @assign_param_names
        class ConfigSectionSchema(object):
            alice = Param(type=str)
            bob = Param(type=str)

        assert ConfigSectionSchema.alice.name == "alice"
        assert ConfigSectionSchema.bob.name == "bob"

    .. sourcecode::

        # -- NESTED ASSIGN: Covers also nested SectionSchema subclasses.
        @assign_param_names
        class ConfigSectionSchema(object):
            class Foo(SectionSchema):
                alice = Param(type=str)
                bob = Param(type=str)

        assert ConfigSectionSchema.Foo.alice.name == "alice"
        assert ConfigSectionSchema.Foo.bob.name == "bob"
    """
    if param_class is None:
        param_class = Param

    def decorate_class(cls):
        for name, value in select_params_from_section_schema(cls, param_class,
                                                             deep=True):
            # -- ANNOTATE PARAM: By assigning its name
            if not value.name:
                value.name = name
        return cls

    # -- DECORATOR LOGIC:
    if cls is None:
        # -- CASE: @assign_param_names
        # -- CASE: @assign_param_names(...)
        return decorate_class
    else:
        # -- CASE: @assign_param_names class X: ...
        # -- CASE: assign_param_names(my_class)
        # -- CASE: my_class = assign_param_names(my_class)
        return decorate_class(cls)
Class decorator to assign parameter name to instances of :class:`Param`. .. sourcecode:: @assign_param_names class ConfigSectionSchema(object): alice = Param(type=str) bob = Param(type=str) assert ConfigSectionSchema.alice.name == "alice" assert ConfigSectionSchema.bob.name == "bob" .. sourcecode:: # -- NESTED ASSIGN: Covers also nested SectionSchema subclasses. @assign_param_names class ConfigSectionSchema(object): class Foo(SectionSchema): alice = Param(type=str) bob = Param(type=str) assert ConfigSectionSchema.Foo.alice.name == "alice" assert ConfigSectionSchema.Foo.bob.name == "bob"
def hashantijoin(left, right, key=None, lkey=None, rkey=None):
    """Alternative implementation of :func:`petl.transform.joins.antijoin`,
    where the join is executed by constructing an in-memory set for all keys
    found in the right hand table, then iterating over rows from the left
    hand table.

    May be faster and/or more resource efficient where the right table is
    small and the left table is large.

    Left and right tables with different key fields can be handled via the
    `lkey` and `rkey` arguments.
    """
    lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
    return HashAntiJoinView(left, right, lkey, rkey)
Alternative implementation of :func:`petl.transform.joins.antijoin`, where the join is executed by constructing an in-memory set for all keys found in the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
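A small usage sketch with in-memory petl tables; the toy data is made up, and the expected output follows the anti-join semantics described above.

import petl as etl

left = [['id', 'colour'],
        [1, 'blue'],
        [2, 'red'],
        [3, 'purple']]
right = [['id', 'shape'],
         [1, 'circle'],
         [3, 'square']]

# Keep only the left rows whose key has no match in the right table.
table = etl.hashantijoin(left, right, key='id')
print(etl.look(table))
# expected to show the single unmatched row: id=2, colour='red'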
def buy_on_drop(symbol_set="sp5002012",
                dataobj=dataobj,
                start=datetime.datetime(2008, 1, 3),
                end=datetime.datetime(2009, 12, 28),
                market_sym='$SPX',
                threshold=6,
                sell_delay=5,
                ):
    '''Compute and display an "event profile" for multiple sets of symbols'''
    if symbol_set:
        if isinstance(symbol_set, basestring):
            if symbol_set.lower().startswith('sp'):
                symbol_set = dataobj.get_symbols_from_list(symbol_set.lower())
            else:
                symbol_set = [sym.strip().upper() for sym in symbol_set.split(",")]
    else:
        symbol_set = dataobj.get_symbols_from_list("sp5002012")
    if market_sym:
        symbol_set.append(market_sym)

    print "Starting Event Study, retrieving data for the {0} symbol list...".format(symbol_set)
    market_data = get_clean_prices(symbol_set, dataobj=dataobj, start=start, end=end)

    print "Finding events for {0} symbols between {1} and {2}...".format(len(symbol_set), start, end)
    trigger_kwargs = {'threshold': threshold}
    events = find_events(symbol_set, market_data, market_sym=market_sym,
                         trigger=drop_below, trigger_kwargs=trigger_kwargs)

    csvwriter = csv.writer(getattr(args, 'outfile', open('buy_on_drop_outfile.csv', 'w')),
                           dialect='excel', quoting=csv.QUOTE_MINIMAL)
    for order in generate_orders(events, sell_delay=sell_delay, sep=None):
        csvwriter.writerow(order)

    print "Creating Study report for {0} events...".format(len(events))
    ep.eventprofiler(events, market_data, i_lookback=20, i_lookforward=20,
                     s_filename='Event report--buy on drop below {0} for {1} symbols.pdf'.format(
                         threshold, len(symbol_set)),
                     b_market_neutral=True, b_errorbars=True,
                     s_market_sym=market_sym,
                     )
    return events
Compute and display an "event profile" for multiple sets of symbols
def validate_query_params(self, request):
    """
    Validate that query params are in the list of valid query keywords in
    :py:attr:`query_regex`

    :raises ValidationError: if not.
    """
    # TODO: For jsonapi error object conformance, must set jsonapi errors
    # "parameter" for the ValidationError. This requires extending DRF/DJA
    # Exceptions.
    for qp in request.query_params.keys():
        if not self.query_regex.match(qp):
            raise ValidationError('invalid query parameter: {}'.format(qp))
        if len(request.query_params.getlist(qp)) > 1:
            raise ValidationError(
                'repeated query parameter not allowed: {}'.format(qp))
Validate that query params are in the list of valid query keywords in :py:attr:`query_regex` :raises ValidationError: if not.
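For illustration, query_regex is assumed to be a compiled whitelist pattern along these lines; the exact pattern below is hypothetical, not the library's actual one.

import re

query_regex = re.compile(
    r'^(sort|include|page\[[^\]]+\]|filter\[[^\]]+\]|fields\[[^\]]+\])$')

print(bool(query_regex.match('sort')))         # True, accepted
print(bool(query_regex.match('page[size]')))   # True, accepted
print(bool(query_regex.match('bogus_param')))  # False, would raise ValidationError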
def rectwv_coeff_from_mos_library(reduced_image, master_rectwv, ignore_dtu_configuration=True, debugplot=0): """Evaluate rect.+wavecal. coefficients from MOS library Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. master_rectwv : MasterRectWave instance Rectification and Wavelength Calibrartion Library product. Contains the library of polynomial coefficients necessary to generate an instance of RectWaveCoeff with the rectification and wavelength calibration coefficients for the particular CSU configuration. ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. """ logger = logging.getLogger(__name__) logger.info('Computing expected RectWaveCoeff from CSU configuration') # header header = reduced_image[0].header # read the CSU configuration from the image header csu_conf = CsuConfiguration.define_from_header(header) # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) # retrieve DTU configuration from MasterRectWave object dtu_conf_calib = DtuConfiguration.define_from_dictionary( master_rectwv.meta_info['dtu_configuration'] ) # check that the DTU configuration employed to obtain the calibration # corresponds to the DTU configuration in the input FITS file if dtu_conf != dtu_conf_calib: if ignore_dtu_configuration: logger.warning('DTU configuration differences found!') else: logger.info('DTU configuration from image header:') logger.info(dtu_conf) logger.info('DTU configuration from master calibration:') logger.info(dtu_conf_calib) raise ValueError("DTU configurations do not match!") else: logger.info('DTU configuration match!') # check grism and filter filter_name = header['filter'] logger.debug('Filter: ' + filter_name) if filter_name != master_rectwv.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.debug('Grism: ' + grism_name) if grism_name != master_rectwv.tags['grism']: raise ValueError('Grism name does not match!') # valid slitlet numbers list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in master_rectwv.missing_slitlets: list_valid_islitlets.remove(idel) logger.debug('valid slitlet numbers: ' + str(list_valid_islitlets)) # initialize intermediate dictionary with relevant information # (note: this dictionary corresponds to an old structure employed to # store the information in a JSON file; this is no longer necessary, # but here we reuse that dictionary for convenience) outdict = {} outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'computation of rectification and wavelength calibration polynomial ' \ 'coefficients for a particular CSU configuration from a MOS model ' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['fits_frame_uuid'] = 'TBD' outdict['meta_info']['origin']['rect_wpoly_mos_uuid'] = \ master_rectwv.uuid outdict['meta_info']['origin']['fitted_boundary_param_uuid'] = \ master_rectwv.meta_info['origin']['bound_param'] outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name 
outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} # compute rectification and wavelength calibration coefficients for each # slitlet according to its csu_bar_slit_center value for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # input data structure tmpdict = master_rectwv.contents[islitlet - 1] list_csu_bar_slit_center = tmpdict['list_csu_bar_slit_center'] # check extrapolations if csu_bar_slit_center < min(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('minimum tabulated value: ' + str(min(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) if csu_bar_slit_center > max(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('maximum tabulated value: ' + str(max(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) # rectification coefficients ttd_order = tmpdict['ttd_order'] ncoef = ncoef_fmap(ttd_order) outdict['contents'][cslitlet] = {} outdict['contents'][cslitlet]['ttd_order'] = ttd_order outdict['contents'][cslitlet]['ttd_order_longslit_model'] = None for keycoef in ['ttd_aij', 'ttd_bij', 'tti_aij', 'tti_bij']: coef_out = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_' + keycoef + '_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) coef_out.append(dum[0]) outdict['contents'][cslitlet][keycoef] = coef_out outdict['contents'][cslitlet][keycoef + '_longslit_model'] = None # wavelength calibration coefficients ncoef = tmpdict['wpoly_degree'] + 1 wpoly_coeff = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_wpoly_coeff_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) wpoly_coeff.append(dum[0]) outdict['contents'][cslitlet]['wpoly_coeff'] = wpoly_coeff outdict['contents'][cslitlet]['wpoly_coeff_longslit_model'] = None # update cdelt1_linear and crval1_linear wpoly_function = np.polynomial.Polynomial(wpoly_coeff) crmin1_linear = wpoly_function(1) crmax1_linear = wpoly_function(EMIR_NAXIS1) cdelt1_linear = (crmax1_linear - crmin1_linear) / (EMIR_NAXIS1 - 1) crval1_linear = crmin1_linear outdict['contents'][cslitlet]['crval1_linear'] = crval1_linear outdict['contents'][cslitlet]['cdelt1_linear'] = cdelt1_linear # update CSU keywords outdict['contents'][cslitlet]['csu_bar_left'] = \ csu_conf.csu_bar_left(islitlet) outdict['contents'][cslitlet]['csu_bar_right'] = \ csu_conf.csu_bar_right(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_center'] = \ csu_conf.csu_bar_slit_center(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_width'] = \ csu_conf.csu_bar_slit_width(islitlet) # for each slitlet compute spectrum trails and frontiers using the # fitted boundary parameters fitted_bound_param_json = { 'contents': master_rectwv.meta_info['refined_boundary_model'] } parmodel = fitted_bound_param_json['contents']['parmodel'] fitted_bound_param_json.update({'meta_info': {'parmodel': parmodel}}) params = 
bound_params_from_dict(fitted_bound_param_json) if abs(debugplot) >= 10: logger.debug('Fitted boundary parameters:') logger.debug(params.pretty_print()) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # compute and store x0_reference value x0_reference = float(EMIR_NAXIS1) / 2.0 + 0.5 outdict['contents'][cslitlet]['x0_reference'] = x0_reference # compute spectrum trails (lower, middle and upper) list_spectrails = expected_distorted_boundaries( islitlet, csu_bar_slit_center, [0, 0.5, 1], params, parmodel, numpts=101, deg=5, debugplot=0 ) # store spectrails in output JSON file outdict['contents'][cslitlet]['spectrail'] = {} for idum, cdum in zip(range(3), ['lower', 'middle', 'upper']): outdict['contents'][cslitlet]['spectrail']['poly_coef_' + cdum] = \ list_spectrails[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_reference_' + cdum] = \ list_spectrails[idum].poly_funct(x0_reference) # compute frontiers (lower, upper) list_frontiers = expected_distorted_frontiers( islitlet, csu_bar_slit_center, params, parmodel, numpts=101, deg=5, debugplot=0 ) # store frontiers in output JSON outdict['contents'][cslitlet]['frontier'] = {} for idum, cdum in zip(range(2), ['lower', 'upper']): outdict['contents'][cslitlet]['frontier']['poly_coef_' + cdum] = \ list_frontiers[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_frontier_' + cdum] = \ list_frontiers[idum].poly_funct(x0_reference) # store bounding box parameters for each slitlet xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # parameters already available in the input JSON file for par in ['bb_nc1_orig', 'bb_nc2_orig', 'ymargin_bb']: outdict['contents'][cslitlet][par] = \ master_rectwv.contents[islitlet - 1][par] # estimate bb_ns1_orig and bb_ns2_orig using the already computed # frontiers and the value of ymargin_bb, following the same approach # employed in Slitlet2dArc.__init__() poly_lower_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_lower'] ) poly_upper_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_upper'] ) ylower = poly_lower_frontier(xdum) yupper = poly_upper_frontier(xdum) ymargin_bb = master_rectwv.contents[islitlet - 1]['ymargin_bb'] bb_ns1_orig = int(ylower.min() + 0.5) - ymargin_bb if bb_ns1_orig < 1: bb_ns1_orig = 1 bb_ns2_orig = int(yupper.max() + 0.5) + ymargin_bb if bb_ns2_orig > EMIR_NAXIS2: bb_ns2_orig = EMIR_NAXIS2 outdict['contents'][cslitlet]['bb_ns1_orig'] = bb_ns1_orig outdict['contents'][cslitlet]['bb_ns2_orig'] = bb_ns2_orig # additional parameters (see Slitlet2dArc.__init__) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # define expected frontier ordinates at x0_reference for the rectified # image imposing the vertical length of the slitlet to be constant # and equal to EMIR_NPIXPERSLIT_RECTIFIED outdict['contents'][cslitlet]['y0_frontier_lower_expected'] = \ expected_y0_lower_frontier(islitlet) outdict['contents'][cslitlet]['y0_frontier_upper_expected'] = \ expected_y0_upper_frontier(islitlet) # compute linear transformation to place the rectified slitlet at # the center of the current slitlet bounding box tmpdict = outdict['contents'][cslitlet] xdum1 = tmpdict['y0_frontier_lower'] ydum1 = 
tmpdict['y0_frontier_lower_expected'] xdum2 = tmpdict['y0_frontier_upper'] ydum2 = tmpdict['y0_frontier_upper_expected'] corr_yrect_b = (ydum2 - ydum1) / (xdum2 - xdum1) corr_yrect_a = ydum1 - corr_yrect_b * xdum1 # compute expected location of rectified boundaries y0_reference_lower_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_lower'] y0_reference_middle_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_middle'] y0_reference_upper_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_upper'] # shift transformation to center the rectified slitlet within the # slitlet bounding box ydummid = (ydum1 + ydum2) / 2 ioffset = int( ydummid - (tmpdict['bb_ns1_orig'] + tmpdict['bb_ns2_orig']) / 2.0) corr_yrect_a -= ioffset # minimum and maximum row in the rectified slitlet encompassing # EMIR_NPIXPERSLIT_RECTIFIED pixels # a) scan number (in pixels, from 1 to NAXIS2) xdum1 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_lower'] xdum2 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_upper'] # b) row number (starting from zero) min_row_rectified = \ int((round(xdum1 * 10) + 5) / 10) - tmpdict['bb_ns1_orig'] max_row_rectified = \ int((round(xdum2 * 10) - 5) / 10) - tmpdict['bb_ns1_orig'] # save previous results in outdict outdict['contents'][cslitlet]['y0_reference_lower_expected'] = \ y0_reference_lower_expected outdict['contents'][cslitlet]['y0_reference_middle_expected'] = \ y0_reference_middle_expected outdict['contents'][cslitlet]['y0_reference_upper_expected'] = \ y0_reference_upper_expected outdict['contents'][cslitlet]['corr_yrect_a'] = corr_yrect_a outdict['contents'][cslitlet]['corr_yrect_b'] = corr_yrect_b outdict['contents'][cslitlet]['min_row_rectified'] = min_row_rectified outdict['contents'][cslitlet]['max_row_rectified'] = max_row_rectified # --- # Create object of type RectWaveCoeff with coefficients for # rectification and wavelength calibration rectwv_coeff = RectWaveCoeff(instrument='EMIR') rectwv_coeff.quality_control = numina.types.qc.QC.GOOD rectwv_coeff.tags['grism'] = grism_name rectwv_coeff.tags['filter'] = filter_name rectwv_coeff.meta_info['origin']['bound_param'] = \ master_rectwv.meta_info['origin']['bound_param'] rectwv_coeff.meta_info['origin']['master_rectwv'] = \ 'uuid' + master_rectwv.uuid rectwv_coeff.meta_info['dtu_configuration'] = outdict['dtu_configuration'] rectwv_coeff.total_slitlets = EMIR_NBARS for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: dumdict.update({ 'csu_bar_left': csu_conf.csu_bar_left(islitlet), 'csu_bar_right': csu_conf.csu_bar_right(islitlet), 'csu_bar_slit_center': csu_conf.csu_bar_slit_center(islitlet), 'csu_bar_slit_width': csu_conf.csu_bar_slit_width(islitlet), 'x0_reference': float(EMIR_NAXIS1) / 2.0 + 0.5, 'y0_frontier_lower_expected': expected_y0_lower_frontier(islitlet), 'y0_frontier_upper_expected': expected_y0_upper_frontier(islitlet) }) rectwv_coeff.missing_slitlets.append(islitlet) rectwv_coeff.contents.append(dumdict) # debugging __getstate__ and __setstate__ # rectwv_coeff.writeto(args.out_rect_wpoly.name) # print('>>> Saving file ' + args.out_rect_wpoly.name) # check_setstate_getstate(rectwv_coeff, args.out_rect_wpoly.name) logger.info('Generating RectWaveCoeff object with uuid=' + rectwv_coeff.uuid) return rectwv_coeff
Evaluate rect.+wavecal. coefficients from MOS library Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. master_rectwv : MasterRectWave instance Rectification and Wavelength Calibrartion Library product. Contains the library of polynomial coefficients necessary to generate an instance of RectWaveCoeff with the rectification and wavelength calibration coefficients for the particular CSU configuration. ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration.
def _hadoop_cmd(module, command, *args):
    '''
    Hadoop/hdfs command wrapper

    As Hadoop command has been deprecated this module will default
    to use hdfs command and fall back to hadoop if it is not found

    In order to prevent random execution the module name is checked

    Follows hadoop command template:
        hadoop module -command args
    E.g.: hadoop dfs -ls /
    '''
    tool = 'hadoop'
    if salt.utils.path.which('hdfs'):
        tool = 'hdfs'

    out = None

    if module and command:
        if module in __authorized_modules__:
            mappings = {'tool': tool, 'module': module, 'command': command, 'args': ' '.join(args)}
            cmd = '{tool} {module} -{command} {args}'.format(**mappings)
            out = __salt__['cmd.run'](cmd, python_shell=False)
        else:
            return 'Error: Unknown module'
    else:
        return 'Error: Module and command not defined'

    return out
Hadoop/hdfs command wrapper As Hadoop command has been deprecated this module will default to use hdfs command and fall back to hadoop if it is not found In order to prevent random execution the module name is checked Follows hadoop command template: hadoop module -command args E.g.: hadoop dfs -ls /
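For instance, a call such as _hadoop_cmd('dfs', 'ls', '/') assembles its shell command as shown below (a standalone sketch of the string construction only, assuming 'hdfs' is found on the path):

mappings = {'tool': 'hdfs', 'module': 'dfs', 'command': 'ls', 'args': '/'}
cmd = '{tool} {module} -{command} {args}'.format(**mappings)
print(cmd)  # hdfs dfs -ls /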
def _pip_search(stdout, stderr): """Callback for pip search.""" result = {} lines = to_text_string(stdout).split('\n') while '' in lines: lines.remove('') for line in lines: if ' - ' in line: parts = line.split(' - ') name = parts[0].strip() description = parts[1].strip() result[name] = description return result
Callback for pip search.
Below is the instruction that describes the task:
### Input:
Callback for pip search.
### Response:
def _pip_search(stdout, stderr):
    """Callback for pip search."""
    result = {}
    lines = to_text_string(stdout).split('\n')
    while '' in lines:
        lines.remove('')
    for line in lines:
        if ' - ' in line:
            parts = line.split(' - ')
            name = parts[0].strip()
            description = parts[1].strip()
            result[name] = description
    return result
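A small illustration of the parsing rule, assuming _pip_search is in scope and that to_text_string passes plain strings through unchanged; the sample output lines are hypothetical:

sample_stdout = "requests - Python HTTP for Humans.\nflask - A microframework based on Werkzeug.\n"
print(_pip_search(sample_stdout, stderr=""))
# expected: {'requests': 'Python HTTP for Humans.', 'flask': 'A microframework based on Werkzeug.'}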
def get_recipe_instance(self, badge): """ Returns the recipe instance for the given badge slug. If badge has not been registered, raises ``exceptions.BadgeNotFound``. """ from .exceptions import BadgeNotFound if badge in self._registry: return self.recipes[badge] raise BadgeNotFound()
Returns the recipe instance for the given badge slug. If badge has not been registered, raises ``exceptions.BadgeNotFound``.
Below is the instruction that describes the task:
### Input:
Returns the recipe instance for the given badge slug.
If badge has not been registered, raises ``exceptions.BadgeNotFound``.
### Response:
def get_recipe_instance(self, badge):
        """
        Returns the recipe instance for the given badge slug.
        If badge has not been registered, raises ``exceptions.BadgeNotFound``.
        """
        from .exceptions import BadgeNotFound
        if badge in self._registry:
            return self.recipes[badge]
        raise BadgeNotFound()
def process_response(self, request, response): """ Set compact P3P policies and save auth token to cookie. P3P is a WC3 standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most browsers it is considered by IE before accepting third-party cookies (ie. cookies set by documents in iframes). If they are not set correctly, IE will not set these cookies. """ if hasattr(request, "facebook") and request.facebook and request.facebook.oauth_token: if "code" in request.REQUEST: """ Remove auth related query params """ path = get_full_path(request, remove_querystrings=['code', 'web_canvas']) response = HttpResponseRedirect(path) response.set_cookie('oauth_token', request.facebook.oauth_token.token) else: response.delete_cookie('oauth_token') response['P3P'] = 'CP="IDC CURa ADMa OUR IND PHY ONL COM STA"' return response
Set compact P3P policies and save auth token to cookie. P3P is a WC3 standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most browsers it is considered by IE before accepting third-party cookies (ie. cookies set by documents in iframes). If they are not set correctly, IE will not set these cookies.
Below is the the instruction that describes the task: ### Input: Set compact P3P policies and save auth token to cookie. P3P is a WC3 standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most browsers it is considered by IE before accepting third-party cookies (ie. cookies set by documents in iframes). If they are not set correctly, IE will not set these cookies. ### Response: def process_response(self, request, response): """ Set compact P3P policies and save auth token to cookie. P3P is a WC3 standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most browsers it is considered by IE before accepting third-party cookies (ie. cookies set by documents in iframes). If they are not set correctly, IE will not set these cookies. """ if hasattr(request, "facebook") and request.facebook and request.facebook.oauth_token: if "code" in request.REQUEST: """ Remove auth related query params """ path = get_full_path(request, remove_querystrings=['code', 'web_canvas']) response = HttpResponseRedirect(path) response.set_cookie('oauth_token', request.facebook.oauth_token.token) else: response.delete_cookie('oauth_token') response['P3P'] = 'CP="IDC CURa ADMa OUR IND PHY ONL COM STA"' return response
def find(self,cell_designation,cell_filter=lambda x,c: 'c' in x and x['c'] == c): """ finds spike containers in a multi spike containers collection """ res = [i for i,sc in enumerate(self.spike_containers) if cell_filter(sc.meta,cell_designation)] if len(res) > 0: return res[0]
finds spike containers in a multi spike containers collection
Below is the instruction that describes the task:
### Input:
finds spike containers in a multi spike containers collection
### Response:
def find(self,cell_designation,cell_filter=lambda x,c: 'c' in x and x['c'] == c):
        """ finds spike containers in a multi spike containers collection """
        res = [i for i,sc in enumerate(self.spike_containers) if cell_filter(sc.meta,cell_designation)]
        if len(res) > 0:
            return res[0]
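For illustration, the default cell_filter only matches spike containers whose meta dict carries a matching 'c' entry; the values below are hypothetical:

cell_filter = lambda x, c: 'c' in x and x['c'] == c
print(cell_filter({'c': 7, 'region': 'CA1'}, 7))   # True
print(cell_filter({'region': 'CA1'}, 7))           # False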
def parse_orf(insertion, gff): """ parse ORF to gff format """ offset = insertion['offset'] if type(insertion['orf']) is not str: return gff for orf in parse_fasta(insertion['orf'].split('|')): ID = orf[0].split('>')[1].split()[0] Start, End, strand = [int(i) for i in orf[0].split(' # ')[1:4]] if strand == 1: strand = '+' else: strand = '-' GeneStrand = insertion['strand'] if strand != GeneStrand: if strand == '+': strand = '-' else: strand = '+' Start, End = End - 2, Start - 2 Start, End = abs(Start + offset) - 1, abs(End + offset) - 1 annot = orf[0].split()[1] if annot == 'n/a': annot = 'unknown' gff['#seqname'].append(insertion['ID']) gff['source'].append('Prodigal and Pfam') gff['feature'].append('CDS') gff['start'].append(Start) gff['end'].append(End) gff['score'].append('.') gff['strand'].append(strand) gff['frame'].append('.') gff['attribute'].append('ID=%s; Name=%s' % (ID, annot)) return gff
parse ORF to gff format
Below is the the instruction that describes the task: ### Input: parse ORF to gff format ### Response: def parse_orf(insertion, gff): """ parse ORF to gff format """ offset = insertion['offset'] if type(insertion['orf']) is not str: return gff for orf in parse_fasta(insertion['orf'].split('|')): ID = orf[0].split('>')[1].split()[0] Start, End, strand = [int(i) for i in orf[0].split(' # ')[1:4]] if strand == 1: strand = '+' else: strand = '-' GeneStrand = insertion['strand'] if strand != GeneStrand: if strand == '+': strand = '-' else: strand = '+' Start, End = End - 2, Start - 2 Start, End = abs(Start + offset) - 1, abs(End + offset) - 1 annot = orf[0].split()[1] if annot == 'n/a': annot = 'unknown' gff['#seqname'].append(insertion['ID']) gff['source'].append('Prodigal and Pfam') gff['feature'].append('CDS') gff['start'].append(Start) gff['end'].append(End) gff['score'].append('.') gff['strand'].append(strand) gff['frame'].append('.') gff['attribute'].append('ID=%s; Name=%s' % (ID, annot)) return gff
def execute_async(self, command, callback=None): """ Executes command on remote hosts without blocking :type command: str :param command: command to be run on remote host :type callback: function :param callback: function to call when execution completes """ try: logger.debug(('{0}: execute async "{1}"' 'with callback {2}'.format(self.target_address, command, callback))) future = self.executor.submit(self.execute, command) if callback is not None: future.add_done_callback(callback) return future except (AuthenticationException, SSHException, ChannelException, SocketError) as ex: logger.critical(("{0} execution failed on {1} with exception:" "{2}".format(command, self.target_address, ex))) raise SSHCommandError(self.target_address, command, ex)
Executes command on remote hosts without blocking :type command: str :param command: command to be run on remote host :type callback: function :param callback: function to call when execution completes
Below is the the instruction that describes the task: ### Input: Executes command on remote hosts without blocking :type command: str :param command: command to be run on remote host :type callback: function :param callback: function to call when execution completes ### Response: def execute_async(self, command, callback=None): """ Executes command on remote hosts without blocking :type command: str :param command: command to be run on remote host :type callback: function :param callback: function to call when execution completes """ try: logger.debug(('{0}: execute async "{1}"' 'with callback {2}'.format(self.target_address, command, callback))) future = self.executor.submit(self.execute, command) if callback is not None: future.add_done_callback(callback) return future except (AuthenticationException, SSHException, ChannelException, SocketError) as ex: logger.critical(("{0} execution failed on {1} with exception:" "{2}".format(command, self.target_address, ex))) raise SSHCommandError(self.target_address, command, ex)
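A hedged usage sketch, assuming `client` is an instance of the class above with a configured executor; the command and the callback are hypothetical:

def on_done(future):
    # called by the executor once the remote command has finished
    print('remote command returned:', future.result())

future = client.execute_async('uptime', callback=on_done)
future.result()   # optionally block until the command completes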
def remove_info_file(): """Remove the current process's TensorBoardInfo file, if it exists. If the file does not exist, no action is taken and no error is raised. """ try: os.unlink(_get_info_file_path()) except OSError as e: if e.errno == errno.ENOENT: # The user may have wiped their temporary directory or something. # Not a problem: we're already in the state that we want to be in. pass else: raise
Remove the current process's TensorBoardInfo file, if it exists. If the file does not exist, no action is taken and no error is raised.
Below is the instruction that describes the task:
### Input:
Remove the current process's TensorBoardInfo file, if it exists.

If the file does not exist, no action is taken and no error is raised.
### Response:
def remove_info_file():
  """Remove the current process's TensorBoardInfo file, if it exists.

  If the file does not exist, no action is taken and no error is raised.
  """
  try:
    os.unlink(_get_info_file_path())
  except OSError as e:
    if e.errno == errno.ENOENT:
      # The user may have wiped their temporary directory or something.
      # Not a problem: we're already in the state that we want to be in.
      pass
    else:
      raise
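The same ignore-if-missing unlink pattern, restated as a self-contained sketch with a hypothetical path:

import errno
import os

def _silent_unlink(path):
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:   # only a missing file is tolerated
            raise

_silent_unlink('/tmp/does-not-exist.info')   # no error even though the file is absent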
def urljoin(base_url, url, allow_fragments=True): '''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.''' if url.startswith('//') and len(url) > 2: scheme = base_url.partition(':')[0] if scheme: return urllib.parse.urljoin( base_url, '{0}:{1}'.format(scheme, url), allow_fragments=allow_fragments ) return urllib.parse.urljoin( base_url, url, allow_fragments=allow_fragments)
Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.
Below is the instruction that describes the task:
### Input:
Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.
### Response:
def urljoin(base_url, url, allow_fragments=True):
    '''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.'''
    if url.startswith('//') and len(url) > 2:
        scheme = base_url.partition(':')[0]
        if scheme:
            return urllib.parse.urljoin(
                base_url,
                '{0}:{1}'.format(scheme, url),
                allow_fragments=allow_fragments
            )
    return urllib.parse.urljoin(
        base_url, url, allow_fragments=allow_fragments)
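A sketch of the behaviour for a scheme-relative URL, using only the standard library; the URLs are hypothetical:

import urllib.parse

base = 'https://example.com/docs/index.html'
url = '//cdn.example.com/static/app.js'

# The wrapper above first re-attaches the base scheme, then defers to urljoin.
scheme = base.partition(':')[0]
print(urllib.parse.urljoin(base, '{0}:{1}'.format(scheme, url)))
# prints: https://cdn.example.com/static/app.js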
def WriteAPIAuditEntry(self, entry): """Writes an audit entry to the database.""" copy = entry.Copy() copy.timestamp = rdfvalue.RDFDatetime.Now() self.api_audit_entries.append(copy)
Writes an audit entry to the database.
Below is the instruction that describes the task:
### Input:
Writes an audit entry to the database.
### Response:
def WriteAPIAuditEntry(self, entry):
    """Writes an audit entry to the database."""
    copy = entry.Copy()
    copy.timestamp = rdfvalue.RDFDatetime.Now()
    self.api_audit_entries.append(copy)
def ensure(user, action, subject):
    """ Similar to ``can`` but will raise an AccessDenied exception if the user does not have access"""
    ability = Ability(user, get_authorization_method())
    if ability.cannot(action, subject):
        raise AccessDenied()
Similar to ``can`` but will raise an AccessDenied exception if the user does not have access
Below is the instruction that describes the task:
### Input:
Similar to ``can`` but will raise an AccessDenied exception if the user does not have access
### Response:
def ensure(user, action, subject):
    """ Similar to ``can`` but will raise an AccessDenied exception if the user does not have access"""
    ability = Ability(user, get_authorization_method())
    if ability.cannot(action, subject):
        raise AccessDenied()
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
Ensures that argument passed in arg_name is of type bool.
Below is the instruction that describes the task:
### Input:
Ensures that argument passed in arg_name is of type bool.
### Response:
def validate_bool_kwarg(value, arg_name):
    """ Ensures that argument passed in arg_name is of type bool. """
    if not (is_bool(value) or value is None):
        raise ValueError('For argument "{arg}" expected type bool, received '
                         'type {typ}.'.format(arg=arg_name,
                                              typ=type(value).__name__))
    return value
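Hedged usage notes, assuming validate_bool_kwarg above is in scope; the behaviour is read directly off the code:

validate_bool_kwarg(True, 'inplace')    # returns True
validate_bool_kwarg(None, 'inplace')    # returns None -- None is explicitly allowed
validate_bool_kwarg('yes', 'inplace')   # raises ValueError: expected type bool, received type str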
def conc(self,H=70.,Om=0.3,overdens=200.,wrtcrit=False, ro=None,vo=None): """ NAME: conc PURPOSE: return the concentration INPUT: H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter overdens= (200) overdensity which defines the virial radius wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc)) vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s)) OUTPUT: concentration (scale/rvir) HISTORY: 2014-04-03 - Written - Bovy (IAS) """ if ro is None: ro= self._ro if vo is None: vo= self._vo try: return self.rvir(H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit, ro=ro,vo=vo,use_physical=False)/self._scale except AttributeError: raise AttributeError("This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius")
NAME: conc PURPOSE: return the concentration INPUT: H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter overdens= (200) overdensity which defines the virial radius wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc)) vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s)) OUTPUT: concentration (scale/rvir) HISTORY: 2014-04-03 - Written - Bovy (IAS)
Below is the the instruction that describes the task: ### Input: NAME: conc PURPOSE: return the concentration INPUT: H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter overdens= (200) overdensity which defines the virial radius wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc)) vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s)) OUTPUT: concentration (scale/rvir) HISTORY: 2014-04-03 - Written - Bovy (IAS) ### Response: def conc(self,H=70.,Om=0.3,overdens=200.,wrtcrit=False, ro=None,vo=None): """ NAME: conc PURPOSE: return the concentration INPUT: H= (default: 70) Hubble constant in km/s/Mpc Om= (default: 0.3) Omega matter overdens= (200) overdensity which defines the virial radius wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc)) vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s)) OUTPUT: concentration (scale/rvir) HISTORY: 2014-04-03 - Written - Bovy (IAS) """ if ro is None: ro= self._ro if vo is None: vo= self._vo try: return self.rvir(H=H,Om=Om,overdens=overdens,wrtcrit=wrtcrit, ro=ro,vo=vo,use_physical=False)/self._scale except AttributeError: raise AttributeError("This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius")
def read(filelines, mapping=None, wok=False): """Parse a ris lines and return a list of entries. Entries are codified as dictionaries whose keys are the different tags. For single line and singly occurring tags, the content is codified as a string. In the case of multiline or multiple key occurrences, the content is returned as a list of strings. Keyword arguments: bibliography_file -- ris filehandle mapping -- custom RIS tags mapping wok -- flag, Web of Knowledge format is used if True, otherwise Refman's RIS specifications are used. """ if wok: if not mapping: mapping = WOK_TAG_KEY_MAPPING return Wok(filelines, mapping).parse() else: if not mapping: mapping = TAG_KEY_MAPPING return Ris(filelines, mapping).parse()
Parse a ris lines and return a list of entries. Entries are codified as dictionaries whose keys are the different tags. For single line and singly occurring tags, the content is codified as a string. In the case of multiline or multiple key occurrences, the content is returned as a list of strings. Keyword arguments: bibliography_file -- ris filehandle mapping -- custom RIS tags mapping wok -- flag, Web of Knowledge format is used if True, otherwise Refman's RIS specifications are used.
Below is the the instruction that describes the task: ### Input: Parse a ris lines and return a list of entries. Entries are codified as dictionaries whose keys are the different tags. For single line and singly occurring tags, the content is codified as a string. In the case of multiline or multiple key occurrences, the content is returned as a list of strings. Keyword arguments: bibliography_file -- ris filehandle mapping -- custom RIS tags mapping wok -- flag, Web of Knowledge format is used if True, otherwise Refman's RIS specifications are used. ### Response: def read(filelines, mapping=None, wok=False): """Parse a ris lines and return a list of entries. Entries are codified as dictionaries whose keys are the different tags. For single line and singly occurring tags, the content is codified as a string. In the case of multiline or multiple key occurrences, the content is returned as a list of strings. Keyword arguments: bibliography_file -- ris filehandle mapping -- custom RIS tags mapping wok -- flag, Web of Knowledge format is used if True, otherwise Refman's RIS specifications are used. """ if wok: if not mapping: mapping = WOK_TAG_KEY_MAPPING return Wok(filelines, mapping).parse() else: if not mapping: mapping = TAG_KEY_MAPPING return Ris(filelines, mapping).parse()
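A hedged usage sketch, assuming read() above is importable and that 'refs.ris' is a hypothetical RIS file:

with open('refs.ris') as bibliography_file:
    entries = read(bibliography_file.readlines())

for entry in entries:
    print(sorted(entry.keys()))   # tags mapped through TAG_KEY_MAPPING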
def __convertRlocToRouterId(self, xRloc16): """mapping Rloc16 to router id Args: xRloc16: hex rloc16 short address Returns: actual router id allocated by leader """ routerList = [] routerList = self.__sendCommand('router list')[0].split() print routerList print xRloc16 for index in routerList: router = [] cmd = 'router %s' % index router = self.__sendCommand(cmd) for line in router: if 'Done' in line: break elif 'Router ID' in line: routerid = line.split()[2] elif 'Rloc' in line: rloc16 = line.split()[1] else: pass # process input rloc16 if isinstance(xRloc16, str): rloc16 = '0x' + rloc16 if rloc16 == xRloc16: return routerid elif isinstance(xRloc16, int): if int(rloc16, 16) == xRloc16: return routerid else: pass return None
mapping Rloc16 to router id Args: xRloc16: hex rloc16 short address Returns: actual router id allocated by leader
Below is the the instruction that describes the task: ### Input: mapping Rloc16 to router id Args: xRloc16: hex rloc16 short address Returns: actual router id allocated by leader ### Response: def __convertRlocToRouterId(self, xRloc16): """mapping Rloc16 to router id Args: xRloc16: hex rloc16 short address Returns: actual router id allocated by leader """ routerList = [] routerList = self.__sendCommand('router list')[0].split() print routerList print xRloc16 for index in routerList: router = [] cmd = 'router %s' % index router = self.__sendCommand(cmd) for line in router: if 'Done' in line: break elif 'Router ID' in line: routerid = line.split()[2] elif 'Rloc' in line: rloc16 = line.split()[1] else: pass # process input rloc16 if isinstance(xRloc16, str): rloc16 = '0x' + rloc16 if rloc16 == xRloc16: return routerid elif isinstance(xRloc16, int): if int(rloc16, 16) == xRloc16: return routerid else: pass return None
def _token_of(self, input): """ Returns the token type of the input. :param input: Input whose type is to be determined :return TOKENS: Token type of the input """ if isinstance(input, dict): # Intrinsic functions are always dicts if is_intrinsics(input): # Intrinsic functions are handled *exactly* like a primitive type because # they resolve to a primitive type when creating a stack with CloudFormation return self.TOKEN.PRIMITIVE else: return self.TOKEN.DICT elif isinstance(input, list): return self.TOKEN.LIST else: return self.TOKEN.PRIMITIVE
Returns the token type of the input. :param input: Input whose type is to be determined :return TOKENS: Token type of the input
Below is the the instruction that describes the task: ### Input: Returns the token type of the input. :param input: Input whose type is to be determined :return TOKENS: Token type of the input ### Response: def _token_of(self, input): """ Returns the token type of the input. :param input: Input whose type is to be determined :return TOKENS: Token type of the input """ if isinstance(input, dict): # Intrinsic functions are always dicts if is_intrinsics(input): # Intrinsic functions are handled *exactly* like a primitive type because # they resolve to a primitive type when creating a stack with CloudFormation return self.TOKEN.PRIMITIVE else: return self.TOKEN.DICT elif isinstance(input, list): return self.TOKEN.LIST else: return self.TOKEN.PRIMITIVE
def set_locked_variable(self, key, access_key, value): """Set an already locked global variable :param key: the key of the global variable to be set :param access_key: the access key to the already locked global variable :param value: the new value of the global variable """ return self.set_variable(key, value, per_reference=False, access_key=access_key)
Set an already locked global variable :param key: the key of the global variable to be set :param access_key: the access key to the already locked global variable :param value: the new value of the global variable
Below is the the instruction that describes the task: ### Input: Set an already locked global variable :param key: the key of the global variable to be set :param access_key: the access key to the already locked global variable :param value: the new value of the global variable ### Response: def set_locked_variable(self, key, access_key, value): """Set an already locked global variable :param key: the key of the global variable to be set :param access_key: the access key to the already locked global variable :param value: the new value of the global variable """ return self.set_variable(key, value, per_reference=False, access_key=access_key)
def _diagonalize(self): """Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map. """ L0 = spd_inv_split(self.C00, epsilon=self.epsilon) self._rank0 = L0.shape[1] if L0.ndim == 2 else 1 Lt = spd_inv_split(self.Ctt, epsilon=self.epsilon) self._rankt = Lt.shape[1] if Lt.ndim == 2 else 1 W = np.dot(L0.T, self.C0t).dot(Lt) from scipy.linalg import svd A, s, BT = svd(W, compute_uv=True, lapack_driver='gesvd') self._singular_values = s # don't pass any values in the argument list that call _diagonalize again!!! m = VAMPModel._dimension(self._rank0, self._rankt, self.dim, self._singular_values) U = np.dot(L0, A[:, :m]) V = np.dot(Lt, BT[:m, :].T) # scale vectors if self.scaling is not None: U *= s[np.newaxis, 0:m] # scaled left singular functions induce a kinetic map V *= s[np.newaxis, 0:m] # scaled right singular functions induce a kinetic map wrt. backward propagator self._U = U self._V = V self._svd_performed = True
Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map.
Below is the the instruction that describes the task: ### Input: Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map. ### Response: def _diagonalize(self): """Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map. """ L0 = spd_inv_split(self.C00, epsilon=self.epsilon) self._rank0 = L0.shape[1] if L0.ndim == 2 else 1 Lt = spd_inv_split(self.Ctt, epsilon=self.epsilon) self._rankt = Lt.shape[1] if Lt.ndim == 2 else 1 W = np.dot(L0.T, self.C0t).dot(Lt) from scipy.linalg import svd A, s, BT = svd(W, compute_uv=True, lapack_driver='gesvd') self._singular_values = s # don't pass any values in the argument list that call _diagonalize again!!! m = VAMPModel._dimension(self._rank0, self._rankt, self.dim, self._singular_values) U = np.dot(L0, A[:, :m]) V = np.dot(Lt, BT[:m, :].T) # scale vectors if self.scaling is not None: U *= s[np.newaxis, 0:m] # scaled left singular functions induce a kinetic map V *= s[np.newaxis, 0:m] # scaled right singular functions induce a kinetic map wrt. backward propagator self._U = U self._V = V self._svd_performed = True
def get_valid_configured_integrations(alert): """Return a list of integrations for alert filtered by alert_type. :returns: A list of relevant integrations """ if not configured_integrations: return [] # Collect all integrations that are configured for specific alert_type # or have no specific supported_event_types (i.e., all alert types) valid_configured_integrations = [ _ for _ in configured_integrations if _.integration.integration_type == IntegrationTypes.EVENT_OUTPUT.name and (not _.integration.supported_event_types or alert.alert_type in _.integration.supported_event_types) ] return valid_configured_integrations
Return a list of integrations for alert filtered by alert_type. :returns: A list of relevant integrations
Below is the the instruction that describes the task: ### Input: Return a list of integrations for alert filtered by alert_type. :returns: A list of relevant integrations ### Response: def get_valid_configured_integrations(alert): """Return a list of integrations for alert filtered by alert_type. :returns: A list of relevant integrations """ if not configured_integrations: return [] # Collect all integrations that are configured for specific alert_type # or have no specific supported_event_types (i.e., all alert types) valid_configured_integrations = [ _ for _ in configured_integrations if _.integration.integration_type == IntegrationTypes.EVENT_OUTPUT.name and (not _.integration.supported_event_types or alert.alert_type in _.integration.supported_event_types) ] return valid_configured_integrations
def _delete(self, pos, idx): """ Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc. """ _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index keys_pos = _keys[pos] lists_pos = _lists[pos] del keys_pos[idx] del lists_pos[idx] self._len -= 1 len_keys_pos = len(keys_pos) if len_keys_pos > self._half: _maxes[pos] = keys_pos[-1] if len(_index) > 0: child = self._offset + pos while child > 0: _index[child] -= 1 child = (child - 1) >> 1 _index[0] -= 1 elif len(_keys) > 1: if not pos: pos += 1 prev = pos - 1 _keys[prev].extend(_keys[pos]) _lists[prev].extend(_lists[pos]) _maxes[prev] = _keys[prev][-1] del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:] self._expand(prev) elif len_keys_pos: _maxes[pos] = keys_pos[-1] else: del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:]
Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc.
Below is the the instruction that describes the task: ### Input: Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc. ### Response: def _delete(self, pos, idx): """ Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc. """ _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index keys_pos = _keys[pos] lists_pos = _lists[pos] del keys_pos[idx] del lists_pos[idx] self._len -= 1 len_keys_pos = len(keys_pos) if len_keys_pos > self._half: _maxes[pos] = keys_pos[-1] if len(_index) > 0: child = self._offset + pos while child > 0: _index[child] -= 1 child = (child - 1) >> 1 _index[0] -= 1 elif len(_keys) > 1: if not pos: pos += 1 prev = pos - 1 _keys[prev].extend(_keys[pos]) _lists[prev].extend(_lists[pos]) _maxes[prev] = _keys[prev][-1] del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:] self._expand(prev) elif len_keys_pos: _maxes[pos] = keys_pos[-1] else: del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:]
def _prm_store_parameter_or_result(self, instance, store_data=pypetconstants.STORE_DATA, store_flags=None, overwrite=None, with_links=False, recursive=False, _hdf5_group=None, _newly_created=False, **kwargs): """Stores a parameter or result to hdf5. :param instance: The instance to be stored :param store_data: How to store data :param store_flags: Dictionary containing how to store individual data, usually empty. :param overwrite: Instructions how to overwrite data :param with_links: Placeholder because leaves have no links :param recursive: Placeholder, because leaves have no children :param _hdf5_group: The hdf5 group for storing the parameter or result :param _newly_created: If should be created in a new form """ if store_data == pypetconstants.STORE_NOTHING: return elif store_data == pypetconstants.STORE_DATA_SKIPPING and instance._stored: self._logger.debug('Already found `%s` on disk I will not store it!' % instance.v_full_name) return elif store_data == pypetconstants.OVERWRITE_DATA: if not overwrite: overwrite = True fullname = instance.v_full_name self._logger.debug('Storing `%s`.' % fullname) if _hdf5_group is None: # If no group is provided we might need to create one _hdf5_group, _newly_created = self._all_create_or_get_groups(fullname) # kwargs_flags = {} # Dictionary to change settings # old_kwargs = {} store_dict = {} # If the user did not supply storage flags, we need to set it to the empty dictionary if store_flags is None: store_flags = {} try: # Get the data to store from the instance if not instance.f_is_empty(): store_dict = instance._store() try: # Ask the instance for storage flags instance_flags = instance._store_flags().copy() # copy to avoid modifying the # original data except AttributeError: # If it does not provide any, set it to the empty dictionary instance_flags = {} # User specified flags have priority over the flags from the instance instance_flags.update(store_flags) store_flags = instance_flags # If we still have data in `store_dict` about which we do not know how to store # it, pick default storage flags self._prm_extract_missing_flags(store_dict, store_flags) if overwrite: if isinstance(overwrite, str): overwrite = [overwrite] if overwrite is True: to_delete = [key for key in store_dict.keys() if key in _hdf5_group] self._all_delete_parameter_or_result_or_group(instance, delete_only=to_delete, _hdf5_group=_hdf5_group) elif isinstance(overwrite, (list, tuple)): overwrite_set = set(overwrite) key_set = set(store_dict.keys()) stuff_not_to_be_overwritten = overwrite_set - key_set if overwrite!='v_annotations' and len(stuff_not_to_be_overwritten) > 0: self._logger.warning('Cannot overwrite `%s`, these items are not supposed to ' 'be stored by the leaf node.' % str(stuff_not_to_be_overwritten)) stuff_to_overwrite = overwrite_set & key_set if len(stuff_to_overwrite) > 0: self._all_delete_parameter_or_result_or_group(instance, delete_only=list( stuff_to_overwrite)) else: raise ValueError('Your value of overwrite `%s` is not understood. ' 'Please pass `True` of a list of strings to fine grain ' 'overwriting.' 
% str(overwrite)) self._prm_store_from_dict(fullname, store_dict, _hdf5_group, store_flags, kwargs) # Store annotations self._ann_store_annotations(instance, _hdf5_group, overwrite=overwrite) if _newly_created or overwrite is True: # If we created a new group or the parameter was extended we need to # update the meta information and summary tables self._prm_add_meta_info(instance, _hdf5_group, overwrite=not _newly_created) instance._stored = True #self._logger.debug('Finished Storing `%s`.' % fullname) # Signal completed node loading self._node_processing_timer.signal_update() except: # I anything fails, we want to remove the data of the parameter again self._logger.error( 'Failed storing leaf `%s`. I will remove the hdf5 data I added again.' % fullname) # Delete data for key in store_dict.keys(): if key in _hdf5_group: hdf5_child = _hdf5_group._f_get_child(key) hdf5_child._f_remove(recursive=True) # If no data left delete the whole parameter if _hdf5_group._v_nchildren == 0: _hdf5_group._f_remove(recursive=True) raise
Stores a parameter or result to hdf5. :param instance: The instance to be stored :param store_data: How to store data :param store_flags: Dictionary containing how to store individual data, usually empty. :param overwrite: Instructions how to overwrite data :param with_links: Placeholder because leaves have no links :param recursive: Placeholder, because leaves have no children :param _hdf5_group: The hdf5 group for storing the parameter or result :param _newly_created: If should be created in a new form
Below is the the instruction that describes the task: ### Input: Stores a parameter or result to hdf5. :param instance: The instance to be stored :param store_data: How to store data :param store_flags: Dictionary containing how to store individual data, usually empty. :param overwrite: Instructions how to overwrite data :param with_links: Placeholder because leaves have no links :param recursive: Placeholder, because leaves have no children :param _hdf5_group: The hdf5 group for storing the parameter or result :param _newly_created: If should be created in a new form ### Response: def _prm_store_parameter_or_result(self, instance, store_data=pypetconstants.STORE_DATA, store_flags=None, overwrite=None, with_links=False, recursive=False, _hdf5_group=None, _newly_created=False, **kwargs): """Stores a parameter or result to hdf5. :param instance: The instance to be stored :param store_data: How to store data :param store_flags: Dictionary containing how to store individual data, usually empty. :param overwrite: Instructions how to overwrite data :param with_links: Placeholder because leaves have no links :param recursive: Placeholder, because leaves have no children :param _hdf5_group: The hdf5 group for storing the parameter or result :param _newly_created: If should be created in a new form """ if store_data == pypetconstants.STORE_NOTHING: return elif store_data == pypetconstants.STORE_DATA_SKIPPING and instance._stored: self._logger.debug('Already found `%s` on disk I will not store it!' % instance.v_full_name) return elif store_data == pypetconstants.OVERWRITE_DATA: if not overwrite: overwrite = True fullname = instance.v_full_name self._logger.debug('Storing `%s`.' % fullname) if _hdf5_group is None: # If no group is provided we might need to create one _hdf5_group, _newly_created = self._all_create_or_get_groups(fullname) # kwargs_flags = {} # Dictionary to change settings # old_kwargs = {} store_dict = {} # If the user did not supply storage flags, we need to set it to the empty dictionary if store_flags is None: store_flags = {} try: # Get the data to store from the instance if not instance.f_is_empty(): store_dict = instance._store() try: # Ask the instance for storage flags instance_flags = instance._store_flags().copy() # copy to avoid modifying the # original data except AttributeError: # If it does not provide any, set it to the empty dictionary instance_flags = {} # User specified flags have priority over the flags from the instance instance_flags.update(store_flags) store_flags = instance_flags # If we still have data in `store_dict` about which we do not know how to store # it, pick default storage flags self._prm_extract_missing_flags(store_dict, store_flags) if overwrite: if isinstance(overwrite, str): overwrite = [overwrite] if overwrite is True: to_delete = [key for key in store_dict.keys() if key in _hdf5_group] self._all_delete_parameter_or_result_or_group(instance, delete_only=to_delete, _hdf5_group=_hdf5_group) elif isinstance(overwrite, (list, tuple)): overwrite_set = set(overwrite) key_set = set(store_dict.keys()) stuff_not_to_be_overwritten = overwrite_set - key_set if overwrite!='v_annotations' and len(stuff_not_to_be_overwritten) > 0: self._logger.warning('Cannot overwrite `%s`, these items are not supposed to ' 'be stored by the leaf node.' 
% str(stuff_not_to_be_overwritten)) stuff_to_overwrite = overwrite_set & key_set if len(stuff_to_overwrite) > 0: self._all_delete_parameter_or_result_or_group(instance, delete_only=list( stuff_to_overwrite)) else: raise ValueError('Your value of overwrite `%s` is not understood. ' 'Please pass `True` of a list of strings to fine grain ' 'overwriting.' % str(overwrite)) self._prm_store_from_dict(fullname, store_dict, _hdf5_group, store_flags, kwargs) # Store annotations self._ann_store_annotations(instance, _hdf5_group, overwrite=overwrite) if _newly_created or overwrite is True: # If we created a new group or the parameter was extended we need to # update the meta information and summary tables self._prm_add_meta_info(instance, _hdf5_group, overwrite=not _newly_created) instance._stored = True #self._logger.debug('Finished Storing `%s`.' % fullname) # Signal completed node loading self._node_processing_timer.signal_update() except: # I anything fails, we want to remove the data of the parameter again self._logger.error( 'Failed storing leaf `%s`. I will remove the hdf5 data I added again.' % fullname) # Delete data for key in store_dict.keys(): if key in _hdf5_group: hdf5_child = _hdf5_group._f_get_child(key) hdf5_child._f_remove(recursive=True) # If no data left delete the whole parameter if _hdf5_group._v_nchildren == 0: _hdf5_group._f_remove(recursive=True) raise
def NetMHCpan( alleles, program_name="netMHCpan", process_limit=-1, default_peptide_lengths=[9], extra_flags=[]): """ This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class to use, with the help of the miraculous and strange '--version' netmhcpan argument. """ # convert to str since Python3 returns a `bytes` object. # The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary # to call `netmhcpan --version` with some argument, otherwise it hangs. with open(os.devnull, 'w') as devnull: output = check_output([ program_name, "--version", "_MHCTOOLS_VERSION_SNIFFING"], stderr=devnull) output_str = output.decode("ascii", "ignore") common_kwargs = { "alleles": alleles, "default_peptide_lengths": default_peptide_lengths, "program_name": program_name, "process_limit": process_limit, "extra_flags": extra_flags, } if "NetMHCpan version 2.8" in output_str: return NetMHCpan28(**common_kwargs) elif "NetMHCpan version 3.0" in output_str: return NetMHCpan3(**common_kwargs) elif "NetMHCpan version 4.0" in output_str: return NetMHCpan4(**common_kwargs) else: raise RuntimeError( "This software expects NetMHCpan version 2.8, 3.0, or 4.0")
This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class to use, with the help of the miraculous and strange '--version' netmhcpan argument.
Below is the the instruction that describes the task: ### Input: This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class to use, with the help of the miraculous and strange '--version' netmhcpan argument. ### Response: def NetMHCpan( alleles, program_name="netMHCpan", process_limit=-1, default_peptide_lengths=[9], extra_flags=[]): """ This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class to use, with the help of the miraculous and strange '--version' netmhcpan argument. """ # convert to str since Python3 returns a `bytes` object. # The '_MHCTOOLS_VERSION_SNIFFING' here is meaningless, but it is necessary # to call `netmhcpan --version` with some argument, otherwise it hangs. with open(os.devnull, 'w') as devnull: output = check_output([ program_name, "--version", "_MHCTOOLS_VERSION_SNIFFING"], stderr=devnull) output_str = output.decode("ascii", "ignore") common_kwargs = { "alleles": alleles, "default_peptide_lengths": default_peptide_lengths, "program_name": program_name, "process_limit": process_limit, "extra_flags": extra_flags, } if "NetMHCpan version 2.8" in output_str: return NetMHCpan28(**common_kwargs) elif "NetMHCpan version 3.0" in output_str: return NetMHCpan3(**common_kwargs) elif "NetMHCpan version 4.0" in output_str: return NetMHCpan4(**common_kwargs) else: raise RuntimeError( "This software expects NetMHCpan version 2.8, 3.0, or 4.0")
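A hedged usage sketch, assuming the factory above is importable and a netMHCpan executable is on PATH; the allele name is only an example:

predictor = NetMHCpan(alleles=["HLA-A*02:01"])
print(type(predictor).__name__)   # NetMHCpan28, NetMHCpan3 or NetMHCpan4,
                                  # depending on what `netMHCpan --version` reports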
def get_parent_path(brain_or_object): """Calculate the physical parent path of this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Physical path of the parent object :rtype: string """ if is_portal(brain_or_object): return get_path(get_portal()) if is_brain(brain_or_object): path = get_path(brain_or_object) return path.rpartition("/")[0] return get_path(get_object(brain_or_object).aq_parent)
Calculate the physical parent path of this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Physical path of the parent object :rtype: string
Below is the the instruction that describes the task: ### Input: Calculate the physical parent path of this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Physical path of the parent object :rtype: string ### Response: def get_parent_path(brain_or_object): """Calculate the physical parent path of this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Physical path of the parent object :rtype: string """ if is_portal(brain_or_object): return get_path(get_portal()) if is_brain(brain_or_object): path = get_path(brain_or_object) return path.rpartition("/")[0] return get_path(get_object(brain_or_object).aq_parent)
def wait_for_unicode_char(self, ignored_chars=None, timeout=0):
        """Returns a str that contains the single character that was pressed.
        This already respects modifier keys and keyboard layouts.
        If timeout is not None and no key is pressed within the specified
        timeout, None is returned.
        If a key is in ignored_chars it will be ignored.
        As argument for ignored_chars any object that has a __contains__
        method can be used, e.g. a string, a set, a list, etc."""
        return self.listen_until_return(Handler.unicode_char(ignored_chars),
                                        timeout=timeout)
Returns a str that contains the single character that was pressed.
This already respects modifier keys and keyboard layouts.
If timeout is not None and no key is pressed within the specified
timeout, None is returned.
If a key is in ignored_chars it will be ignored.
As argument for ignored_chars any object that has a __contains__ method
can be used, e.g. a string, a set, a list, etc.
Below is the instruction that describes the task:
### Input:
Returns a str that contains the single character that was pressed.
This already respects modifier keys and keyboard layouts.
If timeout is not None and no key is pressed within the specified
timeout, None is returned.
If a key is in ignored_chars it will be ignored.
As argument for ignored_chars any object that has a __contains__ method
can be used, e.g. a string, a set, a list, etc.
### Response:
def wait_for_unicode_char(self, ignored_chars=None, timeout=0):
        """Returns a str that contains the single character that was pressed.
        This already respects modifier keys and keyboard layouts.
        If timeout is not None and no key is pressed within the specified
        timeout, None is returned.
        If a key is in ignored_chars it will be ignored.
        As argument for ignored_chars any object that has a __contains__
        method can be used, e.g. a string, a set, a list, etc."""
        return self.listen_until_return(Handler.unicode_char(ignored_chars),
                                        timeout=timeout)
def _check_view_permission(self, view): """ :param view: a :class:`ObjectView` class or instance """ security = get_service("security") return security.has_permission(current_user, view.permission, self.obj)
:param view: a :class:`ObjectView` class or instance
Below is the instruction that describes the task:
### Input:
:param view: a :class:`ObjectView` class or instance
### Response:
def _check_view_permission(self, view):
        """
        :param view: a :class:`ObjectView` class or instance
        """
        security = get_service("security")
        return security.has_permission(current_user, view.permission, self.obj)
def _adjust_inferential_results_for_parameter_constraints(self, constraints): """ Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None. """ if constraints is not None: # Ensure the model object has inferential results inferential_attributes = ["standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] assert all([hasattr(self, x) for x in inferential_attributes]) assert hasattr(self, "params") all_names = self.params.index.tolist() for series in [getattr(self, x) for x in inferential_attributes]: for pos in constraints: series.loc[all_names[pos]] = np.nan return None
Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None.
Below is the the instruction that describes the task: ### Input: Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None. ### Response: def _adjust_inferential_results_for_parameter_constraints(self, constraints): """ Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None. """ if constraints is not None: # Ensure the model object has inferential results inferential_attributes = ["standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] assert all([hasattr(self, x) for x in inferential_attributes]) assert hasattr(self, "params") all_names = self.params.index.tolist() for series in [getattr(self, x) for x in inferential_attributes]: for pos in constraints: series.loc[all_names[pos]] = np.nan return None
def on_edit_grid(self, event): """sets self.changes to true when user edits the grid. provides down and up key functionality for exiting the editor""" if not self.changes: self.changes = {event.Row} else: self.changes.add(event.Row) #self.changes = True try: editor = event.GetControl() editor.Bind(wx.EVT_KEY_DOWN, self.onEditorKey) except AttributeError: # if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method pass
sets self.changes to true when user edits the grid. provides down and up key functionality for exiting the editor
Below is the the instruction that describes the task: ### Input: sets self.changes to true when user edits the grid. provides down and up key functionality for exiting the editor ### Response: def on_edit_grid(self, event): """sets self.changes to true when user edits the grid. provides down and up key functionality for exiting the editor""" if not self.changes: self.changes = {event.Row} else: self.changes.add(event.Row) #self.changes = True try: editor = event.GetControl() editor.Bind(wx.EVT_KEY_DOWN, self.onEditorKey) except AttributeError: # if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method pass
def create_for_caught_error(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T], obj: PersistedObject, caught: Exception, options: Dict[str, Dict[str, Any]]): """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return: """ try: typ = get_pretty_type_str(desired_type) except: typ = str(desired_type) e = ParsingException('Error while parsing ' + str(obj) + ' as a ' + typ + ' with parser \'' + str(parser) + '\' using options=(' + str(options) + ') : caught \n ' + str(caught.__class__.__name__) + ' : ' + str(caught))\ .with_traceback(caught.__traceback__) # 'from e' was hiding the inner traceback. This is much better for debug e.__cause__ = None # e.__cause__ = caught # store the exception still, to be able to handle it later e.caught = caught return e
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return:
Below is the the instruction that describes the task: ### Input: Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return: ### Response: def create_for_caught_error(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T], obj: PersistedObject, caught: Exception, options: Dict[str, Dict[str, Any]]): """ Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests https://github.com/nose-devs/nose/issues/725 :param parser: :param desired_type: :param obj: :param caught: :param options: :return: """ try: typ = get_pretty_type_str(desired_type) except: typ = str(desired_type) e = ParsingException('Error while parsing ' + str(obj) + ' as a ' + typ + ' with parser \'' + str(parser) + '\' using options=(' + str(options) + ') : caught \n ' + str(caught.__class__.__name__) + ' : ' + str(caught))\ .with_traceback(caught.__traceback__) # 'from e' was hiding the inner traceback. This is much better for debug e.__cause__ = None # e.__cause__ = caught # store the exception still, to be able to handle it later e.caught = caught return e
def unregisterGeneralHandler(self, handler):
        """Unregister a general event handler."""
        if handler in self.__generalHandlers:
            self.__generalHandlers.remove(handler)
Unregister a general event handler.
Below is the instruction that describes the task:
### Input:
Unregister a general event handler.
### Response:
def unregisterGeneralHandler(self, handler):
        """Unregister a general event handler."""
        if handler in self.__generalHandlers:
            self.__generalHandlers.remove(handler)
def env(key, default): """ Helper to try to get a setting from the environment, or pyconfig, or finally use a provided default. """ value = os.environ.get(key, None) if value is not None: log.info(' %s = %r', key.lower().replace('_', '.'), value) return value key = key.lower().replace('_', '.') value = get(key) if value is not None: return value return default
Helper to try to get a setting from the environment, or pyconfig, or finally use a provided default.
Below is the the instruction that describes the task: ### Input: Helper to try to get a setting from the environment, or pyconfig, or finally use a provided default. ### Response: def env(key, default): """ Helper to try to get a setting from the environment, or pyconfig, or finally use a provided default. """ value = os.environ.get(key, None) if value is not None: log.info(' %s = %r', key.lower().replace('_', '.'), value) return value key = key.lower().replace('_', '.') value = get(key) if value is not None: return value return default
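A usage sketch, assuming env() above is in scope together with the module-level get() and log it relies on; the key names are hypothetical:

import os

os.environ['CACHE_TTL'] = '300'
print(env('CACHE_TTL', 60))     # '300' -- taken verbatim from the environment
print(env('CACHE_SIZE', 128))   # 128, unless 'cache.size' is set in pyconfig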
def bovy_hist(x,xlabel=None,ylabel=None,overplot=False,**kwargs): """ NAME: bovy_hist PURPOSE: wrapper around matplotlib's hist function INPUT: x - array to histogram xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed yrange - set the y-axis range +all pyplot.hist keywords OUTPUT: (from the matplotlib docs: http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hist) The return value is a tuple (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...]) if the input contains multiple data HISTORY: 2009-12-23 - Written - Bovy (NYU) """ if not overplot: pyplot.figure() if 'xrange' in kwargs: xlimits= kwargs.pop('xrange') if not 'range' in kwargs: kwargs['range']= xlimits xrangeSet= True else: xrangeSet= False if 'yrange' in kwargs: ylimits= kwargs.pop('yrange') yrangeSet= True else: yrangeSet= False out= pyplot.hist(x,**kwargs) if overplot: return out _add_axislabels(xlabel,ylabel) if not 'range' in kwargs and not xrangeSet: if isinstance(x,list): xlimits=(sc.array(x).min(),sc.array(x).max()) else: pyplot.xlim(x.min(),x.max()) elif xrangeSet: pyplot.xlim(xlimits) else: pyplot.xlim(kwargs['range']) if yrangeSet: pyplot.ylim(ylimits) _add_ticks() return out
NAME: bovy_hist PURPOSE: wrapper around matplotlib's hist function INPUT: x - array to histogram xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed yrange - set the y-axis range +all pyplot.hist keywords OUTPUT: (from the matplotlib docs: http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hist) The return value is a tuple (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...]) if the input contains multiple data HISTORY: 2009-12-23 - Written - Bovy (NYU)
Below is the the instruction that describes the task: ### Input: NAME: bovy_hist PURPOSE: wrapper around matplotlib's hist function INPUT: x - array to histogram xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed yrange - set the y-axis range +all pyplot.hist keywords OUTPUT: (from the matplotlib docs: http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hist) The return value is a tuple (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...]) if the input contains multiple data HISTORY: 2009-12-23 - Written - Bovy (NYU) ### Response: def bovy_hist(x,xlabel=None,ylabel=None,overplot=False,**kwargs): """ NAME: bovy_hist PURPOSE: wrapper around matplotlib's hist function INPUT: x - array to histogram xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed yrange - set the y-axis range +all pyplot.hist keywords OUTPUT: (from the matplotlib docs: http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.hist) The return value is a tuple (n, bins, patches) or ([n0, n1, ...], bins, [patches0, patches1,...]) if the input contains multiple data HISTORY: 2009-12-23 - Written - Bovy (NYU) """ if not overplot: pyplot.figure() if 'xrange' in kwargs: xlimits= kwargs.pop('xrange') if not 'range' in kwargs: kwargs['range']= xlimits xrangeSet= True else: xrangeSet= False if 'yrange' in kwargs: ylimits= kwargs.pop('yrange') yrangeSet= True else: yrangeSet= False out= pyplot.hist(x,**kwargs) if overplot: return out _add_axislabels(xlabel,ylabel) if not 'range' in kwargs and not xrangeSet: if isinstance(x,list): xlimits=(sc.array(x).min(),sc.array(x).max()) else: pyplot.xlim(x.min(),x.max()) elif xrangeSet: pyplot.xlim(xlimits) else: pyplot.xlim(kwargs['range']) if yrangeSet: pyplot.ylim(ylimits) _add_ticks() return out
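A hedged usage sketch, assuming bovy_hist above is importable and matplotlib can open a figure; the data are synthetic:

import numpy

data = numpy.random.normal(size=1000)
n, bins, patches = bovy_hist(data, xlabel=r'x', ylabel=r'N(x)',
                             bins=30, xrange=[-4., 4.], histtype='step')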
def _get_version(ctx, _, value): """Click callback for option to show current ZSL version.""" if not value or ctx.resilient_parsing: return message = 'Zsl %(version)s\nPython %(python_version)s' click.echo(message % { 'version': version, 'python_version': sys.version, }, color=ctx.color) ctx.exit()
Click callback for option to show current ZSL version.
Below is the the instruction that describes the task: ### Input: Click callback for option to show current ZSL version. ### Response: def _get_version(ctx, _, value): """Click callback for option to show current ZSL version.""" if not value or ctx.resilient_parsing: return message = 'Zsl %(version)s\nPython %(python_version)s' click.echo(message % { 'version': version, 'python_version': sys.version, }, color=ctx.color) ctx.exit()
def south_field_triple(self): "Returns a suitable description of this field for South." args, kwargs = introspector(self) kwargs.update({ 'populate_from': 'None' if callable(self.populate_from) else repr(self.populate_from), 'unique_with': repr(self.unique_with) }) return ('autoslug.fields.AutoSlugField', args, kwargs)
Returns a suitable description of this field for South.
Below is the the instruction that describes the task: ### Input: Returns a suitable description of this field for South. ### Response: def south_field_triple(self): "Returns a suitable description of this field for South." args, kwargs = introspector(self) kwargs.update({ 'populate_from': 'None' if callable(self.populate_from) else repr(self.populate_from), 'unique_with': repr(self.unique_with) }) return ('autoslug.fields.AutoSlugField', args, kwargs)
def enrich(self, gmt): """use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes """ if isscalar(self.background): if isinstance(self.background, int) or self.background.isdigit(): self._bg = int(self.background) elif isinstance(self.background, str): # self.background = set(reduce(lambda x,y: x+y, gmt.values(),[])) self._bg = self.get_background() self._logger.info("Background: found %s genes"%(len(self._bg))) else: raise Exception("Unsupported background data type") else: # handle array object: nd.array, list, tuple, set, Series try: it = iter(self.background) self._bg = set(self.background) except TypeError: self._logger.error("Unsupported background data type") # statistical testing hgtest = list(calc_pvalues(query=self._gls, gene_sets=gmt, background=self._bg)) if len(hgtest) > 0: terms, pvals, olsz, gsetsz, genes = hgtest fdrs, rej = multiple_testing_correction(ps = pvals, alpha=self.cutoff, method='benjamini-hochberg') # save to a dataframe odict = OrderedDict() odict['Term'] = terms odict['Overlap'] = list(map(lambda h,g: "%s/%s"%(h, g), olsz, gsetsz)) odict['P-value'] = pvals odict['Adjusted P-value'] = fdrs # odict['Reject (FDR< %s)'%self.cutoff ] = rej odict['Genes'] = [";".join(g) for g in genes] res = pd.DataFrame(odict) return res return
use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes
Below is the the instruction that describes the task: ### Input: use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes ### Response: def enrich(self, gmt): """use local mode p = p-value computed using the Fisher exact test (Hypergeometric test) Not implemented here: combine score = log(p)·z see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4 columns contain: Term Overlap P-value Adjusted_P-value Genes """ if isscalar(self.background): if isinstance(self.background, int) or self.background.isdigit(): self._bg = int(self.background) elif isinstance(self.background, str): # self.background = set(reduce(lambda x,y: x+y, gmt.values(),[])) self._bg = self.get_background() self._logger.info("Background: found %s genes"%(len(self._bg))) else: raise Exception("Unsupported background data type") else: # handle array object: nd.array, list, tuple, set, Series try: it = iter(self.background) self._bg = set(self.background) except TypeError: self._logger.error("Unsupported background data type") # statistical testing hgtest = list(calc_pvalues(query=self._gls, gene_sets=gmt, background=self._bg)) if len(hgtest) > 0: terms, pvals, olsz, gsetsz, genes = hgtest fdrs, rej = multiple_testing_correction(ps = pvals, alpha=self.cutoff, method='benjamini-hochberg') # save to a dataframe odict = OrderedDict() odict['Term'] = terms odict['Overlap'] = list(map(lambda h,g: "%s/%s"%(h, g), olsz, gsetsz)) odict['P-value'] = pvals odict['Adjusted P-value'] = fdrs # odict['Reject (FDR< %s)'%self.cutoff ] = rej odict['Genes'] = [";".join(g) for g in genes] res = pd.DataFrame(odict) return res return
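A minimal standalone sketch of the hypergeometric step described in the record above, using scipy.stats; the counts below (background size, gene-set size, query size, overlap) are illustrative values, not taken from the record:

from scipy.stats import hypergeom

# P(X >= overlap) when drawing `query_size` genes from a background of
# `background_size` genes, of which `gene_set_size` belong to the term.
background_size = 20000   # illustrative
gene_set_size = 300       # illustrative
query_size = 500          # illustrative
overlap = 25              # illustrative

pval = hypergeom.sf(overlap - 1, background_size, gene_set_size, query_size)
print(pval)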
def builder(name, **builder_init_kwargs): """Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized. """ name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name) builder_kwargs.update(builder_init_kwargs) if name in _ABSTRACT_DATASET_REGISTRY: raise DatasetNotFoundError(name, is_abstract=True) if name in _IN_DEVELOPMENT_REGISTRY: raise DatasetNotFoundError(name, in_development=True) if name not in _DATASET_REGISTRY: raise DatasetNotFoundError(name) try: return _DATASET_REGISTRY[name](**builder_kwargs) except BaseException: logging.error("Failed to construct dataset %s", name) raise
Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized.
Below is the the instruction that describes the task: ### Input: Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized. ### Response: def builder(name, **builder_init_kwargs): """Fetches a `tfds.core.DatasetBuilder` by string name. Args: name: `str`, the registered name of the `DatasetBuilder` (the snake case version of the class name). This can be either `"dataset_name"` or `"dataset_name/config_name"` for datasets with `BuilderConfig`s. As a convenience, this string may contain comma-separated keyword arguments for the builder. For example `"foo_bar/a=True,b=3"` would use the `FooBar` dataset passing the keyword arguments `a=True` and `b=3` (for builders with configs, it would be `"foo_bar/zoo/a=True,b=3"` to use the `"zoo"` config and pass to the builder keyword arguments `a=True` and `b=3`). **builder_init_kwargs: `dict` of keyword arguments passed to the `DatasetBuilder`. These will override keyword arguments passed in `name`, if any. Returns: A `tfds.core.DatasetBuilder`. Raises: DatasetNotFoundError: if `name` is unrecognized. """ name, builder_kwargs = _dataset_name_and_kwargs_from_name_str(name) builder_kwargs.update(builder_init_kwargs) if name in _ABSTRACT_DATASET_REGISTRY: raise DatasetNotFoundError(name, is_abstract=True) if name in _IN_DEVELOPMENT_REGISTRY: raise DatasetNotFoundError(name, in_development=True) if name not in _DATASET_REGISTRY: raise DatasetNotFoundError(name) try: return _DATASET_REGISTRY[name](**builder_kwargs) except BaseException: logging.error("Failed to construct dataset %s", name) raise
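A short sketch of how this entry point is typically called from user code, assuming tensorflow_datasets is installed; "mnist" is just an illustrative dataset name:

import tensorflow_datasets as tfds

builder = tfds.builder("mnist")      # resolves the registered DatasetBuilder
builder.download_and_prepare()       # fetches and prepares the data on first use
train_ds = builder.as_dataset(split="train")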
def timezone_from_str(tz_str): """ Convert a timezone string to a timezone object. :param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]' :return: a timezone object (tzinfo) """ m = re.match(r'UTC([+|-]\d{1,2}):(\d{2})', tz_str) if m: # in format 'UTC±[hh]:[mm]' delta_h = int(m.group(1)) delta_m = int(m.group(2)) if delta_h >= 0 else -int(m.group(2)) return timezone(timedelta(hours=delta_h, minutes=delta_m)) # in format 'Asia/Shanghai' try: return pytz.timezone(tz_str) except pytz.exceptions.UnknownTimeZoneError: return None
Convert a timezone string to a timezone object. :param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]' :return: a timezone object (tzinfo)
Below is the the instruction that describes the task: ### Input: Convert a timezone string to a timezone object. :param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]' :return: a timezone object (tzinfo) ### Response: def timezone_from_str(tz_str): """ Convert a timezone string to a timezone object. :param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]' :return: a timezone object (tzinfo) """ m = re.match(r'UTC([+|-]\d{1,2}):(\d{2})', tz_str) if m: # in format 'UTC±[hh]:[mm]' delta_h = int(m.group(1)) delta_m = int(m.group(2)) if delta_h >= 0 else -int(m.group(2)) return timezone(timedelta(hours=delta_h, minutes=delta_m)) # in format 'Asia/Shanghai' try: return pytz.timezone(tz_str) except pytz.exceptions.UnknownTimeZoneError: return None
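A self-contained sketch of the 'UTC±[hh]:[mm]' branch from the record above, built only on the standard library:

import re
from datetime import timedelta, timezone

def utc_offset_to_tzinfo(tz_str):
    # Returns a fixed-offset tzinfo for strings like 'UTC+08:00', else None.
    m = re.match(r'UTC([+-]\d{1,2}):(\d{2})', tz_str)
    if not m:
        return None
    hours = int(m.group(1))
    minutes = int(m.group(2)) if hours >= 0 else -int(m.group(2))
    return timezone(timedelta(hours=hours, minutes=minutes))

print(utc_offset_to_tzinfo('UTC+08:00'))   # UTC+08:00
print(utc_offset_to_tzinfo('UTC-05:30'))   # UTC-05:30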
def get_user_by_email(server_context, email): """ Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return: """ url = server_context.build_url(user_controller, 'getUsers.api') payload = dict(includeDeactivatedAccounts=True) result = server_context.make_request(url, payload) if result is None or result['users'] is None: raise ValueError("No Users in container" + email) for user in result['users']: if user['email'] == email: return user else: raise ValueError("User not found: " + email)
Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return:
Below is the the instruction that describes the task: ### Input: Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return: ### Response: def get_user_by_email(server_context, email): """ Get the user with the provided email. Throws a ValueError if not found. :param server_context: A LabKey server context. See utils.create_server_context. :param email: :return: """ url = server_context.build_url(user_controller, 'getUsers.api') payload = dict(includeDeactivatedAccounts=True) result = server_context.make_request(url, payload) if result is None or result['users'] is None: raise ValueError("No Users in container" + email) for user in result['users']: if user['email'] == email: return user else: raise ValueError("User not found: " + email)
def merge(cls, source_blocks): """Merge multiple SourceBlocks together""" if len(source_blocks) == 1: return source_blocks[0] source_blocks.sort(key=operator.attrgetter('start_line_number')) main_block = source_blocks[0] boot_lines = main_block.boot_lines source_lines = [source_line for source_block in source_blocks for source_line in source_block.source_lines] return cls(boot_lines, source_lines, directive=main_block.directive, language=main_block.language, roles=main_block.roles)
Merge multiple SourceBlocks together
Below is the the instruction that describes the task: ### Input: Merge multiple SourceBlocks together ### Response: def merge(cls, source_blocks): """Merge multiple SourceBlocks together""" if len(source_blocks) == 1: return source_blocks[0] source_blocks.sort(key=operator.attrgetter('start_line_number')) main_block = source_blocks[0] boot_lines = main_block.boot_lines source_lines = [source_line for source_block in source_blocks for source_line in source_block.source_lines] return cls(boot_lines, source_lines, directive=main_block.directive, language=main_block.language, roles=main_block.roles)
def auto_newline(buffer): r""" Insert \n at the cursor position. Also add necessary padding. """ insert_text = buffer.insert_text if buffer.document.current_line_after_cursor: # When we are in the middle of a line. Always insert a newline. insert_text('\n') else: # Go to new line, but also add indentation. current_line = buffer.document.current_line_before_cursor.rstrip() insert_text('\n') # Unident if the last line ends with 'pass', remove four spaces. unindent = current_line.rstrip().endswith(' pass') # Copy whitespace from current line current_line2 = current_line[4:] if unindent else current_line for c in current_line2: if c.isspace(): insert_text(c) else: break # If the last line ends with a colon, add four extra spaces. if current_line[-1:] == ':': for x in range(4): insert_text(' ')
r""" Insert \n at the cursor position. Also add necessary padding.
Below is the the instruction that describes the task: ### Input: r""" Insert \n at the cursor position. Also add necessary padding. ### Response: def auto_newline(buffer): r""" Insert \n at the cursor position. Also add necessary padding. """ insert_text = buffer.insert_text if buffer.document.current_line_after_cursor: # When we are in the middle of a line. Always insert a newline. insert_text('\n') else: # Go to new line, but also add indentation. current_line = buffer.document.current_line_before_cursor.rstrip() insert_text('\n') # Unident if the last line ends with 'pass', remove four spaces. unindent = current_line.rstrip().endswith(' pass') # Copy whitespace from current line current_line2 = current_line[4:] if unindent else current_line for c in current_line2: if c.isspace(): insert_text(c) else: break # If the last line ends with a colon, add four extra spaces. if current_line[-1:] == ':': for x in range(4): insert_text(' ')
def add( self, method=None, # method or ``Response`` url=None, body="", adding_headers=None, *args, **kwargs ): """ A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> ) """ if isinstance(method, BaseResponse): self._matches.append(method) return if adding_headers is not None: kwargs.setdefault("headers", adding_headers) self._matches.append(Response(method=method, url=url, body=body, **kwargs))
A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> )
Below is the the instruction that describes the task: ### Input: A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> ) ### Response: def add( self, method=None, # method or ``Response`` url=None, body="", adding_headers=None, *args, **kwargs ): """ A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> ) """ if isinstance(method, BaseResponse): self._matches.append(method) return if adding_headers is not None: kwargs.setdefault("headers", adding_headers) self._matches.append(Response(method=method, url=url, body=body, **kwargs))
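A hedged usage sketch of the registry above, as the responses library is commonly used in tests; the URL and payload are illustrative:

import requests
import responses

@responses.activate
def test_example():
    responses.add(responses.GET, 'http://example.com/api',
                  json={'ok': True}, status=200)
    resp = requests.get('http://example.com/api')
    assert resp.json() == {'ok': True}

test_example()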
def _get_default_price_id(items, option, hourly, location): """Returns a 'free' price id given an option.""" for item in items: if utils.lookup(item, 'itemCategory', 'categoryCode') != option: continue for price in item['prices']: if all([float(price.get('hourlyRecurringFee', 0)) == 0.0, float(price.get('recurringFee', 0)) == 0.0, _matches_billing(price, hourly), _matches_location(price, location)]): return price['id'] raise SoftLayer.SoftLayerError( "Could not find valid price for '%s' option" % option)
Returns a 'free' price id given an option.
Below is the the instruction that describes the task: ### Input: Returns a 'free' price id given an option. ### Response: def _get_default_price_id(items, option, hourly, location): """Returns a 'free' price id given an option.""" for item in items: if utils.lookup(item, 'itemCategory', 'categoryCode') != option: continue for price in item['prices']: if all([float(price.get('hourlyRecurringFee', 0)) == 0.0, float(price.get('recurringFee', 0)) == 0.0, _matches_billing(price, hourly), _matches_location(price, location)]): return price['id'] raise SoftLayer.SoftLayerError( "Could not find valid price for '%s' option" % option)
def select_specimen(self, specimen): """ Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit. """ try: fit_index = self.pmag_results_data['specimens'][self.s].index( self.current_fit) except KeyError: fit_index = None except ValueError: fit_index = None # sets self.s to specimen calculates params etc. self.initialize_CART_rot(specimen) self.list_bound_loc = 0 if fit_index != None and self.s in self.pmag_results_data['specimens']: try: self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index] except IndexError: self.current_fit = None else: self.current_fit = None if self.s != self.specimens_box.GetValue(): self.specimens_box.SetValue(self.s)
Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit.
Below is the the instruction that describes the task: ### Input: Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit. ### Response: def select_specimen(self, specimen): """ Goes through the calculations necessary to plot measurement data for specimen and sets specimen as current GUI specimen, also attempts to handle changing current fit. """ try: fit_index = self.pmag_results_data['specimens'][self.s].index( self.current_fit) except KeyError: fit_index = None except ValueError: fit_index = None # sets self.s to specimen calculates params etc. self.initialize_CART_rot(specimen) self.list_bound_loc = 0 if fit_index != None and self.s in self.pmag_results_data['specimens']: try: self.current_fit = self.pmag_results_data['specimens'][self.s][fit_index] except IndexError: self.current_fit = None else: self.current_fit = None if self.s != self.specimens_box.GetValue(): self.specimens_box.SetValue(self.s)
def _namespace_default(): """ Get current namespace if running in a k8s cluster If not in a k8s cluster with service accounts enabled, default to 'default' Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125 """ ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' if os.path.exists(ns_path): with open(ns_path) as f: return f.read().strip() return 'default'
Get current namespace if running in a k8s cluster If not in a k8s cluster with service accounts enabled, default to 'default' Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125
Below is the the instruction that describes the task: ### Input: Get current namespace if running in a k8s cluster If not in a k8s cluster with service accounts enabled, default to 'default' Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125 ### Response: def _namespace_default(): """ Get current namespace if running in a k8s cluster If not in a k8s cluster with service accounts enabled, default to 'default' Taken from https://github.com/jupyterhub/kubespawner/blob/master/kubespawner/spawner.py#L125 """ ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' if os.path.exists(ns_path): with open(ns_path) as f: return f.read().strip() return 'default'
def run_query(self, query): """ method runs the query and returns a list of filtered UnitOfWork records """ cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor]
method runs the query and returns a list of filtered UnitOfWork records
Below is the the instruction that describes the task: ### Input: method runs the query and returns a list of filtered UnitOfWork records ### Response: def run_query(self, query): """ method runs the query and returns a list of filtered UnitOfWork records """ cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor]
def _cgc_package_list_identifier(self, data_addr, data_size): """ Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple """ if data_size < 100: return None, None data = self.fast_memory_load(data_addr, data_size, str) if data[:10] != 'The DECREE': return None, None if not all(i in string.printable for i in data): return None, None if not re.match(r"The DECREE packages used in the creation of this challenge binary were:", data): return None, None return 'cgc-package-list', data_size
Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple
Below is the the instruction that describes the task: ### Input: Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple ### Response: def _cgc_package_list_identifier(self, data_addr, data_size): """ Identifies the CGC package list associated with the CGC binary. :param int data_addr: Address of the data in memory. :param int data_size: Maximum size possible. :return: A 2-tuple of data type and size. :rtype: tuple """ if data_size < 100: return None, None data = self.fast_memory_load(data_addr, data_size, str) if data[:10] != 'The DECREE': return None, None if not all(i in string.printable for i in data): return None, None if not re.match(r"The DECREE packages used in the creation of this challenge binary were:", data): return None, None return 'cgc-package-list', data_size
def to_disk(self, path, **kwargs): """Save the entity ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to load. **kwargs: Other config paramters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#to_disk """ path = ensure_path(path) path = path.with_suffix(".jsonl") srsly.write_jsonl(path, self.patterns)
Save the entity ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to load. **kwargs: Other config paramters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#to_disk
Below is the the instruction that describes the task: ### Input: Save the entity ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to load. **kwargs: Other config paramters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#to_disk ### Response: def to_disk(self, path, **kwargs): """Save the entity ruler patterns to a directory. The patterns will be saved as newline-delimited JSON (JSONL). path (unicode / Path): The JSONL file to load. **kwargs: Other config paramters, mostly for consistency. RETURNS (EntityRuler): The loaded entity ruler. DOCS: https://spacy.io/api/entityruler#to_disk """ path = ensure_path(path) path = path.with_suffix(".jsonl") srsly.write_jsonl(path, self.patterns)
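A usage sketch assuming spaCy 2.x, where the EntityRuler is constructed directly and its patterns are persisted with the method above; the pattern and file name are illustrative:

import spacy
from spacy.pipeline import EntityRuler

nlp = spacy.blank("en")
ruler = EntityRuler(nlp)
ruler.add_patterns([{"label": "ORG", "pattern": "Acme Corp"}])
ruler.to_disk("patterns")   # written as patterns.jsonl (the suffix is forced above)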
def update_variant_by_id(cls, variant_id, variant, **kwargs): """Update Variant Update attributes of Variant This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_variant_by_id(variant_id, variant, async=True) >>> result = thread.get() :param async bool :param str variant_id: ID of variant to update. (required) :param Variant variant: Attributes of variant to update. (required) :return: Variant If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs) else: (data) = cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs) return data
Update Variant Update attributes of Variant This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_variant_by_id(variant_id, variant, async=True) >>> result = thread.get() :param async bool :param str variant_id: ID of variant to update. (required) :param Variant variant: Attributes of variant to update. (required) :return: Variant If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Update Variant Update attributes of Variant This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_variant_by_id(variant_id, variant, async=True) >>> result = thread.get() :param async bool :param str variant_id: ID of variant to update. (required) :param Variant variant: Attributes of variant to update. (required) :return: Variant If the method is called asynchronously, returns the request thread. ### Response: def update_variant_by_id(cls, variant_id, variant, **kwargs): """Update Variant Update attributes of Variant This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_variant_by_id(variant_id, variant, async=True) >>> result = thread.get() :param async bool :param str variant_id: ID of variant to update. (required) :param Variant variant: Attributes of variant to update. (required) :return: Variant If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs) else: (data) = cls._update_variant_by_id_with_http_info(variant_id, variant, **kwargs) return data
def get_requests_batch(input_list, results={}, delay_time=0.5, max_threads=100): """ This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ... ] """ threads = [] thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len(input_list) # randomly select one user agent for one input list user_agent = random.choice(user_agent_pool) for row in input_list: headers = {} path = "/" ssl = False theme = "http" if type(row) is dict: if "host" not in row: continue host = row["host"] if "path" in row: path = row["path"] if "headers" in row: if type(row["headers"]) is dict: headers = row["headers"] if "ssl" in row: ssl = row["ssl"] theme = "https" if "url" in row: url = row["url"] else: url = "%s://%s%s" % (theme, host, path) else: host = row url = "%s://%s%s" % (theme, host, path) wait_time = 0 while threading.active_count() > max_threads: time.sleep(1) wait_time += 1 if wait_time > thread_wait_timeout: thread_error = True break if thread_error: results["error"] = "Threads took too long to finish." break if "User-Agent" not in headers: headers["User-Agent"] = user_agent # add just a little bit of delay before starting the thread # to avoid overwhelming the connection. time.sleep(delay_time) log_prefix = "%d/%d: " % (ind, total_item_count) thread = threading.Thread(target=get_request, args=(host, path, headers, ssl, results, url, log_prefix)) ind += 1 thread.setDaemon(1) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY: try: thread.start() threads.append(thread) thread_open_success = True except: retries += 1 time.sleep(THREAD_START_DELAY) logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, url, retries, MAX_THREAD_START_RETRY)) if retries == MAX_THREAD_START_RETRY: logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, url, retries)) for thread in threads: thread.join(thread_wait_timeout) return results
This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ... ]
Below is the the instruction that describes the task: ### Input: This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ... ] ### Response: def get_requests_batch(input_list, results={}, delay_time=0.5, max_threads=100): """ This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ... ] """ threads = [] thread_error = False thread_wait_timeout = 200 ind = 1 total_item_count = len(input_list) # randomly select one user agent for one input list user_agent = random.choice(user_agent_pool) for row in input_list: headers = {} path = "/" ssl = False theme = "http" if type(row) is dict: if "host" not in row: continue host = row["host"] if "path" in row: path = row["path"] if "headers" in row: if type(row["headers"]) is dict: headers = row["headers"] if "ssl" in row: ssl = row["ssl"] theme = "https" if "url" in row: url = row["url"] else: url = "%s://%s%s" % (theme, host, path) else: host = row url = "%s://%s%s" % (theme, host, path) wait_time = 0 while threading.active_count() > max_threads: time.sleep(1) wait_time += 1 if wait_time > thread_wait_timeout: thread_error = True break if thread_error: results["error"] = "Threads took too long to finish." break if "User-Agent" not in headers: headers["User-Agent"] = user_agent # add just a little bit of delay before starting the thread # to avoid overwhelming the connection. time.sleep(delay_time) log_prefix = "%d/%d: " % (ind, total_item_count) thread = threading.Thread(target=get_request, args=(host, path, headers, ssl, results, url, log_prefix)) ind += 1 thread.setDaemon(1) thread_open_success = False retries = 0 while not thread_open_success and retries < MAX_THREAD_START_RETRY: try: thread.start() threads.append(thread) thread_open_success = True except: retries += 1 time.sleep(THREAD_START_DELAY) logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, url, retries, MAX_THREAD_START_RETRY)) if retries == MAX_THREAD_START_RETRY: logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, url, retries)) for thread in threads: thread.join(thread_wait_timeout) return results
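For contrast, a compact standard-library sketch of the same idea (bounded-concurrency GETs); it is not the implementation above, just an alternative built on concurrent.futures, and the target URLs are illustrative:

import concurrent.futures
import requests

def fetch(url):
    # Worker: return (url, status code) or (url, error message).
    try:
        return url, requests.get(url, timeout=10).status_code
    except requests.RequestException as exc:
        return url, str(exc)

urls = ['http://www.example.com/', 'http://www.example.org/']
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
    results = dict(pool.map(fetch, urls))
print(results)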
def extern_equals(self, context_handle, val1, val2): """Return true if the given Handles are __eq__.""" return self._ffi.from_handle(val1[0]) == self._ffi.from_handle(val2[0])
Return true if the given Handles are __eq__.
Below is the the instruction that describes the task: ### Input: Return true if the given Handles are __eq__. ### Response: def extern_equals(self, context_handle, val1, val2): """Return true if the given Handles are __eq__.""" return self._ffi.from_handle(val1[0]) == self._ffi.from_handle(val2[0])
def hgsub_report(self): """ Yields: str: .hgsubs line for this repository """ if self.relpath == '.': return yield "%s = [%s]%s" % ( self.fpath.lstrip('./'), self.label, self.remote_url)
Yields: str: .hgsubs line for this repository
Below is the the instruction that describes the task: ### Input: Yields: str: .hgsubs line for this repository ### Response: def hgsub_report(self): """ Yields: str: .hgsubs line for this repository """ if self.relpath == '.': return yield "%s = [%s]%s" % ( self.fpath.lstrip('./'), self.label, self.remote_url)
def set_itunes_author_name(self): """Parses author name from itunes tags and sets value""" try: self.itunes_author_name = self.soup.find('itunes:author').string except AttributeError: self.itunes_author_name = None
Parses author name from itunes tags and sets value
Below is the the instruction that describes the task: ### Input: Parses author name from itunes tags and sets value ### Response: def set_itunes_author_name(self): """Parses author name from itunes tags and sets value""" try: self.itunes_author_name = self.soup.find('itunes:author').string except AttributeError: self.itunes_author_name = None
def status(self, id): """ Fetch information about a single toot. Does not require authentication for publicly visible statuses. Returns a `toot dict`_. """ id = self.__unpack_id(id) url = '/api/v1/statuses/{0}'.format(str(id)) return self.__api_request('GET', url)
Fetch information about a single toot. Does not require authentication for publicly visible statuses. Returns a `toot dict`_.
Below is the the instruction that describes the task: ### Input: Fetch information about a single toot. Does not require authentication for publicly visible statuses. Returns a `toot dict`_. ### Response: def status(self, id): """ Fetch information about a single toot. Does not require authentication for publicly visible statuses. Returns a `toot dict`_. """ id = self.__unpack_id(id) url = '/api/v1/statuses/{0}'.format(str(id)) return self.__api_request('GET', url)
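A hedged usage sketch, assuming Mastodon.py with an already-provisioned access token; the base URL, token, and status id are illustrative placeholders:

from mastodon import Mastodon

api = Mastodon(access_token='YOUR_TOKEN',              # illustrative placeholder
               api_base_url='https://mastodon.example')
toot = api.status(123456)                              # illustrative status id
print(toot['content'])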
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext """ if self._context is None: self._context = FeedbackContext( self._version, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid'], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext
Below is the the instruction that describes the task: ### Input: Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext ### Response: def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FeedbackContext for this FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackContext """ if self._context is None: self._context = FeedbackContext( self._version, account_sid=self._solution['account_sid'], call_sid=self._solution['call_sid'], ) return self._context
def gaussian_kernel(sigma, truncate=4.0): """Return Gaussian that truncates at the given number of std deviations. Adapted from https://github.com/nicjhan/gaussian-filter """ sigma = float(sigma) radius = int(truncate * sigma + 0.5) x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1] sigma = sigma ** 2 k = 2 * np.exp(-0.5 * (x ** 2 + y ** 2) / sigma) k = k / np.sum(k) return k
Return Gaussian that truncates at the given number of std deviations. Adapted from https://github.com/nicjhan/gaussian-filter
Below is the the instruction that describes the task: ### Input: Return Gaussian that truncates at the given number of std deviations. Adapted from https://github.com/nicjhan/gaussian-filter ### Response: def gaussian_kernel(sigma, truncate=4.0): """Return Gaussian that truncates at the given number of std deviations. Adapted from https://github.com/nicjhan/gaussian-filter """ sigma = float(sigma) radius = int(truncate * sigma + 0.5) x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1] sigma = sigma ** 2 k = 2 * np.exp(-0.5 * (x ** 2 + y ** 2) / sigma) k = k / np.sum(k) return k
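A numpy-only sketch mirroring the kernel construction above and checking its normalization; sigma and truncate are illustrative values:

import numpy as np

sigma, truncate = 2.0, 4.0
radius = int(truncate * sigma + 0.5)
x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]
kernel = np.exp(-0.5 * (x**2 + y**2) / sigma**2)
kernel /= kernel.sum()                 # normalize so the weights sum to one

print(kernel.shape)                    # (2*radius + 1, 2*radius + 1)
print(np.isclose(kernel.sum(), 1.0))   # True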
def read_environment(): """ Read all environment variables to see if they contain PERI """ out = {} for k,v in iteritems(os.environ): if transform(k) in default_conf: out[transform(k)] = v return out
Read all environment variables to see if they contain PERI
Below is the the instruction that describes the task: ### Input: Read all environment variables to see if they contain PERI ### Response: def read_environment(): """ Read all environment variables to see if they contain PERI """ out = {} for k,v in iteritems(os.environ): if transform(k) in default_conf: out[transform(k)] = v return out
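The transform and default_conf names above are defined elsewhere in that source; as a rough standalone illustration of the same pattern (collecting prefixed environment variables), assuming a PERI_ prefix convention:

import os

def env_with_prefix(prefix='PERI_'):
    # Collect variables such as PERI_VERBOSE into {'verbose': '1'}.
    return {k[len(prefix):].lower(): v
            for k, v in os.environ.items()
            if k.startswith(prefix)}

os.environ['PERI_VERBOSE'] = '1'   # illustrative value
print(env_with_prefix())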
def search_location(loc, locations=None, critical=False, create_in=None, verbose=True): ''' Locates files with a twist: * Check the existence of a file using the full path in `loc` * Search for the filename `loc` in `locations` * Create it's enclosing folders if the file does not exist. \ use `create_in` :param loc: Filename to search :param locations: A list of possible locations to search within (can be a dictionary, see note below) :param critical: |appteardown| if file was not found :param create_in: If `loc` was not found, the folder `create_in` is created. If `locations` is a dictionary, `create_in` can also specify a key of `locations`. The value will be used then. :param verbose: Pass verbose flag to :func:`make_locations` :returns: The full path of `loc` in matched location .. note:: * |params_locations_dict| * |param_locations_none| ''' from photon.util.structures import to_list from photon.util.system import shell_notify if not locations: locations = get_locations() for p in reversed(sorted(to_list(locations))): f = _path.join(p, loc) if _path.exists(f): return f if _path.exists(_path.abspath(_path.expanduser(loc))): return _path.abspath(_path.expanduser(loc)) if critical: shell_notify('could not locate', state=True, more=dict( file=loc, locations=locations )) if create_in: if isinstance(locations, dict): create_in = locations.get(create_in, create_in) make_locations(locations=[create_in], verbose=verbose) return _path.join(create_in, loc)
Locates files with a twist: * Check the existence of a file using the full path in `loc` * Search for the filename `loc` in `locations` * Create it's enclosing folders if the file does not exist. \ use `create_in` :param loc: Filename to search :param locations: A list of possible locations to search within (can be a dictionary, see note below) :param critical: |appteardown| if file was not found :param create_in: If `loc` was not found, the folder `create_in` is created. If `locations` is a dictionary, `create_in` can also specify a key of `locations`. The value will be used then. :param verbose: Pass verbose flag to :func:`make_locations` :returns: The full path of `loc` in matched location .. note:: * |params_locations_dict| * |param_locations_none|
Below is the the instruction that describes the task: ### Input: Locates files with a twist: * Check the existence of a file using the full path in `loc` * Search for the filename `loc` in `locations` * Create it's enclosing folders if the file does not exist. \ use `create_in` :param loc: Filename to search :param locations: A list of possible locations to search within (can be a dictionary, see note below) :param critical: |appteardown| if file was not found :param create_in: If `loc` was not found, the folder `create_in` is created. If `locations` is a dictionary, `create_in` can also specify a key of `locations`. The value will be used then. :param verbose: Pass verbose flag to :func:`make_locations` :returns: The full path of `loc` in matched location .. note:: * |params_locations_dict| * |param_locations_none| ### Response: def search_location(loc, locations=None, critical=False, create_in=None, verbose=True): ''' Locates files with a twist: * Check the existence of a file using the full path in `loc` * Search for the filename `loc` in `locations` * Create it's enclosing folders if the file does not exist. \ use `create_in` :param loc: Filename to search :param locations: A list of possible locations to search within (can be a dictionary, see note below) :param critical: |appteardown| if file was not found :param create_in: If `loc` was not found, the folder `create_in` is created. If `locations` is a dictionary, `create_in` can also specify a key of `locations`. The value will be used then. :param verbose: Pass verbose flag to :func:`make_locations` :returns: The full path of `loc` in matched location .. note:: * |params_locations_dict| * |param_locations_none| ''' from photon.util.structures import to_list from photon.util.system import shell_notify if not locations: locations = get_locations() for p in reversed(sorted(to_list(locations))): f = _path.join(p, loc) if _path.exists(f): return f if _path.exists(_path.abspath(_path.expanduser(loc))): return _path.abspath(_path.expanduser(loc)) if critical: shell_notify('could not locate', state=True, more=dict( file=loc, locations=locations )) if create_in: if isinstance(locations, dict): create_in = locations.get(create_in, create_in) make_locations(locations=[create_in], verbose=verbose) return _path.join(create_in, loc)
def make_event_filter(self, filter_key, filter_value): """Create a new event filter.""" event_filter = EventFilter( self.event_name, self.event, {filter_key: filter_value}, from_block=self.from_block, to_block=self.to_block ) event_filter.set_poll_interval(0.5) return event_filter
Create a new event filter.
Below is the the instruction that describes the task: ### Input: Create a new event filter. ### Response: def make_event_filter(self, filter_key, filter_value): """Create a new event filter.""" event_filter = EventFilter( self.event_name, self.event, {filter_key: filter_value}, from_block=self.from_block, to_block=self.to_block ) event_filter.set_poll_interval(0.5) return event_filter
def delete_entry(request, entry_id): """ Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for a confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted. """ try: entry = Entry.no_join.get(pk=entry_id, user=request.user) except Entry.DoesNotExist: message = 'No such entry found.' messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) if request.method == 'POST': key = request.POST.get('key', None) if key and key == entry.delete_key: entry.delete() message = 'Deleted {0} for {1}.'.format(entry.activity.name, entry.project) messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) else: message = 'You are not authorized to delete this entry!' messages.error(request, message) return render(request, 'timepiece/entry/delete.html', { 'entry': entry, })
Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for a confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted.
Below is the the instruction that describes the task: ### Input: Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for a confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted. ### Response: def delete_entry(request, entry_id): """ Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for a confirmation of intent will be presented to the user. If this method is invoked via a POST request, the entry will be deleted. """ try: entry = Entry.no_join.get(pk=entry_id, user=request.user) except Entry.DoesNotExist: message = 'No such entry found.' messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) if request.method == 'POST': key = request.POST.get('key', None) if key and key == entry.delete_key: entry.delete() message = 'Deleted {0} for {1}.'.format(entry.activity.name, entry.project) messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) else: message = 'You are not authorized to delete this entry!' messages.error(request, message) return render(request, 'timepiece/entry/delete.html', { 'entry': entry, })
def prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size=None): """ Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data. Args: estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): An estimator for a built-in Amazon algorithm to get information from and update. inputs: The training data. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data. """ if isinstance(inputs, list): for record in inputs: if isinstance(record, amazon_estimator.RecordSet) and record.channel == 'train': estimator.feature_dim = record.feature_dim break elif isinstance(inputs, amazon_estimator.RecordSet): estimator.feature_dim = inputs.feature_dim else: raise TypeError('Training data must be represented in RecordSet or list of RecordSets') estimator.mini_batch_size = mini_batch_size
Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data. Args: estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): An estimator for a built-in Amazon algorithm to get information from and update. inputs: The training data. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data.
Below is the the instruction that describes the task: ### Input: Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data. Args: estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): An estimator for a built-in Amazon algorithm to get information from and update. inputs: The training data. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data. ### Response: def prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size=None): """ Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data. Args: estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): An estimator for a built-in Amazon algorithm to get information from and update. inputs: The training data. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data. """ if isinstance(inputs, list): for record in inputs: if isinstance(record, amazon_estimator.RecordSet) and record.channel == 'train': estimator.feature_dim = record.feature_dim break elif isinstance(inputs, amazon_estimator.RecordSet): estimator.feature_dim = inputs.feature_dim else: raise TypeError('Training data must be represented in RecordSet or list of RecordSets') estimator.mini_batch_size = mini_batch_size
def _normalize_coerce_to_format_with_lookup(self, v): """ Replace a format with a default """ try: return self.format_lookup.get(v, v) except TypeError: # v is something we can't lookup (like a list) return v
Replace a format with a default
Below is the the instruction that describes the task: ### Input: Replace a format with a default ### Response: def _normalize_coerce_to_format_with_lookup(self, v): """ Replace a format with a default """ try: return self.format_lookup.get(v, v) except TypeError: # v is something we can't lookup (like a list) return v
def _analyze_func_string(func_string): """ Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments """ func = ast.parse(func_string) try: func_call = func.body[0].value func_name = func_call.func.id except AttributeError: raise SphinxError("Given dynamic function string is not a valid python call. Got: {}".format(func_string)) func_args = [] for arg in func_call.args: if isinstance(arg, ast.Num): func_args.append(arg.n) elif isinstance(arg, ast.Str): func_args.append(arg.s) elif isinstance(arg, ast.BoolOp): func_args.append(arg.s) elif isinstance(arg, ast.List): arg_list = [] for element in arg.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_args.append(arg_list) else: raise FunctionParsingException() func_kargs = {} for keyword in func_call.keywords: kvalue = keyword.value kkey = keyword.arg if isinstance(kvalue, ast.Num): func_kargs[kkey] = kvalue.n elif isinstance(kvalue, ast.Str): func_kargs[kkey] = kvalue.s elif isinstance(kvalue, ast_boolean): # Check if Boolean if is_python3: func_kargs[kkey] = kvalue.value else: func_kargs[kkey] = kvalue.id elif isinstance(kvalue, ast.List): arg_list = [] for element in kvalue.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_kargs[kkey] = arg_list else: raise FunctionParsingException() return func_name, func_args, func_kargs
Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments
Below is the the instruction that describes the task: ### Input: Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments ### Response: def _analyze_func_string(func_string): """ Analyze given functiion string an extract: * function name * function arguments * function keyword arguments All given arguments must by of type string, int/float or list. :param func_string: string of the function :return: function name, arguments, keyword arguments """ func = ast.parse(func_string) try: func_call = func.body[0].value func_name = func_call.func.id except AttributeError: raise SphinxError("Given dynamic function string is not a valid python call. Got: {}".format(func_string)) func_args = [] for arg in func_call.args: if isinstance(arg, ast.Num): func_args.append(arg.n) elif isinstance(arg, ast.Str): func_args.append(arg.s) elif isinstance(arg, ast.BoolOp): func_args.append(arg.s) elif isinstance(arg, ast.List): arg_list = [] for element in arg.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_args.append(arg_list) else: raise FunctionParsingException() func_kargs = {} for keyword in func_call.keywords: kvalue = keyword.value kkey = keyword.arg if isinstance(kvalue, ast.Num): func_kargs[kkey] = kvalue.n elif isinstance(kvalue, ast.Str): func_kargs[kkey] = kvalue.s elif isinstance(kvalue, ast_boolean): # Check if Boolean if is_python3: func_kargs[kkey] = kvalue.value else: func_kargs[kkey] = kvalue.id elif isinstance(kvalue, ast.List): arg_list = [] for element in kvalue.elts: if isinstance(element, ast.Num): arg_list.append(element.n) elif isinstance(element, ast.Str): arg_list.append(element.s) func_kargs[kkey] = arg_list else: raise FunctionParsingException() return func_name, func_args, func_kargs
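A compact standard-library sketch of the same extraction for purely literal arguments, using ast.literal_eval instead of the per-node-type branches above; it does not reproduce the record's error handling or its Python 2 boolean handling:

import ast

call = ast.parse("my_func(1, 'two', [3, 4], flag=True)").body[0].value
name = call.func.id
args = [ast.literal_eval(a) for a in call.args]
kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in call.keywords}
print(name, args, kwargs)   # my_func [1, 'two', [3, 4]] {'flag': True}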
def load(klass, filename, inject_env=True): """Load a Pipfile from a given filename.""" p = PipfileParser(filename=filename) pipfile = klass(filename=filename) pipfile.data = p.parse(inject_env=inject_env) return pipfile
Load a Pipfile from a given filename.
Below is the the instruction that describes the task: ### Input: Load a Pipfile from a given filename. ### Response: def load(klass, filename, inject_env=True): """Load a Pipfile from a given filename.""" p = PipfileParser(filename=filename) pipfile = klass(filename=filename) pipfile.data = p.parse(inject_env=inject_env) return pipfile
def createNetwork(networkConfig): """ Create and initialize the specified network instance. @param networkConfig: (dict) the configuration of this network. @return network: (Network) The actual network """ registerAllResearchRegions() network = Network() if networkConfig["networkType"] == "L4L2Column": return createL4L2Column(network, networkConfig, "_0") elif networkConfig["networkType"] == "MultipleL4L2Columns": return createMultipleL4L2Columns(network, networkConfig) elif networkConfig["networkType"] == "MultipleL4L2ColumnsWithTopology": return createMultipleL4L2ColumnsWithTopology(network, networkConfig) elif networkConfig["networkType"] == "L2456Columns": return createL2456Columns(network, networkConfig) elif networkConfig["networkType"] == "L4L2TMColumn": return createL4L2TMColumn(network, networkConfig, "_0") elif networkConfig["networkType"] == "CombinedSequenceColumn": return createCombinedSequenceColumn(network, networkConfig, "_0")
Create and initialize the specified network instance. @param networkConfig: (dict) the configuration of this network. @return network: (Network) The actual network
Below is the the instruction that describes the task: ### Input: Create and initialize the specified network instance. @param networkConfig: (dict) the configuration of this network. @return network: (Network) The actual network ### Response: def createNetwork(networkConfig): """ Create and initialize the specified network instance. @param networkConfig: (dict) the configuration of this network. @return network: (Network) The actual network """ registerAllResearchRegions() network = Network() if networkConfig["networkType"] == "L4L2Column": return createL4L2Column(network, networkConfig, "_0") elif networkConfig["networkType"] == "MultipleL4L2Columns": return createMultipleL4L2Columns(network, networkConfig) elif networkConfig["networkType"] == "MultipleL4L2ColumnsWithTopology": return createMultipleL4L2ColumnsWithTopology(network, networkConfig) elif networkConfig["networkType"] == "L2456Columns": return createL2456Columns(network, networkConfig) elif networkConfig["networkType"] == "L4L2TMColumn": return createL4L2TMColumn(network, networkConfig, "_0") elif networkConfig["networkType"] == "CombinedSequenceColumn": return createCombinedSequenceColumn(network, networkConfig, "_0")
def resolve_signing_intent(self): """Determine the correct signing intent Regardless of what was requested, or provided as signing_intent plugin parameter, consult sigkeys of the actual composes used to guarantee information accuracy. """ all_signing_intents = [ self.odcs_config.get_signing_intent_by_keys(compose_info.get('sigkeys', [])) for compose_info in self.composes_info ] # Because composes_info may contain composes that were passed as # plugin parameters, add the parent signing intent to avoid the # overall signing intent from surpassing parent's. if self._parent_signing_intent: all_signing_intents.append(self._parent_signing_intent) # Calculate the least restrictive signing intent signing_intent = min(all_signing_intents, key=lambda x: x['restrictiveness']) self.log.info('Signing intent for build is %s', signing_intent['name']) self.compose_config.set_signing_intent(signing_intent['name'])
Determine the correct signing intent Regardless of what was requested, or provided as signing_intent plugin parameter, consult sigkeys of the actual composes used to guarantee information accuracy.
Below is the the instruction that describes the task: ### Input: Determine the correct signing intent Regardless of what was requested, or provided as signing_intent plugin parameter, consult sigkeys of the actual composes used to guarantee information accuracy. ### Response: def resolve_signing_intent(self): """Determine the correct signing intent Regardless of what was requested, or provided as signing_intent plugin parameter, consult sigkeys of the actual composes used to guarantee information accuracy. """ all_signing_intents = [ self.odcs_config.get_signing_intent_by_keys(compose_info.get('sigkeys', [])) for compose_info in self.composes_info ] # Because composes_info may contain composes that were passed as # plugin parameters, add the parent signing intent to avoid the # overall signing intent from surpassing parent's. if self._parent_signing_intent: all_signing_intents.append(self._parent_signing_intent) # Calculate the least restrictive signing intent signing_intent = min(all_signing_intents, key=lambda x: x['restrictiveness']) self.log.info('Signing intent for build is %s', signing_intent['name']) self.compose_config.set_signing_intent(signing_intent['name'])
def PopulateForm(self): """ +-----------------------------------------------------------------------+ | +--- splitter ------------------------------------------------------+ | | | +-- list widget--------------+ +- IdaSettingsView -------------+ | | | | | | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +----------------------------+ +-------------------------------+ | | | +-------------------------------------------------------------------+ | +-----------------------------------------------------------------------+ """ hbox = QtWidgets.QHBoxLayout(self.parent) self._splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal) self._plugin_list = QtWidgets.QListWidget() plugin_names = set([]) for scope, fn in (("idb", ida_settings.IDASettings.get_idb_plugin_names), ("directory", ida_settings.IDASettings.get_directory_plugin_names), ("user", ida_settings.IDASettings.get_user_plugin_names), ("system", ida_settings.IDASettings.get_system_plugin_names)): for plugin_name in fn(): plugin_names.add(plugin_name) for plugin_name in plugin_names: self._plugin_list.addItem(plugin_name) self._splitter.addWidget(self._plugin_list) hbox.addWidget(self._splitter) self.parent.setLayout(hbox) self._plugin_list.currentItemChanged.connect(self._handle_plugin_changed)
+-----------------------------------------------------------------------+ | +--- splitter ------------------------------------------------------+ | | | +-- list widget--------------+ +- IdaSettingsView -------------+ | | | | | | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +----------------------------+ +-------------------------------+ | | | +-------------------------------------------------------------------+ | +-----------------------------------------------------------------------+
Below is the the instruction that describes the task: ### Input: +-----------------------------------------------------------------------+ | +--- splitter ------------------------------------------------------+ | | | +-- list widget--------------+ +- IdaSettingsView -------------+ | | | | | | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +----------------------------+ +-------------------------------+ | | | +-------------------------------------------------------------------+ | +-----------------------------------------------------------------------+ ### Response: def PopulateForm(self): """ +-----------------------------------------------------------------------+ | +--- splitter ------------------------------------------------------+ | | | +-- list widget--------------+ +- IdaSettingsView -------------+ | | | | | | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | - plugin name | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +----------------------------+ +-------------------------------+ | | | +-------------------------------------------------------------------+ | +-----------------------------------------------------------------------+ """ hbox = QtWidgets.QHBoxLayout(self.parent) self._splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal) self._plugin_list = QtWidgets.QListWidget() plugin_names = set([]) for scope, fn in (("idb", ida_settings.IDASettings.get_idb_plugin_names), ("directory", ida_settings.IDASettings.get_directory_plugin_names), ("user", ida_settings.IDASettings.get_user_plugin_names), ("system", ida_settings.IDASettings.get_system_plugin_names)): for plugin_name in fn(): plugin_names.add(plugin_name) for plugin_name in plugin_names: self._plugin_list.addItem(plugin_name) self._splitter.addWidget(self._plugin_list) hbox.addWidget(self._splitter) self.parent.setLayout(hbox) self._plugin_list.currentItemChanged.connect(self._handle_plugin_changed)
def handle_friday(next_date: Datum, period: str, mult: int, start_date: Datum): """ Extracted the calculation for when the next_day is Friday """ assert isinstance(next_date, Datum) assert isinstance(start_date, Datum) # Starting from line 220. tmp_sat = next_date.clone() tmp_sat.add_days(1) tmp_sun = next_date.clone() tmp_sun.add_days(2) if period == RecurrencePeriod.END_OF_MONTH.value: if (next_date.is_end_of_month() or tmp_sat.is_end_of_month() or tmp_sun.is_end_of_month()): next_date.add_months(1) else: next_date.add_months(mult - 1) else: if tmp_sat.get_day_name() == start_date.get_day_name(): next_date.add_days(1) next_date.add_months(mult) elif tmp_sun.get_day_name() == start_date.get_day_name(): next_date.add_days(2) next_date.add_months(mult) elif next_date.get_day() >= start_date.get_day(): next_date.add_months(mult) elif next_date.is_end_of_month(): next_date.add_months(mult) elif tmp_sat.is_end_of_month(): next_date.add_days(1) next_date.add_months(mult) elif tmp_sun.is_end_of_month(): next_date.add_days(2) next_date.add_months(mult) else: # /* one fewer month fwd because of the occurrence in this month */ next_date.subtract_months(1) return next_date
Extracted the calculation for when the next_date is Friday
Below is the the instruction that describes the task: ### Input: Extracted the calculation for when the next_day is Friday ### Response: def handle_friday(next_date: Datum, period: str, mult: int, start_date: Datum): """ Extracted the calculation for when the next_day is Friday """ assert isinstance(next_date, Datum) assert isinstance(start_date, Datum) # Starting from line 220. tmp_sat = next_date.clone() tmp_sat.add_days(1) tmp_sun = next_date.clone() tmp_sun.add_days(2) if period == RecurrencePeriod.END_OF_MONTH.value: if (next_date.is_end_of_month() or tmp_sat.is_end_of_month() or tmp_sun.is_end_of_month()): next_date.add_months(1) else: next_date.add_months(mult - 1) else: if tmp_sat.get_day_name() == start_date.get_day_name(): next_date.add_days(1) next_date.add_months(mult) elif tmp_sun.get_day_name() == start_date.get_day_name(): next_date.add_days(2) next_date.add_months(mult) elif next_date.get_day() >= start_date.get_day(): next_date.add_months(mult) elif next_date.is_end_of_month(): next_date.add_months(mult) elif tmp_sat.is_end_of_month(): next_date.add_days(1) next_date.add_months(mult) elif tmp_sun.is_end_of_month(): next_date.add_days(2) next_date.add_months(mult) else: # /* one fewer month fwd because of the occurrence in this month */ next_date.subtract_months(1) return next_date
def temporal_latent_to_dist(name, x, hparams, output_channels=None): """Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal """ _, _, width, _, res_channels = common_layers.shape_list(x) if output_channels is None: output_channels = res_channels dilation_rates = get_dilation_rates(hparams, width) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): h = x for i in range(hparams.latent_encoder_depth): if hparams.latent_apply_dilations: h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout) else: h2 = conv_stack("latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout) h += h2 # take last activation that should capture all context since padding is # on left. h = h[:, -1, :, :, :] h = conv("res_final", h, apply_actnorm=False, conv_init="zeros", output_channels=2*output_channels, filter_size=[1, 1]) mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2] return tfp.distributions.Normal(mean, tf.exp(log_scale))
Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal
Below is the the instruction that describes the task: ### Input: Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal ### Response: def temporal_latent_to_dist(name, x, hparams, output_channels=None): """Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal """ _, _, width, _, res_channels = common_layers.shape_list(x) if output_channels is None: output_channels = res_channels dilation_rates = get_dilation_rates(hparams, width) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): h = x for i in range(hparams.latent_encoder_depth): if hparams.latent_apply_dilations: h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout) else: h2 = conv_stack("latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout) h += h2 # take last activation that should capture all context since padding is # on left. h = h[:, -1, :, :, :] h = conv("res_final", h, apply_actnorm=False, conv_init="zeros", output_channels=2*output_channels, filter_size=[1, 1]) mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2] return tfp.distributions.Normal(mean, tf.exp(log_scale))
def basic_generator(in_size:int, n_channels:int, noise_sz:int=100, n_features:int=64, n_extra_layers=0, **conv_kwargs): "A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`." cur_size, cur_ftrs = 4, n_features//2 while cur_size < in_size: cur_size *= 2; cur_ftrs *= 2 layers = [conv_layer(noise_sz, cur_ftrs, 4, 1, transpose=True, **conv_kwargs)] cur_size = 4 while cur_size < in_size // 2: layers.append(conv_layer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **conv_kwargs)) cur_ftrs //= 2; cur_size *= 2 layers += [conv_layer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **conv_kwargs) for _ in range(n_extra_layers)] layers += [conv2d_trans(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()] return nn.Sequential(*layers)
A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`.
Below is the the instruction that describes the task: ### Input: A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`. ### Response: def basic_generator(in_size:int, n_channels:int, noise_sz:int=100, n_features:int=64, n_extra_layers=0, **conv_kwargs): "A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`." cur_size, cur_ftrs = 4, n_features//2 while cur_size < in_size: cur_size *= 2; cur_ftrs *= 2 layers = [conv_layer(noise_sz, cur_ftrs, 4, 1, transpose=True, **conv_kwargs)] cur_size = 4 while cur_size < in_size // 2: layers.append(conv_layer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **conv_kwargs)) cur_ftrs //= 2; cur_size *= 2 layers += [conv_layer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **conv_kwargs) for _ in range(n_extra_layers)] layers += [conv2d_trans(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()] return nn.Sequential(*layers)
def on_disconnect(client): """ Sample on_disconnect function. Handles lost connections. """ print "-- Lost connection to %s" % client.addrport() CLIENT_LIST.remove(client) broadcast('%s leaves the conversation.\n' % client.addrport() )
Sample on_disconnect function. Handles lost connections.
Below is the the instruction that describes the task: ### Input: Sample on_disconnect function. Handles lost connections. ### Response: def on_disconnect(client): """ Sample on_disconnect function. Handles lost connections. """ print "-- Lost connection to %s" % client.addrport() CLIENT_LIST.remove(client) broadcast('%s leaves the conversation.\n' % client.addrport() )
def _split_comma_separated(string): """Return a set of strings.""" return set(text.strip() for text in string.split(',') if text.strip())
Return a set of strings.
Below is the the instruction that describes the task: ### Input: Return a set of strings. ### Response: def _split_comma_separated(string): """Return a set of strings.""" return set(text.strip() for text in string.split(',') if text.strip())
def save(self, *args, **kwargs): """ Custom save method does the following: * automatically inherit node coordinates and elevation * save shortcuts if HSTORE is enabled """ custom_checks = kwargs.pop('custom_checks', True) super(Device, self).save(*args, **kwargs) if custom_checks is False: return changed = False if not self.location: self.location = self.node.point changed = True if not self.elev and self.node.elev: self.elev = self.node.elev changed = True original_user = self.shortcuts.get('user') if self.node.user: self.shortcuts['user'] = self.node.user if original_user != self.shortcuts.get('user'): changed = True if 'nodeshot.core.layers' in settings.INSTALLED_APPS: original_layer = self.shortcuts.get('layer') self.shortcuts['layer'] = self.node.layer if original_layer != self.shortcuts.get('layer'): changed = True if changed: self.save(custom_checks=False)
Custom save method does the following: * automatically inherit node coordinates and elevation * save shortcuts if HSTORE is enabled
Below is the the instruction that describes the task: ### Input: Custom save method does the following: * automatically inherit node coordinates and elevation * save shortcuts if HSTORE is enabled ### Response: def save(self, *args, **kwargs): """ Custom save method does the following: * automatically inherit node coordinates and elevation * save shortcuts if HSTORE is enabled """ custom_checks = kwargs.pop('custom_checks', True) super(Device, self).save(*args, **kwargs) if custom_checks is False: return changed = False if not self.location: self.location = self.node.point changed = True if not self.elev and self.node.elev: self.elev = self.node.elev changed = True original_user = self.shortcuts.get('user') if self.node.user: self.shortcuts['user'] = self.node.user if original_user != self.shortcuts.get('user'): changed = True if 'nodeshot.core.layers' in settings.INSTALLED_APPS: original_layer = self.shortcuts.get('layer') self.shortcuts['layer'] = self.node.layer if original_layer != self.shortcuts.get('layer'): changed = True if changed: self.save(custom_checks=False)
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): """Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. """ # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
Below is the the instruction that describes the task: ### Input: Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. ### Response: def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): """Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. """ # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
def inputText(message="", title="Lackey Input", lines=9, width=20, text=""): """ Creates a textarea dialog with the specified message and default text. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(text) PopupTextarea(root, message, title, lines, width, input_text) root.focus_force() root.mainloop() return str(input_text.get())
Creates a textarea dialog with the specified message and default text. Returns the entered value.
Below is the the instruction that describes the task: ### Input: Creates a textarea dialog with the specified message and default text. Returns the entered value. ### Response: def inputText(message="", title="Lackey Input", lines=9, width=20, text=""): """ Creates a textarea dialog with the specified message and default text. Returns the entered value. """ root = tk.Tk() input_text = tk.StringVar() input_text.set(text) PopupTextarea(root, message, title, lines, width, input_text) root.focus_force() root.mainloop() return str(input_text.get())
def noun_chunks(obj): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ # this iterator extracts spans headed by NOUNs starting from the left-most # syntactic dependent until the NOUN itself for close apposition and # measurement construction, the span is sometimes extended to the right of # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" # and not just "eine Tasse", same for "das Thema Familie". labels = [ "sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app", ] doc = obj.doc # Ensure works on both Doc and Span. np_label = doc.vocab.strings.add("NP") np_deps = set(doc.vocab.strings.add(label) for label in labels) close_app = doc.vocab.strings.add("nk") rbracket = 0 for i, word in enumerate(obj): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: rbracket = word.i + 1 # try to extend the span to the right # to capture close apposition/measurement constructions for rdep in doc[word.i].rights: if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app: rbracket = rdep.i + 1 yield word.left_edge.i, rbracket, np_label
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
Below is the the instruction that describes the task: ### Input: Detect base noun phrases from a dependency parse. Works on both Doc and Span. ### Response: def noun_chunks(obj): """ Detect base noun phrases from a dependency parse. Works on both Doc and Span. """ # this iterator extracts spans headed by NOUNs starting from the left-most # syntactic dependent until the NOUN itself for close apposition and # measurement construction, the span is sometimes extended to the right of # the NOUN. Example: "eine Tasse Tee" (a cup (of) tea) returns "eine Tasse Tee" # and not just "eine Tasse", same for "das Thema Familie". labels = [ "sb", "oa", "da", "nk", "mo", "ag", "ROOT", "root", "cj", "pd", "og", "app", ] doc = obj.doc # Ensure works on both Doc and Span. np_label = doc.vocab.strings.add("NP") np_deps = set(doc.vocab.strings.add(label) for label in labels) close_app = doc.vocab.strings.add("nk") rbracket = 0 for i, word in enumerate(obj): if i < rbracket: continue if word.pos in (NOUN, PROPN, PRON) and word.dep in np_deps: rbracket = word.i + 1 # try to extend the span to the right # to capture close apposition/measurement constructions for rdep in doc[word.i].rights: if rdep.pos in (NOUN, PROPN) and rdep.dep == close_app: rbracket = rdep.i + 1 yield word.left_edge.i, rbracket, np_label
def write(self, items): """Write items into ElasticSearch. :param items: Pandas DataFrame """ if self._read_only: raise IOError("Cannot write, Connector created as Read Only") if len(items) == 0: logger.info(self.__log_prefix + " Nothing to write") return # Uploading info to the new ES rows = items.to_dict("index") docs = [] for row_index in rows.keys(): row = rows[row_index] item_id = row[self.AUTHOR_ORG] + '_' + row[self.PROJECT] + '_' \ + row[self.TIMEFRAME] + '_' + row[self.AUTHOR_UUID] item_id = item_id.replace(' ', '').lower() doc = { "_index": self._es_index, "_type": "item", "_id": item_id, "_source": row } docs.append(doc) # TODO uncomment following lines for incremental version # # Delete old data if exists to ensure refreshing in case of deleted commits # timeframe = docs[0]['_source']['timeframe'] # org = docs[0]['_source']['author_org_name'] # project = docs[0]['_source']['project'] # s = Search(using=self._es_conn, index=self._es_index) # s = s.filter('term', project=project) # s = s.filter('term', author_org_name=org) # s = s.filter('term', timeframe=timeframe) # response = s.execute() # # if response.hits.total > 0: # response = s.delete() # logger.info("[Onion] Deleted " + str(response.deleted) + " items for refreshing: " + timeframe + " " # + org + " " + project) # TODO exception and error handling helpers.bulk(self._es_conn, docs) logger.info(self.__log_prefix + " Written: " + str(len(docs)))
Write items into ElasticSearch. :param items: Pandas DataFrame
Below is the the instruction that describes the task: ### Input: Write items into ElasticSearch. :param items: Pandas DataFrame ### Response: def write(self, items): """Write items into ElasticSearch. :param items: Pandas DataFrame """ if self._read_only: raise IOError("Cannot write, Connector created as Read Only") if len(items) == 0: logger.info(self.__log_prefix + " Nothing to write") return # Uploading info to the new ES rows = items.to_dict("index") docs = [] for row_index in rows.keys(): row = rows[row_index] item_id = row[self.AUTHOR_ORG] + '_' + row[self.PROJECT] + '_' \ + row[self.TIMEFRAME] + '_' + row[self.AUTHOR_UUID] item_id = item_id.replace(' ', '').lower() doc = { "_index": self._es_index, "_type": "item", "_id": item_id, "_source": row } docs.append(doc) # TODO uncomment following lines for incremental version # # Delete old data if exists to ensure refreshing in case of deleted commits # timeframe = docs[0]['_source']['timeframe'] # org = docs[0]['_source']['author_org_name'] # project = docs[0]['_source']['project'] # s = Search(using=self._es_conn, index=self._es_index) # s = s.filter('term', project=project) # s = s.filter('term', author_org_name=org) # s = s.filter('term', timeframe=timeframe) # response = s.execute() # # if response.hits.total > 0: # response = s.delete() # logger.info("[Onion] Deleted " + str(response.deleted) + " items for refreshing: " + timeframe + " " # + org + " " + project) # TODO exception and error handling helpers.bulk(self._es_conn, docs) logger.info(self.__log_prefix + " Written: " + str(len(docs)))
def get_upregulated_genes_network(self) -> Graph: """Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes. """ logger.info("In get_upregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(up_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes.
Below is the the instruction that describes the task: ### Input: Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes. ### Response: def get_upregulated_genes_network(self) -> Graph: """Get the graph of up-regulated genes. :return Graph: Graph of up-regulated genes. """ logger.info("In get_upregulated_genes_network()") deg_graph = self.graph.copy() # deep copy graph not_diff_expr = self.graph.vs(up_regulated_eq=False) # delete genes which are not differentially expressed or have no connections to others deg_graph.delete_vertices(not_diff_expr.indices) deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0)) return deg_graph
def _validate_compute_resources(self, cr): """ Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict """ for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): if param not in cr: raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) if cr['maxvCpus'] < 0: raise InvalidParameterValueException('maxVCpus must be positive') if cr['minvCpus'] < 0: raise InvalidParameterValueException('minVCpus must be positive') if cr['maxvCpus'] < cr['minvCpus']: raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') for instance_type in cr['instanceTypes']: if instance_type == 'optimal': pass # Optimal should pick from latest of current gen elif instance_type not in EC2_INSTANCE_TYPES: raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) for sec_id in cr['securityGroupIds']: if self.ec2_backend.get_security_group_from_id(sec_id) is None: raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) if len(cr['securityGroupIds']) == 0: raise InvalidParameterValueException('At least 1 security group must be provided') for subnet_id in cr['subnets']: try: self.ec2_backend.get_subnet(subnet_id) except InvalidSubnetIdError: raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) if len(cr['subnets']) == 0: raise InvalidParameterValueException('At least 1 subnet must be provided') if cr['type'] not in ('EC2', 'SPOT'): raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') if cr['type'] == 'SPOT': raise InternalFailure('SPOT NOT SUPPORTED YET')
Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict
Below is the the instruction that describes the task: ### Input: Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict ### Response: def _validate_compute_resources(self, cr): """ Checks contents of sub dictionary for managed clusters :param cr: computeResources :type cr: dict """ for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): if param not in cr: raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) if cr['maxvCpus'] < 0: raise InvalidParameterValueException('maxVCpus must be positive') if cr['minvCpus'] < 0: raise InvalidParameterValueException('minVCpus must be positive') if cr['maxvCpus'] < cr['minvCpus']: raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') if len(cr['instanceTypes']) == 0: raise InvalidParameterValueException('At least 1 instance type must be provided') for instance_type in cr['instanceTypes']: if instance_type == 'optimal': pass # Optimal should pick from latest of current gen elif instance_type not in EC2_INSTANCE_TYPES: raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) for sec_id in cr['securityGroupIds']: if self.ec2_backend.get_security_group_from_id(sec_id) is None: raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) if len(cr['securityGroupIds']) == 0: raise InvalidParameterValueException('At least 1 security group must be provided') for subnet_id in cr['subnets']: try: self.ec2_backend.get_subnet(subnet_id) except InvalidSubnetIdError: raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) if len(cr['subnets']) == 0: raise InvalidParameterValueException('At least 1 subnet must be provided') if cr['type'] not in ('EC2', 'SPOT'): raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') if cr['type'] == 'SPOT': raise InternalFailure('SPOT NOT SUPPORTED YET')
def find_all(self, pattern): """ Searches for all occurrences of a pattern in the current memory segment, returns all occurrences as a list """ pos = [] last_found = -1 while True: last_found = self.current_segment.data.find(pattern, last_found + 1) if last_found == -1: break pos.append(last_found + self.current_segment.start_address) return pos
Searches for all occurrences of a pattern in the current memory segment, returns all occurrences as a list
Below is the the instruction that describes the task: ### Input: Searches for all occurrences of a pattern in the current memory segment, returns all occurrences as a list ### Response: def find_all(self, pattern): """ Searches for all occurrences of a pattern in the current memory segment, returns all occurrences as a list """ pos = [] last_found = -1 while True: last_found = self.current_segment.data.find(pattern, last_found + 1) if last_found == -1: break pos.append(last_found + self.current_segment.start_address) return pos
def list_tables(self, like=None, database=None): """ List tables in the current (or indicated) database. Like the SHOW TABLES command in the clickhouse-shell. Parameters ---------- like : string, default None e.g. 'foo*' to match all tables starting with 'foo' database : string, default None If not passed, uses the current/default database Returns ------- tables : list of strings """ statement = 'SHOW TABLES' if database: statement += " FROM `{0}`".format(database) if like: m = fully_qualified_re.match(like) if m: database, quoted, unquoted = m.groups() like = quoted or unquoted return self.list_tables(like=like, database=database) statement += " LIKE '{0}'".format(like) data, _, _ = self.raw_sql(statement, results=True) return data[0]
List tables in the current (or indicated) database. Like the SHOW TABLES command in the clickhouse-shell. Parameters ---------- like : string, default None e.g. 'foo*' to match all tables starting with 'foo' database : string, default None If not passed, uses the current/default database Returns ------- tables : list of strings
Below is the the instruction that describes the task: ### Input: List tables in the current (or indicated) database. Like the SHOW TABLES command in the clickhouse-shell. Parameters ---------- like : string, default None e.g. 'foo*' to match all tables starting with 'foo' database : string, default None If not passed, uses the current/default database Returns ------- tables : list of strings ### Response: def list_tables(self, like=None, database=None): """ List tables in the current (or indicated) database. Like the SHOW TABLES command in the clickhouse-shell. Parameters ---------- like : string, default None e.g. 'foo*' to match all tables starting with 'foo' database : string, default None If not passed, uses the current/default database Returns ------- tables : list of strings """ statement = 'SHOW TABLES' if database: statement += " FROM `{0}`".format(database) if like: m = fully_qualified_re.match(like) if m: database, quoted, unquoted = m.groups() like = quoted or unquoted return self.list_tables(like=like, database=database) statement += " LIKE '{0}'".format(like) data, _, _ = self.raw_sql(statement, results=True) return data[0]
def _ValueMessageToJsonObject(self, message): """Converts Value message according to Proto3 JSON Specification.""" which = message.WhichOneof('kind') # If the Value message is not set treat as null_value when serialize # to JSON. The parse back result will be different from original message. if which is None or which == 'null_value': return None if which == 'list_value': return self._ListValueMessageToJsonObject(message.list_value) if which == 'struct_value': value = message.struct_value else: value = getattr(message, which) oneof_descriptor = message.DESCRIPTOR.fields_by_name[which] return self._FieldToJsonObject(oneof_descriptor, value)
Converts Value message according to Proto3 JSON Specification.
Below is the the instruction that describes the task: ### Input: Converts Value message according to Proto3 JSON Specification. ### Response: def _ValueMessageToJsonObject(self, message): """Converts Value message according to Proto3 JSON Specification.""" which = message.WhichOneof('kind') # If the Value message is not set treat as null_value when serialize # to JSON. The parse back result will be different from original message. if which is None or which == 'null_value': return None if which == 'list_value': return self._ListValueMessageToJsonObject(message.list_value) if which == 'struct_value': value = message.struct_value else: value = getattr(message, which) oneof_descriptor = message.DESCRIPTOR.fields_by_name[which] return self._FieldToJsonObject(oneof_descriptor, value)
def _classname(self): """Return the fully qualified class name.""" if self.__class__.__module__ in (None,): return self.__class__.__name__ else: return "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
Return the fully qualified class name.
Below is the the instruction that describes the task: ### Input: Return the fully qualified class name. ### Response: def _classname(self): """Return the fully qualified class name.""" if self.__class__.__module__ in (None,): return self.__class__.__name__ else: return "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
def _render(self, request, formencode=False, realm=None): """Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string. """ # TODO what if there are body params on a header-type auth? # TODO what if there are query params on a body-type auth? uri, headers, body = request.uri, request.headers, request.body # TODO: right now these prepare_* methods are very narrow in scope--they # only affect their little thing. In some cases (for example, with # header auth) it might be advantageous to allow these methods to touch # other parts of the request, like the headers—so the prepare_headers # method could also set the Content-Type header to x-www-form-urlencoded # like the spec requires. This would be a fundamental change though, and # I'm not sure how I feel about it. if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER: headers = parameters.prepare_headers( request.oauth_params, request.headers, realm=realm) elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None: body = parameters.prepare_form_encoded_body( request.oauth_params, request.decoded_body) if formencode: body = urlencode(body) headers['Content-Type'] = 'application/x-www-form-urlencoded' elif self.signature_type == SIGNATURE_TYPE_QUERY: uri = parameters.prepare_request_uri_query( request.oauth_params, request.uri) else: raise ValueError('Unknown signature type specified.') return uri, headers, body
Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string.
Below is the the instruction that describes the task: ### Input: Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string. ### Response: def _render(self, request, formencode=False, realm=None): """Render a signed request according to signature type Returns a 3-tuple containing the request URI, headers, and body. If the formencode argument is True and the body contains parameters, it is escaped and returned as a valid formencoded string. """ # TODO what if there are body params on a header-type auth? # TODO what if there are query params on a body-type auth? uri, headers, body = request.uri, request.headers, request.body # TODO: right now these prepare_* methods are very narrow in scope--they # only affect their little thing. In some cases (for example, with # header auth) it might be advantageous to allow these methods to touch # other parts of the request, like the headers—so the prepare_headers # method could also set the Content-Type header to x-www-form-urlencoded # like the spec requires. This would be a fundamental change though, and # I'm not sure how I feel about it. if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER: headers = parameters.prepare_headers( request.oauth_params, request.headers, realm=realm) elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None: body = parameters.prepare_form_encoded_body( request.oauth_params, request.decoded_body) if formencode: body = urlencode(body) headers['Content-Type'] = 'application/x-www-form-urlencoded' elif self.signature_type == SIGNATURE_TYPE_QUERY: uri = parameters.prepare_request_uri_query( request.oauth_params, request.uri) else: raise ValueError('Unknown signature type specified.') return uri, headers, body
def get_fetcher(data_format, data_class): """Return the :meth:`~EventTable.fetch` function for the given format Parameters ---------- data_format : `str` name of the format data_class : `type` the class that the fetcher returns Raises ------ astropy.io.registry.IORegistryError if not registration is found matching ``data_format`` """ # this is a copy of astropy.io.regsitry.get_reader fetchers = [(fmt, cls) for fmt, cls in _FETCHERS if fmt == data_format] for fetch_fmt, fetch_cls in fetchers: if io_registry._is_best_match(data_class, fetch_cls, fetchers): return _FETCHERS[(fetch_fmt, fetch_cls)][0] else: formats = [fmt for fmt, cls in _FETCHERS if io_registry._is_best_match(fmt, cls, fetchers)] formatstr = '\n'.join(sorted(formats)) raise IORegistryError( "No fetcher definer for format '{0}' and class '{1}'.\n" "The available formats are:\n{2}".format( data_format, data_class.__name__, formatstr))
Return the :meth:`~EventTable.fetch` function for the given format Parameters ---------- data_format : `str` name of the format data_class : `type` the class that the fetcher returns Raises ------ astropy.io.registry.IORegistryError if no registration is found matching ``data_format``
Below is the the instruction that describes the task: ### Input: Return the :meth:`~EventTable.fetch` function for the given format Parameters ---------- data_format : `str` name of the format data_class : `type` the class that the fetcher returns Raises ------ astropy.io.registry.IORegistryError if not registration is found matching ``data_format`` ### Response: def get_fetcher(data_format, data_class): """Return the :meth:`~EventTable.fetch` function for the given format Parameters ---------- data_format : `str` name of the format data_class : `type` the class that the fetcher returns Raises ------ astropy.io.registry.IORegistryError if not registration is found matching ``data_format`` """ # this is a copy of astropy.io.regsitry.get_reader fetchers = [(fmt, cls) for fmt, cls in _FETCHERS if fmt == data_format] for fetch_fmt, fetch_cls in fetchers: if io_registry._is_best_match(data_class, fetch_cls, fetchers): return _FETCHERS[(fetch_fmt, fetch_cls)][0] else: formats = [fmt for fmt, cls in _FETCHERS if io_registry._is_best_match(fmt, cls, fetchers)] formatstr = '\n'.join(sorted(formats)) raise IORegistryError( "No fetcher definer for format '{0}' and class '{1}'.\n" "The available formats are:\n{2}".format( data_format, data_class.__name__, formatstr))
def get_guest_connection_status(self, userid): '''Get guest vm connection status.''' rd = ' '.join(('getvm', userid, 'isreachable')) results = self._request(rd) if results['rs'] == 1: return True else: return False
Get guest vm connection status.
Below is the the instruction that describes the task: ### Input: Get guest vm connection status. ### Response: def get_guest_connection_status(self, userid): '''Get guest vm connection status.''' rd = ' '.join(('getvm', userid, 'isreachable')) results = self._request(rd) if results['rs'] == 1: return True else: return False
def create_subscriber(self): '''Create a subscriber instance using specified addresses and message types. ''' if self.subscriber is None: if self.topics: self.subscriber = NSSubscriber(self.services, self.topics, addr_listener=True, addresses=self.addresses, nameserver=self.nameserver) self.recv = self.subscriber.start().recv
Create a subscriber instance using specified addresses and message types.
Below is the the instruction that describes the task: ### Input: Create a subscriber instance using specified addresses and message types. ### Response: def create_subscriber(self): '''Create a subscriber instance using specified addresses and message types. ''' if self.subscriber is None: if self.topics: self.subscriber = NSSubscriber(self.services, self.topics, addr_listener=True, addresses=self.addresses, nameserver=self.nameserver) self.recv = self.subscriber.start().recv
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) if subpop is None: ac = haplotype_array_count_alleles(values, max_allele) else: ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1
Below is the the instruction that describes the task: ### Input: Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1 ### Response: def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) if subpop is None: ac = haplotype_array_count_alleles(values, max_allele) else: ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
def read_hashes(self): """ Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type. """ var_len = self.read_var_int() items = [] for _ in range(0, var_len): ba = bytearray(self.read_bytes(32)) ba.reverse() items.append(ba.hex()) return items
Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type.
Below is the the instruction that describes the task: ### Input: Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type. ### Response: def read_hashes(self): """ Read Hash values from the stream. Returns: list: a list of hash values. Each value is of the bytearray type. """ var_len = self.read_var_int() items = [] for _ in range(0, var_len): ba = bytearray(self.read_bytes(32)) ba.reverse() items.append(ba.hex()) return items
def _validate_granttype(self, path, obj, _): """ make sure either implicit or authorization_code is defined """ errs = [] if not obj.implicit and not obj.authorization_code: errs.append('Either implicit or authorization_code should be defined.') return path, obj.__class__.__name__, errs
make sure either implicit or authorization_code is defined
Below is the the instruction that describes the task: ### Input: make sure either implicit or authorization_code is defined ### Response: def _validate_granttype(self, path, obj, _): """ make sure either implicit or authorization_code is defined """ errs = [] if not obj.implicit and not obj.authorization_code: errs.append('Either implicit or authorization_code should be defined.') return path, obj.__class__.__name__, errs