code: string (length 75 to 104k)
docstring: string (length 1 to 46.9k)
def _validate_contains(self, expected_values, field, value): """ {'empty': False } """ if not isinstance(value, Iterable): return if not isinstance(expected_values, Iterable) or isinstance( expected_values, _str_type ): expected_values = set((expected_values,)) else: expected_values = set(expected_values) missing_values = expected_values - set(value) if missing_values: self._error(field, errors.MISSING_MEMBERS, missing_values)
{'empty': False }
def add_to_group(server_context, user_ids, group_id, container_path=None): """ Add user to group :param server_context: A LabKey server context. See utils.create_server_context. :param user_ids: users to add :param group_id: to add to :param container_path: :return: """ return __make_security_group_api_request(server_context, 'addGroupMember.api', user_ids, group_id, container_path)
Add user to group :param server_context: A LabKey server context. See utils.create_server_context. :param user_ids: users to add :param group_id: to add to :param container_path: :return:
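A hypothetical call to the helper above, assuming the package's utils module exposes create_server_context as the docstring says; the host, project name, user ids and group id are made-up values.

# Hypothetical usage; host, container path, user ids and group id are invented.
from labkey.utils import create_server_context   # module referenced in the docstring

server_context = create_server_context('labkey.example.com', 'MyProject')
result = add_to_group(server_context, user_ids=[1001, 1002], group_id=11)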
def unsubscribe_from_data( self, subscriber: Callable[[bytes], bool], ) -> None: """ Not thread-safe. """ self._data_subscribers.remove(subscriber)
Not thread-safe.
def import_string(import_name, silent=False): """Imports an object based on a string. This is useful if you want to use import paths as endpoints or something similar. An import path can be specified either in dotted notation (``xml.sax.saxutils.escape``) or with a colon as object delimiter (``xml.sax.saxutils:escape``). If `silent` is True the return value will be `None` if the import fails. :param import_name: the dotted name for the object to import. :param silent: if set to `True` import errors are ignored and `None` is returned instead. :return: imported object """ # force the import name to automatically convert to strings # __import__ is not able to handle unicode strings in the fromlist # if the module is a package import_name = str(import_name).replace(':', '.') try: try: __import__(import_name) except ImportError: if '.' not in import_name: raise else: return sys.modules[import_name] module_name, obj_name = import_name.rsplit('.', 1) try: module = __import__(module_name, None, None, [obj_name]) except ImportError: # support importing modules not yet set up by the parent module # (or package for that matter) module = import_string(module_name) try: return getattr(module, obj_name) except AttributeError as e: raise ImportError(e) except ImportError as e: if not silent: raise e
Imports an object based on a string. This is useful if you want to use import paths as endpoints or something similar. An import path can be specified either in dotted notation (``xml.sax.saxutils.escape``) or with a colon as object delimiter (``xml.sax.saxutils:escape``). If `silent` is True the return value will be `None` if the import fails. :param import_name: the dotted name for the object to import. :param silent: if set to `True` import errors are ignored and `None` is returned instead. :return: imported object
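A usage sketch of the two accepted path notations, assuming import_string above is in scope; the failing module name is made up.

# Both notations resolve to the same object; silent=True turns a failed
# import into a None return instead of an ImportError.
from xml.sax.saxutils import escape

assert import_string('xml.sax.saxutils.escape') is escape
assert import_string('xml.sax.saxutils:escape') is escape
assert import_string('no.such.module', silent=True) is None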
def create_new_file(help_string=NO_HELP, default=NO_DEFAULT, suffixes=None): # type: (str, Union[str, NO_DEFAULT_TYPE], Union[List[str], None]) -> str """ Create a new file parameter :param help_string: :param default: :param suffixes: :return: """ # noinspection PyTypeChecker return ParamFilename( help_string=help_string, default=default, type_name="new_file", suffixes=suffixes, )
Create a new file parameter :param help_string: :param default: :param suffixes: :return:
def setup_auditlog_catalog(portal): """Setup auditlog catalog """ logger.info("*** Setup Audit Log Catalog ***") catalog_id = auditlog_catalog.CATALOG_AUDITLOG catalog = api.get_tool(catalog_id) for name, meta_type in auditlog_catalog._indexes.iteritems(): indexes = catalog.indexes() if name in indexes: logger.info("*** Index '%s' already in Catalog [SKIP]" % name) continue logger.info("*** Adding Index '%s' for field '%s' to catalog ..." % (meta_type, name)) catalog.addIndex(name, meta_type) # Setup TextIndexNG3 for listings # XXX is there another way to do this? if meta_type == "TextIndexNG3": index = catalog._catalog.getIndex(name) index.index.default_encoding = "utf-8" index.index.query_parser = "txng.parsers.en" index.index.autoexpand = "always" index.index.autoexpand_limit = 3 logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]" % (meta_type, name)) # Attach the catalog to all known portal types at = api.get_tool("archetype_tool") pt = api.get_tool("portal_types") for portal_type in pt.listContentTypes(): catalogs = at.getCatalogsByType(portal_type) if catalog not in catalogs: new_catalogs = map(lambda c: c.getId(), catalogs) + [catalog_id] at.setCatalogsByType(portal_type, new_catalogs) logger.info("*** Adding catalog '{}' for '{}'".format( catalog_id, portal_type))
Setup auditlog catalog
def query_saved_guest_screen_info(self, screen_id): """Returns the guest dimensions from the saved state. in screen_id of type int Saved guest screen to query info from. out origin_x of type int The X position of the guest monitor top left corner. out origin_y of type int The Y position of the guest monitor top left corner. out width of type int Guest width at the time of the saved state was taken. out height of type int Guest height at the time of the saved state was taken. out enabled of type bool Whether the monitor is enabled in the guest. """ if not isinstance(screen_id, baseinteger): raise TypeError("screen_id can only be an instance of type baseinteger") (origin_x, origin_y, width, height, enabled) = self._call("querySavedGuestScreenInfo", in_p=[screen_id]) return (origin_x, origin_y, width, height, enabled)
Returns the guest dimensions from the saved state. in screen_id of type int Saved guest screen to query info from. out origin_x of type int The X position of the guest monitor top left corner. out origin_y of type int The Y position of the guest monitor top left corner. out width of type int Guest width at the time of the saved state was taken. out height of type int Guest height at the time of the saved state was taken. out enabled of type bool Whether the monitor is enabled in the guest.
def validate_wavelengths(wavelengths): """Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array. """ if isinstance(wavelengths, u.Quantity): units.validate_wave_unit(wavelengths.unit) wave = wavelengths.value else: wave = wavelengths if np.isscalar(wave): wave = [wave] wave = np.asarray(wave) # Check for zeroes if np.any(wave <= 0): raise exceptions.ZeroWavelength( 'Negative or zero wavelength occurs in wavelength array', rows=np.where(wave <= 0)[0]) # Check for monotonicity sorted_wave = np.sort(wave) if not np.alltrue(sorted_wave == wave): if np.alltrue(sorted_wave[::-1] == wave): pass # Monotonic descending is allowed else: raise exceptions.UnsortedWavelength( 'Wavelength array is not monotonic', rows=np.where(sorted_wave != wave)[0]) # Check for duplicate values if wave.size > 1: dw = sorted_wave[1:] - sorted_wave[:-1] if np.any(dw == 0): raise exceptions.DuplicateWavelength( 'Wavelength array contains duplicate entries', rows=np.where(dw == 0)[0])
Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array.
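A usage sketch of the validator above, assuming synphot and astropy are installed; in released synphot this helper lives in synphot.utils.

import numpy as np
from astropy import units as u
from synphot import exceptions
from synphot.utils import validate_wavelengths

validate_wavelengths([1000.0, 2000.0, 3000.0] * u.AA)   # valid: returns None

try:
    validate_wavelengths(np.array([3000.0, 1000.0, 2000.0]))  # not monotonic
except exceptions.UnsortedWavelength as err:
    print(err)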
def friends(self, delegate, params={}, extra_args=None): """Get updates from friends. Calls the delegate once for each status object received.""" return self.__get('/statuses/friends_timeline.xml', delegate, params, txml.Statuses, extra_args=extra_args)
Get updates from friends. Calls the delegate once for each status object received.
def offset_data(data_section, offset, readable = False, wraparound = False): """ Offset the whole data section. See offset_byte_in_data for more information Returns: the entire data section + offset on each byte """ for pos in range(0, len(data_section)//2): data_section = offset_byte_in_data(data_section, offset, pos, readable, wraparound) return data_section
Offset the whole data section. See offset_byte_in_data for more information Returns: the entire data section + offset on each byte
def map(self, func): """ A lazy way to apply the given function to each element in the stream. Useful for type casting, like: >>> from audiolazy import count >>> count().take(5) [0, 1, 2, 3, 4] >>> my_stream = count().map(float) >>> my_stream.take(5) # A float counter [0.0, 1.0, 2.0, 3.0, 4.0] """ self._data = xmap(func, self._data) return self
A lazy way to apply the given function to each element in the stream. Useful for type casting, like: >>> from audiolazy import count >>> count().take(5) [0, 1, 2, 3, 4] >>> my_stream = count().map(float) >>> my_stream.take(5) # A float counter [0.0, 1.0, 2.0, 3.0, 4.0]
def class_variables(self): """ Returns all documented class variables in the class, sorted alphabetically as a list of `pydoc.Variable`. """ p = lambda o: isinstance(o, Variable) and self.module._docfilter(o) return filter(p, self.doc.values())
Returns all documented class variables in the class, sorted alphabetically as a list of `pydoc.Variable`.
def GetSources(self, event): """Determines the short and long source for an event object. Args: event (EventObject): event. Returns: tuple(str, str): short and long source string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter. """ if self.DATA_TYPE != event.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event.data_type)) source_long = getattr(event, 'source_long', 'UNKNOWN') source_append = getattr(event, 'source_append', None) if source_append: source_long = '{0:s} {1:s}'.format(source_long, source_append) return self.SOURCE_SHORT, source_long
Determines the short and long source for an event object. Args: event (EventObject): event. Returns: tuple(str, str): short and long source string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
def usearch(query, db, type, out, threads = '6', evalue = '100', alignment = 'local', max_hits = 100, cluster = False): """ run usearch """ if 'usearch64' in os.environ: usearch_loc = os.environ['usearch64'] else: usearch_loc = 'usearch' if os.path.exists(out) is False: db = usearchdb(db, alignment, usearch_loc) # make the database file, if neceesary print('# ... running usearch with %s as query and %s as database' % (query, db), file=sys.stderr) if type == 'nucl': strand = '-strand both' else: strand = '' if alignment == 'local' and cluster is False: os.system('%s \ -ublast %s -db %s -blast6out %s \ -evalue %s -threads %s %s -maxhits %s >> log.txt' \ % (usearch_loc, query, db, out, evalue, threads, strand, max_hits)) elif alignment == 'global' and cluster is False: os.system('%s \ -usearch_global %s -db %s -blast6out %s \ -id 0.10 -threads %s %s >> log.txt' \ % (usearch_loc, query, db, out, threads, strand)) elif alignment == 'local' and cluster is True: qsub = 'qsub -V -N usearch' os.system('echo "%s -ublast `pwd`/%s -db %s -blast6out `pwd`/%s -evalue %s -threads %s %s -maxhits %s >> `pwd`/log.txt" | %s' \ % (usearch_loc, query, db, out, evalue, threads, strand, max_hits, qsub)) else: print('specify local or global alignment', file=sys.stderr) exit() else: print('# ... usearch output found for %s as query and %s as database' % (query, db), file=sys.stderr)
run usearch
def _reregister_types(self): """Registers existing types for a new connection""" for _type in self._register_types: psycopg2.extensions.register_type(psycopg2.extensions.new_type(*_type))
Registers existing types for a new connection
def unfreeze_extensions(self): """Remove a previously frozen list of extensions.""" output_path = os.path.join(_registry_folder(), 'frozen_extensions.json') if not os.path.isfile(output_path): raise ExternalError("There is no frozen extension list") os.remove(output_path) ComponentRegistry._frozen_extensions = None
Remove a previously frozen list of extensions.
async def run_asgi(self): """ Wrapper around the ASGI callable, handling exceptions and unexpected termination states. """ try: result = await self.app(self.scope, self.asgi_receive, self.asgi_send) except BaseException as exc: self.closed_event.set() msg = "Exception in ASGI application\n" self.logger.error(msg, exc_info=exc) if not self.handshake_started_event.is_set(): self.send_500_response() else: await self.handshake_completed_event.wait() self.transport.close() else: self.closed_event.set() if not self.handshake_started_event.is_set(): msg = "ASGI callable returned without sending handshake." self.logger.error(msg) self.send_500_response() self.transport.close() elif result is not None: msg = "ASGI callable should return None, but returned '%s'." self.logger.error(msg, result) await self.handshake_completed_event.wait() self.transport.close()
Wrapper around the ASGI callable, handling exceptions and unexpected termination states.
def read_config(args): """ Read configuration options from ~/.shakedown (if exists) :param args: a dict of arguments :type args: dict :return: a dict of arguments :rtype: dict """ configfile = os.path.expanduser('~/.shakedown') if os.path.isfile(configfile): with open(configfile, 'r') as f: config = toml.loads(f.read()) for key in config: param = key.replace('-', '_') if not param in args or args[param] in [False, None]: args[param] = config[key] return args
Read configuration options from ~/.shakedown (if exists) :param args: a dict of arguments :type args: dict :return: a dict of arguments :rtype: dict
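A sketch of the merge behaviour of read_config above, with hypothetical option names: values the caller already set are kept, while None/False entries are filled from ~/.shakedown (dashes in config keys map to underscores in argument names).

args = {'dcos_url': None, 'ssh_user': 'core', 'quiet': False}
args = read_config(args)
# With a ~/.shakedown containing:
#   dcos-url = "https://dcos.example.com"
#   quiet = true
# the result is:
#   {'dcos_url': 'https://dcos.example.com', 'ssh_user': 'core', 'quiet': True}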
def _register_client(self, client, region_name): """Uses functools.partial to wrap all methods on a client with the self._wrap_client method :param botocore.client.BaseClient client: the client to proxy :param str region_name: AWS Region ID (ex: us-east-1) """ for item in client.meta.method_to_api_mapping: method = getattr(client, item) wrapped_method = functools.partial(self._wrap_client, region_name, method) setattr(client, item, wrapped_method)
Uses functools.partial to wrap all methods on a client with the self._wrap_client method :param botocore.client.BaseClient client: the client to proxy :param str region_name: AWS Region ID (ex: us-east-1)
def _create_dmnd_database(self, unaligned_sequences_path, daa_output): ''' Build a diamond database using diamond makedb Parameters ---------- unaligned_sequences_path: str path to a FASTA file containing unaligned sequences daa_output: str Name of output database. ''' logging.debug("Building diamond database") cmd = "diamond makedb --in '%s' -d '%s'" % (unaligned_sequences_path, daa_output) extern.run(cmd)
Build a diamond database using diamond makedb Parameters ---------- unaligned_sequences_path: str path to a FASTA file containing unaligned sequences daa_output: str Name of output database.
def _ClientPathToString(client_path, prefix=""): """Returns a path-like String of client_path with optional prefix.""" return os.path.join(prefix, client_path.client_id, client_path.vfs_path)
Returns a path-like String of client_path with optional prefix.
def get_composition_smart_repository_session(self, repository_id, proxy): """Gets a composition smart repository session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSmartRepositorySession) - a CompositionSmartRepositorySession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_smart_repository() false compliance: optional - This method must be implemented if supports_composition_smart_repository() is true. """ if repository_id is None: raise NullArgument() if not self.supports_composition_smart_repository(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionSmartRepositorySession(repository_id, proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
Gets a composition smart repository session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionSmartRepositorySession) - a CompositionSmartRepositorySession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_smart_repository() false compliance: optional - This method must be implemented if supports_composition_smart_repository() is true.
def op_token(self, display_name, opt): """Return a properly annotated token for our use. This token will be revoked at the end of the session. The token will have some decent amounts of metadata tho.""" args = { 'lease': opt.lease, 'display_name': display_name, 'meta': token_meta(opt) } try: token = self.create_token(**args) except (hvac.exceptions.InvalidRequest, hvac.exceptions.Forbidden) as vault_exception: if vault_exception.errors[0] == 'permission denied': emsg = "Permission denied creating operational token" raise aomi.exceptions.AomiCredentials(emsg) else: raise LOG.debug("Created operational token with lease of %s", opt.lease) return token['auth']['client_token']
Return a properly annotated token for our use. This token will be revoked at the end of the session. The token will have some decent amounts of metadata tho.
def escape_dictionary(dictionary, datetime_format='%Y-%m-%d %H:%M:%S'): """Escape dictionary values with keys as column names and values column values @type dictionary: dict @param dictionary: Key-values """ for k, v in dictionary.iteritems(): if isinstance(v, datetime.datetime): v = v.strftime(datetime_format) if isinstance(v, basestring): v = CoyoteDb.db_escape(str(v)) v = '"{}"'.format(v) if v is True: v = 1 if v is False: v = 0 if v is None: v = 'NULL' dictionary[k] = v
Escape dictionary values with keys as column names and values column values @type dictionary: dict @param dictionary: Key-values
def genargs() -> ArgumentParser: """ Create a command line parser :return: parser """ parser = ArgumentParser() parser.add_argument("infile", help="Input ShExC specification") parser.add_argument("-nj", "--nojson", help="Do not produce json output", action="store_true") parser.add_argument("-nr", "--nordf", help="Do not produce rdf output", action="store_true") parser.add_argument("-j", "--jsonfile", help="Output ShExJ file (Default: {infile}.json)") parser.add_argument("-r", "--rdffile", help="Output ShExR file (Default: {infile}.{fmt suffix})") parser.add_argument("--context", help="Alternative @context") parser.add_argument("-f", "--format", choices=list(set(x.name for x in rdflib_plugins(None, rdflib_Serializer) if '/' not in str(x.name))), help="Output format (Default: turtle)", default="turtle") return parser
Create a command line parser :return: parser
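An example of driving the parser built above; the input filename is made up, and the --format choices are whatever serializer names rdflib has registered.

# Example invocation; "schema.shex" is a made-up input file name.
parser = genargs()
opts = parser.parse_args(["schema.shex", "--nojson", "--format", "turtle"])
print(opts.infile, opts.nojson, opts.format)   # schema.shex True turtle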
def spam(self, msg, *args, **kw): """Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(SPAM): self._log(SPAM, msg, args, **kw)
Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.
def request_start(self): """ Indicate readiness to receive stream. This is a blocking call. """ self._queue.put(command_packet(CMD_START_STREAM)) _LOGGER.info('Requesting stream') self._source.run()
Indicate readiness to receive stream. This is a blocking call.
def text(self, x, y, txt=''): "Output a string" txt = self.normalize_text(txt) if (self.unifontsubset): txt2 = self._escape(UTF8ToUTF16BE(txt, False)) for uni in UTF8StringToArray(txt): self.current_font['subset'].append(uni) else: txt2 = self._escape(txt) s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2) if(self.underline and txt!=''): s+=' '+self._dounderline(x,y,txt) if(self.color_flag): s='q '+self.text_color+' '+s+' Q' self._out(s)
Output a string
def parse_params(self, core_params): """ Goes through a set of parameters, extracting information about each. :param core_params: The collection of parameters :type core_params: A collection of ``<botocore.parameters.Parameter>`` subclasses :returns: A list of dictionaries """ params = [] for core_param in core_params: params.append(self.parse_param(core_param)) return params
Goes through a set of parameters, extracting information about each. :param core_params: The collection of parameters :type core_params: A collection of ``<botocore.parameters.Parameter>`` subclasses :returns: A list of dictionaries
def serialize_to_file( root_processor, # type: RootProcessor value, # type: Any xml_file_path, # type: Text encoding='utf-8', # type: Text indent=None # type: Optional[Text] ): # type: (...) -> None """ Serialize the value to an XML file using the root processor. :param root_processor: Root processor of the XML document. :param value: Value to serialize. :param xml_file_path: Path to the XML file to which the serialized value will be written. :param encoding: Encoding of the file. :param indent: If specified, then the XML will be formatted with the specified indentation. """ serialized_value = serialize_to_string(root_processor, value, indent) with open(xml_file_path, 'w', encoding=encoding) as xml_file: xml_file.write(serialized_value)
Serialize the value to an XML file using the root processor. :param root_processor: Root processor of the XML document. :param value: Value to serialize. :param xml_file_path: Path to the XML file to which the serialized value will be written. :param encoding: Encoding of the file. :param indent: If specified, then the XML will be formatted with the specified indentation.
def step_a_new_working_directory(context): """ Creates a new, empty working directory """ command_util.ensure_context_attribute_exists(context, "workdir", None) command_util.ensure_workdir_exists(context) shutil.rmtree(context.workdir, ignore_errors=True)
Creates a new, empty working directory
def add_var_arg(self, arg): """ Add a variable (or macro) argument to the condor job. The argument is added to the submit file and a different value of the argument can be set for each node in the DAG. @param arg: name of option to add. """ self.__args.append(arg) self.__job.add_var_arg(self.__arg_index) self.__arg_index += 1
Add a variable (or macro) argument to the condor job. The argument is added to the submit file and a different value of the argument can be set for each node in the DAG. @param arg: name of option to add.
def Storage_clearDataForOrigin(self, origin, storageTypes): """ Function path: Storage.clearDataForOrigin Domain: Storage Method name: clearDataForOrigin Parameters: Required arguments: 'origin' (type: string) -> Security origin. 'storageTypes' (type: string) -> Comma separated origin names. No return value. Description: Clears storage for origin. """ assert isinstance(origin, (str,) ), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type( origin) assert isinstance(storageTypes, (str,) ), "Argument 'storageTypes' must be of type '['str']'. Received type: '%s'" % type( storageTypes) subdom_funcs = self.synchronous_command('Storage.clearDataForOrigin', origin=origin, storageTypes=storageTypes) return subdom_funcs
Function path: Storage.clearDataForOrigin Domain: Storage Method name: clearDataForOrigin Parameters: Required arguments: 'origin' (type: string) -> Security origin. 'storageTypes' (type: string) -> Comma separated origin names. No return value. Description: Clears storage for origin.
def are_forms_valid(self, forms): """ Check if all forms defined in `form_classes` are valid. """ for form in six.itervalues(forms): if not form.is_valid(): return False return True
Check if all forms defined in `form_classes` are valid.
def render_css_classes(self): """ Return a string containing the css classes for the module. >>> mod = DashboardModule(enabled=False, draggable=True, ... collapsible=True, deletable=True) >>> mod.render_css_classes() 'dashboard-module disabled draggable collapsible deletable' >>> mod.css_classes.append('foo') >>> mod.render_css_classes() 'dashboard-module disabled draggable collapsible deletable foo' >>> mod.enabled = True >>> mod.render_css_classes() 'dashboard-module draggable collapsible deletable foo' """ ret = ['dashboard-module'] if not self.enabled: ret.append('disabled') if self.draggable: ret.append('draggable') if self.collapsible: ret.append('collapsible') if self.deletable: ret.append('deletable') ret += self.css_classes return ' '.join(ret)
Return a string containing the css classes for the module. >>> mod = DashboardModule(enabled=False, draggable=True, ... collapsible=True, deletable=True) >>> mod.render_css_classes() 'dashboard-module disabled draggable collapsible deletable' >>> mod.css_classes.append('foo') >>> mod.render_css_classes() 'dashboard-module disabled draggable collapsible deletable foo' >>> mod.enabled = True >>> mod.render_css_classes() 'dashboard-module draggable collapsible deletable foo'
def clearness_index_zenith_independent(clearness_index, airmass, max_clearness_index=2.0): """ Calculate the zenith angle independent clearness index. Parameters ---------- clearness_index : numeric Ratio of global to extraterrestrial irradiance on a horizontal plane airmass : numeric Airmass max_clearness_index : numeric, default 2.0 Maximum value of the clearness index. The default, 2.0, allows for over-irradiance events typically seen in sub-hourly data. NREL's SRRL Fortran code used 0.82 for hourly data. Returns ------- kt_prime : numeric Zenith independent clearness index References ---------- .. [1] Perez, R., P. Ineichen, E. Maxwell, R. Seals and A. Zelenka, (1992). "Dynamic Global-to-Direct Irradiance Conversion Models". ASHRAE Transactions-Research Series, pp. 354-369 """ # Perez eqn 1 kt_prime = clearness_index / _kt_kt_prime_factor(airmass) kt_prime = np.maximum(kt_prime, 0) kt_prime = np.minimum(kt_prime, max_clearness_index) return kt_prime
Calculate the zenith angle independent clearness index. Parameters ---------- clearness_index : numeric Ratio of global to extraterrestrial irradiance on a horizontal plane airmass : numeric Airmass max_clearness_index : numeric, default 2.0 Maximum value of the clearness index. The default, 2.0, allows for over-irradiance events typically seen in sub-hourly data. NREL's SRRL Fortran code used 0.82 for hourly data. Returns ------- kt_prime : numeric Zenith independent clearness index References ---------- .. [1] Perez, R., P. Ineichen, E. Maxwell, R. Seals and A. Zelenka, (1992). "Dynamic Global-to-Direct Irradiance Conversion Models". ASHRAE Transactions-Research Series, pp. 354-369
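A small numeric usage sketch, assuming the pvlib package, where this helper is exposed from pvlib.irradiance.

import numpy as np
from pvlib.irradiance import clearness_index_zenith_independent

kt = np.array([0.4, 0.6, 0.75])
airmass = np.array([1.0, 1.5, 3.0])
kt_prime = clearness_index_zenith_independent(kt, airmass)
# kt_prime is clipped to the range [0, max_clearness_index]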
def remove(self, recursive=True, ignore_error=True): """ Remove the directory. """ try: if recursive or self._cleanup == 'recursive': shutil.rmtree(self.path) else: os.rmdir(self.path) except Exception as e: if not ignore_error: raise e
Remove the directory.
def network_create(provider, names, **kwargs): ''' Create private network CLI Example: .. code-block:: bash salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24' ''' client = _get_client() return client.extra_action(provider=provider, names=names, action='network_create', **kwargs)
Create private network CLI Example: .. code-block:: bash salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24'
def get_points_within_r(center_points, target_points, r): r"""Get all target_points within a specified radius of a center point. All data must be in same coordinate system, or you will get undetermined results. Parameters ---------- center_points: (X, Y) ndarray location from which to grab surrounding points within r target_points: (X, Y) ndarray points from which to return if they are within r of center_points r: integer search radius around center_points to grab target_points Returns ------- matches: (X, Y) ndarray A list of points within r distance of, and in the same order as, center_points """ tree = cKDTree(target_points) indices = tree.query_ball_point(center_points, r) return tree.data[indices].T
r"""Get all target_points within a specified radius of a center point. All data must be in same coordinate system, or you will get undetermined results. Parameters ---------- center_points: (X, Y) ndarray location from which to grab surrounding points within r target_points: (X, Y) ndarray points from which to return if they are within r of center_points r: integer search radius around center_points to grab target_points Returns ------- matches: (X, Y) ndarray A list of points within r distance of, and in the same order as, center_points
def request(self, *args, **kwargs) -> XMLResponse: """Makes an HTTP Request, with mocked User-Agent headers. Returns a :class:`XMLResponse <XMLResponse>`. """ # Make the request and wrap the response in an XMLResponse. r = super(XMLSession, self).request(*args, **kwargs) return XMLResponse._from_response(r)
Makes an HTTP Request, with mocked User-Agent headers. Returns a :class:`XMLResponse <XMLResponse>`.
def _print_topics(self, header: str, cmds: List[str], verbose: bool) -> None: """Customized version of print_topics that can switch between verbose or traditional output""" import io if cmds: if not verbose: self.print_topics(header, cmds, 15, 80) else: self.stdout.write('{}\n'.format(str(header))) widest = 0 # measure the commands for command in cmds: width = utils.ansi_safe_wcswidth(command) if width > widest: widest = width # add a 4-space pad widest += 4 if widest < 20: widest = 20 if self.ruler: self.stdout.write('{:{ruler}<{width}}\n'.format('', ruler=self.ruler, width=80)) # Try to get the documentation string for each command topics = self.get_help_topics() for command in cmds: cmd_func = self.cmd_func(command) # Non-argparse commands can have help_functions for their documentation if not hasattr(cmd_func, 'argparser') and command in topics: help_func = getattr(self, HELP_FUNC_PREFIX + command) result = io.StringIO() # try to redirect system stdout with redirect_stdout(result): # save our internal stdout stdout_orig = self.stdout try: # redirect our internal stdout self.stdout = result help_func() finally: # restore internal stdout self.stdout = stdout_orig doc = result.getvalue() else: doc = cmd_func.__doc__ # Attempt to locate the first documentation block if not doc: doc_block = [''] else: doc_block = [] found_first = False for doc_line in doc.splitlines(): stripped_line = doc_line.strip() # Don't include :param type lines if stripped_line.startswith(':'): if found_first: break elif stripped_line: doc_block.append(stripped_line) found_first = True elif found_first: break for doc_line in doc_block: self.stdout.write('{: <{col_width}}{doc}\n'.format(command, col_width=widest, doc=doc_line)) command = '' self.stdout.write("\n")
Customized version of print_topics that can switch between verbose or traditional output
def getList(self, aspList): """ Returns a sorted list with all primary directions. """ # Significators objects = self._elements(self.SIG_OBJECTS, self.N, [0]) houses = self._elements(self.SIG_HOUSES, self.N, [0]) angles = self._elements(self.SIG_ANGLES, self.N, [0]) significators = objects + houses + angles # Promissors objects = self._elements(self.SIG_OBJECTS, self.N, aspList) terms = self._terms() antiscias = self._elements(self.SIG_OBJECTS, self.A, [0]) cantiscias = self._elements(self.SIG_OBJECTS, self.C, [0]) promissors = objects + terms + antiscias + cantiscias # Compute all res = [] for prom in promissors: for sig in significators: if (prom['id'] == sig['id']): continue arcs = self._arc(prom, sig) for (x,y) in [('arcm', 'M'), ('arcz', 'Z')]: arc = arcs[x] if 0 < arc < self.MAX_ARC: res.append([ arcs[x], prom['id'], sig['id'], y, ]) return sorted(res)
Returns a sorted list with all primary directions.
def createPenStyleCti(nodeName, defaultData=0, includeNone=False): """ Creates a ChoiceCti with Qt PenStyles. If includeNone is True, the first option will be None. """ displayValues=PEN_STYLE_DISPLAY_VALUES configValues=PEN_STYLE_CONFIG_VALUES if includeNone: displayValues = [''] + list(displayValues) configValues = [None] + list(configValues) return ChoiceCti(nodeName, defaultData, displayValues=displayValues, configValues=configValues)
Creates a ChoiceCti with Qt PenStyles. If includeNone is True, the first option will be None.
def identity(obj): """ returns a string representing "<pk>,<version>" of the passed object """ if hasattr(obj, '_concurrencymeta'): return mark_safe("{0},{1}".format(unlocalize(obj.pk), get_revision_of_object(obj))) else: return mark_safe(unlocalize(obj.pk))
returns a string representing "<pk>,<version>" of the passed object
def getEyeToHeadTransform(self, eEye): """ Returns the transform from eye space to the head space. Eye space is the per-eye flavor of head space that provides stereo disparity. Instead of Model * View * Projection the sequence is Model * View * Eye^-1 * Projection. Normally View and Eye^-1 will be multiplied together and treated as View in your application. """ fn = self.function_table.getEyeToHeadTransform result = fn(eEye) return result
Returns the transform from eye space to the head space. Eye space is the per-eye flavor of head space that provides stereo disparity. Instead of Model * View * Projection the sequence is Model * View * Eye^-1 * Projection. Normally View and Eye^-1 will be multiplied together and treated as View in your application.
def placebo_session(function): """ Decorator to help do testing with placebo. Simply wrap the function you want to test and make sure to add a "session" argument so the decorator can pass the placebo session. Accepts the following environment variables to configure placebo: PLACEBO_MODE: set to "record" to record AWS calls and save them PLACEBO_PROFILE: optionally set an AWS credential profile to record with PLACEBO_DIR: set the directory to record to / read from """ @functools.wraps(function) def wrapper(*args, **kwargs): session_kwargs = { 'region_name': os.environ.get('AWS_DEFAULT_REGION', 'us-east-1') } profile_name = os.environ.get('PLACEBO_PROFILE', None) if profile_name: session_kwargs['profile_name'] = profile_name session = boto3.Session(**session_kwargs) self = args[0] prefix = self.__class__.__name__ + '.' + function.__name__ base_dir = os.environ.get( "PLACEBO_DIR", os.path.join(os.getcwd(), "placebo")) record_dir = os.path.join(base_dir, prefix) record_format = os.environ.get('PLACEBO_FORMAT', Format.DEFAULT) if not os.path.exists(record_dir): os.makedirs(record_dir) pill = placebo.attach(session, data_path=record_dir, record_format=record_format) if os.environ.get('PLACEBO_MODE') == 'record': pill.record() else: pill.playback() kwargs['session'] = session return function(*args, **kwargs) return wrapper
Decorator to help do testing with placebo. Simply wrap the function you want to test and make sure to add a "session" argument so the decorator can pass the placebo session. Accepts the following environment variables to configure placebo: PLACEBO_MODE: set to "record" to record AWS calls and save them PLACEBO_PROFILE: optionally set an AWS credential profile to record with PLACEBO_DIR: set the directory to record to / read from
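A hypothetical test case showing how the decorator above is applied; the class and method names are invented, the extra `session` argument is injected by placebo_session, and recordings are read from (or written to) PLACEBO_DIR/<Class>.<method>.

import unittest

class TestDeploy(unittest.TestCase):

    @placebo_session
    def test_list_functions(self, session):
        client = session.client('lambda')
        response = client.list_functions()
        self.assertIn('Functions', response)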
def get_instance(self, payload): """ Build an instance of CredentialListInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListInstance :rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListInstance """ return CredentialListInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Build an instance of CredentialListInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.sip.credential_list.CredentialListInstance :rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListInstance
def _bss_decomp_mtifilt(reference_sources, estimated_source, j, C, Cj): """Decomposition of an estimated source image into four components representing respectively the true source image, spatial (or filtering) distortion, interference and artifacts, derived from the true source images using multichannel time-invariant filters.""" filters_len = Cj.shape[-2] # zero pad s_true = _zeropad(reference_sources[j], filters_len - 1, axis=0) # compute appropriate projections e_spat = _project(reference_sources[j], Cj) - s_true e_interf = _project(reference_sources, C) - s_true - e_spat e_artif = - s_true - e_spat - e_interf e_artif[:estimated_source.shape[0], :] += estimated_source return (s_true, e_spat, e_interf, e_artif)
Decomposition of an estimated source image into four components representing respectively the true source image, spatial (or filtering) distortion, interference and artifacts, derived from the true source images using multichannel time-invariant filters.
def tabulate_state_blocks(x, states, pos=None): """Construct a dataframe where each row provides information about continuous state blocks. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_blocks(x, states={1, 2}) >>> df state support start_lidx ... size_min size_max is_marginal 0 1 4 -1 ... 5 -1 True 1 2 3 4 ... 4 4 False 2 1 2 8 ... 2 -1 True [3 rows x 9 columns] >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos) >>> df state support start_lidx ... stop_rpos length_min length_max 0 1 4 -1 ... 14 9 -1 1 2 3 4 ... 30 15 19 2 1 2 8 ... -1 2 -1 [3 rows x 15 columns] """ # check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions switch_points, transitions, observations = state_transitions(x, states) # setup some helpers t = transitions[1:, 0] o = observations[1:] s1 = switch_points[:-1] s2 = switch_points[1:] is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0) size_min = s2[:, 0] - s1[:, 1] + 1 size_max = s2[:, 1] - s1[:, 0] - 1 size_max[is_marginal] = -1 # start to build a dataframe items = [ ('state', t), ('support', o), ('start_lidx', s1[:, 0]), ('start_ridx', s1[:, 1]), ('stop_lidx', s2[:, 0]), ('stop_ridx', s2[:, 1]), ('size_min', size_min), ('size_max', size_max), ('is_marginal', is_marginal) ] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # obtain switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # setup helpers p1 = switch_positions[:-1] p2 = switch_positions[1:] length_min = p2[:, 0] - p1[:, 1] + 1 length_max = p2[:, 1] - p1[:, 0] - 1 length_max[is_marginal] = -1 items += [ ('start_lpos', p1[:, 0]), ('start_rpos', p1[:, 1]), ('stop_lpos', p2[:, 0]), ('stop_rpos', p2[:, 1]), ('length_min', length_min), ('length_max', length_max), ] import pandas return pandas.DataFrame.from_dict(OrderedDict(items))
Construct a dataframe where each row provides information about continuous state blocks. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_blocks(x, states={1, 2}) >>> df state support start_lidx ... size_min size_max is_marginal 0 1 4 -1 ... 5 -1 True 1 2 3 4 ... 4 4 False 2 1 2 8 ... 2 -1 True [3 rows x 9 columns] >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos) >>> df state support start_lidx ... stop_rpos length_min length_max 0 1 4 -1 ... 14 9 -1 1 2 3 4 ... 30 15 19 2 1 2 8 ... -1 2 -1 [3 rows x 15 columns]
def rename(old_name, new_name): '''Rename the given virtual folder. This operation is irreversible! You cannot change the vfolders that are shared by other users, and the new name must be unique among all your accessible vfolders including the shared ones. OLD_NAME: The current name of a virtual folder. NEW_NAME: The new name of a virtual folder. ''' with Session() as session: try: session.VFolder(old_name).rename(new_name) print_done('Renamed.') except Exception as e: print_error(e) sys.exit(1)
Rename the given virtual folder. This operation is irreversible! You cannot change the vfolders that are shared by other users, and the new name must be unique among all your accessible vfolders including the shared ones. OLD_NAME: The current name of a virtual folder. NEW_NAME: The new name of a virtual folder.
def select_ipam_strategy(self, network_id, network_strategy, **kwargs): """Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy. """ LOG.info("Selecting IPAM strategy for network_id:%s " "network_strategy:%s" % (network_id, network_strategy)) net_type = "tenant" if STRATEGY.is_provider_network(network_id): net_type = "provider" strategy = self._ipam_strategies.get(net_type, {}) default = strategy.get("default") overrides = strategy.get("overrides", {}) # If we override a particular strategy explicitly, we use it. if network_strategy in overrides: LOG.info("Selected overridden IPAM strategy: %s" % (overrides[network_strategy])) return overrides[network_strategy] # Otherwise, we are free to use an explicit default. if default: LOG.info("Selected default IPAM strategy for tenant " "network: %s" % (default)) return default # Fallback to the network-specified IPAM strategy LOG.info("Selected network strategy for tenant " "network: %s" % (network_strategy)) return network_strategy
Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy.
def inet_pton(address_family, ip_string): """ A platform independent version of inet_pton """ global __inet_pton if __inet_pton is None: if hasattr(socket, 'inet_pton'): __inet_pton = socket.inet_pton else: from ospd import win_socket __inet_pton = win_socket.inet_pton return __inet_pton(address_family, ip_string)
A platform independent version of inet_pton
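A usage sketch: the wrapper takes the same arguments as socket.inet_pton and returns the packed binary address on both POSIX and Windows.

import socket

packed_v4 = inet_pton(socket.AF_INET, '192.0.2.1')     # b'\xc0\x00\x02\x01'
packed_v6 = inet_pton(socket.AF_INET6, '2001:db8::1')
print(len(packed_v4), len(packed_v6))                  # 4 16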
def prepare_framework_container_def(model, instance_type, s3_operations): """Prepare the framework model container information. Specify related S3 operations for Airflow to perform. (Upload `source_dir`) Args: model (sagemaker.model.FrameworkModel): The framework model instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. s3_operations (dict): The dict to specify S3 operations (upload `source_dir`). Returns: dict: The container information of this framework model. """ deploy_image = model.image if not deploy_image: region_name = model.sagemaker_session.boto_session.region_name deploy_image = fw_utils.create_image_uri( region_name, model.__framework_name__, instance_type, model.framework_version, model.py_version) base_name = utils.base_name_from_image(deploy_image) model.name = model.name or utils.name_from_base(base_name) bucket = model.bucket or model.sagemaker_session._default_bucket script = os.path.basename(model.entry_point) key = '{}/source/sourcedir.tar.gz'.format(model.name) if model.source_dir and model.source_dir.lower().startswith('s3://'): code_dir = model.source_dir model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) else: code_dir = 's3://{}/{}'.format(bucket, key) model.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) s3_operations['S3Upload'] = [{ 'Path': model.source_dir or script, 'Bucket': bucket, 'Key': key, 'Tar': True }] deploy_env = dict(model.env) deploy_env.update(model._framework_env_vars()) try: if model.model_server_workers: deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(model.model_server_workers) except AttributeError: # This applies to a FrameworkModel which is not SageMaker Deep Learning Framework Model pass return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
Prepare the framework model container information. Specify related S3 operations for Airflow to perform. (Upload `source_dir`) Args: model (sagemaker.model.FrameworkModel): The framework model instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. s3_operations (dict): The dict to specify S3 operations (upload `source_dir`). Returns: dict: The container information of this framework model.
def post_info(self, name, message): """Asynchronously post a user facing info message about a service. Args: name (string): The name of the service message (string): The user facing info message that will be stored for the service and can be queried later. """ self.post_command(OPERATIONS.CMD_POST_MESSAGE, _create_message(name, states.INFO_LEVEL, message))
Asynchronously post a user facing info message about a service. Args: name (string): The name of the service message (string): The user facing info message that will be stored for the service and can be queried later.
def new_driver(browser_name, *args, **kwargs): """Instantiates a new WebDriver instance, determining class by environment variables """ if browser_name == FIREFOX: return webdriver.Firefox(*args, **kwargs) # elif options['local'] and options['browser_name'] == CHROME: # return webdriver.Chrome(*args, **kwargs) # # elif options['local'] and options['browser_name'] == IE: # return webdriver.Ie(*args, **kwargs) # # elif options['local'] and options['browser_name'] == OPERA: # return webdriver.Opera(*args, **kwargs) elif browser_name == PHANTOMJS: executable_path = os.path.join(os.path.dirname(__file__), 'phantomjs/executable/phantomjs_64bit') driver = webdriver.PhantomJS(executable_path=executable_path, **kwargs) driver.set_window_size(1280, 800) # Set a default because phantom needs it return driver else: # remote driver = webdriver.Remote(*args, **kwargs) return driver
Instantiates a new WebDriver instance, determining class by environment variables
def get_message_content(self): """ Given the Slap XML, extract out the payload. """ body = self.doc.find( ".//{http://salmon-protocol.org/ns/magic-env}data").text body = urlsafe_b64decode(body.encode("ascii")) logger.debug("diaspora.protocol.get_message_content: %s", body) return body
Given the Slap XML, extract out the payload.
def add_node(self, node): """Add a node to cluster. :param node: should be formated like this `{"addr": "", "role": "slave", "master": "master_node_id"} """ new = ClusterNode.from_uri(node["addr"]) cluster_member = self.nodes[0] check_new_nodes([new], [cluster_member]) new.meet(cluster_member.host, cluster_member.port) self.nodes.append(new) self.wait() if node["role"] != "slave": return if "master" in node: target = self.get_node(node["master"]) if not target: raise NodeNotFound(node["master"]) else: masters = sorted(self.masters, key=lambda x: len(x.slaves(x.name))) target = masters[0] new.replicate(target.name) new.flush_cache() target.flush_cache()
Add a node to cluster. :param node: should be formated like this `{"addr": "", "role": "slave", "master": "master_node_id"}
def _cache_is_expired(): """Indicates whether the cache has expired""" now = timezone.now() timediff = TransCache.SINGLETON_CREATION_DATETIME - now return (timediff.total_seconds() > TransCache.SINGLETON_EXPIRATION_MAX_SECONDS)
Indicates whether the cache has expired
def set_features(self, filter_type): """Calls splitter to split percolator output into target/decoy elements. Writes two new xml files with features. Currently only psms and peptides. Proteins not here, since one cannot do protein inference before having merged and remapped multifraction data anyway. """ elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps} self.features = self.splitfunc(elements_to_split, self.ns, filter_type)
Calls splitter to split percolator output into target/decoy elements. Writes two new xml files with features. Currently only psms and peptides. Proteins not here, since one cannot do protein inference before having merged and remapped multifraction data anyway.
def _unescape_str(value): """ Unescape a TS3 compatible string into a normal string @param value: Value @type value: string/int """ if isinstance(value, int): return "%d" % value value = value.replace(r"\\", "\\") for i, j in ts3_escape.items(): value = value.replace(j, i) return value
Unescape a TS3 compatible string into a normal string @param value: Value @type value: string/int
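A sketch of the expected behaviour, assuming the module-level ts3_escape mapping holds the usual TeamSpeak 3 pairs such as ' ' -> r'\s' and '|' -> r'\p'.

raw = r'My\sChannel\pSub'
print(_unescape_str(raw))   # My Channel|Sub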
def agent_version(self): """Get the version of the Juju machine agent. May return None if the agent is not yet available. """ version = self.safe_data['agent-status']['version'] if version: return client.Number.from_json(version) else: return None
Get the version of the Juju machine agent. May return None if the agent is not yet available.
def status(self, build_record_id, **kwargs): """ Latest push result of BuildRecord. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.status(build_record_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int build_record_id: Build Record id (required) :return: BuildRecordPushResultRest If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.status_with_http_info(build_record_id, **kwargs) else: (data) = self.status_with_http_info(build_record_id, **kwargs) return data
Latest push result of BuildRecord. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.status(build_record_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int build_record_id: Build Record id (required) :return: BuildRecordPushResultRest If the method is called asynchronously, returns the request thread.
def build_defaults(self): """Build a dictionary of default values from the `Scheme`. Returns: dict: The default configurations as set by the `Scheme`. Raises: errors.InvalidSchemeError: The `Scheme` does not contain valid options. """ defaults = {} for arg in self.args: if not isinstance(arg, _BaseOpt): raise errors.InvalidSchemeError('Unable to build default for non-Option type') # if there is a default set, add it to the defaults dict if not isinstance(arg.default, NoDefault): defaults[arg.name] = arg.default # if we have a dict option, build the defaults for its scheme. # if any defaults exist, use them. if isinstance(arg, DictOption): if arg.scheme: b = arg.scheme.build_defaults() if b: defaults[arg.name] = b return defaults
Build a dictionary of default values from the `Scheme`. Returns: dict: The default configurations as set by the `Scheme`. Raises: errors.InvalidSchemeError: The `Scheme` does not contain valid options.
def guess_payload_class(self, payload): """ Handles NTPv4 extensions and MAC part (when authentication is used.) """ plen = len(payload) if plen > _NTP_AUTH_MD5_TAIL_SIZE: return NTPExtensions elif plen == _NTP_AUTH_MD5_TAIL_SIZE: return NTPAuthenticator return Packet.guess_payload_class(self, payload)
Handles NTPv4 extensions and MAC part (when authentication is used.)
async def close(self): '''Close the listening socket. This does not close any ServerSession objects created to handle incoming connections. ''' if self.server: self.server.close() await self.server.wait_closed() self.server = None
Close the listening socket. This does not close any ServerSession objects created to handle incoming connections.
def start(self, poll_period=None): """ Start the NeedNameQeueu Parameters: ---------- TODO: Move task receiving to a thread """ logger.info("Incoming ports bound") if poll_period is None: poll_period = self.poll_period start = time.time() count = 0 self._kill_event = threading.Event() self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal, args=(self._kill_event,)) self._task_puller_thread.start() self._command_thread = threading.Thread(target=self._command_server, args=(self._kill_event,)) self._command_thread.start() poller = zmq.Poller() # poller.register(self.task_incoming, zmq.POLLIN) poller.register(self.task_outgoing, zmq.POLLIN) poller.register(self.results_incoming, zmq.POLLIN) # These are managers which we should examine in an iteration # for scheduling a job (or maybe any other attention?). # Anything altering the state of the manager should add it # onto this list. interesting_managers = set() while not self._kill_event.is_set(): self.socks = dict(poller.poll(timeout=poll_period)) # Listen for requests for work if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN: logger.debug("[MAIN] starting task_outgoing section") message = self.task_outgoing.recv_multipart() manager = message[0] if manager not in self._ready_manager_queue: reg_flag = False try: msg = json.loads(message[1].decode('utf-8')) reg_flag = True except Exception: logger.warning("[MAIN] Got a non-json registration message from manager:{}".format( manager)) logger.debug("[MAIN] Message :\n{}\n".format(message[0])) # By default we set up to ignore bad nodes/registration messages. self._ready_manager_queue[manager] = {'last': time.time(), 'free_capacity': 0, 'block_id': None, 'max_capacity': 0, 'active': True, 'tasks': []} if reg_flag is True: interesting_managers.add(manager) logger.info("[MAIN] Adding manager: {} to ready queue".format(manager)) self._ready_manager_queue[manager].update(msg) logger.info("[MAIN] Registration info for manager {}: {}".format(manager, msg)) if (msg['python_v'].rsplit(".", 1)[0] != self.current_platform['python_v'].rsplit(".", 1)[0] or msg['parsl_v'] != self.current_platform['parsl_v']): logger.warn("[MAIN] Manager {} has incompatible version info with the interchange".format(manager)) if self.suppress_failure is False: logger.debug("Setting kill event") self._kill_event.set() e = ManagerLost(manager) result_package = {'task_id': -1, 'exception': serialize_object(e)} pkl_package = pickle.dumps(result_package) self.results_outgoing.send(pkl_package) logger.warning("[MAIN] Sent failure reports, unregistering manager") else: logger.debug("[MAIN] Suppressing shutdown due to version incompatibility") else: logger.info("[MAIN] Manager {} has compatible Parsl version {}".format(manager, msg['parsl_v'])) logger.info("[MAIN] Manager {} has compatible Python version {}".format(manager, msg['python_v'].rsplit(".", 1)[0])) else: # Registration has failed. 
if self.suppress_failure is False: self._kill_event.set() e = BadRegistration(manager, critical=True) result_package = {'task_id': -1, 'exception': serialize_object(e)} pkl_package = pickle.dumps(result_package) self.results_outgoing.send(pkl_package) else: logger.debug("[MAIN] Suppressing bad registration from manager:{}".format( manager)) else: tasks_requested = int.from_bytes(message[1], "little") self._ready_manager_queue[manager]['last'] = time.time() if tasks_requested == HEARTBEAT_CODE: logger.debug("[MAIN] Manager {} sent heartbeat".format(manager)) self.task_outgoing.send_multipart([manager, b'', PKL_HEARTBEAT_CODE]) else: logger.debug("[MAIN] Manager {} requested {} tasks".format(manager, tasks_requested)) self._ready_manager_queue[manager]['free_capacity'] = tasks_requested interesting_managers.add(manager) logger.debug("[MAIN] leaving task_outgoing section") # If we had received any requests, check if there are tasks that could be passed logger.debug("Managers count (total/interesting): {}/{}".format(len(self._ready_manager_queue), len(interesting_managers))) if interesting_managers and not self.pending_task_queue.empty(): shuffled_managers = list(interesting_managers) random.shuffle(shuffled_managers) while shuffled_managers and not self.pending_task_queue.empty(): # cf. the if statement above... manager = shuffled_managers.pop() tasks_inflight = len(self._ready_manager_queue[manager]['tasks']) real_capacity = min(self._ready_manager_queue[manager]['free_capacity'], self._ready_manager_queue[manager]['max_capacity'] - tasks_inflight) if (real_capacity and self._ready_manager_queue[manager]['active']): tasks = self.get_tasks(real_capacity) if tasks: self.task_outgoing.send_multipart([manager, b'', pickle.dumps(tasks)]) task_count = len(tasks) count += task_count tids = [t['task_id'] for t in tasks] self._ready_manager_queue[manager]['free_capacity'] -= task_count self._ready_manager_queue[manager]['tasks'].extend(tids) logger.debug("[MAIN] Sent tasks: {} to manager {}".format(tids, manager)) if self._ready_manager_queue[manager]['free_capacity'] > 0: logger.debug("[MAIN] Manager {} has free_capacity {}".format(manager, self._ready_manager_queue[manager]['free_capacity'])) # ... 
so keep it in the interesting_managers list else: logger.debug("[MAIN] Manager {} is now saturated".format(manager)) interesting_managers.remove(manager) else: interesting_managers.remove(manager) # logger.debug("Nothing to send to manager {}".format(manager)) logger.debug("[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(len(interesting_managers))) else: logger.debug("[MAIN] either no interesting managers or no tasks, so skipping manager pass") # Receive any results and forward to client if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN: logger.debug("[MAIN] entering results_incoming section") manager, *b_messages = self.results_incoming.recv_multipart() if manager not in self._ready_manager_queue: logger.warning("[MAIN] Received a result from a un-registered manager: {}".format(manager)) else: logger.debug("[MAIN] Got {} result items in batch".format(len(b_messages))) for b_message in b_messages: r = pickle.loads(b_message) # logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager)) self._ready_manager_queue[manager]['tasks'].remove(r['task_id']) self.results_outgoing.send_multipart(b_messages) logger.debug("[MAIN] Current tasks: {}".format(self._ready_manager_queue[manager]['tasks'])) logger.debug("[MAIN] leaving results_incoming section") logger.debug("[MAIN] entering bad_managers section") bad_managers = [manager for manager in self._ready_manager_queue if time.time() - self._ready_manager_queue[manager]['last'] > self.heartbeat_threshold] for manager in bad_managers: logger.debug("[MAIN] Last: {} Current: {}".format(self._ready_manager_queue[manager]['last'], time.time())) logger.warning("[MAIN] Too many heartbeats missed for manager {}".format(manager)) for tid in self._ready_manager_queue[manager]['tasks']: try: raise ManagerLost(manager) except Exception: result_package = {'task_id': tid, 'exception': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))} pkl_package = pickle.dumps(result_package) self.results_outgoing.send(pkl_package) logger.warning("[MAIN] Sent failure reports, unregistering manager") self._ready_manager_queue.pop(manager, 'None') logger.debug("[MAIN] leaving bad_managers section") logger.debug("[MAIN] ending one main loop iteration") delta = time.time() - start logger.info("Processed {} tasks in {} seconds".format(count, delta)) logger.warning("Exiting")
Start the NeedNameQeueu Parameters: ---------- TODO: Move task receiving to a thread
def sensitivity(imgs, bg=None):
    '''
    Extract pixel sensitivity from a set of homogeneously illuminated images

    This method is detailed in Section 5 of:
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---
    '''
    bg = getBackground(bg)
    for n, i in enumerate(imgs):
        i = imread(i, dtype=float)
        i -= bg
        smooth = fastMean(median_filter(i, 3))
        i /= smooth
        if n == 0:
            out = i
        else:
            out += i
    out /= (n + 1)
    return out
Extract pixel sensitivity from a set of homogeneously illuminated images. This method is detailed in Section 5 of: --- K.Bedrich, M.Bokalic et al.: ELECTROLUMINESCENCE IMAGING OF PV DEVICES: ADVANCED FLAT FIELD CALIBRATION, 2017 ---
def block(self, mcs):
    """
    Block a (previously computed) MCS. The MCS should be given as an
    iterable of integers. Note that this method is not automatically
    invoked from :func:`enumerate` because a user may want to block
    some of the MCSes conditionally depending on the needs. For
    example, one may want to compute disjoint MCSes only in which
    case this standard blocking is not appropriate.

    :param mcs: an MCS to block
    :type mcs: iterable(int)
    """
    self.oracle.add_clause([self.sels[cl_id - 1] for cl_id in mcs])
Block a (previously computed) MCS. The MCS should be given as an iterable of integers. Note that this method is not automatically invoked from :func:`enumerate` because a user may want to block some of the MCSes conditionally depending on the needs. For example, one may want to compute disjoint MCSes only in which case this standard blocking is not appropriate. :param mcs: an MCS to block :type mcs: iterable(int)
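A minimal, self-contained sketch of what the blocking clause computes; `sels` and the recorded clause list below are stand-ins for the solver state, not the library's real objects.

# Hypothetical selector literals for soft clauses 1..4 (1-based ids, as in block()).
sels = [11, 12, 13, 14]
blocked_clauses = []   # stand-in for oracle.add_clause(...)

mcs = [2, 4]           # a previously computed MCS, given as clause ids
blocked_clauses.append([sels[cl_id - 1] for cl_id in mcs])

print(blocked_clauses)  # [[12, 14]] -- at least one of clauses 2 and 4 must be satisfied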
def get_composition_repository_assignment_session(self, proxy): """Gets the session for assigning composition to repository mappings. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositoryAssignmentSession) - a CompositionRepositoryAssignmentSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_repository_assignment() is false compliance: optional - This method must be implemented if supports_composition_repository_assignment() is true. """ if not self.supports_composition_repository_assignment(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionRepositoryAssignmentSession(proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
Gets the session for assigning composition to repository mappings. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositoryAssignmentSession) - a CompositionRepositoryAssignmentSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_repository_assignment() is false compliance: optional - This method must be implemented if supports_composition_repository_assignment() is true.
def listen(self, topic, timeout=1, limit=1): """ Listen to a topic and return a list of message payloads received within the specified time. Requires an async Subscribe to have been called previously. `topic` topic to listen to `timeout` duration to listen `limit` the max number of payloads that will be returned. Specify 0 for no limit Examples: Listen and get a list of all messages received within 5 seconds | ${messages}= | Listen | test/test | timeout=5 | limit=0 | Listen and get 1st message received within 60 seconds | @{messages}= | Listen | test/test | timeout=60 | limit=1 | | Length should be | ${messages} | 1 | """ if not self._subscribed: logger.warn('Cannot listen when not subscribed to a topic') return [] if topic not in self._messages: logger.warn('Cannot listen when not subscribed to topic: %s' % topic) return [] # If enough messages have already been gathered, return them if limit != 0 and len(self._messages[topic]) >= limit: messages = self._messages[topic][:] # Copy the list's contents self._messages[topic] = [] return messages[-limit:] seconds = convert_time(timeout) limit = int(limit) logger.info('Listening on topic: %s' % topic) timer_start = time.time() while time.time() < timer_start + seconds: if limit == 0 or len(self._messages[topic]) < limit: # If the loop is running in the background # merely sleep here for a second or so and continue # otherwise, do the loop ourselves if self._background_mqttc: time.sleep(1) else: self._mqttc.loop() else: # workaround for client to ack the publish. Otherwise, # it seems that if client disconnects quickly, broker # will not get the ack and publish the message again on # next connect. time.sleep(1) break messages = self._messages[topic][:] # Copy the list's contents self._messages[topic] = [] return messages[-limit:] if limit != 0 else messages
Listen to a topic and return a list of message payloads received within the specified time. Requires an async Subscribe to have been called previously. `topic` topic to listen to `timeout` duration to listen `limit` the max number of payloads that will be returned. Specify 0 for no limit Examples: Listen and get a list of all messages received within 5 seconds | ${messages}= | Listen | test/test | timeout=5 | limit=0 | Listen and get 1st message received within 60 seconds | @{messages}= | Listen | test/test | timeout=60 | limit=1 | | Length should be | ${messages} | 1 |
def profile(func):
    """
    Decorator to profile functions with cProfile

    Args:
        func: python function

    Returns:
        profile report

    References:
        https://osf.io/upav8/
    """
    def inner(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        res = func(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
        ps.print_stats()
        print(s.getvalue())
        return res
    return inner
Decorator to profile functions with cProfile Args: func: python function Returns: profile report References: https://osf.io/upav8/
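A usage sketch for the decorator above; it assumes `profile` and its imports (`cProfile`, `io`, `pstats`) are in scope, and the decorated function is just an illustrative stand-in.

@profile
def slow_sum(n):
    # Deliberately naive loop so the profiler has something to report.
    total = 0
    for i in range(n):
        total += i
    return total

slow_sum(100_000)   # prints a cProfile report sorted by cumulative time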
def _at_dump_context(self, calculator, rule, scope, block):
    """
    Implements @dump_context
    """
    sys.stderr.write("%s\n" % repr(rule.namespace._variables))
Implements @dump_context
def is_child_of_objective_bank(self, id_, objective_bank_id): """Tests if an objective bank is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: objective_bank_id (osid.id.Id): the ``Id`` of an objective bank return: (boolean) - ``true`` if the ``id`` is a child of ``objective_bank_id,`` ``false`` otherwise raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``id`` or ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_child_of_bin if self._catalog_session is not None: return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=objective_bank_id) return self._hierarchy_session.is_child(id_=objective_bank_id, child_id=id_)
Tests if an objective bank is a direct child of another. arg: id (osid.id.Id): an ``Id`` arg: objective_bank_id (osid.id.Id): the ``Id`` of an objective bank return: (boolean) - ``true`` if the ``id`` is a child of ``objective_bank_id,`` ``false`` otherwise raise: NotFound - ``objective_bank_id`` is not found raise: NullArgument - ``id`` or ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
def _prfx_getattr_(obj, item):
    """Replacement of __getattr__"""
    if item.startswith('f_') or item.startswith('v_'):
        return getattr(obj, item[2:])
    raise AttributeError('`%s` object has no attribute `%s`' %
                         (obj.__class__.__name__, item))
Replacement of __getattr__
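A small sketch of the prefix-stripping idea, assuming `_prfx_getattr_` from above is in scope; the `Node` class and its attribute are invented for illustration. Lookups beginning with `f_` or `v_` fall through to the unprefixed attribute.

class Node:
    def __init__(self):
        self.value = 42

    # Only invoked when normal attribute lookup fails, so 'value' is unaffected.
    __getattr__ = _prfx_getattr_

n = Node()
print(n.value)    # 42, regular attribute access
print(n.v_value)  # 42, the 'v_' prefix is stripped and 'value' is returned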
def rename(idf, objkey, objname, newname):
    """rename all the references to this objname"""
    refnames = getrefnames(idf, objkey)
    for refname in refnames:
        objlists = getallobjlists(idf, refname)
        # [('OBJKEY', refname, fieldindexlist), ...]
        for refname in refnames:
            # TODO : there seems to be a duplication in this loop. Check.
            # refname appears in both loops
            for robjkey, refname, fieldindexlist in objlists:
                idfobjects = idf.idfobjects[robjkey]
                for idfobject in idfobjects:
                    for findex in fieldindexlist:  # for each field
                        if idfobject[idfobject.objls[findex]] == objname:
                            idfobject[idfobject.objls[findex]] = newname
    theobject = idf.getobject(objkey, objname)
    fieldname = [item for item in theobject.objls if item.endswith('Name')][0]
    theobject[fieldname] = newname
    return theobject
rename all the references to this objname
def _buildTime(self, source, quantity, modifier, units): """ Take C{quantity}, C{modifier} and C{unit} strings and convert them into values. After converting, calcuate the time and return the adjusted sourceTime. @type source: time @param source: time to use as the base (or source) @type quantity: string @param quantity: quantity string @type modifier: string @param modifier: how quantity and units modify the source time @type units: string @param units: unit of the quantity (i.e. hours, days, months, etc) @rtype: struct_time @return: C{struct_time} of the calculated time """ if _debug: print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units) if source is None: source = time.localtime() if quantity is None: quantity = '' else: quantity = quantity.strip() if len(quantity) == 0: qty = 1 else: try: qty = int(quantity) except ValueError: qty = 0 if modifier in self.ptc.Modifiers: qty = qty * self.ptc.Modifiers[modifier] if units is None or units == '': units = 'dy' # plurals are handled by regex's (could be a bug tho) (yr, mth, dy, hr, mn, sec, _, _, _) = source start = datetime.datetime(yr, mth, dy, hr, mn, sec) target = start if units.startswith('y'): target = self.inc(start, year=qty) self.dateFlag = 1 elif units.endswith('th') or units.endswith('ths'): target = self.inc(start, month=qty) self.dateFlag = 1 else: if units.startswith('d'): target = start + datetime.timedelta(days=qty) self.dateFlag = 1 elif units.startswith('h'): target = start + datetime.timedelta(hours=qty) self.timeFlag = 2 elif units.startswith('m'): target = start + datetime.timedelta(minutes=qty) self.timeFlag = 2 elif units.startswith('s'): target = start + datetime.timedelta(seconds=qty) self.timeFlag = 2 elif units.startswith('w'): target = start + datetime.timedelta(weeks=qty) self.dateFlag = 1 return target.timetuple()
Take C{quantity}, C{modifier} and C{unit} strings and convert them into values. After converting, calculate the time and return the adjusted sourceTime. @type source: time @param source: time to use as the base (or source) @type quantity: string @param quantity: quantity string @type modifier: string @param modifier: how quantity and units modify the source time @type units: string @param units: unit of the quantity (i.e. hours, days, months, etc) @rtype: struct_time @return: C{struct_time} of the calculated time
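The unit handling above boils down to mapping a (quantity, unit) pair onto a datetime.timedelta (month and year increments are handled separately because they are not fixed-length). A minimal, self-contained sketch of that core step, with invented names and not tied to the parser's internals:

import datetime

# Illustrative mapping for fixed-length units only; the real code checks the
# 'month'/'year' cases first precisely because 'm' alone would be ambiguous.
UNIT_TO_KWARG = {'d': 'days', 'h': 'hours', 'm': 'minutes',
                 's': 'seconds', 'w': 'weeks'}

def build_time(source, qty, unit):
    kwarg = UNIT_TO_KWARG[unit[0]]
    return source + datetime.timedelta(**{kwarg: qty})

start = datetime.datetime(2020, 1, 1, 12, 0, 0)
print(build_time(start, 3, 'days'))    # 2020-01-04 12:00:00
print(build_time(start, -2, 'hours'))  # 2020-01-01 10:00:00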
def activate():
    """Enter into an environment with support for tab-completion

    This command drops you into a subshell, similar to the one generated
    via `be in ...`, except no topic is present and instead it enables
    tab-completion for supported shells.

    See documentation for further information.
    https://github.com/mottosso/be/wiki/cli

    """
    parent = lib.parent()

    try:
        cmd = lib.cmd(parent)
    except SystemError as exc:
        lib.echo(exc)
        sys.exit(lib.PROGRAM_ERROR)

    # Store reference to calling shell
    context = lib.context(root=_extern.cwd())
    context["BE_SHELL"] = parent

    if lib.platform() == "unix":
        context["BE_TABCOMPLETION"] = os.path.join(
            os.path.dirname(__file__), "_autocomplete.sh").replace("\\", "/")

    context.pop("BE_ACTIVE", None)

    sys.exit(subprocess.call(cmd, env=context))
Enter into an environment with support for tab-completion This command drops you into a subshell, similar to the one generated via `be in ...`, except no topic is present and instead it enables tab-completion for supported shells. See documentation for further information. https://github.com/mottosso/be/wiki/cli
def interface(iface):
    '''
    Return the details of `iface` or an error if it does not exist
    '''
    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('inet', '')
    else:
        return error
Return the details of `iface` or an error if it does not exist
def parseArguments(argv=None): # pragma: no cover """ I parse arguments in sys.argv and return the args object. The parser itself is available as args.parser. Adds the following members to args: parser = the parser object store_opt = the StoreOpt object """ store_opt = StoreOpt() parser = argparse.ArgumentParser( prog='green', usage='%(prog)s [options] [target [target2 ...]]', add_help=False, description=dedent( """ Green is a clean, colorful, fast test runner for Python unit tests. """.rstrip()), epilog=dedent( """ ENABLING SHELL COMPLETION To enable bash- or zsh-completion, add the line below to the end of your .bashrc or .zshrc file (or equivalent config file): which green >& /dev/null && source "$( green --completion-file )" Warning! Generating a completion list actually discovers and loads tests -- this can be very slow if you run it in huge directories! SETUP.PY RUNNER To run green as a setup.py command, simply add green to the 'setup_requires' section in the setup.py file, and specify a target as the 'test_suite' parameter if you do not want green to load all the tests: setup( setup_requires = ['green'], install_requires = 'myproject.tests' ) Then simply run green as any other setup.py command (it accepts the same parameters as the 'green' executable): python setup.py green python setup.py green -r # to run with coverage, etc. CONFIG FILES For documentation on config files, please see https://github.com/CleanCut/green#config-files """.rstrip()), formatter_class=argparse.RawDescriptionHelpFormatter) target_args = parser.add_argument_group("Target Specification") target_args.add_argument('targets', action='store', nargs='*', metavar='target', help=("""Targets to test. Any number of targets may be specified. If blank, then discover all testcases in the current directory tree. Can be a directory (or package), file (or module), or fully-qualified 'dotted name' like proj.tests.test_things.TestStuff. If a directory (or package) is specified, then we will attempt to discover all tests under the directory (even if the directory is a package and the tests would not be accessible through the package's scope). In all other cases, only tests accessible from introspection of the object will be loaded."""), default=argparse.SUPPRESS) concurrency_args = parser.add_argument_group("Concurrency Options") store_opt( concurrency_args.add_argument('-s', '--processes', action='store', type=int, metavar='NUM', help="Number of processes to use to run tests. Note that your " "tests need to be written to avoid using the same resources (temp " "files, sockets, ports, etc.) for the multi-process mode to work " "well (--initializer and --finalizer can help provision " "per-process resources). Default is to run the same number of " "processes as your machine has logical CPUs. Note that for a " "small number of trivial tests, running everything in a single " "process may be faster than the overhead of initializing all the " "processes.", default=argparse.SUPPRESS)) store_opt( concurrency_args.add_argument('-i', '--initializer', action='store', metavar='DOTTED_FUNCTION', help="Python function to run inside of a single worker process " "before it starts running tests. This is the way to provision " "external resources that each concurrent worker process needs to " "have exclusive access to. 
Specify the function in dotted " "notation in a way that will be importable from the location you " "are running green from.", default=argparse.SUPPRESS)) store_opt( concurrency_args.add_argument('-z', '--finalizer', action='store', metavar='DOTTED_FUNCTION', help="Same as --initializer, only run at the end of a worker " "process's lifetime. Used to unprovision resources provisioned by " "the initializer.", default=argparse.SUPPRESS)) format_args = parser.add_argument_group("Format Options") store_opt(format_args.add_argument('-t', '--termcolor', action='store_true', help="Force terminal colors on. Default is to autodetect.", default=argparse.SUPPRESS)) store_opt( format_args.add_argument('-T', '--notermcolor', action='store_true', help="Force terminal colors off. Default is to autodetect.", default=argparse.SUPPRESS)) store_opt( format_args.add_argument('-W', '--disable-windows', action='store_true', help="Disable Windows support by turning off Colorama", default=argparse.SUPPRESS)) out_args = parser.add_argument_group("Output Options") store_opt(out_args.add_argument('-a', '--allow-stdout', action='store_true', help=("Instead of capturing the stdout and stderr and presenting it " "in the summary of results, let it come through."), default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-q', '--quiet-stdout', action='store_true', help=("Instead of capturing the stdout and stderr and presenting it " "in the summary of results, discard it completly for successful " "tests. --allow-stdout option overrides it."), default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-k', '--no-skip-report', action='store_true', help=("Don't print the report of skipped tests " "after testing is done. Skips will still show up in the progress " "report and summary count."), default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-e', '--no-tracebacks', action='store_true', help=("Don't print tracebacks for failures and " "errors."), default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-h', '--help', action='store_true', help="Show this help message and exit.", default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-V', '--version', action='store_true', help="Print the version of Green and Python and exit.", default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-l', '--logging', action='store_true', help="Don't configure the root logger to redirect to /dev/null, " "enabling internal debugging output, as well as any output test (or " "tested) code may be sending via the root logger.", default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-d', '--debug', action='count', help=("Enable internal debugging statements. Implies --logging. Can " "be specified up to three times for more debug output."), default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-v', '--verbose', action='count', help=("Verbose. Can be specified up to three times for more " "verbosity. 
Recommended levels are -v and -vv."), default=argparse.SUPPRESS)) store_opt(out_args.add_argument('-U', '--disable-unidecode', action='store_true', help=("Disable unidecode which converts test output from unicode to" "ascii by default on Windows to avoid hard-to-debug crashes."), default=argparse.SUPPRESS)) other_args = parser.add_argument_group("Other Options") store_opt(other_args.add_argument('-f', '--failfast', action='store_true', help=("Stop execution at the first test that fails or errors."), default=argparse.SUPPRESS)) store_opt(other_args.add_argument('-c', '--config', action='store', metavar='FILE', help="Use this config file to override any values from " "the config file specified by environment variable GREEN_CONFIG, " "~/.green, and .green in the current working directory.", default=argparse.SUPPRESS)) store_opt(other_args.add_argument('-p', '--file-pattern', action='store', metavar='PATTERN', help="Pattern to match test files. Default is test*.py", default=argparse.SUPPRESS)) store_opt(other_args.add_argument('-n', '--test-pattern', action='store', metavar='PATTERN', help="Pattern to match test method names after " "'test'. Default is '*', meaning match methods named 'test*'.", default=argparse.SUPPRESS)) store_opt(other_args.add_argument('-j', '--junit-report', action='store', metavar="FILENAME", help=("Generate a JUnit XML report."), default=argparse.SUPPRESS)) cov_args = parser.add_argument_group( "Coverage Options ({})".format(coverage_version)) store_opt(cov_args.add_argument('-r', '--run-coverage', action='store_true', help=("Produce coverage output."), default=argparse.SUPPRESS)) store_opt(cov_args.add_argument('-g', '--cov-config-file', action='store', metavar='FILE', help=("Specify a coverage config file. " "Implies --run-coverage See the coverage documentation " "at https://coverage.readthedocs.io/en/v4.5.x/config.html " "for coverage config file syntax. The [run] and [report] sections " "are most relevant."), default=argparse.SUPPRESS)), store_opt(cov_args.add_argument('-R', '--quiet-coverage', action='store_true', help=("Do not print coverage report to stdout (coverage files will " "still be created). Implies --run-coverage"), default=argparse.SUPPRESS)) store_opt(cov_args.add_argument('-O', '--clear-omit', action='store_true', help=("Green tries really hard to set up a good list of patterns of " "files to omit from coverage reports. If the default list " "catches files that you DO want to cover you can specify this " "flag to leave the default list empty to start with. You can " "then add patterns back in with --omit-add. The default list is " "something like '*/test*,*/mock*,*(temp dir)*,*(python system " "packages)*' -- only longer."), default=argparse.SUPPRESS)) store_opt(cov_args.add_argument('-u', '--include-patterns', action='store', metavar='PATTERN', help=("Comma-separated file-patterns to include in coverage. This " "implies that anything that does not match the include pattern is " "omitted from coverage reporting."), default=argparse.SUPPRESS)) store_opt(cov_args.add_argument('-o', '--omit-patterns', action='store', metavar='PATTERN', help=("Comma-separated file-patterns to omit from coverage. For " "example, if coverage reported a file mypackage/foo/bar you could " "omit it from coverage with 'mypackage*', '*/foo/*', or '*bar'"), default=argparse.SUPPRESS)) store_opt(cov_args.add_argument('-m', '--minimum-coverage', action='store', metavar='X', type=int, help=("Integer. A minimum coverage value. 
If " "not met, then we will print a message and exit with a nonzero " "status. Implies --run-coverage"), default=argparse.SUPPRESS)) integration_args = parser.add_argument_group("Integration Options") store_opt(integration_args.add_argument('--completion-file', action='store_true', help=("Location of the bash- and zsh-completion " "file. To enable bash- or zsh-completion, see ENABLING SHELL " "COMPLETION below."), default=argparse.SUPPRESS)) store_opt(integration_args.add_argument('--completions', action='store_true', help=("Output possible completions of the given target. Used by " "bash- and zsh-completion."), default=argparse.SUPPRESS)) store_opt(integration_args.add_argument('--options', action='store_true', help="Output all options. Used by bash- and zsh-completion.", default=argparse.SUPPRESS)) args = parser.parse_args(argv) # Add additional members args.parser = parser args.store_opt = store_opt return args
I parse arguments in sys.argv and return the args object. The parser itself is available as args.parser. Adds the following members to args: parser = the parser object store_opt = the StoreOpt object
def to_dict(self):
    """
    Returns the model properties as a dict
    """
    result = {}

    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        else:
            result[attr] = value

    return result
Returns the model properties as a dict
def process_transport_command(self, header, message):
    """Parse a command coming in through the transport command subscription"""
    if not isinstance(message, dict):
        return
    relevant = False
    if "host" in message:
        # Filter by host
        if message["host"] != self.__hostid:
            return
        relevant = True
    if "service" in message:
        # Filter by service
        if message["service"] != self._service_class_name:
            return
        relevant = True
    if not relevant:
        # Ignore message unless at least one filter matches
        return
    if message.get("command"):
        self.log.info(
            "Received command '%s' via transport layer", message["command"]
        )
        if message["command"] == "shutdown":
            self.shutdown = True
    else:
        self.log.warning("Received invalid transport command message")
Parse a command coming in through the transport command subscription
def on_tool_finish(self, tool):
    """
    Called when an individual tool completes execution.

    :param tool: the name of the tool that completed
    :type tool: str
    """
    with self._lock:
        if tool in self.current_tools:
            self.current_tools.remove(tool)
            self.completed_tools.append(tool)
Called when an individual tool completes execution. :param tool: the name of the tool that completed :type tool: str
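A self-contained sketch of the same lock-guarded bookkeeping pattern, with made-up class and attribute names, showing why the lock matters when several tool callbacks arrive from worker threads at once:

import threading

class ProgressTracker:
    def __init__(self, tools):
        self._lock = threading.Lock()
        self.current_tools = list(tools)
        self.completed_tools = []

    def on_tool_finish(self, tool):
        # The lock keeps the remove/append pair atomic across threads.
        with self._lock:
            if tool in self.current_tools:
                self.current_tools.remove(tool)
                self.completed_tools.append(tool)

tracker = ProgressTracker(["pylint", "mccabe"])
threads = [threading.Thread(target=tracker.on_tool_finish, args=(t,))
           for t in ("pylint", "mccabe")]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(tracker.completed_tools)  # both names, in whichever order the threads finished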
def _load_mapping(self, mapping):
    """Load data for a single step."""
    mapping["oid_as_pk"] = bool(mapping.get("fields", {}).get("Id"))
    job_id, local_ids_for_batch = self._create_job(mapping)
    result = self._wait_for_job(job_id)
    # We store inserted ids even if some batches failed
    self._store_inserted_ids(mapping, job_id, local_ids_for_batch)
    return result
Load data for a single step.
def append(self, name, data, start):
    """
    Update timeout for all throttles

    :param name: name of throttle to append to ("read" or "write")
    :type name: :py:class:`str`
    :param data: bytes of data for count
    :type data: :py:class:`bytes`
    :param start: start of read/write time from
        :py:meth:`asyncio.BaseEventLoop.time`
    :type start: :py:class:`float`
    """
    for throttle in self.throttles.values():
        getattr(throttle, name).append(data, start)
Update timeout for all throttles :param name: name of throttle to append to ("read" or "write") :type name: :py:class:`str` :param data: bytes of data for count :type data: :py:class:`bytes` :param start: start of read/write time from :py:meth:`asyncio.BaseEventLoop.time` :type start: :py:class:`float`
def find_tf_idf(file_names=['./../test/testdata'],prev_file_path=None, dump_path=None): '''Function to create a TF-IDF list of dictionaries for a corpus of docs. If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding) and also re-generate a new tfidf list which overrides over an old one by mentioning its path. @Args: -- file_names : paths of files to be processed on, these files are created using twitter_streaming module. prev_file_path : path of old .tfidfpkl file, if available. (default=None) dump_path : directory-path where to dump generated lists.(default=None) @returns: -- df : a dict of unique words in corpus,with their document frequency as values. tf_idf : the generated tf-idf list of dictionaries for mentioned docs. ''' tf_idf = [] # will hold a dict of word_count for every doc(line in a doc in this case) df = defaultdict(int) # this statement is useful for altering existant tf-idf file and adding new docs in itself.(## memory is now the biggest issue) if prev_file_path: print(TAG,'modifying over exising file.. @',prev_file_path) df,tf_idf = pickle.load(open(prev_file_path,'rb')) prev_doc_count = len(df) prev_corpus_length = len(tf_idf) for f in file_names: # never use 'rb' for textual data, it creates something like, {b'line-inside-the-doc'} with open(f,'r') as file1: #create word_count dict for all docs for line in file1: wdict = defaultdict(int) #find the amount of doc a word is in for word in set(line.split()): df[word] +=1 #find the count of all words in every doc for word in line.split(): wdict[word] += 1 tf_idf.append(wdict) #calculating final TF-IDF values for all words in all docs(line is a doc in this case) for doc in tf_idf: for key in doc: true_idf = math.log(len(tf_idf)/df[key]) true_tf = doc[key]/float(len(doc)) doc[key] = true_tf * true_idf print(TAG,'Total number of unique words in corpus',len(df),'( '+paint('++'+str(len(df)-prev_doc_count),'g')+' )' if prev_file_path else '') print(TAG,'Total number of docs in corpus:',len(tf_idf),'( '+paint('++'+str(len(tf_idf)-prev_corpus_length),'g')+' )' if prev_file_path else '') # dump if a dir-path is given if dump_path: if dump_path[-8:] == 'tfidfpkl': pickle.dump((df,tf_idf),open(dump_path,'wb'),protocol=pickle.HIGHEST_PROTOCOL) print(TAG,'Dumping TF-IDF vars @',dump_path) return df,tf_idf
Function to create a TF-IDF list of dictionaries for a corpus of docs. If you opt for dumping the data, you can provide a file path with a .tfidfpkl extension (a convention used here for clarity), and you can also extend an existing tf-idf list, overriding the old one, by passing its path. @Args: -- file_names : paths of the files to be processed; these files are created using the twitter_streaming module. prev_file_path : path of an old .tfidfpkl file, if available. (default=None) dump_path : directory path where the generated lists are dumped. (default=None) @returns: -- df : a dict of the unique words in the corpus, with their document frequency as values. tf_idf : the generated tf-idf list of dictionaries for the given docs.
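The scoring above uses tf = count / doc_length and idf = log(N / df). A self-contained toy version of that computation over an in-memory corpus (no files, no pickling) makes the formulas concrete; the corpus is made up for illustration.

import math
from collections import defaultdict

corpus = ["the cat sat", "the dog sat", "the cat ran"]

df = defaultdict(int)
tf_idf = []
for line in corpus:
    wdict = defaultdict(int)
    for word in set(line.split()):
        df[word] += 1          # document frequency: docs containing the word
    for word in line.split():
        wdict[word] += 1       # term count within this doc
    tf_idf.append(wdict)

for doc in tf_idf:
    for key in doc:
        true_idf = math.log(len(tf_idf) / df[key])
        true_tf = doc[key] / float(len(doc))
        doc[key] = true_tf * true_idf

# 'the' appears in every doc, so its idf (and score) is 0;
# rarer words such as 'dog' and 'ran' score higher.
print(dict(tf_idf[1]))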
def add_dependency(self, name, obj):
    """Add a code dependency so it gets inserted into globals"""
    if name in self._deps:
        if self._deps[name] is obj:
            return
        raise ValueError(
            "There exists a different dep with the same name : %r" % name)
    self._deps[name] = obj
Add a code dependency so it gets inserted into globals
def get_auth_token_login_url( self, auth_token_ticket, authenticator, private_key, service_url, username, ): ''' Build an auth token login URL. See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details. ''' auth_token, auth_token_signature = self._build_auth_token_data( auth_token_ticket, authenticator, private_key, username=username, ) logging.debug('[CAS] AuthToken: {}'.format(auth_token)) url = self._get_auth_token_login_url( auth_token=auth_token, auth_token_signature=auth_token_signature, service_url=service_url, ) logging.debug('[CAS] AuthToken Login URL: {}'.format(url)) return url
Build an auth token login URL. See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details.
def search(self, value, createIndex=None):
    '''
    Full-text support; make sure that a text index already exists on the
    collection. Raises IndexNotFound if the text index does not exist.

    **Examples**: ``query.search('pecel lele', createIndex=['FullName', 'Username'])``
    '''
    if createIndex:
        self._createIndex = createIndex

    self._search = True
    self.filter(QueryExpression({'$text': {'$search': value}}))
    return self
Full-text support; make sure that a text index already exists on the collection. Raises IndexNotFound if the text index does not exist. **Examples**: ``query.search('pecel lele', createIndex=['FullName', 'Username'])``
def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:
    """Get a list of networks with the given identifiers and convert them to BEL graphs."""
    rv = [
        self.get_graph_by_id(network_id)
        for network_id in network_ids
    ]
    log.debug('returning graphs for network identifiers: %s', network_ids)
    return rv
Get a list of networks with the given identifiers and convert them to BEL graphs.
def _retry_deliveries(self): """ Handle [MQTT-4.4.0-1] by resending PUBLISH and PUBREL messages for pending out messages :return: """ self.logger.debug("Begin messages delivery retries") tasks = [] for message in itertools.chain(self.session.inflight_in.values(), self.session.inflight_out.values()): tasks.append(asyncio.wait_for(self._handle_message_flow(message), 10, loop=self._loop)) if tasks: done, pending = yield from asyncio.wait(tasks, loop=self._loop) self.logger.debug("%d messages redelivered" % len(done)) self.logger.debug("%d messages not redelivered due to timeout" % len(pending)) self.logger.debug("End messages delivery retries")
Handle [MQTT-4.4.0-1] by resending PUBLISH and PUBREL messages for pending out messages :return:
def choose(msg, items, attr):  # pragma: no cover
    """
    Command line helper to display a list of choices, asking the
    user to choose one of the options.
    """
    # Return the first item if there is only one choice
    if len(items) == 1:
        return items[0]

    # Print all choices to the command line
    print()
    for index, i in enumerate(items):
        name = attr(i) if callable(attr) else getattr(i, attr)
        print(' %s: %s' % (index, name))
    print()

    # Request choice from the user
    while True:
        try:
            inp = input('%s: ' % msg)
            if any(s in inp for s in (':', '::', '-')):
                idx = slice(*map(lambda x: int(x.strip()) if x.strip() else None,
                                 inp.split(':')))
                return items[idx]
            else:
                return items[int(inp)]
        except (ValueError, IndexError):
            pass
Command line helper to display a list of choices, asking the user to choose one of the options.
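The interesting bit above is that a range like "1:3" is turned into a Python slice. A tiny standalone demonstration of that parsing, with an invented item list and the interactive input replaced by a fixed string so it runs non-interactively:

items = ['alpha', 'beta', 'gamma', 'delta']

inp = '1:3'   # what a user might type instead of a single index
if any(s in inp for s in (':', '::', '-')):
    idx = slice(*map(lambda x: int(x.strip()) if x.strip() else None,
                     inp.split(':')))
    print(items[idx])        # ['beta', 'gamma']
else:
    print(items[int(inp)])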
def sub_channel(self):
    """Get the SUB socket channel object."""
    if self._sub_channel is None:
        self._sub_channel = self.sub_channel_class(self.context,
                                                   self.session,
                                                   (self.ip, self.iopub_port))
    return self._sub_channel
Get the SUB socket channel object.
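The accessor above is a standard lazy-initialization pattern: build the channel on first access, then reuse it. A generic, self-contained sketch of the same pattern, with a class and attribute invented for illustration:

class Client:
    def __init__(self):
        self._sub_channel = None
        self.connections_made = 0

    @property
    def sub_channel(self):
        # Created once, on first access; later accesses reuse the same object.
        if self._sub_channel is None:
            self.connections_made += 1
            self._sub_channel = object()   # stand-in for a real channel object
        return self._sub_channel

c = Client()
first = c.sub_channel
second = c.sub_channel
print(first is second, c.connections_made)   # True 1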
def per(arga, argb, prec=10): r""" Calculate percentage difference between numbers. If only two numbers are given, the percentage difference between them is computed. If two sequences of numbers are given (either two lists of numbers or Numpy vectors), the element-wise percentage difference is computed. If any of the numbers in the arguments is zero the value returned is the maximum floating-point number supported by the Python interpreter. :param arga: First number, list of numbers or Numpy vector :type arga: float, integer, list of floats or integers, or Numpy vector of floats or integers :param argb: Second number, list of numbers or or Numpy vector :type argb: float, integer, list of floats or integers, or Numpy vector of floats or integers :param prec: Maximum length of the fractional part of the result :type prec: integer :rtype: Float, list of floats or Numpy vector, depending on the arguments type :raises: * RuntimeError (Argument \`arga\` is not valid) * RuntimeError (Argument \`argb\` is not valid) * RuntimeError (Argument \`prec\` is not valid) * TypeError (Arguments are not of the same type) """ # pylint: disable=C0103,C0200,E1101,R0204 if not isinstance(prec, int): raise RuntimeError("Argument `prec` is not valid") a_type = 1 * _isreal(arga) + 2 * (isiterable(arga) and not isinstance(arga, str)) b_type = 1 * _isreal(argb) + 2 * (isiterable(argb) and not isinstance(argb, str)) if not a_type: raise RuntimeError("Argument `arga` is not valid") if not b_type: raise RuntimeError("Argument `argb` is not valid") if a_type != b_type: raise TypeError("Arguments are not of the same type") if a_type == 1: arga, argb = float(arga), float(argb) num_min, num_max = min(arga, argb), max(arga, argb) return ( 0 if _isclose(arga, argb) else ( sys.float_info.max if _isclose(num_min, 0.0) else round((num_max / num_min) - 1, prec) ) ) # Contortions to handle lists and Numpy arrays without explicitly # having to import numpy ret = copy.copy(arga) for num, (x, y) in enumerate(zip(arga, argb)): if not _isreal(x): raise RuntimeError("Argument `arga` is not valid") if not _isreal(y): raise RuntimeError("Argument `argb` is not valid") x, y = float(x), float(y) ret[num] = ( 0 if _isclose(x, y) else ( sys.float_info.max if _isclose(x, 0.0) or _isclose(y, 0) else (round((max(x, y) / min(x, y)) - 1, prec)) ) ) return ret
Calculate percentage difference between numbers. If only two numbers are given, the percentage difference between them is computed. If two sequences of numbers are given (either two lists of numbers or Numpy vectors), the element-wise percentage difference is computed. If any of the numbers in the arguments is zero the value returned is the maximum floating-point number supported by the Python interpreter. :param arga: First number, list of numbers or Numpy vector :type arga: float, integer, list of floats or integers, or Numpy vector of floats or integers :param argb: Second number, list of numbers or Numpy vector :type argb: float, integer, list of floats or integers, or Numpy vector of floats or integers :param prec: Maximum length of the fractional part of the result :type prec: integer :rtype: Float, list of floats or Numpy vector, depending on the arguments type :raises: * RuntimeError (Argument \`arga\` is not valid) * RuntimeError (Argument \`argb\` is not valid) * RuntimeError (Argument \`prec\` is not valid) * TypeError (Arguments are not of the same type)
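The core of the scalar branch above is round((max(a, b) / min(a, b)) - 1, prec), with 0 for (nearly) equal inputs and sys.float_info.max when the smaller value is zero. A standalone illustration of just that formula, not the full argument validation:

import sys

def per_scalar(a, b, prec=10):
    # Simplified scalar-only version of the formula used above.
    a, b = float(a), float(b)
    lo, hi = min(a, b), max(a, b)
    if abs(a - b) < 1e-12:
        return 0
    if abs(lo) < 1e-12:
        return sys.float_info.max
    return round((hi / lo) - 1, prec)

print(per_scalar(10, 12.5))   # 0.25 -> 25% difference
print(per_scalar(3, 3))       # 0
print(per_scalar(0, 5))       # sys.float_info.max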
def generalized_lsp_value_withtau(times, mags, errs, omega): '''Generalized LSP value for a single omega. This uses tau to provide an arbitrary time-reference point. The relations used are:: P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS) where: YC, YS, CC, and SS are all calculated at T and where: tan 2omegaT = 2*CS/(CC - SS) and where: Y = sum( w_i*y_i ) C = sum( w_i*cos(wT_i) ) S = sum( w_i*sin(wT_i) ) YY = sum( w_i*y_i*y_i ) - Y*Y YC = sum( w_i*y_i*cos(wT_i) ) - Y*C YS = sum( w_i*y_i*sin(wT_i) ) - Y*S CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) ) CC = CpC - C*C SS = (1 - CpC) - S*S CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S Parameters ---------- times,mags,errs : np.array The time-series to calculate the periodogram value for. omega : float The frequency to calculate the periodogram value at. Returns ------- periodogramvalue : float The normalized periodogram at the specified test frequency `omega`. ''' one_over_errs2 = 1.0/(errs*errs) W = npsum(one_over_errs2) wi = one_over_errs2/W sin_omegat = npsin(omega*times) cos_omegat = npcos(omega*times) sin2_omegat = sin_omegat*sin_omegat cos2_omegat = cos_omegat*cos_omegat sincos_omegat = sin_omegat*cos_omegat # calculate some more sums and terms Y = npsum( wi*mags ) C = npsum( wi*cos_omegat ) S = npsum( wi*sin_omegat ) CpS = npsum( wi*sincos_omegat ) CpC = npsum( wi*cos2_omegat ) CS = CpS - C*S CC = CpC - C*C SS = 1 - CpC - S*S # use SpS = 1 - CpC # calculate tau tan_omega_tau_top = 2.0*CS tan_omega_tau_bottom = CC - SS tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom tau = nparctan(tan_omega_tau)/(2.0*omega) # now we need to calculate all the bits at tau sin_omega_tau = npsin(omega*(times - tau)) cos_omega_tau = npcos(omega*(times - tau)) sin2_omega_tau = sin_omega_tau*sin_omega_tau cos2_omega_tau = cos_omega_tau*cos_omega_tau sincos_omega_tau = sin_omega_tau*cos_omega_tau C_tau = npsum(wi*cos_omega_tau) S_tau = npsum(wi*sin_omega_tau) CpS_tau = npsum( wi*sincos_omega_tau ) CpC_tau = npsum( wi*cos2_omega_tau ) CS_tau = CpS_tau - C_tau*S_tau CC_tau = CpC_tau - C_tau*C_tau SS_tau = 1 - CpC_tau - S_tau*S_tau # use SpS = 1 - CpC YpY = npsum( wi*mags*mags) YpC_tau = npsum( wi*mags*cos_omega_tau ) YpS_tau = npsum( wi*mags*sin_omega_tau ) # SpS = npsum( wi*sin2_omegat ) # the final terms YY = YpY - Y*Y YC_tau = YpC_tau - Y*C_tau YS_tau = YpS_tau - Y*S_tau periodogramvalue = (YC_tau*YC_tau/CC_tau + YS_tau*YS_tau/SS_tau)/YY return periodogramvalue
Generalized LSP value for a single omega. This uses tau to provide an arbitrary time-reference point. The relations used are:: P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS) where: YC, YS, CC, and SS are all calculated at T and where: tan 2omegaT = 2*CS/(CC - SS) and where: Y = sum( w_i*y_i ) C = sum( w_i*cos(wT_i) ) S = sum( w_i*sin(wT_i) ) YY = sum( w_i*y_i*y_i ) - Y*Y YC = sum( w_i*y_i*cos(wT_i) ) - Y*C YS = sum( w_i*y_i*sin(wT_i) ) - Y*S CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) ) CC = CpC - C*C SS = (1 - CpC) - S*S CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S Parameters ---------- times,mags,errs : np.array The time-series to calculate the periodogram value for. omega : float The frequency to calculate the periodogram value at. Returns ------- periodogramvalue : float The normalized periodogram at the specified test frequency `omega`.
def load_configuration(conf_path):
    """Load and validate test configuration.

    :param conf_path: path to YAML configuration file.
    :return: configuration as dict.
    """
    with open(conf_path) as f:
        conf_dict = yaml.load(f)
    validate_config(conf_dict)
    return conf_dict
Load and validate test configuration. :param conf_path: path to YAML configuration file. :return: configuration as dict.
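A self-contained sketch of the same load-then-validate flow; the schema check here is a deliberately trivial stand-in for validate_config, and yaml.safe_load is used in place of yaml.load purely to keep the example free of loader warnings.

import io
import yaml   # PyYAML

def validate_config(conf):
    # Minimal stand-in validation: require a 'tests' list.
    if not isinstance(conf.get('tests'), list):
        raise ValueError("configuration must contain a 'tests' list")

def load_configuration(stream):
    conf_dict = yaml.safe_load(stream)
    validate_config(conf_dict)
    return conf_dict

sample = io.StringIO("tests:\n  - name: smoke\n    timeout: 30\n")
print(load_configuration(sample))
# {'tests': [{'name': 'smoke', 'timeout': 30}]}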
def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_firmware_version = ET.Element("show_firmware_version")
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, "output")
    show_firmware_version = ET.SubElement(output, "show-firmware-version")
    node_info = ET.SubElement(show_firmware_version, "node-info")
    firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
    application_name = ET.SubElement(firmware_version_info, "application-name")
    application_name.text = kwargs.pop('application_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def get(self, requirement):
    """Find packages matching ``requirement``.

    :param requirement: Requirement to match against repository packages.
    :type requirement: `str` or :class:`.Requirement`
    :returns: :func:`list` of matching :class:`.Package` objects.
    """
    if isinstance(requirement, basestring):
        requirement = Requirement.parse(requirement)
    return sorted(p for p in self.packages
                  if requirement.name == p.name and requirement.match(p))
Find packages matching ``requirement``. :param requirement: Requirement to match against repository packages. :type requirement: `str` or :class:`.Requirement` :returns: :func:`list` of matching :class:`.Package` objects.
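A hedged sketch of the same idea using the packaging library rather than this project's own Requirement class (so the API below is packaging's, and the in-memory "repository" is invented): parse a specifier string, then keep only the packages whose name and version satisfy it.

from packaging.requirements import Requirement

# Illustrative in-memory "repository": (name, version) pairs.
packages = [("demo", "1.0"), ("demo", "1.2.1"), ("demo", "2.0"), ("other", "1.2")]

req = Requirement("demo>=1.1,<2")
matches = [(name, version) for name, version in packages
           if name == req.name and version in req.specifier]
print(matches)   # [('demo', '1.2.1')]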
def format_op_row(ipFile, totLines, totWords, uniqueWords):
    """
    Format the output row with stats
    """
    txt = os.path.basename(ipFile).ljust(36) + ' '
    txt += str(totLines).rjust(7) + ' '
    txt += str(totWords).rjust(7) + ' '
    txt += str(len(uniqueWords)).rjust(7) + ' '
    return txt
Format the output row with stats
def create(cls, cli, management_address, local_username=None, local_password=None, remote_username=None, remote_password=None, connection_type=None): """ Configures a remote system for remote replication. :param cls: this class. :param cli: the rest client. :param management_address: the management IP address of the remote system. :param local_username: administrative username of local system. :param local_password: administrative password of local system. :param remote_username: administrative username of remote system. :param remote_password: administrative password of remote system. :param connection_type: `ReplicationCapabilityEnum`. Replication connection type to the remote system. :return: the newly created remote system. """ req_body = cli.make_body( managementAddress=management_address, localUsername=local_username, localPassword=local_password, remoteUsername=remote_username, remotePassword=remote_password, connectionType=connection_type) resp = cli.post(cls().resource_class, **req_body) resp.raise_if_err() return cls.get(cli, resp.resource_id)
Configures a remote system for remote replication. :param cls: this class. :param cli: the rest client. :param management_address: the management IP address of the remote system. :param local_username: administrative username of local system. :param local_password: administrative password of local system. :param remote_username: administrative username of remote system. :param remote_password: administrative password of remote system. :param connection_type: `ReplicationCapabilityEnum`. Replication connection type to the remote system. :return: the newly created remote system.
def _initialize_likelihood_prior(self, positions, log_likelihoods, log_priors): """Initialize the likelihood and the prior using the given positions. This is a general method for computing the log likelihoods and log priors for given positions. Subclasses can use this to instantiate secondary chains as well. """ func = SimpleCLFunction.from_string(''' void compute(global mot_float_type* chain_position, global mot_float_type* log_likelihood, global mot_float_type* log_prior, local mot_float_type* x_tmp, void* data){ bool is_first_work_item = get_local_id(0) == 0; if(is_first_work_item){ for(uint i = 0; i < ''' + str(self._nmr_params) + '''; i++){ x_tmp[i] = chain_position[i]; } *log_prior = _computeLogPrior(x_tmp, data); } barrier(CLK_LOCAL_MEM_FENCE); *log_likelihood = _computeLogLikelihood(x_tmp, data); } ''', dependencies=[self._get_log_prior_cl_func(), self._get_log_likelihood_cl_func()]) kernel_data = { 'chain_position': Array(positions, 'mot_float_type', mode='rw', ensure_zero_copy=True), 'log_likelihood': Array(log_likelihoods, 'mot_float_type', mode='rw', ensure_zero_copy=True), 'log_prior': Array(log_priors, 'mot_float_type', mode='rw', ensure_zero_copy=True), 'x_tmp': LocalMemory('mot_float_type', self._nmr_params), 'data': self._data } func.evaluate(kernel_data, self._nmr_problems, use_local_reduction=all(env.is_gpu for env in self._cl_runtime_info.cl_environments), cl_runtime_info=self._cl_runtime_info)
Initialize the likelihood and the prior using the given positions. This is a general method for computing the log likelihoods and log priors for given positions. Subclasses can use this to instantiate secondary chains as well.