Dataset columns (name: type, value-length range):
repository_name: string, length 5 to 67
func_path_in_repository: string, length 4 to 234
func_name: string, length 0 to 314
whole_func_string: string, length 52 to 3.87M
language: categorical, 6 classes
func_code_string: string, length 52 to 3.87M
func_documentation_string: string, length 1 to 47.2k
func_code_url: string, length 85 to 339
dnephin/PyStaticConfiguration
staticconf/config.py
get_namespaces_from_names
def get_namespaces_from_names(name, all_names):
    """Return a generator which yields namespace objects."""
    names = configuration_namespaces.keys() if all_names else [name]
    for name in names:
        yield get_namespace(name)
python
Return a generator which yields namespace objects.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L181-L185
dnephin/PyStaticConfiguration
staticconf/config.py
get_namespace
def get_namespace(name):
    """Return a :class:`ConfigNamespace` by name, creating the
    namespace if it does not exist.
    """
    if name not in configuration_namespaces:
        configuration_namespaces[name] = ConfigNamespace(name)
    return configuration_namespaces[name]
python
Return a :class:`ConfigNamespace` by name, creating the namespace if it does not exist.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L188-L194
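A small sketch of the create-or-return behaviour above ('my_service' is a hypothetical namespace name):

from staticconf import config

ns = config.get_namespace('my_service')            # created on first access
assert config.get_namespace('my_service') is ns    # same object on later calls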
dnephin/PyStaticConfiguration
staticconf/config.py
reload
def reload(name=DEFAULT, all_names=False):
    """Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
    :mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
    pickup the latest values in the namespace.

    Defaults to reloading just the DEFAULT namespace.

    :param name: the name of the :class:`ConfigNamespace` to reload
    :param all_names: If True, reload all namespaces, and ignore `name`
    """
    for namespace in get_namespaces_from_names(name, all_names):
        for value_proxy in namespace.get_value_proxies():
            value_proxy.reset()
python
Reload one or all :class:`ConfigNamespace`. Reload clears the cache of :mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to pickup the latest values in the namespace. Defaults to reloading just the DEFAULT namespace. :param name: the name of the :class:`ConfigNamespace` to reload :param all_names: If True, reload all namespaces, and ignore `name`
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L197-L209
dnephin/PyStaticConfiguration
staticconf/config.py
validate
def validate(name=DEFAULT, all_names=False):
    """Validate all registered keys after loading configuration.

    Missing values or values which do not pass validation raise
    :class:`staticconf.errors.ConfigurationError`. By default only validates
    the `DEFAULT` namespace.

    :param name: the namespace to validate
    :type name: string
    :param all_names: if True validates all namespaces and ignores `name`
    :type all_names: boolean
    """
    for namespace in get_namespaces_from_names(name, all_names):
        all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
python
Validate all registered keys after loading configuration. Missing values or values which do not pass validation raise :class:`staticconf.errors.ConfigurationError`. By default only validates the `DEFAULT` namespace. :param name: the namespace to validate :type name: string :param all_names: if True validates all namespaces and ignores `name` :type all_names: boolean
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L212-L225
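A minimal usage sketch for validate, assuming staticconf's public DictConfiguration loader and get_int getter; the key name and value are hypothetical:

import staticconf
from staticconf import config, errors

max_size = staticconf.get_int('max_size')           # registers a value proxy
staticconf.DictConfiguration({'max_size': 'nope'})  # load a value that cannot validate
try:
    config.validate()                               # forces every registered proxy to validate
except errors.ConfigurationError as e:
    print(e)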
dnephin/PyStaticConfiguration
staticconf/config.py
has_duplicate_keys
def has_duplicate_keys(config_data, base_conf, raise_error):
    """Compare two dictionaries for duplicate keys. if raise_error is True
    then raise on exception, otherwise log return True."""
    duplicate_keys = set(base_conf) & set(config_data)
    if not duplicate_keys:
        return
    msg = "Duplicate keys in config: %s" % duplicate_keys
    if raise_error:
        raise errors.ConfigurationError(msg)
    log.info(msg)
    return True
python
Compare two dictionaries for duplicate keys. If raise_error is True, raise an exception; otherwise log the duplicates and return True.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L276-L286
dnephin/PyStaticConfiguration
staticconf/config.py
build_compare_func
def build_compare_func(err_logger=None):
    """Returns a compare_func that can be passed to MTimeComparator.

    The returned compare_func first tries os.path.getmtime(filename),
    then calls err_logger(filename) if that fails. If err_logger is None,
    then it does nothing. err_logger is always called within the context of
    an OSError raised by os.path.getmtime(filename). Information on this
    error can be retrieved by calling sys.exc_info inside of err_logger."""
    def compare_func(filename):
        try:
            return os.path.getmtime(filename)
        except OSError:
            if err_logger is not None:
                err_logger(filename)
        return -1

    return compare_func
python
Returns a compare_func that can be passed to MTimeComparator. The returned compare_func first tries os.path.getmtime(filename), then calls err_logger(filename) if that fails. If err_logger is None, then it does nothing. err_logger is always called within the context of an OSError raised by os.path.getmtime(filename). Information on this error can be retrieved by calling sys.exc_info inside of err_logger.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L436-L451
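A usage sketch for build_compare_func as defined above; the logger setup and file path are illustrative only:

import logging
import sys

log = logging.getLogger(__name__)

def log_stat_failure(filename):
    # invoked inside the OSError handler, so sys.exc_info() describes the error
    log.warning("could not stat %s: %s", filename, sys.exc_info()[1])

compare = build_compare_func(err_logger=log_stat_failure)
print(compare('/no/such/file.yaml'))   # logs a warning and returns -1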
dnephin/PyStaticConfiguration
staticconf/config.py
ConfigNamespace.get_config_dict
def get_config_dict(self):
    """Reconstruct the nested structure of this object's configuration
    and return it as a dict.
    """
    config_dict = {}
    for dotted_key, value in self.get_config_values().items():
        subkeys = dotted_key.split('.')
        d = config_dict
        for key in subkeys:
            d = d.setdefault(key, value if key == subkeys[-1] else {})
    return config_dict
python
Reconstruct the nested structure of this object's configuration and return it as a dict.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L114-L124
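The same dotted-key reconstruction on plain data, as a standalone sketch:

flat = {'db.host': 'localhost', 'db.port': 5432, 'debug': True}

config_dict = {}
for dotted_key, value in flat.items():
    subkeys = dotted_key.split('.')
    d = config_dict
    for key in subkeys:
        # leaf keys get the value, intermediate keys get (or reuse) a dict
        d = d.setdefault(key, value if key == subkeys[-1] else {})

assert config_dict == {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}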
dnephin/PyStaticConfiguration
staticconf/config.py
ConfigHelp.view_help
def view_help(self):
    """Return a help message describing all the statically configured keys.
    """
    def format_desc(desc):
        return "%s (Type: %s, Default: %s)\n%s" % (
            desc.name,
            desc.validator.__name__.replace('validate_', ''),
            desc.default,
            desc.help or '')

    def format_namespace(key, desc_list):
        return "\nNamespace: %s\n%s" % (
            key,
            '\n'.join(sorted(format_desc(desc) for desc in desc_list)))

    def namespace_cmp(item):
        name, _ = item
        return chr(0) if name == DEFAULT else name

    return '\n'.join(format_namespace(*desc) for desc in
                     sorted(six.iteritems(self.descriptions), key=namespace_cmp))
python
Return a help message describing all the statically configured keys.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L238-L259
dnephin/PyStaticConfiguration
staticconf/config.py
ConfigurationWatcher.reload_if_changed
def reload_if_changed(self, force=False):
    """If the file(s) being watched by this object have changed,
    their configuration will be loaded again using `config_loader`.
    Otherwise this is a noop.

    :param force: If True ignore the `min_interval` and proceed to file
                  modified comparisons. To force a reload use :func:`reload`
                  directly.
    """
    if (force or self.should_check) and self.file_modified():
        return self.reload()
python
If the file(s) being watched by this object have changed, their configuration will be loaded again using `config_loader`. Otherwise this is a noop. :param force: If True ignore the `min_interval` and proceed to file modified comparisons. To force a reload use :func:`reload` directly.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L369-L379
dnephin/PyStaticConfiguration
staticconf/config.py
ConfigFacade.load
def load(
    cls,
    filename,
    namespace,
    loader_func,
    min_interval=0,
    comparators=None,
):
    """Create a new :class:`ConfigurationWatcher` and load the initial
    configuration by calling `loader_func`.

    :param filename: a filename or list of filenames to monitor for changes
    :param namespace: the name of a namespace to use when loading
                      configuration. All config data from `filename` will
                      end up in a :class:`ConfigNamespace` with this name
    :param loader_func: a function which accepts two arguments and uses loader
                        functions from :mod:`staticconf.loader` to load
                        configuration data into a namespace. The arguments
                        are `filename` and `namespace`
    :param min_interval: minimum number of seconds to wait between calls to
                         :func:`os.path.getmtime` to check if a file has
                         been modified.
    :param comparators: a list of classes which support the
                        :class:`IComparator` interface which are used to
                        determine if a config file has been modified. See
                        ConfigurationWatcher::__init__.
    :returns: a :class:`ConfigFacade`
    """
    watcher = ConfigurationWatcher(
        build_loader_callable(loader_func, filename, namespace=namespace),
        filename,
        min_interval=min_interval,
        reloader=ReloadCallbackChain(namespace=namespace),
        comparators=comparators,
    )
    watcher.load_config()
    return cls(watcher)
python
Create a new :class:`ConfigurationWatcher` and load the initial configuration by calling `loader_func`. :param filename: a filename or list of filenames to monitor for changes :param namespace: the name of a namespace to use when loading configuration. All config data from `filename` will end up in a :class:`ConfigNamespace` with this name :param loader_func: a function which accepts two arguments and uses loader functions from :mod:`staticconf.loader` to load configuration data into a namespace. The arguments are `filename` and `namespace` :param min_interval: minimum number of seconds to wait between calls to :func:`os.path.getmtime` to check if a file has been modified. :param comparators: a list of classes which support the :class:`IComparator` interface which are used to determine if a config file has been modified. See ConfigurationWatcher::__init__. :returns: a :class:`ConfigFacade`
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L585-L620
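A minimal usage sketch; the YAML path and namespace name are hypothetical, and it assumes staticconf.YamlConfiguration as the loader_func and that the facade forwards reload_if_changed to its watcher:

import staticconf
from staticconf.config import ConfigFacade

facade = ConfigFacade.load(
    'config.yaml',                 # hypothetical file to watch
    'my_namespace',                # target ConfigNamespace name
    staticconf.YamlConfiguration,  # called as loader_func(filename, namespace=...)
)

# somewhere in the application's periodic work:
facade.reload_if_changed()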
dnephin/PyStaticConfiguration
staticconf/validation.py
_validate_iterable
def _validate_iterable(iterable_type, value):
    """Convert the iterable to iterable_type, or raise a Configuration
    exception.
    """
    if isinstance(value, six.string_types):
        msg = "Invalid iterable of type(%s): %s"
        raise ValidationError(msg % (type(value), value))

    try:
        return iterable_type(value)
    except TypeError:
        raise ValidationError("Invalid iterable: %s" % (value))
python
Convert the iterable to iterable_type, or raise a Configuration exception.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L90-L101
dnephin/PyStaticConfiguration
staticconf/validation.py
build_list_type_validator
def build_list_type_validator(item_validator):
    """Return a function which validates that the value is a list of items
    which are validated using item_validator.
    """
    def validate_list_of_type(value):
        return [item_validator(item) for item in validate_list(value)]
    return validate_list_of_type
python
Return a function which validates that the value is a list of items which are validated using item_validator.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L123-L129
dnephin/PyStaticConfiguration
staticconf/validation.py
build_map_type_validator
def build_map_type_validator(item_validator):
    """Return a function which validates that the value is a mapping of
    items. The function should return pairs of items that will be
    passed to the `dict` constructor.
    """
    def validate_mapping(value):
        return dict(item_validator(item) for item in validate_list(value))
    return validate_mapping
python
Return a function which validates that the value is a mapping of items. The function should return pairs of items that will be passed to the `dict` constructor.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/validation.py#L132-L139
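A usage sketch for build_map_type_validator as defined above; the 'host:port' item format and validator are hypothetical:

def validate_host_port(item):
    host, _, port = item.partition(':')
    return host, int(port)   # a (key, value) pair for the dict constructor

validate_hosts = build_map_type_validator(validate_host_port)
assert validate_hosts(['db:5432', 'cache:6379']) == {'db': 5432, 'cache': 6379}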
dnephin/PyStaticConfiguration
staticconf/getters.py
register_value_proxy
def register_value_proxy(namespace, value_proxy, help_text):
    """Register a value proxy with the namespace, and add the help_text."""
    namespace.register_proxy(value_proxy)
    config.config_help.add(
        value_proxy.config_key, value_proxy.validator, value_proxy.default,
        namespace.get_name(), help_text)
python
Register a value proxy with the namespace, and add the help_text.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/getters.py#L68-L73
dnephin/PyStaticConfiguration
staticconf/getters.py
build_getter
def build_getter(validator, getter_namespace=None):
    """Create a getter function for retrieving values from the config cache.
    Getters will default to the DEFAULT namespace.
    """
    def proxy_register(key_name, default=UndefToken, help=None, namespace=None):
        name = namespace or getter_namespace or config.DEFAULT
        namespace = config.get_namespace(name)
        return proxy_factory.build(validator, namespace, key_name, default, help)

    return proxy_register
python
Create a getter function for retrieving values from the config cache. Getters will default to the DEFAULT namespace.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/getters.py#L101-L110
dnephin/PyStaticConfiguration
staticconf/getters.py
ProxyFactory.build
def build(self, validator, namespace, config_key, default, help):
    """Build or retrieve a ValueProxy from the attributes. Proxies are
    keyed using a repr because default values can be mutable types.
    """
    proxy_attrs = validator, namespace, config_key, default
    proxy_key = repr(proxy_attrs)
    if proxy_key in self.proxies:
        return self.proxies[proxy_key]

    value_proxy = proxy.ValueProxy(*proxy_attrs)
    register_value_proxy(namespace, value_proxy, help)
    return self.proxies.setdefault(proxy_key, value_proxy)
python
Build or retrieve a ValueProxy from the attributes. Proxies are keyed using a repr because default values can be mutable types.
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/getters.py#L84-L95
andim/noisyopt
noisyopt/main.py
minimizeCompass
def minimizeCompass(func, x0, args=(), bounds=None, scaling=None,
                    redfactor=2.0, deltainit=1.0, deltatol=1e-3, feps=1e-15,
                    errorcontrol=True, funcNinit=30, funcmultfactor=2.0,
                    paired=True, alpha=0.05, disp=False, callback=None,
                    **kwargs):
    """
    Minimization of an objective function by a pattern search.

    The algorithm does a compass search along coordinate directions.
    If `errorcontrol=True` then the function is called repeatedly to average
    over the stochasticity in the function evaluation. The number of
    evaluations over which to average is adapted dynamically to ensure
    convergence.

    The algorithm terminates when the current iterate is locally optimally at
    the target pattern size deltatol or when the function value differs by
    less than the tolerance feps along all directions.

    Parameters
    ----------
    func: callable
        objective function to be minimized: called as `func(x, *args)`,
        if `paired=True`, then called with keyword argument `seed` additionally
    x0: array-like
        starting point
    args: tuple
        extra arguments to be supplied to func
    bounds: array-like
        bounds on the variables
    scaling: array-like
        scaling by which to multiply step size and tolerances along different
        dimensions
    redfactor: float
        reduction factor by which to reduce delta if no reduction direction found
    deltainit: float
        initial pattern size
    deltatol: float
        target pattern size, function differences at this scale need to be
        larger than stochasticitiy in evaluations to ensure convergence if
        `errorcontrol=False`
    feps: float
        smallest difference in function value to resolve
    errorcontrol: boolean
        whether to control error of simulation by repeated sampling
    funcNinit: int, only for errorcontrol=True
        initial number of iterations to use for the function, do not set much
        lower than 30 as otherwise there is no sufficient statistics for
        function comparisons
    funcmultfactor: float, only for errorcontrol=True
        multiplication factor by which to increase number of iterations of
        function
    paired: boolean, only for errorcontrol=True
        compare for same random seeds
    alpha: float, only for errorcontrol=True
        significance level of tests, the higher this value the more statistics
        is acquired, which decreases the risk of taking a step in a non-descent
        direction at the expense of higher computational cost per iteration
    disp: boolean
        whether to output status updates during the optimization
    callback: callable
        called after each iteration, as callback(xk), where xk is the current
        parameter vector.

    Returns
    -------
    scipy.optimize.OptimizeResult object
    special entry: free
        Boolean array indicating parameters that are unconstrained at the
        optimum (within feps)
    """
    #TODO: implement variable deltas for different directions (might speed up things, see review)
    if disp:
        print('minimization starting')
        print('args', args)
        print('errorcontrol', errorcontrol)
        print('paired', paired)
    # absolute tolerance for float comparisons
    floatcompatol = 1e-14
    x0 = np.asarray(x0)
    if scaling is None:
        scaling = np.ones(x0.shape)
    else:
        scaling = np.asarray(scaling)
    # ensure initial point lies within bounds
    if bounds is not None:
        bounds = np.asarray(bounds)
        np.clip(x0, bounds[:, 0], bounds[:, 1], out=x0)

    def clip(x, d):
        """clip x+d to respect bounds
        returns clipped result and effective distance"""
        xnew = x + d
        if bounds is not None:
            # if test point depasses set to boundary instead
            xclipped = np.clip(xnew, bounds[:, 0], bounds[:, 1])
            deltaeff = np.abs(x - xclipped).sum()
            return xclipped, deltaeff
        else:
            return xnew, delta

    # generate set of search directions (+- s_i e_i | i = 1, ..., N)
    def unit(i, N):
        "return ith unit vector in R^N"
        arr = np.zeros(N)
        arr[i] = 1.0
        return arr

    N = len(x0)
    generatingset = [unit(i, N)*direction*scaling[i]
                     for i in np.arange(N) for direction in [+1, -1]]

    # memoize function
    if errorcontrol:
        funcm = AveragedFunction(
            func, fargs=args, paired=paired, N=funcNinit)
        # apply Bonferroni correction to confidence level
        # (need more statistics in higher dimensions)
        alpha = alpha/len(generatingset)
    else:
        # freeze function arguments
        def funcf(x, **kwargs):
            return func(x, *args, **kwargs)
        funcm = _memoized(funcf)

    x = x0
    delta = deltainit
    # number of iterations
    nit = 0
    # continue as long as delta is larger than tolerance
    # or if there was an update during the last iteration
    found = False
    while delta >= deltatol-floatcompatol or found:
        nit += 1
        # if delta gets close to deltatol, do iteration with step size deltatol instead
        if delta/redfactor < deltatol:
            delta = deltatol
        if disp:
            print('nit %i, Delta %g' % (nit, delta))
        found = False
        np.random.shuffle(generatingset)
        for d in generatingset:
            xtest, deltaeff = clip(x, delta*d)
            if deltaeff < floatcompatol:
                continue
            # Does xtest improve upon previous function value?
            if ((not errorcontrol and (funcm(xtest) < funcm(x)-feps))
                    or (errorcontrol
                        and funcm.test(xtest, x, type_='smaller', alpha=alpha))):
                x = xtest
                found = True
                if disp:
                    print(x)
            # Is non-improvement due to too large step size or missing statistics?
            elif ((deltaeff >= deltatol*np.sum(np.abs(d)))  # no refinement for boundary steps smaller than tolerance
                    and ((not errorcontrol and (funcm(xtest) < funcm(x)+feps))
                         or (errorcontrol
                             and funcm.test(xtest, x, type_='equality', alpha=alpha)
                             and (funcm.diffse(xtest, x) > feps)))):
                # If there is no significant difference the step size might
                # correspond to taking a step to the other side of the minimum.
                # Therefore test if middle point is better
                xmid = 0.5*(x+xtest)
                if ((not errorcontrol and funcm(xmid) < funcm(x)-feps)
                        or (errorcontrol
                            and funcm.test(xmid, x, type_='smaller', alpha=alpha))):
                    x = xmid
                    delta /= redfactor
                    found = True
                    if disp:
                        print('mid', x)
                # otherwise increase accuracy of simulation to try to get to significance
                elif errorcontrol:
                    funcm.N *= funcmultfactor
                    if disp:
                        print('new N %i' % funcm.N)
                    found = True
        if callback is not None:
            callback(x)
        if not found:
            delta /= redfactor
    message = 'convergence within deltatol'
    # check if any of the directions are free at the optimum
    delta = deltatol
    free = np.zeros(x.shape, dtype=bool)
    for d in generatingset:
        dim = np.argmax(np.abs(d))
        xtest, deltaeff = clip(x, delta*d)
        if deltaeff < deltatol*np.sum(np.abs(d))-floatcompatol:
            # do not consider as free for boundary steps
            continue
        if not free[dim] and (((not errorcontrol and funcm(xtest) - feps < funcm(x))
                               or (errorcontrol
                                   and funcm.test(xtest, x, type_='equality', alpha=alpha)
                                   and (funcm.diffse(xtest, x) < feps)))):
            free[dim] = True
            message += '. dim %i is free at optimum' % dim
    reskwargs = dict(x=x, nit=nit, nfev=funcm.nev, message=message, free=free,
                     success=True)
    if errorcontrol:
        f, funse = funcm(x)
        res = OptimizeResult(fun=f, funse=funse, **reskwargs)
    else:
        f = funcm(x)
        res = OptimizeResult(fun=f, **reskwargs)
    if disp:
        print(res)
    return res
python
Minimization of an objective function by a pattern search. The algorithm does a compass search along coordinate directions. If `errorcontrol=True` then the function is called repeatedly to average over the stochasticity in the function evaluation. The number of evaluations over which to average is adapted dynamically to ensure convergence. The algorithm terminates when the current iterate is locally optimally at the target pattern size deltatol or when the function value differs by less than the tolerance feps along all directions. Parameters ---------- func: callable objective function to be minimized: called as `func(x, *args)`, if `paired=True`, then called with keyword argument `seed` additionally x0: array-like starting point args: tuple extra arguments to be supplied to func bounds: array-like bounds on the variables scaling: array-like scaling by which to multiply step size and tolerances along different dimensions redfactor: float reduction factor by which to reduce delta if no reduction direction found deltainit: float initial pattern size deltatol: float target pattern size, function differences at this scale need to be larger than stochasticitiy in evaluations to ensure convergence if `errorcontrol=False` feps: float smallest difference in function value to resolve errorcontrol: boolean whether to control error of simulation by repeated sampling funcNinit: int, only for errorcontrol=True initial number of iterations to use for the function, do not set much lower than 30 as otherwise there is no sufficient statistics for function comparisons funcmultfactor: float, only for errorcontrol=True multiplication factor by which to increase number of iterations of function paired: boolean, only for errorcontrol=True compare for same random seeds alpha: float, only for errorcontrol=True significance level of tests, the higher this value the more statistics is acquired, which decreases the risk of taking a step in a non-descent direction at the expense of higher computational cost per iteration disp: boolean whether to output status updates during the optimization callback: callable called after each iteration, as callback(xk), where xk is the current parameter vector. Returns ------- scipy.optimize.OptimizeResult object special entry: free Boolean array indicating parameters that are unconstrained at the optimum (within feps)
https://github.com/andim/noisyopt/blob/91a748f59acc357622eb4feb58057f8414de7b90/noisyopt/main.py#L59-L256
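A usage sketch on a noisy quadratic; the objective and settings are illustrative, and with paired=False the objective does not need to accept a seed argument:

import numpy as np
from noisyopt.main import minimizeCompass

def obj(x):
    # noisy quadratic with minimum at (1, 2)
    return (x[0] - 1)**2 + (x[1] - 2)**2 + 0.01 * np.random.randn()

res = minimizeCompass(obj, x0=np.array([0.0, 0.0]), deltatol=0.1, paired=False)
print(res.x, res.fun)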
andim/noisyopt
noisyopt/main.py
minimizeSPSA
def minimizeSPSA(func, x0, args=(), bounds=None, niter=100, paired=True,
                 a=1.0, alpha=0.602, c=1.0, gamma=0.101,
                 disp=False, callback=None):
    """
    Minimization of an objective function by a simultaneous perturbation
    stochastic approximation algorithm.

    This algorithm approximates the gradient of the function by finite
    differences along stochastic directions Deltak. The elements of Deltak are
    drawn from +- 1 with probability one half. The gradient is approximated
    from the symmetric difference f(xk + ck*Deltak) - f(xk - ck*Deltak), where
    the evaluation step size ck is scaled according ck = c/(k+1)**gamma.
    The algorithm takes a step of size ak = a/(0.01*niter+k+1)**alpha along the
    negative gradient.

    See Spall, IEEE, 1998, 34, 817-823 for guidelines about how to choose the
    algorithm's parameters (a, alpha, c, gamma).

    Parameters
    ----------
    func: callable
        objective function to be minimized: called as `func(x, *args)`,
        if `paired=True`, then called with keyword argument `seed` additionally
    x0: array-like
        initial guess for parameters
    args: tuple
        extra arguments to be supplied to func
    bounds: array-like
        bounds on the variables
    niter: int
        number of iterations after which to terminate the algorithm
    paired: boolean
        calculate gradient for same random seeds
    a: float
        scaling parameter for step size
    alpha: float
        scaling exponent for step size
    c: float
        scaling parameter for evaluation step size
    gamma: float
        scaling exponent for evaluation step size
    disp: boolean
        whether to output status updates during the optimization
    callback: callable
        called after each iteration, as callback(xk), where xk are the current
        parameters

    Returns
    -------
    `scipy.optimize.OptimizeResult` object
    """
    A = 0.01 * niter
    if bounds is not None:
        bounds = np.asarray(bounds)
        project = lambda x: np.clip(x, bounds[:, 0], bounds[:, 1])
    if args is not None:
        # freeze function arguments
        def funcf(x, **kwargs):
            return func(x, *args, **kwargs)

    N = len(x0)
    x = x0
    for k in range(niter):
        ak = a/(k+1.0+A)**alpha
        ck = c/(k+1.0)**gamma
        Deltak = np.random.choice([-1, 1], size=N)
        fkwargs = dict()
        if paired:
            fkwargs['seed'] = np.random.randint(0, np.iinfo(np.uint32).max)
        if bounds is None:
            grad = (funcf(x + ck*Deltak, **fkwargs) - funcf(x - ck*Deltak, **fkwargs)) / (2*ck*Deltak)
            x -= ak*grad
        else:
            # ensure evaluation points are feasible
            xplus = project(x + ck*Deltak)
            xminus = project(x - ck*Deltak)
            grad = (funcf(xplus, **fkwargs) - funcf(xminus, **fkwargs)) / (xplus-xminus)
            x = project(x - ak*grad)
        # print 100 status updates if disp=True
        if disp and (k % (niter//100)) == 0:
            print(x)
        if callback is not None:
            callback(x)
    message = 'terminated after reaching max number of iterations'
    return OptimizeResult(fun=funcf(x), x=x, nit=niter, nfev=2*niter,
                          message=message, success=True)
python
Minimization of an objective function by a simultaneous perturbation stochastic approximation algorithm. This algorithm approximates the gradient of the function by finite differences along stochastic directions Deltak. The elements of Deltak are drawn from +- 1 with probability one half. The gradient is approximated from the symmetric difference f(xk + ck*Deltak) - f(xk - ck*Deltak), where the evaluation step size ck is scaled according ck = c/(k+1)**gamma. The algorithm takes a step of size ak = a/(0.01*niter+k+1)**alpha along the negative gradient. See Spall, IEEE, 1998, 34, 817-823 for guidelines about how to choose the algorithm's parameters (a, alpha, c, gamma). Parameters ---------- func: callable objective function to be minimized: called as `func(x, *args)`, if `paired=True`, then called with keyword argument `seed` additionally x0: array-like initial guess for parameters args: tuple extra arguments to be supplied to func bounds: array-like bounds on the variables niter: int number of iterations after which to terminate the algorithm paired: boolean calculate gradient for same random seeds a: float scaling parameter for step size alpha: float scaling exponent for step size c: float scaling parameter for evaluation step size gamma: float scaling exponent for evaluation step size disp: boolean whether to output status updates during the optimization callback: callable called after each iteration, as callback(xk), where xk are the current parameters Returns ------- `scipy.optimize.OptimizeResult` object
https://github.com/andim/noisyopt/blob/91a748f59acc357622eb4feb58057f8414de7b90/noisyopt/main.py#L264-L351
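A usage sketch on the same illustrative noisy quadratic; with paired=False no seed argument is passed to the objective:

import numpy as np
from noisyopt.main import minimizeSPSA

def obj(x):
    return (x[0] - 1)**2 + (x[1] - 2)**2 + 0.01 * np.random.randn()

res = minimizeSPSA(obj, x0=np.array([0.0, 0.0]), niter=1000, paired=False)
print(res.x)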
andim/noisyopt
noisyopt/main.py
bisect
def bisect(func, a, b, xtol=1e-6, errorcontrol=True, testkwargs=dict(),
           outside='extrapolate', ascending=None, disp=False):
    """Find root by bysection search.

    If the function evaluation is noisy then use `errorcontrol=True` for
    adaptive sampling of the function during the bisection search.

    Parameters
    ----------
    func: callable
        Function of which the root should be found. If `errorcontrol=True`
        then the function should be derived from `AverageBase`.
    a, b: float
        initial interval
    xtol: float
        target tolerance for interval size
    errorcontrol: boolean
        if true, assume that function is derived from `AverageBase`.
    testkwargs: only for `errorcontrol=True`
        see `AverageBase.test0`
    outside: ['extrapolate', 'raise']
        How to handle the case where f(a) and f(b) have same sign,
        i.e. where the root lies outside of the interval.
        If 'raise' throws a `BisectException`.
    ascending:
        allow passing in directly whether function is ascending or not
        if ascending=True then it is assumed without check that f(a) < 0 and f(b) > 0
        if ascending=False then it is assumed without check that f(a) > 0 and f(b) < 0

    Returns
    -------
    float, root of function
    """
    search = True

    # check whether function is ascending or not
    if ascending is None:
        if errorcontrol:
            testkwargs.update(dict(type_='smaller', force=True))
            fa = func.test0(a, **testkwargs)
            fb = func.test0(b, **testkwargs)
        else:
            fa = func(a) < 0
            fb = func(b) < 0
        if fa and not fb:
            ascending = True
        elif fb and not fa:
            ascending = False
        else:
            if disp:
                print('Warning: func(a) and func(b) do not have opposing signs -> no search done')
            if outside == 'raise':
                raise BisectException()
            search = False

    # refine interval until it has reached size xtol, except if root outside
    while (b-a > xtol) and search:
        mid = (a+b)/2.0
        if ascending:
            if ((not errorcontrol) and (func(mid) < 0)) or \
                    (errorcontrol and func.test0(mid, **testkwargs)):
                a = mid
            else:
                b = mid
        else:
            if ((not errorcontrol) and (func(mid) < 0)) or \
                    (errorcontrol and func.test0(mid, **testkwargs)):
                b = mid
            else:
                a = mid
        if disp:
            print('bisect bounds', a, b)

    # interpolate linearly to get zero
    if errorcontrol:
        ya, yb = func(a)[0], func(b)[0]
    else:
        ya, yb = func(a), func(b)
    m = (yb-ya) / (b-a)
    res = a-ya/m
    if disp:
        print('bisect final value', res)
    return res
python
Find root by bisection search. If the function evaluation is noisy then use `errorcontrol=True` for adaptive sampling of the function during the bisection search. Parameters ---------- func: callable Function of which the root should be found. If `errorcontrol=True` then the function should be derived from `AverageBase`. a, b: float initial interval xtol: float target tolerance for interval size errorcontrol: boolean if true, assume that function is derived from `AverageBase`. testkwargs: only for `errorcontrol=True` see `AverageBase.test0` outside: ['extrapolate', 'raise'] How to handle the case where f(a) and f(b) have same sign, i.e. where the root lies outside of the interval. If 'raise' throws a `BisectException`. ascending: allow passing in directly whether function is ascending or not if ascending=True then it is assumed without check that f(a) < 0 and f(b) > 0 if ascending=False then it is assumed without check that f(a) > 0 and f(b) < 0 Returns ------- float, root of function
https://github.com/andim/noisyopt/blob/91a748f59acc357622eb4feb58057f8414de7b90/noisyopt/main.py#L576-L657
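A deterministic usage sketch; with errorcontrol=False a plain callable suffices:

from noisyopt.main import bisect

# root of x**2 - 2 on [0, 2]; the bracket values have opposite signs, so the
# ascending direction is detected automatically
root = bisect(lambda x: x**2 - 2, 0.0, 2.0, xtol=1e-6, errorcontrol=False)
print(root)   # close to 1.41421...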
andim/noisyopt
noisyopt/main.py
AveragedFunction.diffse
def diffse(self, x1, x2):
    """Standard error of the difference between the function values at x1 and x2"""
    f1, f1se = self(x1)
    f2, f2se = self(x2)
    if self.paired:
        fx1 = np.array(self.cache[tuple(x1)])
        fx2 = np.array(self.cache[tuple(x2)])
        diffse = np.std(fx1-fx2, ddof=1)/self.N**.5
        return diffse
    else:
        return (f1se**2 + f2se**2)**.5
python
Standard error of the difference between the function values at x1 and x2
https://github.com/andim/noisyopt/blob/91a748f59acc357622eb4feb58057f8414de7b90/noisyopt/main.py#L466-L476
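A standalone numpy sketch of why the paired branch helps: with common random numbers the noise largely cancels in the per-sample differences, so the paired standard error is much smaller than combining the two standard errors in quadrature:

import numpy as np

rng = np.random.default_rng(0)
noise = rng.normal(size=30)
fx1 = 1.0 + noise          # samples of f at x1
fx2 = 1.2 + 0.9 * noise    # samples of f at x2, correlated through shared seeds

n = len(fx1)
paired_se = np.std(fx1 - fx2, ddof=1) / n**0.5
unpaired_se = (np.var(fx1, ddof=1)/n + np.var(fx2, ddof=1)/n)**0.5
print(paired_se, unpaired_se)   # the paired estimate is far tighter here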
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_document_shorthand_with_fragments
def p_document_shorthand_with_fragments(self, p):
    """
    document : selection_set fragment_list
    """
    p[0] = Document(definitions=[Query(selections=p[1])] + p[2])
python
document : selection_set fragment_list
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L52-L56
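These p_* methods follow PLY's yacc convention: the docstring holds the grammar production, p[1..n] hold the values of the right-hand-side symbols, and the result is assigned to p[0]. A standalone toy sketch of that convention (a hypothetical mini-grammar, not graphql-py's actual lexer or rules):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NAME', 'LBRACE', 'RBRACE')
t_NAME = r'[_A-Za-z][_0-9A-Za-z]*'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_ignore = ' \t\n'

def t_error(t):
    t.lexer.skip(1)

def p_selection_set(p):
    """selection_set : LBRACE NAME RBRACE"""
    # the docstring above is the production; p[2] is the NAME token's value
    p[0] = {'selections': [p[2]]}

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('{ hero }', lexer=lexer))   # {'selections': ['hero']}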
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition1
def p_operation_definition1(self, p):
    """
    operation_definition : operation_type name variable_definitions directives selection_set
    """
    p[0] = self.operation_cls(p[1])(
        selections=p[5],
        name=p[2],
        variable_definitions=p[3],
        directives=p[4],
    )
python
operation_definition : operation_type name variable_definitions directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L95-L104
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition2
def p_operation_definition2(self, p):
    """
    operation_definition : operation_type name variable_definitions selection_set
    """
    p[0] = self.operation_cls(p[1])(
        selections=p[4],
        name=p[2],
        variable_definitions=p[3],
    )
python
operation_definition : operation_type name variable_definitions selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L106-L114
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition3
def p_operation_definition3(self, p):
    """
    operation_definition : operation_type name directives selection_set
    """
    p[0] = self.operation_cls(p[1])(
        selections=p[4],
        name=p[2],
        directives=p[3],
    )
python
operation_definition : operation_type name directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L116-L124
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition4
def p_operation_definition4(self, p):
    """
    operation_definition : operation_type name selection_set
    """
    p[0] = self.operation_cls(p[1])(selections=p[3], name=p[2])
python
operation_definition : operation_type name selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L126-L130
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition5
def p_operation_definition5(self, p):
    """
    operation_definition : operation_type variable_definitions directives selection_set
    """
    p[0] = self.operation_cls(p[1])(
        selections=p[4],
        variable_definitions=p[2],
        directives=p[3],
    )
python
operation_definition : operation_type variable_definitions directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L132-L140
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition6
def p_operation_definition6(self, p):
    """
    operation_definition : operation_type variable_definitions selection_set
    """
    p[0] = self.operation_cls(p[1])(
        selections=p[3],
        variable_definitions=p[2],
    )
python
operation_definition : operation_type variable_definitions selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L142-L149
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_operation_definition7
def p_operation_definition7(self, p):
    """
    operation_definition : operation_type directives selection_set
    """
    p[0] = self.operation_cls(p[1])(
        selections=p[3],
        directives=p[2],
    )
python
operation_definition : operation_type directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L151-L158
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_all
def p_field_all(self, p):
    """
    field : alias name arguments directives selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3], directives=p[4],
                 selections=p[5])
python
def p_field_all(self, p): """ field : alias name arguments directives selection_set """ p[0] = Field(name=p[2], alias=p[1], arguments=p[3], directives=p[4], selections=p[5])
field : alias name arguments directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L199-L204
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional1_1
def p_field_optional1_1(self, p):
    """
    field : name arguments directives selection_set
    """
    p[0] = Field(name=p[1], arguments=p[2], directives=p[3], selections=p[4])
python
def p_field_optional1_1(self, p):
    """
    field : name arguments directives selection_set
    """
    p[0] = Field(name=p[1], arguments=p[2], directives=p[3], selections=p[4])
field : name arguments directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L206-L211
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional1_2
def p_field_optional1_2(self, p):
    """
    field : alias name directives selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], directives=p[3], selections=p[4])
python
def p_field_optional1_2(self, p):
    """
    field : alias name directives selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], directives=p[3], selections=p[4])
field : alias name directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L213-L217
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional1_3
def p_field_optional1_3(self, p):
    """
    field : alias name arguments selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3], selections=p[4])
python
def p_field_optional1_3(self, p):
    """
    field : alias name arguments selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3], selections=p[4])
field : alias name arguments selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L219-L223
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional1_4
def p_field_optional1_4(self, p):
    """
    field : alias name arguments directives
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3], directives=p[4])
python
def p_field_optional1_4(self, p):
    """
    field : alias name arguments directives
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3], directives=p[4])
field : alias name arguments directives
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L225-L229
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional2_1
def p_field_optional2_1(self, p):
    """
    field : name directives selection_set
    """
    p[0] = Field(name=p[1], directives=p[2], selections=p[3])
python
def p_field_optional2_1(self, p):
    """
    field : name directives selection_set
    """
    p[0] = Field(name=p[1], directives=p[2], selections=p[3])
field : name directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L231-L235
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional2_2
def p_field_optional2_2(self, p):
    """
    field : name arguments selection_set
    """
    p[0] = Field(name=p[1], arguments=p[2], selections=p[3])
python
def p_field_optional2_2(self, p):
    """
    field : name arguments selection_set
    """
    p[0] = Field(name=p[1], arguments=p[2], selections=p[3])
field : name arguments selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L237-L241
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional2_3
def p_field_optional2_3(self, p):
    """
    field : name arguments directives
    """
    p[0] = Field(name=p[1], arguments=p[2], directives=p[3])
python
def p_field_optional2_3(self, p):
    """
    field : name arguments directives
    """
    p[0] = Field(name=p[1], arguments=p[2], directives=p[3])
field : name arguments directives
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L243-L247
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional2_4
def p_field_optional2_4(self, p):
    """
    field : alias name selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], selections=p[3])
python
def p_field_optional2_4(self, p):
    """
    field : alias name selection_set
    """
    p[0] = Field(name=p[2], alias=p[1], selections=p[3])
field : alias name selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L249-L253
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional2_5
def p_field_optional2_5(self, p):
    """
    field : alias name directives
    """
    p[0] = Field(name=p[2], alias=p[1], directives=p[3])
python
def p_field_optional2_5(self, p):
    """
    field : alias name directives
    """
    p[0] = Field(name=p[2], alias=p[1], directives=p[3])
field : alias name directives
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L255-L259
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_field_optional2_6
def p_field_optional2_6(self, p):
    """
    field : alias name arguments
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3])
python
def p_field_optional2_6(self, p):
    """
    field : alias name arguments
    """
    p[0] = Field(name=p[2], alias=p[1], arguments=p[3])
field : alias name arguments
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L261-L265
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_fragment_definition1
def p_fragment_definition1(self, p):
    """
    fragment_definition : FRAGMENT fragment_name ON type_condition directives selection_set
    """
    p[0] = FragmentDefinition(name=p[2], type_condition=p[4],
                              selections=p[6], directives=p[5])
python
def p_fragment_definition1(self, p):
    """
    fragment_definition : FRAGMENT fragment_name ON type_condition directives selection_set
    """
    p[0] = FragmentDefinition(name=p[2], type_condition=p[4],
                              selections=p[6], directives=p[5])
fragment_definition : FRAGMENT fragment_name ON type_condition directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L309-L314
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_fragment_definition2
def p_fragment_definition2(self, p):
    """
    fragment_definition : FRAGMENT fragment_name ON type_condition selection_set
    """
    p[0] = FragmentDefinition(name=p[2], type_condition=p[4], selections=p[5])
python
def p_fragment_definition2(self, p):
    """
    fragment_definition : FRAGMENT fragment_name ON type_condition selection_set
    """
    p[0] = FragmentDefinition(name=p[2], type_condition=p[4], selections=p[5])
fragment_definition : FRAGMENT fragment_name ON type_condition selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L316-L321
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_inline_fragment1
def p_inline_fragment1(self, p):
    """
    inline_fragment : SPREAD ON type_condition directives selection_set
    """
    p[0] = InlineFragment(type_condition=p[3], selections=p[5], directives=p[4])
python
def p_inline_fragment1(self, p):
    """
    inline_fragment : SPREAD ON type_condition directives selection_set
    """
    p[0] = InlineFragment(type_condition=p[3], selections=p[5], directives=p[4])
inline_fragment : SPREAD ON type_condition directives selection_set
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L323-L328
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_directive
def p_directive(self, p):
    """
    directive : AT name arguments | AT name
    """
    arguments = p[3] if len(p) == 4 else None
    p[0] = Directive(name=p[2], arguments=arguments)
python
def p_directive(self, p):
    """
    directive : AT name arguments | AT name
    """
    arguments = p[3] if len(p) == 4 else None
    p[0] = Directive(name=p[2], arguments=arguments)
directive : AT name arguments | AT name
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L372-L378
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_variable_definition1
def p_variable_definition1(self, p):
    """
    variable_definition : DOLLAR name COLON type default_value
    """
    p[0] = VariableDefinition(name=p[2], type=p[4], default_value=p[5])
python
def p_variable_definition1(self, p):
    """
    variable_definition : DOLLAR name COLON type default_value
    """
    p[0] = VariableDefinition(name=p[2], type=p[4], default_value=p[5])
variable_definition : DOLLAR name COLON type default_value
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L422-L426
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_object_field_list
def p_object_field_list(self, p):
    """
    object_field_list : object_field_list object_field
    """
    obj = p[1].copy()
    obj.update(p[2])
    p[0] = obj
python
def p_object_field_list(self, p):
    """
    object_field_list : object_field_list object_field
    """
    obj = p[1].copy()
    obj.update(p[2])
    p[0] = obj
object_field_list : object_field_list object_field
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L560-L566
ivelum/graphql-py
graphql/parser.py
GraphQLParser.p_const_object_field_list
def p_const_object_field_list(self, p):
    """
    const_object_field_list : const_object_field_list const_object_field
    """
    obj = p[1].copy()
    obj.update(p[2])
    p[0] = obj
python
def p_const_object_field_list(self, p):
    """
    const_object_field_list : const_object_field_list const_object_field
    """
    obj = p[1].copy()
    obj.update(p[2])
    p[0] = obj
const_object_field_list : const_object_field_list const_object_field
https://github.com/ivelum/graphql-py/blob/72baf16d838e82349ee5e8d8f8971ce11cfcedf9/graphql/parser.py#L587-L593
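The PLY productions above all hang off GraphQLParser, so a short usage sketch may help; it assumes the package's documented entry point graphql.parser.GraphQLParser and its parse() method, which are not shown in the records above.
# Hedged sketch: GraphQLParser.parse() is assumed from the project's README, not from the records above.
from graphql.parser import GraphQLParser

parser = GraphQLParser()
# Exercises name, arguments, directives and a nested selection_set from the grammar rules.
ast = parser.parse('query Hero { hero(episode: EMPIRE) @include(if: true) { name } }')
print(ast)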
goodmami/penman
penman.py
alphanum_order
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the
    sorting is alphabetic.
    """
    return sorted(
        triples,
        key=lambda t: [
            int(t) if t.isdigit() else t
            for t in re.split(r'([0-9]+)', t.relation or '')
        ]
    )
python
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the
    sorting is alphabetic.
    """
    return sorted(
        triples,
        key=lambda t: [
            int(t) if t.isdigit() else t
            for t in re.split(r'([0-9]+)', t.relation or '')
        ]
    )
Sort a list of triples by relation name. Embedded integers are sorted numerically, but otherwise the sorting is alphabetic.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L95-L108
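A hedged usage sketch for alphanum_order, assuming it and decode are importable from the top-level penman module as the file path suggests: relations with embedded integers (e.g. :ARG2 vs :ARG10) sort numerically rather than lexically.
import penman

g = penman.decode('(p / put-01 :ARG10 (x / x-thing) :ARG2 (y / y-thing))')
for t in penman.alphanum_order(g.triples()):
    print(t.source, t.relation, t.target)
# ARG2 sorts before ARG10 because the embedded integers are compared numerically.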
goodmami/penman
penman.py
decode
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object

    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    codec = cls(**kwargs)
    return codec.decode(s)
python
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object

    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    codec = cls(**kwargs)
    return codec.decode(s)
Deserialize PENMAN-serialized *s* into its Graph object

Args:
    s: a string containing a single PENMAN-serialized graph
    cls: serialization codec class
    kwargs: keyword arguments passed to the constructor of *cls*
Returns:
    the Graph object described by *s*
Example:
    >>> decode('(b / bark :ARG1 (d / dog))')
    <Graph object (top=b) at ...>
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L793-L809
goodmami/penman
penman.py
encode
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    codec = cls(**kwargs)
    return codec.encode(g, top=top)
python
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    codec = cls(**kwargs)
    return codec.encode(g, top=top)
Serialize the graph *g* from *top* to PENMAN notation.

Args:
    g: the Graph object
    top: the node identifier for the top of the serialized graph; if
        unset, the original top of *g* is used
    cls: serialization codec class
    kwargs: keyword arguments passed to the constructor of *cls*
Returns:
    the PENMAN-serialized string of the Graph *g*
Example:
    >>> encode(Graph([('h', 'instance', 'hi')]))
    (h / hi)
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L812-L830
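The two module-level helpers round-trip PENMAN text; a minimal sketch grounded in the docstring examples above:
import penman

g = penman.decode('(b / bark :ARG1 (d / dog))')  # Graph with top 'b'
print(g.top)                                     # b
print(penman.encode(g))                          # e.g. (b / bark :ARG1 (d / dog))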
goodmami/penman
penman.py
load
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    if hasattr(source, 'read'):
        return list(decode(source.read()))
    else:
        with open(source) as fh:
            return list(decode(fh.read()))
python
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    if hasattr(source, 'read'):
        return list(decode(source.read()))
    else:
        with open(source) as fh:
            return list(decode(fh.read()))
Deserialize a list of PENMAN-encoded graphs from *source*.

Args:
    source: a filename or file-like object to read from
    triples: if True, read graphs as triples instead of as PENMAN
    cls: serialization codec class
    kwargs: keyword arguments passed to the constructor of *cls*
Returns:
    a list of Graph objects
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L833-L850
goodmami/penman
penman.py
loads
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    codec = cls(**kwargs)
    return list(codec.iterdecode(string, triples=triples))
python
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    codec = cls(**kwargs)
    return list(codec.iterdecode(string, triples=triples))
Deserialize a list of PENMAN-encoded graphs from *string*.

Args:
    string: a string containing graph data
    triples: if True, read graphs as triples instead of as PENMAN
    cls: serialization codec class
    kwargs: keyword arguments passed to the constructor of *cls*
Returns:
    a list of Graph objects
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L853-L866
goodmami/penman
penman.py
dump
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    text = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if hasattr(file, 'write'):
        print(text, file=file)
    else:
        with open(file, 'w') as fh:
            print(text, file=fh)
python
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    text = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if hasattr(file, 'write'):
        print(text, file=file)
    else:
        with open(file, 'w') as fh:
            print(text, file=fh)
Serialize each graph in *graphs* to PENMAN and write to *file*.

Args:
    graphs: an iterable of Graph objects
    file: a filename or file-like object to write to
    triples: if True, write graphs as triples instead of as PENMAN
    cls: serialization codec class
    kwargs: keyword arguments passed to the constructor of *cls*
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L869-L886
goodmami/penman
penman.py
dumps
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        triples: if True, write graphs as triples instead of as PENMAN
    Returns:
        the string of serialized graphs
    """
    codec = cls(**kwargs)
    strings = [codec.encode(g, triples=triples) for g in graphs]
    return '\n\n'.join(strings)
python
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        triples: if True, write graphs as triples instead of as PENMAN
    Returns:
        the string of serialized graphs
    """
    codec = cls(**kwargs)
    strings = [codec.encode(g, triples=triples) for g in graphs]
    return '\n\n'.join(strings)
Serialize each graph in *graphs* to the PENMAN format.

Args:
    graphs: an iterable of Graph objects
    triples: if True, write graphs as triples instead of as PENMAN
Returns:
    the string of serialized graphs
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L889-L901
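A hedged sketch of the list-level helpers: loads/dumps operate on strings, while load/dump do the same against file paths or file-like objects.
import penman

text = '(h / hello)\n\n(g / goodbye)'
graphs = penman.loads(text)     # two Graph objects
print(len(graphs))              # 2
print(penman.dumps(graphs))     # re-serialized graphs separated by a blank line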
goodmami/penman
penman.py
PENMANCodec.decode
def decode(self, s, triples=False):
    """
    Deserialize PENMAN-notation string *s* into its Graph object.

    Args:
        s: a string containing a single PENMAN-serialized graph
        triples: if True, treat *s* as a conjunction of logical triples
    Returns:
        the Graph object described by *s*
    Example:
        >>> codec = PENMANCodec()
        >>> codec.decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
        >>> codec.decode(
        ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
        ...     triples=True
        ... )
        <Graph object (top=b) at ...>
    """
    try:
        if triples:
            span, data = self._decode_triple_conjunction(s)
        else:
            span, data = self._decode_penman_node(s)
    except IndexError:
        raise DecodeError(
            'Unexpected end of string.', string=s, pos=len(s)
        )
    top, nodes, edges = data
    return self.triples_to_graph(nodes + edges, top=top)
python
def decode(self, s, triples=False):
    """
    Deserialize PENMAN-notation string *s* into its Graph object.

    Args:
        s: a string containing a single PENMAN-serialized graph
        triples: if True, treat *s* as a conjunction of logical triples
    Returns:
        the Graph object described by *s*
    Example:
        >>> codec = PENMANCodec()
        >>> codec.decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
        >>> codec.decode(
        ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
        ...     triples=True
        ... )
        <Graph object (top=b) at ...>
    """
    try:
        if triples:
            span, data = self._decode_triple_conjunction(s)
        else:
            span, data = self._decode_penman_node(s)
    except IndexError:
        raise DecodeError(
            'Unexpected end of string.', string=s, pos=len(s)
        )
    top, nodes, edges = data
    return self.triples_to_graph(nodes + edges, top=top)
Deserialize PENMAN-notation string *s* into its Graph object.

Args:
    s: a string containing a single PENMAN-serialized graph
    triples: if True, treat *s* as a conjunction of logical triples
Returns:
    the Graph object described by *s*
Example:
    >>> codec = PENMANCodec()
    >>> codec.decode('(b / bark :ARG1 (d / dog))')
    <Graph object (top=b) at ...>
    >>> codec.decode(
    ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
    ...     triples=True
    ... )
    <Graph object (top=b) at ...>
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L148-L178
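A hedged round-trip sketch using the codec directly, mirroring the docstring examples; PENMANCodec is assumed to be importable from the top-level penman module.
from penman import PENMANCodec

codec = PENMANCodec()
g = codec.decode('instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)', triples=True)
print(codec.encode(g))                # PENMAN form, e.g. (b / bark :ARG1 (d / dog))
print(codec.encode(g, triples=True))  # back to a triple conjunction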
goodmami/penman
penman.py
PENMANCodec.iterdecode
def iterdecode(self, s, triples=False): """ Deserialize PENMAN-notation string *s* into its Graph objects. Args: s: a string containing zero or more PENMAN-serialized graphs triples: if True, treat *s* as a conjunction of logical triples Yields: valid Graph objects described by *s* Example: >>> codec = PENMANCodec() >>> list(codec.iterdecode('(h / hello)(g / goodbye)')) [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>] >>> list(codec.iterdecode( ... 'instance(h, hello)\n' ... 'instance(g, goodbye)' ... )) [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>] """ pos, strlen = 0, len(s) while pos < strlen: if s[pos] == '#': while pos < strlen and s[pos] != '\n': pos += 1 elif triples or s[pos] == '(': try: if triples: span, data = self._decode_triple_conjunction( s, pos=pos ) else: span, data = self._decode_penman_node(s, pos=pos) except (IndexError, DecodeError): # don't re-raise below for more robust parsing, but # for now, raising helps with debugging bad input raise pos += 1 else: top, nodes, edges = data yield self.triples_to_graph(nodes + edges, top=top) pos = span[1] else: pos += 1
python
def iterdecode(self, s, triples=False): """ Deserialize PENMAN-notation string *s* into its Graph objects. Args: s: a string containing zero or more PENMAN-serialized graphs triples: if True, treat *s* as a conjunction of logical triples Yields: valid Graph objects described by *s* Example: >>> codec = PENMANCodec() >>> list(codec.iterdecode('(h / hello)(g / goodbye)')) [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>] >>> list(codec.iterdecode( ... 'instance(h, hello)\n' ... 'instance(g, goodbye)' ... )) [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>] """ pos, strlen = 0, len(s) while pos < strlen: if s[pos] == '#': while pos < strlen and s[pos] != '\n': pos += 1 elif triples or s[pos] == '(': try: if triples: span, data = self._decode_triple_conjunction( s, pos=pos ) else: span, data = self._decode_penman_node(s, pos=pos) except (IndexError, DecodeError): # don't re-raise below for more robust parsing, but # for now, raising helps with debugging bad input raise pos += 1 else: top, nodes, edges = data yield self.triples_to_graph(nodes + edges, top=top) pos = span[1] else: pos += 1
Deserialize PENMAN-notation string *s* into its Graph objects. Args: s: a string containing zero or more PENMAN-serialized graphs triples: if True, treat *s* as a conjunction of logical triples Yields: valid Graph objects described by *s* Example: >>> codec = PENMANCodec() >>> list(codec.iterdecode('(h / hello)(g / goodbye)')) [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>] >>> list(codec.iterdecode( ... 'instance(h, hello)\n' ... 'instance(g, goodbye)' ... )) [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L180-L223
goodmami/penman
penman.py
PENMANCodec.encode
def encode(self, g, top=None, triples=False): """ Serialize the graph *g* from *top* to PENMAN notation. Args: g: the Graph object top: the node identifier for the top of the serialized graph; if unset, the original top of *g* is used triples: if True, serialize as a conjunction of logical triples Returns: the PENMAN-serialized string of the Graph *g* Example: >>> codec = PENMANCodec() >>> codec.encode(Graph([('h', 'instance', 'hi')])) (h / hi) >>> codec.encode(Graph([('h', 'instance', 'hi')]), ... triples=True) instance(h, hi) """ if len(g.triples()) == 0: raise EncodeError('Cannot encode empty graph.') if triples: return self._encode_triple_conjunction(g, top=top) else: return self._encode_penman(g, top=top)
python
def encode(self, g, top=None, triples=False): """ Serialize the graph *g* from *top* to PENMAN notation. Args: g: the Graph object top: the node identifier for the top of the serialized graph; if unset, the original top of *g* is used triples: if True, serialize as a conjunction of logical triples Returns: the PENMAN-serialized string of the Graph *g* Example: >>> codec = PENMANCodec() >>> codec.encode(Graph([('h', 'instance', 'hi')])) (h / hi) >>> codec.encode(Graph([('h', 'instance', 'hi')]), ... triples=True) instance(h, hi) """ if len(g.triples()) == 0: raise EncodeError('Cannot encode empty graph.') if triples: return self._encode_triple_conjunction(g, top=top) else: return self._encode_penman(g, top=top)
Serialize the graph *g* from *top* to PENMAN notation. Args: g: the Graph object top: the node identifier for the top of the serialized graph; if unset, the original top of *g* is used triples: if True, serialize as a conjunction of logical triples Returns: the PENMAN-serialized string of the Graph *g* Example: >>> codec = PENMANCodec() >>> codec.encode(Graph([('h', 'instance', 'hi')])) (h / hi) >>> codec.encode(Graph([('h', 'instance', 'hi')]), ... triples=True) instance(h, hi)
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L225-L250
goodmami/penman
penman.py
PENMANCodec.handle_triple
def handle_triple(self, lhs, relation, rhs): """ Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object. """ relation = relation.replace(':', '', 1) # remove leading : if self.is_relation_inverted(relation): # deinvert source, target, inverted = rhs, lhs, True relation = self.invert_relation(relation) else: source, target, inverted = lhs, rhs, False source = _default_cast(source) target = _default_cast(target) if relation == '': # set empty relations to None relation = None return Triple(source, relation, target, inverted)
python
def handle_triple(self, lhs, relation, rhs): """ Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object. """ relation = relation.replace(':', '', 1) # remove leading : if self.is_relation_inverted(relation): # deinvert source, target, inverted = rhs, lhs, True relation = self.invert_relation(relation) else: source, target, inverted = lhs, rhs, False source = _default_cast(source) target = _default_cast(target) if relation == '': # set empty relations to None relation = None return Triple(source, relation, target, inverted)
Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on relations * de-inverts all inverted relations * sets empty relations to `None` * casts numeric string sources and targets to their numeric types (e.g. float, int) Args: lhs: the left hand side of an observed triple relation: the triple relation (possibly inverted) rhs: the right hand side of an observed triple Returns: The processed (source, relation, target) triple. By default, it is returned as a Triple object.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L267-L304
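handle_triple is where raw parses get normalized, so inverted relations in the surface form come out de-inverted in the stored triples; a hedged sketch of the observable effect:
import penman

g = penman.decode('(d / dog :ARG1-of (b / bark))')
for t in g.triples():
    print(t.source, t.relation, t.target, t.inverted)
# The :ARG1-of edge is stored de-inverted, roughly as ('b', 'ARG1', 'd') with inverted=True.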
goodmami/penman
penman.py
PENMANCodec.triples_to_graph
def triples_to_graph(self, triples, top=None): """ Create a Graph from *triples* considering codec configuration. The Graph class does not know about information in the codec, so if Graph instantiation depends on special `TYPE_REL` or `TOP_VAR` values, use this function instead of instantiating a Graph object directly. This is also where edge normalization (de-inversion) and value type conversion occur (via handle_triple()). Args: triples: an iterable of (lhs, relation, rhs) triples top: node identifier of the top node Returns: a Graph object """ inferred_top = triples[0][0] if triples else None ts = [] for triple in triples: if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL: inferred_top = triple[2] else: ts.append(self.handle_triple(*triple)) top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target return Graph(ts, top=top or inferred_top)
python
def triples_to_graph(self, triples, top=None): """ Create a Graph from *triples* considering codec configuration. The Graph class does not know about information in the codec, so if Graph instantiation depends on special `TYPE_REL` or `TOP_VAR` values, use this function instead of instantiating a Graph object directly. This is also where edge normalization (de-inversion) and value type conversion occur (via handle_triple()). Args: triples: an iterable of (lhs, relation, rhs) triples top: node identifier of the top node Returns: a Graph object """ inferred_top = triples[0][0] if triples else None ts = [] for triple in triples: if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL: inferred_top = triple[2] else: ts.append(self.handle_triple(*triple)) top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target return Graph(ts, top=top or inferred_top)
Create a Graph from *triples* considering codec configuration. The Graph class does not know about information in the codec, so if Graph instantiation depends on special `TYPE_REL` or `TOP_VAR` values, use this function instead of instantiating a Graph object directly. This is also where edge normalization (de-inversion) and value type conversion occur (via handle_triple()). Args: triples: an iterable of (lhs, relation, rhs) triples top: node identifier of the top node Returns: a Graph object
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L306-L331
goodmami/penman
penman.py
PENMANCodec._encode_penman
def _encode_penman(self, g, top=None): """ Walk graph g and find a spanning dag, then serialize the result. First, depth-first traversal of preferred orientations (whether true or inverted) to create graph p. If any triples remain, select the first remaining triple whose source in the dispreferred orientation exists in p, where 'first' is determined by the order of inserted nodes (i.e. a topological sort). Add this triple, then repeat the depth-first traversal of preferred orientations from its target. Repeat until no triples remain, or raise an error if there are no candidates in the dispreferred orientation (which likely means the graph is disconnected). """ if top is None: top = g.top remaining = set(g.triples()) variables = g.variables() store = defaultdict(lambda: ([], [])) # (preferred, dispreferred) for t in g.triples(): if t.inverted: store[t.target][0].append(t) store[t.source][1].append(Triple(*t, inverted=False)) else: store[t.source][0].append(t) store[t.target][1].append(Triple(*t, inverted=True)) p = defaultdict(list) topolist = [top] def _update(t): src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2]) p[src].append(t) remaining.remove(t) if tgt in variables and t.relation != self.TYPE_REL: topolist.append(tgt) return tgt return None def _explore_preferred(src): ts = store.get(src, ([], []))[0] for t in ts: if t in remaining: tgt = _update(t) if tgt is not None: _explore_preferred(tgt) ts[:] = [] # clear explored list _explore_preferred(top) while remaining: flip_candidates = [store.get(v, ([],[]))[1] for v in topolist] for fc in flip_candidates: fc[:] = [c for c in fc if c in remaining] # clear superfluous if not any(len(fc) > 0 for fc in flip_candidates): raise EncodeError('Invalid graph; possibly disconnected.') c = next(c for fc in flip_candidates for c in fc) tgt = _update(c) if tgt is not None: _explore_preferred(tgt) return self._layout(p, top, 0, set())
python
def _encode_penman(self, g, top=None): """ Walk graph g and find a spanning dag, then serialize the result. First, depth-first traversal of preferred orientations (whether true or inverted) to create graph p. If any triples remain, select the first remaining triple whose source in the dispreferred orientation exists in p, where 'first' is determined by the order of inserted nodes (i.e. a topological sort). Add this triple, then repeat the depth-first traversal of preferred orientations from its target. Repeat until no triples remain, or raise an error if there are no candidates in the dispreferred orientation (which likely means the graph is disconnected). """ if top is None: top = g.top remaining = set(g.triples()) variables = g.variables() store = defaultdict(lambda: ([], [])) # (preferred, dispreferred) for t in g.triples(): if t.inverted: store[t.target][0].append(t) store[t.source][1].append(Triple(*t, inverted=False)) else: store[t.source][0].append(t) store[t.target][1].append(Triple(*t, inverted=True)) p = defaultdict(list) topolist = [top] def _update(t): src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2]) p[src].append(t) remaining.remove(t) if tgt in variables and t.relation != self.TYPE_REL: topolist.append(tgt) return tgt return None def _explore_preferred(src): ts = store.get(src, ([], []))[0] for t in ts: if t in remaining: tgt = _update(t) if tgt is not None: _explore_preferred(tgt) ts[:] = [] # clear explored list _explore_preferred(top) while remaining: flip_candidates = [store.get(v, ([],[]))[1] for v in topolist] for fc in flip_candidates: fc[:] = [c for c in fc if c in remaining] # clear superfluous if not any(len(fc) > 0 for fc in flip_candidates): raise EncodeError('Invalid graph; possibly disconnected.') c = next(c for fc in flip_candidates for c in fc) tgt = _update(c) if tgt is not None: _explore_preferred(tgt) return self._layout(p, top, 0, set())
Walk graph g and find a spanning dag, then serialize the result. First, depth-first traversal of preferred orientations (whether true or inverted) to create graph p. If any triples remain, select the first remaining triple whose source in the dispreferred orientation exists in p, where 'first' is determined by the order of inserted nodes (i.e. a topological sort). Add this triple, then repeat the depth-first traversal of preferred orientations from its target. Repeat until no triples remain, or raise an error if there are no candidates in the dispreferred orientation (which likely means the graph is disconnected).
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L436-L499
goodmami/penman
penman.py
AMRCodec.is_relation_inverted
def is_relation_inverted(self, relation):
    """
    Return True if *relation* is inverted.
    """
    return (
        relation in self._deinversions
        or (relation.endswith('-of') and relation not in self._inversions)
    )
python
def is_relation_inverted(self, relation):
    """
    Return True if *relation* is inverted.
    """
    return (
        relation in self._deinversions
        or (relation.endswith('-of') and relation not in self._inversions)
    )
Return True if *relation* is inverted.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L571-L578
goodmami/penman
penman.py
AMRCodec.invert_relation
def invert_relation(self, relation):
    """
    Invert or deinvert *relation*.
    """
    if self.is_relation_inverted(relation):
        rel = self._deinversions.get(relation, relation[:-3])
    else:
        rel = self._inversions.get(relation, relation + '-of')
    if rel is None:
        raise PenmanError(
            'Cannot (de)invert {}; not allowed'.format(relation)
        )
    return rel
python
def invert_relation(self, relation):
    """
    Invert or deinvert *relation*.
    """
    if self.is_relation_inverted(relation):
        rel = self._deinversions.get(relation, relation[:-3])
    else:
        rel = self._inversions.get(relation, relation + '-of')
    if rel is None:
        raise PenmanError(
            'Cannot (de)invert {}; not allowed'.format(relation)
        )
    return rel
Invert or deinvert *relation*.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L580-L592
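A hedged sketch of the AMR-specific (de)inversion rules above; AMRCodec and its no-argument constructor are assumptions based on the file layout.
from penman import AMRCodec

amr = AMRCodec()
print(amr.invert_relation('ARG0'))          # 'ARG0-of' (default: append -of)
print(amr.invert_relation('ARG0-of'))       # 'ARG0'    (default: strip -of)
print(amr.is_relation_inverted('ARG0-of'))  # True for -of relations not listed in _inversions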
goodmami/penman
penman.py
Graph.triples
def triples(self, source=None, relation=None, target=None):
    """
    Return triples filtered by their *source*, *relation*, or *target*.
    """
    triplematch = lambda t: (
        (source is None or source == t.source) and
        (relation is None or relation == t.relation) and
        (target is None or target == t.target)
    )
    return list(filter(triplematch, self._triples))
python
def triples(self, source=None, relation=None, target=None):
    """
    Return triples filtered by their *source*, *relation*, or *target*.
    """
    triplematch = lambda t: (
        (source is None or source == t.source) and
        (relation is None or relation == t.relation) and
        (target is None or target == t.target)
    )
    return list(filter(triplematch, self._triples))
Return triples filtered by their *source*, *relation*, or *target*.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L680-L689
goodmami/penman
penman.py
Graph.edges
def edges(self, source=None, relation=None, target=None):
    """
    Return edges filtered by their *source*, *relation*, or *target*.

    Edges don't include terminal triples (node types or attributes).
    """
    edgematch = lambda e: (
        (source is None or source == e.source) and
        (relation is None or relation == e.relation) and
        (target is None or target == e.target)
    )
    variables = self.variables()
    edges = [t for t in self._triples if t.target in variables]
    return list(filter(edgematch, edges))
python
def edges(self, source=None, relation=None, target=None):
    """
    Return edges filtered by their *source*, *relation*, or *target*.

    Edges don't include terminal triples (node types or attributes).
    """
    edgematch = lambda e: (
        (source is None or source == e.source) and
        (relation is None or relation == e.relation) and
        (target is None or target == e.target)
    )
    variables = self.variables()
    edges = [t for t in self._triples if t.target in variables]
    return list(filter(edgematch, edges))
Return edges filtered by their *source*, *relation*, or *target*. Edges don't include terminal triples (node types or attributes).
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L691-L704
goodmami/penman
penman.py
Graph.attributes
def attributes(self, source=None, relation=None, target=None):
    """
    Return attributes filtered by their *source*, *relation*, or *target*.

    Attributes don't include triples where the target is a nonterminal.
    """
    attrmatch = lambda a: (
        (source is None or source == a.source) and
        (relation is None or relation == a.relation) and
        (target is None or target == a.target)
    )
    variables = self.variables()
    attrs = [t for t in self.triples() if t.target not in variables]
    return list(filter(attrmatch, attrs))
python
def attributes(self, source=None, relation=None, target=None):
    """
    Return attributes filtered by their *source*, *relation*, or *target*.

    Attributes don't include triples where the target is a nonterminal.
    """
    attrmatch = lambda a: (
        (source is None or source == a.source) and
        (relation is None or relation == a.relation) and
        (target is None or target == a.target)
    )
    variables = self.variables()
    attrs = [t for t in self.triples() if t.target not in variables]
    return list(filter(attrmatch, attrs))
Return attributes filtered by their *source*, *relation*, or *target*. Attributes don't include triples where the target is a nonterminal.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L706-L719
goodmami/penman
penman.py
Graph.reentrancies
def reentrancies(self):
    """
    Return a mapping of variables to their re-entrancy count.

    A re-entrancy is when more than one edge selects a node as its
    target. These graphs are rooted, so the top node always has an
    implicit entrancy. Only nodes with re-entrancies are reported,
    and the count is only for the entrant edges beyond the first.
    Also note that these counts are for the interpreted graph, not
    for the linearized form, so inverted edges are always re-entrant.
    """
    entrancies = defaultdict(int)
    entrancies[self.top] += 1  # implicit entrancy to top
    for t in self.edges():
        entrancies[t.target] += 1
    return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
python
def reentrancies(self):
    """
    Return a mapping of variables to their re-entrancy count.

    A re-entrancy is when more than one edge selects a node as its
    target. These graphs are rooted, so the top node always has an
    implicit entrancy. Only nodes with re-entrancies are reported,
    and the count is only for the entrant edges beyond the first.
    Also note that these counts are for the interpreted graph, not
    for the linearized form, so inverted edges are always re-entrant.
    """
    entrancies = defaultdict(int)
    entrancies[self.top] += 1  # implicit entrancy to top
    for t in self.edges():
        entrancies[t.target] += 1
    return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
Return a mapping of variables to their re-entrancy count. A re-entrancy is when more than one edge selects a node as its target. These graphs are rooted, so the top node always has an implicit entrancy. Only nodes with re-entrancies are reported, and the count is only for the entrant edges beyond the first. Also note that these counts are for the interpreted graph, not for the linearized form, so inverted edges are always re-entrant.
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L721-L737
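A hedged sketch pulling the Graph query methods together on a small graph with one shared variable:
import penman

g = penman.decode('(a / alpha :ARG0 (b / beta) :ARG1 (g2 / gamma :ARG0 b))')
print(g.edges(source='a'))  # edges leaving 'a' (targets are variables)
print(g.attributes())       # node-type / constant triples only
print(g.reentrancies())     # roughly {'b': 1}: two edges select 'b' as their target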
xgfs/NetLSD
netlsd/util.py
check_1d
def check_1d(inp):
    """
    Check input to be a vector. Converts lists to np.ndarray.

    Parameters
    ----------
    inp : obj
        Input vector

    Returns
    -------
    numpy.ndarray or None
        Input vector or None

    Examples
    --------
    >>> check_1d([0, 1, 2, 3])
    [0, 1, 2, 3]
    >>> check_1d('test')
    None
    """
    if isinstance(inp, list):
        return check_1d(np.array(inp))
    if isinstance(inp, np.ndarray):
        if inp.ndim == 1:  # input is a vector
            return inp
python
def check_1d(inp):
    """
    Check input to be a vector. Converts lists to np.ndarray.

    Parameters
    ----------
    inp : obj
        Input vector

    Returns
    -------
    numpy.ndarray or None
        Input vector or None

    Examples
    --------
    >>> check_1d([0, 1, 2, 3])
    [0, 1, 2, 3]
    >>> check_1d('test')
    None
    """
    if isinstance(inp, list):
        return check_1d(np.array(inp))
    if isinstance(inp, np.ndarray):
        if inp.ndim == 1:  # input is a vector
            return inp
Check input to be a vector. Converts lists to np.ndarray.

Parameters
----------
inp : obj
    Input vector

Returns
-------
numpy.ndarray or None
    Input vector or None

Examples
--------
>>> check_1d([0, 1, 2, 3])
[0, 1, 2, 3]
>>> check_1d('test')
None
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L7-L34
xgfs/NetLSD
netlsd/util.py
check_2d
def check_2d(inp):
    """
    Check input to be a matrix. Converts lists of lists to np.ndarray.

    Also allows the input to be a scipy sparse matrix.

    Parameters
    ----------
    inp : obj
        Input matrix

    Returns
    -------
    numpy.ndarray, scipy.sparse or None
        Input matrix or None

    Examples
    --------
    >>> check_2d([[0, 1], [2, 3]])
    [[0, 1], [2, 3]]
    >>> check_2d('test')
    None
    """
    if isinstance(inp, list):
        return check_2d(np.array(inp))
    if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
        if inp.ndim == 2:  # input is a dense matrix
            return inp
    if sps.issparse(inp):
        if inp.ndim == 2:  # input is a sparse matrix
            return inp
python
def check_2d(inp):
    """
    Check input to be a matrix. Converts lists of lists to np.ndarray.

    Also allows the input to be a scipy sparse matrix.

    Parameters
    ----------
    inp : obj
        Input matrix

    Returns
    -------
    numpy.ndarray, scipy.sparse or None
        Input matrix or None

    Examples
    --------
    >>> check_2d([[0, 1], [2, 3]])
    [[0, 1], [2, 3]]
    >>> check_2d('test')
    None
    """
    if isinstance(inp, list):
        return check_2d(np.array(inp))
    if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
        if inp.ndim == 2:  # input is a dense matrix
            return inp
    if sps.issparse(inp):
        if inp.ndim == 2:  # input is a sparse matrix
            return inp
Check input to be a matrix. Converts lists of lists to np.ndarray.

Also allows the input to be a scipy sparse matrix.

Parameters
----------
inp : obj
    Input matrix

Returns
-------
numpy.ndarray, scipy.sparse or None
    Input matrix or None

Examples
--------
>>> check_2d([[0, 1], [2, 3]])
[[0, 1], [2, 3]]
>>> check_2d('test')
None
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L37-L69
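A hedged sketch of the two validators, assuming they are importable from netlsd.util as the file path indicates:
import numpy as np
from netlsd.util import check_1d, check_2d

print(check_1d([0, 1, 2, 3]))      # array([0, 1, 2, 3])
print(check_1d('test'))            # None: not a vector
print(check_2d([[0, 1], [2, 3]]))  # 2x2 ndarray
print(check_2d(np.eye(3)))         # dense matrix returned unchanged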
xgfs/NetLSD
netlsd/util.py
graph_to_laplacian
def graph_to_laplacian(G, normalized=True): """ Converts a graph from popular Python packages to Laplacian representation. Currently support NetworkX, graph_tool and igraph. Parameters ---------- G : obj Input graph normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- scipy.sparse Laplacian matrix of the input graph Examples -------- >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense() [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]] >>> graph_to_laplacian('test') None """ try: import networkx as nx if isinstance(G, nx.Graph): if normalized: return nx.normalized_laplacian_matrix(G) else: return nx.laplacian_matrix(G) except ImportError: pass try: import graph_tool.all as gt if isinstance(G, gt.Graph): if normalized: return gt.laplacian_type(G, normalized=True) else: return gt.laplacian(G) except ImportError: pass try: import igraph as ig if isinstance(G, ig.Graph): if normalized: return np.array(G.laplacian(normalized=True)) else: return np.array(G.laplacian()) except ImportError: pass
python
def graph_to_laplacian(G, normalized=True): """ Converts a graph from popular Python packages to Laplacian representation. Currently support NetworkX, graph_tool and igraph. Parameters ---------- G : obj Input graph normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- scipy.sparse Laplacian matrix of the input graph Examples -------- >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense() [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]] >>> graph_to_laplacian('test') None """ try: import networkx as nx if isinstance(G, nx.Graph): if normalized: return nx.normalized_laplacian_matrix(G) else: return nx.laplacian_matrix(G) except ImportError: pass try: import graph_tool.all as gt if isinstance(G, gt.Graph): if normalized: return gt.laplacian_type(G, normalized=True) else: return gt.laplacian(G) except ImportError: pass try: import igraph as ig if isinstance(G, ig.Graph): if normalized: return np.array(G.laplacian(normalized=True)) else: return np.array(G.laplacian()) except ImportError: pass
Converts a graph from popular Python packages to Laplacian representation. Currently support NetworkX, graph_tool and igraph. Parameters ---------- G : obj Input graph normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- scipy.sparse Laplacian matrix of the input graph Examples -------- >>> graph_to_laplacian(nx.complete_graph(3), 'unnormalized').todense() [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]] >>> graph_to_laplacian('test') None
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L72-L126
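A hedged usage sketch for graph_to_laplacian (NetworkX assumed installed); note that the normalized flag is a boolean, so normalized=False yields the unnormalized Laplacian shown in the docstring example.
import networkx as nx
from netlsd.util import graph_to_laplacian

L = graph_to_laplacian(nx.complete_graph(3), normalized=False)
print(L.todense())
# [[ 2 -1 -1]
#  [-1  2 -1]
#  [-1 -1  2]]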
xgfs/NetLSD
netlsd/util.py
mat_to_laplacian
def mat_to_laplacian(mat, normalized): """ Converts a sparse or dence adjacency matrix to Laplacian. Parameters ---------- mat : obj Input adjacency matrix. If it is a Laplacian matrix already, return it. normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- obj Laplacian of the input adjacency matrix Examples -------- >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False) [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]] """ if sps.issparse(mat): if np.all(mat.diagonal()>=0): # Check diagonal if np.all((mat-sps.diags(mat.diagonal())).data <= 0): # Check off-diagonal elements return mat else: if np.all(np.diag(mat)>=0): # Check diagonal if np.all(mat - np.diag(mat) <= 0): # Check off-diagonal elements return mat deg = np.squeeze(np.asarray(mat.sum(axis=1))) if sps.issparse(mat): L = sps.diags(deg) - mat else: L = np.diag(deg) - mat if not normalized: return L with np.errstate(divide='ignore'): sqrt_deg = 1.0 / np.sqrt(deg) sqrt_deg[sqrt_deg==np.inf] = 0 if sps.issparse(mat): sqrt_deg_mat = sps.diags(sqrt_deg) else: sqrt_deg_mat = np.diag(sqrt_deg) return sqrt_deg_mat.dot(L).dot(sqrt_deg_mat)
python
def mat_to_laplacian(mat, normalized): """ Converts a sparse or dence adjacency matrix to Laplacian. Parameters ---------- mat : obj Input adjacency matrix. If it is a Laplacian matrix already, return it. normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- obj Laplacian of the input adjacency matrix Examples -------- >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False) [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]] """ if sps.issparse(mat): if np.all(mat.diagonal()>=0): # Check diagonal if np.all((mat-sps.diags(mat.diagonal())).data <= 0): # Check off-diagonal elements return mat else: if np.all(np.diag(mat)>=0): # Check diagonal if np.all(mat - np.diag(mat) <= 0): # Check off-diagonal elements return mat deg = np.squeeze(np.asarray(mat.sum(axis=1))) if sps.issparse(mat): L = sps.diags(deg) - mat else: L = np.diag(deg) - mat if not normalized: return L with np.errstate(divide='ignore'): sqrt_deg = 1.0 / np.sqrt(deg) sqrt_deg[sqrt_deg==np.inf] = 0 if sps.issparse(mat): sqrt_deg_mat = sps.diags(sqrt_deg) else: sqrt_deg_mat = np.diag(sqrt_deg) return sqrt_deg_mat.dot(L).dot(sqrt_deg_mat)
Converts a sparse or dence adjacency matrix to Laplacian. Parameters ---------- mat : obj Input adjacency matrix. If it is a Laplacian matrix already, return it. normalized : bool Whether to use normalized Laplacian. Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian. Returns ------- obj Laplacian of the input adjacency matrix Examples -------- >>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False) [[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L129-L174
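A hedged sketch matching the mat_to_laplacian docstring: the adjacency matrix of a triangle becomes its combinatorial Laplacian when normalized is False.
import numpy as np
from netlsd.util import mat_to_laplacian

A = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
print(mat_to_laplacian(A, False))
# [[ 2 -1 -1]
#  [-1  2 -1]
#  [-1 -1  2]]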
xgfs/NetLSD
netlsd/util.py
updown_linear_approx
def updown_linear_approx(eigvals_lower, eigvals_upper, nv):
    """
    Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.

    Parameters
    ----------
    eigvals_lower : numpy.ndarray
        Lower part of the spectrum, sorted
    eigvals_upper : numpy.ndarray
        Upper part of the spectrum, sorted
    nv : int
        Total number of nodes (eigenvalues) in the graph.

    Returns
    -------
    numpy.ndarray
        Vector of approximated eigenvalues

    Examples
    --------
    >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    nal = len(eigvals_lower)
    nau = len(eigvals_upper)
    if nv < nal + nau:
        raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv))
    ret = np.zeros(nv)
    ret[:nal] = eigvals_lower
    ret[-nau:] = eigvals_upper
    ret[nal-1:-nau+1] = np.linspace(eigvals_lower[-1], eigvals_upper[0], nv-nal-nau+2)
    return ret
python
def updown_linear_approx(eigvals_lower, eigvals_upper, nv):
    """
    Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.

    Parameters
    ----------
    eigvals_lower : numpy.ndarray
        Lower part of the spectrum, sorted
    eigvals_upper : numpy.ndarray
        Upper part of the spectrum, sorted
    nv : int
        Total number of nodes (eigenvalues) in the graph.

    Returns
    -------
    numpy.ndarray
        Vector of approximated eigenvalues

    Examples
    --------
    >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    nal = len(eigvals_lower)
    nau = len(eigvals_upper)
    if nv < nal + nau:
        raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv))
    ret = np.zeros(nv)
    ret[:nal] = eigvals_lower
    ret[-nau:] = eigvals_upper
    ret[nal-1:-nau+1] = np.linspace(eigvals_lower[-1], eigvals_upper[0], nv-nal-nau+2)
    return ret
Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.

Parameters
----------
eigvals_lower : numpy.ndarray
    Lower part of the spectrum, sorted
eigvals_upper : numpy.ndarray
    Upper part of the spectrum, sorted
nv : int
    Total number of nodes (eigenvalues) in the graph.

Returns
-------
numpy.ndarray
    Vector of approximated eigenvalues

Examples
--------
>>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L177-L209
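A hedged worked example matching the docstring: the three known lowest and three known highest eigenvalues are kept, and the gap between 3 and 7 is filled by linear interpolation.
import numpy as np
from netlsd.util import updown_linear_approx

approx = updown_linear_approx(np.array([1., 2., 3.]), np.array([7., 8., 9.]), 9)
print(approx)  # [1. 2. 3. 4. 5. 6. 7. 8. 9.]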
xgfs/NetLSD
netlsd/util.py
eigenvalues_auto
def eigenvalues_auto(mat, n_eivals='auto'): """ Automatically computes the spectrum of a given Laplacian matrix. Parameters ---------- mat : numpy.ndarray or scipy.sparse Laplacian matrix n_eivals : string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. Returns ------- np.ndarray Vector of approximated eigenvalues Examples -------- >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto') array([0, 3, 3]) """ do_full = True n_lower = 150 n_upper = 150 nv = mat.shape[0] if n_eivals == 'auto': if mat.shape[0] > 1024: do_full = False if n_eivals == 'full': do_full = True if isinstance(n_eivals, int): n_lower = n_upper = n_eivals do_full = False if isinstance(n_eivals, tuple): n_lower, n_upper = n_eivals do_full = False if do_full and sps.issparse(mat): mat = mat.todense() if sps.issparse(mat): if n_lower == n_upper: tr_eivals = spsl.eigsh(mat, 2*n_lower, which='BE', return_eigenvectors=False) return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv) else: lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::-1] up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False) return updown_linear_approx(lo_eivals, up_eivals, nv) else: if do_full: return spl.eigvalsh(mat) else: lo_eivals = spl.eigvalsh(mat, eigvals=(0, n_lower-1)) up_eivals = spl.eigvalsh(mat, eigvals=(nv-n_upper-1, nv-1)) return updown_linear_approx(lo_eivals, up_eivals, nv)
python
def eigenvalues_auto(mat, n_eivals='auto'): """ Automatically computes the spectrum of a given Laplacian matrix. Parameters ---------- mat : numpy.ndarray or scipy.sparse Laplacian matrix n_eivals : string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. Returns ------- np.ndarray Vector of approximated eigenvalues Examples -------- >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto') array([0, 3, 3]) """ do_full = True n_lower = 150 n_upper = 150 nv = mat.shape[0] if n_eivals == 'auto': if mat.shape[0] > 1024: do_full = False if n_eivals == 'full': do_full = True if isinstance(n_eivals, int): n_lower = n_upper = n_eivals do_full = False if isinstance(n_eivals, tuple): n_lower, n_upper = n_eivals do_full = False if do_full and sps.issparse(mat): mat = mat.todense() if sps.issparse(mat): if n_lower == n_upper: tr_eivals = spsl.eigsh(mat, 2*n_lower, which='BE', return_eigenvectors=False) return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv) else: lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::-1] up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False) return updown_linear_approx(lo_eivals, up_eivals, nv) else: if do_full: return spl.eigvalsh(mat) else: lo_eivals = spl.eigvalsh(mat, eigvals=(0, n_lower-1)) up_eivals = spl.eigvalsh(mat, eigvals=(nv-n_upper-1, nv-1)) return updown_linear_approx(lo_eivals, up_eivals, nv)
Automatically computes the spectrum of a given Laplacian matrix. Parameters ---------- mat : numpy.ndarray or scipy.sparse Laplacian matrix n_eivals : string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. Returns ------- np.ndarray Vector of approximated eigenvalues Examples -------- >>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto') array([0, 3, 3])
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L212-L268
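A short sketch of the dispatch logic above, assuming eigenvalues_auto is importable from netlsd.util; a 3-node Laplacian is far below the 1024-node threshold, so 'auto' falls back to a full dense decomposition.
import numpy as np
from netlsd.util import eigenvalues_auto

# Laplacian of the complete graph on 3 nodes (a triangle).
L = np.array([[ 2., -1., -1.],
              [-1.,  2., -1.],
              [-1., -1.,  2.]])

print(eigenvalues_auto(L, 'auto'))   # approximately [0., 3., 3.]
print(eigenvalues_auto(L, 'full'))   # same result, full decomposition forced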
xgfs/NetLSD
netlsd/kernels.py
netlsd
def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True): """ Computes NetLSD signature from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation kernel : str Either 'heat' or 'wave'. Type of a kernel to use for computation. eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray NetLSD signature """ if kernel not in {'heat', 'wave'}: raise AttributeError('Unrecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel)) if not isinstance(normalized_laplacian, bool): raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian)) if not isinstance(eigenvalues, (int, tuple, str)): raise AttributeError('Unrecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues))) if not isinstance(timescales, np.ndarray): raise AttributeError('Unrecognized timescales data type: expected np.ndarray, got {0}'.format(type(timescales))) if timescales.ndim != 1: raise AttributeError('Unrecognized timescales dimensionality: expected a vector, got {0}-d array'.format(timescales.ndim)) if normalization not in {'complete', 'empty', 'none', True, False, None}: if not isinstance(normalization, np.ndarray): raise AttributeError('Unrecognized normalization type: expected one of [\'complete\', \'empty\', None or np.ndarray], got {0}'.format(normalization)) if normalization.ndim != 1: raise AttributeError('Unrecognized normalization dimensionality: expected a vector, got {0}-d array'.format(normalization.ndim)) if timescales.shape[0] != normalization.shape[0]: raise AttributeError('Unrecognized normalization dimensionality: expected {0}-length vector, got length {1}'.format(timescales.shape[0], normalization.shape[0])) eivals = check_1d(inp) if eivals is None: mat = check_2d(inp) if mat is None: mat = graph_to_laplacian(inp, normalized_laplacian) if mat is None: raise ValueError('Unrecognized input type: expected one of [\'np.ndarray\', \'scipy.sparse\', \'networkx.Graph\', \'graph_tool.Graph\', or \'igraph.Graph\'], got {0}'.format(type(inp))) else: mat = mat_to_laplacian(inp, normalized_laplacian) eivals = eigenvalues_auto(mat, eigenvalues) if kernel == 'heat': return _hkt(eivals, timescales, normalization, normalized_laplacian) else: return _wkt(eivals, timescales, normalization, normalized_laplacian)
python
def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True): """ Computes NetLSD signature from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation kernel : str Either 'heat' or 'wave'. Type of a kernel to use for computation. eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray NetLSD signature """ if kernel not in {'heat', 'wave'}: raise AttributeError('Unrecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel)) if not isinstance(normalized_laplacian, bool): raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian)) if not isinstance(eigenvalues, (int, tuple, str)): raise AttributeError('Unrecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues))) if not isinstance(timescales, np.ndarray): raise AttributeError('Unrecognized timescales data type: expected np.ndarray, got {0}'.format(type(timescales))) if timescales.ndim != 1: raise AttributeError('Unrecognized timescales dimensionality: expected a vector, got {0}-d array'.format(timescales.ndim)) if normalization not in {'complete', 'empty', 'none', True, False, None}: if not isinstance(normalization, np.ndarray): raise AttributeError('Unrecognized normalization type: expected one of [\'complete\', \'empty\', None or np.ndarray], got {0}'.format(normalization)) if normalization.ndim != 1: raise AttributeError('Unrecognized normalization dimensionality: expected a vector, got {0}-d array'.format(normalization.ndim)) if timescales.shape[0] != normalization.shape[0]: raise AttributeError('Unrecognized normalization dimensionality: expected {0}-length vector, got length {1}'.format(timescales.shape[0], normalization.shape[0])) eivals = check_1d(inp) if eivals is None: mat = check_2d(inp) if mat is None: mat = graph_to_laplacian(inp, normalized_laplacian) if mat is None: raise ValueError('Unrecognized input type: expected one of [\'np.ndarray\', \'scipy.sparse\', \'networkx.Graph\', \'graph_tool.Graph\', or \'igraph.Graph\'], got {0}'.format(type(inp))) else: mat = mat_to_laplacian(inp, normalized_laplacian) eivals = eigenvalues_auto(mat, eigenvalues) if kernel == 'heat': return _hkt(eivals, timescales, normalization, normalized_laplacian) else: return _wkt(eivals, timescales, normalization, normalized_laplacian)
Computes NetLSD signature from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation kernel : str Either 'heat' or 'wave'. Type of a kernel to use for computation. eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray NetLSD signature
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/kernels.py#L25-L91
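A sketch of calling the generic entry point directly on a precomputed spectrum, assuming it can be imported from the netlsd.kernels module shown in the path above and that the 1-D input path (check_1d) accepts a plain numpy vector.
import numpy as np
from netlsd.kernels import netlsd

# Precomputed eigenvalues of a normalized Laplacian (3-node triangle: 0, 1.5, 1.5).
eivals = np.array([0.0, 1.5, 1.5])

signature = netlsd(eivals, kernel='heat')   # heat trace on the default 250 timescales
print(signature.shape)                      # (250,)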
xgfs/NetLSD
netlsd/kernels.py
heat
def heat(inp, timescales=np.logspace(-2, 2, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True): """ Computes heat kernel trace from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Heat kernel trace signature """ return netlsd(inp, timescales, 'heat', eigenvalues, normalization, normalized_laplacian)
python
def heat(inp, timescales=np.logspace(-2, 2, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True): """ Computes heat kernel trace from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Heat kernel trace signature """ return netlsd(inp, timescales, 'heat', eigenvalues, normalization, normalized_laplacian)
Computes heat kernel trace from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Heat kernel trace signature
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/kernels.py#L94-L127
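An end-to-end call sketch, assuming the netlsd package exposes heat at the top level (as its README suggests) and that networkx is installed; the signature has one entry per timescale.
import numpy as np
import networkx as nx
import netlsd

g = nx.karate_club_graph()                        # 34 nodes, small enough for a full spectrum
signature = netlsd.heat(g)                        # default grid: 250 log-spaced timescales
coarse = netlsd.heat(g, timescales=np.logspace(-2, 2, 32))
print(signature.shape, coarse.shape)              # (250,) (32,)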
xgfs/NetLSD
netlsd/kernels.py
wave
def wave(inp, timescales=np.linspace(0, 2*np.pi, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True): """ Computes wave kernel trace from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized wave kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature """ return netlsd(inp, timescales, 'wave', eigenvalues, normalization, normalized_laplacian)
python
def wave(inp, timescales=np.linspace(0, 2*np.pi, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True): """ Computes wave kernel trace from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized wave kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature """ return netlsd(inp, timescales, 'wave', eigenvalues, normalization, normalized_laplacian)
Computes wave kernel trace from some given input, timescales, and normalization. Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- inp: obj 2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation eigenvalues : str Either string or int or tuple Number of eigenvalues to compute / use for approximation. If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues. If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation. If tuple, we expect two ints, first for lower part of approximation, and second for the upper part. normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized wave kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/kernels.py#L130-L163
xgfs/NetLSD
netlsd/kernels.py
_hkt
def _hkt(eivals, timescales, normalization, normalized_laplacian): """ Computes heat kernel trace from given eigenvalues, timescales, and normalization. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Heat kernel trace signature """ nv = eivals.shape[0] hkt = np.zeros(timescales.shape) for idx, t in enumerate(timescales): hkt[idx] = np.sum(np.exp(-t * eivals)) if isinstance(normalization, np.ndarray): return hkt / normalization if normalization == 'empty' or normalization == True: return hkt / nv if normalization == 'complete': if normalized_laplacian: return hkt / (1 + (nv - 1) * np.exp(-timescales)) else: return hkt / (1 + nv * np.exp(-nv * timescales)) return hkt
python
def _hkt(eivals, timescales, normalization, normalized_laplacian): """ Computes heat kernel trace from given eigenvalues, timescales, and normalization. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Heat kernel trace signature """ nv = eivals.shape[0] hkt = np.zeros(timescales.shape) for idx, t in enumerate(timescales): hkt[idx] = np.sum(np.exp(-t * eivals)) if isinstance(normalization, np.ndarray): return hkt / normalization if normalization == 'empty' or normalization == True: return hkt / nv if normalization == 'complete': if normalized_laplacian: return hkt / (1 + (nv - 1) * np.exp(-timescales)) else: return hkt / (1 + nv * np.exp(-nv * timescales)) return hkt
Computes heat kernel trace from given eigenvalues, timescales, and normalization. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized heat kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Heat kernel trace signature
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/kernels.py#L166-L205
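The per-timescale loop in _hkt can be written as a single vectorized expression; a minimal sketch assuming plain 1-D numpy arrays, matching only the unnormalized branch of the function above.
import numpy as np

def heat_trace_vectorized(eivals, timescales):
    # One exp(-t * lambda) per (timescale, eigenvalue) pair, summed over eigenvalues.
    return np.exp(-np.outer(timescales, eivals)).sum(axis=1)

eivals = np.array([0.0, 3.0, 3.0])
timescales = np.logspace(-2, 2, 5)
print(heat_trace_vectorized(eivals, timescales))   # equals the _hkt loop, entry by entry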
xgfs/NetLSD
netlsd/kernels.py
_wkt
def _wkt(eivals, timescales, normalization, normalized_laplacian): """ Computes wave kernel trace from given eigenvalues, timescales, and normalization. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized wave kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature """ nv = eivals.shape[0] wkt = np.zeros(timescales.shape) for idx, t in enumerate(timescales): wkt[idx] = np.sum(np.exp(-1j * t * eivals)) if isinstance(normalization, np.ndarray): return wkt / normalization if normalization == 'empty' or normalization == True: return wkt / nv if normalization == 'complete': if normalized_laplacian: return wkt / (1 + (nv - 1) * np.cos(timescales)) else: return wkt / (1 + (nv - 1) * np.cos(nv * timescales)) return wkt
python
def _wkt(eivals, timescales, normalization, normalized_laplacian): """ Computes wave kernel trace from given eigenvalues, timescales, and normalization. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized wave kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature """ nv = eivals.shape[0] wkt = np.zeros(timescales.shape) for idx, t in enumerate(timescales): wkt[idx] = np.sum(np.exp(-1j * t * eivals)) if isinstance(normalization, np.ndarray): return wkt / normalization if normalization == 'empty' or normalization == True: return wkt / nv if normalization == 'complete': if normalized_laplacian: return wkt / (1 + (nv - 1) * np.cos(timescales)) else: return wkt / (1 + (nv - 1) * np.cos(nv * timescales)) return wkt
Computes wave kernel trace from given eigenvalues, timescales, and normalization. For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return unnormalized wave kernel trace. For the details how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/kernels.py#L208-L247
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.clear
def clear(self): """Remove all the elements from the list.""" self._len = 0 del self._maxes[:] del self._lists[:] del self._keys[:] del self._index[:]
python
def clear(self): """Remove all the elements from the list.""" self._len = 0 del self._maxes[:] del self._lists[:] del self._keys[:] del self._index[:]
Remove all the elements from the list.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L53-L59
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.discard
def discard(self, val): """ Remove the first occurrence of *val*. If *val* is not a member, does nothing. """ _maxes = self._maxes if not _maxes: return key = self._key(val) pos = bisect_left(_maxes, key) if pos == len(_maxes): return _keys = self._keys _lists = self._lists idx = bisect_left(_keys[pos], key) len_keys = len(_keys) len_sublist = len(_keys[pos]) while True: if _keys[pos][idx] != key: return if _lists[pos][idx] == val: self._delete(pos, idx) return idx += 1 if idx == len_sublist: pos += 1 if pos == len_keys: return len_sublist = len(_keys[pos]) idx = 0
python
def discard(self, val): """ Remove the first occurrence of *val*. If *val* is not a member, does nothing. """ _maxes = self._maxes if not _maxes: return key = self._key(val) pos = bisect_left(_maxes, key) if pos == len(_maxes): return _keys = self._keys _lists = self._lists idx = bisect_left(_keys[pos], key) len_keys = len(_keys) len_sublist = len(_keys[pos]) while True: if _keys[pos][idx] != key: return if _lists[pos][idx] == val: self._delete(pos, idx) return idx += 1 if idx == len_sublist: pos += 1 if pos == len_keys: return len_sublist = len(_keys[pos]) idx = 0
Remove the first occurrence of *val*. If *val* is not a member, does nothing.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L178-L214
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.remove
def remove(self, val): """ Remove first occurrence of *val*. Raises ValueError if *val* is not present. """ _maxes = self._maxes if not _maxes: raise ValueError('{0} not in list'.format(repr(val))) key = self._key(val) pos = bisect_left(_maxes, key) if pos == len(_maxes): raise ValueError('{0} not in list'.format(repr(val))) _keys = self._keys _lists = self._lists idx = bisect_left(_keys[pos], key) len_keys = len(_keys) len_sublist = len(_keys[pos]) while True: if _keys[pos][idx] != key: raise ValueError('{0} not in list'.format(repr(val))) if _lists[pos][idx] == val: self._delete(pos, idx) return idx += 1 if idx == len_sublist: pos += 1 if pos == len_keys: raise ValueError('{0} not in list'.format(repr(val))) len_sublist = len(_keys[pos]) idx = 0
python
def remove(self, val): """ Remove first occurrence of *val*. Raises ValueError if *val* is not present. """ _maxes = self._maxes if not _maxes: raise ValueError('{0} not in list'.format(repr(val))) key = self._key(val) pos = bisect_left(_maxes, key) if pos == len(_maxes): raise ValueError('{0} not in list'.format(repr(val))) _keys = self._keys _lists = self._lists idx = bisect_left(_keys[pos], key) len_keys = len(_keys) len_sublist = len(_keys[pos]) while True: if _keys[pos][idx] != key: raise ValueError('{0} not in list'.format(repr(val))) if _lists[pos][idx] == val: self._delete(pos, idx) return idx += 1 if idx == len_sublist: pos += 1 if pos == len_keys: raise ValueError('{0} not in list'.format(repr(val))) len_sublist = len(_keys[pos]) idx = 0
Remove first occurrence of *val*. Raises ValueError if *val* is not present.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L216-L252
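A small behavioral sketch of discard versus remove, assuming the standalone sortedcontainers package (from which this module is vendored) is installed; discard is silent for missing values, remove raises ValueError.
from sortedcontainers import SortedListWithKey

sl = SortedListWithKey([3, -1, 2], key=abs)   # ordered by absolute value: [-1, 2, 3]
sl.discard(10)     # not present: nothing happens
sl.remove(2)       # present: removed
print(list(sl))    # [-1, 3]
try:
    sl.remove(10)  # not present: raises
except ValueError as exc:
    print(exc)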
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey._delete
def _delete(self, pos, idx): """ Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc. """ _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index keys_pos = _keys[pos] lists_pos = _lists[pos] del keys_pos[idx] del lists_pos[idx] self._len -= 1 len_keys_pos = len(keys_pos) if len_keys_pos > self._half: _maxes[pos] = keys_pos[-1] if len(_index) > 0: child = self._offset + pos while child > 0: _index[child] -= 1 child = (child - 1) >> 1 _index[0] -= 1 elif len(_keys) > 1: if not pos: pos += 1 prev = pos - 1 _keys[prev].extend(_keys[pos]) _lists[prev].extend(_lists[pos]) _maxes[prev] = _keys[prev][-1] del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:] self._expand(prev) elif len_keys_pos: _maxes[pos] = keys_pos[-1] else: del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:]
python
def _delete(self, pos, idx): """ Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc. """ _maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index keys_pos = _keys[pos] lists_pos = _lists[pos] del keys_pos[idx] del lists_pos[idx] self._len -= 1 len_keys_pos = len(keys_pos) if len_keys_pos > self._half: _maxes[pos] = keys_pos[-1] if len(_index) > 0: child = self._offset + pos while child > 0: _index[child] -= 1 child = (child - 1) >> 1 _index[0] -= 1 elif len(_keys) > 1: if not pos: pos += 1 prev = pos - 1 _keys[prev].extend(_keys[pos]) _lists[prev].extend(_lists[pos]) _maxes[prev] = _keys[prev][-1] del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:] self._expand(prev) elif len_keys_pos: _maxes[pos] = keys_pos[-1] else: del _keys[pos] del _lists[pos] del _maxes[pos] del _index[:]
Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L254-L312
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey._loc
def _loc(self, pos, idx): """Convert an index pair (alpha, beta) into a single index that corresponds to the position of the value in the sorted list. Most queries require the index be built. Details of the index are described in self._build_index. Indexing requires traversing the tree from a leaf node to the root. The parent of each node is easily computable at (pos - 1) // 2. Left-child nodes are always at odd indices and right-child nodes are always at even indices. When traversing up from a right-child node, increment the total by the left-child node. The final index is the sum from traversal and the index in the sublist. For example, using the index from self._build_index: _index = 14 5 9 3 2 4 5 _offset = 3 Tree: 14 5 9 3 2 4 5 Converting index pair (2, 3) into a single index involves iterating like so: 1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify the node as a left-child node. At such nodes, we simply traverse to the parent. 2. At node 9, position 2, we recognize the node as a right-child node and accumulate the left-child in our total. Total is now 5 and we traverse to the parent at position 0. 3. Iteration ends at the root. Computing the index is the sum of the total and beta: 5 + 3 = 8. """ if not pos: return idx _index = self._index if not len(_index): self._build_index() total = 0 # Increment pos to point in the index to len(self._lists[pos]). pos += self._offset # Iterate until reaching the root of the index tree at pos = 0. while pos: # Right-child nodes are at odd indices. At such indices # account the total below the left child node. if not (pos & 1): total += _index[pos - 1] # Advance pos to the parent node. pos = (pos - 1) >> 1 return total + idx
python
def _loc(self, pos, idx): """Convert an index pair (alpha, beta) into a single index that corresponds to the position of the value in the sorted list. Most queries require the index be built. Details of the index are described in self._build_index. Indexing requires traversing the tree from a leaf node to the root. The parent of each node is easily computable at (pos - 1) // 2. Left-child nodes are always at odd indices and right-child nodes are always at even indices. When traversing up from a right-child node, increment the total by the left-child node. The final index is the sum from traversal and the index in the sublist. For example, using the index from self._build_index: _index = 14 5 9 3 2 4 5 _offset = 3 Tree: 14 5 9 3 2 4 5 Converting index pair (2, 3) into a single index involves iterating like so: 1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify the node as a left-child node. At such nodes, we simply traverse to the parent. 2. At node 9, position 2, we recognize the node as a right-child node and accumulate the left-child in our total. Total is now 5 and we traverse to the parent at position 0. 3. Iteration ends at the root. Computing the index is the sum of the total and beta: 5 + 3 = 8. """ if not pos: return idx _index = self._index if not len(_index): self._build_index() total = 0 # Increment pos to point in the index to len(self._lists[pos]). pos += self._offset # Iterate until reaching the root of the index tree at pos = 0. while pos: # Right-child nodes are at odd indices. At such indices # account the total below the left child node. if not (pos & 1): total += _index[pos - 1] # Advance pos to the parent node. pos = (pos - 1) >> 1 return total + idx
Convert an index pair (alpha, beta) into a single index that corresponds to the position of the value in the sorted list. Most queries require the index be built. Details of the index are described in self._build_index. Indexing requires traversing the tree from a leaf node to the root. The parent of each node is easily computable at (pos - 1) // 2. Left-child nodes are always at odd indices and right-child nodes are always at even indices. When traversing up from a right-child node, increment the total by the left-child node. The final index is the sum from traversal and the index in the sublist. For example, using the index from self._build_index: _index = 14 5 9 3 2 4 5 _offset = 3 Tree: 14 5 9 3 2 4 5 Converting index pair (2, 3) into a single index involves iterating like so: 1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify the node as a left-child node. At such nodes, we simply traverse to the parent. 2. At node 9, position 2, we recognize the node as a right-child node and accumulate the left-child in our total. Total is now 5 and we traverse to the parent at position 0. 3. Iteration ends at the root. Computing the index is the sum of the total and beta: 5 + 3 = 8.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L314-L386
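The traversal described in the docstring can be checked with a self-contained sketch; the helper below restates the same arithmetic on plain lists (index and offset are stand-ins for the attributes of the sorted list) and reproduces the worked example, pair (2, 3) mapping to position 8.
def loc(index, offset, pos, idx):
    # Walk from the leaf for sublist `pos` up to the root, adding the subtree size
    # of every left sibling passed on the way; right children sit at even slots.
    if not pos:
        return idx
    total = 0
    pos += offset
    while pos:
        if not (pos & 1):          # right child: count the left sibling's subtree
            total += index[pos - 1]
        pos = (pos - 1) >> 1       # move to the parent
    return total + idx

_index = [14, 5, 9, 3, 2, 4, 5]    # root 14; children 5 and 9; leaves 3, 2, 4, 5
print(loc(_index, 3, 2, 3))        # 8, matching the docstring example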
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.islice
def islice(self, start=None, stop=None, reverse=False): """ Returns an iterator that slices `self` from `start` to `stop` index, inclusive and exclusive respectively. When `reverse` is `True`, values are yielded from the iterator in reverse order. Both `start` and `stop` default to `None` which is automatically inclusive of the beginning and end. """ _len = self._len if not _len: return iter(()) start, stop, step = self._slice(slice(start, stop)) if start >= stop: return iter(()) _pos = self._pos min_pos, min_idx = _pos(start) if stop == _len: max_pos = len(self._lists) - 1 max_idx = len(self._lists[-1]) else: max_pos, max_idx = _pos(stop) return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
python
def islice(self, start=None, stop=None, reverse=False): """ Returns an iterator that slices `self` from `start` to `stop` index, inclusive and exclusive respectively. When `reverse` is `True`, values are yielded from the iterator in reverse order. Both `start` and `stop` default to `None` which is automatically inclusive of the beginning and end. """ _len = self._len if not _len: return iter(()) start, stop, step = self._slice(slice(start, stop)) if start >= stop: return iter(()) _pos = self._pos min_pos, min_idx = _pos(start) if stop == _len: max_pos = len(self._lists) - 1 max_idx = len(self._lists[-1]) else: max_pos, max_idx = _pos(stop) return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
Returns an iterator that slices `self` from `start` to `stop` index, inclusive and exclusive respectively. When `reverse` is `True`, values are yielded from the iterator in reverse order. Both `start` and `stop` default to `None` which is automatically inclusive of the beginning and end.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L836-L867
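A quick positional-slicing sketch for islice, assuming the standalone sortedcontainers package is installed; start and stop are positions in the sorted order, not values.
from sortedcontainers import SortedListWithKey

sl = SortedListWithKey(['pear', 'Apple', 'fig', 'Banana'], key=str.lower)
print(list(sl))                         # ['Apple', 'Banana', 'fig', 'pear'] (case-insensitive order)
print(list(sl.islice(1, 3)))            # positions 1..2 -> ['Banana', 'fig']
print(list(sl.islice(reverse=True)))    # whole list, yielded in reverse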
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.irange
def irange(self, minimum=None, maximum=None, inclusive=(True, True), reverse=False): """ Create an iterator of values between `minimum` and `maximum`. `inclusive` is a pair of booleans that indicates whether the minimum and maximum ought to be included in the range, respectively. The default is (True, True) such that the range is inclusive of both minimum and maximum. Both `minimum` and `maximum` default to `None` which is automatically inclusive of the start and end of the list, respectively. When `reverse` is `True` the values are yielded from the iterator in reverse order; `reverse` defaults to `False`. """ minimum = self._key(minimum) if minimum is not None else None maximum = self._key(maximum) if maximum is not None else None return self.irange_key( min_key=minimum, max_key=maximum, inclusive=inclusive, reverse=reverse, )
python
def irange(self, minimum=None, maximum=None, inclusive=(True, True), reverse=False): """ Create an iterator of values between `minimum` and `maximum`. `inclusive` is a pair of booleans that indicates whether the minimum and maximum ought to be included in the range, respectively. The default is (True, True) such that the range is inclusive of both minimum and maximum. Both `minimum` and `maximum` default to `None` which is automatically inclusive of the start and end of the list, respectively. When `reverse` is `True` the values are yielded from the iterator in reverse order; `reverse` defaults to `False`. """ minimum = self._key(minimum) if minimum is not None else None maximum = self._key(maximum) if maximum is not None else None return self.irange_key( min_key=minimum, max_key=maximum, inclusive=inclusive, reverse=reverse, )
Create an iterator of values between `minimum` and `maximum`. `inclusive` is a pair of booleans that indicates whether the minimum and maximum ought to be included in the range, respectively. The default is (True, True) such that the range is inclusive of both minimum and maximum. Both `minimum` and `maximum` default to `None` which is automatically inclusive of the start and end of the list, respectively. When `reverse` is `True` the values are yielded from the iterator in reverse order; `reverse` defaults to `False`.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L908-L929
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.copy
def copy(self): """Return a shallow copy of the sorted list.""" return self.__class__(self, key=self._key, load=self._load)
python
def copy(self): """Return a shallow copy of the sorted list.""" return self.__class__(self, key=self._key, load=self._load)
Return a shallow copy of the sorted list.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L1100-L1102
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.append
def append(self, val): """ Append the element *val* to the list. Raises a ValueError if the *val* would violate the sort order. """ _maxes, _lists, _keys = self._maxes, self._lists, self._keys key = self._key(val) if not _maxes: _maxes.append(key) _keys.append([key]) _lists.append([val]) self._len = 1 return pos = len(_keys) - 1 if key < _keys[pos][-1]: msg = '{0} not in sort order at index {1}'.format(repr(val), self._len) raise ValueError(msg) _maxes[pos] = key _keys[pos].append(key) _lists[pos].append(val) self._len += 1 self._expand(pos)
python
def append(self, val): """ Append the element *val* to the list. Raises a ValueError if the *val* would violate the sort order. """ _maxes, _lists, _keys = self._maxes, self._lists, self._keys key = self._key(val) if not _maxes: _maxes.append(key) _keys.append([key]) _lists.append([val]) self._len = 1 return pos = len(_keys) - 1 if key < _keys[pos][-1]: msg = '{0} not in sort order at index {1}'.format(repr(val), self._len) raise ValueError(msg) _maxes[pos] = key _keys[pos].append(key) _lists[pos].append(val) self._len += 1 self._expand(pos)
Append the element *val* to the list. Raises a ValueError if the *val* would violate the sort order.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L1106-L1132
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.extend
def extend(self, values): """ Extend the list by appending all elements from the *values*. Raises a ValueError if the sort order would be violated. """ _maxes, _keys, _lists, _load = self._maxes, self._keys, self._lists, self._load if not isinstance(values, list): values = list(values) keys = list(map(self._key, values)) if any(keys[pos - 1] > keys[pos] for pos in range(1, len(keys))): raise ValueError('given sequence not in sort order') offset = 0 if _maxes: if keys[0] < _keys[-1][-1]: msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len) raise ValueError(msg) if len(_keys[-1]) < self._half: _lists[-1].extend(values[:_load]) _keys[-1].extend(keys[:_load]) _maxes[-1] = _keys[-1][-1] offset = _load len_keys = len(_keys) for idx in range(offset, len(keys), _load): _lists.append(values[idx:(idx + _load)]) _keys.append(keys[idx:(idx + _load)]) _maxes.append(_keys[-1][-1]) _index = self._index if len_keys == len(_keys): len_index = len(_index) if len_index > 0: len_values = len(values) child = len_index - 1 while child: _index[child] += len_values child = (child - 1) >> 1 _index[0] += len_values else: del _index[:] self._len += len(values)
python
def extend(self, values): """ Extend the list by appending all elements from the *values*. Raises a ValueError if the sort order would be violated. """ _maxes, _keys, _lists, _load = self._maxes, self._keys, self._lists, self._load if not isinstance(values, list): values = list(values) keys = list(map(self._key, values)) if any(keys[pos - 1] > keys[pos] for pos in range(1, len(keys))): raise ValueError('given sequence not in sort order') offset = 0 if _maxes: if keys[0] < _keys[-1][-1]: msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len) raise ValueError(msg) if len(_keys[-1]) < self._half: _lists[-1].extend(values[:_load]) _keys[-1].extend(keys[:_load]) _maxes[-1] = _keys[-1][-1] offset = _load len_keys = len(_keys) for idx in range(offset, len(keys), _load): _lists.append(values[idx:(idx + _load)]) _keys.append(keys[idx:(idx + _load)]) _maxes.append(_keys[-1][-1]) _index = self._index if len_keys == len(_keys): len_index = len(_index) if len_index > 0: len_values = len(values) child = len_index - 1 while child: _index[child] += len_values child = (child - 1) >> 1 _index[0] += len_values else: del _index[:] self._len += len(values)
Extend the list by appending all elements from the *values*. Raises a ValueError if the sort order would be violated.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L1134-L1184
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py
SortedListWithKey.pop
def pop(self, idx=-1): """ Remove and return item at *idx* (default last). Raises IndexError if list is empty or index is out of range. Negative indices are supported, as for slice indices. """ if (idx < 0 and -idx > self._len) or (idx >= self._len): raise IndexError('pop index out of range') pos, idx = self._pos(idx) val = self._lists[pos][idx] self._delete(pos, idx) return val
python
def pop(self, idx=-1): """ Remove and return item at *idx* (default last). Raises IndexError if list is empty or index is out of range. Negative indices are supported, as for slice indices. """ if (idx < 0 and -idx > self._len) or (idx >= self._len): raise IndexError('pop index out of range') pos, idx = self._pos(idx) val = self._lists[pos][idx] self._delete(pos, idx) return val
Remove and return item at *idx* (default last). Raises IndexError if list is empty or index is out of range. Negative indices are supported, as for slice indices.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlistwithkey.py#L1252-L1265
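A quick sketch of positional pop, assuming the standalone sortedcontainers package is installed; the default index -1 removes the item with the greatest key.
from sortedcontainers import SortedListWithKey

sl = SortedListWithKey('banana', key=ord)   # characters ordered by code point: a a a b n n
print(sl.pop())       # 'n'  (last position by default)
print(sl.pop(0))      # 'a'  (smallest)
print(list(sl))       # ['a', 'a', 'b', 'n']
# An out-of-range index such as sl.pop(10) raises IndexError.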
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
not26
def not26(func): """Function decorator for methods not implemented in Python 2.6.""" @wraps(func) def errfunc(*args, **kwargs): raise NotImplementedError if hexversion < 0x02070000: return errfunc else: return func
python
def not26(func): """Function decorator for methods not implemented in Python 2.6.""" @wraps(func) def errfunc(*args, **kwargs): raise NotImplementedError if hexversion < 0x02070000: return errfunc else: return func
Function decorator for methods not implemented in Python 2.6.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L17-L27
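A tiny illustration of the guard above, restated as a self-contained sketch (the decorated viewkeys function is hypothetical); on Python 2.6 the wrapped callable raises NotImplementedError, on newer interpreters it is returned unchanged.
from functools import wraps
from sys import hexversion

def not26(func):
    """Function decorator for methods not implemented in Python 2.6."""
    @wraps(func)
    def errfunc(*args, **kwargs):
        raise NotImplementedError
    return errfunc if hexversion < 0x02070000 else func

@not26
def viewkeys(mapping):
    return mapping.keys()

print(viewkeys({'a': 1}))   # runs normally on Python >= 2.7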
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
SortedDict.copy
def copy(self): """Return a shallow copy of the sorted dictionary.""" return self.__class__(self._key, self._load, self._iteritems())
python
def copy(self): """Return a shallow copy of the sorted dictionary.""" return self.__class__(self._key, self._load, self._iteritems())
Return a shallow copy of the sorted dictionary.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L200-L202
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
SortedDict.pop
def pop(self, key, default=_NotGiven): """ If *key* is in the dictionary, remove it and return its value, else return *default*. If *default* is not given and *key* is not in the dictionary, a KeyError is raised. """ if key in self: self._list_remove(key) return self._pop(key) else: if default is _NotGiven: raise KeyError(key) else: return default
python
def pop(self, key, default=_NotGiven): """ If *key* is in the dictionary, remove it and return its value, else return *default*. If *default* is not given and *key* is not in the dictionary, a KeyError is raised. """ if key in self: self._list_remove(key) return self._pop(key) else: if default is _NotGiven: raise KeyError(key) else: return default
If *key* is in the dictionary, remove it and return its value, else return *default*. If *default* is not given and *key* is not in the dictionary, a KeyError is raised.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L285-L298
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
SortedDict.popitem
def popitem(self, last=True): """ Remove and return a ``(key, value)`` pair from the dictionary. If last=True (default) then remove the *greatest* `key` from the dictionary. Else, remove the *least* key from the dictionary. If the dictionary is empty, calling `popitem` raises a `KeyError`. """ if not len(self): raise KeyError('popitem(): dictionary is empty') key = self._list_pop(-1 if last else 0) value = self._pop(key) return (key, value)
python
def popitem(self, last=True): """ Remove and return a ``(key, value)`` pair from the dictionary. If last=True (default) then remove the *greatest* `key` from the dictionary. Else, remove the *least* key from the dictionary. If the dictionary is empty, calling `popitem` raises a `KeyError`. """ if not len(self): raise KeyError('popitem(): dictionary is empty') key = self._list_pop(-1 if last else 0) value = self._pop(key) return (key, value)
Remove and return a ``(key, value)`` pair from the dictionary. If last=True (default) then remove the *greatest* `key` from the dictionary. Else, remove the *least* key from the dictionary. If the dictionary is empty, calling `popitem` raises a `KeyError`.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L300-L315
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
SortedDict.setdefault
def setdefault(self, key, default=None): """ If *key* is in the dictionary, return its value. If not, insert *key* with a value of *default* and return *default*. *default* defaults to ``None``. """ if key in self: return self[key] else: self._setitem(key, default) self._list_add(key) return default
python
def setdefault(self, key, default=None): """ If *key* is in the dictionary, return its value. If not, insert *key* with a value of *default* and return *default*. *default* defaults to ``None``. """ if key in self: return self[key] else: self._setitem(key, default) self._list_add(key) return default
If *key* is in the dictionary, return its value. If not, insert *key* with a value of *default* and return *default*. *default* defaults to ``None``.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L317-L328
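A combined sketch of the SortedDict removal and insertion helpers documented above (pop, popitem, setdefault), assuming the standalone sortedcontainers package is installed; keys stay in sorted order, so popitem with no arguments removes the greatest key.
from sortedcontainers import SortedDict

d = SortedDict({'b': 2, 'a': 1, 'c': 3})
print(d.pop('a'))             # 1
print(d.pop('zzz', None))     # None (missing key with a default does not raise)
print(d.popitem())            # ('c', 3) -- greatest remaining key
print(d.setdefault('b', 99))  # 2  (existing key keeps its value)
print(d.setdefault('d', 4))   # 4  (missing key is inserted)
print(dict(d))                # {'b': 2, 'd': 4}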
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
KeysView.index
def index(self, value, start=None, stop=None): """ Return the smallest *k* such that `keysview[k] == value` and `start <= k < end`. Raises `KeyError` if *value* is not present. *stop* defaults to the end of the set. *start* defaults to the beginning. Negative indexes are supported, as for slice indices. """ return self._list.index(value, start, stop)
python
def index(self, value, start=None, stop=None): """ Return the smallest *k* such that `keysview[k] == value` and `start <= k < end`. Raises `KeyError` if *value* is not present. *stop* defaults to the end of the set. *start* defaults to the beginning. Negative indexes are supported, as for slice indices. """ return self._list.index(value, start, stop)
Return the smallest *k* such that `keysview[k] == value` and `start <= k < end`. Raises `KeyError` if *value* is not present. *stop* defaults to the end of the set. *start* defaults to the beginning. Negative indexes are supported, as for slice indices.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L462-L469
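A tentative usage sketch for KeysView.index(). It assumes d.keys() returns the keys view described above (true for this kind of SortedDict on Python 3; other versions may expose the view differently):

from sortedcontainers import SortedDict  # assumed import path

d = SortedDict({'a': 1, 'b': 2, 'c': 3})
keys = d.keys()               # sorted keys view backed by the internal key list
print(keys.index('b'))        # 1 -- position of 'b' among the sorted keys
print(keys.index('c', 1, 3))  # 2 -- search restricted to positions [1, 3)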
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/tracker.py
SummaryTracker.create_summary
def create_summary(self):
    """Return a summary.

    See also the notes on ignore_self in the class as well as the
    initializer documentation.

    """
    if not self.ignore_self:
        res = summary.summarize(muppy.get_objects())
    else:
        # If the user requested the data required to store summaries to be
        # ignored in the summaries, we need to identify all objects which
        # are related to each summary stored.
        # Thus we build a list of all objects used for summary storage as
        # well as a dictionary which tells us how often an object is
        # referenced by the summaries.
        # During this identification process, more objects are referenced,
        # namely int objects identifying referenced objects as well as the
        # corresponding count.
        # For all these objects it will be checked whether they are
        # referenced from outside the monitor's scope. If not, they will be
        # subtracted from the snapshot summary, otherwise they are
        # included (as this indicates that they are relevant to the
        # application).

        all_of_them = []  # every single object
        ref_counter = {}  # how often it is referenced; (id(o), o) pairs

        def store_info(o):
            all_of_them.append(o)
            if id(o) in ref_counter:
                ref_counter[id(o)] += 1
            else:
                ref_counter[id(o)] = 1

        # store info on every single object related to the summaries
        store_info(self.summaries)
        for k, v in self.summaries.items():
            store_info(k)
            summary._traverse(v, store_info)

        # do the summary
        res = summary.summarize(muppy.get_objects())

        # remove ids stored in the ref_counter
        for _id in ref_counter:
            # referenced in frame, ref_counter, ref_counter.keys()
            if len(gc.get_referrers(_id)) == (3):
                summary._subtract(res, _id)
        for o in all_of_them:
            # referenced in frame, summary, all_of_them
            if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
                summary._subtract(res, o)

    return res
python
def create_summary(self):
    """Return a summary.

    See also the notes on ignore_self in the class as well as the
    initializer documentation.

    """
    if not self.ignore_self:
        res = summary.summarize(muppy.get_objects())
    else:
        # If the user requested the data required to store summaries to be
        # ignored in the summaries, we need to identify all objects which
        # are related to each summary stored.
        # Thus we build a list of all objects used for summary storage as
        # well as a dictionary which tells us how often an object is
        # referenced by the summaries.
        # During this identification process, more objects are referenced,
        # namely int objects identifying referenced objects as well as the
        # corresponding count.
        # For all these objects it will be checked whether they are
        # referenced from outside the monitor's scope. If not, they will be
        # subtracted from the snapshot summary, otherwise they are
        # included (as this indicates that they are relevant to the
        # application).

        all_of_them = []  # every single object
        ref_counter = {}  # how often it is referenced; (id(o), o) pairs

        def store_info(o):
            all_of_them.append(o)
            if id(o) in ref_counter:
                ref_counter[id(o)] += 1
            else:
                ref_counter[id(o)] = 1

        # store info on every single object related to the summaries
        store_info(self.summaries)
        for k, v in self.summaries.items():
            store_info(k)
            summary._traverse(v, store_info)

        # do the summary
        res = summary.summarize(muppy.get_objects())

        # remove ids stored in the ref_counter
        for _id in ref_counter:
            # referenced in frame, ref_counter, ref_counter.keys()
            if len(gc.get_referrers(_id)) == (3):
                summary._subtract(res, _id)
        for o in all_of_them:
            # referenced in frame, summary, all_of_them
            if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
                summary._subtract(res, o)

    return res
Return a summary. See also the notes on ignore_self in the class as well as the initializer documentation.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L47-L100
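A minimal usage sketch for create_summary(). The pympler import path is an assumption (the record documents the copy vendored under pyFileFixity/lib/profilers/visual/pympler):

from pympler import summary, tracker  # assumed import path

tr = tracker.SummaryTracker()
snapshot = tr.create_summary()  # list of [type name, object count, total size] rows
summary.print_(snapshot)        # render the snapshot as a table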
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/tracker.py
SummaryTracker.diff
def diff(self, summary1=None, summary2=None):
    """Compute diff between two summaries.

    If no summary is provided, the diff from the last to the current
    summary is used. If summary1 is provided the diff from summary1
    to the current summary is used. If summary1 and summary2 are
    provided, the diff between these two is used.

    """
    res = None
    if summary2 is None:
        self.s1 = self.create_summary()
        if summary1 is None:
            res = summary.get_diff(self.s0, self.s1)
        else:
            res = summary.get_diff(summary1, self.s1)
        self.s0 = self.s1
    else:
        if summary1 is not None:
            res = summary.get_diff(summary1, summary2)
        else:
            raise ValueError("You cannot provide summary2 without summary1.")
    return summary._sweep(res)
python
def diff(self, summary1=None, summary2=None):
    """Compute diff between two summaries.

    If no summary is provided, the diff from the last to the current
    summary is used. If summary1 is provided the diff from summary1
    to the current summary is used. If summary1 and summary2 are
    provided, the diff between these two is used.

    """
    res = None
    if summary2 is None:
        self.s1 = self.create_summary()
        if summary1 is None:
            res = summary.get_diff(self.s0, self.s1)
        else:
            res = summary.get_diff(summary1, self.s1)
        self.s0 = self.s1
    else:
        if summary1 is not None:
            res = summary.get_diff(summary1, summary2)
        else:
            raise ValueError("You cannot provide summary2 without summary1.")
    return summary._sweep(res)
Compute diff between two summaries. If no summary is provided, the diff from the last to the current summary is used. If summary1 is provided the diff from summary1 to the current summary is used. If summary1 and summary2 are provided, the diff between these two is used.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L102-L124
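A minimal sketch of the ways diff() can be called, under the same assumed pympler import path as above; the payload variables exist only so that something changes between measurements:

from pympler import summary, tracker  # assumed import path

tr = tracker.SummaryTracker()                   # stores an initial summary internally
payload = [bytearray(64) for _ in range(1000)]  # allocate between measurements
summary.print_(tr.diff())                       # diff from the stored summary to now

s1 = tr.create_summary()
more = [dict() for _ in range(500)]
s2 = tr.create_summary()
summary.print_(tr.diff(summary1=s1, summary2=s2))  # diff between two explicit summaries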
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/tracker.py
SummaryTracker.print_diff
def print_diff(self, summary1=None, summary2=None):
    """Compute diff between two summaries and print it.

    If no summary is provided, the diff from the last to the current
    summary is used. If summary1 is provided the diff from summary1
    to the current summary is used. If summary1 and summary2
    are provided, the diff between these two is used.

    """
    summary.print_(self.diff(summary1=summary1, summary2=summary2))
python
def print_diff(self, summary1=None, summary2=None):
    """Compute diff between two summaries and print it.

    If no summary is provided, the diff from the last to the current
    summary is used. If summary1 is provided the diff from summary1
    to the current summary is used. If summary1 and summary2
    are provided, the diff between these two is used.

    """
    summary.print_(self.diff(summary1=summary1, summary2=summary2))
Compute diff between two summaries and print it. If no summary is provided, the diff from the last to the current summary is used. If summary1 is provided the diff from summary1 to the current summary is used. If summary1 and summary2 are provided, the diff between these two is used.
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L126-L134
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/tracker.py
ObjectTracker._get_objects
def _get_objects(self, ignore=[]):
    """Get all currently existing objects.

    XXX - ToDo: This method is a copy&paste from muppy.get_objects, but
    some modifications are applied. Specifically, it allows objects to be
    ignored (which includes the current frame).

    keyword arguments
    ignore -- list of objects to ignore

    """
    def remove_ignore(objects, ignore=[]):
        # remove all objects listed in the ignore list
        res = []
        for o in objects:
            if not compat.object_in_list(o, ignore):
                res.append(o)
        return res

    tmp = gc.get_objects()
    ignore.append(inspect.currentframe())  #PYCHOK change ignore
    ignore.append(self)  #PYCHOK change ignore
    if hasattr(self, 'o0'):
        ignore.append(self.o0)  #PYCHOK change ignore
    if hasattr(self, 'o1'):
        ignore.append(self.o1)  #PYCHOK change ignore
    ignore.append(ignore)  #PYCHOK change ignore
    ignore.append(remove_ignore)  #PYCHOK change ignore
    # this implies that referenced objects are also ignored
    tmp = remove_ignore(tmp, ignore)
    res = []
    for o in tmp:
        # gc.get_objects returns only container objects, but we also want
        # the objects referenced by them
        refs = muppy.get_referents(o)
        for ref in refs:
            if not muppy._is_containerobject(ref):
                # we already got the container objects, now we only add
                # non-container objects
                res.append(ref)
    res.extend(tmp)
    res = muppy._remove_duplicates(res)
    if ignore is not None:
        # repeat to filter out objects which may have been referenced
        res = remove_ignore(res, ignore)
    # manual cleanup, see comment above
    del ignore[:]
    return res
python
def _get_objects(self, ignore=[]):
    """Get all currently existing objects.

    XXX - ToDo: This method is a copy&paste from muppy.get_objects, but
    some modifications are applied. Specifically, it allows objects to be
    ignored (which includes the current frame).

    keyword arguments
    ignore -- list of objects to ignore

    """
    def remove_ignore(objects, ignore=[]):
        # remove all objects listed in the ignore list
        res = []
        for o in objects:
            if not compat.object_in_list(o, ignore):
                res.append(o)
        return res

    tmp = gc.get_objects()
    ignore.append(inspect.currentframe())  #PYCHOK change ignore
    ignore.append(self)  #PYCHOK change ignore
    if hasattr(self, 'o0'):
        ignore.append(self.o0)  #PYCHOK change ignore
    if hasattr(self, 'o1'):
        ignore.append(self.o1)  #PYCHOK change ignore
    ignore.append(ignore)  #PYCHOK change ignore
    ignore.append(remove_ignore)  #PYCHOK change ignore
    # this implies that referenced objects are also ignored
    tmp = remove_ignore(tmp, ignore)
    res = []
    for o in tmp:
        # gc.get_objects returns only container objects, but we also want
        # the objects referenced by them
        refs = muppy.get_referents(o)
        for ref in refs:
            if not muppy._is_containerobject(ref):
                # we already got the container objects, now we only add
                # non-container objects
                res.append(ref)
    res.extend(tmp)
    res = muppy._remove_duplicates(res)
    if ignore is not None:
        # repeat to filter out objects which may have been referenced
        res = remove_ignore(res, ignore)
    # manual cleanup, see comment above
    del ignore[:]
    return res
Get all currently existing objects.

XXX - ToDo: This method is a copy&paste from muppy.get_objects, but some modifications are applied. Specifically, it allows objects to be ignored (which includes the current frame).

keyword arguments
ignore -- list of objects to ignore
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L169-L213
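_get_objects() is an internal helper, so rather than call it directly, here is a minimal standalone sketch of just its ignore-list filtering step (the live_objects name is hypothetical, and the sketch deliberately omits the referent expansion and duplicate removal done above):

import gc

def live_objects(ignore=()):
    # Drop anything in the ignore list, compared by identity, from gc's
    # view of the currently tracked objects.
    ignore_ids = {id(o) for o in ignore}
    return [o for o in gc.get_objects() if id(o) not in ignore_ids]

bookkeeping = [1, 2, 3]
objs = live_objects(ignore=[bookkeeping])
assert all(o is not bookkeeping for o in objs)
print(len(objs), 'gc-tracked objects, excluding the bookkeeping list')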
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/tracker.py
ObjectTracker.get_diff
def get_diff(self, ignore=[]):
    """Get the diff to the last time the state of objects was measured.

    keyword arguments
    ignore -- list of objects to ignore

    """
    # ignore this and the caller frame
    ignore.append(inspect.currentframe())  #PYCHOK change ignore
    self.o1 = self._get_objects(ignore)
    diff = muppy.get_diff(self.o0, self.o1)
    self.o0 = self.o1
    # manual cleanup, see comment above
    del ignore[:]  #PYCHOK change ignore
    return diff
python
def get_diff(self, ignore=[]):
    """Get the diff to the last time the state of objects was measured.

    keyword arguments
    ignore -- list of objects to ignore

    """
    # ignore this and the caller frame
    ignore.append(inspect.currentframe())  #PYCHOK change ignore
    self.o1 = self._get_objects(ignore)
    diff = muppy.get_diff(self.o0, self.o1)
    self.o0 = self.o1
    # manual cleanup, see comment above
    del ignore[:]  #PYCHOK change ignore
    return diff
Get the diff to the last time the state of objects was measured.

keyword arguments
ignore -- list of objects to ignore
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L215-L228
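A minimal usage sketch for ObjectTracker.get_diff(), again under the assumed pympler import path; the '+' and '-' keys follow muppy.get_diff's convention for added and removed objects:

from pympler import summary, tracker  # assumed import path

ot = tracker.ObjectTracker()           # baseline object list captured here
extra = [set() for _ in range(250)]    # allocations made after the baseline
diff = ot.get_diff()                   # {'+': new objects, '-': vanished objects}
summary.print_(summary.summarize(diff['+']))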
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/tracker.py
ObjectTracker.print_diff
def print_diff(self, ignore=[]):
    """Print the diff to the last time the state of objects was measured.

    keyword arguments
    ignore -- list of objects to ignore

    """
    # ignore this and the caller frame
    ignore.append(inspect.currentframe())  #PYCHOK change ignore
    diff = self.get_diff(ignore)
    print("Added objects:")
    summary.print_(summary.summarize(diff['+']))
    print("Removed objects:")
    summary.print_(summary.summarize(diff['-']))
    # manual cleanup, see comment above
    del ignore[:]
python
def print_diff(self, ignore=[]):
    """Print the diff to the last time the state of objects was measured.

    keyword arguments
    ignore -- list of objects to ignore

    """
    # ignore this and the caller frame
    ignore.append(inspect.currentframe())  #PYCHOK change ignore
    diff = self.get_diff(ignore)
    print("Added objects:")
    summary.print_(summary.summarize(diff['+']))
    print("Removed objects:")
    summary.print_(summary.summarize(diff['-']))
    # manual cleanup, see comment above
    del ignore[:]
Print the diff to the last time the state of objects was measured.

keyword arguments
ignore -- list of objects to ignore
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/tracker.py#L230-L244
lrq3000/pyFileFixity
pyFileFixity/lib/distance/distance/_simpledists.py
hamming
def hamming(seq1, seq2, normalized=False):
    """Compute the Hamming distance between the two sequences `seq1` and `seq2`.
    The Hamming distance is the number of differing items in two ordered
    sequences of the same length. If the sequences submitted do not have the
    same length, an error will be raised.

    If `normalized` evaluates to `False`, the return value will be an integer
    between 0 and the length of the sequences provided, edge values included;
    otherwise, it will be a float between 0 and 1 included, where 0 means
    equal, and 1 totally different. Normalized hamming distance is computed as:

        0.0                         if len(seq1) == 0
        hamming_dist / len(seq1)    otherwise
    """
    L = len(seq1)
    if L != len(seq2):
        raise ValueError("expected two strings of the same length")
    if L == 0:
        return 0.0 if normalized else 0  # equal
    dist = sum(c1 != c2 for c1, c2 in zip(seq1, seq2))
    if normalized:
        return dist / float(L)
    return dist
python
def hamming(seq1, seq2, normalized=False):
    """Compute the Hamming distance between the two sequences `seq1` and `seq2`.
    The Hamming distance is the number of differing items in two ordered
    sequences of the same length. If the sequences submitted do not have the
    same length, an error will be raised.

    If `normalized` evaluates to `False`, the return value will be an integer
    between 0 and the length of the sequences provided, edge values included;
    otherwise, it will be a float between 0 and 1 included, where 0 means
    equal, and 1 totally different. Normalized hamming distance is computed as:

        0.0                         if len(seq1) == 0
        hamming_dist / len(seq1)    otherwise
    """
    L = len(seq1)
    if L != len(seq2):
        raise ValueError("expected two strings of the same length")
    if L == 0:
        return 0.0 if normalized else 0  # equal
    dist = sum(c1 != c2 for c1, c2 in zip(seq1, seq2))
    if normalized:
        return dist / float(L)
    return dist
Compute the Hamming distance between the two sequences `seq1` and `seq2`. The Hamming distance is the number of differing items in two ordered sequences of the same length. If the sequences submitted do not have the same length, an error will be raised.

If `normalized` evaluates to `False`, the return value will be an integer between 0 and the length of the sequences provided, edge values included; otherwise, it will be a float between 0 and 1 included, where 0 means equal, and 1 totally different. Normalized hamming distance is computed as:

    0.0                         if len(seq1) == 0
    hamming_dist / len(seq1)    otherwise
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/distance/distance/_simpledists.py#L3-L25
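A short worked example for hamming(), assuming the function is imported from the vendored module (upstream it is exposed as distance.hamming; the exact import path here is an assumption):

from distance import hamming  # assumed import path

print(hamming('karolin', 'kathrin'))                   # 3 -- three positions differ
print(hamming('karolin', 'kathrin', normalized=True))  # 0.428... == 3 / 7
print(hamming([1, 2, 3], [1, 2, 4]))                   # 1 -- any equal-length sequences work
try:
    hamming('abc', 'ab')                               # unequal lengths
except ValueError as exc:
    print(exc)                                         # 'expected two strings of the same length'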