omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/__init__.py
"""For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
from functools import wraps
from typing import Callable, TypeVar, cast
from ..warnings import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
SetuptoolsDeprecationWarning.emit(
"Deprecated API usage.",
f"""
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
To read project metadata, consider using
``build.util.project_wheel_metadata`` (https://pypi.org/project/build/).
For simple scenarios, you can also try parsing the file directly
with the help of ``configparser``.
""",
# due_date not defined yet, because the community still heavily relies on it
# Warning introduced in 24 Mar 2022
)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
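# Usage sketch (editorial illustration; the config file name is hypothetical):
# calling one of the re-exported functions still works, but first emits a
# ``SetuptoolsDeprecationWarning`` pointing users at ``setuptools.config.setupcfg``.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         read_configuration("setup.cfg")
#     assert any("Deprecated API usage" in str(w.message) for w in caught)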
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/expand.py
"""Utility functions to expand configuration directives or special values
(such glob patterns).
We can split the process of interpreting configuration files into 2 steps:
1. The parsing the file contents from strings to value objects
that can be understand by Python (for example a string with a comma
separated list of keywords into an actual Python list of strings).
2. The expansion (or post-processing) of these values according to the
semantics ``setuptools`` assign to them (for example a configuration field
with the ``file:`` directive should be expanded from a list of file paths to
a single string with the contents of those files concatenated)
This module focus on the second step, and therefore allow sharing the expansion
functions among several configuration file formats.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
import ast
import importlib.util
import io
import os
import pathlib
import sys
from glob import iglob
from configparser import ConfigParser
from importlib.machinery import ModuleSpec
from itertools import chain
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
cast
)
from pathlib import Path
from types import ModuleType
from distutils.errors import DistutilsOptionError
from .._path import same_path as _same_path
from ..warnings import SetuptoolsWarning
if TYPE_CHECKING:
from setuptools.dist import Distribution # noqa
from setuptools.discovery import ConfigDiscovery # noqa
from distutils.dist import DistributionMetadata # noqa
chain_iter = chain.from_iterable
_Path = Union[str, os.PathLike]
_K = TypeVar("_K")
_V = TypeVar("_V", covariant=True)
class StaticModule:
"""Proxy to a module object that avoids executing arbitrary code."""
    def __init__(self, name: str, spec: ModuleSpec):
        module = ast.parse(pathlib.Path(spec.origin).read_bytes())
        # Store ``name``, ``spec`` and ``module`` as instance attributes in one
        # go, then drop the spurious ``self`` entry picked up from ``locals()``.
        vars(self).update(locals())
        del self.self
def _find_assignments(self) -> Iterator[Tuple[ast.AST, ast.AST]]:
for statement in self.module.body:
if isinstance(statement, ast.Assign):
yield from ((target, statement.value) for target in statement.targets)
elif isinstance(statement, ast.AnnAssign) and statement.value:
yield (statement.target, statement.value)
def __getattr__(self, attr):
"""Attempt to load an attribute "statically", via :func:`ast.literal_eval`."""
try:
return next(
ast.literal_eval(value)
for target, value in self._find_assignments()
if isinstance(target, ast.Name) and target.id == attr
)
except Exception as e:
raise AttributeError(f"{self.name} has no attribute {attr}") from e
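# Usage sketch (editorial illustration; ``/proj/mod.py`` is a hypothetical file
# containing ``__version__ = "0.1"``). The attribute is read via ``ast`` without
# executing the module:
#
#     spec = importlib.util.spec_from_file_location("mod", "/proj/mod.py")
#     StaticModule("mod", spec).__version__  # -> "0.1"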
def glob_relative(
patterns: Iterable[str], root_dir: Optional[_Path] = None
) -> List[str]:
"""Expand the list of glob patterns, but preserving relative paths.
:param list[str] patterns: List of glob patterns
:param str root_dir: Path to which globs should be relative
(current directory by default)
:rtype: list
"""
glob_characters = {'*', '?', '[', ']', '{', '}'}
expanded_values = []
root_dir = root_dir or os.getcwd()
for value in patterns:
# Has globby characters?
if any(char in value for char in glob_characters):
# then expand the glob pattern while keeping paths *relative*:
glob_path = os.path.abspath(os.path.join(root_dir, value))
expanded_values.extend(sorted(
os.path.relpath(path, root_dir).replace(os.sep, "/")
for path in iglob(glob_path, recursive=True)))
else:
# take the value as-is
path = os.path.relpath(value, root_dir).replace(os.sep, "/")
expanded_values.append(path)
return expanded_values
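# Usage sketch (editorial illustration; assumes a hypothetical project at
# ``/proj`` containing ``docs/api.rst`` and ``docs/usage.rst``):
#
#     glob_relative(["docs/*.rst"], root_dir="/proj")
#     # -> ["docs/api.rst", "docs/usage.rst"]  (sorted, relative, "/"-separated)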
def read_files(filepaths: Union[str, bytes, Iterable[_Path]], root_dir=None) -> str:
"""Return the content of the files concatenated using ``\n`` as str
This function is sandboxed and won't reach anything outside ``root_dir``
(By default ``root_dir`` is the current directory).
"""
from setuptools.extern.more_itertools import always_iterable
root_dir = os.path.abspath(root_dir or os.getcwd())
_filepaths = (os.path.join(root_dir, path) for path in always_iterable(filepaths))
return '\n'.join(
_read_file(path)
for path in _filter_existing_files(_filepaths)
if _assert_local(path, root_dir)
)
def _filter_existing_files(filepaths: Iterable[_Path]) -> Iterator[_Path]:
for path in filepaths:
if os.path.isfile(path):
yield path
else:
SetuptoolsWarning.emit(f"File {path!r} cannot be found")
def _read_file(filepath: Union[bytes, _Path]) -> str:
with io.open(filepath, encoding='utf-8') as f:
return f.read()
def _assert_local(filepath: _Path, root_dir: str):
if Path(os.path.abspath(root_dir)) not in Path(os.path.abspath(filepath)).parents:
msg = f"Cannot access {filepath!r} (or anything outside {root_dir!r})"
raise DistutilsOptionError(msg)
return True
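# Usage sketch (editorial illustration; file names are hypothetical):
#
#     read_files(["README.rst", "CHANGELOG.rst"], root_dir="/proj")
#     # -> contents of the two files joined with "\n"
#     # A path such as "../outside.txt" pointing at an *existing* file outside
#     # ``root_dir`` raises DistutilsOptionError; missing files are skipped
#     # with a SetuptoolsWarning.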
def read_attr(
attr_desc: str,
package_dir: Optional[Mapping[str, str]] = None,
root_dir: Optional[_Path] = None
):
"""Reads the value of an attribute from a module.
    This function will try to read the attribute statically first
(via :func:`ast.literal_eval`), and only evaluate the module if it fails.
Examples:
read_attr("package.attr")
read_attr("package.module.attr")
:param str attr_desc: Dot-separated string describing how to reach the
attribute (see examples above)
    :param dict[str, str] package_dir: Mapping of package names to their
        location on disk (represented by paths relative to ``root_dir``).
:param str root_dir: Path to directory containing all the packages in
``package_dir`` (current directory by default).
:rtype: str
"""
root_dir = root_dir or os.getcwd()
attrs_path = attr_desc.strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
_parent_path, path, module_name = _find_module(module_name, package_dir, root_dir)
spec = _find_spec(module_name, path)
try:
return getattr(StaticModule(module_name, spec), attr_name)
except Exception:
# fallback to evaluate module
module = _load_spec(spec, module_name)
return getattr(module, attr_name)
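# Usage sketch (editorial illustration; assumes a hypothetical package at
# ``/proj/pkg/__init__.py`` containing ``__version__ = "1.2.3"``):
#
#     read_attr("pkg.__version__", root_dir="/proj")  # -> "1.2.3"
#
# Because the value is a literal, it is resolved statically; values computed
# at runtime would trigger the fallback that actually imports the module.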
def _find_spec(module_name: str, module_path: Optional[_Path]) -> ModuleSpec:
spec = importlib.util.spec_from_file_location(module_name, module_path)
spec = spec or importlib.util.find_spec(module_name)
if spec is None:
raise ModuleNotFoundError(module_name)
return spec
def _load_spec(spec: ModuleSpec, module_name: str) -> ModuleType:
name = getattr(spec, "__name__", module_name)
if name in sys.modules:
return sys.modules[name]
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module # cache (it also ensures `==` works on loaded items)
spec.loader.exec_module(module) # type: ignore
return module
def _find_module(
module_name: str, package_dir: Optional[Mapping[str, str]], root_dir: _Path
) -> Tuple[_Path, Optional[str], str]:
"""Given a module (that could normally be imported by ``module_name``
after the build is complete), find the path to the parent directory where
it is contained and the canonical name that could be used to import it
considering the ``package_dir`` in the build configuration and ``root_dir``
"""
parent_path = root_dir
module_parts = module_name.split('.')
if package_dir:
if module_parts[0] in package_dir:
# A custom path was specified for the module we want to import
custom_path = package_dir[module_parts[0]]
parts = custom_path.rsplit('/', 1)
if len(parts) > 1:
parent_path = os.path.join(root_dir, parts[0])
parent_module = parts[1]
else:
parent_module = custom_path
module_name = ".".join([parent_module, *module_parts[1:]])
elif '' in package_dir:
# A custom parent directory was specified for all root modules
parent_path = os.path.join(root_dir, package_dir[''])
path_start = os.path.join(parent_path, *module_name.split("."))
candidates = chain(
(f"{path_start}.py", os.path.join(path_start, "__init__.py")),
iglob(f"{path_start}.*")
)
module_path = next((x for x in candidates if os.path.isfile(x)), None)
return parent_path, module_path, module_name
def resolve_class(
qualified_class_name: str,
package_dir: Optional[Mapping[str, str]] = None,
root_dir: Optional[_Path] = None
) -> Callable:
"""Given a qualified class name, return the associated class object"""
root_dir = root_dir or os.getcwd()
idx = qualified_class_name.rfind('.')
class_name = qualified_class_name[idx + 1 :]
pkg_name = qualified_class_name[:idx]
_parent_path, path, module_name = _find_module(pkg_name, package_dir, root_dir)
module = _load_spec(_find_spec(module_name, path), module_name)
return getattr(module, class_name)
def cmdclass(
values: Dict[str, str],
package_dir: Optional[Mapping[str, str]] = None,
root_dir: Optional[_Path] = None
) -> Dict[str, Callable]:
"""Given a dictionary mapping command names to strings for qualified class
names, apply :func:`resolve_class` to the dict values.
"""
return {k: resolve_class(v, package_dir, root_dir) for k, v in values.items()}
def find_packages(
*,
namespaces=True,
fill_package_dir: Optional[Dict[str, str]] = None,
root_dir: Optional[_Path] = None,
**kwargs
) -> List[str]:
"""Works similarly to :func:`setuptools.find_packages`, but with all
arguments given as keyword arguments. Moreover, ``where`` can be given
as a list (the results will be simply concatenated).
When the additional keyword argument ``namespaces`` is ``True``, it will
behave like :func:`setuptools.find_namespace_packages`` (i.e. include
implicit namespaces as per :pep:`420`).
The ``where`` argument will be considered relative to ``root_dir`` (or the current
working directory when ``root_dir`` is not given).
If the ``fill_package_dir`` argument is passed, this function will consider it as a
similar data structure to the ``package_dir`` configuration parameter add fill-in
any missing package location.
:rtype: list
"""
from setuptools.discovery import construct_package_dir
from setuptools.extern.more_itertools import unique_everseen, always_iterable
if namespaces:
from setuptools.discovery import PEP420PackageFinder as PackageFinder
else:
from setuptools.discovery import PackageFinder # type: ignore
root_dir = root_dir or os.curdir
where = kwargs.pop('where', ['.'])
packages: List[str] = []
fill_package_dir = {} if fill_package_dir is None else fill_package_dir
search = list(unique_everseen(always_iterable(where)))
if len(search) == 1 and all(not _same_path(search[0], x) for x in (".", root_dir)):
fill_package_dir.setdefault("", search[0])
for path in search:
package_path = _nest_path(root_dir, path)
pkgs = PackageFinder.find(package_path, **kwargs)
packages.extend(pkgs)
if pkgs and not (
fill_package_dir.get("") == path
or os.path.samefile(package_path, root_dir)
):
fill_package_dir.update(construct_package_dir(pkgs, path))
return packages
def _nest_path(parent: _Path, path: _Path) -> str:
path = parent if path in {".", ""} else os.path.join(parent, path)
return os.path.normpath(path)
def version(value: Union[Callable, Iterable[Union[str, int]], str]) -> str:
"""When getting the version directly from an attribute,
    it should be normalised to a string.
"""
if callable(value):
value = value()
value = cast(Iterable[Union[str, int]], value)
if not isinstance(value, str):
if hasattr(value, '__iter__'):
value = '.'.join(map(str, value))
else:
value = '%s' % value
return value
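# Usage sketch (editorial illustration) of the normalisation performed above:
#
#     version("1.2.3")              # -> "1.2.3"
#     version((1, 2, 3))            # -> "1.2.3"
#     version(lambda: (0, "dev1"))  # -> "0.dev1"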
def canonic_package_data(package_data: dict) -> dict:
if "*" in package_data:
package_data[""] = package_data.pop("*")
return package_data
def canonic_data_files(
data_files: Union[list, dict], root_dir: Optional[_Path] = None
) -> List[Tuple[str, List[str]]]:
"""For compatibility with ``setup.py``, ``data_files`` should be a list
of pairs instead of a dict.
This function also expands glob patterns.
"""
if isinstance(data_files, list):
return data_files
return [
(dest, glob_relative(patterns, root_dir))
for dest, patterns in data_files.items()
]
def entry_points(text: str, text_source="entry-points") -> Dict[str, dict]:
"""Given the contents of entry-points file,
process it into a 2-level dictionary (``dict[str, dict[str, str]]``).
The first level keys are entry-point groups, the second level keys are
entry-point names, and the second level values are references to objects
(that correspond to the entry-point value).
"""
parser = ConfigParser(default_section=None, delimiters=("=",)) # type: ignore
parser.optionxform = str # case sensitive
parser.read_string(text, text_source)
groups = {k: dict(v.items()) for k, v in parser.items()}
groups.pop(parser.default_section, None)
return groups
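# Usage sketch (editorial illustration; the entry-point payload is hypothetical):
#
#     entry_points("[console_scripts]\nmy-cmd = pkg.cli:main")
#     # -> {"console_scripts": {"my-cmd": "pkg.cli:main"}}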
class EnsurePackagesDiscovered:
"""Some expand functions require all the packages to already be discovered before
they run, e.g. :func:`read_attr`, :func:`resolve_class`, :func:`cmdclass`.
Therefore in some cases we will need to run autodiscovery during the evaluation of
the configuration. However, it is better to postpone calling package discovery as
much as possible, because some parameters can influence it (e.g. ``package_dir``),
and those might not have been processed yet.
"""
def __init__(self, distribution: "Distribution"):
self._dist = distribution
self._called = False
def __call__(self):
"""Trigger the automatic package discovery, if it is still necessary."""
if not self._called:
self._called = True
self._dist.set_defaults(name=False) # Skip name, we can still be parsing
def __enter__(self):
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
if self._called:
self._dist.set_defaults.analyse_name() # Now we can set a default name
def _get_package_dir(self) -> Mapping[str, str]:
self()
pkg_dir = self._dist.package_dir
return {} if pkg_dir is None else pkg_dir
@property
def package_dir(self) -> Mapping[str, str]:
"""Proxy to ``package_dir`` that may trigger auto-discovery when used."""
return LazyMappingProxy(self._get_package_dir)
class LazyMappingProxy(Mapping[_K, _V]):
"""Mapping proxy that delays resolving the target object, until really needed.
>>> def obtain_mapping():
... print("Running expensive function!")
... return {"key": "value", "other key": "other value"}
>>> mapping = LazyMappingProxy(obtain_mapping)
>>> mapping["key"]
Running expensive function!
'value'
>>> mapping["other key"]
'other value'
"""
def __init__(self, obtain_mapping_value: Callable[[], Mapping[_K, _V]]):
self._obtain = obtain_mapping_value
self._value: Optional[Mapping[_K, _V]] = None
def _target(self) -> Mapping[_K, _V]:
if self._value is None:
self._value = self._obtain()
return self._value
def __getitem__(self, key: _K) -> _V:
return self._target()[key]
def __len__(self) -> int:
return len(self._target())
def __iter__(self) -> Iterator[_K]:
return iter(self._target())
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/setupcfg.py
"""
Load setuptools configuration from ``setup.cfg`` files.
**API will be made private in the future**
To read project metadata, consider using
``build.util.project_wheel_metadata`` (https://pypi.org/project/build/).
For simple scenarios, you can also try parsing the file directly
with the help of ``configparser``.
"""
import contextlib
import functools
import os
from collections import defaultdict
from functools import partial
from functools import wraps
from typing import (
TYPE_CHECKING,
Callable,
Any,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from ..errors import FileError, OptionError
from ..extern.packaging.markers import default_environment as marker_env
from ..extern.packaging.requirements import InvalidRequirement, Requirement
from ..extern.packaging.specifiers import SpecifierSet
from ..extern.packaging.version import InvalidVersion, Version
from ..warnings import SetuptoolsDeprecationWarning
from . import expand
if TYPE_CHECKING:
from distutils.dist import DistributionMetadata # noqa
from setuptools.dist import Distribution # noqa
_Path = Union[str, os.PathLike]
SingleCommandOptions = Dict["str", Tuple["str", Any]]
"""Dict that associate the name of the options of a particular command to a
tuple. The first element of the tuple indicates the origin of the option value
(e.g. the name of the configuration file where it was read from),
while the second element of the tuple is the option value itself
"""
AllCommandOptions = Dict["str", SingleCommandOptions] # cmd name => its options
Target = TypeVar("Target", bound=Union["Distribution", "DistributionMetadata"])
def read_configuration(
filepath: _Path, find_others=False, ignore_option_errors=False
) -> dict:
"""Read given configuration file and returns options from it as a dict.
:param str|unicode filepath: Path to configuration file
to get options from.
:param bool find_others: Whether to search for other configuration files
which could be on in various places.
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: dict
"""
from setuptools.dist import Distribution
dist = Distribution()
filenames = dist.find_config_files() if find_others else []
handlers = _apply(dist, filepath, filenames, ignore_option_errors)
return configuration_to_dict(handlers)
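# Usage sketch (editorial illustration; the path and values are hypothetical):
#
#     conf = read_configuration("/proj/setup.cfg")
#     conf["metadata"]["name"]     # -> e.g. "my-package"
#     conf["options"]["packages"]  # -> e.g. ["pkg", "pkg.sub"]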
def apply_configuration(dist: "Distribution", filepath: _Path) -> "Distribution":
"""Apply the configuration from a ``setup.cfg`` file into an existing
distribution object.
"""
_apply(dist, filepath)
dist._finalize_requires()
return dist
def _apply(
dist: "Distribution",
filepath: _Path,
other_files: Iterable[_Path] = (),
ignore_option_errors: bool = False,
) -> Tuple["ConfigHandler", ...]:
"""Read configuration from ``filepath`` and applies to the ``dist`` object."""
from setuptools.dist import _Distribution
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise FileError(f'Configuration file {filepath} does not exist.')
current_directory = os.getcwd()
os.chdir(os.path.dirname(filepath))
filenames = [*other_files, filepath]
try:
_Distribution.parse_config_files(dist, filenames=filenames)
handlers = parse_configuration(
dist, dist.command_options, ignore_option_errors=ignore_option_errors
)
dist._finalize_license_files()
finally:
os.chdir(current_directory)
return handlers
def _get_option(target_obj: Target, key: str):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = f'get_{key}'
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter()
def configuration_to_dict(handlers: Tuple["ConfigHandler", ...]) -> dict:
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict: dict = defaultdict(dict)
for handler in handlers:
for option in handler.set_options:
value = _get_option(handler.target_obj, option)
config_dict[handler.section_prefix][option] = value
return config_dict
def parse_configuration(
distribution: "Distribution",
command_options: AllCommandOptions,
ignore_option_errors=False,
) -> Tuple["ConfigMetadataHandler", "ConfigOptionsHandler"]:
"""Performs additional parsing of configuration options
for a distribution.
Returns a list of used option handlers.
:param Distribution distribution:
:param dict command_options:
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: list
"""
with expand.EnsurePackagesDiscovered(distribution) as ensure_discovered:
options = ConfigOptionsHandler(
distribution,
command_options,
ignore_option_errors,
ensure_discovered,
)
options.parse()
if not distribution.package_dir:
distribution.package_dir = options.package_dir # Filled by `find_packages`
meta = ConfigMetadataHandler(
distribution.metadata,
command_options,
ignore_option_errors,
ensure_discovered,
distribution.package_dir,
distribution.src_root,
)
meta.parse()
distribution._referenced_files.update(
options._referenced_files, meta._referenced_files
)
return meta, options
def _warn_accidental_env_marker_misconfig(label: str, orig_value: str, parsed: list):
"""Because users sometimes misinterpret this configuration:
[options.extras_require]
foo = bar;python_version<"4"
It looks like one requirement with an environment marker
but because there is no newline, it's parsed as two requirements
with a semicolon as separator.
    Therefore, if:
    * the input string does not contain a newline AND
    * the parsed result contains two requirements AND
    * parsing of the two parts from the result ("<first>;<second>")
      results in a valid Requirement with a valid marker
    a warning is shown to inform the user about the possible problem.
    """
if "\n" in orig_value or len(parsed) != 2:
return
markers = marker_env().keys()
try:
req = Requirement(parsed[1])
if req.name in markers:
_AmbiguousMarker.emit(field=label, req=parsed[1])
except InvalidRequirement as ex:
if any(parsed[1].startswith(marker) for marker in markers):
msg = _AmbiguousMarker.message(field=label, req=parsed[1])
raise InvalidRequirement(msg) from ex
class ConfigHandler(Generic[Target]):
"""Handles metadata supplied in configuration files."""
section_prefix: str
"""Prefix for config sections handled by this handler.
Must be provided by class heirs.
"""
aliases: Dict[str, str] = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(
self,
target_obj: Target,
options: AllCommandOptions,
ignore_option_errors,
ensure_discovered: expand.EnsurePackagesDiscovered,
):
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = dict(self._section_options(options))
self.set_options: List[str] = []
self.ensure_discovered = ensure_discovered
self._referenced_files: Set[str] = set()
"""After parsing configurations, this property will enumerate
all files referenced by the "file:" directive. Private API for setuptools only.
"""
@classmethod
def _section_options(cls, options: AllCommandOptions):
for full_name, value in options.items():
pre, sep, name = full_name.partition(cls.section_prefix)
if pre:
continue
yield name.lstrip('.'), value
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__
)
def __setitem__(self, option_name, value):
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
try:
current_value = getattr(target_obj, option_name)
except AttributeError:
raise KeyError(option_name)
        if current_value:
            # Option already set (e.g. by a previous configuration source); skip.
            return
        try:
            parsed = self.parsers.get(option_name, lambda x: x)(value)
        except (Exception,) * self.ignore_option_errors:
            # ``(Exception,) * True`` catches everything, while
            # ``(Exception,) * False`` is an empty tuple and catches nothing.
            return
simple_setter = functools.partial(target_obj.__setattr__, option_name)
setter = getattr(target_obj, 'set_%s' % option_name, simple_setter)
setter(parsed)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise OptionError(f"Unable to parse option value to dict: {value}")
result[key.strip()] = val.strip()
return result
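    # Usage sketch (editorial illustration) of the two class-level parsers above:
    #
    #     ConfigHandler._parse_list("a, b, c")       # -> ["a", "b", "c"]
    #     ConfigHandler._parse_list("a\nb\nc")       # -> ["a", "b", "c"]
    #     ConfigHandler._parse_dict("x = 1\ny = 2")  # -> {"x": "1", "y": "2"}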
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = 'file:'
if value.startswith(exclude_directive):
raise ValueError(
'Only strings are accepted for the {0} field, '
'files are not accepted'.format(key)
)
return value
return parser
def _parse_file(self, value, root_dir: _Path):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
file: README.rst, CHANGELOG.md, src/file.txt
:param str value:
:rtype: str
"""
include_directive = 'file:'
if not isinstance(value, str):
return value
if not value.startswith(include_directive):
return value
spec = value[len(include_directive) :]
filepaths = [path.strip() for path in spec.split(',')]
self._referenced_files.update(filepaths)
return expand.read_files(filepaths, root_dir)
def _parse_attr(self, value, package_dir, root_dir: _Path):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attr_desc = value.replace(attr_directive, '')
# Make sure package_dir is populated correctly, so `attr:` directives can work
package_dir.update(self.ensure_discovered.package_dir)
return expand.read_attr(attr_desc, package_dir, root_dir)
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict_with_key(cls, section_options, values_parser):
"""Parses section options into a dictionary.
Applies a given parser to each option in a section.
:param dict section_options:
:param callable values_parser: function with 2 args corresponding to key, value
:rtype: dict
"""
value = {}
for key, (_, val) in section_options.items():
value[key] = values_parser(key, val)
return value
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to each value.
:param dict section_options:
:param callable values_parser: function with 1 arg corresponding to option value
:rtype: dict
"""
parser = (lambda _, v: values_parser(v)) if values_parser else (lambda _, v: v)
return cls._parse_section_to_dict_with_key(section_options, parser)
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for name, (_, value) in section_options.items():
with contextlib.suppress(KeyError):
                # Keep silent, as a new option may appear at any time.
self[name] = value
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method: Optional[Callable] = getattr(
self,
# Dots in section names are translated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None,
)
if section_parser_method is None:
raise OptionError(
"Unsupported distribution option section: "
f"[{self.section_prefix}.{section_name}]"
)
section_parser_method(section_options)
def _deprecated_config_handler(self, func, msg, **kw):
"""this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
kw.setdefault("stacklevel", 2)
_DeprecatedConfig.emit("Deprecated config in `setup.cfg`", msg, **kw)
return func(*args, **kwargs)
return config_handler
class ConfigMetadataHandler(ConfigHandler["DistributionMetadata"]):
section_prefix = 'metadata'
aliases = {
'home_page': 'url',
'summary': 'description',
'classifier': 'classifiers',
'platform': 'platforms',
}
strict_mode = False
"""We need to keep it loose, to be partially compatible with
`pbr` and `d2to1` packages which also uses `metadata` section.
"""
def __init__(
self,
target_obj: "DistributionMetadata",
options: AllCommandOptions,
ignore_option_errors: bool,
ensure_discovered: expand.EnsurePackagesDiscovered,
package_dir: Optional[dict] = None,
root_dir: _Path = os.curdir,
):
super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
self.package_dir = package_dir
self.root_dir = root_dir
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_file = partial(self._parse_file, root_dir=self.root_dir)
parse_dict = self._parse_dict
exclude_files_parser = self._exclude_files_parser
return {
'platforms': parse_list,
'keywords': parse_list,
'provides': parse_list,
'requires': self._deprecated_config_handler(
parse_list,
"The requires parameter is deprecated, please use "
"install_requires for runtime dependencies.",
due_date=(2023, 10, 30),
# Warning introduced in 27 Oct 2018
),
'obsoletes': parse_list,
'classifiers': self._get_parser_compound(parse_file, parse_list),
'license': exclude_files_parser('license'),
'license_file': self._deprecated_config_handler(
exclude_files_parser('license_file'),
"The license_file parameter is deprecated, "
"use license_files instead.",
due_date=(2023, 10, 30),
# Warning introduced in 23 May 2021
),
'license_files': parse_list,
'description': parse_file,
'long_description': parse_file,
'version': self._parse_version,
'project_urls': parse_dict,
}
def _parse_version(self, value):
"""Parses `version` option value.
:param value:
:rtype: str
"""
version = self._parse_file(value, self.root_dir)
if version != value:
version = version.strip()
# Be strict about versions loaded from file because it's easy to
# accidentally include newlines and other unintended content
try:
Version(version)
except InvalidVersion:
raise OptionError(
f'Version loaded from {value} does not '
f'comply with PEP 440: {version}'
)
return version
return expand.version(self._parse_attr(value, self.package_dir, self.root_dir))
class ConfigOptionsHandler(ConfigHandler["Distribution"]):
section_prefix = 'options'
def __init__(
self,
target_obj: "Distribution",
options: AllCommandOptions,
ignore_option_errors: bool,
ensure_discovered: expand.EnsurePackagesDiscovered,
):
super().__init__(target_obj, options, ignore_option_errors, ensure_discovered)
self.root_dir = target_obj.src_root
self.package_dir: Dict[str, str] = {} # To be filled by `find_packages`
@classmethod
def _parse_list_semicolon(cls, value):
return cls._parse_list(value, separator=';')
def _parse_file_in_root(self, value):
return self._parse_file(value, root_dir=self.root_dir)
def _parse_requirements_list(self, label: str, value: str):
# Parse a requirements list, either by reading in a `file:`, or a list.
parsed = self._parse_list_semicolon(self._parse_file_in_root(value))
_warn_accidental_env_marker_misconfig(label, value, parsed)
# Filter it to only include lines that are not comments. `parse_list`
# will have stripped each line and filtered out empties.
return [line for line in parsed if not line.startswith("#")]
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_bool = self._parse_bool
parse_dict = self._parse_dict
parse_cmdclass = self._parse_cmdclass
return {
'zip_safe': parse_bool,
'include_package_data': parse_bool,
'package_dir': parse_dict,
'scripts': parse_list,
'eager_resources': parse_list,
'dependency_links': parse_list,
'namespace_packages': self._deprecated_config_handler(
parse_list,
"The namespace_packages parameter is deprecated, "
"consider using implicit namespaces instead (PEP 420).",
# TODO: define due date, see setuptools.dist:check_nsp.
),
'install_requires': partial(
self._parse_requirements_list, "install_requires"
),
'setup_requires': self._parse_list_semicolon,
'tests_require': self._parse_list_semicolon,
'packages': self._parse_packages,
'entry_points': self._parse_file_in_root,
'py_modules': parse_list,
'python_requires': SpecifierSet,
'cmdclass': parse_cmdclass,
}
def _parse_cmdclass(self, value):
package_dir = self.ensure_discovered.package_dir
return expand.cmdclass(self._parse_dict(value), package_dir, self.root_dir)
def _parse_packages(self, value):
"""Parses `packages` option value.
:param value:
:rtype: list
"""
find_directives = ['find:', 'find_namespace:']
trimmed_value = value.strip()
if trimmed_value not in find_directives:
return self._parse_list(value)
# Read function arguments from a dedicated section.
find_kwargs = self.parse_section_packages__find(
self.sections.get('packages.find', {})
)
find_kwargs.update(
namespaces=(trimmed_value == find_directives[1]),
root_dir=self.root_dir,
fill_package_dir=self.package_dir,
)
return expand.find_packages(**find_kwargs)
def parse_section_packages__find(self, section_options):
"""Parses `packages.find` configuration file section.
To be used in conjunction with _parse_packages().
:param dict section_options:
"""
section_data = self._parse_section_to_dict(section_options, self._parse_list)
valid_keys = ['where', 'include', 'exclude']
find_kwargs = dict(
[(k, v) for k, v in section_data.items() if k in valid_keys and v]
)
where = find_kwargs.get('where')
if where is not None:
find_kwargs['where'] = where[0] # cast list to single val
return find_kwargs
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _parse_package_data(self, section_options):
package_data = self._parse_section_to_dict(section_options, self._parse_list)
return expand.canonic_package_data(package_data)
def parse_section_package_data(self, section_options):
"""Parses `package_data` configuration file section.
:param dict section_options:
"""
self['package_data'] = self._parse_package_data(section_options)
def parse_section_exclude_package_data(self, section_options):
"""Parses `exclude_package_data` configuration file section.
:param dict section_options:
"""
self['exclude_package_data'] = self._parse_package_data(section_options)
def parse_section_extras_require(self, section_options):
"""Parses `extras_require` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict_with_key(
section_options,
lambda k, v: self._parse_requirements_list(f"extras_require[{k}]", v),
)
self['extras_require'] = parsed
def parse_section_data_files(self, section_options):
"""Parses `data_files` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['data_files'] = expand.canonic_data_files(parsed, self.root_dir)
class _AmbiguousMarker(SetuptoolsDeprecationWarning):
_SUMMARY = "Ambiguous requirement marker."
_DETAILS = """
One of the parsed requirements in `{field}` looks like a valid environment marker:
{req!r}
Please make sure that the configuration file is correct.
You can use dangling lines to avoid this problem.
"""
_SEE_DOCS = "userguide/declarative_config.html#opt-2"
# TODO: should we include due_date here? Initially introduced in 6 Aug 2022.
# Does this make sense with latest version of packaging?
@classmethod
def message(cls, **kw):
docs = f"https://setuptools.pypa.io/en/latest/{cls._SEE_DOCS}"
return cls._format(cls._SUMMARY, cls._DETAILS, see_url=docs, format_args=kw)
class _DeprecatedConfig(SetuptoolsDeprecationWarning):
_SEE_DOCS = "userguide/declarative_config.html"
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/_apply_pyprojecttoml.py
"""Translation layer between pyproject config and setuptools distribution and
metadata objects.
The distribution and metadata objects are modeled after (an old version of)
core metadata, therefore configs in the format specified for ``pyproject.toml``
need to be processed before being applied.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
import logging
import os
from collections.abc import Mapping
from email.headerregistry import Address
from functools import partial, reduce
from itertools import chain
from types import MappingProxyType
from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple,
Type, Union, cast)
from ..warnings import SetuptoolsWarning, SetuptoolsDeprecationWarning
if TYPE_CHECKING:
from setuptools._importlib import metadata # noqa
from setuptools.dist import Distribution # noqa
EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like
_Path = Union[os.PathLike, str]
_DictOrStr = Union[dict, str]
_CorrespFn = Callable[["Distribution", Any, _Path], None]
_Correspondence = Union[str, _CorrespFn]
_logger = logging.getLogger(__name__)
def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution":
"""Apply configuration dict read with :func:`read_configuration`"""
if not config:
return dist # short-circuit unrelated pyproject.toml file
root_dir = os.path.dirname(filename) or "."
_apply_project_table(dist, config, root_dir)
_apply_tool_table(dist, config, filename)
current_directory = os.getcwd()
os.chdir(root_dir)
try:
dist._finalize_requires()
dist._finalize_license_files()
finally:
os.chdir(current_directory)
return dist
def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path):
project_table = config.get("project", {}).copy()
if not project_table:
return # short-circuit
_handle_missing_dynamic(dist, project_table)
_unify_entry_points(project_table)
for field, value in project_table.items():
norm_key = json_compatible_key(field)
corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key)
if callable(corresp):
corresp(dist, value, root_dir)
else:
_set_config(dist, corresp, value)
def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path):
tool_table = config.get("tool", {}).get("setuptools", {})
if not tool_table:
return # short-circuit
for field, value in tool_table.items():
norm_key = json_compatible_key(field)
if norm_key in TOOL_TABLE_DEPRECATIONS:
suggestion, kwargs = TOOL_TABLE_DEPRECATIONS[norm_key]
msg = f"The parameter `{norm_key}` is deprecated, {suggestion}"
SetuptoolsDeprecationWarning.emit(
"Deprecated config", msg, **kwargs # type: ignore
)
norm_key = TOOL_TABLE_RENAMES.get(norm_key, norm_key)
_set_config(dist, norm_key, value)
_copy_command_options(config, dist, filename)
def _handle_missing_dynamic(dist: "Distribution", project_table: dict):
"""Be temporarily forgiving with ``dynamic`` fields not listed in ``dynamic``"""
# TODO: Set fields back to `None` once the feature stabilizes
dynamic = set(project_table.get("dynamic", []))
for field, getter in _PREVIOUSLY_DEFINED.items():
if not (field in project_table or field in dynamic):
value = getter(dist)
if value:
_WouldIgnoreField.emit(field=field, value=value)
def json_compatible_key(key: str) -> str:
"""As defined in :pep:`566#json-compatible-metadata`"""
return key.lower().replace("-", "_")
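# Usage sketch (editorial illustration):
#
#     json_compatible_key("Optional-Dependencies")  # -> "optional_dependencies"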
def _set_config(dist: "Distribution", field: str, value: Any):
setter = getattr(dist.metadata, f"set_{field}", None)
if setter:
setter(value)
elif hasattr(dist.metadata, field) or field in SETUPTOOLS_PATCHES:
setattr(dist.metadata, field, value)
else:
setattr(dist, field, value)
_CONTENT_TYPES = {
".md": "text/markdown",
".rst": "text/x-rst",
".txt": "text/plain",
}
def _guess_content_type(file: str) -> Optional[str]:
_, ext = os.path.splitext(file.lower())
if not ext:
return None
if ext in _CONTENT_TYPES:
return _CONTENT_TYPES[ext]
valid = ", ".join(f"{k} ({v})" for k, v in _CONTENT_TYPES.items())
msg = f"only the following file extensions are recognized: {valid}."
raise ValueError(f"Undefined content type for {file}, {msg}")
def _long_description(dist: "Distribution", val: _DictOrStr, root_dir: _Path):
from setuptools.config import expand
if isinstance(val, str):
file: Union[str, list] = val
text = expand.read_files(file, root_dir)
ctype = _guess_content_type(val)
else:
file = val.get("file") or []
text = val.get("text") or expand.read_files(file, root_dir)
ctype = val["content-type"]
_set_config(dist, "long_description", text)
if ctype:
_set_config(dist, "long_description_content_type", ctype)
if file:
dist._referenced_files.add(cast(str, file))
def _license(dist: "Distribution", val: dict, root_dir: _Path):
from setuptools.config import expand
if "file" in val:
_set_config(dist, "license", expand.read_files([val["file"]], root_dir))
dist._referenced_files.add(val["file"])
else:
_set_config(dist, "license", val["text"])
def _people(dist: "Distribution", val: List[dict], _root_dir: _Path, kind: str):
field = []
email_field = []
for person in val:
if "name" not in person:
email_field.append(person["email"])
elif "email" not in person:
field.append(person["name"])
else:
addr = Address(display_name=person["name"], addr_spec=person["email"])
email_field.append(str(addr))
if field:
_set_config(dist, kind, ", ".join(field))
if email_field:
_set_config(dist, f"{kind}_email", ", ".join(email_field))
def _project_urls(dist: "Distribution", val: dict, _root_dir):
_set_config(dist, "project_urls", val)
def _python_requires(dist: "Distribution", val: dict, _root_dir):
from setuptools.extern.packaging.specifiers import SpecifierSet
_set_config(dist, "python_requires", SpecifierSet(val))
def _dependencies(dist: "Distribution", val: list, _root_dir):
if getattr(dist, "install_requires", []):
msg = "`install_requires` overwritten in `pyproject.toml` (dependencies)"
SetuptoolsWarning.emit(msg)
_set_config(dist, "install_requires", val)
def _optional_dependencies(dist: "Distribution", val: dict, _root_dir):
existing = getattr(dist, "extras_require", {})
_set_config(dist, "extras_require", {**existing, **val})
def _unify_entry_points(project_table: dict):
project = project_table
entry_points = project.pop("entry-points", project.pop("entry_points", {}))
renaming = {"scripts": "console_scripts", "gui_scripts": "gui_scripts"}
for key, value in list(project.items()): # eager to allow modifications
norm_key = json_compatible_key(key)
if norm_key in renaming and value:
entry_points[renaming[norm_key]] = project.pop(key)
if entry_points:
project["entry-points"] = {
name: [f"{k} = {v}" for k, v in group.items()]
for name, group in entry_points.items()
}
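# Usage sketch (editorial illustration; the table contents are hypothetical).
# ``scripts``/``gui-scripts`` are folded into ``entry-points`` and each group
# is rendered as a list of ``"name = value"`` strings:
#
#     project = {"scripts": {"my-cmd": "pkg.cli:main"}}
#     _unify_entry_points(project)
#     # project == {"entry-points": {"console_scripts": ["my-cmd = pkg.cli:main"]}}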
def _copy_command_options(pyproject: dict, dist: "Distribution", filename: _Path):
tool_table = pyproject.get("tool", {})
cmdclass = tool_table.get("setuptools", {}).get("cmdclass", {})
valid_options = _valid_command_options(cmdclass)
cmd_opts = dist.command_options
for cmd, config in pyproject.get("tool", {}).get("distutils", {}).items():
cmd = json_compatible_key(cmd)
valid = valid_options.get(cmd, set())
cmd_opts.setdefault(cmd, {})
for key, value in config.items():
key = json_compatible_key(key)
cmd_opts[cmd][key] = (str(filename), value)
            if key not in valid:
                # To avoid removing options that are specified dynamically we
                # just log a warning...
                _logger.warning(f"Command option {cmd}.{key} is not defined")
def _valid_command_options(cmdclass: Mapping = EMPTY) -> Dict[str, Set[str]]:
from .._importlib import metadata
from setuptools.dist import Distribution
valid_options = {"global": _normalise_cmd_options(Distribution.global_options)}
unloaded_entry_points = metadata.entry_points(group='distutils.commands')
loaded_entry_points = (_load_ep(ep) for ep in unloaded_entry_points)
entry_points = (ep for ep in loaded_entry_points if ep)
for cmd, cmd_class in chain(entry_points, cmdclass.items()):
opts = valid_options.get(cmd, set())
opts = opts | _normalise_cmd_options(getattr(cmd_class, "user_options", []))
valid_options[cmd] = opts
return valid_options
def _load_ep(ep: "metadata.EntryPoint") -> Optional[Tuple[str, Type]]:
# Ignore all the errors
try:
return (ep.name, ep.load())
except Exception as ex:
msg = f"{ex.__class__.__name__} while trying to load entry-point {ep.name}"
_logger.warning(f"{msg}: {ex}")
return None
def _normalise_cmd_option_key(name: str) -> str:
return json_compatible_key(name).strip("_=")
def _normalise_cmd_options(desc: List[Tuple[str, Optional[str], str]]) -> Set[str]:
return {_normalise_cmd_option_key(fancy_option[0]) for fancy_option in desc}
def _get_previous_entrypoints(dist: "Distribution") -> Dict[str, list]:
ignore = ("console_scripts", "gui_scripts")
value = getattr(dist, "entry_points", None) or {}
return {k: v for k, v in value.items() if k not in ignore}
def _attrgetter(attr):
"""
Similar to ``operator.attrgetter`` but returns None if ``attr`` is not found
>>> from types import SimpleNamespace
>>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13))
>>> _attrgetter("a")(obj)
42
>>> _attrgetter("b.c")(obj)
13
>>> _attrgetter("d")(obj) is None
True
"""
return partial(reduce, lambda acc, x: getattr(acc, x, None), attr.split("."))
def _some_attrgetter(*items):
"""
Return the first "truth-y" attribute or None
>>> from types import SimpleNamespace
>>> obj = SimpleNamespace(a=42, b=SimpleNamespace(c=13))
>>> _some_attrgetter("d", "a", "b.c")(obj)
42
>>> _some_attrgetter("d", "e", "b.c", "a")(obj)
13
>>> _some_attrgetter("d", "e", "f")(obj) is None
True
"""
    def _accessor(obj):
        values = (_attrgetter(i)(obj) for i in items)
        return next((i for i in values if i is not None), None)
    return _accessor
PYPROJECT_CORRESPONDENCE: Dict[str, _Correspondence] = {
"readme": _long_description,
"license": _license,
"authors": partial(_people, kind="author"),
"maintainers": partial(_people, kind="maintainer"),
"urls": _project_urls,
"dependencies": _dependencies,
"optional_dependencies": _optional_dependencies,
"requires_python": _python_requires,
}
TOOL_TABLE_RENAMES = {"script_files": "scripts"}
TOOL_TABLE_DEPRECATIONS = {
"namespace_packages": (
"consider using implicit namespaces instead (PEP 420).",
{"due_date": (2023, 10, 30)}, # warning introduced in May 2022
)
}
SETUPTOOLS_PATCHES = {"long_description_content_type", "project_urls",
"provides_extras", "license_file", "license_files"}
_PREVIOUSLY_DEFINED = {
"name": _attrgetter("metadata.name"),
"version": _attrgetter("metadata.version"),
"description": _attrgetter("metadata.description"),
"readme": _attrgetter("metadata.long_description"),
"requires-python": _some_attrgetter("python_requires", "metadata.python_requires"),
"license": _attrgetter("metadata.license"),
"authors": _some_attrgetter("metadata.author", "metadata.author_email"),
"maintainers": _some_attrgetter("metadata.maintainer", "metadata.maintainer_email"),
"keywords": _attrgetter("metadata.keywords"),
"classifiers": _attrgetter("metadata.classifiers"),
"urls": _attrgetter("metadata.project_urls"),
"entry-points": _get_previous_entrypoints,
"dependencies": _some_attrgetter("_orig_install_requires", "install_requires"),
"optional-dependencies": _some_attrgetter("_orig_extras_require", "extras_require"),
}
class _WouldIgnoreField(SetuptoolsDeprecationWarning):
_SUMMARY = "`{field}` defined outside of `pyproject.toml` would be ignored."
_DETAILS = """
##########################################################################
# configuration would be ignored/result in error due to `pyproject.toml` #
##########################################################################
The following seems to be defined outside of `pyproject.toml`:
`{field} = {value!r}`
According to the spec (see the link below), however, setuptools CANNOT
consider this value unless `{field}` is listed as `dynamic`.
https://packaging.python.org/en/latest/specifications/declaring-project-metadata/
For the time being, `setuptools` will still consider the given value (as a
**transitional** measure), but please note that future releases of setuptools will
follow strictly the standard.
To prevent this warning, you can list `{field}` under `dynamic` or alternatively
remove the `[project]` table from your file and rely entirely on other means of
configuration.
"""
_DUE_DATE = (2023, 10, 30) # Initially introduced in 27 May 2022
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/_validate_pyproject/__init__.py
from functools import reduce
from typing import Any, Callable, Dict
from . import formats
from .error_reporting import detailed_errors, ValidationError
from .extra_validations import EXTRA_VALIDATIONS
from .fastjsonschema_exceptions import JsonSchemaException, JsonSchemaValueException
from .fastjsonschema_validations import validate as _validate
__all__ = [
"validate",
"FORMAT_FUNCTIONS",
"EXTRA_VALIDATIONS",
"ValidationError",
"JsonSchemaException",
"JsonSchemaValueException",
]
FORMAT_FUNCTIONS: Dict[str, Callable[[str], bool]] = {
fn.__name__.replace("_", "-"): fn
for fn in formats.__dict__.values()
if callable(fn) and not fn.__name__.startswith("_")
}
def validate(data: Any) -> bool:
"""Validate the given ``data`` object using JSON Schema
This function raises ``ValidationError`` if ``data`` is invalid.
"""
with detailed_errors():
_validate(data, custom_formats=FORMAT_FUNCTIONS)
reduce(lambda acc, fn: fn(acc), EXTRA_VALIDATIONS, data)
return True
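# Usage sketch (editorial illustration; the project tables are hypothetical):
#
#     validate({"project": {"name": "my-pkg", "version": "1.0"}})  # -> True
#     validate({"project": {"name": "???"}})  # raises ValidationError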
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/_validate_pyproject/formats.py
import logging
import os
import re
import string
import typing
from itertools import chain as _chain
if typing.TYPE_CHECKING:
from typing_extensions import Literal
_logger = logging.getLogger(__name__)
# -------------------------------------------------------------------------------------
# PEP 440
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_REGEX = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.X | re.I)
def pep440(version: str) -> bool:
return VERSION_REGEX.match(version) is not None
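# Usage sketch (editorial illustration):
#
#     pep440("1.0.post1")      # -> True
#     pep440("not a version")  # -> False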
# -------------------------------------------------------------------------------------
# PEP 508
PEP508_IDENTIFIER_PATTERN = r"([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])"
PEP508_IDENTIFIER_REGEX = re.compile(f"^{PEP508_IDENTIFIER_PATTERN}$", re.I)
def pep508_identifier(name: str) -> bool:
return PEP508_IDENTIFIER_REGEX.match(name) is not None
try:
try:
from packaging import requirements as _req
except ImportError: # pragma: no cover
# let's try setuptools vendored version
from setuptools._vendor.packaging import requirements as _req # type: ignore
def pep508(value: str) -> bool:
try:
_req.Requirement(value)
return True
except _req.InvalidRequirement:
return False
except ImportError: # pragma: no cover
_logger.warning(
"Could not find an installation of `packaging`. Requirements, dependencies and "
"versions might not be validated. "
"To enforce validation, please install `packaging`."
)
def pep508(value: str) -> bool:
return True
def pep508_versionspec(value: str) -> bool:
"""Expression that can be used to specify/lock versions (including ranges)"""
if any(c in value for c in (";", "]", "@")):
# In PEP 508:
# conditional markers, extras and URL specs are not included in the
# versionspec
return False
# Let's pretend we have a dependency called `requirement` with the given
# version spec, then we can re-use the pep508 function for validation:
return pep508(f"requirement{value}")
# -------------------------------------------------------------------------------------
# PEP 517
def pep517_backend_reference(value: str) -> bool:
module, _, obj = value.partition(":")
identifiers = (i.strip() for i in _chain(module.split("."), obj.split(".")))
return all(python_identifier(i) for i in identifiers if i)
# -------------------------------------------------------------------------------------
# Classifiers - PEP 301
def _download_classifiers() -> str:
import ssl
from email.message import Message
from urllib.request import urlopen
url = "https://pypi.org/pypi?:action=list_classifiers"
context = ssl.create_default_context()
with urlopen(url, context=context) as response:
headers = Message()
headers["content_type"] = response.getheader("content-type", "text/plain")
return response.read().decode(headers.get_param("charset", "utf-8"))
class _TroveClassifier:
"""The ``trove_classifiers`` package is the official way of validating classifiers,
however this package might not be always available.
As a workaround we can still download a list from PyPI.
We also don't want to be over strict about it, so simply skipping silently is an
option (classifiers will be validated anyway during the upload to PyPI).
"""
downloaded: typing.Union[None, "Literal[False]", typing.Set[str]]
def __init__(self):
self.downloaded = None
self._skip_download = False
# None => not cached yet
# False => cache not available
self.__name__ = "trove_classifier" # Emulate a public function
def _disable_download(self):
# This is a private API. Only setuptools has the consent of using it.
self._skip_download = True
def __call__(self, value: str) -> bool:
if self.downloaded is False or self._skip_download is True:
return True
if os.getenv("NO_NETWORK") or os.getenv("VALIDATE_PYPROJECT_NO_NETWORK"):
self.downloaded = False
msg = (
"Install ``trove-classifiers`` to ensure proper validation. "
"Skipping download of classifiers list from PyPI (NO_NETWORK)."
)
_logger.debug(msg)
return True
if self.downloaded is None:
msg = (
"Install ``trove-classifiers`` to ensure proper validation. "
"Meanwhile a list of classifiers will be downloaded from PyPI."
)
_logger.debug(msg)
try:
self.downloaded = set(_download_classifiers().splitlines())
except Exception:
self.downloaded = False
_logger.debug("Problem with download, skipping validation")
return True
return value in self.downloaded or value.lower().startswith("private ::")
try:
from trove_classifiers import classifiers as _trove_classifiers
def trove_classifier(value: str) -> bool:
return value in _trove_classifiers or value.lower().startswith("private ::")
except ImportError: # pragma: no cover
trove_classifier = _TroveClassifier()
# -------------------------------------------------------------------------------------
# Stub packages - PEP 561
def pep561_stub_name(value: str) -> bool:
top, *children = value.split(".")
if not top.endswith("-stubs"):
return False
return python_module_name(".".join([top[: -len("-stubs")], *children]))
# -------------------------------------------------------------------------------------
# Non-PEP related
def url(value: str) -> bool:
from urllib.parse import urlparse
try:
parts = urlparse(value)
if not parts.scheme:
_logger.warning(
"For maximum compatibility please make sure to include a "
"`scheme` prefix in your URL (e.g. 'http://'). "
f"Given value: {value}"
)
if not (value.startswith("/") or value.startswith("\\") or "@" in value):
parts = urlparse(f"http://{value}")
return bool(parts.scheme and parts.netloc)
except Exception:
return False
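# Illustrative checks (a sketch, not part of the vendored module):
assert url("https://pypi.org/project/build/")
assert not url("http://")  # a scheme without a network location is rejected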
# https://packaging.python.org/specifications/entry-points/
ENTRYPOINT_PATTERN = r"[^\[\s=]([^=]*[^\s=])?"
ENTRYPOINT_REGEX = re.compile(f"^{ENTRYPOINT_PATTERN}$", re.I)
RECOMMENDED_ENTRYPOINT_PATTERN = r"[\w.-]+"
RECOMMENDED_ENTRYPOINT_REGEX = re.compile(f"^{RECOMMENDED_ENTRYPOINT_PATTERN}$", re.I)
ENTRYPOINT_GROUP_PATTERN = r"\w+(\.\w+)*"
ENTRYPOINT_GROUP_REGEX = re.compile(f"^{ENTRYPOINT_GROUP_PATTERN}$", re.I)
def python_identifier(value: str) -> bool:
return value.isidentifier()
def python_qualified_identifier(value: str) -> bool:
if value.startswith(".") or value.endswith("."):
return False
return all(python_identifier(m) for m in value.split("."))
def python_module_name(value: str) -> bool:
return python_qualified_identifier(value)
def python_entrypoint_group(value: str) -> bool:
return ENTRYPOINT_GROUP_REGEX.match(value) is not None
def python_entrypoint_name(value: str) -> bool:
if not ENTRYPOINT_REGEX.match(value):
return False
    if not RECOMMENDED_ENTRYPOINT_REGEX.match(value):
        msg = f"Entry point `{value}` does not follow recommended pattern: "
        msg += RECOMMENDED_ENTRYPOINT_PATTERN
_logger.warning(msg)
return True
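# Illustrative checks (a sketch, not part of the vendored module):
assert python_entrypoint_group("console_scripts")
assert not python_entrypoint_group("bad group!")
assert python_entrypoint_name("my-plugin")
assert python_entrypoint_name("spaced name")  # allowed, but logs a warning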
def python_entrypoint_reference(value: str) -> bool:
module, _, rest = value.partition(":")
if "[" in rest:
obj, _, extras_ = rest.partition("[")
if extras_.strip()[-1] != "]":
return False
extras = (x.strip() for x in extras_.strip(string.whitespace + "[]").split(","))
if not all(pep508_identifier(e) for e in extras):
return False
_logger.warning(f"`{value}` - using extras for entry points is not recommended")
else:
obj = rest
module_parts = module.split(".")
identifiers = _chain(module_parts, obj.split(".")) if rest else module_parts
return all(python_identifier(i.strip()) for i in identifiers)
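# Illustrative checks (a minimal sketch, not part of the vendored module).
# They are placed here, after every helper above is defined, so they can run
# at import time without tripping over the module's forward references:
assert pep517_backend_reference("setuptools.build_meta")
assert not pep517_backend_reference("123:not-an-identifier")
assert pep561_stub_name("pandas-stubs")
assert not pep561_stub_name("pandas")
assert python_entrypoint_reference("pkg.mod:obj.attr")
assert not python_entrypoint_reference("pkg.mod:obj[unclosed")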
| 9,161 |
Python
| 32.195652 | 88 | 0.553433 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/_validate_pyproject/extra_validations.py
|
"""The purpose of this module is implement PEP 621 validations that are
difficult to express as a JSON Schema (or that are not supported by the current
JSON Schema library).
"""
from typing import Mapping, TypeVar
from .error_reporting import ValidationError
T = TypeVar("T", bound=Mapping)
class RedefiningStaticFieldAsDynamic(ValidationError):
"""According to PEP 621:
Build back-ends MUST raise an error if the metadata specifies a field
statically as well as being listed in dynamic.
"""
def validate_project_dynamic(pyproject: T) -> T:
project_table = pyproject.get("project", {})
dynamic = project_table.get("dynamic", [])
for field in dynamic:
if field in project_table:
msg = f"You cannot provide a value for `project.{field}` and "
msg += "list it under `project.dynamic` at the same time"
name = f"data.project.{field}"
value = {field: project_table[field], "...": " # ...", "dynamic": dynamic}
raise RedefiningStaticFieldAsDynamic(msg, value, name, rule="PEP 621")
return pyproject
EXTRA_VALIDATIONS = (validate_project_dynamic,)
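# Illustrative failure (a sketch, not part of the vendored module): `version`
# is given statically *and* listed under `dynamic`, so validation must raise.
_conflicting = {"project": {"name": "pkg", "version": "1.0", "dynamic": ["version"]}}
try:
    validate_project_dynamic(_conflicting)
except RedefiningStaticFieldAsDynamic:
    pass  # expected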
| 1,153 |
Python
| 30.189188 | 86 | 0.677363 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py
|
import re
SPLIT_RE = re.compile(r'[\.\[\]]+')
class JsonSchemaException(ValueError):
"""
Base exception of ``fastjsonschema`` library.
"""
class JsonSchemaValueException(JsonSchemaException):
"""
Exception raised by validation function. Available properties:
* ``message`` containing human-readable information what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``),
* invalid ``value`` (e.g. ``60``),
* ``name`` of a path in the data structure (e.g. ``data.property[index]``),
* ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``),
* the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``),
* ``rule`` which the ``value`` is breaking (e.g. ``maximum``)
* and ``rule_definition`` (e.g. ``42``).
.. versionchanged:: 2.14.0
Added all extra properties.
"""
def __init__(self, message, value=None, name=None, definition=None, rule=None):
super().__init__(message)
self.message = message
self.value = value
self.name = name
self.definition = definition
self.rule = rule
@property
def path(self):
return [item for item in SPLIT_RE.split(self.name) if item != '']
@property
def rule_definition(self):
if not self.rule or not self.definition:
return None
return self.definition.get(self.rule)
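# Illustrative behaviour (a sketch, not part of the vendored module):
# ``path`` splits the dotted/indexed ``name`` into its components.
_demo = JsonSchemaValueException("invalid", name="data.project.authors[0]")
assert _demo.path == ["data", "project", "authors", "0"]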
class JsonSchemaDefinitionException(JsonSchemaException):
"""
Exception raised by generator of validation function.
"""
| 1,612 |
Python
| 30.01923 | 139 | 0.613524 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/config/_validate_pyproject/error_reporting.py
|
import io
import json
import logging
import os
import re
from contextlib import contextmanager
from textwrap import indent, wrap
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union, cast
from .fastjsonschema_exceptions import JsonSchemaValueException
_logger = logging.getLogger(__name__)
_MESSAGE_REPLACEMENTS = {
"must be named by propertyName definition": "keys must be named by",
"one of contains definition": "at least one item that matches",
" same as const definition:": "",
"only specified items": "only items matching the definition",
}
_SKIP_DETAILS = (
"must not be empty",
"is always invalid",
"must not be there",
)
_NEED_DETAILS = {"anyOf", "oneOf", "anyOf", "contains", "propertyNames", "not", "items"}
_CAMEL_CASE_SPLITTER = re.compile(r"\W+|([A-Z][^A-Z\W]*)")
_IDENTIFIER = re.compile(r"^[\w_]+$", re.I)
_TOML_JARGON = {
"object": "table",
"property": "key",
"properties": "keys",
"property names": "keys",
}
class ValidationError(JsonSchemaValueException):
"""Report violations of a given JSON schema.
This class extends :exc:`~fastjsonschema.JsonSchemaValueException`
by adding the following properties:
- ``summary``: an improved version of the ``JsonSchemaValueException`` error message
      (with only the necessary information)
- ``details``: more contextual information about the error like the failing schema
itself and the value that violates the schema.
Depending on the level of the verbosity of the ``logging`` configuration
the exception message will be only ``summary`` (default) or a combination of
``summary`` and ``details`` (when the logging level is set to :obj:`logging.DEBUG`).
"""
summary = ""
details = ""
_original_message = ""
@classmethod
def _from_jsonschema(cls, ex: JsonSchemaValueException):
formatter = _ErrorFormatting(ex)
obj = cls(str(formatter), ex.value, formatter.name, ex.definition, ex.rule)
debug_code = os.getenv("JSONSCHEMA_DEBUG_CODE_GENERATION", "false").lower()
if debug_code != "false": # pragma: no cover
obj.__cause__, obj.__traceback__ = ex.__cause__, ex.__traceback__
obj._original_message = ex.message
obj.summary = formatter.summary
obj.details = formatter.details
return obj
@contextmanager
def detailed_errors():
try:
yield
except JsonSchemaValueException as ex:
raise ValidationError._from_jsonschema(ex) from None
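# Illustrative usage (a minimal sketch, not part of the vendored module):
# a fastjsonschema error raised inside the block resurfaces as the richer
# ValidationError; the name/definition below are made-up sample data.
try:
    with detailed_errors():
        raise JsonSchemaValueException(
            "data.name must be string", value=42, name="data.name",
            definition={"type": "string"}, rule="type",
        )
except ValidationError as _err:
    assert _err.name == "`name`"
    assert "must be string" in _err.summary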
class _ErrorFormatting:
def __init__(self, ex: JsonSchemaValueException):
self.ex = ex
self.name = f"`{self._simplify_name(ex.name)}`"
self._original_message = self.ex.message.replace(ex.name, self.name)
self._summary = ""
self._details = ""
def __str__(self) -> str:
if _logger.getEffectiveLevel() <= logging.DEBUG and self.details:
return f"{self.summary}\n\n{self.details}"
return self.summary
@property
def summary(self) -> str:
if not self._summary:
self._summary = self._expand_summary()
return self._summary
@property
def details(self) -> str:
if not self._details:
self._details = self._expand_details()
return self._details
def _simplify_name(self, name):
x = len("data.")
return name[x:] if name.startswith("data.") else name
def _expand_summary(self):
msg = self._original_message
for bad, repl in _MESSAGE_REPLACEMENTS.items():
msg = msg.replace(bad, repl)
if any(substring in msg for substring in _SKIP_DETAILS):
return msg
schema = self.ex.rule_definition
if self.ex.rule in _NEED_DETAILS and schema:
summary = _SummaryWriter(_TOML_JARGON)
return f"{msg}:\n\n{indent(summary(schema), ' ')}"
return msg
def _expand_details(self) -> str:
optional = []
desc_lines = self.ex.definition.pop("$$description", [])
desc = self.ex.definition.pop("description", None) or " ".join(desc_lines)
if desc:
description = "\n".join(
wrap(
desc,
width=80,
initial_indent=" ",
subsequent_indent=" ",
break_long_words=False,
)
)
optional.append(f"DESCRIPTION:\n{description}")
schema = json.dumps(self.ex.definition, indent=4)
value = json.dumps(self.ex.value, indent=4)
defaults = [
f"GIVEN VALUE:\n{indent(value, ' ')}",
f"OFFENDING RULE: {self.ex.rule!r}",
f"DEFINITION:\n{indent(schema, ' ')}",
]
return "\n\n".join(optional + defaults)
class _SummaryWriter:
_IGNORE = {"description", "default", "title", "examples"}
def __init__(self, jargon: Optional[Dict[str, str]] = None):
self.jargon: Dict[str, str] = jargon or {}
# Clarify confusing terms
self._terms = {
"anyOf": "at least one of the following",
"oneOf": "exactly one of the following",
"allOf": "all of the following",
"not": "(*NOT* the following)",
"prefixItems": f"{self._jargon('items')} (in order)",
"items": "items",
"contains": "contains at least one of",
"propertyNames": (
f"non-predefined acceptable {self._jargon('property names')}"
),
"patternProperties": f"{self._jargon('properties')} named via pattern",
"const": "predefined value",
"enum": "one of",
}
# Attributes that indicate that the definition is easy and can be done
# inline (e.g. string and number)
self._guess_inline_defs = [
"enum",
"const",
"maxLength",
"minLength",
"pattern",
"format",
"minimum",
"maximum",
"exclusiveMinimum",
"exclusiveMaximum",
"multipleOf",
]
def _jargon(self, term: Union[str, List[str]]) -> Union[str, List[str]]:
if isinstance(term, list):
return [self.jargon.get(t, t) for t in term]
return self.jargon.get(term, term)
def __call__(
self,
schema: Union[dict, List[dict]],
prefix: str = "",
*,
_path: Sequence[str] = (),
) -> str:
if isinstance(schema, list):
return self._handle_list(schema, prefix, _path)
        filtered = self._filter_unnecessary(schema, _path)
simple = self._handle_simple_dict(filtered, _path)
if simple:
return f"{prefix}{simple}"
child_prefix = self._child_prefix(prefix, " ")
item_prefix = self._child_prefix(prefix, "- ")
indent = len(prefix) * " "
with io.StringIO() as buffer:
for i, (key, value) in enumerate(filtered.items()):
child_path = [*_path, key]
line_prefix = prefix if i == 0 else indent
buffer.write(f"{line_prefix}{self._label(child_path)}:")
# ^ just the first item should receive the complete prefix
if isinstance(value, dict):
                    filtered = self._filter_unnecessary(value, child_path)
simple = self._handle_simple_dict(filtered, child_path)
buffer.write(
f" {simple}"
if simple
else f"\n{self(value, child_prefix, _path=child_path)}"
)
elif isinstance(value, list) and (
key != "type" or self._is_property(child_path)
):
children = self._handle_list(value, item_prefix, child_path)
sep = " " if children.startswith("[") else "\n"
buffer.write(f"{sep}{children}")
else:
buffer.write(f" {self._value(value, child_path)}\n")
return buffer.getvalue()
    def _is_unnecessary(self, path: Sequence[str]) -> bool:
if self._is_property(path) or not path: # empty path => instruction @ root
return False
key = path[-1]
return any(key.startswith(k) for k in "$_") or key in self._IGNORE
    def _filter_unnecessary(self, schema: dict, path: Sequence[str]):
return {
key: value
for key, value in schema.items()
            if not self._is_unnecessary([*path, key])
}
def _handle_simple_dict(self, value: dict, path: Sequence[str]) -> Optional[str]:
inline = any(p in value for p in self._guess_inline_defs)
simple = not any(isinstance(v, (list, dict)) for v in value.values())
if inline or simple:
return f"{{{', '.join(self._inline_attrs(value, path))}}}\n"
return None
def _handle_list(
self, schemas: list, prefix: str = "", path: Sequence[str] = ()
) -> str:
        if self._is_unnecessary(path):
return ""
repr_ = repr(schemas)
if all(not isinstance(e, (dict, list)) for e in schemas) and len(repr_) < 60:
return f"{repr_}\n"
item_prefix = self._child_prefix(prefix, "- ")
return "".join(
self(v, item_prefix, _path=[*path, f"[{i}]"]) for i, v in enumerate(schemas)
)
def _is_property(self, path: Sequence[str]):
"""Check if the given path can correspond to an arbitrarily named property"""
counter = 0
for key in path[-2::-1]:
if key not in {"properties", "patternProperties"}:
break
counter += 1
        # If the counter is even, the path corresponds to a JSON Schema keyword;
        # otherwise it can be any arbitrary string naming a property
return counter % 2 == 1
def _label(self, path: Sequence[str]) -> str:
*parents, key = path
if not self._is_property(path):
norm_key = _separate_terms(key)
return self._terms.get(key) or " ".join(self._jargon(norm_key))
if parents[-1] == "patternProperties":
return f"(regex {key!r})"
return repr(key) # property name
def _value(self, value: Any, path: Sequence[str]) -> str:
if path[-1] == "type" and not self._is_property(path):
type_ = self._jargon(value)
return (
f"[{', '.join(type_)}]" if isinstance(value, list) else cast(str, type_)
)
return repr(value)
def _inline_attrs(self, schema: dict, path: Sequence[str]) -> Iterator[str]:
for key, value in schema.items():
child_path = [*path, key]
yield f"{self._label(child_path)}: {self._value(value, child_path)}"
def _child_prefix(self, parent_prefix: str, child_prefix: str) -> str:
return len(parent_prefix) * " " + child_prefix
def _separate_terms(word: str) -> List[str]:
"""
>>> _separate_terms("FooBar-foo")
['foo', 'bar', 'foo']
"""
return [w.lower() for w in _CAMEL_CASE_SPLITTER.split(word) if w]
| 11,266 |
Python
| 34.319749 | 88 | 0.559205 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/zipp.py
|
import io
import posixpath
import zipfile
import itertools
import contextlib
import sys
import pathlib
if sys.version_info < (3, 7):
from collections import OrderedDict
else:
OrderedDict = dict
__all__ = ['Path']
def _parents(path):
"""
Given a path with elements separated by
posixpath.sep, generate all parents of that path.
>>> list(_parents('b/d'))
['b']
>>> list(_parents('/b/d/'))
['/b']
>>> list(_parents('b/d/f/'))
['b/d', 'b']
>>> list(_parents('b'))
[]
>>> list(_parents(''))
[]
"""
return itertools.islice(_ancestry(path), 1, None)
def _ancestry(path):
"""
Given a path with elements separated by
posixpath.sep, generate all elements of that path
>>> list(_ancestry('b/d'))
['b/d', 'b']
>>> list(_ancestry('/b/d/'))
['/b/d', '/b']
>>> list(_ancestry('b/d/f/'))
['b/d/f', 'b/d', 'b']
>>> list(_ancestry('b'))
['b']
>>> list(_ancestry(''))
[]
"""
path = path.rstrip(posixpath.sep)
while path and path != posixpath.sep:
yield path
path, tail = posixpath.split(path)
_dedupe = OrderedDict.fromkeys
"""Deduplicate an iterable in original order"""
def _difference(minuend, subtrahend):
"""
Return items in minuend not in subtrahend, retaining order
with O(1) lookup.
"""
return itertools.filterfalse(set(subtrahend).__contains__, minuend)
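# Quick illustration (not part of the vendored module): the minuend's
# original order is preserved while subtrahend members are dropped.
assert list(_difference(["a", "b", "c"], ["b"])) == ["a", "c"]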
class CompleteDirs(zipfile.ZipFile):
"""
A ZipFile subclass that ensures that implied directories
are always included in the namelist.
"""
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
as_dirs = (p + posixpath.sep for p in parents)
return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super(CompleteDirs, self).namelist()
return names + list(self._implied_dirs(names))
def _name_set(self):
return set(self.namelist())
def resolve_dir(self, name):
"""
If the name represents a directory, return that name
as a directory (with the trailing slash).
"""
names = self._name_set()
dirname = name + '/'
dir_match = name not in names and dirname in names
return dirname if dir_match else name
@classmethod
def make(cls, source):
"""
Given a source (filename or zipfile), return an
appropriate CompleteDirs subclass.
"""
if isinstance(source, CompleteDirs):
return source
if not isinstance(source, zipfile.ZipFile):
return cls(_pathlib_compat(source))
# Only allow for FastLookup when supplied zipfile is read-only
if 'r' not in source.mode:
cls = CompleteDirs
source.__class__ = cls
return source
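# Illustrative behaviour (a sketch, not part of the vendored module): a zip
# that only stores "pkg/mod.py" still reports the implied "pkg/" directory.
_buf = io.BytesIO()
with zipfile.ZipFile(_buf, "w") as _zf:
    _zf.writestr("pkg/mod.py", "x = 1")
with CompleteDirs(_buf) as _cd:
    assert "pkg/" in _cd.namelist()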
class FastLookup(CompleteDirs):
"""
ZipFile subclass to ensure implicit
dirs exist and are resolved rapidly.
"""
def namelist(self):
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super(FastLookup, self).namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super(FastLookup, self)._name_set()
return self.__lookup
def _pathlib_compat(path):
"""
For path-like objects, convert to a filename for compatibility
on Python 3.6.1 and earlier.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)
class Path:
"""
A pathlib-compatible interface for zip files.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> root = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = root.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text()
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile. Note these attributes are not
valid and will raise a ``ValueError`` if the zipfile
has no filename.
>>> root.name
'abcde.zip'
>>> str(root.filename).replace(os.sep, posixpath.sep)
'mem/abcde.zip'
>>> str(root.parent)
'mem'
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
return io.TextIOWrapper(stream, *args, **kwargs)
@property
def name(self):
return pathlib.Path(self.at).name or self.filename.name
@property
def suffix(self):
return pathlib.Path(self.at).suffix or self.filename.suffix
@property
def suffixes(self):
return pathlib.Path(self.at).suffixes or self.filename.suffixes
@property
def stem(self):
return pathlib.Path(self.at).stem or self.filename.stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
with self.open('r', *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *map(_pathlib_compat, other))
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
| 8,395 |
Python
| 24.442424 | 78 | 0.579869 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/ordered_set.py
|
"""
An OrderedSet is a custom MutableSet that remembers its order, so that every
entry has an index that can be looked up.
Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
and released under the MIT license.
"""
import itertools as it
from collections import deque
try:
# Python 3
from collections.abc import MutableSet, Sequence
except ImportError:
# Python 2.7
from collections import MutableSet, Sequence
SLICE_ALL = slice(None)
__version__ = "3.1"
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
return (
hasattr(obj, "__iter__")
and not isinstance(obj, str)
and not isinstance(obj, tuple)
)
class OrderedSet(MutableSet, Sequence):
"""
An OrderedSet is a custom MutableSet that remembers its order, so that
every entry has an index that can be looked up.
Example:
>>> OrderedSet([1, 1, 2, 3, 2])
OrderedSet([1, 2, 3])
"""
def __init__(self, iterable=None):
self.items = []
self.map = {}
if iterable is not None:
self |= iterable
def __len__(self):
"""
Returns the number of unique elements in the ordered set
Example:
>>> len(OrderedSet([]))
0
>>> len(OrderedSet([1, 2]))
2
"""
return len(self.items)
def __getitem__(self, index):
"""
Get the item at a given index.
If `index` is a slice, you will get back that slice of items, as a
new OrderedSet.
If `index` is a list or a similar iterable, you'll get a list of
items corresponding to those indices. This is similar to NumPy's
"fancy indexing". The result is not an OrderedSet because you may ask
for duplicate indices, and the number of elements returned should be
the number of elements asked for.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset[1]
2
"""
if isinstance(index, slice) and index == SLICE_ALL:
return self.copy()
elif is_iterable(index):
return [self.items[i] for i in index]
elif hasattr(index, "__index__") or isinstance(index, slice):
result = self.items[index]
if isinstance(result, list):
return self.__class__(result)
else:
return result
else:
raise TypeError("Don't know how to index an OrderedSet by %r" % index)
def copy(self):
"""
Return a shallow copy of this object.
Example:
>>> this = OrderedSet([1, 2, 3])
>>> other = this.copy()
>>> this == other
True
>>> this is other
False
"""
return self.__class__(self)
def __getstate__(self):
if len(self) == 0:
# The state can't be an empty list.
# We need to return a truthy value, or else __setstate__ won't be run.
#
# This could have been done more gracefully by always putting the state
# in a tuple, but this way is backwards- and forwards- compatible with
# previous versions of OrderedSet.
return (None,)
else:
return list(self)
def __setstate__(self, state):
if state == (None,):
self.__init__([])
else:
self.__init__(state)
def __contains__(self, key):
"""
Test if the item is in this ordered set
Example:
>>> 1 in OrderedSet([1, 3, 2])
True
>>> 5 in OrderedSet([1, 3, 2])
False
"""
return key in self.map
def add(self, key):
"""
Add `key` as an item to this OrderedSet, then return its index.
If `key` is already in the OrderedSet, return the index it already
had.
Example:
>>> oset = OrderedSet()
>>> oset.append(3)
0
>>> print(oset)
OrderedSet([3])
"""
if key not in self.map:
self.map[key] = len(self.items)
self.items.append(key)
return self.map[key]
append = add
def update(self, sequence):
"""
Update the set with the given iterable sequence, then return the index
of the last element inserted.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.update([3, 1, 5, 1, 4])
4
>>> print(oset)
OrderedSet([1, 2, 3, 5, 4])
"""
item_index = None
try:
for item in sequence:
item_index = self.add(item)
except TypeError:
raise ValueError(
"Argument needs to be an iterable, got %s" % type(sequence)
)
return item_index
def index(self, key):
"""
Get the index of a given entry, raising an IndexError if it's not
present.
`key` can be an iterable of entries that is not a string, in which case
this returns a list of indices.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.index(2)
1
"""
if is_iterable(key):
return [self.index(subkey) for subkey in key]
return self.map[key]
# Provide some compatibility with pd.Index
get_loc = index
get_indexer = index
def pop(self):
"""
Remove and return the last element from the set.
Raises KeyError if the set is empty.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.pop()
3
"""
if not self.items:
raise KeyError("Set is empty")
elem = self.items[-1]
del self.items[-1]
del self.map[elem]
return elem
def discard(self, key):
"""
Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.discard(2)
>>> print(oset)
OrderedSet([1, 3])
>>> oset.discard(2)
>>> print(oset)
OrderedSet([1, 3])
"""
if key in self:
i = self.map[key]
del self.items[i]
del self.map[key]
for k, v in self.map.items():
if v >= i:
self.map[k] = v - 1
def clear(self):
"""
Remove all items from this OrderedSet.
"""
del self.items[:]
self.map.clear()
def __iter__(self):
"""
Example:
>>> list(iter(OrderedSet([1, 2, 3])))
[1, 2, 3]
"""
return iter(self.items)
def __reversed__(self):
"""
Example:
>>> list(reversed(OrderedSet([1, 2, 3])))
[3, 2, 1]
"""
return reversed(self.items)
def __repr__(self):
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, list(self))
def __eq__(self, other):
"""
Returns true if the containers have the same items. If `other` is a
Sequence, then order is checked, otherwise it is ignored.
Example:
>>> oset = OrderedSet([1, 3, 2])
>>> oset == [1, 3, 2]
True
>>> oset == [1, 2, 3]
False
>>> oset == [2, 3]
False
>>> oset == OrderedSet([3, 2, 1])
False
"""
# In Python 2 deque is not a Sequence, so treat it as one for
# consistent behavior with Python 3.
if isinstance(other, (Sequence, deque)):
# Check that this OrderedSet contains the same elements, in the
# same order, as the other object.
return list(self) == list(other)
try:
other_as_set = set(other)
except TypeError:
# If `other` can't be converted into a set, it's not equal.
return False
else:
return set(self) == other_as_set
def union(self, *sets):
"""
Combines all unique items.
        Each item's order is defined by its first appearance.
Example:
>>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
>>> print(oset)
OrderedSet([3, 1, 4, 5, 2, 0])
>>> oset.union([8, 9])
OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
>>> oset | {10}
OrderedSet([3, 1, 4, 5, 2, 0, 10])
"""
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
containers = map(list, it.chain([self], sets))
items = it.chain.from_iterable(containers)
return cls(items)
def __and__(self, other):
# the parent implementation of this is backwards
return self.intersection(other)
def intersection(self, *sets):
"""
Returns elements in common between all sets. Order is defined only
by the first set.
Example:
>>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
>>> print(oset)
OrderedSet([1, 2, 3])
>>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
OrderedSet([2])
>>> oset.intersection()
OrderedSet([1, 2, 3])
"""
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
if sets:
common = set.intersection(*map(set, sets))
items = (item for item in self if item in common)
else:
items = self
return cls(items)
def difference(self, *sets):
"""
Returns all elements that are in this set but not the others.
Example:
>>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
OrderedSet([1, 3])
>>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
OrderedSet([1])
>>> OrderedSet([1, 2, 3]) - OrderedSet([2])
OrderedSet([1, 3])
>>> OrderedSet([1, 2, 3]).difference()
OrderedSet([1, 2, 3])
"""
cls = self.__class__
if sets:
other = set.union(*map(set, sets))
items = (item for item in self if item not in other)
else:
items = self
return cls(items)
def issubset(self, other):
"""
Report whether another set contains this set.
Example:
>>> OrderedSet([1, 2, 3]).issubset({1, 2})
False
>>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
True
>>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
False
"""
if len(self) > len(other): # Fast check for obvious cases
return False
return all(item in other for item in self)
def issuperset(self, other):
"""
Report whether this set contains another set.
Example:
>>> OrderedSet([1, 2]).issuperset([1, 2, 3])
False
>>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
True
>>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
False
"""
if len(self) < len(other): # Fast check for obvious cases
return False
return all(item in self for item in other)
def symmetric_difference(self, other):
"""
Return the symmetric difference of two OrderedSets as a new set.
That is, the new set will contain all elements that are in exactly
one of the sets.
Their order will be preserved, with elements from `self` preceding
elements from `other`.
Example:
>>> this = OrderedSet([1, 4, 3, 5, 7])
>>> other = OrderedSet([9, 7, 1, 3, 2])
>>> this.symmetric_difference(other)
OrderedSet([4, 5, 9, 2])
"""
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
diff1 = cls(self).difference(other)
diff2 = cls(other).difference(self)
return diff1.union(diff2)
def _update_items(self, items):
"""
Replace the 'items' list of this OrderedSet with a new one, updating
self.map accordingly.
"""
self.items = items
self.map = {item: idx for (idx, item) in enumerate(items)}
def difference_update(self, *sets):
"""
Update this OrderedSet to remove items from one or more other sets.
Example:
>>> this = OrderedSet([1, 2, 3])
>>> this.difference_update(OrderedSet([2, 4]))
>>> print(this)
OrderedSet([1, 3])
>>> this = OrderedSet([1, 2, 3, 4, 5])
>>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
>>> print(this)
OrderedSet([3, 5])
"""
items_to_remove = set()
for other in sets:
items_to_remove |= set(other)
self._update_items([item for item in self.items if item not in items_to_remove])
def intersection_update(self, other):
"""
Update this OrderedSet to keep only items in another set, preserving
their order in this set.
Example:
>>> this = OrderedSet([1, 4, 3, 5, 7])
>>> other = OrderedSet([9, 7, 1, 3, 2])
>>> this.intersection_update(other)
>>> print(this)
OrderedSet([1, 3, 7])
"""
other = set(other)
self._update_items([item for item in self.items if item in other])
def symmetric_difference_update(self, other):
"""
Update this OrderedSet to remove items from another set, then
add items from the other set that were not present in this set.
Example:
>>> this = OrderedSet([1, 4, 3, 5, 7])
>>> other = OrderedSet([9, 7, 1, 3, 2])
>>> this.symmetric_difference_update(other)
>>> print(this)
OrderedSet([4, 5, 9, 2])
"""
items_to_add = [item for item in other if item not in self]
items_to_remove = set(other)
self._update_items(
[item for item in self.items if item not in items_to_remove] + items_to_add
)
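# Illustrative round-trip (a sketch, not part of the vendored module): the
# (None,) sentinel returned by __getstate__ keeps empty sets picklable.
import pickle as _pickle
assert _pickle.loads(_pickle.dumps(OrderedSet())) == OrderedSet()
assert list(_pickle.loads(_pickle.dumps(OrderedSet([2, 1])))) == [2, 1]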
| 15,130 |
Python
| 29.94274 | 88 | 0.514871 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/typing_extensions.py
|
import abc
import collections
import collections.abc
import operator
import sys
import typing
# After PEP 560, internal typing API was substantially reworked.
# This is especially important for Protocol class which uses internal APIs
# quite extensively.
PEP_560 = sys.version_info[:3] >= (3, 7, 0)
if PEP_560:
GenericMeta = type
else:
# 3.6
from typing import GenericMeta, _type_vars # noqa
# The two functions below are copies of typing internal helpers.
# They are needed by _ProtocolMeta
def _no_slots_copy(dct):
dict_copy = dict(dct)
if '__slots__' in dict_copy:
for slot in dict_copy['__slots__']:
dict_copy.pop(slot, None)
return dict_copy
def _check_generic(cls, parameters):
if not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
alen = len(parameters)
elen = len(cls.__parameters__)
if alen != elen:
raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
f" actual {alen}, expected {elen}")
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'ClassVar',
'Concatenate',
'Final',
'ParamSpec',
'Self',
'Type',
# ABCs (from collections.abc).
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'AsyncGenerator',
'AsyncContextManager',
'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsIndex',
# One-off things.
'Annotated',
'final',
'IntVar',
'Literal',
'NewType',
'overload',
'Protocol',
'runtime',
'runtime_checkable',
'Text',
'TypeAlias',
'TypeGuard',
'TYPE_CHECKING',
]
if PEP_560:
__all__.extend(["get_args", "get_origin", "get_type_hints"])
# 3.6.2+
if hasattr(typing, 'NoReturn'):
NoReturn = typing.NoReturn
# 3.6.0-3.6.1
else:
class _NoReturn(typing._FinalTypingBase, _root=True):
"""Special type indicating functions that never return.
Example::
from typing import NoReturn
def stop() -> NoReturn:
raise Exception('no way')
This type is invalid in other positions, e.g., ``List[NoReturn]``
will fail in static type checkers.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("NoReturn cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("NoReturn cannot be used with issubclass().")
NoReturn = _NoReturn(_root=True)
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
ClassVar = typing.ClassVar
# On older versions of typing there is an internal class named "Final".
# 3.8+
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
elif sys.version_info[:2] >= (3, 7):
class _FinalForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only single type')
return typing._GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
# 3.6
else:
class _Final(typing._FinalTypingBase, _root=True):
"""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
f'{cls.__name__[1:]} accepts only single type.'),
_root=True)
raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += f'[{typing._type_repr(self.__type__)}]'
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _Final):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
Final = _Final(_root=True)
# 3.8+
if hasattr(typing, 'final'):
final = typing.final
# 3.6-3.7
else:
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties.
"""
return f
def IntVar(name):
return typing.TypeVar(name)
# 3.8+:
if hasattr(typing, 'Literal'):
Literal = typing.Literal
# 3.7:
elif sys.version_info[:2] >= (3, 7):
class _LiteralForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return typing._GenericAlias(self, parameters)
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
# 3.6:
else:
class _Literal(typing._FinalTypingBase, _root=True):
"""A type that can be used to indicate to type checkers that the
corresponding value has a value literally equivalent to the
provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to the
value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime checking
verifying that the parameter is actually a value instead of a type.
"""
__slots__ = ('__values__',)
def __init__(self, values=None, **kwds):
self.__values__ = values
def __getitem__(self, values):
cls = type(self)
if self.__values__ is None:
if not isinstance(values, tuple):
values = (values,)
return cls(values, _root=True)
raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
def _eval_type(self, globalns, localns):
return self
def __repr__(self):
r = super().__repr__()
if self.__values__ is not None:
r += f'[{", ".join(map(typing._type_repr, self.__values__))}]'
return r
def __hash__(self):
return hash((type(self).__name__, self.__values__))
def __eq__(self, other):
if not isinstance(other, _Literal):
return NotImplemented
if self.__values__ is not None:
return self.__values__ == other.__values__
return self is other
Literal = _Literal(_root=True)
_overload_dummy = typing._overload_dummy # noqa
overload = typing.overload
# This is not a real generic class. Don't use outside annotations.
Type = typing.Type
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
class _ExtensionsGenericMeta(GenericMeta):
def __subclasscheck__(self, subclass):
"""This mimics a more modern GenericMeta.__subclasscheck__() logic
(that does not have problems with recursion) to work around interactions
between collections, typing, and typing_extensions on older
versions of Python, see https://github.com/python/typing/issues/501.
"""
if self.__origin__ is not None:
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
raise TypeError("Parameterized generics cannot be used with class "
"or instance checks")
return False
if not self.__extra__:
return super().__subclasscheck__(subclass)
res = self.__extra__.__subclasshook__(subclass)
if res is not NotImplemented:
return res
if self.__extra__ in subclass.__mro__:
return True
for scls in self.__extra__.__subclasses__():
if isinstance(scls, GenericMeta):
continue
if issubclass(subclass, scls):
return True
return False
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
# 3.6.1+
if hasattr(typing, 'Deque'):
Deque = typing.Deque
# 3.6.0
else:
class Deque(collections.deque, typing.MutableSequence[T],
metaclass=_ExtensionsGenericMeta,
extra=collections.deque):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is Deque:
return collections.deque(*args, **kwds)
return typing._generic_new(collections.deque, cls, *args, **kwds)
ContextManager = typing.ContextManager
# 3.6.2+
if hasattr(typing, 'AsyncContextManager'):
AsyncContextManager = typing.AsyncContextManager
# 3.6.0-3.6.1
else:
from _collections_abc import _check_methods as _check_methods_in_mro # noqa
class AsyncContextManager(typing.Generic[T_co]):
__slots__ = ()
async def __aenter__(self):
return self
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_value, traceback):
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncContextManager:
return _check_methods_in_mro(C, "__aenter__", "__aexit__")
return NotImplemented
DefaultDict = typing.DefaultDict
# 3.7.2+
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
# 3.7.0-3.7.2
elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
# 3.6
else:
class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.OrderedDict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is OrderedDict:
return collections.OrderedDict(*args, **kwds)
return typing._generic_new(collections.OrderedDict, cls, *args, **kwds)
# 3.6.2+
if hasattr(typing, 'Counter'):
Counter = typing.Counter
# 3.6.0-3.6.1
else:
class Counter(collections.Counter,
typing.Dict[T, int],
metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is Counter:
return collections.Counter(*args, **kwds)
return typing._generic_new(collections.Counter, cls, *args, **kwds)
# 3.6.1+
if hasattr(typing, 'ChainMap'):
ChainMap = typing.ChainMap
elif hasattr(collections, 'ChainMap'):
class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
metaclass=_ExtensionsGenericMeta,
extra=collections.ChainMap):
__slots__ = ()
def __new__(cls, *args, **kwds):
if cls._gorg is ChainMap:
return collections.ChainMap(*args, **kwds)
return typing._generic_new(collections.ChainMap, cls, *args, **kwds)
# 3.6.1+
if hasattr(typing, 'AsyncGenerator'):
AsyncGenerator = typing.AsyncGenerator
# 3.6.0
else:
class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
metaclass=_ExtensionsGenericMeta,
extra=collections.abc.AsyncGenerator):
__slots__ = ()
NewType = typing.NewType
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
def _gorg(cls):
"""This function exists for compatibility with old typing versions."""
assert isinstance(cls, GenericMeta)
if hasattr(cls, '_gorg'):
return cls._gorg
while cls.__origin__ is not None:
cls = cls.__origin__
return cls
_PROTO_WHITELIST = ['Callable', 'Awaitable',
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
'ContextManager', 'AsyncContextManager']
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
# 3.8+
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
elif PEP_560:
from typing import _collect_type_vars # noqa
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(abc.ABCMeta):
# This metaclass is a bit unfortunate and exists only because of the lack
# of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
        @typing_extensions.runtime act as simple-minded runtime protocols that check
        only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not typing.Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
msg = "Parameters to generic types must be types."
params = tuple(typing._type_check(p, msg) for p in params) # noqa
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, typing.TypeVar) for p in params):
i = 0
while isinstance(params[i], typing.TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
f" Parameter {i + 1} is {params[i]}")
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params)
return typing._GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
tvars = []
if '__orig_bases__' in cls.__dict__:
error = typing.Generic in cls.__orig_bases__
else:
error = typing.Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, typing._GenericAlias) and
base.__origin__ in (typing.Generic, Protocol)):
# for error messages
the_base = base.__origin__.__name__
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in {the_base}[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, typing.Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
f' protocols, got {repr(base)}')
cls.__init__ = _no_init
# 3.6
else:
from typing import _next_in_mro, _type_check # noqa
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(GenericMeta):
"""Internal metaclass for Protocol.
This exists so Protocol classes can be generic without deriving
from Generic.
"""
def __new__(cls, name, bases, namespace,
tvars=None, args=None, origin=None, extra=None, orig_bases=None):
# This is just a version copied from GenericMeta.__new__ that
# includes "Protocol" special treatment. (Comments removed for brevity.)
assert extra is None # Protocols should not have extra
if tvars is not None:
assert origin is not None
assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars
else:
tvars = _type_vars(bases)
gvars = None
for base in bases:
if base is typing.Generic:
raise TypeError("Cannot inherit from plain Generic")
if (isinstance(base, GenericMeta) and
base.__origin__ in (typing.Generic, Protocol)):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] or"
" Protocol[...] multiple times.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
s_args = ", ".join(str(g) for g in gvars)
cls_name = "Generic" if any(b.__origin__ is typing.Generic
for b in bases) else "Protocol"
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in {cls_name}[{s_args}]")
tvars = gvars
initial_bases = bases
if (extra is not None and type(extra) is abc.ABCMeta and
extra not in bases):
bases = (extra,) + bases
bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
for b in bases)
if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases):
bases = tuple(b for b in bases if b is not typing.Generic)
namespace.update({'__origin__': origin, '__extra__': extra})
self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
_root=True)
super(GenericMeta, self).__setattr__('_gorg',
self if not origin else
_gorg(origin))
self.__parameters__ = tvars
self.__args__ = tuple(... if a is typing._TypingEllipsis else
() if a is typing._TypingEmpty else
a for a in args) if args else None
self.__next_in_mro__ = _next_in_mro(self)
if orig_bases is None:
self.__orig_bases__ = initial_bases
elif origin is not None:
self._abc_registry = origin._abc_registry
self._abc_cache = origin._abc_cache
if hasattr(self, '_subs_tree'):
self.__tree_hash__ = (hash(self._subs_tree()) if origin else
super(GenericMeta, self).__hash__())
return self
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol or
isinstance(b, _ProtocolMeta) and
b.__origin__ is Protocol
for b in cls.__bases__)
if cls._is_protocol:
for base in cls.__mro__[1:]:
if not (base in (object, typing.Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, typing.TypingMeta) and base._is_protocol or
isinstance(base, GenericMeta) and
base.__origin__ is typing.Generic):
raise TypeError(f'Protocols can only inherit from other'
f' protocols, got {repr(base)}')
cls.__init__ = _no_init
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
def __instancecheck__(self, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(self, '_is_protocol', False) or
_is_callable_members_only(self)) and
issubclass(instance.__class__, self)):
return True
if self._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(self, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(self)):
return True
return super(GenericMeta, self).__instancecheck__(instance)
def __subclasscheck__(self, cls):
if self.__origin__ is not None:
if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
raise TypeError("Parameterized generics cannot be used with class "
"or instance checks")
return False
if (self.__dict__.get('_is_protocol', None) and
not self.__dict__.get('_is_runtime_protocol', None)):
if sys._getframe(1).f_globals['__name__'] in ['abc',
'functools',
'typing']:
return False
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if (self.__dict__.get('_is_runtime_protocol', None) and
not _is_callable_members_only(self)):
if sys._getframe(1).f_globals['__name__'] in ['abc',
'functools',
'typing']:
return super(GenericMeta, self).__subclasscheck__(cls)
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
return super(GenericMeta, self).__subclasscheck__(cls)
@typing._tp_cache
def __getitem__(self, params):
# We also need to copy this from GenericMeta.__getitem__ to get
# special treatment of "Protocol". (Comments removed for brevity.)
if not isinstance(params, tuple):
params = (params,)
if not params and _gorg(self) is not typing.Tuple:
raise TypeError(
f"Parameter list to {self.__qualname__}[...] cannot be empty")
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if self in (typing.Generic, Protocol):
if not all(isinstance(p, typing.TypeVar) for p in params):
raise TypeError(
f"Parameters to {repr(self)}[...] must all be type variables")
if len(set(params)) != len(params):
raise TypeError(
f"Parameters to {repr(self)}[...] must all be unique")
tvars = params
args = params
elif self in (typing.Tuple, typing.Callable):
tvars = _type_vars(params)
args = params
elif self.__origin__ in (typing.Generic, Protocol):
raise TypeError(f"Cannot subscript already-subscripted {repr(self)}")
else:
_check_generic(self, params)
tvars = _type_vars(params)
args = params
prepend = (self,) if self.__origin__ is None else ()
return self.__class__(self.__name__,
prepend + self.__bases__,
_no_slots_copy(self.__dict__),
tvars=tvars,
args=args,
origin=self,
extra=self.__extra__,
orig_bases=self.__orig_bases__)
class Protocol(metaclass=_ProtocolMeta):
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
    @typing_extensions.runtime act as simple-minded runtime protocols that check
    only the presence of given attributes, ignoring their type signatures.
    Protocol classes can be generic; they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if _gorg(cls) is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can be used only as a base class")
return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds)
# 3.8+
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
# 3.6-3.7
else:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol, so that it
can be used with isinstance() and issubclass(). Raise TypeError
if applied to a non-protocol class.
This allows a simple-minded structural check very similar to the
one-offs in collections.abc such as Hashable.
"""
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
f' got {cls!r}')
cls._is_runtime_protocol = True
return cls
# Exists for backwards compatibility.
runtime = runtime_checkable
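# Illustrative usage sketch (not part of the vendored file): a runtime-checkable
# Protocol makes isinstance() perform a purely structural check -- only the
# presence of the listed attributes is verified, never their signatures.
# The helper and class names below are hypothetical and safe to delete.
def _demo_runtime_protocol():
    import io
    @runtime_checkable
    class _Closable(Protocol):
        def close(self): ...
    assert isinstance(io.StringIO(), _Closable)
    assert not isinstance(object(), _Closable)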
# 3.8+
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
# 3.6-3.7
else:
@runtime_checkable
class SupportsIndex(Protocol):
__slots__ = ()
@abc.abstractmethod
def __index__(self) -> int:
pass
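# Quick structural check (illustrative, not part of the vendored file): any
# object implementing __index__, such as int, satisfies SupportsIndex at
# runtime, while float does not. The helper name is hypothetical.
def _demo_supports_index():
    assert isinstance(3, SupportsIndex)
    assert not isinstance(3.5, SupportsIndex)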
if sys.version_info >= (3, 9, 2):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
TypedDict = typing.TypedDict
else:
def _check_fails(cls, other):
try:
if sys._getframe(1).f_globals['__name__'] not in ['abc',
'functools',
'typing']:
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
except (AttributeError, ValueError):
pass
return False
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
return dict(*args, **kwargs)
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
def _typeddict_new(*args, total=True, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
if args:
            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
elif '_typename' in kwargs:
typename = kwargs.pop('_typename')
import warnings
warnings.warn("Passing '_typename' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError("TypedDict.__new__() missing 1 required positional "
"argument: '_typename'")
if args:
try:
                fields, = args  # allow the "_fields" keyword to be passed
except ValueError:
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
f'positional arguments but {len(args) + 2} '
'were given')
elif '_fields' in kwargs and len(kwargs) == 1:
fields = kwargs.pop('_fields')
import warnings
warnings.warn("Passing '_fields' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
fields = None
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total)
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
class _TypedDictMeta(type):
def __init__(cls, name, bases, ns, total=True):
super().__init__(name, bases, ns)
def __new__(cls, name, bases, ns, total=True):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
tp_dict = super().__new__(cls, name, (dict,), ns)
annotations = {}
own_annotations = ns.get('__annotations__', {})
own_annotation_keys = set(own_annotations.keys())
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
if total:
required_keys.update(own_annotation_keys)
else:
optional_keys.update(own_annotation_keys)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
get_type_hints = typing.get_type_hints
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
elif PEP_560:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
f"{', '.join(repr(a) for a in self.__metadata__)}]")
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type (and will be in
the __origin__ field), the remaining arguments are kept as a tuple in
        the __metadata__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
msg = "Annotated[t, ...]: t must be a type."
origin = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
f"Cannot subclass {cls.__module__}.Annotated"
)
def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
res = t.copy_with(stripped_args)
res._special = t._special
return res
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_annotations(t) for k, t in hint.items()}
# 3.6
else:
def _is_dunder(name):
"""Returns True if name is a __dunder_variable_name__."""
return len(name) > 4 and name.startswith('__') and name.endswith('__')
# Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
    # checks, argument expansion etc. are done on the _subs_tree. As a result we
# can't provide a get_type_hints function that strips out annotations.
class AnnotatedMeta(typing.GenericMeta):
"""Metaclass for Annotated"""
def __new__(cls, name, bases, namespace, **kwargs):
if any(b is not object for b in bases):
raise TypeError("Cannot subclass " + str(Annotated))
return super().__new__(cls, name, bases, namespace, **kwargs)
@property
def __metadata__(self):
return self._subs_tree()[2]
def _tree_repr(self, tree):
cls, origin, metadata = tree
if not isinstance(origin, tuple):
tp_repr = typing._type_repr(origin)
else:
tp_repr = origin[0]._tree_repr(origin)
metadata_reprs = ", ".join(repr(arg) for arg in metadata)
return f'{cls}[{tp_repr}, {metadata_reprs}]'
def _subs_tree(self, tvars=None, args=None): # noqa
if self is Annotated:
return Annotated
res = super()._subs_tree(tvars=tvars, args=args)
# Flatten nested Annotated
if isinstance(res[1], tuple) and res[1][0] is Annotated:
sub_tp = res[1][1]
sub_annot = res[1][2]
return (Annotated, sub_tp, sub_annot + res[2])
return res
def _get_cons(self):
"""Return the class used to create instance of this type."""
if self.__origin__ is None:
raise TypeError("Cannot get the underlying type of a "
"non-specialized Annotated type.")
tree = self._subs_tree()
while isinstance(tree, tuple) and tree[0] is Annotated:
tree = tree[1]
if isinstance(tree, tuple):
return tree[0]
else:
return tree
@typing._tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
if self.__origin__ is not None: # specializing an instantiated type
return super().__getitem__(params)
elif not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be instantiated "
"with at least two arguments (a type and an "
"annotation).")
else:
msg = "Annotated[t, ...]: t must be a type."
tp = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return self.__class__(
self.__name__,
self.__bases__,
_no_slots_copy(self.__dict__),
tvars=_type_vars((tp,)),
# Metadata is a tuple so it won't be touched by _replace_args et al.
args=(tp, metadata),
origin=self,
)
def __call__(self, *args, **kwargs):
cons = self._get_cons()
result = cons(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
def __getattr__(self, attr):
# For simplicity we just don't relay all dunder names
if self.__origin__ is not None and not _is_dunder(attr):
return getattr(self._get_cons(), attr)
raise AttributeError(attr)
def __setattr__(self, attr, value):
if _is_dunder(attr) or attr.startswith('_abc_'):
super().__setattr__(attr, value)
elif self.__origin__ is None:
raise AttributeError(attr)
else:
setattr(self._get_cons(), attr, value)
def __instancecheck__(self, obj):
raise TypeError("Annotated cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Annotated cannot be used with issubclass().")
class Annotated(metaclass=AnnotatedMeta):
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type, the remaining
arguments are kept as a tuple in the __metadata__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
elif PEP_560:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias
except ImportError:
GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (typing._GenericAlias, GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
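# Sanity sketch (illustrative, not part of the vendored file): unlike the
# 3.8/3.9 stdlib helpers, the versions above understand Annotated aliases.
# The helper name is hypothetical and the function is never called on import.
def _demo_annotated_introspection():
    import typing as _t
    hinted = Annotated[_t.List[int], 'metadata']
    assert get_origin(hinted) is Annotated
    assert get_args(hinted) == (_t.List[int], 'metadata')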
# 3.10+
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeAliasForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
# 3.7-3.8
elif sys.version_info[:2] >= (3, 7):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
# 3.6
else:
class _TypeAliasMeta(typing.TypingMeta):
"""Metaclass for TypeAlias"""
def __repr__(self):
return 'typing_extensions.TypeAlias'
class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("TypeAlias cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("TypeAlias cannot be used with issubclass().")
def __repr__(self):
return 'typing_extensions.TypeAlias'
TypeAlias = _TypeAliasBase(_root=True)
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.6-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
# 3.10+
if hasattr(typing, 'ParamSpec'):
ParamSpec = typing.ParamSpec
# 3.6-3.9
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
        or as the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
           P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
super().__init__([self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
if not PEP_560:
# Only needed in 3.6.
def _get_type_vars(self, tvars):
if self not in tvars:
tvars.append(self)
# 3.6-3.9
if not hasattr(typing, 'Concatenate'):
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
if PEP_560:
__class__ = typing._GenericAlias
else:
__class__ = typing._TypingBase
# Flag in 3.8.
_special = False
# Attribute in 3.6 and earlier.
_gorg = typing.Generic
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return (f'{_type_repr(self.__origin__)}'
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
)
if not PEP_560:
# Only required in 3.6.
def _get_type_vars(self, tvars):
if self.__origin__ and self.__parameters__:
typing._get_type_vars(self.__parameters__, tvars)
# 3.6-3.9
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
# 3.10+
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_TypeAliasForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.7-8
elif sys.version_info[:2] >= (3, 7):
class _ConcatenateForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.6
else:
class _ConcatenateAliasMeta(typing.TypingMeta):
"""Metaclass for Concatenate."""
def __repr__(self):
return 'typing_extensions.Concatenate'
class _ConcatenateAliasBase(typing._FinalTypingBase,
metaclass=_ConcatenateAliasMeta,
_root=True):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("Concatenate cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Concatenate cannot be used with issubclass().")
def __repr__(self):
return 'typing_extensions.Concatenate'
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateAliasBase(_root=True)
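# Runtime sketch (illustrative, not part of the vendored file): the backported
# Concatenate records its parameters, so an alias such as the one used in
# Callable[Concatenate[int, P], int] can still be introspected afterwards.
# The helper name is hypothetical.
def _demo_concatenate():
    P = ParamSpec('P')
    alias = Concatenate[int, P]
    assert get_args(alias) == (int, P)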
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeGuardForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only single type.')
return typing._GenericAlias(self, (item,))
# 3.7-3.8
elif sys.version_info[:2] >= (3, 7):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# 3.6
else:
class _TypeGuard(typing._FinalTypingBase, _root=True):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
f'{cls.__name__[1:]} accepts only a single type.'),
_root=True)
raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += f'[{typing._type_repr(self.__type__)}]'
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _TypeGuard):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
TypeGuard = _TypeGuard(_root=True)
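# Minimal user-defined type guard (illustrative, not part of the vendored
# file). At runtime such a function simply returns a bool; the TypeGuard
# return annotation only matters to static checkers. The helper name is
# hypothetical.
def _demo_type_guard():
    def _is_str_list(val) -> 'TypeGuard[list]':
        return isinstance(val, list) and all(isinstance(x, str) for x in val)
    assert _is_str_list(['a', 'b'])
    assert not _is_str_list(['a', 1])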
if hasattr(typing, "Self"):
Self = typing.Self
elif sys.version_info[:2] >= (3, 7):
# Vendored from cpython typing._SpecialFrom
class _SpecialForm(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
else:
class _Self(typing._FinalTypingBase, _root=True):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass().")
Self = _Self(_root=True)
if hasattr(typing, 'Required'):
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only single type')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only single type')
return typing._GenericAlias(self, (item,))
elif sys.version_info[:2] >= (3, 7):
class _RequiredForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
'{} accepts only single type'.format(self._name))
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
else:
# NOTE: Modeled after _Final's implementation when _FinalTypingBase available
class _MaybeRequired(typing._FinalTypingBase, _root=True):
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(typing._type_check(item,
'{} accepts only single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = typing._eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(typing._type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
class _Required(_MaybeRequired, _root=True):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
class _NotRequired(_MaybeRequired, _root=True):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
Required = _Required(_root=True)
NotRequired = _NotRequired(_root=True)
| 87,149 |
Python
| 36.940792 | 90 | 0.542014 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/more_itertools/recipes.py
|
"""Imported from the recipes section of the itertools documentation.
All functions taken from the recipes section of the itertools library docs
[1]_.
Some backward-compatible usability improvements have been made.
.. [1] http://docs.python.org/library/itertools.html#recipes
"""
import warnings
from collections import deque
from itertools import (
chain,
combinations,
count,
cycle,
groupby,
islice,
repeat,
starmap,
tee,
zip_longest,
)
import operator
from random import randrange, sample, choice
__all__ = [
'all_equal',
'consume',
'convolve',
'dotproduct',
'first_true',
'flatten',
'grouper',
'iter_except',
'ncycles',
'nth',
'nth_combination',
'padnone',
'pad_none',
'pairwise',
'partition',
'powerset',
'prepend',
'quantify',
'random_combination_with_replacement',
'random_combination',
'random_permutation',
'random_product',
'repeatfunc',
'roundrobin',
'tabulate',
'tail',
'take',
'unique_everseen',
'unique_justseen',
]
def take(n, iterable):
"""Return first *n* items of the iterable as a list.
>>> take(3, range(10))
[0, 1, 2]
If there are fewer than *n* items in the iterable, all of them are
returned.
>>> take(10, range(3))
[0, 1, 2]
"""
return list(islice(iterable, n))
def tabulate(function, start=0):
"""Return an iterator over the results of ``func(start)``,
``func(start + 1)``, ``func(start + 2)``...
*func* should be a function that accepts one integer argument.
If *start* is not specified it defaults to 0. It will be incremented each
time the iterator is advanced.
>>> square = lambda x: x ** 2
>>> iterator = tabulate(square, -3)
>>> take(4, iterator)
[9, 4, 1, 0]
"""
return map(function, count(start))
def tail(n, iterable):
"""Return an iterator over the last *n* items of *iterable*.
>>> t = tail(3, 'ABCDEFG')
>>> list(t)
['E', 'F', 'G']
"""
return iter(deque(iterable, maxlen=n))
def consume(iterator, n=None):
"""Advance *iterable* by *n* steps. If *n* is ``None``, consume it
entirely.
Efficiently exhausts an iterator without returning values. Defaults to
consuming the whole iterator, but an optional second argument may be
provided to limit consumption.
>>> i = (x for x in range(10))
>>> next(i)
0
>>> consume(i, 3)
>>> next(i)
4
>>> consume(i)
>>> next(i)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
If the iterator has fewer items remaining than the provided limit, the
whole iterator will be consumed.
>>> i = (x for x in range(3))
>>> consume(i, 5)
>>> next(i)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
def nth(iterable, n, default=None):
"""Returns the nth item or a default value.
>>> l = range(10)
>>> nth(l, 3)
3
>>> nth(l, 20, "zebra")
'zebra'
"""
return next(islice(iterable, n, None), default)
def all_equal(iterable):
"""
Returns ``True`` if all the elements are equal to each other.
>>> all_equal('aaaa')
True
>>> all_equal('aaab')
False
"""
g = groupby(iterable)
return next(g, True) and not next(g, False)
def quantify(iterable, pred=bool):
"""Return the how many times the predicate is true.
>>> quantify([True, False, True])
2
"""
return sum(map(pred, iterable))
def pad_none(iterable):
"""Returns the sequence of elements and then returns ``None`` indefinitely.
>>> take(5, pad_none(range(3)))
[0, 1, 2, None, None]
Useful for emulating the behavior of the built-in :func:`map` function.
See also :func:`padded`.
"""
return chain(iterable, repeat(None))
padnone = pad_none
def ncycles(iterable, n):
"""Returns the sequence elements *n* times
>>> list(ncycles(["a", "b"], 3))
['a', 'b', 'a', 'b', 'a', 'b']
"""
return chain.from_iterable(repeat(tuple(iterable), n))
def dotproduct(vec1, vec2):
"""Returns the dot product of the two iterables.
>>> dotproduct([10, 10], [20, 20])
400
"""
return sum(map(operator.mul, vec1, vec2))
def flatten(listOfLists):
"""Return an iterator flattening one level of nesting in a list of lists.
>>> list(flatten([[0, 1], [2, 3]]))
[0, 1, 2, 3]
See also :func:`collapse`, which can flatten multiple levels of nesting.
"""
return chain.from_iterable(listOfLists)
def repeatfunc(func, times=None, *args):
"""Call *func* with *args* repeatedly, returning an iterable over the
results.
If *times* is specified, the iterable will terminate after that many
repetitions:
>>> from operator import add
>>> times = 4
>>> args = 3, 5
>>> list(repeatfunc(add, times, *args))
[8, 8, 8, 8]
If *times* is ``None`` the iterable will not terminate:
>>> from random import randrange
>>> times = None
>>> args = 1, 11
>>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
[2, 4, 8, 1, 8, 4]
"""
if times is None:
return starmap(func, repeat(args))
return starmap(func, repeat(args, times))
def _pairwise(iterable):
"""Returns an iterator of paired items, overlapping, from the original
>>> take(4, pairwise(count()))
[(0, 1), (1, 2), (2, 3), (3, 4)]
On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
"""
a, b = tee(iterable)
next(b, None)
yield from zip(a, b)
try:
from itertools import pairwise as itertools_pairwise
except ImportError:
pairwise = _pairwise
else:
def pairwise(iterable):
yield from itertools_pairwise(iterable)
pairwise.__doc__ = _pairwise.__doc__
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks.
>>> list(grouper('ABCDEFG', 3, 'x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
"""
if isinstance(iterable, int):
warnings.warn(
"grouper expects iterable as first parameter", DeprecationWarning
)
n, iterable = iterable, n
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def roundrobin(*iterables):
"""Yields an item from each iterable, alternating between them.
>>> list(roundrobin('ABC', 'D', 'EF'))
['A', 'D', 'E', 'B', 'F', 'C']
This function produces the same output as :func:`interleave_longest`, but
may perform better for some inputs (in particular when the number of
iterables is small).
"""
    # Recipe credited to George Sakkis
    pending = len(iterables)
    # Bind each iterator's __next__ once and cycle through them in turn.
    nexts = cycle(iter(it).__next__ for it in iterables)
    while pending:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            # One input is exhausted: rebuild the cycle from the next
            # ``pending`` entries so the dead iterator is dropped.
            pending -= 1
            nexts = cycle(islice(nexts, pending))
def partition(pred, iterable):
"""
Returns a 2-tuple of iterables derived from the input iterable.
The first yields the items that have ``pred(item) == False``.
The second yields the items that have ``pred(item) == True``.
>>> is_odd = lambda x: x % 2 != 0
>>> iterable = range(10)
>>> even_items, odd_items = partition(is_odd, iterable)
>>> list(even_items), list(odd_items)
([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
If *pred* is None, :func:`bool` is used.
>>> iterable = [0, 1, False, True, '', ' ']
>>> false_items, true_items = partition(None, iterable)
>>> list(false_items), list(true_items)
([0, False, ''], [1, True, ' '])
"""
if pred is None:
pred = bool
evaluations = ((pred(x), x) for x in iterable)
t1, t2 = tee(evaluations)
return (
(x for (cond, x) in t1 if not cond),
(x for (cond, x) in t2 if cond),
)
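# Design-note sketch (illustrative, not part of the vendored recipes): because
# the predicate results are computed once and shared through tee() above,
# *pred* runs a single time per element even though two streams are produced.
# The helper name is hypothetical.
def _demo_partition_single_evaluation():
    calls = []
    def is_odd(x):
        calls.append(x)
        return x % 2 != 0
    evens, odds = partition(is_odd, range(4))
    assert (list(evens), list(odds)) == ([0, 2], [1, 3])
    assert calls == [0, 1, 2, 3]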
def powerset(iterable):
"""Yields all possible subsets of the iterable.
>>> list(powerset([1, 2, 3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
:func:`powerset` will operate on iterables that aren't :class:`set`
instances, so repeated elements in the input will produce repeated elements
in the output. Use :func:`unique_everseen` on the input to avoid generating
duplicates:
>>> seq = [1, 1, 0]
>>> list(powerset(seq))
[(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
>>> from more_itertools import unique_everseen
>>> list(powerset(unique_everseen(seq)))
[(), (1,), (0,), (1, 0)]
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
def unique_everseen(iterable, key=None):
"""
Yield unique elements, preserving order.
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
Sequences with a mix of hashable and unhashable items can be used.
The function will be slower (i.e., `O(n^2)`) for unhashable items.
Remember that ``list`` objects are unhashable - you can use the *key*
parameter to transform the list to a tuple (which is hashable) to
avoid a slowdown.
>>> iterable = ([1, 2], [2, 3], [1, 2])
>>> list(unique_everseen(iterable)) # Slow
[[1, 2], [2, 3]]
>>> list(unique_everseen(iterable, key=tuple)) # Faster
[[1, 2], [2, 3]]
    Similarly, you may want to convert unhashable ``set`` objects with
``key=frozenset``. For ``dict`` objects,
``key=lambda x: frozenset(x.items())`` can be used.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
use_key = key is not None
for element in iterable:
k = key(element) if use_key else element
try:
if k not in seenset:
seenset_add(k)
yield element
except TypeError:
if k not in seenlist:
seenlist_add(k)
yield element
def unique_justseen(iterable, key=None):
"""Yields elements in order, ignoring serial duplicates
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
def iter_except(func, exception, first=None):
"""Yields results from a function repeatedly until an exception is raised.
Converts a call-until-exception interface to an iterator interface.
Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
to end the loop.
>>> l = [0, 1, 2]
>>> list(iter_except(l.pop, IndexError))
[2, 1, 0]
"""
try:
if first is not None:
yield first()
while 1:
yield func()
except exception:
pass
def first_true(iterable, default=None, pred=None):
"""
Returns the first true value in the iterable.
If no true value is found, returns *default*
    If *pred* is not None, returns the first item for which
    ``pred(item) == True``.
>>> first_true(range(10))
1
>>> first_true(range(10), pred=lambda x: x > 5)
6
>>> first_true(range(10), default='missing', pred=lambda x: x > 9)
'missing'
"""
return next(filter(pred, iterable), default)
def random_product(*args, repeat=1):
"""Draw an item at random from each of the input iterables.
>>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
('c', 3, 'Z')
If *repeat* is provided as a keyword argument, that many items will be
drawn from each iterable.
>>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
('a', 2, 'd', 3)
    This is equivalent to taking a random selection from
    ``itertools.product(*args, **kwargs)``.
"""
pools = [tuple(pool) for pool in args] * repeat
return tuple(choice(pool) for pool in pools)
def random_permutation(iterable, r=None):
"""Return a random *r* length permutation of the elements in *iterable*.
If *r* is not specified or is ``None``, then *r* defaults to the length of
*iterable*.
>>> random_permutation(range(5)) # doctest:+SKIP
(3, 4, 0, 1, 2)
    This is equivalent to taking a random selection from
    ``itertools.permutations(iterable, r)``.
"""
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(sample(pool, r))
def random_combination(iterable, r):
"""Return a random *r* length subsequence of the elements in *iterable*.
>>> random_combination(range(5), 3) # doctest:+SKIP
(2, 3, 4)
    This is equivalent to taking a random selection from
    ``itertools.combinations(iterable, r)``.
"""
pool = tuple(iterable)
n = len(pool)
indices = sorted(sample(range(n), r))
return tuple(pool[i] for i in indices)
def random_combination_with_replacement(iterable, r):
"""Return a random *r* length subsequence of elements in *iterable*,
allowing individual elements to be repeated.
>>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
(0, 0, 1, 2, 2)
    This is equivalent to taking a random selection from
    ``itertools.combinations_with_replacement(iterable, r)``.
"""
pool = tuple(iterable)
n = len(pool)
indices = sorted(randrange(n) for i in range(r))
return tuple(pool[i] for i in indices)
def nth_combination(iterable, r, index):
"""Equivalent to ``list(combinations(iterable, r))[index]``.
The subsequences of *iterable* that are of length *r* can be ordered
lexicographically. :func:`nth_combination` computes the subsequence at
sort position *index* directly, without computing the previous
subsequences.
>>> nth_combination(range(5), 3, 5)
(0, 3, 4)
    ``ValueError`` will be raised if *r* is negative or greater than the length
of *iterable*.
``IndexError`` will be raised if the given *index* is invalid.
"""
pool = tuple(iterable)
n = len(pool)
if (r < 0) or (r > n):
raise ValueError
c = 1
k = min(r, n - r)
for i in range(1, k + 1):
c = c * (n - k + i) // i
if index < 0:
index += c
if (index < 0) or (index >= c):
raise IndexError
result = []
while r:
c, n, r = c * r // n, n - 1, r - 1
while index >= c:
index -= c
c, n = c * (n - r) // n, n - 1
result.append(pool[-1 - n])
return tuple(result)
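# Worked equivalence check (illustrative, not part of the vendored recipes):
# the loop above walks the combinatorial number system directly, so the result
# matches materializing every combination and indexing into the list.
# The helper name is hypothetical.
def _demo_nth_combination():
    assert nth_combination(range(5), 3, 5) == list(combinations(range(5), 3))[5]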
def prepend(value, iterator):
"""Yield *value*, followed by the elements in *iterator*.
>>> value = '0'
>>> iterator = ['1', '2', '3']
>>> list(prepend(value, iterator))
['0', '1', '2', '3']
To prepend multiple values, see :func:`itertools.chain`
or :func:`value_chain`.
"""
return chain([value], iterator)
def convolve(signal, kernel):
"""Convolve the iterable *signal* with the iterable *kernel*.
>>> signal = (1, 2, 3, 4, 5)
>>> kernel = [3, 2, 1]
>>> list(convolve(signal, kernel))
[3, 8, 14, 20, 26, 14, 5]
Note: the input arguments are not interchangeable, as the *kernel*
is immediately consumed and stored.
"""
kernel = tuple(kernel)[::-1]
n = len(kernel)
window = deque([0], maxlen=n) * n
for x in chain(signal, repeat(0, n - 1)):
window.append(x)
yield sum(map(operator.mul, kernel, window))
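# Application sketch (illustrative, not part of the vendored recipes): an
# averaging kernel turns convolve() into a running mean; the ends taper off
# because the window slides past both edges of the signal.
# The helper name is hypothetical.
def _demo_convolve_moving_average():
    signal = [1.0, 2.0, 3.0, 4.0]
    kernel = [0.5, 0.5]  # two-point moving average
    assert list(convolve(signal, kernel)) == [0.5, 1.5, 2.5, 3.5, 2.0]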
| 16,256 |
Python
| 25.178744 | 79 | 0.577325 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/more_itertools/__init__.py
|
from .more import * # noqa
from .recipes import * # noqa
__version__ = '8.8.0'
| 82 |
Python
| 15.599997 | 30 | 0.597561 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/more_itertools/more.py
|
import warnings
from collections import Counter, defaultdict, deque, abc
from collections.abc import Sequence
from functools import partial, reduce, wraps
from heapq import merge, heapify, heapreplace, heappop
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
from math import exp, factorial, floor, log
from queue import Empty, Queue
from random import random, randrange, uniform
from operator import itemgetter, mul, sub, gt, lt
from sys import hexversion, maxsize
from time import monotonic
from .recipes import (
consume,
flatten,
pairwise,
powerset,
take,
unique_everseen,
)
__all__ = [
'AbortThread',
'adjacent',
'always_iterable',
'always_reversible',
'bucket',
'callback_iter',
'chunked',
'circular_shifts',
'collapse',
'collate',
'consecutive_groups',
'consumer',
'countable',
'count_cycle',
'mark_ends',
'difference',
'distinct_combinations',
'distinct_permutations',
'distribute',
'divide',
'exactly_n',
'filter_except',
'first',
'groupby_transform',
'ilen',
'interleave_longest',
'interleave',
'intersperse',
'islice_extended',
'iterate',
'ichunked',
'is_sorted',
'last',
'locate',
'lstrip',
'make_decorator',
'map_except',
'map_reduce',
'nth_or_last',
'nth_permutation',
'nth_product',
'numeric_range',
'one',
'only',
'padded',
'partitions',
'set_partitions',
'peekable',
'repeat_last',
'replace',
'rlocate',
'rstrip',
'run_length',
'sample',
'seekable',
'SequenceView',
'side_effect',
'sliced',
'sort_together',
'split_at',
'split_after',
'split_before',
'split_when',
'split_into',
'spy',
'stagger',
'strip',
'substrings',
'substrings_indexes',
'time_limited',
'unique_to_each',
'unzip',
'windowed',
'with_iter',
'UnequalIterablesError',
'zip_equal',
'zip_offset',
'windowed_complete',
'all_unique',
'value_chain',
'product_index',
'combination_index',
'permutation_index',
]
_marker = object()
def chunked(iterable, n, strict=False):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
    By default, the last yielded list will have fewer than *n* elements
if the length of *iterable* is not divisible by *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
If the length of *iterable* is not divisible by *n* and *strict* is
``True``, then ``ValueError`` will be raised before the last
list is yielded.
"""
iterator = iter(partial(take, n, iter(iterable)), [])
if strict:
def ret():
for chunk in iterator:
if len(chunk) != n:
raise ValueError('iterable is not divisible by n.')
yield chunk
return iter(ret())
else:
return iterator
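# A brief sketch of the *strict* behavior (illustrative only): the error is
# raised lazily, when the short trailing chunk is reached during iteration.
#
#     >>> list(chunked([1, 2, 3, 4], 2, strict=True))
#     [[1, 2], [3, 4]]
#     >>> list(chunked([1, 2, 3], 2, strict=True))
#     Traceback (most recent call last):
#     ...
#     ValueError: iterable is not divisible by n.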
def first(iterable, default=_marker):
"""Return the first item of *iterable*, or *default* if *iterable* is
empty.
>>> first([0, 1, 2, 3])
0
>>> first([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
:func:`first` is useful when you have a generator of expensive-to-retrieve
values and want any arbitrary one. It is marginally shorter than
``next(iter(iterable), default)``.
"""
try:
return next(iter(iterable))
except StopIteration as e:
if default is _marker:
raise ValueError(
'first() was called on an empty iterable, and no '
'default value was provided.'
) from e
return default
def last(iterable, default=_marker):
"""Return the last item of *iterable*, or *default* if *iterable* is
empty.
>>> last([0, 1, 2, 3])
3
>>> last([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
try:
if isinstance(iterable, Sequence):
return iterable[-1]
# Work around https://bugs.python.org/issue38525
elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
return next(reversed(iterable))
else:
return deque(iterable, maxlen=1)[-1]
except (IndexError, TypeError, StopIteration):
if default is _marker:
raise ValueError(
'last() was called on an empty iterable, and no default was '
'provided.'
)
return default
def nth_or_last(iterable, n, default=_marker):
"""Return the nth or the last item of *iterable*,
or *default* if *iterable* is empty.
>>> nth_or_last([0, 1, 2, 3], 2)
2
>>> nth_or_last([0, 1], 2)
1
>>> nth_or_last([], 0, 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
return last(islice(iterable, n + 1), default=default)
class peekable:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
>>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
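# An illustrative sketch (not part of the original source): indexing and
# slicing only fill the cache; they do not consume items from the caller's
# point of view, so iteration still starts at the front.
#
#     >>> from itertools import count
#     >>> p = peekable(count())
#     >>> p[3]
#     3
#     >>> p[1:4]
#     [1, 2, 3]
#     >>> next(p)
#     0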
def collate(*iterables, **kwargs):
"""Return a sorted merge of the items from each of several already-sorted
*iterables*.
>>> list(collate('ACDZ', 'AZ', 'JKL'))
['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
Works lazily, keeping only the next value from each iterable in memory. Use
    :func:`collate` to, for example, perform an n-way mergesort of items that
don't fit in memory.
If a *key* function is specified, the iterables will be sorted according
to its result:
>>> key = lambda s: int(s) # Sort by numeric value, not by string
>>> list(collate(['1', '10'], ['2', '11'], key=key))
['1', '2', '10', '11']
If the *iterables* are sorted in descending order, set *reverse* to
``True``:
>>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
[5, 4, 3, 2, 1, 0]
If the elements of the passed-in iterables are out of order, you might get
unexpected results.
On Python 3.5+, this function is an alias for :func:`heapq.merge`.
"""
warnings.warn(
"collate is no longer part of more_itertools, use heapq.merge",
DeprecationWarning,
)
return merge(*iterables, **kwargs)
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# This approach was selected because benchmarks showed it's likely the
# fastest of the known implementations at the time of writing.
# See GitHub tracker: #236, #230.
counter = count()
deque(zip(iterable, counter), maxlen=0)
return next(counter)
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: too few items in iterable (expected 1)
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
first_value = next(it)
except StopIteration as e:
raise (
too_short or ValueError('too few items in iterable (expected 1)')
) from e
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def distinct_permutations(iterable, r=None):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
If *r* is given, only the *r*-length permutations are yielded.
>>> sorted(distinct_permutations([1, 0, 1], r=2))
[(0, 1), (1, 0), (1, 1)]
>>> sorted(distinct_permutations(range(3), r=2))
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
"""
# Algorithm: https://w.wiki/Qai
def _full(A):
while True:
# Yield the permutation we have
yield tuple(A)
# Find the largest index i such that A[i] < A[i + 1]
for i in range(size - 2, -1, -1):
if A[i] < A[i + 1]:
break
# If no such index exists, this permutation is the last one
else:
return
            # Find the largest index j greater than i such that A[i] < A[j]
for j in range(size - 1, i, -1):
if A[i] < A[j]:
break
# Swap the value of A[i] with that of A[j], then reverse the
# sequence from A[i + 1] to form the new permutation
A[i], A[j] = A[j], A[i]
A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
# Algorithm: modified from the above
def _partial(A, r):
# Split A into the first r items and the last r items
head, tail = A[:r], A[r:]
right_head_indexes = range(r - 1, -1, -1)
left_tail_indexes = range(len(tail))
while True:
# Yield the permutation we have
yield tuple(head)
# Starting from the right, find the first index of the head with
# value smaller than the maximum value of the tail - call it i.
pivot = tail[-1]
for i in right_head_indexes:
if head[i] < pivot:
break
pivot = head[i]
else:
return
# Starting from the left, find the first value of the tail
# with a value greater than head[i] and swap.
for j in left_tail_indexes:
if tail[j] > head[i]:
head[i], tail[j] = tail[j], head[i]
break
# If we didn't find one, start from the right and find the first
# index of the head with a value greater than head[i] and swap.
else:
for j in right_head_indexes:
if head[j] > head[i]:
head[i], head[j] = head[j], head[i]
break
# Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
tail += head[: i - r : -1] # head[i + 1:][::-1]
i += 1
head[i:], tail[:] = tail[: r - i], tail[r - i :]
items = sorted(iterable)
size = len(items)
if r is None:
r = size
if 0 < r <= size:
return _full(items) if (r == size) else _partial(items, r)
return iter(() if r else ((),))
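# A small check of the counting formula above (illustrative only): for
# 'aab', n = 3 with item counts 2 and 1, so 3! / (2! * 1!) = 3 permutations.
#
#     >>> len(list(distinct_permutations('aab')))
#     3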
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
# interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2...
# islice(..., 1, None) -> x_0, e, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values:
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
window = deque(maxlen=n)
i = n
for _ in map(window.append, seq):
i -= 1
if not i:
i = step
yield tuple(window)
size = len(window)
if size < n:
yield tuple(chain(window, repeat(fillvalue, n - size)))
elif 0 < i < min(step, n):
window += (fillvalue,) * i
yield tuple(window)
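# A common application sketch (illustrative only): rolling aggregates such
# as pairwise sums fall out of mapping over the windows.
#
#     >>> [sum(w) for w in windowed([1, 2, 3, 4, 5], 2)]
#     [3, 5, 7, 9]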
def substrings(iterable):
"""Yield all of the substrings of *iterable*.
>>> [''.join(s) for s in substrings('more')]
['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
Note that non-string iterables can also be subdivided.
>>> list(substrings([0, 1, 2]))
[(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
"""
# The length-1 substrings
seq = []
for item in iter(iterable):
seq.append(item)
yield (item,)
seq = tuple(seq)
item_count = len(seq)
# And the rest
for n in range(2, item_count + 1):
for i in range(item_count - n + 1):
yield seq[i : i + n]
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
)
class bucket:
"""Wrap *iterable* and return an object that buckets it iterable into
child iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
>>> sorted(list(s)) # Get the keys
['a', 'b', 'c']
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
def __init__(self, iterable, key, validator=None):
self._it = iter(iterable)
self._key = key
self._cache = defaultdict(deque)
self._validator = validator or (lambda x: True)
def __contains__(self, value):
if not self._validator(value):
return False
try:
item = next(self[value])
except StopIteration:
return False
else:
self._cache[value].appendleft(item)
return True
def _get_values(self, value):
"""
Helper to yield items from the parent iterator that match *value*.
Items that don't match are stored in the local cache as they
are encountered.
"""
while True:
# If we've cached some items that match the target value, emit
# the first one and evict it from the cache.
if self._cache[value]:
yield self._cache[value].popleft()
# Otherwise we need to advance the parent iterator to search for
# a matching item, caching the rest.
else:
while True:
try:
item = next(self._it)
except StopIteration:
return
item_value = self._key(item)
if item_value == value:
yield item
break
elif self._validator(item_value):
self._cache[item_value].append(item)
def __iter__(self):
for item in self._it:
item_value = self._key(item)
if self._validator(item_value):
self._cache[item_value].append(item)
yield from self._cache.keys()
def __getitem__(self, value):
if not self._validator(value):
return iter(())
return self._get_values(value)
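# An illustrative sketch (not part of the original source): items that are
# read while searching one bucket are cached and later served from the
# other buckets, so each input item is consumed exactly once.
#
#     >>> s = bucket(range(10), key=lambda x: x % 2)
#     >>> list(s[0])
#     [0, 2, 4, 6, 8]
#     >>> list(s[1])
#     [1, 3, 5, 7, 9]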
def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head.copy(), chain(head, it)
def interleave(*iterables):
"""Return a new iterable yielding from each iterable in turn,
until the shortest is exhausted.
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7]
For a version that doesn't terminate after the shortest iterable is
exhausted, see :func:`interleave_longest`.
"""
return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
return (x for x in i if x is not _marker)
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and
will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
if (
((levels is not None) and (level > levels))
or isinstance(node, (str, bytes))
or ((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
yield from walk(child, level + 1)
yield from walk(iterable, 0)
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
"""Invoke *func* on each item in *iterable* (or on each *chunk_size* group
of items) before yielding the item.
`func` must be a function that takes a single argument. Its return value
will be discarded.
*before* and *after* are optional functions that take no arguments. They
will be executed before iteration starts and after it ends, respectively.
`side_effect` can be used for logging, updating progress bars, or anything
that is not functionally "pure."
Emitting a status message:
>>> from more_itertools import consume
>>> func = lambda item: print('Received {}'.format(item))
>>> consume(side_effect(func, range(2)))
Received 0
Received 1
Operating on chunks of items:
>>> pair_sums = []
>>> func = lambda chunk: pair_sums.append(sum(chunk))
>>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
[0, 1, 2, 3, 4, 5]
>>> list(pair_sums)
[1, 5, 9]
Writing to a file-like object:
>>> from io import StringIO
>>> from more_itertools import consume
>>> f = StringIO()
>>> func = lambda x: print(x, file=f)
>>> before = lambda: print(u'HEADER', file=f)
>>> after = f.close
>>> it = [u'a', u'b', u'c']
>>> consume(side_effect(func, it, before=before, after=after))
>>> f.closed
True
"""
try:
if before is not None:
before()
if chunk_size is None:
for item in iterable:
func(item)
yield item
else:
for chunk in chunked(iterable, chunk_size):
func(chunk)
yield from chunk
finally:
if after is not None:
after()
def sliced(seq, n, strict=False):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
    By default, the last yielded slice will have fewer than *n* elements
if the length of *seq* is not divisible by *n*:
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
If the length of *seq* is not divisible by *n* and *strict* is
``True``, then ``ValueError`` will be raised before the last
slice is yielded.
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
if strict:
def ret():
for _slice in iterator:
if len(_slice) != n:
raise ValueError("seq is not divisible by n.")
yield _slice
return iter(ret())
else:
return iterator
def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
"""Yield lists of items from *iterable*, where each list is delimited by
an item where callable *pred* returns ``True``.
>>> list(split_at('abcdcba', lambda x: x == 'b'))
[['a'], ['c', 'd', 'c'], ['a']]
>>> list(split_at(range(10), lambda n: n % 2 == 1))
[[0], [2], [4], [6], [8], []]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
[[0], [2], [4, 5, 6, 7, 8, 9]]
By default, the delimiting items are not included in the output.
    To include them, set *keep_separator* to ``True``.
>>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
[['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item):
yield buf
if keep_separator:
yield [item]
if maxsplit == 1:
yield list(it)
return
buf = []
maxsplit -= 1
else:
buf.append(item)
yield buf
def split_before(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends just before
an item for which callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item) and buf:
yield buf
if maxsplit == 1:
yield [item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(item)
if buf:
yield buf
def split_after(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends with an
item where callable *pred* returns ``True``:
>>> list(split_after('one1two2', lambda s: s.isdigit()))
[['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
>>> list(split_after(range(10), lambda n: n % 3 == 0))
[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
buf.append(item)
if pred(item) and buf:
yield buf
if maxsplit == 1:
yield list(it)
return
buf = []
maxsplit -= 1
if buf:
yield buf
def split_when(iterable, pred, maxsplit=-1):
"""Split *iterable* into pieces based on the output of *pred*.
*pred* should be a function that takes successive pairs of items and
returns ``True`` if the iterable should be split in between them.
For example, to find runs of increasing numbers, split the iterable when
element ``i`` is larger than element ``i + 1``:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
[[1, 2, 3, 3], [2, 5], [2, 4], [2]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
... lambda x, y: x > y, maxsplit=2))
[[1, 2, 3, 3], [2, 5], [2, 4, 2]]
"""
if maxsplit == 0:
yield list(iterable)
return
it = iter(iterable)
try:
cur_item = next(it)
except StopIteration:
return
buf = [cur_item]
for next_item in it:
if pred(cur_item, next_item):
yield buf
if maxsplit == 1:
yield [next_item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(next_item)
cur_item = next_item
yield buf
def split_into(iterable, sizes):
"""Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
    contain items up to the end of *iterable* the same way that itertools.islice
does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be where in a row
from a table, multiple columns represent elements of the same feature
(e.g. a point represented by x,y,z) but, the format is not the same for
all columns.
"""
# convert the iterable argument into an iterator so its contents can
# be consumed by islice in case it is a generator
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
"""Yield the elements from *iterable*, followed by *fillvalue*, such that
at least *n* items are emitted.
>>> list(padded([1, 2, 3], '?', 5))
[1, 2, 3, '?', '?']
If *next_multiple* is ``True``, *fillvalue* will be emitted until the
number of items emitted is a multiple of *n*::
>>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
[1, 2, 3, 4, None, None]
If *n* is ``None``, *fillvalue* will be emitted indefinitely.
"""
it = iter(iterable)
if n is None:
yield from chain(it, repeat(fillvalue))
elif n < 1:
raise ValueError('n must be at least 1')
else:
item_count = 0
for item in it:
yield item
item_count += 1
remaining = (n - item_count) % n if next_multiple else n - item_count
for _ in range(remaining):
yield fillvalue
def repeat_last(iterable, default=None):
"""After the *iterable* is exhausted, keep yielding its last element.
>>> list(islice(repeat_last(range(3)), 5))
[0, 1, 2, 2, 2]
If the iterable is empty, yield *default* forever::
>>> list(islice(repeat_last(range(0), 42), 5))
[42, 42, 42, 42, 42]
"""
item = _marker
for item in iterable:
yield item
final = default if item is _marker else item
yield from repeat(final)
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
    storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)]
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
"""Yield tuples whose elements are offset from *iterable*.
The amount by which the `i`-th item in each tuple is offset is given by
the `i`-th item in *offsets*.
>>> list(stagger([0, 1, 2, 3]))
[(None, 0, 1), (0, 1, 2), (1, 2, 3)]
>>> list(stagger(range(8), offsets=(0, 2, 4)))
[(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
By default, the sequence will end when the final element of a tuple is the
last item in the iterable. To continue until the first element of a tuple
is the last item in the iterable, set *longest* to ``True``::
>>> list(stagger([0, 1, 2, 3], longest=True))
[(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
children = tee(iterable, len(offsets))
return zip_offset(
*children, offsets=offsets, longest=longest, fillvalue=fillvalue
)
class UnequalIterablesError(ValueError):
def __init__(self, details=None):
msg = 'Iterables have different lengths'
if details is not None:
msg += (': index 0 has length {}; index {} has length {}').format(
*details
)
super().__init__(msg)
def _zip_equal_generator(iterables):
for combo in zip_longest(*iterables, fillvalue=_marker):
for val in combo:
if val is _marker:
raise UnequalIterablesError()
yield combo
def zip_equal(*iterables):
"""``zip`` the input *iterables* together, but raise
``UnequalIterablesError`` if they aren't all the same length.
>>> it_1 = range(3)
>>> it_2 = iter('abc')
>>> list(zip_equal(it_1, it_2))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> it_1 = range(3)
>>> it_2 = iter('abcd')
>>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
more_itertools.more.UnequalIterablesError: Iterables have different
lengths
"""
if hexversion >= 0x30A00A6:
warnings.warn(
(
'zip_equal will be removed in a future version of '
'more-itertools. Use the builtin zip function with '
'strict=True instead.'
),
DeprecationWarning,
)
# Check whether the iterables are all the same size.
try:
first_size = len(iterables[0])
for i, it in enumerate(iterables[1:], 1):
size = len(it)
if size != first_size:
break
else:
# If we didn't break out, we can use the built-in zip.
return zip(*iterables)
# If we did break out, there was a mismatch.
raise UnequalIterablesError(details=(first_size, i, size))
# If any one of the iterables didn't have a length, start reading
# them until one runs out.
except TypeError:
return _zip_equal_generator(iterables)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
"""``zip`` the input *iterables* together, but offset the `i`-th iterable
by the `i`-th item in *offsets*.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
This can be used as a lightweight alternative to SciPy or pandas to analyze
data sets in which some series have a lead or lag relationship.
By default, the sequence will end when the shortest iterable is exhausted.
To continue until the longest iterable is exhausted, set *longest* to
``True``.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
if len(iterables) != len(offsets):
raise ValueError("Number of iterables and offsets didn't match")
staggered = []
for it, n in zip(iterables, offsets):
if n < 0:
staggered.append(chain(repeat(fillvalue, -n), it))
elif n > 0:
staggered.append(islice(it, n, None))
else:
staggered.append(it)
if longest:
return zip_longest(*staggered, fillvalue=fillvalue)
return zip(*staggered)
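# A lead/lag analysis sketch (illustrative only, with made-up data): pairing
# a series with itself at offset 1 yields consecutive differences.
#
#     >>> prices = [10, 11, 13, 12]
#     >>> [b - a for a, b in zip_offset(prices, prices, offsets=(0, 1))]
#     [1, 2, -1]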
def sort_together(iterables, key_list=(0,), key=None, reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
To sort by a function of the elements of the iterable, pass a *key*
function. Its arguments are the elements of the iterables corresponding to
the key list::
>>> names = ('a', 'b', 'c')
>>> lengths = (1, 2, 3)
>>> widths = (5, 2, 1)
>>> def area(length, width):
... return length * width
>>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
[('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
if key is None:
# if there is no key function, the key argument to sorted is an
# itemgetter
key_argument = itemgetter(*key_list)
else:
# if there is a key function, call it with the items at the offsets
# specified by the key function as arguments
key_list = list(key_list)
if len(key_list) == 1:
# if key_list contains a single item, pass the item at that offset
# as the only argument to the key function
key_offset = key_list[0]
key_argument = lambda zipped_items: key(zipped_items[key_offset])
else:
# if key_list contains multiple items, use itemgetter to return a
# tuple of items, which we pass as *args to the key function
get_key_items = itemgetter(*key_list)
key_argument = lambda zipped_items: key(
*get_key_items(zipped_items)
)
return list(
zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
)
def unzip(iterable):
"""The inverse of :func:`zip`, this function disaggregates the elements
of the zipped *iterable*.
The ``i``-th iterable contains the ``i``-th element from each element
    of the zipped iterable. The first element is used to determine the
length of the remaining elements.
>>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> letters, numbers = unzip(iterable)
>>> list(letters)
['a', 'b', 'c', 'd']
>>> list(numbers)
[1, 2, 3, 4]
This is similar to using ``zip(*iterable)``, but it avoids reading
*iterable* into memory. Note, however, that this function uses
:func:`itertools.tee` and thus may require significant storage.
"""
head, iterable = spy(iter(iterable))
if not head:
# empty iterable, e.g. zip([], [], [])
return ()
# spy returns a one-length iterable as head
head = head[0]
iterables = tee(iterable, len(head))
def itemgetter(i):
def getter(obj):
try:
return obj[i]
except IndexError:
# basically if we have an iterable like
# iter([(1, 2, 3), (4, 5), (6,)])
# the second unzipped iterable would fail at the third tuple
# since it would try to access tup[1]
# same with the third unzipped iterable and the second tuple
# to support these "improperly zipped" iterables,
# we create a custom itemgetter
# which just stops the unzipped iterables
# at first length mismatch
raise StopIteration
return getter
return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
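# An illustrative sketch of the "improperly zipped" handling above (not
# part of the original source): a short tuple simply ends the affected
# output iterable early instead of raising.
#
#     >>> letters, numbers = unzip([('a', 1), ('b', 2), ('c',)])
#     >>> list(letters)
#     ['a', 'b', 'c']
#     >>> list(numbers)
#     [1, 2]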
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
If the length of the iterable is smaller than n, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
"""
if n < 1:
raise ValueError('n must be at least 1')
try:
iterable[:0]
except TypeError:
seq = tuple(iterable)
else:
seq = iterable
q, r = divmod(len(seq), n)
ret = []
stop = 0
for i in range(1, n + 1):
start = stop
stop += q + 1 if i <= r else q
ret.append(iter(seq[start:stop]))
return ret
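# A side-by-side sketch of :func:`distribute` versus :func:`divide`
# (illustrative only): the former deals items round-robin, the latter
# preserves contiguous runs.
#
#     >>> [list(c) for c in distribute(3, range(1, 7))]
#     [[1, 4], [2, 5], [3, 6]]
#     >>> [list(c) for c in divide(3, range(1, 7))]
#     [[1, 2], [3, 4], [5, 6]]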
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
def adjacent(predicate, iterable, distance=1):
"""Return an iterable over `(bool, item)` tuples where the `item` is
drawn from *iterable* and the `bool` indicates whether
that item satisfies the *predicate* or is adjacent to an item that does.
For example, to find whether items are adjacent to a ``3``::
>>> list(adjacent(lambda x: x == 3, range(6)))
[(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
Set *distance* to change what counts as adjacent. For example, to find
whether items are two places away from a ``3``:
>>> list(adjacent(lambda x: x == 3, range(6), distance=2))
[(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
This is useful for contextualizing the results of a search function.
For example, a code comparison tool might want to identify lines that
have changed, but also surrounding lines to give the viewer of the diff
context.
The predicate function will only be called once for each item in the
iterable.
See also :func:`groupby_transform`, which can be used with this function
to group ranges of items with the same `bool` value.
"""
# Allow distance=0 mainly for testing that it reproduces results with map()
if distance < 0:
raise ValueError('distance must be at least 0')
i1, i2 = tee(iterable)
padding = [False] * distance
selected = chain(padding, map(predicate, i1), padding)
adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
return zip(adjacent_to_selected, i2)
def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
"""An extension of :func:`itertools.groupby` that can apply transformations
to the grouped data.
* *keyfunc* is a function computing a key value for each item in *iterable*
* *valuefunc* is a function that transforms the individual items from
*iterable* after grouping
* *reducefunc* is a function that transforms each group of items
>>> iterable = 'aAAbBBcCC'
>>> keyfunc = lambda k: k.upper()
>>> valuefunc = lambda v: v.lower()
>>> reducefunc = lambda g: ''.join(g)
>>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
[('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
Each optional argument defaults to an identity function if not specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
ret = groupby(iterable, keyfunc)
if valuefunc:
ret = ((k, map(valuefunc, g)) for k, g in ret)
if reducefunc:
ret = ((k, reducefunc(g)) for k, g in ret)
return ret
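# A sketch of the combination suggested in :func:`adjacent`'s docstring
# (illustrative only): grouping the (bool, item) pairs produces ranges of
# context around each match.
#
#     >>> flagged = adjacent(lambda x: x == 3, range(6))
#     >>> [(k, list(g))
#     ...  for k, g in groupby_transform(flagged, itemgetter(0), itemgetter(1))]
#     [(False, [0, 1]), (True, [2, 3, 4]), (False, [5])]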
class numeric_range(abc.Sequence, abc.Hashable):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = iter(numeric_range(start, stop, step))
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
_EMPTY_HASH = hash(range(0, 0))
def __init__(self, *args):
argc = len(args)
if argc == 1:
(self._stop,) = args
self._start = type(self._stop)(0)
self._step = type(self._stop - self._start)(1)
elif argc == 2:
self._start, self._stop = args
self._step = type(self._stop - self._start)(1)
elif argc == 3:
self._start, self._stop, self._step = args
elif argc == 0:
raise TypeError(
'numeric_range expected at least '
'1 argument, got {}'.format(argc)
)
else:
raise TypeError(
'numeric_range expected at most '
'3 arguments, got {}'.format(argc)
)
self._zero = type(self._step)(0)
if self._step == self._zero:
raise ValueError('numeric_range() arg 3 must not be zero')
self._growing = self._step > self._zero
self._init_len()
def __bool__(self):
if self._growing:
return self._start < self._stop
else:
return self._start > self._stop
def __contains__(self, elem):
if self._growing:
if self._start <= elem < self._stop:
return (elem - self._start) % self._step == self._zero
else:
if self._start >= elem > self._stop:
return (self._start - elem) % (-self._step) == self._zero
return False
def __eq__(self, other):
if isinstance(other, numeric_range):
empty_self = not bool(self)
empty_other = not bool(other)
if empty_self or empty_other:
return empty_self and empty_other # True if both empty
else:
return (
self._start == other._start
and self._step == other._step
and self._get_by_index(-1) == other._get_by_index(-1)
)
else:
return False
def __getitem__(self, key):
if isinstance(key, int):
return self._get_by_index(key)
elif isinstance(key, slice):
step = self._step if key.step is None else key.step * self._step
if key.start is None or key.start <= -self._len:
start = self._start
elif key.start >= self._len:
start = self._stop
else: # -self._len < key.start < self._len
start = self._get_by_index(key.start)
if key.stop is None or key.stop >= self._len:
stop = self._stop
elif key.stop <= -self._len:
stop = self._start
else: # -self._len < key.stop < self._len
stop = self._get_by_index(key.stop)
return numeric_range(start, stop, step)
else:
raise TypeError(
'numeric range indices must be '
'integers or slices, not {}'.format(type(key).__name__)
)
def __hash__(self):
if self:
return hash((self._start, self._get_by_index(-1), self._step))
else:
return self._EMPTY_HASH
def __iter__(self):
values = (self._start + (n * self._step) for n in count())
if self._growing:
return takewhile(partial(gt, self._stop), values)
else:
return takewhile(partial(lt, self._stop), values)
def __len__(self):
return self._len
def _init_len(self):
if self._growing:
start = self._start
stop = self._stop
step = self._step
else:
start = self._stop
stop = self._start
step = -self._step
distance = stop - start
if distance <= self._zero:
self._len = 0
else: # distance > 0 and step > 0: regular euclidean division
q, r = divmod(distance, step)
self._len = int(q) + int(r != self._zero)
def __reduce__(self):
return numeric_range, (self._start, self._stop, self._step)
def __repr__(self):
if self._step == 1:
return "numeric_range({}, {})".format(
repr(self._start), repr(self._stop)
)
else:
return "numeric_range({}, {}, {})".format(
repr(self._start), repr(self._stop), repr(self._step)
)
def __reversed__(self):
return iter(
numeric_range(
self._get_by_index(-1), self._start - self._step, -self._step
)
)
def count(self, value):
return int(value in self)
def index(self, value):
if self._growing:
if self._start <= value < self._stop:
q, r = divmod(value - self._start, self._step)
if r == self._zero:
return int(q)
else:
if self._start >= value > self._stop:
q, r = divmod(self._start - value, -self._step)
if r == self._zero:
return int(q)
raise ValueError("{} is not in numeric range".format(value))
def _get_by_index(self, i):
if i < 0:
i += self._len
if i < 0 or i >= self._len:
raise IndexError("numeric range object index out of range")
return self._start + i * self._step
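# An illustrative sketch (not part of the original source): the sequence
# protocol works as for ``range``, including ``len``, membership testing,
# negative indexing, and ``index``.
#
#     >>> r = numeric_range(0.0, 10.0, 1.5)
#     >>> len(r)
#     7
#     >>> 4.5 in r
#     True
#     >>> r[-1]
#     9.0
#     >>> r.index(6.0)
#     4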
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable)
def mark_ends(iterable):
"""Yield 3-tuples of the form ``(is_first, is_last, item)``.
>>> list(mark_ends('ABC'))
[(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
Use this when looping over an iterable to take special action on its first
and/or last items:
>>> iterable = ['Header', 100, 200, 'Footer']
>>> total = 0
>>> for is_first, is_last, item in mark_ends(iterable):
... if is_first:
... continue # Skip the header
... if is_last:
... continue # Skip the footer
... total += item
>>> print(total)
300
"""
it = iter(iterable)
try:
b = next(it)
except StopIteration:
return
try:
for i in count():
a = b
b = next(it)
yield i == 0, False, a
except StopIteration:
yield i == 0, True, a
def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable))
if window_size < 1:
raise ValueError('window size must be at least 1')
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it))
def lstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the beginning
for which *pred* returns ``True``.
For example, to remove a set of items from the start of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(lstrip(iterable, pred))
[1, 2, None, 3, False, None]
    This function is analogous to :func:`str.lstrip`, and is essentially
    a wrapper for :func:`itertools.dropwhile`.
"""
return dropwhile(pred, iterable)
def rstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the end
for which *pred* returns ``True``.
For example, to remove a set of items from the end of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(rstrip(iterable, pred))
[None, False, None, 1, 2, None, 3]
This function is analogous to :func:`str.rstrip`.
"""
cache = []
cache_append = cache.append
cache_clear = cache.clear
for x in iterable:
if pred(x):
cache_append(x)
else:
yield from cache
cache_clear()
yield x
def strip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the
beginning and end for which *pred* returns ``True``.
For example, to remove a set of items from both ends of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(strip(iterable, pred))
[1, 2, None, 3]
This function is analogous to :func:`str.strip`.
"""
return rstrip(lstrip(iterable, pred), pred)
class islice_extended:
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
You can also use slice notation directly:
>>> iterable = map(str, count())
>>> it = islice_extended(iterable)[10:20:2]
>>> list(it)
['10', '12', '14', '16', '18']
"""
def __init__(self, iterable, *args):
it = iter(iterable)
if args:
self._iterable = _islice_helper(it, slice(*args))
else:
self._iterable = it
def __iter__(self):
return self
def __next__(self):
return next(self._iterable)
def __getitem__(self, key):
if isinstance(key, slice):
return islice_extended(_islice_helper(self._iterable, key))
raise TypeError('islice_extended.__getitem__ argument must be a slice')
def _islice_helper(it, s):
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
if step > 0:
start = 0 if (start is None) else start
if start < 0:
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
            # Consume the iterable, caching only the last n items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step]
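# Illustrative sketch (not part of the vendored module): exercising the
# negative-index handling in _islice_helper above. The helper buffers at most
# ``-start`` items in a deque, so memory stays bounded even for long inputs.
def _example_islice_extended_negative():
    it = islice_extended((str(n) for n in range(1000)), -3, None)
    assert list(it) == ['997', '998', '999']
    # A negative step also works on an infinite source, because iteration
    # stops once the items up to the stop boundary have been buffered.
    from itertools import count
    assert list(islice_extended(count(), 5, 0, -1)) == [5, 4, 3, 2, 1]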
def always_reversible(iterable):
"""An extension of :func:`reversed` that supports all iterables, not
just those which implement the ``Reversible`` or ``Sequence`` protocols.
>>> print(*always_reversible(x for x in range(3)))
2 1 0
If the iterable is already reversible, this function returns the
result of :func:`reversed()`. If the iterable is not reversible,
this function will cache the remaining items in the iterable and
yield them in reverse order, which may require significant storage.
"""
try:
return reversed(iterable)
except TypeError:
return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
    Each group of consecutive items is an iterator that shares its source with
    *iterable*. When an output group is advanced, the previous group is
no longer available unless its elements are copied (e.g., into a ``list``).
>>> iterable = [1, 2, 11, 12, 21, 22]
>>> saved_groups = []
>>> for group in consecutive_groups(iterable):
... saved_groups.append(list(group)) # Copy group elements
>>> saved_groups
[[1, 2], [11, 12], [21, 22]]
"""
for k, g in groupby(
enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
):
yield map(itemgetter(1), g)
def difference(iterable, func=sub, *, initial=None):
"""This function is the inverse of :func:`itertools.accumulate`. By default
it will compute the first difference of *iterable* using
:func:`operator.sub`:
>>> from itertools import accumulate
>>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
*func* defaults to :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120]
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
If the *initial* keyword is set, the first element will be skipped when
computing successive differences.
>>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
>>> list(difference(it, initial=10))
[1, 2, 3]
"""
a, b = tee(iterable)
try:
first = [next(b)]
except StopIteration:
return iter([])
if initial is not None:
first = []
return chain(first, starmap(func, zip(b, a)))
class SequenceView(Sequence):
"""Return a read-only view of the sequence object *target*.
:class:`SequenceView` objects are analogous to Python's built-in
"dictionary view" types. They provide a dynamic view of a sequence's items,
meaning that when the sequence updates, so does the view.
>>> seq = ['0', '1', '2']
>>> view = SequenceView(seq)
>>> view
SequenceView(['0', '1', '2'])
>>> seq.append('3')
>>> view
SequenceView(['0', '1', '2', '3'])
Sequence views support indexing, slicing, and length queries. They act
like the underlying sequence, except they don't allow assignment:
>>> view[1]
'1'
>>> view[1:-1]
['1', '2']
>>> len(view)
4
Sequence views are useful as an alternative to copying, as they don't
require (much) extra storage.
"""
def __init__(self, target):
if not isinstance(target, Sequence):
raise TypeError
self._target = target
def __getitem__(self, index):
return self._target[index]
def __len__(self):
return len(self._target)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._target))
class seekable:
"""Wrap an iterator to allow for seeking backward and forward. This
progressively caches the items in the source iterable so they can be
re-visited.
Call :meth:`seek` with an index to seek to that position in the source
iterable.
To "reset" an iterator, seek to ``0``:
>>> from itertools import count
>>> it = seekable((str(n) for n in count()))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> it.seek(0)
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> next(it)
'3'
You can also seek forward:
>>> it = seekable((str(n) for n in range(20)))
>>> it.seek(10)
>>> next(it)
'10'
>>> it.seek(20) # Seeking past the end of the source isn't a problem
>>> list(it)
[]
>>> it.seek(0) # Resetting works even after hitting the end
>>> next(it), next(it), next(it)
('0', '1', '2')
Call :meth:`peek` to look ahead one item without advancing the iterator:
>>> it = seekable('1234')
>>> it.peek()
'1'
>>> list(it)
['1', '2', '3', '4']
>>> it.peek(default='empty')
'empty'
Before the iterator is at its end, calling :func:`bool` on it will return
``True``. After it will return ``False``:
>>> it = seekable('5678')
>>> bool(it)
True
>>> list(it)
['5', '6', '7', '8']
>>> bool(it)
False
You may view the contents of the cache with the :meth:`elements` method.
That returns a :class:`SequenceView`, a view that updates automatically:
>>> it = seekable((str(n) for n in range(10)))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> elements = it.elements()
>>> elements
SequenceView(['0', '1', '2'])
>>> next(it)
'3'
>>> elements
SequenceView(['0', '1', '2', '3'])
By default, the cache grows as the source iterable progresses, so beware of
wrapping very large or infinite iterables. Supply *maxlen* to limit the
size of the cache (this of course limits how far back you can seek).
>>> from itertools import count
>>> it = seekable((str(n) for n in count()), maxlen=2)
>>> next(it), next(it), next(it), next(it)
('0', '1', '2', '3')
>>> list(it.elements())
['2', '3']
>>> it.seek(0)
>>> next(it), next(it), next(it), next(it)
('2', '3', '4', '5')
>>> next(it)
'6'
"""
def __init__(self, iterable, maxlen=None):
self._source = iter(iterable)
if maxlen is None:
self._cache = []
else:
self._cache = deque([], maxlen)
self._index = None
def __iter__(self):
return self
def __next__(self):
if self._index is not None:
try:
item = self._cache[self._index]
except IndexError:
self._index = None
else:
self._index += 1
return item
item = next(self._source)
self._cache.append(item)
return item
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
try:
peeked = next(self)
except StopIteration:
if default is _marker:
raise
return default
if self._index is None:
self._index = len(self._cache)
self._index -= 1
return peeked
def elements(self):
return SequenceView(self._cache)
def seek(self, index):
self._index = index
remainder = index - len(self._cache)
if remainder > 0:
consume(self, remainder)
class run_length:
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
def rlocate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
[4, 2, 1]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> iterable = iter('abcb')
>>> pred = lambda x: x == 'b'
>>> list(rlocate(iterable, pred))
[3, 1]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(rlocate(iterable, pred=pred, window_size=3))
[9, 5, 1]
Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.
    See :func:`locate` for other example applications.
"""
if window_size is None:
try:
len_iter = len(iterable)
return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
except TypeError:
pass
return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0]
def partitions(iterable):
"""Yield all possible order-preserving partitions of *iterable*.
>>> iterable = 'abc'
>>> for part in partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['a', 'b', 'c']
This is unrelated to :func:`partition`.
"""
sequence = list(iterable)
n = len(sequence)
for i in powerset(range(1, n)):
yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
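# Illustrative sketch (not part of the vendored module): the powerset of the
# n - 1 interior cut points drives the generator above, so an n-item sequence
# has exactly 2 ** (n - 1) order-preserving partitions.
def _example_partitions_count():
    for n in range(1, 6):
        assert sum(1 for _ in partitions(range(n))) == 2 ** (n - 1)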
def set_partitions(iterable, k=None):
"""
Yield the set partitions of *iterable* into *k* parts. Set partitions are
not order-preserving.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable, 2):
... print([''.join(p) for p in part])
['a', 'bc']
['ab', 'c']
['b', 'ac']
If *k* is not given, every set partition is generated.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['b', 'ac']
['a', 'b', 'c']
"""
L = list(iterable)
n = len(L)
if k is not None:
if k < 1:
raise ValueError(
"Can't partition in a negative or zero number of groups"
)
elif k > n:
return
def set_partitions_helper(L, k):
n = len(L)
if k == 1:
yield [L]
elif n == k:
yield [[s] for s in L]
else:
e, *M = L
for p in set_partitions_helper(M, k - 1):
yield [[e], *p]
for p in set_partitions_helper(M, k):
for i in range(len(p)):
yield p[:i] + [[e] + p[i]] + p[i + 1 :]
if k is None:
for k in range(1, n + 1):
yield from set_partitions_helper(L, k)
else:
yield from set_partitions_helper(L, k)
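# Illustrative sketch (not part of the vendored module): the recursion above
# counts out the Stirling numbers of the second kind, e.g. S(4, 2) == 7, and
# summing over k gives the Bell numbers, e.g. B(4) == 15.
def _example_set_partitions_counts():
    assert sum(1 for _ in set_partitions('abcd', 2)) == 7
    assert sum(1 for _ in set_partitions('abcd')) == 15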
class time_limited:
"""
Yield items from *iterable* until *limit_seconds* have passed.
If the time limit expires before all items have been yielded, the
``timed_out`` parameter will be set to ``True``.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = time_limited(0.1, generator())
>>> list(iterable)
[1, 2]
>>> iterable.timed_out
True
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
"""
def __init__(self, limit_seconds, iterable):
        if limit_seconds < 0:
            raise ValueError('limit_seconds must be non-negative')
self.limit_seconds = limit_seconds
self._iterable = iter(iterable)
self._start_time = monotonic()
self.timed_out = False
def __iter__(self):
return self
def __next__(self):
item = next(self._iterable)
if monotonic() - self._start_time > self.limit_seconds:
self.timed_out = True
raise StopIteration
return item
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
    and perhaps more.
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def ichunked(iterable, n):
"""Break *iterable* into sub-iterables with *n* elements each.
:func:`ichunked` is like :func:`chunked`, but it yields iterables
instead of lists.
If the sub-iterables are read in order, the elements of *iterable*
won't be stored in memory.
If they are read out of order, :func:`itertools.tee` is used to cache
elements as necessary.
>>> from itertools import count
>>> all_chunks = ichunked(count(), 4)
>>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
>>> list(c_2) # c_1's elements have been cached; c_3's haven't been
[4, 5, 6, 7]
>>> list(c_1)
[0, 1, 2, 3]
>>> list(c_3)
[8, 9, 10, 11]
"""
source = iter(iterable)
while True:
# Check to see whether we're at the end of the source iterable
item = next(source, _marker)
if item is _marker:
return
# Clone the source and yield an n-length slice
source, it = tee(chain([item], source))
yield islice(it, n)
# Advance the source iterable
consume(source, n)
def distinct_combinations(iterable, r):
"""Yield the distinct combinations of *r* items taken from *iterable*.
>>> list(distinct_combinations([0, 0, 1], 2))
[(0, 0), (0, 1)]
    Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
"""
if r < 0:
raise ValueError('r must be non-negative')
elif r == 0:
yield ()
return
pool = tuple(iterable)
generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
current_combo = [None] * r
level = 0
while generators:
try:
cur_idx, p = next(generators[-1])
except StopIteration:
generators.pop()
level -= 1
continue
current_combo[level] = p
if level + 1 == r:
yield tuple(current_combo)
else:
generators.append(
unique_everseen(
enumerate(pool[cur_idx + 1 :], cur_idx + 1),
key=itemgetter(1),
)
)
level += 1
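# Illustrative sketch (not part of the vendored module): the stack of
# unique_everseen generators above yields the same tuples as the naive
# set-based approach, without materializing the duplicates first.
def _example_distinct_combinations():
    from itertools import combinations
    pool = [0, 0, 1, 1, 2]
    assert sorted(distinct_combinations(pool, 3)) == sorted(set(combinations(pool, 3)))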
def filter_except(validator, iterable, *exceptions):
"""Yield the items from *iterable* for which the *validator* function does
not raise one of the specified *exceptions*.
*validator* is called for each item in *iterable*.
It should be a function that accepts one argument and raises an exception
if that item is not valid.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(filter_except(int, iterable, ValueError, TypeError))
['1', '2', '4']
If an exception other than one given by *exceptions* is raised by
*validator*, it is raised like normal.
"""
for item in iterable:
try:
validator(item)
except exceptions:
pass
else:
yield item
def map_except(function, iterable, *exceptions):
"""Transform each item from *iterable* with *function* and yield the
result, unless *function* raises one of the specified *exceptions*.
*function* is called to transform each item in *iterable*.
    It should be a function that accepts one argument.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(map_except(int, iterable, ValueError, TypeError))
[1, 2, 4]
If an exception other than one given by *exceptions* is raised by
*function*, it is raised like normal.
"""
for item in iterable:
try:
yield function(item)
except exceptions:
pass
def _sample_unweighted(iterable, k):
# Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
# "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
# Fill up the reservoir (collection of samples) with the first `k` samples
reservoir = take(k, iterable)
# Generate random number that's the largest in a sample of k U(0,1) numbers
# Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
W = exp(log(random()) / k)
# The number of elements to skip before changing the reservoir is a random
# number with a geometric distribution. Sample it using random() and logs.
next_index = k + floor(log(random()) / log(1 - W))
for index, element in enumerate(iterable, k):
if index == next_index:
reservoir[randrange(k)] = element
# The new W is the largest in a sample of k U(0, `old_W`) numbers
W *= exp(log(random()) / k)
next_index += floor(log(random()) / log(1 - W)) + 1
return reservoir
def _sample_weighted(iterable, k, weights):
# Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
# "Weighted random sampling with a reservoir".
# Log-transform for numerical stability for weights that are small/large
weight_keys = (log(random()) / weight for weight in weights)
# Fill up the reservoir (collection of samples) with the first `k`
# weight-keys and elements, then heapify the list.
reservoir = take(k, zip(weight_keys, iterable))
heapify(reservoir)
# The number of jumps before changing the reservoir is a random variable
# with an exponential distribution. Sample it using random() and logs.
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
for weight, element in zip(weights, iterable):
if weight >= weights_to_skip:
# The notation here is consistent with the paper, but we store
# the weight-keys in log-space for better numerical stability.
smallest_weight_key, _ = reservoir[0]
t_w = exp(weight * smallest_weight_key)
r_2 = uniform(t_w, 1) # generate U(t_w, 1)
weight_key = log(r_2) / weight
heapreplace(reservoir, (weight_key, element))
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
else:
weights_to_skip -= weight
# Equivalent to [element for weight_key, element in sorted(reservoir)]
return [heappop(reservoir)[1] for _ in range(k)]
def sample(iterable, k, weights=None):
"""Return a *k*-length list of elements chosen (without replacement)
from the *iterable*. Like :func:`random.sample`, but works on iterables
of unknown length.
>>> iterable = range(100)
>>> sample(iterable, 5) # doctest: +SKIP
[81, 60, 96, 16, 4]
An iterable with *weights* may also be given:
>>> iterable = range(100)
>>> weights = (i * i + 1 for i in range(100))
    >>> sample(iterable, 5, weights=weights) # doctest: +SKIP
[79, 67, 74, 66, 78]
The algorithm can also be used to generate weighted random permutations.
The relative weight of each item determines the probability that it
appears late in the permutation.
>>> data = "abcdefgh"
>>> weights = range(1, len(data) + 1)
>>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
"""
if k == 0:
return []
iterable = iter(iterable)
if weights is None:
return _sample_unweighted(iterable, k)
else:
weights = iter(weights)
return _sample_weighted(iterable, k, weights)
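# Illustrative sketch (not part of the vendored module): a rough statistical
# check of the weighted reservoir. The threshold is probabilistic, but with
# weights this skewed a failure is vanishingly unlikely.
def _example_sample_weight_bias():
    hits = 0
    for _ in range(200):
        weights = [1] * 9 + [1000]  # item 9 dominates the total weight
        if 9 in sample(range(10), 3, weights=weights):
            hits += 1
    assert hits > 150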
def is_sorted(iterable, key=None, reverse=False):
"""Returns ``True`` if the items of iterable are in sorted order, and
``False`` otherwise. *key* and *reverse* have the same meaning that they do
in the built-in :func:`sorted` function.
>>> is_sorted(['1', '2', '3', '4', '5'], key=int)
True
>>> is_sorted([5, 4, 3, 1, 2], reverse=True)
False
The function returns ``False`` after encountering the first out-of-order
item. If there are no out-of-order items, the iterable is exhausted.
"""
compare = lt if reverse else gt
it = iterable if (key is None) else map(key, iterable)
return not any(starmap(compare, pairwise(it)))
class AbortThread(BaseException):
pass
class callback_iter:
"""Convert a function that uses callbacks to an iterator.
Let *func* be a function that takes a `callback` keyword argument.
For example:
>>> def func(callback=None):
... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
... if callback:
... callback(i, c)
... return 4
Use ``with callback_iter(func)`` to get an iterator over the parameters
that are delivered to the callback.
>>> with callback_iter(func) as it:
... for args, kwargs in it:
... print(args)
(1, 'a')
(2, 'b')
(3, 'c')
The function will be called in a background thread. The ``done`` property
indicates whether it has completed execution.
>>> it.done
True
If it completes successfully, its return value will be available
in the ``result`` property.
>>> it.result
4
Notes:
* If the function uses some keyword argument besides ``callback``, supply
*callback_kwd*.
* If it finished executing, but raised an exception, accessing the
``result`` property will raise the same exception.
* If it hasn't finished executing, accessing the ``result``
property from within the ``with`` block will raise ``RuntimeError``.
* If it hasn't finished executing, accessing the ``result`` property from
outside the ``with`` block will raise a
``more_itertools.AbortThread`` exception.
    * Provide *wait_seconds* to adjust how frequently it is polled for
output.
"""
def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
self._func = func
self._callback_kwd = callback_kwd
self._aborted = False
self._future = None
self._wait_seconds = wait_seconds
        import concurrent.futures
        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self._iterator = self._reader()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._aborted = True
self._executor.shutdown()
def __iter__(self):
return self
def __next__(self):
return next(self._iterator)
@property
def done(self):
if self._future is None:
return False
return self._future.done()
@property
def result(self):
if not self.done:
raise RuntimeError('Function has not yet completed')
return self._future.result()
def _reader(self):
q = Queue()
def callback(*args, **kwargs):
if self._aborted:
raise AbortThread('canceled by user')
q.put((args, kwargs))
self._future = self._executor.submit(
self._func, **{self._callback_kwd: callback}
)
while True:
try:
item = q.get(timeout=self._wait_seconds)
except Empty:
pass
else:
q.task_done()
yield item
if self._future.done():
break
remaining = []
while True:
try:
item = q.get_nowait()
except Empty:
break
else:
q.task_done()
remaining.append(item)
q.join()
yield from remaining
def windowed_complete(iterable, n):
"""
Yield ``(beginning, middle, end)`` tuples, where:
* Each ``middle`` has *n* items from *iterable*
* Each ``beginning`` has the items before the ones in ``middle``
* Each ``end`` has the items after the ones in ``middle``
>>> iterable = range(7)
>>> n = 3
>>> for beginning, middle, end in windowed_complete(iterable, n):
... print(beginning, middle, end)
() (0, 1, 2) (3, 4, 5, 6)
(0,) (1, 2, 3) (4, 5, 6)
(0, 1) (2, 3, 4) (5, 6)
(0, 1, 2) (3, 4, 5) (6,)
(0, 1, 2, 3) (4, 5, 6) ()
    Note that *n* must be at least 0 and at most equal to the length of
*iterable*.
This function will exhaust the iterable and may require significant
storage.
"""
if n < 0:
raise ValueError('n must be >= 0')
seq = tuple(iterable)
size = len(seq)
if n > size:
raise ValueError('n must be <= len(seq)')
for i in range(size - n + 1):
beginning = seq[:i]
middle = seq[i : i + n]
end = seq[i + n :]
yield beginning, middle, end
def all_unique(iterable, key=None):
"""
Returns ``True`` if all the elements of *iterable* are unique (no two
elements are equal).
>>> all_unique('ABCB')
False
If a *key* function is specified, it will be used to make comparisons.
>>> all_unique('ABCb')
True
>>> all_unique('ABCb', str.lower)
False
The function returns as soon as the first non-unique element is
encountered. Iterables with a mix of hashable and unhashable items can
be used, but the function will be slower for unhashable items.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
for element in map(key, iterable) if key else iterable:
try:
if element in seenset:
return False
seenset_add(element)
except TypeError:
if element in seenlist:
return False
seenlist_add(element)
return True
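# Illustrative sketch (not part of the vendored module): the dual seenset /
# seenlist bookkeeping above lets hashable and unhashable items coexist.
def _example_all_unique_mixed():
    assert all_unique([1, 'a', [2, 3], [4, 5]])  # lists fall back to seenlist
    assert not all_unique([1, 'a', [2, 3], [2, 3]])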
def nth_product(index, *args):
"""Equivalent to ``list(product(*args))[index]``.
The products of *args* can be ordered lexicographically.
:func:`nth_product` computes the product at sort position *index* without
computing the previous products.
>>> nth_product(8, range(2), range(2), range(2), range(2))
(1, 0, 0, 0)
``IndexError`` will be raised if the given *index* is invalid.
"""
pools = list(map(tuple, reversed(args)))
ns = list(map(len, pools))
c = reduce(mul, ns)
if index < 0:
index += c
if not 0 <= index < c:
raise IndexError
result = []
for pool, n in zip(pools, ns):
result.append(pool[index % n])
index //= n
return tuple(reversed(result))
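# Illustrative sketch (not part of the vendored module): the mixed-radix
# decoding above agrees with materializing the full product.
def _example_nth_product():
    from itertools import product
    pools = (range(3), 'ab', range(4))
    full = list(product(*pools))
    for i in range(len(full)):
        assert nth_product(i, *pools) == full[i]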
def nth_permutation(iterable, r, index):
"""Equivalent to ``list(permutations(iterable, r))[index]```
The subsequences of *iterable* that are of length *r* where order is
important can be ordered lexicographically. :func:`nth_permutation`
computes the subsequence at sort position *index* directly, without
computing the previous subsequences.
>>> nth_permutation('ghijk', 2, 5)
('h', 'i')
    ``ValueError`` will be raised if *r* is negative or greater than the length
of *iterable*.
``IndexError`` will be raised if the given *index* is invalid.
"""
pool = list(iterable)
n = len(pool)
if r is None or r == n:
r, c = n, factorial(n)
elif not 0 <= r < n:
raise ValueError
else:
c = factorial(n) // factorial(n - r)
if index < 0:
index += c
if not 0 <= index < c:
raise IndexError
if c == 0:
return tuple()
result = [0] * r
q = index * factorial(n) // c if r < n else index
for d in range(1, n + 1):
q, i = divmod(q, d)
if 0 <= n - d < r:
result[n - d] = i
if q == 0:
break
return tuple(map(pool.pop, result))
def value_chain(*args):
"""Yield all arguments passed to the function in the same order in which
they were passed. If an argument itself is iterable then iterate over its
values.
>>> list(value_chain(1, 2, 3, [4, 5, 6]))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and are emitted
as-is:
>>> list(value_chain('12', '34', ['56', '78']))
['12', '34', '56', '78']
Multiple levels of nesting are not flattened.
"""
for value in args:
if isinstance(value, (str, bytes)):
yield value
continue
try:
yield from value
except TypeError:
yield value
def product_index(element, *args):
"""Equivalent to ``list(product(*args)).index(element)``
The products of *args* can be ordered lexicographically.
:func:`product_index` computes the first index of *element* without
computing the previous products.
>>> product_index([8, 2], range(10), range(5))
42
``ValueError`` will be raised if the given *element* isn't in the product
of *args*.
"""
index = 0
for x, pool in zip_longest(element, args, fillvalue=_marker):
if x is _marker or pool is _marker:
raise ValueError('element is not a product of args')
pool = tuple(pool)
index = index * len(pool) + pool.index(x)
return index
def combination_index(element, iterable):
"""Equivalent to ``list(combinations(iterable, r)).index(element)``
The subsequences of *iterable* that are of length *r* can be ordered
lexicographically. :func:`combination_index` computes the index of the
first *element*, without computing the previous combinations.
>>> combination_index('adf', 'abcdefg')
10
``ValueError`` will be raised if the given *element* isn't one of the
combinations of *iterable*.
"""
element = enumerate(element)
k, y = next(element, (None, None))
if k is None:
return 0
indexes = []
pool = enumerate(iterable)
for n, x in pool:
if x == y:
indexes.append(n)
tmp, y = next(element, (None, None))
if tmp is None:
break
else:
k = tmp
else:
raise ValueError('element is not a combination of iterable')
n, _ = last(pool, default=(n, None))
    # Python versions below 3.8 don't have math.comb
index = 1
for i, j in enumerate(reversed(indexes), start=1):
j = n - j
if i <= j:
index += factorial(j) // (factorial(i) * factorial(j - i))
return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
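# Illustrative sketch (not part of the vendored module): cross-checking the
# factorial-based index above against itertools.combinations.
def _example_combination_index():
    from itertools import combinations
    pool = 'abcdefg'
    for i, combo in enumerate(combinations(pool, 3)):
        assert combination_index(combo, pool) == i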
def permutation_index(element, iterable):
"""Equivalent to ``list(permutations(iterable, r)).index(element)```
The subsequences of *iterable* that are of length *r* where order is
important can be ordered lexicographically. :func:`permutation_index`
computes the index of the first *element* directly, without computing
the previous permutations.
>>> permutation_index([1, 3, 2], range(5))
19
``ValueError`` will be raised if the given *element* isn't one of the
permutations of *iterable*.
"""
index = 0
pool = list(iterable)
for i, x in zip(range(len(pool), -1, -1), element):
r = pool.index(x)
index = index * i + r
del pool[r]
return index
class countable:
"""Wrap *iterable* and keep a count of how many items have been consumed.
The ``items_seen`` attribute starts at ``0`` and increments as the iterable
is consumed:
>>> iterable = map(str, range(10))
>>> it = countable(iterable)
>>> it.items_seen
0
>>> next(it), next(it)
('0', '1')
>>> list(it)
['2', '3', '4', '5', '6', '7', '8', '9']
>>> it.items_seen
10
"""
def __init__(self, iterable):
self._it = iter(iterable)
self.items_seen = 0
def __iter__(self):
return self
def __next__(self):
item = next(self._it)
self.items_seen += 1
return item
| 117,959 |
Python
| 29.839216 | 99 | 0.55365 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_structures.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
class InfinityType:
def __repr__(self) -> str:
return "Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return False
def __le__(self, other: object) -> bool:
return False
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return True
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
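# Illustrative sketch (not part of the vendored module): the sentinels compare
# below/above every other value, which is how packaging builds total-order
# sort keys for versions with and without pre/post/dev segments.
def _example_infinity_ordering():
    values = [Infinity, 0, NegativeInfinity, 3.5]
    assert sorted(values) == [NegativeInfinity, 0, 3.5, Infinity]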
| 1,431 |
Python
| 22.096774 | 79 | 0.587002 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/requirements.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import urllib.parse
from typing import Any, List, Optional, Set
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
class Requirement:
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string: str) -> None:
try:
parsed = _parse_requirement(requirement_string)
except ParserSyntaxError as e:
raise InvalidRequirement(str(e)) from e
self.name: str = parsed.name
if parsed.url:
parsed_url = urllib.parse.urlparse(parsed.url)
if parsed_url.scheme == "file":
if urllib.parse.urlunparse(parsed_url) != parsed.url:
raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc
):
raise InvalidRequirement(f"Invalid URL: {parsed.url}")
self.url: Optional[str] = parsed.url
else:
self.url = None
self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: Optional[Marker] = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
def __str__(self) -> str:
parts: List[str] = [self.name]
if self.extras:
formatted_extras = ",".join(sorted(self.extras))
parts.append(f"[{formatted_extras}]")
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append(f"@ {self.url}")
if self.marker:
parts.append(" ")
if self.marker:
parts.append(f"; {self.marker}")
return "".join(parts)
def __repr__(self) -> str:
return f"<Requirement('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Requirement):
return NotImplemented
return (
self.name == other.name
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
)
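# Illustrative sketch (not part of the vendored module): parsing a PEP 508
# requirement string and reading back its parts. The specifier string is
# normalized, with its clauses sorted lexicographically.
def _example_requirement_parts():
    req = Requirement("requests[security]>=2.8.1,==2.8.*; python_version < '3'")
    assert req.name == "requests"
    assert req.extras == {"security"}
    assert str(req.specifier) == "==2.8.*,>=2.8.1"
    assert req.marker is not None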
| 3,287 |
Python
| 33.25 | 79 | 0.607545 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_tokenizer.py
|
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
from .specifiers import Specifier
@dataclass
class Token:
name: str
text: str
position: int
class ParserSyntaxError(Exception):
"""The provided source text could not be parsed correctly."""
def __init__(
self,
message: str,
*,
source: str,
span: Tuple[int, int],
) -> None:
self.span = span
self.message = message
self.source = source
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
span_start=open_position,
)
self.read()
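# Illustrative sketch (not part of the vendored module): stepping the
# tokenizer through a small requirement fragment with the default rules.
def _example_tokenizer_walkthrough():
    tokenizer = Tokenizer("name >= 1.0", rules=DEFAULT_RULES)
    name = tokenizer.expect("IDENTIFIER", expected="package name")
    assert name.text == "name"
    tokenizer.consume("WS")
    spec = tokenizer.expect("SPECIFIER", expected="version specifier")
    assert spec.text == ">= 1.0"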
| 5,292 |
Python
| 26.42487 | 84 | 0.520975 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/specifiers.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
from packaging.version import Version
"""
import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
UnparsedVersion = Union[Version, str]
UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
CallableOperator = Callable[[Version, str], bool]
def _coerce_version(version: UnparsedVersion) -> Version:
if not isinstance(version, Version):
version = Version(version)
return version
class InvalidSpecifier(ValueError):
"""
Raised when attempting to create a :class:`Specifier` with a specifier
string that is invalid.
>>> Specifier("lolwat")
Traceback (most recent call last):
...
packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
"""
class BaseSpecifier(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __str__(self) -> str:
"""
Returns the str representation of this Specifier-like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Returns a hash value for this Specifier-like object.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier-like
objects are equal.
:param other: The other object to check against.
"""
@property
@abc.abstractmethod
def prereleases(self) -> Optional[bool]:
"""Whether or not pre-releases as a whole are allowed.
This can be set to either ``True`` or ``False`` to explicitly enable or disable
prereleases or it can be set to ``None`` (the default) to use default semantics.
"""
@prereleases.setter
def prereleases(self, value: bool) -> None:
"""Setter for :attr:`prereleases`.
:param value: The value to set.
"""
@abc.abstractmethod
def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class Specifier(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
It is generally not required to instantiate this manually. You should instead
prefer to work with :class:`SpecifierSet` instead, which can parse
comma-separated version specifiers (which is what package metadata contains).
"""
_operator_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(
r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
"""Initialize a Specifier instance.
:param spec:
The string representation of a specifier which will be parsed and
normalized before use.
:param prereleases:
This tells the specifier if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given specifier is invalid (i.e. bad syntax).
"""
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
# https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
@property # type: ignore[override]
def prereleases(self) -> bool:
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*; if it does, we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if Version(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
@property
def operator(self) -> str:
"""The operator of this specifier.
>>> Specifier("==1.2.3").operator
'=='
"""
return self._spec[0]
@property
def version(self) -> str:
"""The version of this specifier.
>>> Specifier("==1.2.3").version
'1.2.3'
"""
return self._spec[1]
def __repr__(self) -> str:
"""A representation of the Specifier that shows all internal state.
>>> Specifier('>=1.0.0')
<Specifier('>=1.0.0')>
>>> Specifier('>=1.0.0', prereleases=False)
<Specifier('>=1.0.0', prereleases=False)>
>>> Specifier('>=1.0.0', prereleases=True)
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the Specifier that can be round-tripped.
>>> str(Specifier('>=1.0.0'))
'>=1.0.0'
>>> str(Specifier('>=1.0.0', prereleases=False))
'>=1.0.0'
"""
return "{}{}".format(*self._spec)
@property
def _canonical_spec(self) -> Tuple[str, str]:
canonical_version = canonicalize_version(
self._spec[1],
strip_trailing_zero=(self._spec[0] != "~="),
)
return self._spec[0], canonical_version
def __hash__(self) -> int:
return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
"""Whether or not the two Specifier-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
True
>>> (Specifier("==1.2.3", prereleases=False) ==
... Specifier("==1.2.3", prereleases=True))
True
>>> Specifier("==1.2.3") == "==1.2.3"
True
>>> Specifier("==1.2.3") == Specifier("==1.2.4")
False
>>> Specifier("==1.2.3") == Specifier("~=1.2.3")
False
"""
if isinstance(other, str):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, f"_compare_{self._operators[op]}"
)
return operator_callable
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
        # Compatible releases have an equivalent combination of >= and ==.
        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore suffix segments.
prefix = ".".join(
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
def _compare_equal(self, prospective: Version, spec: str) -> bool:
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
normalized_prospective = canonicalize_version(
prospective.public, strip_trailing_zero=False
)
# Get the normalized version string ignoring the trailing .*
normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
split_spec = _version_split(normalized_spec)
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
split_prospective = _version_split(normalized_prospective)
# 0-pad the prospective version before shortening it to get the correct
# shortened version.
padded_prospective, _ = _pad_version(split_prospective, split_spec)
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
shortened_prospective = padded_prospective[: len(split_spec)]
return shortened_prospective == split_spec
else:
# Convert our spec string into a Version
spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec_version.local:
prospective = Version(prospective.public)
return prospective == spec_version
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
return not self._compare_equal(prospective, spec)
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) <= Version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) >= Version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself is
        # a pre-release version, we do not accept pre-release versions of
        # the version mentioned in the specifier (e.g. <3.1 should not
        # match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself is
        # a post-release version, we do not accept post-release versions of
        # the version mentioned in the specifier (e.g. >3.1 should not
        # match 3.1.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
def __contains__(self, item: Union[str, Version]) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in Specifier(">=1.2.3")
True
>>> Version("1.2.3") in Specifier(">=1.2.3")
True
>>> "1.0.0" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
True
"""
return self.contains(item)
def contains(
self, item: UnparsedVersion, prereleases: Optional[bool] = None
) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this Specifier. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> Specifier(">=1.2.3").contains("1.2.3")
True
>>> Specifier(">=1.2.3").contains(Version("1.2.3"))
True
>>> Specifier(">=1.2.3").contains("1.0.0")
False
>>> Specifier(">=1.2.3").contains("1.3.0a1")
False
>>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
True
>>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
True
"""
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version, this allows us to have a shortcut for
# "2.0" in Specifier(">=2")
normalized_item = _coerce_version(item)
        # Determine if we should be supporting prereleases in this specifier
        # or not. If we do not support prereleases, we can short circuit the
        # logic when this version is a prerelease.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable: CallableOperator = self._get_operator(self.operator)
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifier.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
        ``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(Specifier().contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
['1.2.3', '1.3', <Version('1.4')>]
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
['1.5a1']
>>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
"""
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = _coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> List[str]:
result: List[str] = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
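# Illustrative example (informal, not part of the library API):
# _version_split("1.2.3rc1") -> ["1", "2", "3", "rc1"], splitting an
# attached pre-release suffix into its own segment.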
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
)
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
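# Illustrative example (informal, not part of the library API):
# _pad_version(["1", "0"], ["1", "2", "3"]) -> (["1", "0", "0"], ["1", "2", "3"]),
# 0-padding the shorter release segment so the prefix comparison in
# _compare_equal can proceed index by index.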
class SpecifierSet(BaseSpecifier):
"""This class abstracts handling of a set of version specifiers.
It can be passed a single specifier (``>=3.0``), a comma-separated list of
specifiers (``>=3.0,!=3.1``), or no specifier at all.
"""
def __init__(
self, specifiers: str = "", prereleases: Optional[bool] = None
) -> None:
"""Initialize a SpecifierSet instance.
:param specifiers:
The string representation of a specifier or a comma-separated list of
specifiers which will be parsed and normalized before use.
:param prereleases:
This tells the SpecifierSet if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
            If the given ``specifiers`` are not parseable, then this exception will be
raised.
"""
        # Split on `,` to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier.
parsed: Set[Specifier] = set()
for specifier in split_specifiers:
parsed.add(Specifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@property
def prereleases(self) -> Optional[bool]:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __repr__(self) -> str:
"""A representation of the specifier set that shows all internal state.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> SpecifierSet('>=1.0.0,!=2.0.0')
<SpecifierSet('!=2.0.0,>=1.0.0')>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<SpecifierSet({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
'!=1.0.1,>=1.0.0'
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
>>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
"""
if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other: object) -> bool:
"""Whether or not the two SpecifierSet-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
False
"""
if isinstance(other, (str, Specifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __len__(self) -> int:
"""Returns the number of specifiers in this specifier set."""
return len(self._specs)
def __iter__(self) -> Iterator[Specifier]:
"""
Returns an iterator over all the underlying :class:`Specifier` instances
in this specifier set.
>>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
[<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
"""
return iter(self._specs)
def __contains__(self, item: UnparsedVersion) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
True
"""
return self.contains(item)
def contains(
self,
item: UnparsedVersion,
prereleases: Optional[bool] = None,
installed: Optional[bool] = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this SpecifierSet. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
True
"""
# Ensure that our item is a Version instance.
if not isinstance(item, Version):
item = Version(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
if installed and item.is_prerelease:
item = Version(item.base_version)
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
['1.3', <Version('1.4')>]
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
[]
>>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
An "empty" SpecifierSet will filter items based on the presence of prerelease
        versions in the iterable.
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet("").filter(["1.5a1"]))
['1.5a1']
>>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
"""
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iter(iterable)
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases.
else:
filtered: List[UnparsedVersionVar] = []
found_prereleases: List[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return iter(found_prereleases)
return iter(filtered)
| 39,206 |
Python
| 37.857284 | 88 | 0.564837 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/markers.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import (
MarkerAtom,
MarkerList,
Op,
Value,
Variable,
parse_marker as _parse_marker,
)
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
Operator = Callable[[str, str], bool]
class InvalidMarker(ValueError):
"""
    An invalid marker was found; users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
    An attempt was made to use a name that does not exist inside of the
    environment.
"""
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
def _format_marker(
marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs, prereleases=True)
oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
# PEP 685 – Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
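# Illustrative example (informal): for a 3.11.2 interpreter at release
# candidate 1 this returns "3.11.2c1"; for a final release it returns
# just "3.11.2".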
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker:
def __init__(self, marker: str) -> None:
# Note: We create a Marker object without calling this constructor in
# packaging.requirements.Requirement. If any additional logic is
# added here, make sure to mirror/adapt Requirement.
try:
self._markers = _normalize_extra_values(_parse_marker(marker))
# The attribute `_markers` can be described in terms of a recursive type:
# MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
#
# For example, the following expression:
# python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
#
# is parsed into:
        # [
        #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
        #     'or',
        #     [
        #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
        #         'and',
        #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
        #     ]
        # ]
except ParserSyntaxError as e:
raise InvalidMarker(str(e)) from e
def __str__(self) -> str:
return _format_marker(self._markers)
def __repr__(self) -> str:
return f"<Marker('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Marker):
return NotImplemented
return str(self) == str(other)
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
current_environment["extra"] = ""
if environment is not None:
current_environment.update(environment)
# The API used to allow setting extra to None. We need to handle this
# case for backwards compatibility.
if current_environment["extra"] is None:
current_environment["extra"] = ""
return _evaluate_markers(self._markers, current_environment)
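if __name__ == "__main__":  # pragma: no cover
    # Informal usage sketch (illustrative addition, not part of upstream
    # packaging): evaluate a marker against the running interpreter and
    # against an explicitly overridden environment.
    marker = Marker('python_version >= "3.8" and os_name == "posix"')
    print("marker: ", marker)
    print("matches:", marker.evaluate())
    # Any subset of the environment mapping can be overridden.
    print("forced: ", marker.evaluate({"os_name": "nt"}))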
| 8,206 |
Python
| 31.438735 | 88 | 0.597977 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/__init__.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "23.1"
__author__ = "Donald Stufft and individual contributors"
__email__ = "[email protected]"
__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014-2019 %s" % __author__
| 501 |
Python
| 30.374998 | 79 | 0.682635 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/version.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.version import parse, Version
"""
import collections
import itertools
import re
from typing import Any, Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
NegativeInfinityType,
Tuple[
Union[
SubLocalType,
Tuple[SubLocalType, str],
Tuple[NegativeInfinityType, SubLocalType],
],
...,
],
]
CmpKey = Tuple[
int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
def parse(version: str) -> "Version":
"""Parse the given version string.
>>> parse('1.0.dev1')
<Version('1.0.dev1')>
:param version: The version string to parse.
:raises InvalidVersion: When the version string is not a valid version.
"""
return Version(version)
class InvalidVersion(ValueError):
"""Raised when a version string is not a valid version.
>>> Version("invalid")
Traceback (most recent call last):
...
packaging.version.InvalidVersion: Invalid version: 'invalid'
"""
class _BaseVersion:
_key: Tuple[Any, ...]
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.
The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.
:meta hide-value:
"""
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_key: CmpKey
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
@property
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
@property
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
@property
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
@property
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
@property
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
        # We assume that if we are given a number but not a letter, then this
        # is using the implicit post-release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
return None
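# Illustrative examples (informal): _parse_letter_version("alpha", None)
# -> ("a", 0), normalizing the spelling and supplying the implicit 0;
# _parse_letter_version("", "1") -> ("post", 1), i.e. the implicit
# post-release spelling "1.0-1" normalizes to "1.0.post1".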
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now
    # leading zeros until we come to something non-zero, then re-reverse the
    # rest back into the correct order, and use that tuple as our sorting
    # key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: PrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: PrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
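if __name__ == "__main__":  # pragma: no cover
    # Informal ordering sketch (illustrative addition, not part of upstream
    # packaging): dev-releases sort before pre-releases, which sort before
    # the final release, which sorts before local and post-release versions.
    versions = ["1.0.post1", "1.0+abc", "1.0", "1.0a1", "1.0.dev0"]
    print([str(v) for v in sorted(Version(v) for v in versions)])
    # expected: ['1.0.dev0', '1.0a1', '1.0', '1.0+abc', '1.0.post1']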
| 16,326 |
Python
| 27.897345 | 88 | 0.554086 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/utils.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)
class InvalidWheelFilename(ValueError):
"""
    An invalid wheel filename was found; users should refer to PEP 427.
"""
class InvalidSdistFilename(ValueError):
"""
    An invalid sdist filename was found; users should refer to the packaging user guide.
"""
_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str) -> NormalizedName:
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
return cast(NormalizedName, value)
def canonicalize_version(
version: Union[Version, str], *, strip_trailing_zero: bool = True
) -> str:
"""
This is very similar to Version.__str__, but has one subtle difference
with the way it handles the release segment.
"""
if isinstance(version, str):
try:
parsed = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
else:
parsed = version
parts = []
# Epoch
if parsed.epoch != 0:
parts.append(f"{parsed.epoch}!")
# Release segment
release_segment = ".".join(str(x) for x in parsed.release)
if strip_trailing_zero:
# NB: This strips trailing '.0's to normalize
release_segment = re.sub(r"(\.0)+$", "", release_segment)
parts.append(release_segment)
# Pre-release
if parsed.pre is not None:
parts.append("".join(str(x) for x in parsed.pre))
# Post-release
if parsed.post is not None:
parts.append(f".post{parsed.post}")
# Development release
if parsed.dev is not None:
parts.append(f".dev{parsed.dev}")
# Local version segment
if parsed.local is not None:
parts.append(f"+{parsed.local}")
return "".join(parts)
def parse_wheel_filename(
filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
if not filename.endswith(".whl"):
raise InvalidWheelFilename(
f"Invalid wheel filename (extension must be '.whl'): {filename}"
)
filename = filename[:-4]
dashes = filename.count("-")
if dashes not in (4, 5):
raise InvalidWheelFilename(
f"Invalid wheel filename (wrong number of parts): {filename}"
)
parts = filename.split("-", dashes - 2)
name_part = parts[0]
# See PEP 427 for the rules on escaping the project name
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
raise InvalidWheelFilename(f"Invalid project name: {filename}")
name = canonicalize_name(name_part)
version = Version(parts[1])
if dashes == 5:
build_part = parts[2]
build_match = _build_tag_regex.match(build_part)
if build_match is None:
raise InvalidWheelFilename(
f"Invalid build number: {build_part} in '{filename}'"
)
build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
else:
build = ()
tags = parse_tag(parts[-1])
return (name, version, build, tags)
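# Illustrative example (informal):
# parse_wheel_filename("pip-23.1-py3-none-any.whl") yields the normalized
# name "pip", Version("23.1"), an empty build tag, and a frozenset holding
# the single tag py3-none-any.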
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
if filename.endswith(".tar.gz"):
file_stem = filename[: -len(".tar.gz")]
elif filename.endswith(".zip"):
file_stem = filename[: -len(".zip")]
else:
raise InvalidSdistFilename(
f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
f" {filename}"
)
# We are requiring a PEP 440 version, which cannot contain dashes,
# so we split on the last dash.
name_part, sep, version_part = file_stem.rpartition("-")
if not sep:
raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
name = canonicalize_name(name_part)
version = Version(version_part)
return (name, version)
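# Illustrative example (informal): parse_sdist_filename("pip-23.1.tar.gz")
# -> ("pip", Version("23.1")); the stem is split at the last dash because a
# PEP 440 version cannot contain one.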
| 4,355 |
Python
| 29.676056 | 88 | 0.631228 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_manylinux.py
|
import collections
import contextlib
import functools
import os
import re
import sys
import warnings
from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
from ._elffile import EIClass, EIData, ELFFile, EMachine
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
try:
with open(path, "rb") as f:
yield ELFFile(f)
except (OSError, TypeError, ValueError):
yield None
def _is_linux_armhf(executable: str) -> bool:
# hard-float ABI can be detected from the ELF header of the running
# process
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
with _parse_elf(executable) as f:
return (
f is not None
and f.capacity == EIClass.C32
and f.encoding == EIData.Lsb
and f.machine == EMachine.Arm
and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
)
def _is_linux_i686(executable: str) -> bool:
with _parse_elf(executable) as f:
return (
f is not None
and f.capacity == EIClass.C32
and f.encoding == EIData.Lsb
and f.machine == EMachine.I386
)
def _have_compatible_abi(executable: str, arch: str) -> bool:
if arch == "armv7l":
return _is_linux_armhf(executable)
if arch == "i686":
return _is_linux_i686(executable)
return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
class _GLibCVersion(NamedTuple):
major: int
minor: int
def _glibc_version_string_confstr() -> Optional[str]:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes() -> Optional[str]:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
    # can't proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _glibc_version_string() -> Optional[str]:
"""Returns glibc version string, or None if not using glibc."""
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
random junk that might come after the minor version -- this might happen
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor,"
f" got: {version_str}",
RuntimeWarning,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
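# Illustrative example (informal): _parse_glibc_version("2.20-2014.11")
# -> (2, 20), discarding the vendor suffix; an unparsable string yields
# (-1, -1) after emitting a RuntimeWarning.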
@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
version_str = _glibc_version_string()
if version_str is None:
return (-1, -1)
return _parse_glibc_version(version_str)
# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
sys_glibc = _get_glibc_version()
if sys_glibc < version:
return False
# Check for presence of _manylinux module.
try:
import _manylinux # noqa
except ImportError:
return True
if hasattr(_manylinux, "manylinux_compatible"):
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
if result is not None:
return bool(result)
return True
if version == _GLibCVersion(2, 5):
if hasattr(_manylinux, "manylinux1_compatible"):
return bool(_manylinux.manylinux1_compatible)
if version == _GLibCVersion(2, 12):
if hasattr(_manylinux, "manylinux2010_compatible"):
return bool(_manylinux.manylinux2010_compatible)
if version == _GLibCVersion(2, 17):
if hasattr(_manylinux, "manylinux2014_compatible"):
return bool(_manylinux.manylinux2014_compatible)
return True
_LEGACY_MANYLINUX_MAP = {
# CentOS 7 w/ glibc 2.17 (PEP 599)
(2, 17): "manylinux2014",
# CentOS 6 w/ glibc 2.12 (PEP 571)
(2, 12): "manylinux2010",
# CentOS 5 w/ glibc 2.5 (PEP 513)
(2, 5): "manylinux1",
}
def platform_tags(linux: str, arch: str) -> Iterator[str]:
if not _have_compatible_abi(sys.executable, arch):
return
    # The oldest glibc to be supported, regardless of architecture, is (2, 17).
too_old_glibc2 = _GLibCVersion(2, 16)
if arch in {"x86_64", "i686"}:
        # On x86/i686, the oldest glibc to be supported is (2, 5).
too_old_glibc2 = _GLibCVersion(2, 4)
current_glibc = _GLibCVersion(*_get_glibc_version())
glibc_max_list = [current_glibc]
# We can assume compatibility across glibc major versions.
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
#
# Build a list of maximum glibc versions so that we can
# output the canonical list of all glibc from current_glibc
# down to too_old_glibc2, including all intermediary versions.
for glibc_major in range(current_glibc.major - 1, 1, -1):
glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
for glibc_max in glibc_max_list:
if glibc_max.major == too_old_glibc2.major:
min_minor = too_old_glibc2.minor
else:
# For other glibc major versions oldest supported is (x, 0).
min_minor = -1
for glibc_minor in range(glibc_max.minor, min_minor, -1):
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
tag = "manylinux_{}_{}".format(*glibc_version)
if _is_compatible(tag, arch, glibc_version):
yield linux.replace("linux", tag)
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
if glibc_version in _LEGACY_MANYLINUX_MAP:
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
if _is_compatible(legacy_tag, arch, glibc_version):
yield linux.replace("linux", legacy_tag)
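# Illustrative sketch (informal; actual output depends on the running
# system): on an x86_64 host with glibc 2.31,
# platform_tags("linux_x86_64", "x86_64") yields "manylinux_2_31_x86_64"
# down through "manylinux_2_5_x86_64", interleaving the legacy aliases
# manylinux2014, manylinux2010, and manylinux1 at glibc 2.17, 2.12, and
# 2.5 respectively.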
| 8,926 |
Python
| 36.041494 | 88 | 0.647995 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_musllinux.py
|
"""PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
import functools
import re
import subprocess
import sys
from typing import Iterator, NamedTuple, Optional
from ._elffile import ELFFile
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
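# Illustrative example (informal): given loader output whose first line
# starts with "musl" and whose second line reads "Version 1.2.2",
# _parse_musl_version returns _MuslVersion(major=1, minor=2).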
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
try:
with open(executable, "rb") as f:
ld = ELFFile(f).interpreter
except (OSError, TypeError, ValueError):
return None
if ld is None or "musl" not in ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
return _parse_musl_version(proc.stderr)
def platform_tags(arch: str) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param arch: Should be the part of platform tag after the ``linux_``
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
prerequisite for the current platform to be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
| 2,524 |
Python
| 30.172839 | 80 | 0.650555 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/tags.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import logging
import platform
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from . import _manylinux, _musllinux
logger = logging.getLogger(__name__)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
INTERPRETER_SHORT_NAMES: Dict[str, str] = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
}
_32_BIT_INTERPRETER = sys.maxsize <= 2**32
class Tag:
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
        # that a set calls its `isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
@property
def interpreter(self) -> str:
return self._interpreter
@property
def abi(self) -> str:
return self._abi
@property
def platform(self) -> str:
return self._platform
def __eq__(self, other: object) -> bool:
if not isinstance(other, Tag):
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
def __repr__(self) -> str:
return f"<{self} @ {id(self)}>"
def parse_tag(tag: str) -> FrozenSet[Tag]:
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
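# Sketch (not part of the vendored module): a compressed tag set expands
# into the cross product of its dot-separated components.
def _demo_parse_tag() -> None:
    tags = parse_tag("cp38.cp39-abi3-manylinux1_x86_64.manylinux2014_x86_64")
    assert len(tags) == 4  # 2 interpreters x 1 abi x 2 platforms
    assert Tag("cp39", "abi3", "manylinux2014_x86_64") in tags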
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
value: Union[int, str, None] = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string: str) -> str:
return string.replace(".", "_").replace("-", "_").replace(" ", "_")
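# Sketch (not part of the vendored module): dots, dashes and spaces are all
# folded to underscores so platform strings become valid tag components.
def _demo_normalize_string() -> None:
    assert _normalize_string("macosx-11.0-arm64") == "macosx_11_0_arm64"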
def _abi3_applies(python_version: PythonVersion) -> bool:
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append(f"cp{version}")
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version: Optional[PythonVersion] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
    If python_version only specifies a major version, then user-provided ABIs and
    the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
if not python_version:
python_version = sys.version_info[:2]
interpreter = f"cp{_version_nodot(python_version[:2])}"
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
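# Sketch (not part of the vendored module): passing explicit arguments keeps
# cpython_tags deterministic instead of probing the running interpreter; the
# platform string here is illustrative only.
def _demo_cpython_tags() -> None:
    tags = list(cpython_tags((3, 11), abis=["cp311"], platforms=["manylinux_2_17_x86_64"]))
    assert tags[0] == Tag("cp311", "cp311", "manylinux_2_17_x86_64")
    assert Tag("cp32", "abi3", "manylinux_2_17_x86_64") in tags  # abi3 back to 3.2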
def _generic_abi() -> List[str]:
"""
Return the ABI tag based on EXT_SUFFIX.
"""
# The following are examples of `EXT_SUFFIX`.
# We want to keep the parts which are related to the ABI and remove the
# parts which are related to the platform:
# - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310
# - mac: '.cpython-310-darwin.so' => cp310
# - win: '.cp310-win_amd64.pyd' => cp310
# - win: '.pyd' => cp37 (uses _cpython_abis())
# - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
# - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
# => graalpy_38_native
ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
parts = ext_suffix.split(".")
if len(parts) < 3:
        # CPython 3.7 and earlier use ".pyd" on Windows.
return _cpython_abis(sys.version_info[:2])
soabi = parts[1]
if soabi.startswith("cpython"):
# non-windows
abi = "cp" + soabi.split("-")[1]
elif soabi.startswith("cp"):
# windows
abi = soabi.split("-")[0]
elif soabi.startswith("pypy"):
abi = "-".join(soabi.split("-")[:2])
elif soabi.startswith("graalpy"):
abi = "-".join(soabi.split("-")[:3])
elif soabi:
# pyston, ironpython, others?
abi = soabi
else:
return []
return [_normalize_string(abi)]
def generic_tags(
interpreter: Optional[str] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
else:
abis = list(abis)
platforms = list(platforms or platform_tags())
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield f"py{_version_nodot(py_version[:2])}"
yield f"py{py_version[0]}"
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield f"py{_version_nodot((py_version[0], minor))}"
def compatible_tags(
python_version: Optional[PythonVersion] = None,
interpreter: Optional[str] = None,
platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
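# Sketch (not part of the vendored module): for a 10.15 x86_64 target the
# single-arch slice comes first, then the fat and universal variants.
def _demo_mac_binary_formats() -> None:
    formats = _mac_binary_formats((10, 15), "x86_64")
    assert formats == ["x86_64", "intel", "fat64", "fat32", "universal2", "universal"]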
def mac_platforms(
version: Optional[MacVersion] = None, arch: Optional[str] = None
) -> Iterator[str]:
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
version_str, _, cpu_arch = platform.mac_ver()
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
if version == (10, 16):
# When built against an older macOS SDK, Python will report macOS 10.16
# instead of the real version.
version_str = subprocess.run(
[
sys.executable,
"-sS",
"-c",
"import platform; print(platform.mac_ver()[0])",
],
check=True,
env={"SYSTEM_VERSION_COMPAT": "0"},
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
if arch is None:
arch = _mac_arch(cpu_arch)
if (10, 0) <= version and version < (11, 0):
# Prior to Mac OS 11, each yearly release of Mac OS bumped the
# "minor" version number. The major version was always 10.
for minor_version in range(version[1], -1, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=10, minor=minor_version, binary_format=binary_format
)
if version >= (11, 0):
# Starting with Mac OS 11, each yearly release bumps the major version
# number. The minor versions are now the midyear updates.
for major_version in range(version[0], 10, -1):
compat_version = major_version, 0
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=major_version, minor=0, binary_format=binary_format
)
if version >= (11, 0):
# Mac OS 11 on x86_64 is compatible with binaries from previous releases.
# Arm64 support was introduced in 11.0, so no Arm binaries from previous
# releases exist.
#
# However, the "universal2" binary format can have a
# macOS version earlier than 11.0 when the x86_64 part of the binary supports
# that version of macOS.
if arch == "x86_64":
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
else:
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_format = "universal2"
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
linux = _normalize_string(sysconfig.get_platform())
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv7l"
_, arch = linux.split("_", 1)
yield from _manylinux.platform_tags(linux, arch)
yield from _musllinux.platform_tags(arch)
yield linux
def _generic_platforms() -> Iterator[str]:
yield _normalize_string(sysconfig.get_platform())
def platform_tags() -> Iterator[str]:
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name() -> str:
"""
Returns the name of the running interpreter.
Some implementations have a reserved, two-letter abbreviation which will
be returned when appropriate.
"""
name = sys.implementation.name
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(*, warn: bool = False) -> str:
"""
Returns the version of the running interpreter.
"""
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
def _version_nodot(version: PythonVersion) -> str:
return "".join(map(str, version))
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
interp_name = interpreter_name()
if interp_name == "cp":
yield from cpython_tags(warn=warn)
else:
yield from generic_tags()
if interp_name == "pp":
interp = "pp3"
elif interp_name == "cp":
interp = "cp" + interpreter_version(warn=warn)
else:
interp = None
yield from compatible_tags(interpreter=interp)
| 18,106 |
Python
| 32.102377 | 88 | 0.591958 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/metadata.py
|
import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import sys
import typing
from typing import Dict, List, Optional, Tuple, Union, cast
if sys.version_info >= (3, 8): # pragma: no cover
from typing import TypedDict
else: # pragma: no cover
if typing.TYPE_CHECKING:
from typing_extensions import TypedDict
else:
try:
from typing_extensions import TypedDict
except ImportError:
class TypedDict:
def __init_subclass__(*_args, **_kwargs):
pass
# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# format offers some very basic primitives in *some* way, then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
"""A dictionary of raw core metadata.
Each field in core metadata maps to a key of this dictionary (when data is
provided). The key is lower-case and underscores are used instead of dashes
compared to the equivalent core metadata field. Any core metadata field that
can be specified multiple times or can hold multiple values in a single
    field has a key with a plural name.
Core metadata fields that can be specified multiple times are stored as a
list or dict depending on which is appropriate for the field. Any fields
which hold multiple values in a single field are stored as a list.
"""
# Metadata 1.0 - PEP 241
metadata_version: str
name: str
version: str
platforms: List[str]
summary: str
description: str
keywords: List[str]
home_page: str
author: str
author_email: str
license: str
# Metadata 1.1 - PEP 314
supported_platforms: List[str]
download_url: str
classifiers: List[str]
requires: List[str]
provides: List[str]
obsoletes: List[str]
# Metadata 1.2 - PEP 345
maintainer: str
maintainer_email: str
requires_dist: List[str]
provides_dist: List[str]
obsoletes_dist: List[str]
requires_python: str
requires_external: List[str]
project_urls: Dict[str, str]
# Metadata 2.0
# PEP 426 attempted to completely revamp the metadata format
# but got stuck without ever being able to build consensus on
# it and ultimately ended up withdrawn.
#
# However, a number of tools had started emitting METADATA with
# `2.0` Metadata-Version, so for historical reasons, this version
# was skipped.
# Metadata 2.1 - PEP 566
description_content_type: str
provides_extra: List[str]
# Metadata 2.2 - PEP 643
dynamic: List[str]
# Metadata 2.3 - PEP 685
# No new fields were added in PEP 685; just some edge cases were
# tightened up to provide better interoperability.
_STRING_FIELDS = {
"author",
"author_email",
"description",
"description_content_type",
"download_url",
"home_page",
"license",
"maintainer",
"maintainer_email",
"metadata_version",
"name",
"requires_python",
"summary",
"version",
}
_LIST_STRING_FIELDS = {
"classifiers",
"dynamic",
"obsoletes",
"obsoletes_dist",
"platforms",
"provides",
"provides_dist",
"provides_extra",
"requires",
"requires_dist",
"requires_external",
"supported_platforms",
}
def _parse_keywords(data: str) -> List[str]:
"""Split a string of comma-separate keyboards into a list of keywords."""
return [k.strip() for k in data.split(",")]
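# Sketch (not part of the vendored module): whitespace around the commas is
# stripped, matching the ", ".join(keywords) serialization noted later on.
def _demo_parse_keywords() -> None:
    assert _parse_keywords("packaging, metadata ,pep") == ["packaging", "metadata", "pep"]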
def _parse_project_urls(data: List[str]) -> Dict[str, str]:
"""Parse a list of label/URL string pairings separated by a comma."""
urls = {}
for pair in data:
# Our logic is slightly tricky here as we want to try and do
# *something* reasonable with malformed data.
#
        # The main thing that we have to worry about is data that does
        # not have a ',' at all to split the label from the value. There
# isn't a singular right answer here, and we will fail validation
# later on (if the caller is validating) so it doesn't *really*
# matter, but since the missing value has to be an empty str
# and our return value is dict[str, str], if we let the key
# be the missing value, then they'd have multiple '' values that
        # overwrite each other in an accumulating dict.
#
        # The other potential issue is that it's possible to have the
# same label multiple times in the metadata, with no solid "right"
# answer with what to do in that case. As such, we'll do the only
# thing we can, which is treat the field as unparseable and add it
# to our list of unparsed fields.
parts = [p.strip() for p in pair.split(",", 1)]
parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items
# TODO: The spec doesn't say anything about if the keys should be
# considered case sensitive or not... logically they should
# be case-preserving and case-insensitive, but doing that
# would open up more cases where we might have duplicate
# entries.
label, url = parts
if label in urls:
# The label already exists in our set of urls, so this field
# is unparseable, and we can just add the whole thing to our
# unparseable data and stop processing it.
raise KeyError("duplicate labels in project urls")
urls[label] = url
return urls
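# Sketch (not part of the vendored module): each pair splits on its first
# comma only, and a duplicated label raises KeyError so the caller can file
# the whole field under its unparsed data.
def _demo_parse_project_urls() -> None:
    urls = _parse_project_urls(["Homepage, https://example.com"])
    assert urls == {"Homepage": "https://example.com"}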
def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
"""Get the body of the message."""
# If our source is a str, then our caller has managed encodings for us,
# and we don't need to deal with it.
if isinstance(source, str):
payload: str = msg.get_payload()
return payload
# If our source is a bytes, then we're managing the encoding and we need
# to deal with it.
else:
bpayload: bytes = msg.get_payload(decode=True)
try:
return bpayload.decode("utf8", "strict")
except UnicodeDecodeError:
raise ValueError("payload in an invalid encoding")
# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.
# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
"author": "author",
"author-email": "author_email",
"classifier": "classifiers",
"description": "description",
"description-content-type": "description_content_type",
"download-url": "download_url",
"dynamic": "dynamic",
"home-page": "home_page",
"keywords": "keywords",
"license": "license",
"maintainer": "maintainer",
"maintainer-email": "maintainer_email",
"metadata-version": "metadata_version",
"name": "name",
"obsoletes": "obsoletes",
"obsoletes-dist": "obsoletes_dist",
"platform": "platforms",
"project-url": "project_urls",
"provides": "provides",
"provides-dist": "provides_dist",
"provides-extra": "provides_extra",
"requires": "requires",
"requires-dist": "requires_dist",
"requires-external": "requires_external",
"requires-python": "requires_python",
"summary": "summary",
"supported-platform": "supported_platforms",
"version": "version",
}
def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
"""Parse a distribution's metadata.
This function returns a two-item tuple of dicts. The first dict is of
recognized fields from the core metadata specification. Fields that can be
parsed and translated into Python's built-in types are converted
appropriately. All other fields are left as-is. Fields that are allowed to
appear multiple times are stored as lists.
The second dict contains all other fields from the metadata. This includes
any unrecognized fields. It also includes any fields which are expected to
be parsed into a built-in type but were not formatted appropriately. Finally,
any fields that are expected to appear only once but are repeated are
included in this dict.
"""
raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
unparsed: Dict[str, List[str]] = {}
if isinstance(data, str):
parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
else:
parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
# We have to wrap parsed.keys() in a set, because in the case of multiple
# values for a key (a list), the key will appear multiple times in the
# list of keys, but we're avoiding that by using get_all().
for name in frozenset(parsed.keys()):
# Header names in RFC are case insensitive, so we'll normalize to all
# lower case to make comparisons easier.
name = name.lower()
# We use get_all() here, even for fields that aren't multiple use,
# because otherwise someone could have e.g. two Name fields, and we
# would just silently ignore it rather than doing something about it.
headers = parsed.get_all(name)
# The way the email module works when parsing bytes is that it
# unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all()),
# it looks to see if the str has any surrogate escapes, and if it does
# it wraps it in a Header object instead of returning the string.
#
# As such, we'll look for those Header objects, and fix up the encoding.
value = []
# Flag if we have run into any issues processing the headers, thus
# signalling that the data belongs in 'unparsed'.
valid_encoding = True
for h in headers:
# It's unclear if this can return more types than just a Header or
# a str, so we'll just assert here to make sure.
assert isinstance(h, (email.header.Header, str))
# If it's a header object, we need to do our little dance to get
# the real data out of it. In cases where there is invalid data
# we're going to end up with mojibake, but there's no obvious, good
# way around that without reimplementing parts of the Header object
# ourselves.
#
            # That should be fine since, if mojibake happens, this key is
# going into the unparsed dict anyways.
if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
# can be independently encoded, so we'll need to check each
# of them.
chunks: List[Tuple[bytes, Optional[str]]] = []
for bin, encoding in email.header.decode_header(h):
try:
bin.decode("utf8", "strict")
except UnicodeDecodeError:
# Enable mojibake.
encoding = "latin1"
valid_encoding = False
else:
encoding = "utf8"
chunks.append((bin, encoding))
# Turn our chunks back into a Header object, then let that
# Header object do the right thing to turn them into a
# string for us.
value.append(str(email.header.make_header(chunks)))
# This is already a string, so just add it.
else:
value.append(h)
# We've processed all of our values to get them into a list of str,
# but we may have mojibake data, in which case this is an unparsed
# field.
if not valid_encoding:
unparsed[name] = value
continue
raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
if raw_name is None:
# This is a bit of a weird situation, we've encountered a key that
# we don't know what it means, so we don't know whether it's meant
# to be a list or not.
#
# Since we can't really tell one way or another, we'll just leave it
# as a list, even though it may be a single item list, because that's
# what makes the most sense for email headers.
unparsed[name] = value
continue
# If this is one of our string fields, then we'll check to see if our
# value is a list of a single item. If it is then we'll assume that
# it was emitted as a single string, and unwrap the str from inside
# the list.
#
# If it's any other kind of data, then we haven't the faintest clue
# what we should parse it as, and we have to just add it to our list
# of unparsed stuff.
if raw_name in _STRING_FIELDS and len(value) == 1:
raw[raw_name] = value[0]
# If this is one of our list of string fields, then we can just assign
# the value, since email *only* has strings, and our get_all() call
# above ensures that this is a list.
elif raw_name in _LIST_STRING_FIELDS:
raw[raw_name] = value
# Special Case: Keywords
# The keywords field is implemented in the metadata spec as a str,
# but it conceptually is a list of strings, and is serialized using
# ", ".join(keywords), so we'll do some light data massaging to turn
# this into what it logically is.
elif raw_name == "keywords" and len(value) == 1:
raw[raw_name] = _parse_keywords(value[0])
# Special Case: Project-URL
# The project urls is implemented in the metadata spec as a list of
# specially-formatted strings that represent a key and a value, which
# is fundamentally a mapping, however the email format doesn't support
# mappings in a sane way, so it was crammed into a list of strings
# instead.
#
# We will do a little light data massaging to turn this into a map as
# it logically should be.
elif raw_name == "project_urls":
try:
raw[raw_name] = _parse_project_urls(value)
except KeyError:
unparsed[name] = value
# Nothing that we've done has managed to parse this, so it'll just
# throw it in our unparseable data and move on.
else:
unparsed[name] = value
# We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
# is the possibility of it being set both ways, in which case we put both
# in 'unparsed' since we don't know which is right.
try:
payload = _get_payload(parsed, data)
except ValueError:
unparsed.setdefault("description", []).append(
parsed.get_payload(decode=isinstance(data, bytes))
)
else:
if payload:
# Check to see if we've already got a description, if so then both
# it, and this body move to unparseable.
if "description" in raw:
description_header = cast(str, raw.pop("description"))
unparsed.setdefault("description", []).extend(
[description_header, payload]
)
elif "description" in unparsed:
unparsed["description"].append(payload)
else:
raw["description"] = payload
    # We need to cast our `raw` to a RawMetadata, because a TypedDict only
    # supports literal key names while we compute ours dynamically; the way
    # this function is implemented guarantees that `raw` only ever holds
    # valid key names.
return cast(RawMetadata, raw), unparsed
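# Sketch (not part of the vendored module): a minimal METADATA document with
# a hypothetical unknown header shows the raw/unparsed split.
def _demo_parse_email() -> None:
    raw, unparsed = parse_email("Metadata-Version: 2.1\nName: demo\nX-Unknown: 1")
    assert raw["name"] == "demo"
    assert raw["metadata_version"] == "2.1"
    assert unparsed == {"x-unknown": ["1"]}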
| 16,397 |
Python
| 39.092909 | 88 | 0.631152 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_elffile.py
|
"""
ELF file parser.
This provides a class ``ELFFile`` that parses an ELF executable with an
interface similar to ``ZipFile``. Only the read interface is implemented.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
import enum
import os
import struct
from typing import IO, Optional, Tuple
class ELFInvalid(ValueError):
pass
class EIClass(enum.IntEnum):
C32 = 1
C64 = 2
class EIData(enum.IntEnum):
Lsb = 1
Msb = 2
class EMachine(enum.IntEnum):
I386 = 3
S390 = 22
Arm = 40
X8664 = 62
AArc64 = 183
class ELFFile:
"""
Representation of an ELF executable.
"""
def __init__(self, f: IO[bytes]) -> None:
self._f = f
try:
ident = self._read("16B")
except struct.error:
raise ELFInvalid("unable to parse identification")
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or "
f"encoding ({self.encoding})"
)
try:
(
_,
self.machine, # Architecture type.
_,
_,
self._e_phoff, # Offset of program header.
_,
self.flags, # Processor-specific flags.
_,
self._e_phentsize, # Size of section.
self._e_phnum, # Number of sections.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
def _read(self, fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> Optional[str]:
"""
The path recorded in the ``PT_INTERP`` section header.
"""
for index in range(self._e_phnum):
self._f.seek(self._e_phoff + self._e_phentsize * index)
try:
data = self._read(self._p_fmt)
except struct.error:
continue
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return None
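# Sketch (not part of the vendored module): anything without the 16-byte ELF
# identification header is rejected up front.
def _demo_elffile_rejects_non_elf() -> None:
    import io
    try:
        ELFFile(io.BytesIO(b"\x00" * 16))
    except ELFInvalid as exc:
        assert "invalid magic" in str(exc)
    else:
        raise AssertionError("expected ELFInvalid")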
| 3,266 |
Python
| 28.972477 | 86 | 0.527863 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/packaging/_parser.py
|
"""Handwritten parser of dependency specifiers.
The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
"""
import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
class Node:
def __init__(self, value: str) -> None:
self.value = value
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"<{self.__class__.__name__}('{self}')>"
def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
# mypy does not support recursive type definition
# https://github.com/python/mypy/issues/731
MarkerAtom = Any
MarkerList = List[Any]
class ParsedRequirement(NamedTuple):
name: str
url: str
extras: List[str]
specifier: str
marker: Optional[MarkerList]
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
if specifier
else "name and no valid version specifier"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens(
"LEFT_BRACKET",
"RIGHT_BRACKET",
around="extras",
):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: List[str] = []
if not tokenizer.check("IDENTIFIER"):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
break
tokenizer.read()
tokenizer.consume("WS")
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extras.append(extra_token.text)
return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
"""
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="version specifier",
):
tokenizer.consume("WS")
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
span_start = tokenizer.position
parsed_specifiers += tokenizer.read().text
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
tokenizer.raise_syntax_error(
".* suffix can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position + 1,
)
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
tokenizer.raise_syntax_error(
"Local version label can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position,
)
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
"""
marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="marker expression",
):
tokenizer.consume("WS")
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
)
def process_env_var(env_var: str) -> Variable:
if (
env_var == "platform_python_implementation"
or env_var == "python_implementation"
):
return Variable("platform_python_implementation")
else:
return Variable(env_var)
def process_python_str(python_str: str) -> Value:
value = ast.literal_eval(python_str)
return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of "
"<=, <, !=, ==, >=, >, ~=, ===, in, not in"
)
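# Sketch (not part of the vendored module): a full dependency specifier
# decomposes into name, extras, specifier and marker.
def _demo_parse_requirement() -> None:
    req = parse_requirement('demo[extra]>=1.0; python_version < "3.8"')
    assert req.name == "demo"
    assert req.extras == ["extra"]
    assert req.specifier == ">=1.0"
    assert req.marker is not None and req.url == ""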
| 10,194 |
Python
| 27.799435 | 88 | 0.593388 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_meta.py
|
from ._compat import Protocol
from typing import Any, Dict, Iterator, List, TypeVar, Union
_T = TypeVar("_T")
class PackageMetadata(Protocol):
def __len__(self) -> int:
... # pragma: no cover
def __contains__(self, item: str) -> bool:
... # pragma: no cover
def __getitem__(self, key: str) -> str:
... # pragma: no cover
def __iter__(self) -> Iterator[str]:
... # pragma: no cover
def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
"""
Return all values associated with a possibly multi-valued key.
"""
@property
def json(self) -> Dict[str, Union[str, List[str]]]:
"""
A JSON-compatible form of the metadata.
"""
class SimplePath(Protocol[_T]):
"""
A minimal subset of pathlib.Path required by PathDistribution.
"""
def joinpath(self) -> _T:
... # pragma: no cover
def __truediv__(self, other: Union[str, _T]) -> _T:
... # pragma: no cover
@property
def parent(self) -> _T:
... # pragma: no cover
def read_text(self) -> str:
... # pragma: no cover
| 1,165 |
Python
| 22.32 | 76 | 0.535622 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_itertools.py
|
from itertools import filterfalse
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
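# Sketch (not part of the vendored module): mirrors the doctest-style
# comments above, with and without a key function.
def _demo_unique_everseen() -> None:
    assert list(unique_everseen("AAAABBBCCDAABBB")) == ["A", "B", "C", "D"]
    assert list(unique_everseen("ABBCcAD", key=str.lower)) == ["A", "B", "C", "D"]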
# copied from more_itertools 8.8
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
| 2,068 |
Python
| 26.959459 | 79 | 0.558027 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_collections.py
|
import collections
# from jaraco.collections 3.3
class FreezableDefaultDict(collections.defaultdict):
"""
Often it is desirable to prevent the mutation of
a default dict after its initial construction, such
as to prevent mutation during iteration.
>>> dd = FreezableDefaultDict(list)
>>> dd[0].append('1')
>>> dd.freeze()
>>> dd[1]
[]
>>> len(dd)
1
"""
def __missing__(self, key):
return getattr(self, '_frozen', super().__missing__)(key)
def freeze(self):
self._frozen = lambda key: self.default_factory()
class Pair(collections.namedtuple('Pair', 'name value')):
@classmethod
def parse(cls, text):
return cls(*map(str.strip, text.split("=", 1)))
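# Sketch (not part of the vendored module): Pair.parse splits on the first
# '=' only and strips the surrounding whitespace from both halves.
def _demo_pair_parse() -> None:
    assert Pair.parse("name = value=1") == Pair(name="name", value="value=1")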
| 743 |
Python
| 22.999999 | 65 | 0.620458 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_compat.py
|
import sys
import platform
__all__ = ['install', 'NullFinder', 'Protocol']
try:
from typing import Protocol
except ImportError: # pragma: no cover
# Python 3.7 compatibility
from ..typing_extensions import Protocol # type: ignore
def install(cls):
"""
Class decorator for installation on sys.meta_path.
Adds the backport DistributionFinder to sys.meta_path and
attempts to disable the finder functionality of the stdlib
DistributionFinder.
"""
sys.meta_path.append(cls())
disable_stdlib_finder()
return cls
def disable_stdlib_finder():
"""
Give the backport primacy for discovering path-based distributions
by monkey-patching the stdlib O_O.
See #91 for more background for rationale on this sketchy
behavior.
"""
def matches(finder):
return getattr(
finder, '__module__', None
) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
for finder in filter(matches, sys.meta_path): # pragma: nocover
del finder.find_distributions
class NullFinder:
"""
A "Finder" (aka "MetaClassFinder") that never finds any modules,
but may find distributions.
"""
@staticmethod
def find_spec(*args, **kwargs):
return None
# In Python 2, the import system requires finders
# to have a find_module() method, but this usage
# is deprecated in Python 3 in favor of find_spec().
# For the purposes of this finder (i.e. being present
# on sys.meta_path but having no other import
# system functionality), the two methods are identical.
find_module = find_spec
def pypy_partial(val):
"""
Adjust for variable stacklevel on partial under PyPy.
Workaround for #327.
"""
is_pypy = platform.python_implementation() == 'PyPy'
return val + is_pypy
| 1,859 |
Python
| 24.479452 | 83 | 0.667025 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/__init__.py
|
import os
import re
import abc
import csv
import sys
from .. import zipp
import email
import pathlib
import operator
import textwrap
import warnings
import functools
import itertools
import posixpath
import collections
from . import _adapters, _meta, _py39compat
from ._collections import FreezableDefaultDict, Pair
from ._compat import (
NullFinder,
install,
pypy_partial,
)
from ._functools import method_cache, pass_none
from ._itertools import always_iterable, unique_everseen
from ._meta import PackageMetadata, SimplePath
from contextlib import suppress
from importlib import import_module
from importlib.abc import MetaPathFinder
from itertools import starmap
from typing import List, Mapping, Optional
__all__ = [
'Distribution',
'DistributionFinder',
'PackageMetadata',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'packages_distributions',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
return f"No package metadata was found for {self.name}"
@property
def name(self):
(name,) = self.args
return name
class Sectioned:
"""
A simple entry point config parser for performance
>>> for item in Sectioned.read(Sectioned._sample):
... print(item)
Pair(name='sec1', value='# comments ignored')
Pair(name='sec1', value='a = 1')
Pair(name='sec1', value='b = 2')
Pair(name='sec2', value='a = 2')
>>> res = Sectioned.section_pairs(Sectioned._sample)
>>> item = next(res)
>>> item.name
'sec1'
>>> item.value
Pair(name='a', value='1')
>>> item = next(res)
>>> item.value
Pair(name='b', value='2')
>>> item = next(res)
>>> item.name
'sec2'
>>> item.value
Pair(name='a', value='2')
>>> list(res)
[]
"""
_sample = textwrap.dedent(
"""
[sec1]
# comments ignored
a = 1
b = 2
[sec2]
a = 2
"""
).lstrip()
@classmethod
def section_pairs(cls, text):
return (
section._replace(value=Pair.parse(section.value))
for section in cls.read(text, filter_=cls.valid)
if section.name is not None
)
@staticmethod
def read(text, filter_=None):
lines = filter(filter_, map(str.strip, text.splitlines()))
name = None
for value in lines:
section_match = value.startswith('[') and value.endswith(']')
if section_match:
name = value.strip('[]')
continue
yield Pair(name, value)
@staticmethod
def valid(line):
return line and not line.startswith('#')
class DeprecatedTuple:
"""
Provide subscript item access for backward compatibility.
>>> recwarn = getfixture('recwarn')
>>> ep = EntryPoint(name='name', value='value', group='group')
>>> ep[:]
('name', 'value', 'group')
>>> ep[0]
'name'
>>> len(recwarn)
1
"""
# Do not remove prior to 2023-05-01 or Python 3.13
_warn = functools.partial(
warnings.warn,
"EntryPoint tuple interface is deprecated. Access members by name.",
DeprecationWarning,
stacklevel=pypy_partial(2),
)
def __getitem__(self, item):
self._warn()
return self._key()[item]
class EntryPoint(DeprecatedTuple):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
>>> ep = EntryPoint(
... name=None, group=None, value='package.module:attr [extra1, extra2]')
>>> ep.module
'package.module'
>>> ep.attr
'attr'
>>> ep.extras
['extra1', 'extra2']
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+)\s*)?'
r'((?P<extras>\[.*\])\s*)?$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
name: str
value: str
group: str
dist: Optional['Distribution'] = None
def __init__(self, name, value, group):
vars(self).update(name=name, value=value, group=group)
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
return re.findall(r'\w+', match.group('extras') or '')
def _for(self, dist):
vars(self).update(dist=dist)
return self
def matches(self, **params):
"""
EntryPoint matches the given parameters.
>>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
>>> ep.matches(group='foo')
True
>>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
True
>>> ep.matches(group='foo', name='other')
False
>>> ep.matches()
True
>>> ep.matches(extras=['extra1', 'extra2'])
True
>>> ep.matches(module='bing')
True
>>> ep.matches(attr='bong')
True
"""
attrs = (getattr(self, param) for param in params)
return all(map(operator.eq, params.values(), attrs))
def _key(self):
return self.name, self.value, self.group
def __lt__(self, other):
return self._key() < other._key()
def __eq__(self, other):
return self._key() == other._key()
def __setattr__(self, name, value):
raise AttributeError("EntryPoint objects are immutable.")
def __repr__(self):
return (
f'EntryPoint(name={self.name!r}, value={self.value!r}, '
f'group={self.group!r})'
)
def __hash__(self):
return hash(self._key())
class EntryPoints(tuple):
"""
An immutable collection of selectable EntryPoint objects.
"""
__slots__ = ()
def __getitem__(self, name): # -> EntryPoint:
"""
Get the EntryPoint in self matching name.
"""
try:
return next(iter(self.select(name=name)))
except StopIteration:
raise KeyError(name)
def select(self, **params):
"""
Select entry points from self that match the
given parameters (typically group and/or name).
"""
return EntryPoints(ep for ep in self if _py39compat.ep_matches(ep, **params))
@property
def names(self):
"""
Return the set of all names of all entry points.
"""
return {ep.name for ep in self}
@property
def groups(self):
"""
Return the set of all groups of all entry points.
"""
return {ep.group for ep in self}
@classmethod
def _from_text_for(cls, text, dist):
return cls(ep._for(dist) for ep in cls._from_text(text))
@staticmethod
def _from_text(text):
return (
EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
for item in Sectioned.section_pairs(text or '')
)
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return f'<FileHash mode: {self.mode} value: {self.value}>'
class Distribution(metaclass=abc.ABCMeta):
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name: str):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
:raises ValueError: When an invalid value is supplied for name.
"""
if not name:
raise ValueError("A distribution name is required.")
try:
return next(cls.discover(name=name))
except StopIteration:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, 'find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@property
def metadata(self) -> _meta.PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return _adapters.Message(email.message_from_string(text))
@property
def name(self):
"""Return the 'Name' metadata for the distribution package."""
return self.metadata['Name']
@property
def _normalized_name(self):
"""Return a normalized version of the name."""
return Prepared.normalize(self.name)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
@pass_none
def make_files(lines):
return list(starmap(make_file, csv.reader(lines)))
return make_files(self._read_files_distinfo() or self._read_files_egginfo())
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return pass_none(self._deps_from_requires_text)(source)
@classmethod
def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and f'extra == "{name}"'
def quoted_marker(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = f'({markers})'
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
def url_req_space(req):
"""
PEP 508 requires a space between the url_spec and the quoted_marker.
Ref python/importlib_metadata#357.
"""
# '@' is uniquely indicative of a url_req.
return ' ' * ('@' in req)
for section in sections:
space = url_req_space(section.value)
yield section.value + space + quoted_marker(section.name)
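# Illustrative sketch of the conversion performed above (section contents
# are hypothetical, not taken from any real package):
#
#     requires.txt                         emitted requirement
#     ------------                         -------------------
#     dep1                                 dep1
#     [extra2:python_version < "3"]
#     dep2                                 dep2; (python_version < "3") and extra == "extra2"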
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
        The sequence of directory paths that a distribution finder
should search.
Typically refers to Python installed package paths such as
"site-packages" directories and defaults to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
class FastPath:
"""
Micro-optimized class for searching a path for
children.
>>> FastPath('').children()
['...']
"""
@functools.lru_cache() # type: ignore
def __new__(cls, root):
return super().__new__(cls)
def __init__(self, root):
self.root = root
def joinpath(self, child):
return pathlib.Path(self.root, child)
def children(self):
with suppress(Exception):
return os.listdir(self.root or '.')
with suppress(Exception):
return self.zip_children()
return []
def zip_children(self):
zip_path = zipp.Path(self.root)
names = zip_path.root.namelist()
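        # Rebind joinpath so later child lookups resolve inside the zip
        # archive rather than on the file system.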
self.joinpath = zip_path.joinpath
return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
def search(self, name):
return self.lookup(self.mtime).search(name)
@property
def mtime(self):
with suppress(OSError):
return os.stat(self.root).st_mtime
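        # stat() failed: the root is gone or unreadable, so drop the cached
        # Lookup; the property then returns None as the cache key.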
self.lookup.cache_clear()
@method_cache
def lookup(self, mtime):
return Lookup(self)
class Lookup:
def __init__(self, path: FastPath):
base = os.path.basename(path.root).lower()
base_is_egg = base.endswith(".egg")
self.infos = FreezableDefaultDict(list)
self.eggs = FreezableDefaultDict(list)
for child in path.children():
low = child.lower()
if low.endswith((".dist-info", ".egg-info")):
# rpartition is faster than splitext and suitable for this purpose.
name = low.rpartition(".")[0].partition("-")[0]
normalized = Prepared.normalize(name)
self.infos[normalized].append(path.joinpath(child))
elif base_is_egg and low == "egg-info":
name = base.rpartition(".")[0].partition("-")[0]
legacy_normalized = Prepared.legacy_normalize(name)
self.eggs[legacy_normalized].append(path.joinpath(child))
self.infos.freeze()
self.eggs.freeze()
def search(self, prepared):
infos = (
self.infos[prepared.normalized]
if prepared
else itertools.chain.from_iterable(self.infos.values())
)
eggs = (
self.eggs[prepared.legacy_normalized]
if prepared
else itertools.chain.from_iterable(self.eggs.values())
)
return itertools.chain(infos, eggs)
class Prepared:
"""
A prepared search for metadata on a possibly-named package.
"""
normalized = None
legacy_normalized = None
def __init__(self, name):
self.name = name
if name is None:
return
self.normalized = self.normalize(name)
self.legacy_normalized = self.legacy_normalize(name)
@staticmethod
def normalize(name):
"""
PEP 503 normalization plus dashes as underscores.
"""
return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
@staticmethod
def legacy_normalize(name):
"""
        Normalize the package name according to the convention used by
        older packaging tools and specs.
"""
return name.lower().replace('-', '_')
def __bool__(self):
return bool(self.name)
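# A sketch of the two normalization schemes above (the name is illustrative):
#
#     >>> Prepared.normalize('My-Package.Name')           # doctest: +SKIP
#     'my_package_name'
#     >>> Prepared.legacy_normalize('My-Package.Name')    # doctest: +SKIP
#     'my_package.name'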
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.name, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, name, paths):
"""Find metadata directories in paths heuristically."""
prepared = Prepared(name)
return itertools.chain.from_iterable(
path.search(prepared) for path in map(FastPath, paths)
)
    @classmethod
    def invalidate_caches(cls):
FastPath.__new__.cache_clear()
class PathDistribution(Distribution):
def __init__(self, path: SimplePath):
"""Construct a distribution.
:param path: SimplePath indicating the metadata directory.
"""
self._path = path
def read_text(self, filename):
with suppress(
FileNotFoundError,
IsADirectoryError,
KeyError,
NotADirectoryError,
PermissionError,
):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
@property
def _normalized_name(self):
"""
Performance optimization: where possible, resolve the
normalized name from the file system path.
"""
stem = os.path.basename(str(self._path))
return (
pass_none(Prepared.normalize)(self._name_from_stem(stem))
or super()._normalized_name
)
@staticmethod
def _name_from_stem(stem):
"""
>>> PathDistribution._name_from_stem('foo-3.0.egg-info')
'foo'
>>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info')
'CherryPy'
>>> PathDistribution._name_from_stem('face.egg-info')
'face'
>>> PathDistribution._name_from_stem('foo.bar')
"""
filename, ext = os.path.splitext(stem)
if ext not in ('.dist-info', '.egg-info'):
return
name, sep, rest = filename.partition('-')
return name
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name) -> _meta.PackageMetadata:
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: A PackageMetadata containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
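# Illustrative use of the query functions above ('wheel' is an assumed
# installed distribution; the version shown is hypothetical):
#
#     >>> version('wheel')                  # doctest: +SKIP
#     '0.38.4'
#     >>> metadata('wheel')['Name']         # doctest: +SKIP
#     'wheel'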
_unique = functools.partial(
unique_everseen,
key=_py39compat.normalized_name,
)
"""
Wrapper for ``distributions`` to return unique distributions by name.
"""
def entry_points(**params) -> EntryPoints:
"""Return EntryPoint objects for all installed packages.
Pass selection parameters (group or name) to filter the
result to entry points matching those properties (see
EntryPoints.select()).
:return: EntryPoints for all installed packages.
"""
eps = itertools.chain.from_iterable(
dist.entry_points for dist in _unique(distributions())
)
return EntryPoints(eps).select(**params)
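# A hedged selection example ('console_scripts' is a conventional group
# that is only populated if such entry points are installed):
#
#     >>> eps = entry_points(group='console_scripts')     # doctest: +SKIP
#     >>> [ep.name for ep in eps]                         # doctest: +SKIP
#     [...]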
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
def packages_distributions() -> Mapping[str, List[str]]:
"""
Return a mapping of top-level packages to their
distributions.
>>> import collections.abc
>>> pkgs = packages_distributions()
>>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
True
"""
pkg_to_dist = collections.defaultdict(list)
for dist in distributions():
for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
pkg_to_dist[pkg].append(dist.metadata['Name'])
return dict(pkg_to_dist)
def _top_level_declared(dist):
return (dist.read_text('top_level.txt') or '').split()
def _top_level_inferred(dist):
return {
f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
for f in always_iterable(dist.files)
if f.suffix == ".py"
}
| 26,498 |
Python
| 28.280663 | 88 | 0.606272 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_adapters.py
|
import functools
import warnings
import re
import textwrap
import email.message
from ._text import FoldedCase
from ._compat import pypy_partial
# Do not remove prior to 2024-01-01 or Python 3.14
_warn = functools.partial(
warnings.warn,
"Implicit None on return values is deprecated and will raise KeyErrors.",
DeprecationWarning,
stacklevel=pypy_partial(2),
)
class Message(email.message.Message):
multiple_use_keys = set(
map(
FoldedCase,
[
'Classifier',
'Obsoletes-Dist',
'Platform',
'Project-URL',
'Provides-Dist',
'Provides-Extra',
'Requires-Dist',
'Requires-External',
'Supported-Platform',
'Dynamic',
],
)
)
"""
Keys that may be indicated multiple times per PEP 566.
"""
def __new__(cls, orig: email.message.Message):
res = super().__new__(cls)
vars(res).update(vars(orig))
return res
def __init__(self, *args, **kwargs):
self._headers = self._repair_headers()
# suppress spurious error from mypy
def __iter__(self):
return super().__iter__()
def __getitem__(self, item):
"""
Warn users that a ``KeyError`` can be expected when a
        missing key is supplied. Ref python/importlib_metadata#371.
"""
res = super().__getitem__(item)
if res is None:
_warn()
return res
def _repair_headers(self):
def redent(value):
"Correct for RFC822 indentation"
if not value or '\n' not in value:
return value
return textwrap.dedent(' ' * 8 + value)
headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
if self._payload:
headers.append(('Description', self.get_payload()))
return headers
@property
def json(self):
"""
Convert PackageMetadata to a JSON-compatible format
per PEP 0566.
"""
def transform(key):
value = self.get_all(key) if key in self.multiple_use_keys else self[key]
if key == 'Keywords':
value = re.split(r'\s+', value)
tk = key.lower().replace('-', '_')
return tk, value
return dict(map(transform, map(FoldedCase, self)))
| 2,454 |
Python
| 25.978022 | 85 | 0.537897 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_functools.py
|
import types
import functools
# from jaraco.functools 3.3
def method_cache(method, cache_wrapper=None):
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
cache_wrapper = cache_wrapper or functools.lru_cache()
def wrapper(self, *args, **kwargs):
# it's the first call, replace the method with a cached, bound method
bound_method = types.MethodType(method, self)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None
return wrapper
# From jaraco.functools 3.3
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
| 2,895 |
Python
| 26.580952 | 88 | 0.62867 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_py39compat.py
|
"""
Compatibility layer with Python 3.8/3.9
"""
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING: # pragma: no cover
# Prevent circular imports on runtime.
from . import Distribution, EntryPoint
else:
Distribution = EntryPoint = Any
def normalized_name(dist: Distribution) -> Optional[str]:
"""
Honor name normalization for distributions that don't provide ``_normalized_name``.
"""
try:
return dist._normalized_name
except AttributeError:
from . import Prepared # -> delay to prevent circular imports.
return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name'])
def ep_matches(ep: EntryPoint, **params) -> bool:
"""
Workaround for ``EntryPoint`` objects without the ``matches`` method.
"""
try:
return ep.matches(**params)
except AttributeError:
from . import EntryPoint # -> delay to prevent circular imports.
# Reconstruct the EntryPoint object to make sure it is compatible.
return EntryPoint(ep.name, ep.value, ep.group).matches(**params)
| 1,098 |
Python
| 29.527777 | 87 | 0.672131 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_metadata/_text.py
|
import re
from ._functools import method_cache
# from jaraco.text 3.5
class FoldedCase(str):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use in_:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
| 2,166 |
Python
| 20.67 | 62 | 0.576639 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/abc.py
|
import abc
import io
import itertools
import pathlib
from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
from ._compat import runtime_checkable, Protocol, StrPath
__all__ = ["ResourceReader", "Traversable", "TraversableResources"]
class ResourceReader(metaclass=abc.ABCMeta):
"""Abstract base class for loaders to provide resource reading support."""
@abc.abstractmethod
def open_resource(self, resource: Text) -> BinaryIO:
"""Return an opened, file-like object for binary reading.
The 'resource' argument is expected to represent only a file name.
If the resource cannot be found, FileNotFoundError is raised.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def resource_path(self, resource: Text) -> Text:
"""Return the file system path to the specified resource.
The 'resource' argument is expected to represent only a file name.
If the resource does not exist on the file system, raise
FileNotFoundError.
"""
# This deliberately raises FileNotFoundError instead of
# NotImplementedError so that if this method is accidentally called,
# it'll still do the right thing.
raise FileNotFoundError
@abc.abstractmethod
def is_resource(self, path: Text) -> bool:
"""Return True if the named 'path' is a resource.
Files are resources, directories are not.
"""
raise FileNotFoundError
@abc.abstractmethod
def contents(self) -> Iterable[str]:
"""Return an iterable of entries in `package`."""
raise FileNotFoundError
class TraversalError(Exception):
pass
@runtime_checkable
class Traversable(Protocol):
"""
An object with a subset of pathlib.Path methods suitable for
traversing directories and opening files.
Any exceptions that occur when accessing the backing resource
may propagate unaltered.
"""
@abc.abstractmethod
def iterdir(self) -> Iterator["Traversable"]:
"""
Yield Traversable objects in self
"""
def read_bytes(self) -> bytes:
"""
Read contents of self as bytes
"""
with self.open('rb') as strm:
return strm.read()
def read_text(self, encoding: Optional[str] = None) -> str:
"""
Read contents of self as text
"""
with self.open(encoding=encoding) as strm:
return strm.read()
@abc.abstractmethod
def is_dir(self) -> bool:
"""
Return True if self is a directory
"""
@abc.abstractmethod
def is_file(self) -> bool:
"""
Return True if self is a file
"""
def joinpath(self, *descendants: StrPath) -> "Traversable":
"""
Return Traversable resolved with any descendants applied.
Each descendant should be a path segment relative to self
and each may contain multiple levels separated by
``posixpath.sep`` (``/``).
"""
if not descendants:
return self
names = itertools.chain.from_iterable(
path.parts for path in map(pathlib.PurePosixPath, descendants)
)
target = next(names)
matches = (
traversable for traversable in self.iterdir() if traversable.name == target
)
try:
match = next(matches)
except StopIteration:
raise TraversalError(
"Target not found during traversal.", target, list(names)
)
return match.joinpath(*names)
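    # A sketch of multi-segment traversal (segment names are illustrative):
    # t.joinpath('data', 'sub/leaf.txt') resolves 'data', then 'sub', then
    # 'leaf.txt', raising TraversalError if any segment is missing.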
def __truediv__(self, child: StrPath) -> "Traversable":
"""
Return Traversable child in self
"""
return self.joinpath(child)
@abc.abstractmethod
def open(self, mode='r', *args, **kwargs):
"""
mode may be 'r' or 'rb' to open as text or binary. Return a handle
suitable for reading (same as pathlib.Path.open).
When opening as text, accepts encoding parameters such as those
accepted by io.TextIOWrapper.
"""
@property
@abc.abstractmethod
def name(self) -> str:
"""
The base name of this object without any parent references.
"""
class TraversableResources(ResourceReader):
"""
The required interface for providing traversable
resources.
"""
@abc.abstractmethod
def files(self) -> "Traversable":
"""Return a Traversable object for the loaded package."""
def open_resource(self, resource: StrPath) -> io.BufferedReader:
return self.files().joinpath(resource).open('rb')
def resource_path(self, resource: Any) -> NoReturn:
raise FileNotFoundError(resource)
def is_resource(self, path: StrPath) -> bool:
return self.files().joinpath(path).is_file()
def contents(self) -> Iterator[str]:
return (item.name for item in self.files().iterdir())
| 5,140 |
Python
| 29.064327 | 87 | 0.628405 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_itertools.py
|
from itertools import filterfalse
from typing import (
Callable,
Iterable,
Iterator,
Optional,
Set,
TypeVar,
Union,
)
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')
def unique_everseen(
iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen: Set[Union[_T, _U]] = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
| 884 |
Python
| 23.583333 | 78 | 0.580317 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_compat.py
|
# flake8: noqa
import abc
import os
import sys
import pathlib
from contextlib import suppress
from typing import Union
if sys.version_info >= (3, 10):
from zipfile import Path as ZipPath # type: ignore
else:
from ..zipp import Path as ZipPath # type: ignore
try:
from typing import runtime_checkable # type: ignore
except ImportError:
def runtime_checkable(cls): # type: ignore
return cls
try:
from typing import Protocol # type: ignore
except ImportError:
Protocol = abc.ABC # type: ignore
class TraversableResourcesLoader:
"""
Adapt loaders to provide TraversableResources and other
compatibility.
Used primarily for Python 3.9 and earlier where the native
loaders do not yet implement TraversableResources.
"""
def __init__(self, spec):
self.spec = spec
@property
def path(self):
return self.spec.origin
def get_resource_reader(self, name):
from . import readers, _adapters
def _zip_reader(spec):
with suppress(AttributeError):
return readers.ZipReader(spec.loader, spec.name)
def _namespace_reader(spec):
with suppress(AttributeError, ValueError):
return readers.NamespaceReader(spec.submodule_search_locations)
def _available_reader(spec):
with suppress(AttributeError):
return spec.loader.get_resource_reader(spec.name)
def _native_reader(spec):
reader = _available_reader(spec)
return reader if hasattr(reader, 'files') else None
def _file_reader(spec):
try:
path = pathlib.Path(self.path)
except TypeError:
return None
if path.exists():
return readers.FileReader(self)
return (
# native reader if it supplies 'files'
_native_reader(self.spec)
or
# local ZipReader if a zip module
_zip_reader(self.spec)
or
# local NamespaceReader if a namespace module
_namespace_reader(self.spec)
or
# local FileReader
_file_reader(self.spec)
# fallback - adapt the spec ResourceReader to TraversableReader
or _adapters.CompatibilityFiles(self.spec)
)
def wrap_spec(package):
"""
Construct a package spec with traversable compatibility
on the spec/loader/reader.
Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
from above for older Python compatibility (<3.10).
"""
from . import _adapters
return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
if sys.version_info >= (3, 9):
StrPath = Union[str, os.PathLike[str]]
else:
# PathLike is only subscriptable at runtime in 3.9+
StrPath = Union[str, "os.PathLike[str]"]
| 2,925 |
Python
| 25.844036 | 84 | 0.627009 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/__init__.py
|
"""Read resources contained within a package."""
from ._common import (
as_file,
files,
Package,
)
from ._legacy import (
contents,
open_binary,
read_binary,
open_text,
read_text,
is_resource,
path,
Resource,
)
from .abc import ResourceReader
__all__ = [
'Package',
'Resource',
'ResourceReader',
'as_file',
'contents',
'files',
'is_resource',
'open_binary',
'open_text',
'path',
'read_binary',
'read_text',
]
| 506 |
Python
| 12.702702 | 48 | 0.55336 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_common.py
|
import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
import inspect
import warnings
import itertools
from typing import Union, Optional, cast
from .abc import ResourceReader, Traversable
from ._compat import wrap_spec
Package = Union[types.ModuleType, str]
Anchor = Package
def package_to_anchor(func):
"""
    Replace the 'package' parameter with 'anchor' and warn about the change.
Other errors should fall through.
>>> files('a', 'b')
Traceback (most recent call last):
TypeError: files() takes from 0 to 1 positional arguments but 2 were given
"""
undefined = object()
@functools.wraps(func)
def wrapper(anchor=undefined, package=undefined):
if package is not undefined:
if anchor is not undefined:
return func(anchor, package)
warnings.warn(
"First parameter to files is renamed to 'anchor'",
DeprecationWarning,
stacklevel=2,
)
return func(package)
elif anchor is undefined:
return func()
return func(anchor)
return wrapper
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
"""
Get a Traversable resource for an anchor.
"""
return from_package(resolve(anchor))
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
"""
Return the package's loader if it's a ResourceReader.
"""
    # We can't use an issubclass() check here because apparently abc.'s
    # __subclasscheck__()
# hook wants to create a weak reference to the object, but
# zipimport.zipimporter does not support weak references, resulting in a
# TypeError. That seems terrible.
spec = package.__spec__
reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
if reader is None:
return None
return reader(spec.name) # type: ignore
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
return cast(types.ModuleType, cand)
@resolve.register
def _(cand: str) -> types.ModuleType:
return importlib.import_module(cand)
@resolve.register
def _(cand: None) -> types.ModuleType:
return resolve(_infer_caller().f_globals['__name__'])
def _infer_caller():
"""
Walk the stack and find the frame of the first caller not in this module.
"""
def is_this_file(frame_info):
return frame_info.filename == __file__
def is_wrapper(frame_info):
return frame_info.function == 'wrapper'
not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
# also exclude 'wrapper' due to singledispatch in the call stack
callers = itertools.filterfalse(is_wrapper, not_this_file)
return next(callers).frame
def from_package(package: types.ModuleType):
"""
Return a Traversable object for the given package.
"""
spec = wrap_spec(package)
reader = spec.loader.get_resource_reader(spec.name)
return reader.files()
@contextlib.contextmanager
def _tempfile(
reader,
suffix='',
# gh-93353: Keep a reference to call os.remove() in late Python
# finalization.
*,
_os_remove=os.remove,
):
# Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
# blocks due to the need to close the temporary file to work on Windows
# properly.
fd, raw_path = tempfile.mkstemp(suffix=suffix)
try:
try:
os.write(fd, reader())
finally:
os.close(fd)
del reader
yield pathlib.Path(raw_path)
finally:
try:
_os_remove(raw_path)
except FileNotFoundError:
pass
def _temp_file(path):
return _tempfile(path.read_bytes, suffix=path.name)
def _is_present_dir(path: Traversable) -> bool:
"""
Some Traversables implement ``is_dir()`` to raise an
exception (i.e. ``FileNotFoundError``) when the
directory doesn't exist. This function wraps that call
to always return a boolean and only return True
if there's a dir and it exists.
"""
with contextlib.suppress(FileNotFoundError):
return path.is_dir()
return False
@functools.singledispatch
def as_file(path):
"""
Given a Traversable object, return that object as a
path on the local file system in a context manager.
"""
return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)
@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
"""
Degenerate behavior for pathlib.Path objects.
"""
yield path
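# Minimal usage sketch for as_file() (package and resource names are
# assumptions; the import uses the upstream module name for illustration):
#
#     >>> import importlib_resources as resources        # doctest: +SKIP
#     >>> ref = resources.files('mypkg') / 'data.bin'    # doctest: +SKIP
#     >>> with resources.as_file(ref) as path:           # doctest: +SKIP
#     ...     data = path.read_bytes()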
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
"""
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
"""
with dir as result:
yield pathlib.Path(result)
@contextlib.contextmanager
def _temp_dir(path):
"""
Given a traversable dir, recursively replicate the whole tree
to the file system in a context manager.
"""
assert path.is_dir()
with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
yield _write_contents(temp_dir, path)
def _write_contents(target, source):
child = target.joinpath(source.name)
if source.is_dir():
child.mkdir()
for item in source.iterdir():
_write_contents(child, item)
else:
child.write_bytes(source.read_bytes())
return child
| 5,457 |
Python
| 25.240384 | 79 | 0.658604 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_legacy.py
|
import functools
import os
import pathlib
import types
import warnings
from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
from . import _common
Package = Union[types.ModuleType, str]
Resource = str
def deprecated(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
f"{func.__name__} is deprecated. Use files() instead. "
"Refer to https://importlib-resources.readthedocs.io"
"/en/latest/using.html#migrating-from-legacy for migration advice.",
DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
def normalize_path(path: Any) -> str:
"""Normalize a path by ensuring it is a string.
If the resulting string contains path separators, an exception is raised.
"""
str_path = str(path)
parent, file_name = os.path.split(str_path)
if parent:
raise ValueError(f'{path!r} must be only a file name')
return file_name
@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
"""Return a file-like object opened for binary reading of the resource."""
return (_common.files(package) / normalize_path(resource)).open('rb')
@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
"""Return the binary contents of the resource."""
return (_common.files(package) / normalize_path(resource)).read_bytes()
@deprecated
def open_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> TextIO:
"""Return a file-like object opened for text reading of the resource."""
return (_common.files(package) / normalize_path(resource)).open(
'r', encoding=encoding, errors=errors
)
@deprecated
def read_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict',
) -> str:
"""Return the decoded string of the resource.
The decoding-related arguments have the same semantics as those of
bytes.decode().
"""
with open_text(package, resource, encoding, errors) as fp:
return fp.read()
@deprecated
def contents(package: Package) -> Iterable[str]:
"""Return an iterable of entries in `package`.
Note that not all entries are resources. Specifically, directories are
not considered resources. Use `is_resource()` on each entry returned here
to check if it is a resource or not.
"""
return [path.name for path in _common.files(package).iterdir()]
@deprecated
def is_resource(package: Package, name: str) -> bool:
"""True if `name` is a resource inside `package`.
Directories are *not* resources.
"""
resource = normalize_path(name)
return any(
traversable.name == resource and traversable.is_file()
for traversable in _common.files(package).iterdir()
)
@deprecated
def path(
package: Package,
resource: Resource,
) -> ContextManager[pathlib.Path]:
"""A context manager providing a file path object to the resource.
If the resource does not already exist on its own on the file system,
a temporary file will be created. If the file was created, the file
will be deleted upon exiting the context manager (no exception is
raised if the file was deleted prior to the context manager
exiting).
"""
return _common.as_file(_common.files(package) / normalize_path(resource))
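# Migration sketch from the deprecated helpers above to the files() API
# (package and resource names are illustrative):
#
#     read_text('mypkg', 'data.txt')
# becomes
#     (_common.files('mypkg') / 'data.txt').read_text(encoding='utf-8')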
| 3,481 |
Python
| 27.776859 | 80 | 0.673657 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/_adapters.py
|
from contextlib import suppress
from io import TextIOWrapper
from . import abc
class SpecLoaderAdapter:
"""
Adapt a package spec to adapt the underlying loader.
"""
def __init__(self, spec, adapter=lambda spec: spec.loader):
self.spec = spec
self.loader = adapter(spec)
def __getattr__(self, name):
return getattr(self.spec, name)
class TraversableResourcesLoader:
"""
Adapt a loader to provide TraversableResources.
"""
def __init__(self, spec):
self.spec = spec
def get_resource_reader(self, name):
return CompatibilityFiles(self.spec)._native()
def _io_wrapper(file, mode='r', *args, **kwargs):
if mode == 'r':
return TextIOWrapper(file, *args, **kwargs)
elif mode == 'rb':
return file
raise ValueError(
"Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
)
class CompatibilityFiles:
"""
Adapter for an existing or non-existent resource reader
to provide a compatibility .files().
"""
class SpecPath(abc.Traversable):
"""
Path tied to a module spec.
Can be read and exposes the resource reader children.
"""
def __init__(self, spec, reader):
self._spec = spec
self._reader = reader
def iterdir(self):
if not self._reader:
return iter(())
return iter(
CompatibilityFiles.ChildPath(self._reader, path)
for path in self._reader.contents()
)
def is_file(self):
return False
is_dir = is_file
def joinpath(self, other):
if not self._reader:
return CompatibilityFiles.OrphanPath(other)
return CompatibilityFiles.ChildPath(self._reader, other)
@property
def name(self):
return self._spec.name
def open(self, mode='r', *args, **kwargs):
return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
class ChildPath(abc.Traversable):
"""
Path tied to a resource reader child.
Can be read but doesn't expose any meaningful children.
"""
def __init__(self, reader, name):
self._reader = reader
self._name = name
def iterdir(self):
return iter(())
def is_file(self):
return self._reader.is_resource(self.name)
def is_dir(self):
return not self.is_file()
def joinpath(self, other):
return CompatibilityFiles.OrphanPath(self.name, other)
@property
def name(self):
return self._name
def open(self, mode='r', *args, **kwargs):
return _io_wrapper(
self._reader.open_resource(self.name), mode, *args, **kwargs
)
class OrphanPath(abc.Traversable):
"""
Orphan path, not tied to a module spec or resource reader.
Can't be read and doesn't expose any meaningful children.
"""
def __init__(self, *path_parts):
if len(path_parts) < 1:
raise ValueError('Need at least one path part to construct a path')
self._path = path_parts
def iterdir(self):
return iter(())
def is_file(self):
return False
is_dir = is_file
def joinpath(self, other):
return CompatibilityFiles.OrphanPath(*self._path, other)
@property
def name(self):
return self._path[-1]
def open(self, mode='r', *args, **kwargs):
raise FileNotFoundError("Can't open orphan path")
def __init__(self, spec):
self.spec = spec
@property
def _reader(self):
with suppress(AttributeError):
return self.spec.loader.get_resource_reader(self.spec.name)
def _native(self):
"""
Return the native reader if it supports files().
"""
reader = self._reader
return reader if hasattr(reader, 'files') else self
def __getattr__(self, attr):
return getattr(self._reader, attr)
def files(self):
return CompatibilityFiles.SpecPath(self.spec, self._reader)
def wrap_spec(package):
"""
Construct a package spec with traversable compatibility
on the spec/loader/reader.
"""
return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
| 4,504 |
Python
| 25.345029 | 87 | 0.571714 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/simple.py
|
"""
Interface adapters for low-level readers.
"""
import abc
import io
import itertools
from typing import BinaryIO, List
from .abc import Traversable, TraversableResources
class SimpleReader(abc.ABC):
"""
The minimum, low-level interface required from a resource
provider.
"""
@property
@abc.abstractmethod
def package(self) -> str:
"""
The name of the package for which this reader loads resources.
"""
@abc.abstractmethod
def children(self) -> List['SimpleReader']:
"""
Obtain an iterable of SimpleReader for available
child containers (e.g. directories).
"""
@abc.abstractmethod
def resources(self) -> List[str]:
"""
Obtain available named resources for this virtual package.
"""
@abc.abstractmethod
def open_binary(self, resource: str) -> BinaryIO:
"""
Obtain a File-like for a named resource.
"""
@property
def name(self):
return self.package.split('.')[-1]
class ResourceContainer(Traversable):
"""
Traversable container for a package's resources via its reader.
"""
def __init__(self, reader: SimpleReader):
self.reader = reader
def is_dir(self):
return True
def is_file(self):
return False
def iterdir(self):
files = (ResourceHandle(self, name) for name in self.reader.resources)
dirs = map(ResourceContainer, self.reader.children())
return itertools.chain(files, dirs)
def open(self, *args, **kwargs):
raise IsADirectoryError()
class ResourceHandle(Traversable):
"""
Handle to a named resource in a ResourceReader.
"""
def __init__(self, parent: ResourceContainer, name: str):
self.parent = parent
self.name = name # type: ignore
def is_file(self):
return True
def is_dir(self):
return False
def open(self, mode='r', *args, **kwargs):
stream = self.parent.reader.open_binary(self.name)
if 'b' not in mode:
            # wrap the underlying binary stream for text access
            stream = io.TextIOWrapper(stream, *args, **kwargs)
return stream
def joinpath(self, name):
raise RuntimeError("Cannot traverse into a resource")
class TraversableReader(TraversableResources, SimpleReader):
"""
A TraversableResources based on SimpleReader. Resource providers
may derive from this class to provide the TraversableResources
interface by supplying the SimpleReader interface.
"""
def files(self):
return ResourceContainer(self)
| 2,576 |
Python
| 23.084112 | 78 | 0.63354 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/importlib_resources/readers.py
|
import collections
import pathlib
import operator
from . import abc
from ._itertools import unique_everseen
from ._compat import ZipPath
def remove_duplicates(items):
return iter(collections.OrderedDict.fromkeys(items))
class FileReader(abc.TraversableResources):
def __init__(self, loader):
self.path = pathlib.Path(loader.path).parent
def resource_path(self, resource):
"""
Return the file system path to prevent
`resources.path()` from creating a temporary
copy.
"""
return str(self.path.joinpath(resource))
def files(self):
return self.path
class ZipReader(abc.TraversableResources):
def __init__(self, loader, module):
_, _, name = module.rpartition('.')
self.prefix = loader.prefix.replace('\\', '/') + name + '/'
self.archive = loader.archive
def open_resource(self, resource):
try:
return super().open_resource(resource)
except KeyError as exc:
raise FileNotFoundError(exc.args[0])
def is_resource(self, path):
# workaround for `zipfile.Path.is_file` returning true
# for non-existent paths.
target = self.files().joinpath(path)
return target.is_file() and target.exists()
def files(self):
return ZipPath(self.archive, self.prefix)
class MultiplexedPath(abc.Traversable):
"""
Given a series of Traversable objects, implement a merged
version of the interface across all objects. Useful for
namespace packages which may be multihomed at a single
name.
"""
def __init__(self, *paths):
self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
if not self._paths:
message = 'MultiplexedPath must contain at least one path'
raise FileNotFoundError(message)
if not all(path.is_dir() for path in self._paths):
raise NotADirectoryError('MultiplexedPath only supports directories')
def iterdir(self):
files = (file for path in self._paths for file in path.iterdir())
return unique_everseen(files, key=operator.attrgetter('name'))
def read_bytes(self):
raise FileNotFoundError(f'{self} is not a file')
def read_text(self, *args, **kwargs):
raise FileNotFoundError(f'{self} is not a file')
def is_dir(self):
return True
def is_file(self):
return False
def joinpath(self, *descendants):
try:
return super().joinpath(*descendants)
except abc.TraversalError:
# One of the paths did not resolve (a directory does not exist).
# Just return something that will not exist.
return self._paths[0].joinpath(*descendants)
def open(self, *args, **kwargs):
raise FileNotFoundError(f'{self} is not a file')
@property
def name(self):
return self._paths[0].name
def __repr__(self):
paths = ', '.join(f"'{path}'" for path in self._paths)
return f'MultiplexedPath({paths})'
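# Illustrative construction (the directories are hypothetical and must
# exist, per the checks in __init__):
#
#     >>> MultiplexedPath('/site/ns', '/plugins/ns')      # doctest: +SKIP
#     MultiplexedPath('/site/ns', '/plugins/ns')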
class NamespaceReader(abc.TraversableResources):
def __init__(self, namespace_path):
if 'NamespacePath' not in str(namespace_path):
raise ValueError('Invalid path')
self.path = MultiplexedPath(*list(namespace_path))
def resource_path(self, resource):
"""
Return the file system path to prevent
`resources.path()` from creating a temporary
copy.
"""
return str(self.path.joinpath(resource))
def files(self):
return self.path
| 3,581 |
Python
| 28.603306 | 81 | 0.63055 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/jaraco/functools.py
|
import functools
import time
import inspect
import collections
import types
import itertools
import warnings
from setuptools.extern import more_itertools  # bind 'more_itertools' for use below
from typing import Callable, TypeVar
CallableT = TypeVar("CallableT", bound=Callable[..., object])
def compose(*funcs):
"""
Compose any number of unary functions into a single unary function.
>>> import textwrap
>>> expected = str.strip(textwrap.dedent(compose.__doc__))
>>> strip_and_dedent = compose(str.strip, textwrap.dedent)
>>> strip_and_dedent(compose.__doc__) == expected
True
Compose also allows the innermost function to take arbitrary arguments.
>>> round_three = lambda x: round(x, ndigits=3)
>>> f = compose(round_three, int.__truediv__)
>>> [f(3*x, x+1) for x in range(1,10)]
[1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
"""
def compose_two(f1, f2):
return lambda *args, **kwargs: f1(f2(*args, **kwargs))
return functools.reduce(compose_two, funcs)
def method_caller(method_name, *args, **kwargs):
"""
Return a function that will call a named method on the
target object with optional positional and keyword
arguments.
>>> lower = method_caller('lower')
>>> lower('MyString')
'mystring'
"""
def call_method(target):
func = getattr(target, method_name)
return func(*args, **kwargs)
return call_method
def once(func):
"""
Decorate func so it's only ever called the first time.
This decorator can ensure that an expensive or non-idempotent function
will not be expensive on subsequent calls and is idempotent.
>>> add_three = once(lambda a: a+3)
>>> add_three(3)
6
>>> add_three(9)
6
>>> add_three('12')
6
To reset the stored value, simply clear the property ``saved_result``.
>>> del add_three.saved_result
>>> add_three(9)
12
>>> add_three(8)
12
Or invoke 'reset()' on it.
>>> add_three.reset()
>>> add_three(-3)
0
>>> add_three(0)
0
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(wrapper, 'saved_result'):
wrapper.saved_result = func(*args, **kwargs)
return wrapper.saved_result
wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
return wrapper
def method_cache(
method: CallableT,
cache_wrapper: Callable[
[CallableT], CallableT
] = functools.lru_cache(), # type: ignore[assignment]
) -> CallableT:
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
def wrapper(self: object, *args: object, **kwargs: object) -> object:
# it's the first call, replace the method with a cached, bound method
bound_method: CallableT = types.MethodType( # type: ignore[assignment]
method, self
)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
return ( # type: ignore[return-value]
_special_method_cache(method, cache_wrapper) or wrapper
)
def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy
def apply(transform):
"""
Decorate a function with a transform function that is
invoked on results returned from the decorated function.
>>> @apply(reversed)
... def get_numbers(start):
... "doc for get_numbers"
... return range(start, start+3)
>>> list(get_numbers(4))
[6, 5, 4]
>>> get_numbers.__doc__
'doc for get_numbers'
"""
def wrap(func):
return functools.wraps(func)(compose(transform, func))
return wrap
def result_invoke(action):
r"""
Decorate a function with an action function that is
invoked on the results returned from the decorated
function (for its side-effect), then return the original
result.
>>> @result_invoke(print)
... def add_two(a, b):
... return a + b
>>> x = add_two(2, 3)
5
>>> x
5
"""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
action(result)
return result
return wrapper
return wrap
def invoke(f, *args, **kwargs):
"""
Call a function for its side effect after initialization.
The benefit of using the decorator instead of simply invoking a function
after defining it is that it makes explicit the author's intent for the
function to be called immediately. Whereas if one simply calls the
function immediately, it's less obvious if that was intentional or
incidental. It also avoids repeating the name - the two actions, defining
the function and calling it immediately are modeled separately, but linked
by the decorator construct.
The benefit of having a function construct (opposed to just invoking some
behavior inline) is to serve as a scope in which the behavior occurs. It
avoids polluting the global namespace with local variables, provides an
anchor on which to attach documentation (docstring), keeps the behavior
logically separated (instead of conceptually separated or not separated at
all), and provides potential to re-use the behavior for testing or other
purposes.
This function is named as a pithy way to communicate, "call this function
primarily for its side effect", or "while defining this function, also
take it aside and call it". It exists because there's no Python construct
for "define and call" (nor should there be, as decorators serve this need
just fine). The behavior happens immediately and synchronously.
>>> @invoke
... def func(): print("called")
called
>>> func()
called
Use functools.partial to pass parameters to the initial call
>>> @functools.partial(invoke, name='bingo')
... def func(name): print("called with", name)
called with bingo
"""
f(*args, **kwargs)
return f
def call_aside(*args, **kwargs):
"""
Deprecated name for invoke.
"""
warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning)
return invoke(*args, **kwargs)
class Throttler:
"""
Rate-limit a function (or other callable)
"""
def __init__(self, func, max_rate=float('Inf')):
if isinstance(func, Throttler):
func = func.func
self.func = func
self.max_rate = max_rate
self.reset()
def reset(self):
self.last_called = 0
def __call__(self, *args, **kwargs):
self._wait()
return self.func(*args, **kwargs)
def _wait(self):
"ensure at least 1/max_rate seconds from last call"
elapsed = time.time() - self.last_called
must_wait = 1 / self.max_rate - elapsed
time.sleep(max(0, must_wait))
self.last_called = time.time()
def __get__(self, obj, type=None):
return first_invoke(self._wait, functools.partial(self.func, obj))
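# A small usage sketch for Throttler (the rate is illustrative):
#
#     >>> limited = Throttler(print, max_rate=2)   # at most ~2 calls/second
#     >>> for n in range(3):                       # doctest: +SKIP
#     ...     limited(n)                           # sleeps between calls as needed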
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
"""
Given a callable func, trap the indicated exceptions
for up to 'retries' times, invoking cleanup on the
exception. On the final attempt, allow any exceptions
to propagate.
"""
attempts = itertools.count() if retries == float('inf') else range(retries)
for attempt in attempts:
try:
return func()
except trap:
cleanup()
return func()
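# A hedged example for retry_call (the flaky callable is contrived):
#
#     >>> outcomes = iter([ValueError, ValueError, 'ok'])
#     >>> def flaky():
#     ...     result = next(outcomes)
#     ...     if result is ValueError:
#     ...         raise ValueError
#     ...     return result
#     >>> retry_call(flaky, retries=2, trap=ValueError)
#     'ok'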
def retry(*r_args, **r_kwargs):
"""
Decorator wrapper for retry_call. Accepts arguments to retry_call
except func and then returns a decorator for the decorated function.
Ex:
>>> @retry(retries=3)
... def my_func(a, b):
... "this is my funk"
... print(a, b)
>>> my_func.__doc__
'this is my funk'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*f_args, **f_kwargs):
bound = functools.partial(func, *f_args, **f_kwargs)
return retry_call(bound, *r_args, **r_kwargs)
return wrapper
return decorate
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
print_results = compose(more_itertools.consume, print_all, func)
return functools.wraps(func)(print_results)
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns)
def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
The arguments are stored on the instance, allowing for
different instance to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper
def except_(*exceptions, replace=None, use=None):
"""
Replace the indicated exceptions, if raised, with the indicated
literal replacement or evaluated expression (if present).
>>> safe_int = except_(ValueError)(int)
>>> safe_int('five')
>>> safe_int('5')
5
Specify a literal replacement with ``replace``.
>>> safe_int_r = except_(ValueError, replace=0)(int)
>>> safe_int_r('five')
0
Provide an expression to ``use`` to pass through particular parameters.
>>> safe_int_pt = except_(ValueError, use='args[0]')(int)
>>> safe_int_pt('five')
'five'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions:
try:
return eval(use)
except TypeError:
return replace
return wrapper
return decorate
| 15,053 |
Python
| 26.02693 | 88 | 0.613433 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/jaraco/context.py
|
import os
import subprocess
import contextlib
import functools
import tempfile
import shutil
import operator
import warnings
@contextlib.contextmanager
def pushd(dir):
"""
>>> tmp_path = getfixture('tmp_path')
>>> with pushd(tmp_path):
... assert os.getcwd() == os.fspath(tmp_path)
>>> assert os.getcwd() != os.fspath(tmp_path)
"""
orig = os.getcwd()
os.chdir(dir)
try:
yield dir
finally:
os.chdir(orig)
@contextlib.contextmanager
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
"""
Get a tarball, extract it, change to that directory, yield, then
clean up.
`runner` is the function to invoke commands.
`pushd` is a context manager for changing the directory.
"""
if target_dir is None:
target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
if runner is None:
runner = functools.partial(subprocess.check_call, shell=True)
else:
warnings.warn("runner parameter is deprecated", DeprecationWarning)
    # In the tar command, use --strip-components=1 to strip the first path
    # and then use -C to cause the files to be extracted to {target_dir}.
    # This ensures that we always know where the files were extracted.
runner('mkdir {target_dir}'.format(**vars()))
try:
getter = 'wget {url} -O -'
extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
cmd = ' | '.join((getter, extract))
runner(cmd.format(compression=infer_compression(url), **vars()))
with pushd(target_dir):
yield target_dir
finally:
runner('rm -Rf {target_dir}'.format(**vars()))
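# Illustrative usage sketch (not part of the vendored module). The URL below
# is hypothetical; like the commands above, it assumes ``wget`` and ``tar``
# are available on PATH.
def _demo_tarball_context():
    url = 'https://example.com/project-1.0.tar.gz'  # hypothetical URL
    with tarball_context(url) as dir_name:
        # Inside the block, the working directory is the extracted tree;
        # ``dir_name`` is derived from the URL ('project-1.0').
        print('extracted into', dir_name, 'cwd:', os.getcwd())
    # On exit, the extracted tree is removed with ``rm -Rf``.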
def infer_compression(url):
"""
Given a URL or filename, infer the compression code for tar.
>>> infer_compression('http://foo/bar.tar.gz')
'z'
>>> infer_compression('http://foo/bar.tgz')
'z'
>>> infer_compression('file.bz')
'j'
>>> infer_compression('file.xz')
'J'
"""
# cheat and just assume it's the last two characters
compression_indicator = url[-2:]
mapping = dict(gz='z', bz='j', xz='J')
# Assume 'z' (gzip) if no match
return mapping.get(compression_indicator, 'z')
@contextlib.contextmanager
def temp_dir(remover=shutil.rmtree):
"""
Create a temporary directory context. Pass a custom remover
to override the removal behavior.
>>> import pathlib
>>> with temp_dir() as the_dir:
... assert os.path.isdir(the_dir)
... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents')
>>> assert not os.path.exists(the_dir)
"""
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
remover(temp_dir)
@contextlib.contextmanager
def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
"""
Check out the repo indicated by url.
If dest_ctx is supplied, it should be a context manager
to yield the target directory for the check out.
"""
exe = 'git' if 'git' in url else 'hg'
with dest_ctx() as repo_dir:
cmd = [exe, 'clone', url, repo_dir]
if branch:
cmd.extend(['--branch', branch])
devnull = open(os.path.devnull, 'w')
stdout = devnull if quiet else None
subprocess.check_call(cmd, stdout=stdout)
yield repo_dir
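# Illustrative usage sketch (not part of the vendored module). The URL is
# hypothetical; a ``git`` executable is assumed since the URL contains "git".
def _demo_repo_context():
    url = 'https://github.com/example/project.git'  # hypothetical URL
    with repo_context(url, branch='main') as repo_dir:
        # ``repo_dir`` is a temp_dir()-managed clone, removed on exit.
        print('checked out into', repo_dir)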
@contextlib.contextmanager
def null():
"""
A null context suitable to stand in for a meaningful context.
>>> with null() as value:
... assert value is None
"""
yield
class ExceptionTrap:
"""
A context manager that will catch certain exceptions and provide an
indication they occurred.
>>> with ExceptionTrap() as trap:
... raise Exception()
>>> bool(trap)
True
>>> with ExceptionTrap() as trap:
... pass
>>> bool(trap)
False
>>> with ExceptionTrap(ValueError) as trap:
... raise ValueError("1 + 1 is not 3")
>>> bool(trap)
True
>>> trap.value
ValueError('1 + 1 is not 3')
>>> trap.tb
<traceback object at ...>
>>> with ExceptionTrap(ValueError) as trap:
... raise Exception()
Traceback (most recent call last):
...
Exception
>>> bool(trap)
False
"""
exc_info = None, None, None
def __init__(self, exceptions=(Exception,)):
self.exceptions = exceptions
def __enter__(self):
return self
@property
def type(self):
return self.exc_info[0]
@property
def value(self):
return self.exc_info[1]
@property
def tb(self):
return self.exc_info[2]
def __exit__(self, *exc_info):
type = exc_info[0]
matches = type and issubclass(type, self.exceptions)
if matches:
self.exc_info = exc_info
return matches
def __bool__(self):
return bool(self.type)
def raises(self, func, *, _test=bool):
"""
Wrap func and replace the result with the truth
value of the trap (True if an exception occurred).
First, give the decorator an alias to support Python 3.8
    syntax.
>>> raises = ExceptionTrap(ValueError).raises
Now decorate a function that always fails.
>>> @raises
... def fail():
... raise ValueError('failed')
>>> fail()
True
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with ExceptionTrap(self.exceptions) as trap:
func(*args, **kwargs)
return _test(trap)
return wrapper
def passes(self, func):
"""
Wrap func and replace the result with the truth
value of the trap (True if no exception).
First, give the decorator an alias to support Python 3.8
    syntax.
>>> passes = ExceptionTrap(ValueError).passes
Now decorate a function that always fails.
>>> @passes
... def fail():
... raise ValueError('failed')
>>> fail()
False
"""
return self.raises(func, _test=operator.not_)
class suppress(contextlib.suppress, contextlib.ContextDecorator):
"""
A version of contextlib.suppress with decorator support.
>>> @suppress(KeyError)
... def key_error():
... {}['']
>>> key_error()
"""
class on_interrupt(contextlib.ContextDecorator):
"""
Replace a KeyboardInterrupt with SystemExit(1)
>>> def do_interrupt():
... raise KeyboardInterrupt()
>>> on_interrupt('error')(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 1
>>> on_interrupt('error', code=255)(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 255
>>> on_interrupt('suppress')(do_interrupt)()
>>> with __import__('pytest').raises(KeyboardInterrupt):
... on_interrupt('ignore')(do_interrupt)()
"""
def __init__(
self,
action='error',
# py3.7 compat
# /,
code=1,
):
self.action = action
self.code = code
def __enter__(self):
return self
def __exit__(self, exctype, excinst, exctb):
if exctype is not KeyboardInterrupt or self.action == 'ignore':
return
elif self.action == 'error':
raise SystemExit(self.code) from excinst
return self.action == 'suppress'
| 7,460 |
Python
| 24.816609 | 85 | 0.586863 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/jaraco/text/__init__.py
|
import re
import itertools
import textwrap
import functools
try:
from importlib.resources import files # type: ignore
except ImportError: # pragma: nocover
from setuptools.extern.importlib_resources import files # type: ignore
from setuptools.extern.jaraco.functools import compose, method_cache
from setuptools.extern.jaraco.context import ExceptionTrap
def substitution(old, new):
"""
Return a function that will perform a substitution on a string
"""
return lambda s: s.replace(old, new)
def multi_substitution(*substitutions):
"""
Take a sequence of pairs specifying substitutions, and create
a function that performs those substitutions.
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
'baz'
"""
substitutions = itertools.starmap(substitution, substitutions)
# compose function applies last function first, so reverse the
# substitutions to get the expected order.
substitutions = reversed(tuple(substitutions))
return compose(*substitutions)
class FoldedCase(str):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use ``in_``:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
# Python 3.8 compatibility
_unicode_trap = ExceptionTrap(UnicodeDecodeError)
@_unicode_trap.passes
def is_decodable(value):
r"""
Return True if the supplied value is decodable (using the default
encoding).
>>> is_decodable(b'\xff')
False
>>> is_decodable(b'\x32')
True
"""
value.decode()
def is_binary(value):
r"""
Return True if the value appears to be binary (that is, it's a byte
string and isn't decodable).
>>> is_binary(b'\xff')
True
>>> is_binary('\xff')
False
"""
return isinstance(value, bytes) and not is_decodable(value)
def trim(s):
r"""
Trim something like a docstring to remove the whitespace that
is common due to indentation and formatting.
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
'foo = bar\n\tbar = baz'
"""
return textwrap.dedent(s).strip()
def wrap(s):
"""
Wrap lines of text, retaining existing newlines as
paragraph markers.
>>> print(wrap(lorem_ipsum))
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
minim veniam, quis nostrud exercitation ullamco laboris nisi ut
aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
<BLANKLINE>
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
"""
paragraphs = s.splitlines()
wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
return '\n\n'.join(wrapped)
def unwrap(s):
r"""
Given a multi-line string, return an unwrapped version.
>>> wrapped = wrap(lorem_ipsum)
>>> wrapped.count('\n')
20
>>> unwrapped = unwrap(wrapped)
>>> unwrapped.count('\n')
1
>>> print(unwrapped)
Lorem ipsum dolor sit amet, consectetur adipiscing ...
Curabitur pretium tincidunt lacus. Nulla gravida orci ...
"""
paragraphs = re.split(r'\n\n+', s)
cleaned = (para.replace('\n', ' ') for para in paragraphs)
return '\n'.join(cleaned)
class Splitter(object):
"""object that will split a string with the given arguments for each call
>>> s = Splitter(',')
>>> s('hello, world, this is your, master calling')
['hello', ' world', ' this is your', ' master calling']
"""
def __init__(self, *args):
self.args = args
def __call__(self, s):
return s.split(*self.args)
def indent(string, prefix=' ' * 4):
"""
>>> indent('foo')
' foo'
"""
return prefix + string
class WordSet(tuple):
"""
Given an identifier, return the words that identifier represents,
whether in camel case, underscore-separated, etc.
>>> WordSet.parse("camelCase")
('camel', 'Case')
>>> WordSet.parse("under_sep")
('under', 'sep')
Acronyms should be retained
>>> WordSet.parse("firstSNL")
('first', 'SNL')
>>> WordSet.parse("you_and_I")
('you', 'and', 'I')
>>> WordSet.parse("A simple test")
('A', 'simple', 'test')
Multiple caps should not interfere with the first cap of another word.
>>> WordSet.parse("myABCClass")
('my', 'ABC', 'Class')
The result is a WordSet, so you can get the form you need.
>>> WordSet.parse("myABCClass").underscore_separated()
'my_ABC_Class'
>>> WordSet.parse('a-command').camel_case()
'ACommand'
>>> WordSet.parse('someIdentifier').lowered().space_separated()
'some identifier'
Slices of the result should return another WordSet.
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
'out_of_context'
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
'word set'
>>> example = WordSet.parse('figured it out')
>>> example.headless_camel_case()
'figuredItOut'
>>> example.dash_separated()
'figured-it-out'
"""
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
def capitalized(self):
return WordSet(word.capitalize() for word in self)
def lowered(self):
return WordSet(word.lower() for word in self)
def camel_case(self):
return ''.join(self.capitalized())
def headless_camel_case(self):
words = iter(self)
first = next(words).lower()
new_words = itertools.chain((first,), WordSet(words).camel_case())
return ''.join(new_words)
def underscore_separated(self):
return '_'.join(self)
def dash_separated(self):
return '-'.join(self)
def space_separated(self):
return ' '.join(self)
def trim_right(self, item):
"""
Remove the item from the end of the set.
>>> WordSet.parse('foo bar').trim_right('foo')
('foo', 'bar')
>>> WordSet.parse('foo bar').trim_right('bar')
('foo',)
>>> WordSet.parse('').trim_right('bar')
()
"""
return self[:-1] if self and self[-1] == item else self
def trim_left(self, item):
"""
Remove the item from the beginning of the set.
>>> WordSet.parse('foo bar').trim_left('foo')
('bar',)
>>> WordSet.parse('foo bar').trim_left('bar')
('foo', 'bar')
>>> WordSet.parse('').trim_left('bar')
()
"""
return self[1:] if self and self[0] == item else self
def trim(self, item):
"""
>>> WordSet.parse('foo bar').trim('foo')
('bar',)
"""
return self.trim_left(item).trim_right(item)
def __getitem__(self, item):
result = super(WordSet, self).__getitem__(item)
if isinstance(item, slice):
result = WordSet(result)
return result
@classmethod
def parse(cls, identifier):
matches = cls._pattern.finditer(identifier)
return WordSet(match.group(0) for match in matches)
@classmethod
def from_class_name(cls, subject):
return cls.parse(subject.__class__.__name__)
# for backward compatibility
words = WordSet.parse
def simple_html_strip(s):
r"""
Remove HTML from the string `s`.
>>> str(simple_html_strip(''))
''
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
A stormy day in paradise
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
Somebody tell the truth.
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
What about
multiple lines?
"""
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
texts = (match.group(3) or '' for match in html_stripper.finditer(s))
return ''.join(texts)
class SeparatedValues(str):
"""
A string separated by a separator. Overrides __iter__ for getting
the values.
>>> list(SeparatedValues('a,b,c'))
['a', 'b', 'c']
Whitespace is stripped and empty values are discarded.
>>> list(SeparatedValues(' a, b , c, '))
['a', 'b', 'c']
"""
separator = ','
def __iter__(self):
parts = self.split(self.separator)
return filter(None, (part.strip() for part in parts))
class Stripper:
r"""
Given a series of lines, find the common prefix and strip it from them.
>>> lines = [
... 'abcdefg\n',
... 'abc\n',
... 'abcde\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
'abc'
>>> list(res.lines)
['defg\n', '\n', 'de\n']
If no prefix is common, nothing should be stripped.
>>> lines = [
... 'abcd\n',
... '1234\n',
... ]
>>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    ''
>>> list(res.lines)
['abcd\n', '1234\n']
"""
def __init__(self, prefix, lines):
self.prefix = prefix
self.lines = map(self, lines)
@classmethod
def strip_prefix(cls, lines):
prefix_lines, lines = itertools.tee(lines)
prefix = functools.reduce(cls.common_prefix, prefix_lines)
return cls(prefix, lines)
def __call__(self, line):
if not self.prefix:
return line
null, prefix, rest = line.partition(self.prefix)
return rest
@staticmethod
def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
"""
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index]
def remove_prefix(text, prefix):
"""
Remove the prefix from the text if it exists.
>>> remove_prefix('underwhelming performance', 'underwhelming ')
'performance'
>>> remove_prefix('something special', 'sample')
'something special'
"""
null, prefix, rest = text.rpartition(prefix)
return rest
def remove_suffix(text, suffix):
"""
Remove the suffix from the text if it exists.
>>> remove_suffix('name.git', '.git')
'name'
>>> remove_suffix('something special', 'sample')
'something special'
"""
rest, suffix, null = text.partition(suffix)
return rest
def normalize_newlines(text):
r"""
Replace alternate newlines with the canonical newline.
>>> normalize_newlines('Lorem Ipsum\u2029')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\r\n')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\x85')
'Lorem Ipsum\n'
"""
newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
pattern = '|'.join(newlines)
return re.sub(pattern, '\n', text)
def _nonblank(str):
return str and not str.startswith('#')
@functools.singledispatch
def yield_lines(iterable):
r"""
Yield valid lines of a string or iterable.
>>> list(yield_lines(''))
[]
>>> list(yield_lines(['foo', 'bar']))
['foo', 'bar']
>>> list(yield_lines('foo\nbar'))
['foo', 'bar']
>>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
['foo', 'baz #comment']
>>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
['foo', 'bar', 'baz', 'bing']
"""
return itertools.chain.from_iterable(map(yield_lines, iterable))
@yield_lines.register(str)
def _(text):
return filter(_nonblank, map(str.strip, text.splitlines()))
def drop_comment(line):
"""
Drop comments.
>>> drop_comment('foo # bar')
'foo'
A hash without a space may be in a URL.
>>> drop_comment('http://example.com/foo#bar')
'http://example.com/foo#bar'
"""
return line.partition(' #')[0]
def join_continuation(lines):
r"""
Join lines continued by a trailing backslash.
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
['foobar', 'baz']
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
['foobar', 'baz']
>>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
['foobarbaz']
Not sure why, but...
    The character preceding the backslash is also elided.
>>> list(join_continuation(['goo\\', 'dly']))
['godly']
A terrible idea, but...
If no line is available to continue, suppress the lines.
>>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
['foo']
"""
lines = iter(lines)
for item in lines:
while item.endswith('\\'):
try:
item = item[:-2].strip() + next(lines)
except StopIteration:
return
yield item
| 15,517 |
Python
| 24.863333 | 77 | 0.598634 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/_types.py
|
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from typing import Any, Callable, Tuple
# Type annotations
ParseFloat = Callable[[str], Any]
Key = Tuple[str, ...]
Pos = int
| 254 |
Python
| 22.181816 | 48 | 0.748031 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/__init__.py
|
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
from ._parser import TOMLDecodeError, load, loads
# Pretend this exception was created here.
TOMLDecodeError.__module__ = __name__
| 396 |
Python
| 32.083331 | 87 | 0.729798 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/_re.py
|
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from typing import Any
from ._types import ParseFloat
# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
RE_NUMBER = re.compile(
r"""
0
(?:
x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
|
b[01](?:_?[01])* # bin
|
o[0-7](?:_?[0-7])* # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
(?P<floatpart>
(?:\.[0-9](?:_?[0-9])*)? # optional fractional part
(?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
)
""",
flags=re.VERBOSE,
)
RE_LOCALTIME = re.compile(_TIME_RE_STR)
RE_DATETIME = re.compile(
rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
(?:
[Tt ]
{_TIME_RE_STR}
(?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
)?
""",
flags=re.VERBOSE,
)
def match_to_datetime(match: re.Match) -> datetime | date:
"""Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
Raises ValueError if the match does not correspond to a valid date
or datetime.
"""
(
year_str,
month_str,
day_str,
hour_str,
minute_str,
sec_str,
micros_str,
zulu_time,
offset_sign_str,
offset_hour_str,
offset_minute_str,
) = match.groups()
year, month, day = int(year_str), int(month_str), int(day_str)
if hour_str is None:
return date(year, month, day)
hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
if offset_sign_str:
tz: tzinfo | None = cached_tz(
offset_hour_str, offset_minute_str, offset_sign_str
)
elif zulu_time:
tz = timezone.utc
else: # local date-time
tz = None
return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
sign = 1 if sign_str == "+" else -1
return timezone(
timedelta(
hours=sign * int(hour_str),
minutes=sign * int(minute_str),
)
)
def match_to_localtime(match: re.Match) -> time:
hour_str, minute_str, sec_str, micros_str = match.groups()
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
return time(int(hour_str), int(minute_str), int(sec_str), micros)
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
if match.group("floatpart"):
return parse_float(match.group())
return int(match.group(), 0)
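# Illustrative sketch (not part of the vendored module) of how the regex and
# converter above cooperate; ``fullmatch`` keeps the example strict.
def _demo_match_to_datetime():
    match = RE_DATETIME.fullmatch("1988-10-27T12:30:00+02:00")
    assert match is not None
    # The offset groups are turned into a cached ``timezone`` instance.
    dt = match_to_datetime(match)
    print(dt.isoformat())  # -> 1988-10-27T12:30:00+02:00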
| 2,943 |
Python
| 26.259259 | 87 | 0.561672 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/_vendor/tomli/_parser.py
|
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from collections.abc import Iterable
import string
from types import MappingProxyType
from typing import Any, BinaryIO, NamedTuple
from ._re import (
RE_DATETIME,
RE_LOCALTIME,
RE_NUMBER,
match_to_datetime,
match_to_localtime,
match_to_number,
)
from ._types import Key, ParseFloat, Pos
ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
TOML_WS = frozenset(" \t")
TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS = frozenset(string.hexdigits)
BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
{
"\\b": "\u0008", # backspace
"\\t": "\u0009", # tab
"\\n": "\u000A", # linefeed
"\\f": "\u000C", # form feed
"\\r": "\u000D", # carriage return
'\\"': "\u0022", # quote
"\\\\": "\u005C", # backslash
}
)
class TOMLDecodeError(ValueError):
"""An error raised if a document is not valid TOML."""
def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
"""Parse TOML from a binary file object."""
b = __fp.read()
try:
s = b.decode()
except AttributeError:
raise TypeError(
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
) from None
return loads(s, parse_float=parse_float)
def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
src = __s.replace("\r\n", "\n")
pos = 0
out = Output(NestedDict(), Flags())
header: Key = ()
parse_float = make_safe_parse_float(parse_float)
# Parse one statement at a time
# (typically means one line in TOML source)
while True:
# 1. Skip line leading whitespace
pos = skip_chars(src, pos, TOML_WS)
# 2. Parse rules. Expect one of the following:
# - end of file
# - end of line
# - comment
# - key/value pair
# - append dict to list (and move to its namespace)
# - create dict (and move to its namespace)
# Skip trailing whitespace when applicable.
try:
char = src[pos]
except IndexError:
break
if char == "\n":
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
out.flags.finalize_pending()
if second_char == "[":
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise suffixed_err(src, pos, "Invalid statement")
# 3. Skip comment
pos = skip_comment(src, pos)
# 4. Expect end of line or end of file
try:
char = src[pos]
except IndexError:
break
if char != "\n":
raise suffixed_err(
src, pos, "Expected newline or end of document after a statement"
)
pos += 1
return out.data.dict
class Flags:
"""Flags that map to parsed keys/namespaces."""
# Marks an immutable namespace (inline array or inline table).
FROZEN = 0
# Marks a nest that has been explicitly created and can no longer
# be opened using the "[table]" syntax.
EXPLICIT_NEST = 1
def __init__(self) -> None:
self._flags: dict[str, dict] = {}
self._pending_flags: set[tuple[Key, int]] = set()
def add_pending(self, key: Key, flag: int) -> None:
self._pending_flags.add((key, flag))
def finalize_pending(self) -> None:
for key, flag in self._pending_flags:
self.set(key, flag, recursive=False)
self._pending_flags.clear()
def unset_all(self, key: Key) -> None:
cont = self._flags
for k in key[:-1]:
if k not in cont:
return
cont = cont[k]["nested"]
cont.pop(key[-1], None)
def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
cont = self._flags
key_parent, key_stem = key[:-1], key[-1]
for k in key_parent:
if k not in cont:
cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
if key_stem not in cont:
cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
def is_(self, key: Key, flag: int) -> bool:
if not key:
return False # document root has no flags
cont = self._flags
for k in key[:-1]:
if k not in cont:
return False
inner_cont = cont[k]
if flag in inner_cont["recursive_flags"]:
return True
cont = inner_cont["nested"]
key_stem = key[-1]
if key_stem in cont:
cont = cont[key_stem]
return flag in cont["flags"] or flag in cont["recursive_flags"]
return False
class NestedDict:
def __init__(self) -> None:
# The parsed content of the TOML document
self.dict: dict[str, Any] = {}
def get_or_create_nest(
self,
key: Key,
*,
access_lists: bool = True,
) -> dict:
cont: Any = self.dict
for k in key:
if k not in cont:
cont[k] = {}
cont = cont[k]
if access_lists and isinstance(cont, list):
cont = cont[-1]
if not isinstance(cont, dict):
raise KeyError("There is no nest behind this key")
return cont
def append_nest_to_list(self, key: Key) -> None:
cont = self.get_or_create_nest(key[:-1])
last_key = key[-1]
if last_key in cont:
list_ = cont[last_key]
if not isinstance(list_, list):
raise KeyError("An object other than list found behind this key")
list_.append({})
else:
cont[last_key] = [{}]
class Output(NamedTuple):
data: NestedDict
flags: Flags
def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
try:
while src[pos] in chars:
pos += 1
except IndexError:
pass
return pos
def skip_until(
src: str,
pos: Pos,
expect: str,
*,
error_on: frozenset[str],
error_on_eof: bool,
) -> Pos:
try:
new_pos = src.index(expect, pos)
except ValueError:
new_pos = len(src)
if error_on_eof:
raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
if not error_on.isdisjoint(src[pos:new_pos]):
while src[pos] not in error_on:
pos += 1
raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
return new_pos
def skip_comment(src: str, pos: Pos) -> Pos:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char == "#":
return skip_until(
src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
)
return pos
def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
while True:
pos_before_skip = pos
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
pos = skip_comment(src, pos)
if pos == pos_before_skip:
return pos
def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 1 # Skip "["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot declare {key} twice")
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.get_or_create_nest(key)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if not src.startswith("]", pos):
raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
return pos + 1, key
def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 2 # Skip "[["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
# Free the namespace now that it points to another empty list item...
out.flags.unset_all(key)
# ...but this key precisely is still prohibited from table declaration
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.append_nest_to_list(key)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if not src.startswith("]]", pos):
raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
return pos + 2, key
def key_value_rule(
src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
) -> Pos:
pos, key, value = parse_key_value_pair(src, pos, parse_float)
key_parent, key_stem = key[:-1], key[-1]
abs_key_parent = header + key_parent
relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
for cont_key in relative_path_cont_keys:
# Check that dotted key syntax does not redefine an existing table
if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
# Containers in the relative path can't be opened with the table syntax or
# dotted key/value syntax in following table sections.
out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
if out.flags.is_(abs_key_parent, Flags.FROZEN):
raise suffixed_err(
src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
)
try:
nest = out.data.get_or_create_nest(abs_key_parent)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if key_stem in nest:
raise suffixed_err(src, pos, "Cannot overwrite a value")
# Mark inline table and array namespaces recursively immutable
if isinstance(value, (dict, list)):
out.flags.set(header + key, Flags.FROZEN, recursive=True)
nest[key_stem] = value
return pos
def parse_key_value_pair(
src: str, pos: Pos, parse_float: ParseFloat
) -> tuple[Pos, Key, Any]:
pos, key = parse_key(src, pos)
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != "=":
raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
pos += 1
pos = skip_chars(src, pos, TOML_WS)
pos, value = parse_value(src, pos, parse_float)
return pos, key, value
def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
pos, key_part = parse_key_part(src, pos)
key: Key = (key_part,)
pos = skip_chars(src, pos, TOML_WS)
while True:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != ".":
return pos, key
pos += 1
pos = skip_chars(src, pos, TOML_WS)
pos, key_part = parse_key_part(src, pos)
key += (key_part,)
pos = skip_chars(src, pos, TOML_WS)
def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char in BARE_KEY_CHARS:
start_pos = pos
pos = skip_chars(src, pos, BARE_KEY_CHARS)
return pos, src[start_pos:pos]
if char == "'":
return parse_literal_str(src, pos)
if char == '"':
return parse_one_line_basic_str(src, pos)
raise suffixed_err(src, pos, "Invalid initial character for a key part")
def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1
return parse_basic_str(src, pos, multiline=False)
def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
pos += 1
array: list = []
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
return pos + 1, array
while True:
pos, val = parse_value(src, pos, parse_float)
array.append(val)
pos = skip_comments_and_array_ws(src, pos)
c = src[pos : pos + 1]
if c == "]":
return pos + 1, array
if c != ",":
raise suffixed_err(src, pos, "Unclosed array")
pos += 1
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
return pos + 1, array
def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
pos += 1
nested_dict = NestedDict()
flags = Flags()
pos = skip_chars(src, pos, TOML_WS)
if src.startswith("}", pos):
return pos + 1, nested_dict.dict
while True:
pos, key, value = parse_key_value_pair(src, pos, parse_float)
key_parent, key_stem = key[:-1], key[-1]
if flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
try:
nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
if key_stem in nest:
raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
nest[key_stem] = value
pos = skip_chars(src, pos, TOML_WS)
c = src[pos : pos + 1]
if c == "}":
return pos + 1, nested_dict.dict
if c != ",":
raise suffixed_err(src, pos, "Unclosed inline table")
if isinstance(value, (dict, list)):
flags.set(key, Flags.FROZEN, recursive=True)
pos += 1
pos = skip_chars(src, pos, TOML_WS)
def parse_basic_str_escape(
src: str, pos: Pos, *, multiline: bool = False
) -> tuple[Pos, str]:
escape_id = src[pos : pos + 2]
pos += 2
if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
# Skip whitespace until next non-whitespace character or end of
# the doc. Error if non-whitespace is found before newline.
if escape_id != "\\\n":
pos = skip_chars(src, pos, TOML_WS)
try:
char = src[pos]
except IndexError:
return pos, ""
if char != "\n":
raise suffixed_err(src, pos, "Unescaped '\\' in a string")
pos += 1
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
return pos, ""
if escape_id == "\\u":
return parse_hex_char(src, pos, 4)
if escape_id == "\\U":
return parse_hex_char(src, pos, 8)
try:
return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
except KeyError:
raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None
def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
return parse_basic_str_escape(src, pos, multiline=True)
def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
hex_str = src[pos : pos + hex_len]
if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
raise suffixed_err(src, pos, "Invalid hex value")
pos += hex_len
hex_int = int(hex_str, 16)
if not is_unicode_scalar_value(hex_int):
raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
return pos, chr(hex_int)
def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1 # Skip starting apostrophe
start_pos = pos
pos = skip_until(
src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
)
return pos + 1, src[start_pos:pos] # Skip ending apostrophe
def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
pos += 3
if src.startswith("\n", pos):
pos += 1
if literal:
delim = "'"
end_pos = skip_until(
src,
pos,
"'''",
error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
error_on_eof=True,
)
result = src[pos:end_pos]
pos = end_pos + 3
else:
delim = '"'
pos, result = parse_basic_str(src, pos, multiline=True)
# Add at maximum two extra apostrophes/quotes if the end sequence
# is 4 or 5 chars long instead of just 3.
if not src.startswith(delim, pos):
return pos, result
pos += 1
if not src.startswith(delim, pos):
return pos, result + delim
pos += 1
return pos, result + (delim * 2)
def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
if multiline:
error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape_multiline
else:
error_on = ILLEGAL_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape
result = ""
start_pos = pos
while True:
try:
char = src[pos]
except IndexError:
raise suffixed_err(src, pos, "Unterminated string") from None
if char == '"':
if not multiline:
return pos + 1, result + src[start_pos:pos]
if src.startswith('"""', pos):
return pos + 3, result + src[start_pos:pos]
pos += 1
continue
if char == "\\":
result += src[start_pos:pos]
pos, parsed_escape = parse_escapes(src, pos)
result += parsed_escape
start_pos = pos
continue
if char in error_on:
raise suffixed_err(src, pos, f"Illegal character {char!r}")
pos += 1
def parse_value( # noqa: C901
src: str, pos: Pos, parse_float: ParseFloat
) -> tuple[Pos, Any]:
try:
char: str | None = src[pos]
except IndexError:
char = None
# IMPORTANT: order conditions based on speed of checking and likelihood
# Basic strings
if char == '"':
if src.startswith('"""', pos):
return parse_multiline_str(src, pos, literal=False)
return parse_one_line_basic_str(src, pos)
# Literal strings
if char == "'":
if src.startswith("'''", pos):
return parse_multiline_str(src, pos, literal=True)
return parse_literal_str(src, pos)
# Booleans
if char == "t":
if src.startswith("true", pos):
return pos + 4, True
if char == "f":
if src.startswith("false", pos):
return pos + 5, False
# Arrays
if char == "[":
return parse_array(src, pos, parse_float)
# Inline tables
if char == "{":
return parse_inline_table(src, pos, parse_float)
# Dates and times
datetime_match = RE_DATETIME.match(src, pos)
if datetime_match:
try:
datetime_obj = match_to_datetime(datetime_match)
except ValueError as e:
raise suffixed_err(src, pos, "Invalid date or datetime") from e
return datetime_match.end(), datetime_obj
localtime_match = RE_LOCALTIME.match(src, pos)
if localtime_match:
return localtime_match.end(), match_to_localtime(localtime_match)
# Integers and "normal" floats.
# The regex will greedily match any type starting with a decimal
# char, so needs to be located after handling of dates and times.
number_match = RE_NUMBER.match(src, pos)
if number_match:
return number_match.end(), match_to_number(number_match, parse_float)
# Special floats
first_three = src[pos : pos + 3]
if first_three in {"inf", "nan"}:
return pos + 3, parse_float(first_three)
first_four = src[pos : pos + 4]
if first_four in {"-inf", "+inf", "-nan", "+nan"}:
return pos + 4, parse_float(first_four)
raise suffixed_err(src, pos, "Invalid value")
def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
"""Return a `TOMLDecodeError` where error message is suffixed with
coordinates in source."""
def coord_repr(src: str, pos: Pos) -> str:
if pos >= len(src):
return "end of document"
line = src.count("\n", 0, pos) + 1
if line == 1:
column = pos + 1
else:
column = pos - src.rindex("\n", 0, pos)
return f"line {line}, column {column}"
return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
def is_unicode_scalar_value(codepoint: int) -> bool:
return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
"""A decorator to make `parse_float` safe.
`parse_float` must not return dicts or lists, because these types
would be mixed with parsed TOML tables and arrays, thus confusing
the parser. The returned decorated callable raises `ValueError`
instead of returning illegal types.
"""
# The default `float` callable never returns illegal types. Optimize it.
if parse_float is float: # type: ignore[comparison-overlap]
return float
def safe_parse_float(float_str: str) -> Any:
float_value = parse_float(float_str)
if isinstance(float_value, (dict, list)):
raise ValueError("parse_float must not return dicts or lists")
return float_value
return safe_parse_float
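# Illustrative sketch (not part of the vendored module): ``loads`` with a
# custom ``parse_float``, which is routed through ``make_safe_parse_float``.
def _demo_loads():
    from decimal import Decimal

    data = loads('host = "127.0.0.1"\ntimeout = 2.5\n', parse_float=Decimal)
    assert data == {"host": "127.0.0.1", "timeout": Decimal("2.5")}
    # Redefining a key is rejected, with source coordinates in the message.
    try:
        loads("a = 1\na = 2\n")
    except TOMLDecodeError as exc:
        print(exc)  # -> Cannot overwrite a value (at line 2, column 6)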
| 22,633 |
Python
| 31.708092 | 88 | 0.576901 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/setuptools/extern/__init__.py
|
import importlib.util
import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
Search first the vendor package then as a natural package.
"""
yield self.vendor_pkg + '.'
yield ''
def _module_matches_namespace(self, fullname):
"""Figure out if the target module is vendored."""
root, base, target = fullname.partition(self.root_name + '.')
return not root and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
def find_spec(self, fullname, path=None, target=None):
"""Return a module spec for vendored names."""
return (
importlib.util.spec_from_loader(fullname, self)
if self._module_matches_namespace(fullname) else None
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
names = (
'packaging',
'ordered_set',
'more_itertools',
'importlib_metadata',
'zipp',
'importlib_resources',
'jaraco',
'typing_extensions',
'tomli',
)
VendorImporter(__name__, names, 'setuptools._vendor').install()
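# Illustrative sketch (not part of the vendored module): once install() has
# run (as it does above at import time), imports under ``setuptools.extern``
# resolve to the vendored copies first.
def _demo_vendor_import():
    from setuptools.extern import packaging

    # The module object is the vendored copy, so its __name__ reflects the
    # setuptools._vendor location.
    print(packaging.__name__)  # -> setuptools._vendor.packaging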
| 2,527 |
Python
| 29.095238 | 78 | 0.576573 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_version.py
|
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '1.1.2'
__version_tuple__ = version_tuple = (1, 1, 2)
| 160 |
Python
| 31.199994 | 46 | 0.66875 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/__init__.py
|
__all__ = [
"BaseExceptionGroup",
"ExceptionGroup",
"catch",
"format_exception",
"format_exception_only",
"print_exception",
"print_exc",
]
import os
import sys
from ._catch import catch
from ._version import version as __version__ # noqa: F401
if sys.version_info < (3, 11):
from ._exceptions import BaseExceptionGroup, ExceptionGroup
from ._formatting import (
format_exception,
format_exception_only,
print_exc,
print_exception,
)
if os.getenv("EXCEPTIONGROUP_NO_PATCH") != "1":
from . import _formatting # noqa: F401
BaseExceptionGroup.__module__ = __name__
ExceptionGroup.__module__ = __name__
else:
from traceback import (
format_exception,
format_exception_only,
print_exc,
print_exception,
)
BaseExceptionGroup = BaseExceptionGroup
ExceptionGroup = ExceptionGroup
| 920 |
Python
| 21.463414 | 63 | 0.626087 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_exceptions.py
|
from __future__ import annotations
from collections.abc import Callable, Sequence
from functools import partial
from inspect import getmro, isclass
from typing import TYPE_CHECKING, Generic, Type, TypeVar, cast, overload
if TYPE_CHECKING:
from typing import Self
_BaseExceptionT_co = TypeVar("_BaseExceptionT_co", bound=BaseException, covariant=True)
_BaseExceptionT = TypeVar("_BaseExceptionT", bound=BaseException)
_ExceptionT_co = TypeVar("_ExceptionT_co", bound=Exception, covariant=True)
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
def check_direct_subclass(
    exc: BaseException, parents: tuple[type[BaseException], ...]
) -> bool:
for cls in getmro(exc.__class__)[:-1]:
if cls in parents:
return True
return False
def get_condition_filter(
condition: type[_BaseExceptionT]
| tuple[type[_BaseExceptionT], ...]
| Callable[[_BaseExceptionT_co], bool]
) -> Callable[[_BaseExceptionT_co], bool]:
if isclass(condition) and issubclass(
cast(Type[BaseException], condition), BaseException
):
return partial(check_direct_subclass, parents=(condition,))
elif isinstance(condition, tuple):
if all(isclass(x) and issubclass(x, BaseException) for x in condition):
return partial(check_direct_subclass, parents=condition)
elif callable(condition):
return cast("Callable[[BaseException], bool]", condition)
raise TypeError("expected a function, exception type or tuple of exception types")
class BaseExceptionGroup(BaseException, Generic[_BaseExceptionT_co]):
"""A combination of multiple unrelated exceptions."""
def __new__(
cls, __message: str, __exceptions: Sequence[_BaseExceptionT_co]
) -> Self:
if not isinstance(__message, str):
raise TypeError(f"argument 1 must be str, not {type(__message)}")
if not isinstance(__exceptions, Sequence):
raise TypeError("second argument (exceptions) must be a sequence")
if not __exceptions:
raise ValueError(
"second argument (exceptions) must be a non-empty sequence"
)
for i, exc in enumerate(__exceptions):
if not isinstance(exc, BaseException):
raise ValueError(
f"Item {i} of second argument (exceptions) is not an exception"
)
if cls is BaseExceptionGroup:
if all(isinstance(exc, Exception) for exc in __exceptions):
cls = ExceptionGroup
if issubclass(cls, Exception):
for exc in __exceptions:
if not isinstance(exc, Exception):
if cls is ExceptionGroup:
raise TypeError(
"Cannot nest BaseExceptions in an ExceptionGroup"
)
else:
raise TypeError(
f"Cannot nest BaseExceptions in {cls.__name__!r}"
)
instance = super().__new__(cls, __message, __exceptions)
instance._message = __message
instance._exceptions = __exceptions
return instance
def add_note(self, note: str) -> None:
if not isinstance(note, str):
raise TypeError(
f"Expected a string, got note={note!r} (type {type(note).__name__})"
)
if not hasattr(self, "__notes__"):
self.__notes__: list[str] = []
self.__notes__.append(note)
@property
def message(self) -> str:
return self._message
@property
def exceptions(
self,
) -> tuple[_BaseExceptionT_co | BaseExceptionGroup[_BaseExceptionT_co], ...]:
return tuple(self._exceptions)
@overload
def subgroup(
self, __condition: type[_BaseExceptionT] | tuple[type[_BaseExceptionT], ...]
) -> BaseExceptionGroup[_BaseExceptionT] | None:
...
@overload
def subgroup(
self: Self, __condition: Callable[[_BaseExceptionT_co], bool]
) -> Self | None:
...
def subgroup(
self: Self,
__condition: type[_BaseExceptionT]
| tuple[type[_BaseExceptionT], ...]
| Callable[[_BaseExceptionT_co], bool],
) -> BaseExceptionGroup[_BaseExceptionT] | Self | None:
condition = get_condition_filter(__condition)
modified = False
if condition(self):
return self
exceptions: list[BaseException] = []
for exc in self.exceptions:
if isinstance(exc, BaseExceptionGroup):
subgroup = exc.subgroup(__condition)
if subgroup is not None:
exceptions.append(subgroup)
if subgroup is not exc:
modified = True
elif condition(exc):
exceptions.append(exc)
else:
modified = True
if not modified:
return self
elif exceptions:
group = self.derive(exceptions)
group.__cause__ = self.__cause__
group.__context__ = self.__context__
group.__traceback__ = self.__traceback__
return group
else:
return None
@overload
def split(
self: Self,
__condition: type[_BaseExceptionT] | tuple[type[_BaseExceptionT], ...],
) -> tuple[BaseExceptionGroup[_BaseExceptionT] | None, Self | None]:
...
@overload
def split(
self: Self, __condition: Callable[[_BaseExceptionT_co], bool]
) -> tuple[Self | None, Self | None]:
...
def split(
self: Self,
__condition: type[_BaseExceptionT]
| tuple[type[_BaseExceptionT], ...]
| Callable[[_BaseExceptionT_co], bool],
) -> (
tuple[BaseExceptionGroup[_BaseExceptionT] | None, Self | None]
| tuple[Self | None, Self | None]
):
condition = get_condition_filter(__condition)
if condition(self):
return self, None
matching_exceptions: list[BaseException] = []
nonmatching_exceptions: list[BaseException] = []
for exc in self.exceptions:
if isinstance(exc, BaseExceptionGroup):
matching, nonmatching = exc.split(condition)
if matching is not None:
matching_exceptions.append(matching)
if nonmatching is not None:
nonmatching_exceptions.append(nonmatching)
elif condition(exc):
matching_exceptions.append(exc)
else:
nonmatching_exceptions.append(exc)
matching_group: Self | None = None
if matching_exceptions:
matching_group = self.derive(matching_exceptions)
matching_group.__cause__ = self.__cause__
matching_group.__context__ = self.__context__
matching_group.__traceback__ = self.__traceback__
nonmatching_group: Self | None = None
if nonmatching_exceptions:
nonmatching_group = self.derive(nonmatching_exceptions)
nonmatching_group.__cause__ = self.__cause__
nonmatching_group.__context__ = self.__context__
nonmatching_group.__traceback__ = self.__traceback__
return matching_group, nonmatching_group
def derive(self: Self, __excs: Sequence[_BaseExceptionT_co]) -> Self:
eg = BaseExceptionGroup(self.message, __excs)
if hasattr(self, "__notes__"):
# Create a new list so that add_note() only affects one exceptiongroup
eg.__notes__ = list(self.__notes__)
return eg
def __str__(self) -> str:
suffix = "" if len(self._exceptions) == 1 else "s"
return f"{self.message} ({len(self._exceptions)} sub-exception{suffix})"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.message!r}, {self._exceptions!r})"
class ExceptionGroup(BaseExceptionGroup[_ExceptionT_co], Exception):
def __new__(cls, __message: str, __exceptions: Sequence[_ExceptionT_co]) -> Self:
return super().__new__(cls, __message, __exceptions)
if TYPE_CHECKING:
@property
def exceptions(
self,
) -> tuple[_ExceptionT_co | ExceptionGroup[_ExceptionT_co], ...]:
...
@overload # type: ignore[override]
def subgroup(
self, __condition: type[_ExceptionT] | tuple[type[_ExceptionT], ...]
) -> ExceptionGroup[_ExceptionT] | None:
...
@overload
def subgroup(
self: Self, __condition: Callable[[_ExceptionT_co], bool]
) -> Self | None:
...
def subgroup(
self: Self,
__condition: type[_ExceptionT]
| tuple[type[_ExceptionT], ...]
| Callable[[_ExceptionT_co], bool],
) -> ExceptionGroup[_ExceptionT] | Self | None:
return super().subgroup(__condition)
@overload # type: ignore[override]
def split(
self: Self, __condition: type[_ExceptionT] | tuple[type[_ExceptionT], ...]
) -> tuple[ExceptionGroup[_ExceptionT] | None, Self | None]:
...
@overload
def split(
self: Self, __condition: Callable[[_ExceptionT_co], bool]
) -> tuple[Self | None, Self | None]:
...
def split(
self: Self,
__condition: type[_ExceptionT]
| tuple[type[_ExceptionT], ...]
| Callable[[_ExceptionT_co], bool],
) -> (
tuple[ExceptionGroup[_ExceptionT] | None, Self | None]
| tuple[Self | None, Self | None]
):
return super().split(__condition)
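# Illustrative sketch (not part of the vendored module): ``split`` separates
# leaf exceptions that match a condition from those that do not, copying the
# group metadata onto both halves.
def _demo_split():
    group = ExceptionGroup("demo", [ValueError("v"), TypeError("t")])
    matching, rest = group.split(ValueError)
    assert [type(exc) for exc in matching.exceptions] == [ValueError]
    assert [type(exc) for exc in rest.exceptions] == [TypeError]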
| 9,768 |
Python
| 33.519435 | 87 | 0.566544 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_catch.py
|
from __future__ import annotations
import sys
from collections.abc import Callable, Iterable, Mapping
from contextlib import AbstractContextManager
from types import TracebackType
from typing import TYPE_CHECKING, Any
if sys.version_info < (3, 11):
from ._exceptions import BaseExceptionGroup
if TYPE_CHECKING:
_Handler = Callable[[BaseException], Any]
class _Catcher:
def __init__(self, handler_map: Mapping[tuple[type[BaseException], ...], _Handler]):
self._handler_map = handler_map
def __enter__(self) -> None:
pass
def __exit__(
self,
etype: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
) -> bool:
if exc is not None:
unhandled = self.handle_exception(exc)
if unhandled is exc:
return False
elif unhandled is None:
return True
else:
raise unhandled from None
return False
def handle_exception(self, exc: BaseException) -> BaseException | None:
excgroup: BaseExceptionGroup | None
if isinstance(exc, BaseExceptionGroup):
excgroup = exc
else:
excgroup = BaseExceptionGroup("", [exc])
new_exceptions: list[BaseException] = []
for exc_types, handler in self._handler_map.items():
matched, excgroup = excgroup.split(exc_types)
if matched:
try:
handler(matched)
except BaseException as new_exc:
new_exceptions.append(new_exc)
if not excgroup:
break
if new_exceptions:
if len(new_exceptions) == 1:
return new_exceptions[0]
if excgroup:
new_exceptions.append(excgroup)
return BaseExceptionGroup("", new_exceptions)
elif (
excgroup and len(excgroup.exceptions) == 1 and excgroup.exceptions[0] is exc
):
return exc
else:
return excgroup
def catch(
__handlers: Mapping[type[BaseException] | Iterable[type[BaseException]], _Handler]
) -> AbstractContextManager[None]:
if not isinstance(__handlers, Mapping):
raise TypeError("the argument must be a mapping")
handler_map: dict[
        tuple[type[BaseException], ...], Callable[[BaseExceptionGroup], Any]
] = {}
for type_or_iterable, handler in __handlers.items():
        iterable: tuple[type[BaseException], ...]
if isinstance(type_or_iterable, type) and issubclass(
type_or_iterable, BaseException
):
iterable = (type_or_iterable,)
elif isinstance(type_or_iterable, Iterable):
iterable = tuple(type_or_iterable)
else:
raise TypeError(
"each key must be either an exception classes or an iterable thereof"
)
if not callable(handler):
raise TypeError("handlers must be callable")
for exc_type in iterable:
if not isinstance(exc_type, type) or not issubclass(
exc_type, BaseException
):
raise TypeError(
"each key must be either an exception classes or an iterable "
"thereof"
)
if issubclass(exc_type, BaseExceptionGroup):
raise TypeError(
"catching ExceptionGroup with catch() is not allowed. "
"Use except instead."
)
handler_map[iterable] = handler
return _Catcher(handler_map)
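# Illustrative sketch (not part of the vendored module): ``catch`` maps
# exception types to handlers; a fully handled group is suppressed.
def _demo_catch():
    def handle_value_errors(excgroup):
        print("handled", len(excgroup.exceptions), "ValueError(s)")

    with catch({ValueError: handle_value_errors}):
        raise BaseExceptionGroup("demo", [ValueError("v1"), ValueError("v2")])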
| 3,656 |
Python
| 29.991525 | 88 | 0.571937 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/exceptiongroup/_formatting.py
|
# traceback_exception_init() adapted from trio
#
# _ExceptionPrintContext and traceback_exception_format() copied from the standard
# library
from __future__ import annotations
import collections.abc
import sys
import textwrap
import traceback
from functools import singledispatch
from types import TracebackType
from typing import Any, List, Optional
from ._exceptions import BaseExceptionGroup
max_group_width = 15
max_group_depth = 10
_cause_message = (
"\nThe above exception was the direct cause of the following exception:\n\n"
)
_context_message = (
"\nDuring handling of the above exception, another exception occurred:\n\n"
)
def _format_final_exc_line(etype, value):
valuestr = _safe_string(value, "exception")
if value is None or not valuestr:
line = f"{etype}\n"
else:
line = f"{etype}: {valuestr}\n"
return line
def _safe_string(value, what, func=str):
try:
return func(value)
except BaseException:
return f"<{what} {func.__name__}() failed>"
class _ExceptionPrintContext:
def __init__(self):
self.seen = set()
self.exception_group_depth = 0
self.need_close = False
def indent(self):
return " " * (2 * self.exception_group_depth)
def emit(self, text_gen, margin_char=None):
if margin_char is None:
margin_char = "|"
indent_str = self.indent()
if self.exception_group_depth:
indent_str += margin_char + " "
if isinstance(text_gen, str):
yield textwrap.indent(text_gen, indent_str, lambda line: True)
else:
for text in text_gen:
yield textwrap.indent(text, indent_str, lambda line: True)
def exceptiongroup_excepthook(
etype: type[BaseException], value: BaseException, tb: TracebackType | None
) -> None:
sys.stderr.write("".join(traceback.format_exception(etype, value, tb)))
class PatchedTracebackException(traceback.TracebackException):
def __init__(
self,
exc_type: type[BaseException],
exc_value: BaseException,
exc_traceback: TracebackType | None,
*,
limit: int | None = None,
lookup_lines: bool = True,
capture_locals: bool = False,
compact: bool = False,
_seen: set[int] | None = None,
) -> None:
kwargs: dict[str, Any] = {}
if sys.version_info >= (3, 10):
kwargs["compact"] = compact
is_recursive_call = _seen is not None
if _seen is None:
_seen = set()
_seen.add(id(exc_value))
self.stack = traceback.StackSummary.extract(
traceback.walk_tb(exc_traceback),
limit=limit,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
)
self.exc_type = exc_type
# Capture now to permit freeing resources: only complication is in the
# unofficial API _format_final_exc_line
self._str = _safe_string(exc_value, "exception")
try:
self.__notes__ = getattr(exc_value, "__notes__", None)
except KeyError:
# Workaround for https://github.com/python/cpython/issues/98778 on Python
# <= 3.9, and some 3.10 and 3.11 patch versions.
HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ())
if sys.version_info[:2] <= (3, 11) and isinstance(exc_value, HTTPError):
self.__notes__ = None
else:
raise
if exc_type and issubclass(exc_type, SyntaxError):
# Handle SyntaxError's specially
self.filename = exc_value.filename
lno = exc_value.lineno
self.lineno = str(lno) if lno is not None else None
self.text = exc_value.text
self.offset = exc_value.offset
self.msg = exc_value.msg
if sys.version_info >= (3, 10):
end_lno = exc_value.end_lineno
self.end_lineno = str(end_lno) if end_lno is not None else None
self.end_offset = exc_value.end_offset
elif (
exc_type
and issubclass(exc_type, (NameError, AttributeError))
and getattr(exc_value, "name", None) is not None
):
suggestion = _compute_suggestion_error(exc_value, exc_traceback)
if suggestion:
self._str += f". Did you mean: '{suggestion}'?"
if lookup_lines:
# Force all lines in the stack to be loaded
for frame in self.stack:
frame.line
self.__suppress_context__ = (
exc_value.__suppress_context__ if exc_value is not None else False
)
        # Convert __cause__ and __context__ to `TracebackException`s; use a
# queue to avoid recursion (only the top-level call gets _seen == None)
if not is_recursive_call:
queue = [(self, exc_value)]
while queue:
te, e = queue.pop()
if e and e.__cause__ is not None and id(e.__cause__) not in _seen:
cause = PatchedTracebackException(
type(e.__cause__),
e.__cause__,
e.__cause__.__traceback__,
limit=limit,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
_seen=_seen,
)
else:
cause = None
if compact:
need_context = (
cause is None and e is not None and not e.__suppress_context__
)
else:
need_context = True
if (
e
and e.__context__ is not None
and need_context
and id(e.__context__) not in _seen
):
context = PatchedTracebackException(
type(e.__context__),
e.__context__,
e.__context__.__traceback__,
limit=limit,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
_seen=_seen,
)
else:
context = None
# Capture each of the exceptions in the ExceptionGroup along with each
# of their causes and contexts
if e and isinstance(e, BaseExceptionGroup):
exceptions = []
for exc in e.exceptions:
texc = PatchedTracebackException(
type(exc),
exc,
exc.__traceback__,
lookup_lines=lookup_lines,
capture_locals=capture_locals,
_seen=_seen,
)
exceptions.append(texc)
else:
exceptions = None
te.__cause__ = cause
te.__context__ = context
te.exceptions = exceptions
if cause:
queue.append((te.__cause__, e.__cause__))
if context:
queue.append((te.__context__, e.__context__))
if exceptions:
queue.extend(zip(te.exceptions, e.exceptions))
def format(self, *, chain=True, _ctx=None):
if _ctx is None:
_ctx = _ExceptionPrintContext()
output = []
exc = self
if chain:
while exc:
if exc.__cause__ is not None:
chained_msg = _cause_message
chained_exc = exc.__cause__
elif exc.__context__ is not None and not exc.__suppress_context__:
chained_msg = _context_message
chained_exc = exc.__context__
else:
chained_msg = None
chained_exc = None
output.append((chained_msg, exc))
exc = chained_exc
else:
output.append((None, exc))
for msg, exc in reversed(output):
if msg is not None:
yield from _ctx.emit(msg)
if exc.exceptions is None:
if exc.stack:
yield from _ctx.emit("Traceback (most recent call last):\n")
yield from _ctx.emit(exc.stack.format())
yield from _ctx.emit(exc.format_exception_only())
elif _ctx.exception_group_depth > max_group_depth:
# exception group, but depth exceeds limit
yield from _ctx.emit(f"... (max_group_depth is {max_group_depth})\n")
else:
# format exception group
is_toplevel = _ctx.exception_group_depth == 0
if is_toplevel:
_ctx.exception_group_depth += 1
if exc.stack:
yield from _ctx.emit(
"Exception Group Traceback (most recent call last):\n",
margin_char="+" if is_toplevel else None,
)
yield from _ctx.emit(exc.stack.format())
yield from _ctx.emit(exc.format_exception_only())
num_excs = len(exc.exceptions)
if num_excs <= max_group_width:
n = num_excs
else:
n = max_group_width + 1
_ctx.need_close = False
for i in range(n):
last_exc = i == n - 1
if last_exc:
# The closing frame may be added by a recursive call
_ctx.need_close = True
if max_group_width is not None:
truncated = i >= max_group_width
else:
truncated = False
title = f"{i + 1}" if not truncated else "..."
yield (
_ctx.indent()
+ ("+-" if i == 0 else " ")
+ f"+---------------- {title} ----------------\n"
)
_ctx.exception_group_depth += 1
if not truncated:
yield from exc.exceptions[i].format(chain=chain, _ctx=_ctx)
else:
remaining = num_excs - max_group_width
plural = "s" if remaining > 1 else ""
yield from _ctx.emit(
f"and {remaining} more exception{plural}\n"
)
if last_exc and _ctx.need_close:
yield _ctx.indent() + "+------------------------------------\n"
_ctx.need_close = False
_ctx.exception_group_depth -= 1
if is_toplevel:
assert _ctx.exception_group_depth == 1
_ctx.exception_group_depth = 0
def format_exception_only(self):
"""Format the exception part of the traceback.
The return value is a generator of strings, each ending in a newline.
Normally, the generator emits a single string; however, for
SyntaxError exceptions, it emits several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the output.
"""
if self.exc_type is None:
yield traceback._format_final_exc_line(None, self._str)
return
stype = self.exc_type.__qualname__
smod = self.exc_type.__module__
if smod not in ("__main__", "builtins"):
if not isinstance(smod, str):
smod = "<unknown>"
stype = smod + "." + stype
if not issubclass(self.exc_type, SyntaxError):
yield _format_final_exc_line(stype, self._str)
elif traceback_exception_format_syntax_error is not None:
yield from traceback_exception_format_syntax_error(self, stype)
else:
yield from traceback_exception_original_format_exception_only(self)
if isinstance(self.__notes__, collections.abc.Sequence):
for note in self.__notes__:
note = _safe_string(note, "note")
yield from [line + "\n" for line in note.split("\n")]
elif self.__notes__ is not None:
yield _safe_string(self.__notes__, "__notes__", func=repr)
traceback_exception_original_format = traceback.TracebackException.format
traceback_exception_original_format_exception_only = (
traceback.TracebackException.format_exception_only
)
traceback_exception_format_syntax_error = getattr(
traceback.TracebackException, "_format_syntax_error", None
)
if sys.excepthook is sys.__excepthook__:
traceback.TracebackException.__init__ = ( # type: ignore[assignment]
PatchedTracebackException.__init__
)
traceback.TracebackException.format = ( # type: ignore[assignment]
PatchedTracebackException.format
)
traceback.TracebackException.format_exception_only = ( # type: ignore[assignment]
PatchedTracebackException.format_exception_only
)
sys.excepthook = exceptiongroup_excepthook
@singledispatch
def format_exception_only(__exc: BaseException) -> List[str]:
return list(
PatchedTracebackException(
type(__exc), __exc, None, compact=True
).format_exception_only()
)
@format_exception_only.register
def _(__exc: type, value: BaseException) -> List[str]:
return format_exception_only(value)
@singledispatch
def format_exception(
__exc: BaseException,
limit: Optional[int] = None,
chain: bool = True,
) -> List[str]:
return list(
PatchedTracebackException(
type(__exc), __exc, __exc.__traceback__, limit=limit, compact=True
).format(chain=chain)
)
@format_exception.register
def _(
__exc: type,
value: BaseException,
tb: TracebackType,
limit: Optional[int] = None,
chain: bool = True,
) -> List[str]:
return format_exception(value, limit, chain)
@singledispatch
def print_exception(
__exc: BaseException,
limit: Optional[int] = None,
file: Any = None,
chain: bool = True,
) -> None:
if file is None:
file = sys.stderr
for line in PatchedTracebackException(
type(__exc), __exc, __exc.__traceback__, limit=limit
).format(chain=chain):
print(line, file=file, end="")
@print_exception.register
def _(
__exc: type,
value: BaseException,
tb: TracebackType,
limit: Optional[int] = None,
file: Any = None,
chain: bool = True,
) -> None:
print_exception(value, limit, file, chain)
def print_exc(
limit: Optional[int] = None,
file: Any | None = None,
chain: bool = True,
) -> None:
value = sys.exc_info()[1]
print_exception(value, limit, file, chain)
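# --- Illustrative usage sketch (not part of the original module) ---
# How the singledispatch helpers above are called: build a small group and
# format it, exercising PatchedTracebackException's group-aware output
# (the "+----" framed per-exception sub-tracebacks). Hypothetical helper.
def _example_format_group() -> List[str]:
    try:
        raise BaseExceptionGroup("demo", [ValueError("boom"), KeyError("missing")])
    except BaseExceptionGroup as eg:
        return format_exception(eg)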
# Python levenshtein edit distance code for NameError/AttributeError
# suggestions, backported from 3.12
_MAX_CANDIDATE_ITEMS = 750
_MAX_STRING_SIZE = 40
_MOVE_COST = 2
_CASE_COST = 1
_SENTINEL = object()
def _substitution_cost(ch_a, ch_b):
if ch_a == ch_b:
return 0
if ch_a.lower() == ch_b.lower():
return _CASE_COST
return _MOVE_COST
def _compute_suggestion_error(exc_value, tb):
wrong_name = getattr(exc_value, "name", None)
if wrong_name is None or not isinstance(wrong_name, str):
return None
if isinstance(exc_value, AttributeError):
obj = getattr(exc_value, "obj", _SENTINEL)
if obj is _SENTINEL:
return None
obj = exc_value.obj
try:
d = dir(obj)
except Exception:
return None
else:
assert isinstance(exc_value, NameError)
# find most recent frame
if tb is None:
return None
while tb.tb_next is not None:
tb = tb.tb_next
frame = tb.tb_frame
d = list(frame.f_locals) + list(frame.f_globals) + list(frame.f_builtins)
if len(d) > _MAX_CANDIDATE_ITEMS:
return None
wrong_name_len = len(wrong_name)
if wrong_name_len > _MAX_STRING_SIZE:
return None
best_distance = wrong_name_len
suggestion = None
for possible_name in d:
if possible_name == wrong_name:
# A missing attribute is "found". Don't suggest it (see GH-88821).
continue
        # No more than 1/3 of the involved characters should need to change.
max_distance = (len(possible_name) + wrong_name_len + 3) * _MOVE_COST // 6
# Don't take matches we've already beaten.
max_distance = min(max_distance, best_distance - 1)
current_distance = _levenshtein_distance(
wrong_name, possible_name, max_distance
)
if current_distance > max_distance:
continue
if not suggestion or current_distance < best_distance:
suggestion = possible_name
best_distance = current_distance
return suggestion
def _levenshtein_distance(a, b, max_cost):
# A Python implementation of Python/suggestions.c:levenshtein_distance.
# Both strings are the same
if a == b:
return 0
# Trim away common affixes
pre = 0
while a[pre:] and b[pre:] and a[pre] == b[pre]:
pre += 1
a = a[pre:]
b = b[pre:]
post = 0
while a[: post or None] and b[: post or None] and a[post - 1] == b[post - 1]:
post -= 1
a = a[: post or None]
b = b[: post or None]
if not a or not b:
return _MOVE_COST * (len(a) + len(b))
if len(a) > _MAX_STRING_SIZE or len(b) > _MAX_STRING_SIZE:
return max_cost + 1
# Prefer shorter buffer
if len(b) < len(a):
a, b = b, a
# Quick fail when a match is impossible
if (len(b) - len(a)) * _MOVE_COST > max_cost:
return max_cost + 1
# Instead of producing the whole traditional len(a)-by-len(b)
# matrix, we can update just one row in place.
# Initialize the buffer row
row = list(range(_MOVE_COST, _MOVE_COST * (len(a) + 1), _MOVE_COST))
result = 0
for bindex in range(len(b)):
bchar = b[bindex]
distance = result = bindex * _MOVE_COST
minimum = sys.maxsize
for index in range(len(a)):
# 1) Previous distance in this row is cost(b[:b_index], a[:index])
substitute = distance + _substitution_cost(bchar, a[index])
# 2) cost(b[:b_index], a[:index+1]) from previous row
distance = row[index]
# 3) existing result is cost(b[:b_index+1], a[index])
insert_delete = min(result, distance) + _MOVE_COST
result = min(insert_delete, substitute)
# cost(b[:b_index+1], a[:index+1])
row[index] = result
if result < minimum:
minimum = result
if minimum > max_cost:
# Everything in this row is too big, so bail early.
return max_cost + 1
return result
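# --- Illustrative sketch (not part of the original module) ---
# The bounded edit distance in action. max_cost caps the work: any pair
# further apart than max_cost returns max_cost + 1 instead of the true cost.
def _example_levenshtein() -> None:
    assert _levenshtein_distance("abc", "abc", 10) == 0  # identical strings
    assert _levenshtein_distance("recieve", "receive", 10) == 4  # two substitutions
    assert _levenshtein_distance("a", "zzzzzz", 2) == 3  # quick-fail: max_cost + 1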
| 19,475 |
Python
| 33.531915 | 87 | 0.532478 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/__version__.py
|
__version__ = "0.7.1"
| 22 |
Python
| 10.499995 | 21 | 0.454545 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/helpers.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
import inspect
from typing import Awaitable, Union
from typing_extensions import Protocol
from .types import T
class Orderable(Protocol): # pragma: no cover
def __lt__(self, other):
...
def __gt__(self, other):
...
async def maybe_await(object: Union[Awaitable[T], T]) -> T:
if inspect.isawaitable(object):
return await object # type: ignore
return object # type: ignore
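# --- Illustrative usage sketch (not part of the original module) ---
# maybe_await() lets shared code accept plain callables and coroutine
# functions interchangeably. Both helpers below are hypothetical.
async def _example_maybe_await() -> None:
    def sync_double(x: int) -> int:
        return x * 2
    async def async_double(x: int) -> int:
        return x * 2
    # The same call site handles both flavors.
    assert await maybe_await(sync_double(21)) == 42
    assert await maybe_await(async_double(21)) == 42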
| 483 |
Python
| 19.166666 | 59 | 0.662526 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/builtins.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
"""
Async-compatible versions of builtin functions for iterables.
These functions intentionally shadow their builtins counterparts,
enabling use with both standard iterables and async iterables, without
needing to use if/else clauses or awkward logic. Standard iterables
get wrapped in async generators, and all functions are designed for
use with `await`, `async for`, etc.
"""
import asyncio
import builtins
from typing import (
Any,
AsyncIterable,
AsyncIterator,
Callable,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
cast,
overload,
)
from .helpers import Orderable, maybe_await
from .types import T1, T2, T3, T4, T5, AnyIterable, AnyIterator, AnyStop, R, T
def iter(itr: AnyIterable[T]) -> AsyncIterator[T]:
"""
Get an async iterator from any mixed iterable.
Async iterators will be returned directly.
Async iterables will return an async iterator.
Standard iterables will be wrapped in an async generator yielding
each item in the iterable in the same order.
Examples:
        async for value in iter(range(10)):
...
"""
if isinstance(itr, AsyncIterator):
return itr
if isinstance(itr, AsyncIterable):
return itr.__aiter__()
async def gen() -> AsyncIterator[T]:
for item in cast(Iterable[T], itr):
yield item
return gen()
async def next(itr: AnyIterator[T]) -> T:
"""
Return the next item of any mixed iterator.
Calls builtins.next() on standard iterators, and awaits itr.__anext__()
on async iterators.
Example:
value = await next(it)
"""
if isinstance(itr, AsyncIterator):
return await itr.__anext__()
try:
return builtins.next(itr)
except StopIteration:
raise StopAsyncIteration
async def list(itr: AnyIterable[T]) -> List[T]:
"""
Consume a mixed iterable and return a list of items in order.
Example:
await list(range(5))
-> [0, 1, 2, 3, 4]
"""
return [item async for item in iter(itr)]
async def set(itr: AnyIterable[T]) -> Set[T]:
"""
Consume a mixed iterable and return a set of items.
Example:
await set([0, 1, 2, 3, 0, 1, 2, 3])
-> {0, 1, 2, 3}
"""
return {item async for item in iter(itr)}
async def enumerate(
itr: AnyIterable[T], start: int = 0
) -> AsyncIterator[Tuple[int, T]]:
"""
Consume a mixed iterable and yield the current index and item.
Example:
async for index, value in enumerate(...):
...
"""
index = start
async for item in iter(itr):
yield index, item
index += 1
async def map(fn: Callable[[T], R], itr: AnyIterable[T]) -> AsyncIterator[R]:
"""
    Modify each item of a mixed iterable using the given function or coroutine.
Example:
async for response in map(func, data):
...
"""
# todo: queue items eagerly
async for item in iter(itr):
yield await maybe_await(fn(item))
@overload
async def max(
itr: AnyIterable[Orderable], *, key: Optional[Callable] = None
) -> Orderable: # pragma: no cover
pass
@overload
async def max(
itr: AnyIterable[Orderable], *, default: T, key: Optional[Callable] = None
) -> Union[Orderable, T]: # pragma: no cover
pass
async def max(itr: AnyIterable[Orderable], **kwargs: Any) -> Any:
"""
    Return the largest item in an iterable.
Example:
        await max(range(5))
-> 4
"""
for k in kwargs:
if k not in ("key", "default"):
raise ValueError(f"kwarg {k} not supported")
value: Orderable
vkey: Any
keyfunc = kwargs.get("key", None)
it = iter(itr)
try:
value = await next(it)
if keyfunc:
vkey = keyfunc(value)
except StopAsyncIteration:
if "default" in kwargs:
return kwargs["default"]
raise ValueError("iterable is empty and no default value given")
if keyfunc:
async for item in it:
ikey = keyfunc(item)
if ikey > vkey:
value = item
vkey = ikey
else:
async for item in it:
if item > value:
value = item
return value
@overload
async def min(
itr: AnyIterable[Orderable], *, key: Optional[Callable] = None
) -> Orderable: # pragma: no cover
pass
@overload
async def min(
itr: AnyIterable[Orderable], *, default: T, key: Optional[Callable] = None
) -> Union[Orderable, T]: # pragma: no cover
pass
async def min(itr: AnyIterable[Orderable], **kwargs: Any) -> Any:
"""
    Return the smallest item in an iterable.
Example:
await min(range(5))
-> 0
"""
for k in kwargs:
if k not in ("key", "default"):
raise ValueError(f"kwarg {k} not supported")
value: Orderable
vkey: Any
keyfunc = kwargs.get("key", None)
it = iter(itr)
try:
value = await next(it)
if keyfunc:
vkey = keyfunc(value)
except StopAsyncIteration:
if "default" in kwargs:
return kwargs["default"]
raise ValueError("iterable is empty and no default value given")
if keyfunc:
async for item in it:
ikey = keyfunc(item)
if ikey < vkey:
value = item
vkey = ikey
else:
async for item in it:
if item < value:
value = item
return value
async def sum(itr: AnyIterable[T], start: T = None) -> T:
"""
    Compute the sum of a mixed iterable, starting from the given start value.
Example:
await sum(generator())
-> 1024
"""
value: T
if start is None:
value = cast(T, 0) # emulate stdlib but still type nicely for non-ints
else:
value = start
async for item in iter(itr):
value += item # type: ignore # mypy doesn't know T + T
return value
# pylint: disable=undefined-variable,multiple-statements,too-many-arguments
@overload
def zip(__iter1: AnyIterable[T1]) -> AsyncIterator[Tuple[T1]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1], __iter2: AnyIterable[T2]
) -> AsyncIterator[Tuple[T1, T2]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1], __iter2: AnyIterable[T2], __iter3: AnyIterable[T3]
) -> AsyncIterator[Tuple[T1, T2, T3]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1],
__iter2: AnyIterable[T2],
__iter3: AnyIterable[T3],
__iter4: AnyIterable[T4],
) -> AsyncIterator[Tuple[T1, T2, T3, T4]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[T1],
__iter2: AnyIterable[T2],
__iter3: AnyIterable[T3],
__iter4: AnyIterable[T4],
__iter5: AnyIterable[T5],
) -> AsyncIterator[Tuple[T1, T2, T3, T4, T5]]: # pragma: no cover
pass
@overload
def zip(
__iter1: AnyIterable[Any],
__iter2: AnyIterable[Any],
__iter3: AnyIterable[Any],
__iter4: AnyIterable[Any],
__iter5: AnyIterable[Any],
__iter6: AnyIterable[Any],
*__iterables: AnyIterable[Any],
) -> AsyncIterator[Tuple[Any, ...]]: # pragma: no cover
pass
# pylint: enable=undefined-variable,multiple-statements,too-many-arguments
async def zip(*itrs: AnyIterable[Any]) -> AsyncIterator[Tuple[Any, ...]]:
"""
Yield a tuple of items from mixed iterables until the shortest is consumed.
Example:
async for a, b, c in zip(i, j, k):
...
"""
its: List[AsyncIterator[Any]] = [iter(itr) for itr in itrs]
while True:
try:
values = await asyncio.gather(*[it.__anext__() for it in its])
yield values
except AnyStop:
break
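# --- Illustrative usage sketch (not part of the original module) ---
# The point of these shadowed builtins: one call site can mix plain and
# async iterables. letters() is a hypothetical async generator.
async def _example_mixed_zip() -> List[Tuple[int, str]]:
    async def letters() -> AsyncIterator[str]:
        for ch in "abc":
            yield ch
    # range() is a standard iterable; letters() is async. zip() takes both.
    # Each yielded pair is materialized by asyncio.gather, so convert it
    # to a tuple here for a stable return type.
    return [tuple(pair) async for pair in zip(range(3), letters())]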
| 7,968 |
Python
| 21.384831 | 85 | 0.599523 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/more_itertools.py
|
# Copyright 2020 John Reese
# Licensed under the MIT license
from typing import AsyncIterable, List, TypeVar
from .builtins import iter
from .itertools import islice
from .types import AnyIterable
T = TypeVar("T")
async def take(n: int, iterable: AnyIterable[T]) -> List[T]:
"""
Return the first n items of iterable as a list.
If there are too few items in iterable, all of them are returned.
n needs to be at least 0. If it is 0, an empty list is returned.
Example:
first_two = await take(2, [1, 2, 3, 4, 5])
"""
if n < 0:
raise ValueError("take's first parameter can't be negative")
return [item async for item in islice(iterable, n)]
async def chunked(iterable: AnyIterable[T], n: int) -> AsyncIterable[List[T]]:
"""
Break iterable into chunks of length n.
The last chunk will be shorter if the total number of items is not
divisible by n.
Example:
async for chunk in chunked([1, 2, 3, 4, 5], n=2):
... # first iteration: chunk == [1, 2]; last one: chunk == [5]
"""
it = iter(iterable)
chunk = await take(n, it)
while chunk != []:
yield chunk
chunk = await take(n, it)
| 1,207 |
Python
| 24.702127 | 78 | 0.628003 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/__init__.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
"""
itertools and builtins for AsyncIO and mixed iterables
"""
__author__ = "John Reese"
from . import asyncio
from .__version__ import __version__
from .builtins import enumerate, iter, list, map, max, min, next, set, sum, zip
from .itertools import (
accumulate,
chain,
combinations,
combinations_with_replacement,
compress,
count,
cycle,
dropwhile,
filterfalse,
groupby,
islice,
permutations,
product,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
| 588 |
Python
| 17.406249 | 79 | 0.656463 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/asyncio.py
|
# Copyright 2019 John Reese
# Licensed under the MIT license
"""
Friendlier version of asyncio standard library.
Provisional library. Must be imported as `aioitertools.asyncio`.
"""
import asyncio
import time
from typing import Any, Awaitable, Dict, Iterable, List, Optional, Set, Tuple, cast
from .types import AsyncIterator, T
async def as_completed(
aws: Iterable[Awaitable[T]],
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
timeout: Optional[float] = None
) -> AsyncIterator[T]:
"""
Run awaitables in `aws` concurrently, and yield results as they complete.
Unlike `asyncio.as_completed`, this yields actual results, and does not require
awaiting each item in the iterable.
Example:
async for value in as_completed(futures):
... # use value immediately
"""
done: Set[Awaitable[T]] = set()
pending: Set[Awaitable[T]] = set(aws)
remaining: Optional[float] = None
if timeout and timeout > 0:
threshold = time.time() + timeout
else:
timeout = None
while pending:
if timeout:
remaining = threshold - time.time()
if remaining <= 0:
raise asyncio.TimeoutError()
# asyncio.Future inherits from typing.Awaitable
# asyncio.wait takes Iterable[Union[Future, Generator, Awaitable]], but
        # returns Tuple[Set[Future], Set[Future]]. Because mypy doesn't like assigning
# these values to existing Set[Awaitable] or even Set[Union[Awaitable, Future]],
# we need to first cast the results to something that we can actually use
# asyncio.Future: https://github.com/python/typeshed/blob/72ff7b94e534c610ddf8939bacbc55343e9465d2/stdlib/3/asyncio/futures.pyi#L30
# asyncio.wait(): https://github.com/python/typeshed/blob/72ff7b94e534c610ddf8939bacbc55343e9465d2/stdlib/3/asyncio/tasks.pyi#L89
done, pending = cast(
Tuple[Set[Awaitable[T]], Set[Awaitable[T]]],
await asyncio.wait(
pending,
loop=loop,
timeout=remaining,
return_when=asyncio.FIRST_COMPLETED,
),
)
for item in done:
yield await item
async def gather(
*args: Awaitable[T],
loop: Optional[asyncio.AbstractEventLoop] = None,
return_exceptions: bool = False,
limit: int = -1
) -> List[Any]:
"""Like asyncio.gather but with a limit on concurrency.
    Much of the complexity of gather comes from its support for cancellation,
    which we omit here. Note that all results are buffered.
"""
# For detecting input duplicates and reconciling them at the end
input_map: Dict[Awaitable[T], List[int]] = {}
# This is keyed on what we'll get back from asyncio.wait
pos: Dict[asyncio.Future[T], int] = {}
ret: List[Any] = [None] * len(args)
pending: Set[asyncio.Future[T]] = set()
done: Set[asyncio.Future[T]] = set()
next_arg = 0
while True:
while next_arg < len(args) and (limit == -1 or len(pending) < limit):
# We have to defer the creation of the Task as long as possible
# because once we do, it starts executing, regardless of what we
# have in the pending set.
if args[next_arg] in input_map:
input_map[args[next_arg]].append(next_arg)
else:
# We call ensure_future directly to ensure that we have a Task
# because the return value of asyncio.wait will be an implicit
# task otherwise, and we won't be able to know which input it
# corresponds to.
task: asyncio.Future[T] = asyncio.ensure_future(args[next_arg])
pending.add(task)
pos[task] = next_arg
input_map[args[next_arg]] = [next_arg]
next_arg += 1
# pending might be empty if the last items of args were dupes;
# asyncio.wait([]) will raise an exception.
if pending:
done, pending = await asyncio.wait(
pending, loop=loop, return_when=asyncio.FIRST_COMPLETED
)
for x in done:
if return_exceptions and x.exception():
ret[pos[x]] = x.exception()
else:
ret[pos[x]] = x.result()
if not pending and next_arg == len(args):
break
for lst in input_map.values():
for i in range(1, len(lst)):
ret[lst[i]] = ret[lst[0]]
return ret
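# --- Illustrative usage sketch (not part of the original module) ---
# Bounding concurrency with the limit keyword: at most two of the five
# hypothetical jobs below run at once, yet results keep argument order.
async def _example_gather_limited() -> List[Any]:
    async def job(i: int) -> int:
        await asyncio.sleep(0.01)
        return i
    return await gather(*[job(i) for i in range(5)], limit=2)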
| 4,562 |
Python
| 33.568182 | 139 | 0.604998 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/types.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
from typing import (
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Iterable,
Iterator,
TypeVar,
Union,
)
R = TypeVar("R")
T = TypeVar("T")
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
N = TypeVar("N", int, float)
AnyFunction = Union[Callable[..., R], Callable[..., Awaitable[R]]]
AnyIterable = Union[Iterable[T], AsyncIterable[T]]
AnyIterableIterable = Union[Iterable[AnyIterable[T]], AsyncIterable[AnyIterable[T]]]
AnyIterator = Union[Iterator[T], AsyncIterator[T]]
AnyStop = (StopIteration, StopAsyncIteration)
Accumulator = Union[Callable[[T, T], T], Callable[[T, T], Awaitable[T]]]
KeyFunction = Union[Callable[[T], R], Callable[[T], Awaitable[R]]]
Predicate = Union[Callable[[T], object], Callable[[T], Awaitable[object]]]
| 879 |
Python
| 26.499999 | 84 | 0.680319 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/itertools.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
"""
Async-compatible versions of itertools standard library functions.
These functions build on top of the async builtins components,
enabling use of both standard iterables and async iterables, without
needing to use if/else clauses or awkward logic. Standard iterables
get wrapped in async generators, and all functions are designed for
use with `await`, `async for`, etc.
See https://docs.python.org/3/library/itertools.html for reference.
"""
import asyncio
import builtins
import itertools
import operator
from typing import Any, AsyncIterator, List, Optional, Tuple, overload
from .builtins import enumerate, iter, list, next, zip
from .helpers import maybe_await
from .types import (
Accumulator,
AnyFunction,
AnyIterable,
AnyIterableIterable,
AnyStop,
KeyFunction,
N,
Predicate,
R,
T,
)
async def accumulate(
itr: AnyIterable[T], func: Accumulator[T] = operator.add
) -> AsyncIterator[T]:
"""
Yield the running accumulation of an iterable and operator.
Accepts both a standard function or a coroutine for accumulation.
Example:
data = [1, 2, 3, 4]
async def mul(a, b):
return a * b
async for total in accumulate(data, func=mul):
... # 1, 2, 6, 24
"""
itr = iter(itr)
try:
total: T = await next(itr)
except AnyStop:
return
yield total
async for item in itr:
total = await maybe_await(func(total, item))
yield total
class Chain:
def __call__(self, *itrs: AnyIterable[T]) -> AsyncIterator[T]:
"""
Yield values from one or more iterables in series.
        Consumes the first iterable lazily, in its entirety, then the second, and so on.
Example:
async for value in chain([1, 2, 3], [7, 8, 9]):
... # 1, 2, 3, 7, 8, 9
"""
return self.from_iterable(itrs)
async def from_iterable(self, itrs: AnyIterableIterable[T]) -> AsyncIterator[T]:
"""
Like chain, but takes an iterable of iterables.
Alias for chain(*itrs)
"""
async for itr in iter(itrs):
async for item in iter(itr):
yield item
chain = Chain()
async def combinations(itr: AnyIterable[T], r: int) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length subsequences from the given iterable.
Simple wrapper around itertools.combinations for asyncio.
This will consume the entire iterable before yielding values.
Example:
async for value in combinations(range(4), 3):
... # (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)
"""
pool: List[T] = await list(itr)
for value in itertools.combinations(pool, r):
yield value
async def combinations_with_replacement(
itr: AnyIterable[T], r: int
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length subsequences from the given iterable with replacement.
Simple wrapper around itertools.combinations_with_replacement.
This will consume the entire iterable before yielding values.
Example:
async for value in combinations_with_replacement("ABC", 2):
... # ("A", "A"), ("A", "B"), ("A", "C"), ("B", "B"), ...
"""
pool: List[T] = await list(itr)
for value in itertools.combinations_with_replacement(pool, r):
yield value
async def compress(
itr: AnyIterable[T], selectors: AnyIterable[Any]
) -> AsyncIterator[T]:
"""
Yield elements only when the corresponding selector evaluates to True.
Stops when either the iterable or the selectors have been exhausted.
Example:
async for value in compress(range(5), [1, 0, 0, 1, 1]):
... # 0, 3, 4
"""
async for value, selector in zip(itr, selectors):
if selector:
yield value
async def count(start: N = 0, step: N = 1) -> AsyncIterator[N]:
"""
Yield an infinite series, starting at the given value and increasing by step.
Example:
        async for value in count(10, -1):
... # 10, 9, 8, 7, ...
"""
value = start
while True:
yield value
value += step
async def cycle(itr: AnyIterable[T]) -> AsyncIterator[T]:
"""
Yield a repeating series from the given iterable.
    Lazily consumes the iterable when the next value is needed, and caches
the values in memory for future iterations of the series.
Example:
async for value in cycle([1, 2]):
... # 1, 2, 1, 2, 1, 2, ...
"""
items = []
async for item in iter(itr):
yield item
items.append(item)
while True:
for item in items:
yield item
async def dropwhile(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Drops all items until the predicate evaluates False; yields all items afterwards.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for item in dropwhile(pred, range(6)):
            ... # 4, 5
"""
itr = iter(iterable)
async for item in itr:
if not await maybe_await(predicate(item)):
yield item
break
async for item in itr:
yield item
async def filterfalse(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Yield items from the iterable only when the predicate evaluates to False.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for item in filterfalse(pred, range(6)):
... # 4, 5
"""
async for item in iter(iterable):
if not await maybe_await(predicate(item)):
yield item
# pylint: disable=undefined-variable,multiple-statements
@overload
def groupby(itr: AnyIterable[T]) -> AsyncIterator[Tuple[T, List[T]]]: # pragma: nocover
pass
@overload
def groupby(
itr: AnyIterable[T], key: KeyFunction[T, R]
) -> AsyncIterator[Tuple[R, List[T]]]: # pragma: nocover
pass
# pylint: enable=undefined-variable,multiple-statements
async def groupby(
itr: AnyIterable[T], key: Optional[KeyFunction[T, R]] = None
) -> AsyncIterator[Tuple[Any, List[T]]]:
"""
Yield consecutive keys and groupings from the given iterable.
Items will be grouped based on the key function, which defaults to
the identity of each item. Accepts both standard functions and
    coroutines for the key function. Consider sorting by the key
    function before using groupby.
Example:
data = ["A", "a", "b", "c", "C", "c"]
async for key, group in groupby(data, key=str.lower):
key # "a", "b", "c"
group # ["A", "a"], ["b"], ["c", "C", "c"]
"""
if key is None:
key = lambda x: x
grouping: List[T] = []
it = iter(itr)
try:
item = await next(it)
except StopAsyncIteration:
return
grouping = [item]
j = await maybe_await(key(item))
async for item in it:
k = await maybe_await(key(item))
if k != j:
yield j, grouping
grouping = [item]
else:
grouping.append(item)
j = k
yield j, grouping
# pylint: disable=undefined-variable,multiple-statements
@overload
def islice(
itr: AnyIterable[T], __stop: Optional[int]
) -> AsyncIterator[T]: # pragma: nocover
pass
@overload
def islice(
itr: AnyIterable[T], __start: int, __stop: Optional[int], __step: int = 1
) -> AsyncIterator[T]: # pragma: nocover
pass
# pylint: enable=undefined-variable,multiple-statements
async def islice(itr: AnyIterable[T], *args: Optional[int]) -> AsyncIterator[T]:
"""
Yield selected items from the given iterable.
islice(iterable, stop)
islice(iterable, start, stop[, step])
Starting from the start index (or zero), stopping at the stop
    index (or until exhausted), skipping items if step > 1.
Example:
data = range(10)
async for item in islice(data, 5):
... # 0, 1, 2, 3, 4
async for item in islice(data, 2, 5):
... # 2, 3, 4
async for item in islice(data, 1, 7, 2):
... # 1, 3, 5
"""
start = 0
step = 1
if not args:
raise ValueError("must pass stop index")
if len(args) == 1:
(stop,) = args
elif len(args) == 2:
start, stop = args # type: ignore
elif len(args) == 3:
start, stop, step = args # type: ignore
else:
raise ValueError("too many arguments given")
assert start >= 0 and (stop is None or stop >= 0) and step >= 0
step = max(1, step)
if stop == 0:
return
async for index, item in enumerate(itr):
if index >= start and (index - start) % step == 0:
yield item
if stop is not None and index + 1 >= stop:
break
async def permutations(
itr: AnyIterable[T], r: Optional[int] = None
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length permutations of elements in the iterable.
    Simple wrapper around itertools.permutations for asyncio.
This will consume the entire iterable before yielding values.
Example:
async for value in permutations(range(3)):
... # (0, 1, 2), (0, 2, 1), (1, 0, 2), ...
"""
pool: List[T] = await list(itr)
for value in itertools.permutations(pool, r):
yield value
async def product(
*itrs: AnyIterable[T], repeat: int = 1 # pylint: disable=redefined-outer-name
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield cartesian products of all iterables.
    Simple wrapper around itertools.product for asyncio.
This will consume all iterables before yielding any values.
Example:
async for value in product("abc", "xy"):
... # ("a", "x"), ("a", "y"), ("b", "x"), ...
async for value in product(range(3), repeat=3):
... # (0, 0, 0), (0, 0, 1), (0, 0, 2), ...
"""
pools = await asyncio.gather(*[list(itr) for itr in itrs])
for value in itertools.product(*pools, repeat=repeat):
yield value
async def repeat(elem: T, n: int = -1) -> AsyncIterator[T]:
"""
Yield the given value repeatedly, forever or up to n times.
Example:
async for value in repeat(7):
... # 7, 7, 7, 7, 7, 7, ...
"""
while True:
if n == 0:
break
yield elem
n -= 1
async def starmap(
fn: AnyFunction[R], iterable: AnyIterableIterable[Any]
) -> AsyncIterator[R]:
"""
Yield values from a function using an iterable of iterables for arguments.
Each iterable contained within will be unpacked and consumed before
executing the function or coroutine.
Example:
data = [(1, 1), (1, 1, 1), (2, 2)]
async for value in starmap(operator.add, data):
... # 2, 3, 4
"""
async for itr in iter(iterable):
args = await list(itr)
yield await maybe_await(fn(*args))
async def takewhile(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Yield values from the iterable until the predicate evaluates False.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for value in takewhile(pred, range(8)):
... # 0, 1, 2, 3
"""
async for item in iter(iterable):
if await maybe_await(predicate(item)):
yield item
else:
break
def tee(itr: AnyIterable[T], n: int = 2) -> Tuple[AsyncIterator[T], ...]:
"""
Return n iterators that each yield items from the given iterable.
The first iterator lazily fetches from the original iterable, and then
queues the values for the other iterators to yield when needed.
Caveat: all iterators are dependent on the first iterator – if it is
consumed more slowly than the rest, the other consumers will be blocked
until the first iterator continues forward. Similarly, if the first
iterator is consumed more quickly than the rest, more memory will be
used in keeping values in the queues until the other iterators finish
consuming them.
Example:
it1, it2 = tee(range(5), n=2)
async for value in it1:
... # 0, 1, 2, 3, 4
async for value in it2:
... # 0, 1, 2, 3, 4
"""
assert n > 0
sentinel = object()
queues: List[asyncio.Queue] = [asyncio.Queue() for k in range(n)]
async def gen(k: int, q: asyncio.Queue) -> AsyncIterator[T]:
if k == 0:
async for value in iter(itr):
await asyncio.gather(*[queue.put(value) for queue in queues[1:]])
yield value
await asyncio.gather(*[queue.put(sentinel) for queue in queues[1:]])
else:
while True:
value = await q.get()
if value is sentinel:
break
yield value
return tuple(gen(k, q) for k, q in builtins.enumerate(queues))
async def zip_longest(
*itrs: AnyIterable[Any], fillvalue: Any = None
) -> AsyncIterator[Tuple[Any, ...]]:
"""
Yield a tuple of items from mixed iterables until all are consumed.
    If shorter iterables are exhausted, the fill value will be used
until all iterables are exhausted.
Example:
a = range(3)
b = range(5)
async for a, b in zip_longest(a, b, fillvalue=-1):
a # 0, 1, 2, -1, -1
b # 0, 1, 2, 3, 4
"""
its: List[AsyncIterator[Any]] = [iter(itr) for itr in itrs]
itr_count = len(its)
finished = 0
while True:
values = await asyncio.gather(
*[it.__anext__() for it in its], return_exceptions=True
)
for idx, value in builtins.enumerate(values):
if isinstance(value, AnyStop):
finished += 1
values[idx] = fillvalue
its[idx] = repeat(fillvalue)
elif isinstance(value, BaseException):
raise value
if finished >= itr_count:
break
yield tuple(values)
| 14,405 |
Python
| 24.910072 | 88 | 0.592294 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/helpers.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import functools
from unittest import TestCase
from aioitertools.helpers import maybe_await
def async_test(fn):
def wrapped(*args, **kwargs):
loop = asyncio.get_event_loop()
return loop.run_until_complete(fn(*args, **kwargs))
return wrapped
class HelpersTest(TestCase):
# aioitertools.helpers.maybe_await()
@async_test
async def test_maybe_await(self):
self.assertEqual(await maybe_await(42), 42)
@async_test
async def test_maybe_await_async_def(self):
async def forty_two():
await asyncio.sleep(0.0001)
return 42
self.assertEqual(await maybe_await(forty_two()), 42)
@async_test
async def test_maybe_await_coroutine(self):
@asyncio.coroutine
def forty_two():
yield from asyncio.sleep(0.0001)
return 42
self.assertEqual(await maybe_await(forty_two()), 42)
@async_test
async def test_maybe_await_partial(self):
async def multiply(a, b):
await asyncio.sleep(0.0001)
return a * b
self.assertEqual(await maybe_await(functools.partial(multiply, 6)(7)), 42)
| 1,238 |
Python
| 23.294117 | 82 | 0.640549 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/builtins.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
from typing import AsyncIterator
from unittest import TestCase
import aioitertools as ait
from .helpers import async_test
slist = ["A", "B", "C"]
srange = range(3)
class BuiltinsTest(TestCase):
# aioitertools.iter()
@async_test
async def test_iter_list(self):
it = ait.iter(slist)
self.assertIsInstance(it, AsyncIterator)
idx = 0
async for item in it:
self.assertEqual(item, slist[idx])
idx += 1
@async_test
async def test_iter_range(self):
it = ait.iter(srange)
self.assertIsInstance(it, AsyncIterator)
idx = 0
async for item in it:
self.assertEqual(item, srange[idx])
idx += 1
@async_test
async def test_iter_iterable(self):
sentinel = object()
class async_iterable:
def __aiter__(self):
return sentinel
aiter = async_iterable()
self.assertEqual(ait.iter(aiter), sentinel)
@async_test
async def test_iter_iterator(self):
sentinel = object()
class async_iterator:
def __aiter__(self):
return sentinel
def __anext__(self):
return sentinel
aiter = async_iterator()
self.assertEqual(ait.iter(aiter), aiter)
@async_test
async def test_iter_async_generator(self):
async def async_gen():
yield 1
yield 2
agen = async_gen()
self.assertEqual(ait.iter(agen), agen)
# aioitertools.next()
@async_test
async def test_next_list(self):
it = ait.iter(slist)
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_next_range(self):
it = ait.iter(srange)
self.assertEqual(await ait.next(it), 0)
self.assertEqual(await ait.next(it), 1)
self.assertEqual(await ait.next(it), 2)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_next_iterable(self):
class async_iter:
def __init__(self):
self.index = 0
def __aiter__(self):
return self
def __anext__(self):
if self.index > 2:
raise StopAsyncIteration()
return self.fake_next()
async def fake_next(self):
value = slist[self.index]
self.index += 1
return value
it = ait.iter(async_iter())
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
it = iter(slist)
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_next_async_generator(self):
async def async_gen():
for item in slist:
yield item
it = ait.iter(async_gen())
self.assertEqual(await ait.next(it), "A")
self.assertEqual(await ait.next(it), "B")
self.assertEqual(await ait.next(it), "C")
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
# aioitertools.list()
@async_test
async def test_list(self):
self.assertEqual(await ait.list(ait.iter(slist)), slist)
# aioitertools.set()
@async_test
async def test_set(self):
self.assertEqual(await ait.set(ait.iter(slist)), set(slist))
# aioitertools.enumerate()
@async_test
async def test_enumerate(self):
async for index, value in ait.enumerate(slist):
self.assertEqual(value, slist[index])
@async_test
async def test_enumerate_start(self):
async for index, value in ait.enumerate(slist, 4):
self.assertEqual(value, slist[index - 4])
# aioitertools.map()
@async_test
async def test_map_function_list(self):
idx = 0
async for value in ait.map(str.lower, slist):
self.assertEqual(value, slist[idx].lower())
idx += 1
@async_test
async def test_map_function_async_generator(self):
async def gen():
for item in slist:
yield item
idx = 0
async for value in ait.map(str.lower, gen()):
self.assertEqual(value, slist[idx].lower())
idx += 1
@async_test
async def test_map_coroutine_list(self):
async def double(x):
await asyncio.sleep(0.0001)
return x * 2
idx = 0
async for value in ait.map(double, slist):
self.assertEqual(value, slist[idx] * 2)
idx += 1
@async_test
async def test_map_coroutine_generator(self):
async def gen():
for item in slist:
yield item
async def double(x):
await asyncio.sleep(0.0001)
return x * 2
idx = 0
async for value in ait.map(double, gen()):
self.assertEqual(value, slist[idx] * 2)
idx += 1
# aioitertools.max()
@async_test
async def test_max_basic(self):
async def gen():
for item in slist:
yield item
self.assertEqual(await ait.max(gen()), "C")
self.assertEqual(await ait.max(range(4)), 3)
with self.assertRaisesRegex(ValueError, "iterable is empty"):
await ait.max([])
with self.assertRaisesRegex(ValueError, "kwarg .+ not supported"):
await ait.max(None, foo="foo")
@async_test
async def test_max_default(self):
self.assertEqual(await ait.max(range(2), default="x"), 1)
self.assertEqual(await ait.max([], default="x"), "x")
self.assertEqual(await ait.max([], default=None), None)
@async_test
async def test_max_key(self):
words = ["star", "buzz", "guard"]
def reverse(s):
return s[::-1]
self.assertEqual(reverse("python"), "nohtyp")
self.assertEqual(await ait.max(words), "star")
self.assertEqual(await ait.max(words, key=reverse), "buzz")
# aioitertools.min()
@async_test
async def test_min_basic(self):
async def gen():
for item in slist:
yield item
self.assertEqual(await ait.min(gen()), "A")
self.assertEqual(await ait.min(range(4)), 0)
with self.assertRaisesRegex(ValueError, "iterable is empty"):
await ait.min([])
with self.assertRaisesRegex(ValueError, "kwarg .+ not supported"):
await ait.min(None, foo="foo")
@async_test
async def test_min_default(self):
self.assertEqual(await ait.min(range(2), default="x"), 0)
self.assertEqual(await ait.min([], default="x"), "x")
self.assertEqual(await ait.min([], default=None), None)
@async_test
async def test_min_key(self):
words = ["star", "buzz", "guard"]
def reverse(s):
return s[::-1]
self.assertEqual(reverse("python"), "nohtyp")
self.assertEqual(await ait.min(words), "buzz")
self.assertEqual(await ait.min(words, key=reverse), "guard")
# aioitertools.sum()
@async_test
async def test_sum_range_default(self):
self.assertEqual(await ait.sum(srange), sum(srange))
@async_test
async def test_sum_list_string(self):
self.assertEqual(await ait.sum(slist, "foo"), "fooABC")
# aioitertools.zip()
@async_test
async def test_zip_equal(self):
idx = 0
async for a, b in ait.zip(slist, srange):
self.assertEqual(a, slist[idx])
self.assertEqual(b, srange[idx])
idx += 1
| 8,223 |
Python
| 26.783784 | 74 | 0.568284 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/more_itertools.py
|
# Copyright 2020 John Reese
# Licensed under the MIT license
from typing import AsyncIterable
from unittest import TestCase
import aioitertools.more_itertools as mit
from .helpers import async_test
async def _gen() -> AsyncIterable[int]:
for i in range(5):
yield i
async def _empty() -> AsyncIterable[int]:
return
yield 0 # pylint: disable=unreachable
class MoreItertoolsTest(TestCase):
@async_test
async def test_take(self) -> None:
self.assertEqual(await mit.take(2, _gen()), [0, 1])
self.assertEqual(await mit.take(2, range(5)), [0, 1])
@async_test
async def test_take_zero(self) -> None:
self.assertEqual(await mit.take(0, _gen()), [])
@async_test
async def test_take_negative(self) -> None:
with self.assertRaises(ValueError):
await mit.take(-1, _gen())
@async_test
async def test_take_more_than_iterable(self) -> None:
self.assertEqual(await mit.take(10, _gen()), list(range(5)))
@async_test
async def test_take_empty(self) -> None:
it = _gen()
self.assertEqual(len(await mit.take(5, it)), 5)
self.assertEqual(await mit.take(1, it), [])
self.assertEqual(await mit.take(1, _empty()), [])
@async_test
async def test_chunked(self) -> None:
self.assertEqual(
[chunk async for chunk in mit.chunked(_gen(), 2)], [[0, 1], [2, 3], [4]]
)
self.assertEqual(
[chunk async for chunk in mit.chunked(range(5), 2)], [[0, 1], [2, 3], [4]]
)
@async_test
async def test_chunked_empty(self) -> None:
self.assertEqual([], [chunk async for chunk in mit.chunked(_empty(), 2)])
| 1,700 |
Python
| 27.830508 | 86 | 0.607059 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/__init__.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
from .asyncio import AsyncioTest
from .builtins import BuiltinsTest
from .helpers import HelpersTest
from .itertools import ItertoolsTest
from .more_itertools import MoreItertoolsTest
| 246 |
Python
| 26.444442 | 45 | 0.837398 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/asyncio.py
|
# Copyright 2019 John Reese
# Licensed under the MIT license
import asyncio
from unittest import TestCase
import aioitertools as ait
import aioitertools.asyncio as aio
from .helpers import async_test
slist = ["A", "B", "C"]
srange = range(3)
class AsyncioTest(TestCase):
def test_import(self):
self.assertEqual(ait.asyncio, aio)
@async_test
async def test_as_completed(self):
async def sleepy(number, duration):
await asyncio.sleep(duration)
return number
pairs = [(1, 0.3), (2, 0.1), (3, 0.5)]
expected = [2, 1, 3]
futures = [sleepy(*pair) for pair in pairs]
results = await ait.list(aio.as_completed(futures))
self.assertEqual(results, expected)
futures = [sleepy(*pair) for pair in pairs]
results = []
async for value in aio.as_completed(futures):
results.append(value)
self.assertEqual(results, expected)
@async_test
async def test_as_completed_timeout(self):
calls = [(1.0,), (0.1,)]
futures = [asyncio.sleep(*args) for args in calls]
with self.assertRaises(asyncio.TimeoutError):
await ait.list(aio.as_completed(futures, timeout=0.5))
futures = [asyncio.sleep(*args) for args in calls]
results = 0
with self.assertRaises(asyncio.TimeoutError):
async for _ in aio.as_completed(futures, timeout=0.5):
results += 1
self.assertEqual(results, 1)
@async_test
async def test_gather_input_types(self):
async def fn(arg):
await asyncio.sleep(0.001)
return arg
fns = [fn(1), asyncio.ensure_future(fn(2))]
if hasattr(asyncio, "create_task"):
# 3.7 only
fns.append(asyncio.create_task(fn(3))) # pylint: disable=no-member
else:
fns.append(fn(3))
result = await aio.gather(*fns)
self.assertEqual([1, 2, 3], result)
@async_test
async def test_gather_limited(self):
max_counter = 0
counter = 0
async def fn(arg):
nonlocal counter, max_counter
counter += 1
if max_counter < counter:
max_counter = counter
await asyncio.sleep(0.001)
counter -= 1
return arg
# Limit of 2
result = await aio.gather(*[fn(i) for i in range(10)], limit=2)
self.assertEqual(2, max_counter)
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], result)
# No limit
result = await aio.gather(*[fn(i) for i in range(10)])
self.assertEqual(
10, max_counter
) # TODO: on a loaded machine this might be less?
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], result)
@async_test
async def test_gather_limited_dupes(self):
async def fn(arg):
await asyncio.sleep(0.001)
return arg
f = fn(1)
g = fn(2)
result = await aio.gather(f, f, f, g, f, g, limit=2)
self.assertEqual([1, 1, 1, 2, 1, 2], result)
f = fn(1)
g = fn(2)
result = await aio.gather(f, f, f, g, f, g)
self.assertEqual([1, 1, 1, 2, 1, 2], result)
@async_test
async def test_gather_with_exceptions(self):
class MyException(Exception):
pass
async def fn(arg, fail=False):
await asyncio.sleep(arg)
if fail:
raise MyException(arg)
return arg
with self.assertRaises(MyException):
await aio.gather(fn(0.002, fail=True), fn(0.001))
result = await aio.gather(
fn(0.002, fail=True), fn(0.001), return_exceptions=True
)
self.assertEqual(result[1], 0.001)
self.assertIsInstance(result[0], MyException)
| 3,846 |
Python
| 28.592307 | 79 | 0.561362 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/itertools.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import operator
from unittest import TestCase
import aioitertools as ait
from .helpers import async_test
slist = ["A", "B", "C"]
srange = range(1, 4)
class ItertoolsTest(TestCase):
@async_test
async def test_accumulate_range_default(self):
it = ait.accumulate(srange)
for k in [1, 3, 6]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_range_function(self):
it = ait.accumulate(srange, func=operator.mul)
for k in [1, 2, 6]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_range_coroutine(self):
async def mul(a, b):
return a * b
it = ait.accumulate(srange, func=mul)
for k in [1, 2, 6]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_gen_function(self):
async def gen():
yield 1
yield 2
yield 4
it = ait.accumulate(gen(), func=operator.mul)
for k in [1, 2, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_gen_coroutine(self):
async def mul(a, b):
return a * b
async def gen():
yield 1
yield 2
yield 4
it = ait.accumulate(gen(), func=mul)
for k in [1, 2, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_accumulate_empty(self):
values = []
async for value in ait.accumulate([]):
values.append(value)
self.assertEqual(values, [])
@async_test
async def test_chain_lists(self):
it = ait.chain(slist, srange)
for k in ["A", "B", "C", 1, 2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_chain_list_gens(self):
async def gen():
for k in range(2, 9, 2):
yield k
it = ait.chain(slist, gen())
for k in ["A", "B", "C", 2, 4, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_chain_from_iterable(self):
async def gen():
for k in range(2, 9, 2):
yield k
it = ait.chain.from_iterable([slist, gen()])
for k in ["A", "B", "C", 2, 4, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_chain_from_iterable_parameter_expansion_gen(self):
async def gen():
for k in range(2, 9, 2):
yield k
async def parameters_gen():
yield slist
yield gen()
it = ait.chain.from_iterable(parameters_gen())
for k in ["A", "B", "C", 2, 4, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_combinations(self):
it = ait.combinations(range(4), 3)
for k in [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_combinations_with_replacement(self):
it = ait.combinations_with_replacement(slist, 2)
for k in [
("A", "A"),
("A", "B"),
("A", "C"),
("B", "B"),
("B", "C"),
("C", "C"),
]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_compress_list(self):
data = range(10)
selectors = [0, 1, 1, 0, 0, 0, 1, 0, 1, 0]
it = ait.compress(data, selectors)
for k in [1, 2, 6, 8]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_compress_gen(self):
data = "abcdefghijkl"
selectors = ait.cycle([1, 0, 0])
it = ait.compress(data, selectors)
for k in ["a", "d", "g", "j"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_count_bare(self):
it = ait.count()
for k in [0, 1, 2, 3]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_count_start(self):
it = ait.count(42)
for k in [42, 43, 44, 45]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_count_start_step(self):
it = ait.count(42, 3)
for k in [42, 45, 48, 51]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_count_negative(self):
it = ait.count(step=-2)
for k in [0, -2, -4, -6]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_cycle_list(self):
it = ait.cycle(slist)
for k in ["A", "B", "C", "A", "B", "C", "A", "B"]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_cycle_gen(self):
async def gen():
yield 1
yield 2
yield 42
it = ait.cycle(gen())
for k in [1, 2, 42, 1, 2, 42, 1, 2]:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_dropwhile_empty(self):
def pred(x):
return x < 2
result = await ait.list(ait.dropwhile(pred, []))
self.assertEqual(result, [])
@async_test
async def test_dropwhile_function_list(self):
def pred(x):
return x < 2
it = ait.dropwhile(pred, srange)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_dropwhile_function_gen(self):
def pred(x):
return x < 2
async def gen():
yield 1
yield 2
yield 42
it = ait.dropwhile(pred, gen())
for k in [2, 42]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_dropwhile_coroutine_list(self):
async def pred(x):
return x < 2
it = ait.dropwhile(pred, srange)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_dropwhile_coroutine_gen(self):
async def pred(x):
return x < 2
async def gen():
yield 1
yield 2
yield 42
it = ait.dropwhile(pred, gen())
for k in [2, 42]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_filterfalse_function_list(self):
def pred(x):
return x % 2 == 0
it = ait.filterfalse(pred, srange)
for k in [1, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_filterfalse_coroutine_list(self):
async def pred(x):
return x % 2 == 0
it = ait.filterfalse(pred, srange)
for k in [1, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_list(self):
data = "aaabba"
it = ait.groupby(data)
for k in [("a", ["a", "a", "a"]), ("b", ["b", "b"]), ("a", ["a"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_list_key(self):
data = "aAabBA"
it = ait.groupby(data, key=str.lower)
for k in [("a", ["a", "A", "a"]), ("b", ["b", "B"]), ("a", ["A"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_gen(self):
async def gen():
for c in "aaabba":
yield c
it = ait.groupby(gen())
for k in [("a", ["a", "a", "a"]), ("b", ["b", "b"]), ("a", ["a"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_gen_key(self):
async def gen():
for c in "aAabBA":
yield c
it = ait.groupby(gen(), key=str.lower)
for k in [("a", ["a", "A", "a"]), ("b", ["b", "B"]), ("a", ["A"])]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_groupby_empty(self):
async def gen():
for _ in range(0):
yield # Force generator with no actual iteration
async for _ in ait.groupby(gen()):
self.fail("No iteration should have happened")
@async_test
async def test_islice_bad_range(self):
with self.assertRaisesRegex(ValueError, "must pass stop index"):
async for _ in ait.islice([1, 2]):
pass
with self.assertRaisesRegex(ValueError, "too many arguments"):
async for _ in ait.islice([1, 2], 1, 2, 3, 4):
pass
@async_test
async def test_islice_stop_zero(self):
values = []
async for value in ait.islice(range(5), 0):
values.append(value)
self.assertEqual(values, [])
@async_test
async def test_islice_range_stop(self):
it = ait.islice(srange, 2)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_range_start_step(self):
it = ait.islice(srange, 0, None, 2)
for k in [1, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_range_start_stop(self):
it = ait.islice(srange, 1, 3)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_range_start_stop_step(self):
it = ait.islice(srange, 1, 3, 2)
for k in [2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_gen_stop(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
gen_it = gen()
it = ait.islice(gen_it, 2)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
assert await ait.list(gen_it) == [3, 4]
@async_test
async def test_islice_gen_start_step(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
it = ait.islice(gen(), 1, None, 2)
for k in [2, 4]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_gen_start_stop(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
it = ait.islice(gen(), 1, 3)
for k in [2, 3]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_islice_gen_start_stop_step(self):
async def gen():
yield 1
yield 2
yield 3
yield 4
gen_it = gen()
it = ait.islice(gen_it, 1, 3, 2)
for k in [2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
assert await ait.list(gen_it) == [4]
@async_test
async def test_permutations_list(self):
it = ait.permutations(srange, r=2)
for k in [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_permutations_gen(self):
async def gen():
yield 1
yield 2
yield 3
it = ait.permutations(gen(), r=2)
for k in [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_product_list(self):
it = ait.product([1, 2], [6, 7])
for k in [(1, 6), (1, 7), (2, 6), (2, 7)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_product_gen(self):
async def gen(x):
yield x
yield x + 1
it = ait.product(gen(1), gen(6))
for k in [(1, 6), (1, 7), (2, 6), (2, 7)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_repeat(self):
it = ait.repeat(42)
for k in [42] * 10:
self.assertEqual(await ait.next(it), k)
@async_test
async def test_repeat_limit(self):
it = ait.repeat(42, 5)
for k in [42] * 5:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_function_list(self):
data = [slist[:2], slist[1:], slist]
def concat(*args):
return "".join(args)
it = ait.starmap(concat, data)
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_function_gen(self):
def gen():
yield slist[:2]
yield slist[1:]
yield slist
def concat(*args):
return "".join(args)
it = ait.starmap(concat, gen())
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_coroutine_list(self):
data = [slist[:2], slist[1:], slist]
async def concat(*args):
return "".join(args)
it = ait.starmap(concat, data)
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_starmap_coroutine_gen(self):
async def gen():
yield slist[:2]
yield slist[1:]
yield slist
async def concat(*args):
return "".join(args)
it = ait.starmap(concat, gen())
for k in ["AB", "BC", "ABC"]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_empty(self):
def pred(x):
return x < 3
values = await ait.list(ait.takewhile(pred, []))
self.assertEqual(values, [])
@async_test
async def test_takewhile_function_list(self):
def pred(x):
return x < 3
it = ait.takewhile(pred, srange)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_function_gen(self):
async def gen():
yield 1
yield 2
yield 3
def pred(x):
return x < 3
it = ait.takewhile(pred, gen())
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_coroutine_list(self):
async def pred(x):
return x < 3
it = ait.takewhile(pred, srange)
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_takewhile_coroutine_gen(self):
def gen():
yield 1
yield 2
yield 3
async def pred(x):
return x < 3
it = ait.takewhile(pred, gen())
for k in [1, 2]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_list_two(self):
it1, it2 = ait.tee(slist * 2)
for k in slist * 2:
a, b = await asyncio.gather(ait.next(it1), ait.next(it2))
self.assertEqual(a, b)
self.assertEqual(a, k)
self.assertEqual(b, k)
for it in [it1, it2]:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_list_six(self):
itrs = ait.tee(slist * 2, n=6)
for k in slist * 2:
values = await asyncio.gather(*[ait.next(it) for it in itrs])
for value in values:
self.assertEqual(value, k)
for it in itrs:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_gen_two(self):
async def gen():
yield 1
yield 4
yield 9
yield 16
it1, it2 = ait.tee(gen())
for k in [1, 4, 9, 16]:
a, b = await asyncio.gather(ait.next(it1), ait.next(it2))
self.assertEqual(a, b)
self.assertEqual(a, k)
self.assertEqual(b, k)
for it in [it1, it2]:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_tee_gen_six(self):
async def gen():
yield 1
yield 4
yield 9
yield 16
itrs = ait.tee(gen(), n=6)
for k in [1, 4, 9, 16]:
values = await asyncio.gather(*[ait.next(it) for it in itrs])
for value in values:
self.assertEqual(value, k)
for it in itrs:
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_zip_longest_range(self):
a = range(3)
b = range(5)
it = ait.zip_longest(a, b)
for k in [(0, 0), (1, 1), (2, 2), (None, 3), (None, 4)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_zip_longest_fillvalue(self):
async def gen():
yield 1
yield 4
yield 9
yield 16
a = gen()
b = range(5)
it = ait.zip_longest(a, b, fillvalue=42)
for k in [(1, 0), (4, 1), (9, 2), (16, 3), (42, 4)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaises(StopAsyncIteration):
await ait.next(it)
@async_test
async def test_zip_longest_exception(self):
async def gen():
yield 1
yield 2
raise Exception("fake error")
a = gen()
b = ait.repeat(5)
it = ait.zip_longest(a, b)
for k in [(1, 5), (2, 5)]:
self.assertEqual(await ait.next(it), k)
with self.assertRaisesRegex(Exception, "fake error"):
await ait.next(it)
| 21,574 |
Python
| 28.037685 | 75 | 0.527348 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aioitertools/tests/__main__.py
|
# Copyright 2019 John Reese
# Licensed under the MIT license
import unittest
if __name__ == "__main__": # pragma: no cover
unittest.main(module="aioitertools.tests", verbosity=2)
| 186 |
Python
| 22.374997 | 59 | 0.698925 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/discovery.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import logging
import weakref
from botocore import xform_name
from botocore.exceptions import BotoCoreError, HTTPClientError, ConnectionError
from botocore.model import OperationNotFoundError
from botocore.utils import CachedProperty
logger = logging.getLogger(__name__)
class EndpointDiscoveryException(BotoCoreError):
pass
class EndpointDiscoveryRequired(EndpointDiscoveryException):
""" Endpoint Discovery is disabled but is required for this operation. """
fmt = 'Endpoint Discovery is not enabled but this operation requires it.'
class EndpointDiscoveryRefreshFailed(EndpointDiscoveryException):
""" Endpoint Discovery failed to the refresh the known endpoints. """
fmt = 'Endpoint Discovery failed to refresh the required endpoints.'
def block_endpoint_discovery_required_operations(model, **kwargs):
endpoint_discovery = model.endpoint_discovery
if endpoint_discovery and endpoint_discovery.get('required'):
raise EndpointDiscoveryRequired()
class EndpointDiscoveryModel(object):
def __init__(self, service_model):
self._service_model = service_model
@CachedProperty
def discovery_operation_name(self):
discovery_operation = self._service_model.endpoint_discovery_operation
return xform_name(discovery_operation.name)
@CachedProperty
def discovery_operation_keys(self):
discovery_operation = self._service_model.endpoint_discovery_operation
keys = []
if discovery_operation.input_shape:
keys = list(discovery_operation.input_shape.members.keys())
return keys
def discovery_required_for(self, operation_name):
try:
operation_model = self._service_model.operation_model(operation_name)
return operation_model.endpoint_discovery.get('required', False)
except OperationNotFoundError:
return False
def discovery_operation_kwargs(self, **kwargs):
input_keys = self.discovery_operation_keys
# Operation and Identifiers are only sent if there are Identifiers
if not kwargs.get('Identifiers'):
kwargs.pop('Operation', None)
kwargs.pop('Identifiers', None)
return dict((k, v) for k, v in kwargs.items() if k in input_keys)
def gather_identifiers(self, operation, params):
return self._gather_ids(operation.input_shape, params)
def _gather_ids(self, shape, params, ids=None):
# Traverse the input shape and corresponding parameters, gathering
# any input fields labeled as an endpoint discovery id
if ids is None:
ids = {}
for member_name, member_shape in shape.members.items():
if member_shape.metadata.get('endpointdiscoveryid'):
ids[member_name] = params[member_name]
elif member_shape.type_name == 'structure' and member_name in params:
self._gather_ids(member_shape, params[member_name], ids)
return ids
class EndpointDiscoveryManager(object):
def __init__(self, client, cache=None, current_time=None, always_discover=True):
if cache is None:
cache = {}
self._cache = cache
self._failed_attempts = {}
if current_time is None:
current_time = time.time
self._time = current_time
self._always_discover = always_discover
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
self._model = EndpointDiscoveryModel(client.meta.service_model)
def _parse_endpoints(self, response):
endpoints = response['Endpoints']
current_time = self._time()
for endpoint in endpoints:
cache_time = endpoint.get('CachePeriodInMinutes')
endpoint['Expiration'] = current_time + cache_time * 60
return endpoints
def _cache_item(self, value):
if isinstance(value, dict):
return tuple(sorted(value.items()))
else:
return value
def _create_cache_key(self, **kwargs):
kwargs = self._model.discovery_operation_kwargs(**kwargs)
return tuple(self._cache_item(v) for k, v in sorted(kwargs.items()))
def gather_identifiers(self, operation, params):
return self._model.gather_identifiers(operation, params)
def delete_endpoints(self, **kwargs):
cache_key = self._create_cache_key(**kwargs)
if cache_key in self._cache:
del self._cache[cache_key]
def _describe_endpoints(self, **kwargs):
# This is effectively a proxy to whatever name/kwargs the service
# supports for endpoint discovery.
kwargs = self._model.discovery_operation_kwargs(**kwargs)
operation_name = self._model.discovery_operation_name
discovery_operation = getattr(self._client, operation_name)
logger.debug('Discovering endpoints with kwargs: %s', kwargs)
return discovery_operation(**kwargs)
def _get_current_endpoints(self, key):
if key not in self._cache:
return None
now = self._time()
return [e for e in self._cache[key] if now < e['Expiration']]
def _refresh_current_endpoints(self, **kwargs):
cache_key = self._create_cache_key(**kwargs)
try:
response = self._describe_endpoints(**kwargs)
endpoints = self._parse_endpoints(response)
self._cache[cache_key] = endpoints
self._failed_attempts.pop(cache_key, None)
return endpoints
except (ConnectionError, HTTPClientError):
self._failed_attempts[cache_key] = self._time() + 60
return None
def _recently_failed(self, cache_key):
if cache_key in self._failed_attempts:
now = self._time()
if now < self._failed_attempts[cache_key]:
return True
del self._failed_attempts[cache_key]
return False
def _select_endpoint(self, endpoints):
return endpoints[0]['Address']
def describe_endpoint(self, **kwargs):
operation = kwargs['Operation']
discovery_required = self._model.discovery_required_for(operation)
if not self._always_discover and not discovery_required:
# Discovery set to only run on required operations
logger.debug(
'Optional discovery disabled. Skipping discovery for Operation: %s'
% operation
)
return None
# Get the endpoint for the provided operation and identifiers
cache_key = self._create_cache_key(**kwargs)
endpoints = self._get_current_endpoints(cache_key)
if endpoints:
return self._select_endpoint(endpoints)
# All known endpoints are stale
recently_failed = self._recently_failed(cache_key)
if not recently_failed:
# We haven't failed to discover recently, go ahead and refresh
endpoints = self._refresh_current_endpoints(**kwargs)
if endpoints:
return self._select_endpoint(endpoints)
# Discovery has failed recently, do our best to get an endpoint
logger.debug('Endpoint Discovery has failed for: %s', kwargs)
stale_entries = self._cache.get(cache_key, None)
if stale_entries:
# We have stale entries, use those while discovery is failing
return self._select_endpoint(stale_entries)
if discovery_required:
            # It looks strange to be checking recently_failed again, but
# this informs us as to whether or not we tried to refresh earlier
if recently_failed:
# Discovery is required and we haven't already refreshed
endpoints = self._refresh_current_endpoints(**kwargs)
if endpoints:
return self._select_endpoint(endpoints)
            # No endpoints even after refresh, raise a hard error
raise EndpointDiscoveryRefreshFailed()
# Discovery is optional, just use the default endpoint for now
return None
class EndpointDiscoveryHandler(object):
def __init__(self, manager):
self._manager = manager
def register(self, events, service_id):
events.register(
'before-parameter-build.%s' % service_id, self.gather_identifiers
)
events.register_first(
'request-created.%s' % service_id, self.discover_endpoint
)
events.register('needs-retry.%s' % service_id, self.handle_retries)
def gather_identifiers(self, params, model, context, **kwargs):
endpoint_discovery = model.endpoint_discovery
# Only continue if the operation supports endpoint discovery
if endpoint_discovery is None:
return
ids = self._manager.gather_identifiers(model, params)
context['discovery'] = {'identifiers': ids}
def discover_endpoint(self, request, operation_name, **kwargs):
ids = request.context.get('discovery', {}).get('identifiers')
if ids is None:
return
endpoint = self._manager.describe_endpoint(
Operation=operation_name, Identifiers=ids
)
if endpoint is None:
logger.debug('Failed to discover and inject endpoint')
return
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
logger.debug('Injecting discovered endpoint: %s', endpoint)
request.url = endpoint
def handle_retries(self, request_dict, response, operation, **kwargs):
if response is None:
return None
_, response = response
status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')
error_code = response.get('Error', {}).get('Code')
if status != 421 and error_code != 'InvalidEndpointException':
return None
context = request_dict.get('context', {})
ids = context.get('discovery', {}).get('identifiers')
if ids is None:
return None
# Delete the cached endpoints, forcing a refresh on retry
# TODO: Improve eviction behavior to only evict the bad endpoint if
# there are multiple. This will almost certainly require a lock.
self._manager.delete_endpoints(
Operation=operation.name, Identifiers=ids
)
return 0
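def _cache_key_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: mirrors the normalization in
    # EndpointDiscoveryManager._cache_item, which turns dict-valued kwargs
    # (such as Identifiers) into sorted tuples so that argument order cannot
    # produce distinct cache entries for the same endpoint.
    def as_cache_item(value):
        if isinstance(value, dict):
            return tuple(sorted(value.items()))
        return value
    ids_a = {'TableName': 'users', 'Region': 'us-east-1'}
    ids_b = {'Region': 'us-east-1', 'TableName': 'users'}
    assert as_cache_item(ids_a) == as_cache_item(ids_b)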
| 11,031 |
Python
| 39.116363 | 84 | 0.646995 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/configloader.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shlex
import copy
import sys
from botocore.compat import six
import botocore.exceptions
def multi_file_load_config(*filenames):
"""Load and combine multiple INI configs with profiles.
    This function will take a list of filenames and return
a single dictionary that represents the merging of the loaded
config files.
If any of the provided filenames does not exist, then that file
is ignored. It is therefore ok to provide a list of filenames,
some of which may not exist.
Configuration files are **not** deep merged, only the top level
keys are merged. The filenames should be passed in order of
precedence. The first config file has precedence over the
second config file, which has precedence over the third config file,
etc. The only exception to this is that the "profiles" key is
merged to combine profiles from multiple config files into a
single profiles mapping. However, if a profile is defined in
multiple config files, then the config file with the highest
precedence is used. Profile values themselves are not merged.
For example::
        FileA              FileB                FileC
        [foo]              [foo]                [bar]
        a=1                a=2                  a=3
        b=2
        [bar]              [baz]                [profile a]
        a=2                a=3                  region=e
        [profile a]        [profile b]          [profile c]
        region=c           region=d             region=f
The final result of ``multi_file_load_config(FileA, FileB, FileC)``
would be::
{"foo": {"a": 1}, "bar": {"a": 2}, "baz": {"a": 3},
"profiles": {"a": {"region": "c"}}, {"b": {"region": d"}},
{"c": {"region": "f"}}}
Note that the "foo" key comes from A, even though it's defined in both
FileA and FileB. Because "foo" was defined in FileA first, then the values
for "foo" from FileA are used and the values for "foo" from FileB are
ignored. Also note where the profiles originate from. Profile "a"
comes FileA, profile "b" comes from FileB, and profile "c" comes
from FileC.
"""
configs = []
profiles = []
for filename in filenames:
try:
loaded = load_config(filename)
except botocore.exceptions.ConfigNotFound:
continue
profiles.append(loaded.pop('profiles'))
configs.append(loaded)
merged_config = _merge_list_of_dicts(configs)
merged_profiles = _merge_list_of_dicts(profiles)
merged_config['profiles'] = merged_profiles
return merged_config
def _merge_list_of_dicts(list_of_dicts):
merged_dicts = {}
for single_dict in list_of_dicts:
for key, value in single_dict.items():
if key not in merged_dicts:
merged_dicts[key] = value
return merged_dicts
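def _merge_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: demonstrates the shallow,
    # first-wins merge described in the multi_file_load_config docstring.
    merged = _merge_list_of_dicts([
        {'foo': {'a': 1}},                     # highest precedence
        {'foo': {'a': 2}, 'bar': {'a': 2}},    # lower precedence
    ])
    assert merged == {'foo': {'a': 1}, 'bar': {'a': 2}}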
def load_config(config_filename):
"""Parse a INI config with profiles.
This will parse an INI config file and map top level profiles
into a top level "profile" key.
If you want to parse an INI file and map all section names to
top level keys, use ``raw_config_parse`` instead.
"""
parsed = raw_config_parse(config_filename)
return build_profile_map(parsed)
def raw_config_parse(config_filename, parse_subsections=True):
"""Returns the parsed INI config contents.
Each section name is a top level key.
:param config_filename: The name of the INI file to parse
:param parse_subsections: If True, parse indented blocks as
subsections that represent their own configuration dictionary.
For example, if the config file had the contents::
s3 =
signature_version = s3v4
addressing_style = path
The resulting ``raw_config_parse`` would be::
{'s3': {'signature_version': 's3v4', 'addressing_style': 'path'}}
If False, do not try to parse subsections and return the indented
block as its literal value::
{'s3': '\nsignature_version = s3v4\naddressing_style = path'}
    :returns: A dict with a key for each section found in the config
        file, the value of each key being a dict containing the name
        value pairs found in that section.
:raises: ConfigNotFound, ConfigParseError
"""
config = {}
path = config_filename
if path is not None:
path = os.path.expandvars(path)
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise botocore.exceptions.ConfigNotFound(path=_unicode_path(path))
cp = six.moves.configparser.RawConfigParser()
try:
cp.read([path])
except (six.moves.configparser.Error, UnicodeDecodeError):
raise botocore.exceptions.ConfigParseError(
path=_unicode_path(path))
else:
for section in cp.sections():
config[section] = {}
for option in cp.options(section):
config_value = cp.get(section, option)
if parse_subsections and config_value.startswith('\n'):
# Then we need to parse the inner contents as
# hierarchical. We support a single level
# of nesting for now.
try:
config_value = _parse_nested(config_value)
except ValueError:
raise botocore.exceptions.ConfigParseError(
path=_unicode_path(path))
config[section][option] = config_value
return config
def _unicode_path(path):
if isinstance(path, six.text_type):
return path
# According to the documentation getfilesystemencoding can return None
# on unix in which case the default encoding is used instead.
filesystem_encoding = sys.getfilesystemencoding()
if filesystem_encoding is None:
filesystem_encoding = sys.getdefaultencoding()
return path.decode(filesystem_encoding, 'replace')
def _parse_nested(config_value):
# Given a value like this:
# \n
# foo = bar
# bar = baz
# We need to parse this into
    # {'foo': 'bar', 'bar': 'baz'}
parsed = {}
for line in config_value.splitlines():
line = line.strip()
if not line:
continue
# The caller will catch ValueError
# and raise an appropriate error
# if this fails.
key, value = line.split('=', 1)
parsed[key.strip()] = value.strip()
return parsed
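def _parse_nested_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: this is the transformation
    # raw_config_parse applies to indented sub-blocks when
    # parse_subsections=True.
    value = '\nsignature_version = s3v4\naddressing_style = path'
    assert _parse_nested(value) == {
        'signature_version': 's3v4',
        'addressing_style': 'path',
    }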
def build_profile_map(parsed_ini_config):
"""Convert the parsed INI config into a profile map.
The config file format requires that every profile except the
default to be prepended with "profile", e.g.::
[profile test]
aws_... = foo
aws_... = bar
[profile bar]
aws_... = foo
aws_... = bar
# This is *not* a profile
[preview]
otherstuff = 1
# Neither is this
[foobar]
morestuff = 2
The build_profile_map will take a parsed INI config file where each top
level key represents a section name, and convert into a format where all
the profiles are under a single top level "profiles" key, and each key in
the sub dictionary is a profile name. For example, the above config file
would be converted from::
{"profile test": {"aws_...": "foo", "aws...": "bar"},
"profile bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
into::
{"profiles": {"test": {"aws_...": "foo", "aws...": "bar"},
"bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
If there are no profiles in the provided parsed INI contents, then
an empty dict will be the value associated with the ``profiles`` key.
.. note::
This will not mutate the passed in parsed_ini_config. Instead it will
make a deepcopy and return that value.
"""
parsed_config = copy.deepcopy(parsed_ini_config)
profiles = {}
final_config = {}
for key, values in parsed_config.items():
if key.startswith("profile"):
try:
parts = shlex.split(key)
except ValueError:
continue
if len(parts) == 2:
profiles[parts[1]] = values
elif key == 'default':
# default section is special and is considered a profile
# name but we don't require you use 'profile "default"'
# as a section.
profiles[key] = values
else:
final_config[key] = values
final_config['profiles'] = profiles
return final_config
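def _build_profile_map_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: 'profile <name>' sections
    # collapse under a single 'profiles' key, 'default' is special-cased as
    # a profile, and everything else stays at the top level.
    parsed = {
        'profile test': {'region': 'us-west-2'},
        'default': {'region': 'us-east-1'},
        'preview': {'otherstuff': '1'},
    }
    result = build_profile_map(parsed)
    assert result['profiles'] == {
        'test': {'region': 'us-west-2'},
        'default': {'region': 'us-east-1'},
    }
    assert result['preview'] == {'otherstuff': '1'}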
| 9,580 |
Python
| 34.095238 | 79 | 0.599269 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/config.py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.compat import OrderedDict
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
from botocore.exceptions import InvalidS3AddressingStyleError
from botocore.exceptions import InvalidRetryConfigurationError
from botocore.exceptions import InvalidMaxRetryAttemptsError
from botocore.exceptions import InvalidRetryModeError
class Config(object):
"""Advanced configuration for Botocore clients.
:type region_name: str
:param region_name: The region to use in instantiating the client
:type signature_version: str
:param signature_version: The signature version when signing requests.
:type user_agent: str
:param user_agent: The value to use in the User-Agent header.
:type user_agent_extra: str
:param user_agent_extra: The value to append to the current User-Agent
header value.
:type connect_timeout: float or int
:param connect_timeout: The time in seconds till a timeout exception is
thrown when attempting to make a connection. The default is 60
seconds.
:type read_timeout: float or int
:param read_timeout: The time in seconds till a timeout exception is
thrown when attempting to read from a connection. The default is
60 seconds.
:type parameter_validation: bool
:param parameter_validation: Whether parameter validation should occur
when serializing requests. The default is True. You can disable
parameter validation for performance reasons. Otherwise, it's
recommended to leave parameter validation enabled.
:type max_pool_connections: int
:param max_pool_connections: The maximum number of connections to
keep in a connection pool. If this value is not set, the default
value of 10 is used.
:type proxies: dict
:param proxies: A dictionary of proxy servers to use by protocol or
endpoint, e.g.:
{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
:type proxies_config: dict
:param proxies_config: A dictionary of additional proxy configurations.
Valid keys are:
* 'proxy_ca_bundle' -- The path to a custom certificate bundle to use
when establishing SSL/TLS connections with proxy.
* 'proxy_client_cert' -- The path to a certificate for proxy
TLS client authentication.
When a str is provided it is treated as a path to a proxy client
certificate. When a two element tuple is provided, it will be
interpreted as the path to the client certificate, and the path
to the certificate key.
* 'proxy_use_forwarding_for_https' -- For HTTPS proxies,
forward your requests to HTTPS destinations with an absolute
URI. We strongly recommend you only use this option with
trusted or corporate proxies. Value must be boolean.
:type s3: dict
:param s3: A dictionary of s3 specific configurations.
Valid keys are:
* 'use_accelerate_endpoint' -- Refers to whether to use the S3
Accelerate endpoint. The value must be a boolean. If True, the
client will use the S3 Accelerate endpoint. If the S3 Accelerate
endpoint is being used then the addressing style will always
be virtual.
* 'payload_signing_enabled' -- Refers to whether or not to SHA256
sign sigv4 payloads. By default, this is disabled for streaming
uploads (UploadPart and PutObject).
* 'addressing_style' -- Refers to the style in which to address
s3 endpoints. Values must be a string that equals:
* auto -- Addressing style is chosen for user. Depending
on the configuration of client, the endpoint may be addressed in
the virtual or the path style. Note that this is the default
behavior if no style is specified.
* virtual -- Addressing style is always virtual. The name of the
bucket must be DNS compatible or an exception will be thrown.
Endpoints will be addressed as such: mybucket.s3.amazonaws.com
* path -- Addressing style is always by path. Endpoints will be
addressed as such: s3.amazonaws.com/mybucket
* 'us_east_1_regional_endpoint' - Refers to what S3 endpoint to use
when the region is configured to be us-east-1. Values must be a
string that equals:
* regional -- Use the us-east-1.amazonaws.com endpoint if the
client is configured to use the us-east-1 region.
* legacy -- Use the s3.amazonaws.com endpoint if the client is
configured to use the us-east-1 region. This is the default if
the configuration option is not specified.
:type retries: dict
:param retries: A dictionary for retry specific configurations.
Valid keys are:
* 'total_max_attempts' -- An integer representing the maximum number of
total attempts that will be made on a single request. This includes
the initial request, so a value of 1 indicates that no requests
will be retried. If ``total_max_attempts`` and ``max_attempts``
are both provided, ``total_max_attempts`` takes precedence.
``total_max_attempts`` is preferred over ``max_attempts`` because
it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and
the ``max_attempts`` config file value.
* 'max_attempts' -- An integer representing the maximum number of
retry attempts that will be made on a single request. For
example, setting this value to 2 will result in the request
being retried at most two times after the initial request. Setting
this value to 0 will result in no retries ever being attempted on
the initial request. If not provided, the number of retries will
default to whatever is modeled, which is typically four retries.
* 'mode' -- A string representing the type of retry mode botocore
should use. Valid values are:
* ``legacy`` - The pre-existing retry behavior.
* ``standard`` - The standardized set of retry rules. This
will also default to 3 max attempts unless overridden.
* ``adaptive`` - Retries with additional client side throttling.
:type client_cert: str, (str, str)
:param client_cert: The path to a certificate for TLS client authentication.
When a str is provided it is treated as a path to a client certificate
to be used when creating a TLS connection.
If a client key is to be provided alongside the client certificate the
client_cert should be set to a tuple of length two where the first
element is the path to the client certificate and the second element is
the path to the certificate key.
:type inject_host_prefix: bool
:param inject_host_prefix: Whether host prefix injection should occur.
Defaults to True.
Setting this to False disables the injection of operation parameters
into the prefix of the hostname. This is useful for clients providing
custom endpoints that should not have their host prefix modified.
"""
OPTION_DEFAULTS = OrderedDict([
('region_name', None),
('signature_version', None),
('user_agent', None),
('user_agent_extra', None),
('connect_timeout', DEFAULT_TIMEOUT),
('read_timeout', DEFAULT_TIMEOUT),
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('proxies', None),
('proxies_config', None),
('s3', None),
('retries', None),
('client_cert', None),
('inject_host_prefix', True),
('endpoint_discovery_enabled', None),
])
def __init__(self, *args, **kwargs):
self._user_provided_options = self._record_user_provided_options(
args, kwargs)
# Merge the user_provided options onto the default options
config_vars = copy.copy(self.OPTION_DEFAULTS)
config_vars.update(self._user_provided_options)
# Set the attributes based on the config_vars
for key, value in config_vars.items():
setattr(self, key, value)
# Validate the s3 options
self._validate_s3_configuration(self.s3)
self._validate_retry_configuration(self.retries)
def _record_user_provided_options(self, args, kwargs):
option_order = list(self.OPTION_DEFAULTS)
user_provided_options = {}
# Iterate through the kwargs passed through to the constructor and
# map valid keys to the dictionary
for key, value in kwargs.items():
if key in self.OPTION_DEFAULTS:
user_provided_options[key] = value
# The key must exist in the available options
else:
raise TypeError(
'Got unexpected keyword argument \'%s\'' % key)
# The number of args should not be longer than the allowed
# options
if len(args) > len(option_order):
raise TypeError(
'Takes at most %s arguments (%s given)' % (
len(option_order), len(args)))
# Iterate through the args passed through to the constructor and map
# them to appropriate keys.
for i, arg in enumerate(args):
            # If a kwarg was specified for the arg, then error out
if option_order[i] in user_provided_options:
raise TypeError(
'Got multiple values for keyword argument \'%s\'' % (
option_order[i]))
user_provided_options[option_order[i]] = arg
return user_provided_options
def _validate_s3_configuration(self, s3):
if s3 is not None:
addressing_style = s3.get('addressing_style')
if addressing_style not in ['virtual', 'auto', 'path', None]:
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style)
def _validate_retry_configuration(self, retries):
if retries is not None:
for key, value in retries.items():
if key not in ['max_attempts', 'mode', 'total_max_attempts']:
raise InvalidRetryConfigurationError(
retry_config_option=key)
if key == 'max_attempts' and value < 0:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=value,
min_value=0,
)
if key == 'total_max_attempts' and value < 1:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=value,
min_value=1,
)
if key == 'mode' and value not in ['legacy', 'standard',
'adaptive']:
raise InvalidRetryModeError(
provided_retry_mode=value
)
def merge(self, other_config):
"""Merges the config object with another config object
This will merge in all non-default values from the provided config
and return a new config object
:type other_config: botocore.config.Config
:param other config: Another config object to merge with. The values
in the provided config object will take precedence in the merging
:returns: A config object built from the merged values of both
config objects.
"""
# Make a copy of the current attributes in the config object.
config_options = copy.copy(self._user_provided_options)
# Merge in the user provided options from the other config
config_options.update(other_config._user_provided_options)
# Return a new config object with the merged properties.
return Config(**config_options)
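def _config_merge_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: merge() only overlays values
    # the other config explicitly set, so unrelated options survive.
    base = Config(region_name='us-east-1', connect_timeout=5)
    override = Config(region_name='eu-west-1')
    merged = base.merge(override)
    assert merged.region_name == 'eu-west-1'
    assert merged.connect_timeout == 5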
| 12,721 |
Python
| 42.718213 | 80 | 0.642324 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/response.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import logging
from botocore import ScalarTypes
from botocore.hooks import first_non_none_response
from botocore.compat import json, set_socket_timeout, XMLParseError
from botocore.exceptions import IncompleteReadError, ReadTimeoutError
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
from botocore import parsers
logger = logging.getLogger(__name__)
class StreamingBody(object):
"""Wrapper class for an http response body.
This provides a few additional conveniences that do not exist
in the urllib3 model:
* Set the timeout on the socket (i.e read() timeouts)
* Auto validation of content length, if the amount of bytes
we read does not match the content length, an exception
is raised.
"""
_DEFAULT_CHUNK_SIZE = 1024
def __init__(self, raw_stream, content_length):
self._raw_stream = raw_stream
self._content_length = content_length
self._amount_read = 0
def set_socket_timeout(self, timeout):
"""Set the timeout seconds on the socket."""
# The problem we're trying to solve is to prevent .read() calls from
# hanging. This can happen in rare cases. What we'd like to ideally
# do is set a timeout on the .read() call so that callers can retry
# the request.
# Unfortunately, this isn't currently possible in requests.
# See: https://github.com/kennethreitz/requests/issues/1803
# So what we're going to do is reach into the guts of the stream and
# grab the socket object, which we can set the timeout on. We're
        # putting in a check here so that if this interface goes away, we'll
        # know.
try:
# To further complicate things, the way to grab the
# underlying socket object from an HTTPResponse is different
# in py2 and py3. So this code has been pushed to botocore.compat.
set_socket_timeout(self._raw_stream, timeout)
except AttributeError:
logger.error("Cannot access the socket object of "
"a streaming response. It's possible "
"the interface has changed.", exc_info=True)
raise
def read(self, amt=None):
"""Read at most amt bytes from the stream.
If the amt argument is omitted, read all data.
"""
try:
chunk = self._raw_stream.read(amt)
except URLLib3ReadTimeoutError as e:
# TODO: the url will be None as urllib3 isn't setting it yet
raise ReadTimeoutError(endpoint_url=e.url, error=e)
self._amount_read += len(chunk)
if amt is None or (not chunk and amt > 0):
# If the server sends empty contents or
# we ask to read all of the contents, then we know
# we need to verify the content length.
self._verify_content_length()
return chunk
def __iter__(self):
"""Return an iterator to yield 1k chunks from the raw stream.
"""
return self.iter_chunks(self._DEFAULT_CHUNK_SIZE)
def __next__(self):
"""Return the next 1k chunk from the raw stream.
"""
current_chunk = self.read(self._DEFAULT_CHUNK_SIZE)
if current_chunk:
return current_chunk
raise StopIteration()
next = __next__
def iter_lines(self, chunk_size=1024, keepends=False):
"""Return an iterator to yield lines from the raw stream.
This is achieved by reading chunk of bytes (of size chunk_size) at a
time from the raw stream, and then yielding lines from there.
"""
pending = b''
for chunk in self.iter_chunks(chunk_size):
lines = (pending + chunk).splitlines(True)
for line in lines[:-1]:
yield line.splitlines(keepends)[0]
pending = lines[-1]
if pending:
yield pending.splitlines(keepends)[0]
def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
"""Return an iterator to yield chunks of chunk_size bytes from the raw
stream.
"""
while True:
current_chunk = self.read(chunk_size)
if current_chunk == b"":
break
yield current_chunk
def _verify_content_length(self):
# See: https://github.com/kennethreitz/requests/issues/1855
# Basically, our http library doesn't do this for us, so we have
        # to do this ourselves.
if self._content_length is not None and \
self._amount_read != int(self._content_length):
raise IncompleteReadError(
actual_bytes=self._amount_read,
expected_bytes=int(self._content_length))
def close(self):
"""Close the underlying http response stream."""
self._raw_stream.close()
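def _streaming_body_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore's public API: StreamingBody
    # only needs an object with .read(), so io.BytesIO can stand in for a
    # urllib3 raw stream. content_length matches the payload, so the implicit
    # length validation on EOF passes.
    import io
    body = StreamingBody(io.BytesIO(b'line1\nline2\n'), content_length=12)
    assert list(body.iter_lines()) == [b'line1', b'line2']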
def get_response(operation_model, http_response):
protocol = operation_model.metadata['protocol']
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
}
# TODO: Unfortunately, we have to have error logic here.
# If it looks like an error, in the streaming response case we
# need to actually grab the contents.
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_streaming_output:
response_dict['body'] = StreamingBody(
http_response.raw, response_dict['headers'].get('content-length'))
else:
response_dict['body'] = http_response.content
parser = parsers.create_parser(protocol)
return http_response, parser.parse(response_dict,
operation_model.output_shape)
| 6,434 |
Python
| 38.237805 | 79 | 0.635996 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/handlers.py
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Builtin event handlers.
This module contains builtin handlers for events emitted by botocore.
"""
import base64
import logging
import copy
import re
import warnings
import uuid
from botocore.compat import (
unquote, json, six, unquote_str, ensure_bytes, get_md5,
MD5_AVAILABLE, OrderedDict, urlsplit, urlunsplit, XMLParseError,
ETree,
)
from botocore.docs.utils import AutoPopulatedParam
from botocore.docs.utils import HideParamFromOperations
from botocore.docs.utils import AppendParamDocumentation
from botocore.signers import add_generate_presigned_url
from botocore.signers import add_generate_presigned_post
from botocore.signers import add_generate_db_auth_token
from botocore.exceptions import ParamValidationError
from botocore.exceptions import AliasConflictParameterError
from botocore.exceptions import UnsupportedTLSVersionWarning
from botocore.exceptions import MissingServiceIdError
from botocore.utils import percent_encode, SAFE_CHARS
from botocore.utils import switch_host_with_param
from botocore.utils import hyphenize_service_id
from botocore.utils import conditionally_calculate_md5
from botocore import retryhandler
from botocore import utils
from botocore import translate
import botocore
import botocore.auth
logger = logging.getLogger(__name__)
REGISTER_FIRST = object()
REGISTER_LAST = object()
# From the S3 docs:
# The rules for bucket names in the US Standard region allow bucket names
# to be as long as 255 characters, and bucket names can contain any
# combination of uppercase letters, lowercase letters, numbers, periods
# (.), hyphens (-), and underscores (_).
VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$')
_ACCESSPOINT_ARN = (
r'^arn:(aws).*:(s3|s3-object-lambda):[a-z\-0-9]+:[0-9]{12}:accesspoint[/:]'
r'[a-zA-Z0-9\-]{1,63}$'
)
_OUTPOST_ARN = (
r'^arn:(aws).*:s3-outposts:[a-z\-0-9]+:[0-9]{12}:outpost[/:]'
r'[a-zA-Z0-9\-]{1,63}[/:]accesspoint[/:][a-zA-Z0-9\-]{1,63}$'
)
VALID_S3_ARN = re.compile('|'.join([_ACCESSPOINT_ARN, _OUTPOST_ARN]))
VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$')
SERVICE_NAME_ALIASES = {
'runtime.sagemaker': 'sagemaker-runtime'
}
def handle_service_name_alias(service_name, **kwargs):
return SERVICE_NAME_ALIASES.get(service_name, service_name)
def escape_xml_payload(params, **kwargs):
# Replace \r and \n with the escaped sequence over the whole XML document
# to avoid linebreak normalization modifying customer input when the
# document is parsed. Ideally, we would do this in ElementTree.tostring,
# but it doesn't allow us to override entity escaping for text fields. For
# this operation \r and \n can only appear in the XML document if they were
# passed as part of the customer input.
body = params['body']
replaced = False
if b'\r' in body:
replaced = True
        body = body.replace(b'\r', b'&#xD;')
if b'\n' in body:
replaced = True
        body = body.replace(b'\n', b'&#xA;')
if not replaced:
return
params['body'] = body
if 'Content-MD5' in params['headers']:
# The Content-MD5 is now wrong, so we'll need to recalculate it
del params['headers']['Content-MD5']
conditionally_calculate_md5(params, **kwargs)
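def _escape_xml_payload_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: literal CR/LF bytes in the
    # serialized body are replaced with XML character references so a parser
    # cannot normalize them away. No Content-MD5 header is present here, so
    # no digest recalculation happens.
    params = {'body': b'<Tag>a\r\nb</Tag>', 'headers': {}}
    escape_xml_payload(params)
    assert params['body'] == b'<Tag>a&#xD;&#xA;b</Tag>'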
def check_for_200_error(response, **kwargs):
# From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
# There are two opportunities for a copy request to return an error. One
# can occur when Amazon S3 receives the copy request and the other can
# occur while Amazon S3 is copying the files. If the error occurs before
# the copy operation starts, you receive a standard Amazon S3 error. If the
# error occurs during the copy operation, the error response is embedded in
# the 200 OK response. This means that a 200 OK response can contain either
# a success or an error. Make sure to design your application to parse the
# contents of the response and handle it appropriately.
#
# So this handler checks for this case. Even though the server sends a
# 200 response, conceptually this should be handled exactly like a
# 500 response (with respect to raising exceptions, retries, etc.)
# We're connected *before* all the other retry logic handlers, so as long
# as we switch the error code to 500, we'll retry the error as expected.
if response is None:
# A None response can happen if an exception is raised while
# trying to retrieve the response. See Endpoint._get_response().
return
http_response, parsed = response
if _looks_like_special_case_error(http_response):
logger.debug("Error found for response with 200 status code, "
"errors: %s, changing status code to "
"500.", parsed)
http_response.status_code = 500
def _looks_like_special_case_error(http_response):
if http_response.status_code == 200:
try:
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding='utf-8')
parser.feed(http_response.content)
root = parser.close()
except XMLParseError:
# In cases of network disruptions, we may end up with a partial
# streamed response from S3. We need to treat these cases as
# 500 Service Errors and try again.
return True
if root.tag == 'Error':
return True
return False
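def _special_case_error_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: a 200 response whose body is
    # an XML <Error> document is detected so it can be retried like a 500.
    # _FakeResponse is a hypothetical stand-in for an http response object.
    class _FakeResponse(object):
        status_code = 200
        content = b'<Error><Code>InternalError</Code></Error>'
    assert _looks_like_special_case_error(_FakeResponse())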
def set_operation_specific_signer(context, signing_name, **kwargs):
""" Choose the operation-specific signer.
Individual operations may have a different auth type than the service as a
whole. This will most often manifest as operations that should not be
authenticated at all, but can include other auth modes such as sigv4
without body signing.
"""
auth_type = context.get('auth_type')
# Auth type will be None if the operation doesn't have a configured auth
# type.
if not auth_type:
return
# Auth type will be the string value 'none' if the operation should not
# be signed at all.
if auth_type == 'none':
return botocore.UNSIGNED
if auth_type.startswith('v4'):
signature_version = 'v4'
if signing_name == 's3':
signature_version = 's3v4'
# If the operation needs an unsigned body, we set additional context
# allowing the signer to be aware of this.
if auth_type == 'v4-unsigned-body':
context['payload_signing_enabled'] = False
return signature_version
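def _operation_signer_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: 'v4-unsigned-body' selects
    # sigv4 while flagging the payload as unsigned through the request
    # context; 'none' would return botocore.UNSIGNED instead.
    context = {'auth_type': 'v4-unsigned-body'}
    version = set_operation_specific_signer(context, signing_name='ec2')
    assert version == 'v4'
    assert context['payload_signing_enabled'] is False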
def decode_console_output(parsed, **kwargs):
if 'Output' in parsed:
try:
# We're using 'replace' for errors because it is
# possible that console output contains non string
# chars we can't utf-8 decode.
value = base64.b64decode(six.b(parsed['Output'])).decode(
'utf-8', 'replace')
parsed['Output'] = value
except (ValueError, TypeError, AttributeError):
logger.debug('Error decoding base64', exc_info=True)
def generate_idempotent_uuid(params, model, **kwargs):
for name in model.idempotent_members:
if name not in params:
params[name] = str(uuid.uuid4())
logger.debug("injecting idempotency token (%s) into param '%s'." %
(params[name], name))
def decode_quoted_jsondoc(value):
try:
value = json.loads(unquote(value))
except (ValueError, TypeError):
logger.debug('Error loading quoted JSON', exc_info=True)
return value
def json_decode_template_body(parsed, **kwargs):
if 'TemplateBody' in parsed:
try:
value = json.loads(
parsed['TemplateBody'], object_pairs_hook=OrderedDict)
parsed['TemplateBody'] = value
except (ValueError, TypeError):
logger.debug('error loading JSON', exc_info=True)
def validate_bucket_name(params, **kwargs):
if 'Bucket' not in params:
return
bucket = params['Bucket']
if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket):
error_msg = (
'Invalid bucket name "%s": Bucket name must match '
'the regex "%s" or be an ARN matching the regex "%s"' % (
bucket, VALID_BUCKET.pattern, VALID_S3_ARN.pattern))
raise ParamValidationError(report=error_msg)
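def _validate_bucket_name_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: plain bucket names and S3
    # access point ARNs pass validation; anything else raises
    # ParamValidationError.
    validate_bucket_name({'Bucket': 'my-bucket'})
    try:
        validate_bucket_name({'Bucket': 'not a bucket!'})
        raise AssertionError('expected ParamValidationError')
    except ParamValidationError:
        pass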
def sse_md5(params, **kwargs):
"""
S3 server-side encryption requires the encryption key to be sent to the
server base64 encoded, as well as a base64-encoded MD5 hash of the
encryption key. This handler does both if the MD5 has not been set by
the caller.
"""
_sse_md5(params, 'SSECustomer')
def copy_source_sse_md5(params, **kwargs):
"""
S3 server-side encryption requires the encryption key to be sent to the
server base64 encoded, as well as a base64-encoded MD5 hash of the
encryption key. This handler does both if the MD5 has not been set by
the caller specifically if the parameter is for the copy-source sse-c key.
"""
_sse_md5(params, 'CopySourceSSECustomer')
def _sse_md5(params, sse_member_prefix='SSECustomer'):
if not _needs_s3_sse_customization(params, sse_member_prefix):
return
sse_key_member = sse_member_prefix + 'Key'
sse_md5_member = sse_member_prefix + 'KeyMD5'
key_as_bytes = params[sse_key_member]
if isinstance(key_as_bytes, six.text_type):
key_as_bytes = key_as_bytes.encode('utf-8')
key_md5_str = base64.b64encode(
get_md5(key_as_bytes).digest()).decode('utf-8')
key_b64_encoded = base64.b64encode(key_as_bytes).decode('utf-8')
params[sse_key_member] = key_b64_encoded
params[sse_md5_member] = key_md5_str
def _needs_s3_sse_customization(params, sse_member_prefix):
return (params.get(sse_member_prefix + 'Key') is not None and
sse_member_prefix + 'KeyMD5' not in params)
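def _sse_md5_example():  # pragma: no cover
    # Illustrative sketch, not part of botocore: the customer-provided key is
    # base64-encoded in place and a base64 MD5 digest is added alongside it,
    # as S3 SSE-C requires.
    params = {'SSECustomerKey': 'k' * 32}
    sse_md5(params)
    assert params['SSECustomerKey'] == base64.b64encode(b'k' * 32).decode('utf-8')
    assert 'SSECustomerKeyMD5' in params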
def disable_signing(**kwargs):
"""
This handler disables request signing by setting the signer
name to a special sentinel value.
"""
return botocore.UNSIGNED
def add_expect_header(model, params, **kwargs):
if model.http.get('method', '') not in ['PUT', 'POST']:
return
if 'body' in params:
body = params['body']
if hasattr(body, 'read'):
# Any file like object will use an expect 100-continue
# header regardless of size.
logger.debug("Adding expect 100 continue header to request.")
params['headers']['Expect'] = '100-continue'
class DeprecatedServiceDocumenter(object):
def __init__(self, replacement_service_name):
self._replacement_service_name = replacement_service_name
def inject_deprecation_notice(self, section, event_name, **kwargs):
section.style.start_important()
section.write('This service client is deprecated. Please use ')
section.style.ref(
self._replacement_service_name,
self._replacement_service_name,
)
section.write(' instead.')
section.style.end_important()
def document_copy_source_form(section, event_name, **kwargs):
if 'request-example' in event_name:
parent = section.get_section('structure-value')
param_line = parent.get_section('CopySource')
value_portion = param_line.get_section('member-value')
value_portion.clear_text()
value_portion.write("'string' or {'Bucket': 'string', "
"'Key': 'string', 'VersionId': 'string'}")
elif 'request-params' in event_name:
param_section = section.get_section('CopySource')
type_section = param_section.get_section('param-type')
type_section.clear_text()
type_section.write(':type CopySource: str or dict')
doc_section = param_section.get_section('param-documentation')
doc_section.clear_text()
doc_section.write(
"The name of the source bucket, key name of the source object, "
"and optional version ID of the source object. You can either "
"provide this value as a string or a dictionary. The "
"string form is {bucket}/{key} or "
"{bucket}/{key}?versionId={versionId} if you want to copy a "
"specific version. You can also provide this value as a "
"dictionary. The dictionary format is recommended over "
"the string format because it is more explicit. The dictionary "
"format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}."
" Note that the VersionId key is optional and may be omitted."
" To specify an S3 access point, provide the access point"
" ARN for the ``Bucket`` key in the copy source dictionary. If you"
" want to provide the copy source for an S3 access point as a"
" string instead of a dictionary, the ARN provided must be the"
" full S3 access point object ARN"
" (i.e. {accesspoint_arn}/object/{key})"
)
def handle_copy_source_param(params, **kwargs):
"""Convert CopySource param for CopyObject/UploadPartCopy.
This handler will deal with two cases:
* CopySource provided as a string. We'll make a best effort
to URL encode the key name as required. This will require
parsing the bucket and version id from the CopySource value
and only encoding the key.
* CopySource provided as a dict. In this case we're
explicitly given the Bucket, Key, and VersionId so we're
able to encode the key and ensure this value is serialized
and correctly sent to S3.
"""
source = params.get('CopySource')
if source is None:
# The call will eventually fail but we'll let the
# param validator take care of this. It will
# give a better error message.
return
if isinstance(source, six.string_types):
params['CopySource'] = _quote_source_header(source)
elif isinstance(source, dict):
params['CopySource'] = _quote_source_header_from_dict(source)
def _quote_source_header_from_dict(source_dict):
try:
bucket = source_dict['Bucket']
key = source_dict['Key']
version_id = source_dict.get('VersionId')
if VALID_S3_ARN.search(bucket):
final = '%s/object/%s' % (bucket, key)
else:
final = '%s/%s' % (bucket, key)
except KeyError as e:
raise ParamValidationError(
report='Missing required parameter: %s' % str(e))
final = percent_encode(final, safe=SAFE_CHARS + '/')
if version_id is not None:
final += '?versionId=%s' % version_id
return final
def _quote_source_header(value):
result = VERSION_ID_SUFFIX.search(value)
if result is None:
return percent_encode(value, safe=SAFE_CHARS + '/')
else:
first, version_id = value[:result.start()], value[result.start():]
return percent_encode(first, safe=SAFE_CHARS + '/') + version_id
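# Illustrative sketch of the two accepted CopySource forms (bucket and key
# names here are made up):
#
#     params = {'CopySource': 'mybucket/my key.txt?versionId=abc123'}
#     handle_copy_source_param(params)
#     # params['CopySource'] == 'mybucket/my%20key.txt?versionId=abc123'
#
#     params = {'CopySource': {'Bucket': 'mybucket', 'Key': 'my key.txt'}}
#     handle_copy_source_param(params)
#     # params['CopySource'] == 'mybucket/my%20key.txt'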
def _get_cross_region_presigned_url(request_signer, request_dict, model,
source_region, destination_region):
# The better way to do this is to actually get the
# endpoint_resolver and get the endpoint_url given the
# source region. In this specific case, we know that
# we can safely replace the dest region with the source
# region because of the supported EC2 regions, but in
# general this is not a safe assumption to make.
# I think eventually we should try to plumb through something
# that allows us to resolve endpoints from regions.
request_dict_copy = copy.deepcopy(request_dict)
request_dict_copy['body']['DestinationRegion'] = destination_region
request_dict_copy['url'] = request_dict['url'].replace(
destination_region, source_region)
request_dict_copy['method'] = 'GET'
request_dict_copy['headers'] = {}
return request_signer.generate_presigned_url(
request_dict_copy, region_name=source_region,
operation_name=model.name)
def _get_presigned_url_source_and_destination_regions(request_signer, params):
# Gets the source and destination regions to be used
destination_region = request_signer._region_name
source_region = params.get('SourceRegion')
return source_region, destination_region
def inject_presigned_url_ec2(params, request_signer, model, **kwargs):
# The customer can still provide this, so we should pass if they do.
if 'PresignedUrl' in params['body']:
return
src, dest = _get_presigned_url_source_and_destination_regions(
request_signer, params['body'])
url = _get_cross_region_presigned_url(
request_signer, params, model, src, dest)
params['body']['PresignedUrl'] = url
# EC2 Requires that the destination region be sent over the wire in
# addition to the source region.
params['body']['DestinationRegion'] = dest
def inject_presigned_url_rds(params, request_signer, model, **kwargs):
# SourceRegion is not required for RDS operations, so it's possible that
# it isn't set. In that case it's probably a local copy so we don't need
# to do anything else.
if 'SourceRegion' not in params['body']:
return
src, dest = _get_presigned_url_source_and_destination_regions(
request_signer, params['body'])
# Since SourceRegion isn't actually modeled for RDS, it needs to be
# removed from the request params before we send the actual request.
del params['body']['SourceRegion']
if 'PreSignedUrl' in params['body']:
return
url = _get_cross_region_presigned_url(
request_signer, params, model, src, dest)
params['body']['PreSignedUrl'] = url
def json_decode_policies(parsed, model, **kwargs):
# Any time an IAM operation returns a policy document
# it is a string that is json that has been urlencoded,
# i.e urlencode(json.dumps(policy_document)).
# To give users something more useful, we will urldecode
# this value and json.loads() the result so that they have
# the policy document as a dictionary.
output_shape = model.output_shape
if output_shape is not None:
_decode_policy_types(parsed, model.output_shape)
def _decode_policy_types(parsed, shape):
# IAM consistently uses the policyDocumentType shape to indicate
# strings that have policy documents.
shape_name = 'policyDocumentType'
if shape.type_name == 'structure':
for member_name, member_shape in shape.members.items():
if member_shape.type_name == 'string' and \
member_shape.name == shape_name and \
member_name in parsed:
parsed[member_name] = decode_quoted_jsondoc(
parsed[member_name])
elif member_name in parsed:
_decode_policy_types(parsed[member_name], member_shape)
if shape.type_name == 'list':
shape_member = shape.member
for item in parsed:
_decode_policy_types(item, shape_member)
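# Hedged sketch of the transformation performed for IAM policy members,
# assuming decode_quoted_jsondoc does URL unquoting followed by json.loads
# (member names here are illustrative):
#
#     parsed = {'PolicyDocument': '%7B%22Version%22%3A%222012-10-17%22%7D'}
#     # After _decode_policy_types runs against a shape whose member is the
#     # policyDocumentType string shape:
#     # parsed == {'PolicyDocument': {'Version': '2012-10-17'}}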
def parse_get_bucket_location(parsed, http_response, **kwargs):
# s3.GetBucketLocation cannot be modeled properly. To
# account for this we just manually parse the XML document.
# The "parsed" passed in only has the ResponseMetadata
# filled out. This handler will fill in the LocationConstraint
# value.
if http_response.raw is None:
return
response_body = http_response.content
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding='utf-8')
parser.feed(response_body)
root = parser.close()
region = root.text
parsed['LocationConstraint'] = region
def base64_encode_user_data(params, **kwargs):
if 'UserData' in params:
if isinstance(params['UserData'], six.text_type):
# Encode it to bytes if it is text.
params['UserData'] = params['UserData'].encode('utf-8')
params['UserData'] = base64.b64encode(
params['UserData']).decode('utf-8')
def document_base64_encoding(param):
description = ('**This value will be base64 encoded automatically. Do '
'not base64 encode this value prior to performing the '
'operation.**')
append = AppendParamDocumentation(param, description)
return append.append_documentation
def validate_ascii_metadata(params, **kwargs):
"""Verify S3 Metadata only contains ascii characters.
From: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
"Amazon S3 stores user-defined metadata in lowercase. Each name, value pair
must conform to US-ASCII when using REST and UTF-8 when using SOAP or
browser-based uploads via POST."
"""
metadata = params.get('Metadata')
if not metadata or not isinstance(metadata, dict):
# We have to at least type check the metadata as a dict type
# because this handler is called before param validation.
# We'll go ahead and return because the param validator will
# give a descriptive error message for us.
# We might need a post-param validation event.
return
for key, value in metadata.items():
try:
key.encode('ascii')
value.encode('ascii')
except UnicodeEncodeError as e:
error_msg = (
                'Non-ASCII characters found in S3 metadata '
                'for key "%s", value: "%s".\nS3 metadata can only '
                'contain ASCII characters.' % (key, value)
)
raise ParamValidationError(
report=error_msg)
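# Example of input this handler rejects (a hypothetical call, for
# illustration only):
#
#     params = {'Metadata': {'title': u'r\u00e9sum\u00e9'}}
#     validate_ascii_metadata(params)  # raises ParamValidationError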
def fix_route53_ids(params, model, **kwargs):
"""
Check for and split apart Route53 resource IDs, setting
only the last piece. This allows the output of one operation
(e.g. ``'foo/1234'``) to be used as input in another
operation (e.g. it expects just ``'1234'``).
"""
input_shape = model.input_shape
if not input_shape or not hasattr(input_shape, 'members'):
return
members = [name for (name, shape) in input_shape.members.items()
if shape.name in ['ResourceId', 'DelegationSetId']]
for name in members:
if name in params:
orig_value = params[name]
params[name] = orig_value.split('/')[-1]
logger.debug('%s %s -> %s', name, orig_value, params[name])
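# Sketch of the normalization, assuming an operation whose input shape has
# an 'Id' member with shape name 'ResourceId':
#
#     params = {'Id': '/hostedzone/Z123456'}
#     fix_route53_ids(params, model)
#     # params == {'Id': 'Z123456'}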
def inject_account_id(params, **kwargs):
if params.get('accountId') is None:
# Glacier requires accountId, but allows you
# to specify '-' for the current owners account.
# We add this default value if the user does not
# provide the accountId as a convenience.
params['accountId'] = '-'
def add_glacier_version(model, params, **kwargs):
request_dict = params
request_dict['headers']['x-amz-glacier-version'] = model.metadata[
'apiVersion']
def add_accept_header(model, params, **kwargs):
if params['headers'].get('Accept', None) is None:
request_dict = params
request_dict['headers']['Accept'] = 'application/json'
def add_glacier_checksums(params, **kwargs):
"""Add glacier checksums to the http request.
This will add two headers to the http request:
* x-amz-content-sha256
* x-amz-sha256-tree-hash
These values will only be added if they are not present
in the HTTP request.
"""
request_dict = params
headers = request_dict['headers']
body = request_dict['body']
if isinstance(body, six.binary_type):
        # If the user provided a bytes type instead of a file-like
        # object, we temporarily create a BytesIO object so we can
        # use the util functions to calculate the checksums, which
        # assume file-like objects. Note that we're not actually
        # changing the body in the request_dict.
body = six.BytesIO(body)
starting_position = body.tell()
if 'x-amz-content-sha256' not in headers:
headers['x-amz-content-sha256'] = utils.calculate_sha256(
body, as_hex=True)
body.seek(starting_position)
if 'x-amz-sha256-tree-hash' not in headers:
headers['x-amz-sha256-tree-hash'] = utils.calculate_tree_hash(body)
body.seek(starting_position)
def document_glacier_tree_hash_checksum():
doc = '''
This is a required field.
    Ideally you will want to compute this value with checksums from
    previously uploaded parts, using the algorithm described in
    `Glacier documentation <http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html>`_.
    But if you prefer, you can also use botocore.utils.calculate_tree_hash()
    to compute it from the raw file by::
checksum = calculate_tree_hash(open('your_file.txt', 'rb'))
'''
return AppendParamDocumentation('checksum', doc).append_documentation
def document_cloudformation_get_template_return_type(section, event_name, **kwargs):
if 'response-params' in event_name:
template_body_section = section.get_section('TemplateBody')
type_section = template_body_section.get_section('param-type')
type_section.clear_text()
type_section.write('(*dict*) --')
elif 'response-example' in event_name:
parent = section.get_section('structure-value')
param_line = parent.get_section('TemplateBody')
value_portion = param_line.get_section('member-value')
value_portion.clear_text()
value_portion.write('{}')
def switch_host_machinelearning(request, **kwargs):
switch_host_with_param(request, 'PredictEndpoint')
def check_openssl_supports_tls_version_1_2(**kwargs):
import ssl
try:
openssl_version_tuple = ssl.OPENSSL_VERSION_INFO
if openssl_version_tuple < (1, 0, 1):
warnings.warn(
'Currently installed openssl version: %s does not '
'support TLS 1.2, which is required for use of iot-data. '
'Please use python installed with openssl version 1.0.1 or '
'higher.' % (ssl.OPENSSL_VERSION),
UnsupportedTLSVersionWarning
)
    # We cannot check the openssl version on python2.6, so we should just
    # skip this convenience check.
except AttributeError:
pass
def change_get_to_post(request, **kwargs):
# This is useful when we need to change a potentially large GET request
# into a POST with x-www-form-urlencoded encoding.
if request.method == 'GET' and '?' in request.url:
request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
request.method = 'POST'
request.url, request.data = request.url.split('?', 1)
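# Minimal sketch of the conversion, assuming ``request`` behaves like
# botocore.awsrequest.AWSRequest (URL and query string are made up):
#
#     # before: request.method == 'GET'
#     #         request.url == 'https://search-host/2013-01-01/search?q=tree'
#     change_get_to_post(request)
#     # after:  request.method == 'POST'
#     #         request.url == 'https://search-host/2013-01-01/search'
#     #         request.data == 'q=tree'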
def set_list_objects_encoding_type_url(params, context, **kwargs):
if 'EncodingType' not in params:
# We set this context so that we know it wasn't the customer that
# requested the encoding.
context['encoding_type_auto_set'] = True
params['EncodingType'] = 'url'
def decode_list_object(parsed, context, **kwargs):
# This is needed because we are passing url as the encoding type. Since the
    # paginator is based on the key, we need to handle it before it can be
    # round-tripped.
#
# From the documentation: If you specify encoding-type request parameter,
# Amazon S3 includes this element in the response, and returns encoded key
# name values in the following response elements:
# Delimiter, Marker, Prefix, NextMarker, Key.
_decode_list_object(
top_level_keys=['Delimiter', 'Marker', 'NextMarker'],
nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
parsed=parsed,
context=context
)
def decode_list_object_v2(parsed, context, **kwargs):
# From the documentation: If you specify encoding-type request parameter,
# Amazon S3 includes this element in the response, and returns encoded key
# name values in the following response elements:
# Delimiter, Prefix, ContinuationToken, Key, and StartAfter.
_decode_list_object(
top_level_keys=['Delimiter', 'Prefix', 'StartAfter'],
nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
parsed=parsed,
context=context
)
def decode_list_object_versions(parsed, context, **kwargs):
# From the documentation: If you specify encoding-type request parameter,
# Amazon S3 includes this element in the response, and returns encoded key
# name values in the following response elements:
# KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
_decode_list_object(
top_level_keys=[
'KeyMarker',
'NextKeyMarker',
'Prefix',
'Delimiter',
],
nested_keys=[
('Versions', 'Key'),
('DeleteMarkers', 'Key'),
('CommonPrefixes', 'Prefix'),
],
parsed=parsed,
context=context
)
def _decode_list_object(top_level_keys, nested_keys, parsed, context):
if parsed.get('EncodingType') == 'url' and \
context.get('encoding_type_auto_set'):
# URL decode top-level keys in the response if present.
for key in top_level_keys:
if key in parsed:
parsed[key] = unquote_str(parsed[key])
# URL decode nested keys from the response if present.
for (top_key, child_key) in nested_keys:
if top_key in parsed:
for member in parsed[top_key]:
member[child_key] = unquote_str(member[child_key])
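# Sketch of the decode step, assuming the encoding type was auto-set by
# set_list_objects_encoding_type_url above:
#
#     parsed = {'EncodingType': 'url',
#               'Contents': [{'Key': 'my%20key.txt'}]}
#     context = {'encoding_type_auto_set': True}
#     decode_list_object(parsed=parsed, context=context)
#     # parsed['Contents'][0]['Key'] == 'my key.txt'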
def convert_body_to_file_like_object(params, **kwargs):
if 'Body' in params:
if isinstance(params['Body'], six.string_types):
params['Body'] = six.BytesIO(ensure_bytes(params['Body']))
elif isinstance(params['Body'], six.binary_type):
params['Body'] = six.BytesIO(params['Body'])
def _add_parameter_aliases(handler_list):
# Mapping of original parameter to parameter alias.
# The key is <service>.<operation>.parameter
# The first part of the key is used for event registration.
# The last part is the original parameter name and the value is the
# alias to expose in documentation.
aliases = {
'ec2.*.Filter': 'Filters',
'logs.CreateExportTask.from': 'fromTime',
'cloudsearchdomain.Search.return': 'returnFields'
}
for original, new_name in aliases.items():
event_portion, original_name = original.rsplit('.', 1)
parameter_alias = ParameterAlias(original_name, new_name)
# Add the handlers to the list of handlers.
# One handler is to handle when users provide the alias.
# The other handler is to update the documentation to show only
# the alias.
parameter_build_event_handler_tuple = (
'before-parameter-build.' + event_portion,
parameter_alias.alias_parameter_in_call,
REGISTER_FIRST
)
docs_event_handler_tuple = (
'docs.*.' + event_portion + '.complete-section',
parameter_alias.alias_parameter_in_documentation)
handler_list.append(parameter_build_event_handler_tuple)
handler_list.append(docs_event_handler_tuple)
class ParameterAlias(object):
def __init__(self, original_name, alias_name):
self._original_name = original_name
self._alias_name = alias_name
def alias_parameter_in_call(self, params, model, **kwargs):
if model.input_shape:
# Only consider accepting the alias if it is modeled in the
# input shape.
if self._original_name in model.input_shape.members:
if self._alias_name in params:
if self._original_name in params:
raise AliasConflictParameterError(
original=self._original_name,
alias=self._alias_name,
operation=model.name
)
# Remove the alias parameter value and use the old name
# instead.
params[self._original_name] = params.pop(self._alias_name)
def alias_parameter_in_documentation(self, event_name, section, **kwargs):
if event_name.startswith('docs.request-params'):
if self._original_name not in section.available_sections:
return
# Replace the name for parameter type
param_section = section.get_section(self._original_name)
param_type_section = param_section.get_section('param-type')
self._replace_content(param_type_section)
# Replace the name for the parameter description
param_name_section = param_section.get_section('param-name')
self._replace_content(param_name_section)
elif event_name.startswith('docs.request-example'):
section = section.get_section('structure-value')
if self._original_name not in section.available_sections:
return
# Replace the name for the example
param_section = section.get_section(self._original_name)
self._replace_content(param_section)
def _replace_content(self, section):
content = section.getvalue().decode('utf-8')
updated_content = content.replace(
self._original_name, self._alias_name)
section.clear_text()
section.write(updated_content)
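# Usage sketch: with the 'ec2.*.Filter' alias registered above, a caller may
# pass ``Filters`` and it is renamed back to the modeled ``Filter`` before
# the request is built (passing both raises AliasConflictParameterError):
#
#     alias = ParameterAlias('Filter', 'Filters')
#     params = {'Filters': [{'Name': 'instance-type', 'Values': ['t2.micro']}]}
#     alias.alias_parameter_in_call(params, model)  # model defines 'Filter'
#     # params == {'Filter': [{'Name': 'instance-type', 'Values': ['t2.micro']}]}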
class ClientMethodAlias(object):
def __init__(self, actual_name):
""" Aliases a non-extant method to an existing method.
:param actual_name: The name of the method that actually exists on
the client.
"""
self._actual = actual_name
def __call__(self, client, **kwargs):
return getattr(client, self._actual)
# TODO: Remove this class as it is no longer used
class HeaderToHostHoister(object):
"""Takes a header and moves it to the front of the hoststring.
"""
_VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?<!-)$', re.IGNORECASE)
def __init__(self, header_name):
self._header_name = header_name
def hoist(self, params, **kwargs):
"""Hoist a header to the hostname.
Hoist a header to the beginning of the hostname with a suffix "." after
it. The original header should be removed from the header map. This
method is intended to be used as a target for the before-call event.
"""
if self._header_name not in params['headers']:
return
header_value = params['headers'][self._header_name]
self._ensure_header_is_valid_host(header_value)
original_url = params['url']
new_url = self._prepend_to_host(original_url, header_value)
params['url'] = new_url
def _ensure_header_is_valid_host(self, header):
match = self._VALID_HOSTNAME.match(header)
if not match:
raise ParamValidationError(report=(
                'Hostnames must contain only "-" and alphanumeric characters, '
                'and be between 1 and 63 characters long.'
))
def _prepend_to_host(self, url, prefix):
url_components = urlsplit(url)
parts = url_components.netloc.split('.')
parts = [prefix] + parts
new_netloc = '.'.join(parts)
new_components = (
url_components.scheme,
new_netloc,
url_components.path,
url_components.query,
''
)
new_url = urlunsplit(new_components)
return new_url
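# Sketch of the (now unused) hoisting behavior, with a made-up header and
# endpoint:
#
#     hoister = HeaderToHostHoister('x-amz-account-id')
#     params = {'headers': {'x-amz-account-id': '123456789012'},
#               'url': 'https://service.us-east-1.amazonaws.com/path'}
#     hoister.hoist(params)
#     # params['url'] ==
#     #     'https://123456789012.service.us-east-1.amazonaws.com/path'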
def inject_api_version_header_if_needed(model, params, **kwargs):
if not model.is_endpoint_discovery_operation:
return
params['headers']['x-amz-api-version'] = model.service_model.api_version
def remove_lex_v2_start_conversation(class_attributes, **kwargs):
"""Operation requires h2 which is currently unsupported in Python"""
if 'start_conversation' in class_attributes:
del class_attributes['start_conversation']
# This is a list of (event_name, handler).
# When a Session is created, everything in this list will be
# automatically registered with that Session.
BUILTIN_HANDLERS = [
('choose-service-name', handle_service_name_alias),
('getattr.mturk.list_hi_ts_for_qualification_type',
ClientMethodAlias('list_hits_for_qualification_type')),
('before-parameter-build.s3.UploadPart',
convert_body_to_file_like_object, REGISTER_LAST),
('before-parameter-build.s3.PutObject',
convert_body_to_file_like_object, REGISTER_LAST),
('creating-client-class', add_generate_presigned_url),
('creating-client-class.s3', add_generate_presigned_post),
('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2),
('creating-client-class.lex-runtime-v2', remove_lex_v2_start_conversation),
('after-call.iam', json_decode_policies),
('after-call.ec2.GetConsoleOutput', decode_console_output),
('after-call.cloudformation.GetTemplate', json_decode_template_body),
('after-call.s3.GetBucketLocation', parse_get_bucket_location),
('before-parameter-build', generate_idempotent_uuid),
('before-parameter-build.s3', validate_bucket_name),
('before-parameter-build.s3.ListObjects',
set_list_objects_encoding_type_url),
('before-parameter-build.s3.ListObjectsV2',
set_list_objects_encoding_type_url),
('before-parameter-build.s3.ListObjectVersions',
set_list_objects_encoding_type_url),
('before-parameter-build.s3.CopyObject',
handle_copy_source_param),
('before-parameter-build.s3.UploadPartCopy',
handle_copy_source_param),
('before-parameter-build.s3.CopyObject', validate_ascii_metadata),
('before-parameter-build.s3.PutObject', validate_ascii_metadata),
('before-parameter-build.s3.CreateMultipartUpload',
validate_ascii_metadata),
('docs.*.s3.CopyObject.complete-section', document_copy_source_form),
('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form),
('before-call.s3', add_expect_header),
('before-call.glacier', add_glacier_version),
('before-call.apigateway', add_accept_header),
('before-call.s3.PutObject', conditionally_calculate_md5),
('before-call.s3.UploadPart', conditionally_calculate_md5),
('before-call.s3.DeleteObjects', escape_xml_payload),
('before-call.s3.PutBucketLifecycleConfiguration', escape_xml_payload),
('before-call.glacier.UploadArchive', add_glacier_checksums),
('before-call.glacier.UploadMultipartPart', add_glacier_checksums),
('before-call.ec2.CopySnapshot', inject_presigned_url_ec2),
('request-created.machinelearning.Predict', switch_host_machinelearning),
('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST),
('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST),
('needs-retry.s3.CompleteMultipartUpload', check_for_200_error,
REGISTER_FIRST),
('choose-signer.cognito-identity.GetId', disable_signing),
('choose-signer.cognito-identity.GetOpenIdToken', disable_signing),
('choose-signer.cognito-identity.UnlinkIdentity', disable_signing),
('choose-signer.cognito-identity.GetCredentialsForIdentity',
disable_signing),
('choose-signer.sts.AssumeRoleWithSAML', disable_signing),
('choose-signer.sts.AssumeRoleWithWebIdentity', disable_signing),
('choose-signer', set_operation_specific_signer),
('before-parameter-build.s3.HeadObject', sse_md5),
('before-parameter-build.s3.GetObject', sse_md5),
('before-parameter-build.s3.PutObject', sse_md5),
('before-parameter-build.s3.CopyObject', sse_md5),
('before-parameter-build.s3.CopyObject', copy_source_sse_md5),
('before-parameter-build.s3.CreateMultipartUpload', sse_md5),
('before-parameter-build.s3.UploadPart', sse_md5),
('before-parameter-build.s3.UploadPartCopy', sse_md5),
('before-parameter-build.s3.UploadPartCopy', copy_source_sse_md5),
('before-parameter-build.ec2.RunInstances', base64_encode_user_data),
('before-parameter-build.autoscaling.CreateLaunchConfiguration',
base64_encode_user_data),
('before-parameter-build.route53', fix_route53_ids),
('before-parameter-build.glacier', inject_account_id),
('after-call.s3.ListObjects', decode_list_object),
('after-call.s3.ListObjectsV2', decode_list_object_v2),
('after-call.s3.ListObjectVersions', decode_list_object_versions),
# Cloudsearchdomain search operation will be sent by HTTP POST
('request-created.cloudsearchdomain.Search',
change_get_to_post),
# Glacier documentation customizations
('docs.*.glacier.*.complete-section',
     AutoPopulatedParam('accountId', 'Note: this parameter is set to "-" by '
                        'default if no value is specified.')
.document_auto_populated_param),
('docs.*.glacier.UploadArchive.complete-section',
AutoPopulatedParam('checksum').document_auto_populated_param),
('docs.*.glacier.UploadMultipartPart.complete-section',
AutoPopulatedParam('checksum').document_auto_populated_param),
('docs.request-params.glacier.CompleteMultipartUpload.complete-section',
document_glacier_tree_hash_checksum()),
# Cloudformation documentation customizations
('docs.*.cloudformation.GetTemplate.complete-section',
document_cloudformation_get_template_return_type),
# UserData base64 encoding documentation customizations
('docs.*.ec2.RunInstances.complete-section',
document_base64_encoding('UserData')),
('docs.*.autoscaling.CreateLaunchConfiguration.complete-section',
document_base64_encoding('UserData')),
# EC2 CopySnapshot documentation customizations
('docs.*.ec2.CopySnapshot.complete-section',
AutoPopulatedParam('PresignedUrl').document_auto_populated_param),
('docs.*.ec2.CopySnapshot.complete-section',
AutoPopulatedParam('DestinationRegion').document_auto_populated_param),
# S3 SSE documentation modifications
('docs.*.s3.*.complete-section',
AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param),
# S3 SSE Copy Source documentation modifications
('docs.*.s3.*.complete-section',
AutoPopulatedParam(
'CopySourceSSECustomerKeyMD5').document_auto_populated_param),
# Add base64 information to Lambda
('docs.*.lambda.UpdateFunctionCode.complete-section',
document_base64_encoding('ZipFile')),
# The following S3 operations cannot actually accept a ContentMD5
('docs.*.s3.*.complete-section',
HideParamFromOperations(
's3', 'ContentMD5',
['DeleteObjects', 'PutBucketAcl', 'PutBucketCors',
'PutBucketLifecycle', 'PutBucketLogging', 'PutBucketNotification',
'PutBucketPolicy', 'PutBucketReplication', 'PutBucketRequestPayment',
'PutBucketTagging', 'PutBucketVersioning', 'PutBucketWebsite',
'PutObjectAcl']).hide_param),
#############
# RDS
#############
('creating-client-class.rds', add_generate_db_auth_token),
('before-call.rds.CopyDBClusterSnapshot',
inject_presigned_url_rds),
('before-call.rds.CreateDBCluster',
inject_presigned_url_rds),
('before-call.rds.CopyDBSnapshot',
inject_presigned_url_rds),
('before-call.rds.CreateDBInstanceReadReplica',
inject_presigned_url_rds),
('before-call.rds.StartDBInstanceAutomatedBackupsReplication',
inject_presigned_url_rds),
# RDS PresignedUrl documentation customizations
('docs.*.rds.CopyDBClusterSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.CreateDBCluster.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.CopyDBSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.CreateDBInstanceReadReplica.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
#############
# Neptune
#############
('before-call.neptune.CopyDBClusterSnapshot',
inject_presigned_url_rds),
('before-call.neptune.CreateDBCluster',
inject_presigned_url_rds),
# Neptune PresignedUrl documentation customizations
('docs.*.neptune.CopyDBClusterSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.neptune.CreateDBCluster.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
#############
# DocDB
#############
('before-call.docdb.CopyDBClusterSnapshot',
inject_presigned_url_rds),
('before-call.docdb.CreateDBCluster',
inject_presigned_url_rds),
# DocDB PresignedUrl documentation customizations
('docs.*.docdb.CopyDBClusterSnapshot.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
('docs.*.docdb.CreateDBCluster.complete-section',
AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
###########
# SMS Voice
##########
('docs.title.sms-voice',
DeprecatedServiceDocumenter(
'pinpoint-sms-voice').inject_deprecation_notice),
('before-call', inject_api_version_header_if_needed),
]
_add_parameter_aliases(BUILTIN_HANDLERS)
| 46,277 |
Python
| 39.701847 | 114 | 0.659528 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/exceptions.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from botocore.vendored import requests
from botocore.vendored.requests.packages import urllib3
def _exception_from_packed_args(exception_cls, args=None, kwargs=None):
# This is helpful for reducing Exceptions that only accept kwargs as
# only positional arguments can be provided for __reduce__
# Ideally, this would also be a class method on the BotoCoreError
# but instance methods cannot be pickled.
if args is None:
args = ()
if kwargs is None:
kwargs = {}
return exception_cls(*args, **kwargs)
class BotoCoreError(Exception):
"""
The base exception class for BotoCore exceptions.
:ivar msg: The descriptive message associated with the error.
"""
fmt = 'An unspecified error occurred'
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
def __reduce__(self):
return _exception_from_packed_args, (self.__class__, None, self.kwargs)
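# Pickling round-trip sketch showing why __reduce__ repacks kwargs
# (illustrative only):
#
#     import pickle
#     err = DataNotFoundError(data_path='foo/bar')
#     restored = pickle.loads(pickle.dumps(err))
#     assert str(restored) == 'Unable to load data for: foo/bar'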
class DataNotFoundError(BotoCoreError):
"""
The data associated with a particular path could not be loaded.
:ivar data_path: The data path that the user attempted to load.
"""
fmt = 'Unable to load data for: {data_path}'
class UnknownServiceError(DataNotFoundError):
"""Raised when trying to load data for an unknown service.
:ivar service_name: The name of the unknown service.
"""
fmt = (
"Unknown service: '{service_name}'. Valid service names are: "
"{known_service_names}")
class ApiVersionNotFoundError(BotoCoreError):
"""
The data associated with either the API version or a compatible one
could not be loaded.
:ivar data_path: The data path that the user attempted to load.
:ivar api_version: The API version that the user attempted to load.
"""
fmt = 'Unable to load data {data_path} for: {api_version}'
class HTTPClientError(BotoCoreError):
fmt = 'An HTTP Client raised an unhandled exception: {error}'
def __init__(self, request=None, response=None, **kwargs):
self.request = request
self.response = response
super(HTTPClientError, self).__init__(**kwargs)
def __reduce__(self):
return _exception_from_packed_args, (
self.__class__, (self.request, self.response), self.kwargs)
class ConnectionError(BotoCoreError):
fmt = 'An HTTP Client failed to establish a connection: {error}'
class InvalidIMDSEndpointError(BotoCoreError):
fmt = 'Invalid endpoint EC2 Instance Metadata endpoint: {endpoint}'
class EndpointConnectionError(ConnectionError):
fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"'
class SSLError(ConnectionError, requests.exceptions.SSLError):
fmt = 'SSL validation failed for {endpoint_url} {error}'
class ConnectionClosedError(HTTPClientError):
fmt = (
'Connection was closed before we received a valid response '
'from endpoint URL: "{endpoint_url}".')
class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout,
urllib3.exceptions.ReadTimeoutError):
fmt = 'Read timeout on endpoint URL: "{endpoint_url}"'
class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout):
fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"'
class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError):
fmt = 'Failed to connect to proxy URL: "{proxy_url}"'
class NoCredentialsError(BotoCoreError):
"""
No credentials could be found.
"""
fmt = 'Unable to locate credentials'
class PartialCredentialsError(BotoCoreError):
"""
Only partial credentials were found.
:ivar cred_var: The missing credential variable name.
"""
fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
class CredentialRetrievalError(BotoCoreError):
"""
Error attempting to retrieve credentials from a remote source.
:ivar provider: The name of the credential provider.
:ivar error_msg: The msg explaining why credentials could not be
retrieved.
"""
fmt = 'Error when retrieving credentials from {provider}: {error_msg}'
class UnknownSignatureVersionError(BotoCoreError):
"""
Requested Signature Version is not known.
:ivar signature_version: The name of the requested signature version.
"""
fmt = 'Unknown Signature Version: {signature_version}.'
class ServiceNotInRegionError(BotoCoreError):
"""
The service is not available in requested region.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = 'Service {service_name} not available in region {region_name}'
class BaseEndpointResolverError(BotoCoreError):
"""Base error for endpoint resolving errors.
Should never be raised directly, but clients can catch
this exception if they want to generically handle any errors
during the endpoint resolution process.
"""
class NoRegionError(BaseEndpointResolverError):
"""No region was specified."""
fmt = 'You must specify a region.'
class UnknownEndpointError(BaseEndpointResolverError, ValueError):
"""
Could not construct an endpoint.
:ivar service_name: The name of the service.
:ivar region_name: The name of the region.
"""
fmt = (
'Unable to construct an endpoint for '
'{service_name} in region {region_name}')
class ProfileNotFound(BotoCoreError):
"""
The specified configuration profile was not found in the
configuration file.
:ivar profile: The name of the profile the user attempted to load.
"""
fmt = 'The config profile ({profile}) could not be found'
class ConfigParseError(BotoCoreError):
"""
The configuration file could not be parsed.
:ivar path: The path to the configuration file.
"""
fmt = 'Unable to parse config file: {path}'
class ConfigNotFound(BotoCoreError):
"""
The specified configuration file could not be found.
:ivar path: The path to the configuration file.
"""
fmt = 'The specified config file ({path}) could not be found.'
class MissingParametersError(BotoCoreError):
"""
One or more required parameters were not supplied.
:ivar object: The object that has missing parameters.
This can be an operation or a parameter (in the
case of inner params). The str() of this object
will be used so it doesn't need to implement anything
other than str().
:ivar missing: The names of the missing parameters.
"""
fmt = ('The following required parameters are missing for '
'{object_name}: {missing}')
class ValidationError(BotoCoreError):
"""
An exception occurred validating parameters.
Subclasses must accept a ``value`` and ``param``
argument in their ``__init__``.
:ivar value: The value that was being validated.
:ivar param: The parameter that failed validation.
:ivar type_name: The name of the underlying type.
"""
fmt = ("Invalid value ('{value}') for param {param} "
"of type {type_name} ")
class ParamValidationError(BotoCoreError):
fmt = 'Parameter validation failed:\n{report}'
# These exceptions subclass from ValidationError so that code
# can just 'except ValidationError' to catch any possibly validation
# error.
class UnknownKeyError(ValidationError):
"""
Unknown key in a struct parameter.
:ivar value: The value that was being checked.
:ivar param: The name of the parameter.
:ivar choices: The valid choices the value can be.
"""
fmt = ("Unknown key '{value}' for param '{param}'. Must be one "
"of: {choices}")
class RangeError(ValidationError):
"""
A parameter value was out of the valid range.
:ivar value: The value that was being checked.
:ivar param: The parameter that failed validation.
:ivar min_value: The specified minimum value.
:ivar max_value: The specified maximum value.
"""
fmt = ('Value out of range for param {param}: '
'{min_value} <= {value} <= {max_value}')
class UnknownParameterError(ValidationError):
"""
Unknown top level parameter.
:ivar name: The name of the unknown parameter.
:ivar operation: The name of the operation.
:ivar choices: The valid choices the parameter name can be.
"""
fmt = (
"Unknown parameter '{name}' for operation {operation}. Must be one "
"of: {choices}"
)
class InvalidRegionError(ValidationError, ValueError):
"""
Invalid region_name provided to client or resource.
:ivar region_name: region_name that was being validated.
"""
fmt = (
"Provided region_name '{region_name}' doesn't match a supported format."
)
class AliasConflictParameterError(ValidationError):
"""
Error when an alias is provided for a parameter as well as the original.
:ivar original: The name of the original parameter.
:ivar alias: The name of the alias
:ivar operation: The name of the operation.
"""
fmt = (
"Parameter '{original}' and its alias '{alias}' were provided "
"for operation {operation}. Only one of them may be used."
)
class UnknownServiceStyle(BotoCoreError):
"""
Unknown style of service invocation.
:ivar service_style: The style requested.
"""
fmt = 'The service style ({service_style}) is not understood.'
class PaginationError(BotoCoreError):
fmt = 'Error during pagination: {message}'
class OperationNotPageableError(BotoCoreError):
fmt = 'Operation cannot be paginated: {operation_name}'
class ChecksumError(BotoCoreError):
"""The expected checksum did not match the calculated checksum.
"""
fmt = ('Checksum {checksum_type} failed, expected checksum '
'{expected_checksum} did not match calculated checksum '
'{actual_checksum}.')
class UnseekableStreamError(BotoCoreError):
"""Need to seek a stream, but stream does not support seeking.
"""
fmt = ('Need to rewind the stream {stream_object}, but stream '
'is not seekable.')
class WaiterError(BotoCoreError):
"""Waiter failed to reach desired state."""
fmt = 'Waiter {name} failed: {reason}'
def __init__(self, name, reason, last_response):
super(WaiterError, self).__init__(name=name, reason=reason)
self.last_response = last_response
class IncompleteReadError(BotoCoreError):
"""HTTP response did not return expected number of bytes."""
fmt = ('{actual_bytes} read, but total bytes '
'expected is {expected_bytes}.')
class InvalidExpressionError(BotoCoreError):
"""Expression is either invalid or too complex."""
fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
class UnknownCredentialError(BotoCoreError):
"""Tried to insert before/after an unregistered credential type."""
fmt = 'Credential named {name} not found.'
class WaiterConfigError(BotoCoreError):
"""Error when processing waiter configuration."""
fmt = 'Error processing waiter config: {error_msg}'
class UnknownClientMethodError(BotoCoreError):
"""Error when trying to access a method on a client that does not exist."""
fmt = 'Client does not have method: {method_name}'
class UnsupportedSignatureVersionError(BotoCoreError):
"""Error when trying to use an unsupported Signature Version."""
fmt = 'Signature version is not supported: {signature_version}'
class ClientError(Exception):
MSG_TEMPLATE = (
'An error occurred ({error_code}) when calling the {operation_name} '
'operation{retry_info}: {error_message}')
def __init__(self, error_response, operation_name):
retry_info = self._get_retry_info(error_response)
error = error_response.get('Error', {})
msg = self.MSG_TEMPLATE.format(
error_code=error.get('Code', 'Unknown'),
error_message=error.get('Message', 'Unknown'),
operation_name=operation_name,
retry_info=retry_info,
)
super(ClientError, self).__init__(msg)
self.response = error_response
self.operation_name = operation_name
def _get_retry_info(self, response):
retry_info = ''
if 'ResponseMetadata' in response:
metadata = response['ResponseMetadata']
if metadata.get('MaxAttemptsReached', False):
if 'RetryAttempts' in metadata:
retry_info = (' (reached max retries: %s)' %
metadata['RetryAttempts'])
return retry_info
def __reduce__(self):
# Subclasses of ClientError's are dynamically generated and
# cannot be pickled unless they are attributes of a
# module. So at the very least return a ClientError back.
return ClientError, (self.response, self.operation_name)
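# Typical consumption sketch; the error_response shape mirrors the parsed
# error body returned by a service (names below are illustrative):
#
#     try:
#         client.get_object(Bucket='bucket', Key='missing')
#     except ClientError as e:
#         if e.response['Error']['Code'] == 'NoSuchKey':
#             ...  # handle the missing key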
class EventStreamError(ClientError):
pass
class UnsupportedTLSVersionWarning(Warning):
"""Warn when an openssl version that uses TLS 1.2 is required"""
pass
class ImminentRemovalWarning(Warning):
pass
class InvalidDNSNameError(BotoCoreError):
"""Error when virtual host path is forced on a non-DNS compatible bucket"""
fmt = (
'Bucket named {bucket_name} is not DNS compatible. Virtual '
'hosted-style addressing cannot be used. The addressing style '
'can be configured by removing the addressing_style value '
'or setting that value to \'path\' or \'auto\' in the AWS Config '
'file or in the botocore.client.Config object.'
)
class InvalidS3AddressingStyleError(BotoCoreError):
"""Error when an invalid path style is specified"""
fmt = (
'S3 addressing style {s3_addressing_style} is invalid. Valid options '
'are: \'auto\', \'virtual\', and \'path\''
)
class UnsupportedS3ArnError(BotoCoreError):
"""Error when S3 ARN provided to Bucket parameter is not supported"""
fmt = (
'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only '
'ARNs for S3 access-points are supported.'
)
class UnsupportedS3ControlArnError(BotoCoreError):
"""Error when S3 ARN provided to S3 control parameter is not supported"""
fmt = (
'S3 ARN "{arn}" provided is invalid for this operation. {msg}'
)
class InvalidHostLabelError(BotoCoreError):
"""Error when an invalid host label would be bound to an endpoint"""
fmt = (
'Invalid host label to be bound to the hostname of the endpoint: '
'"{label}".'
)
class UnsupportedOutpostResourceError(BotoCoreError):
"""Error when S3 Outpost ARN provided to Bucket parameter is incomplete"""
fmt = (
'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" '
'parameter is invalid. Only ARNs for S3 Outpost arns with an '
'access-point sub-resource are supported.'
)
class UnsupportedS3ConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3: {msg}'
)
class UnsupportedS3AccesspointConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with access-points"""
fmt = (
'Unsupported configuration when using S3 access-points: {msg}'
)
class InvalidEndpointDiscoveryConfigurationError(BotoCoreError):
"""Error when invalid value supplied for endpoint_discovery_enabled"""
fmt = (
'Unsupported configuration value for endpoint_discovery_enabled. '
'Expected one of ("true", "false", "auto") but got {config_value}.'
)
class UnsupportedS3ControlConfigurationError(BotoCoreError):
"""Error when an unsupported configuration is used with S3 Control"""
fmt = (
'Unsupported configuration when using S3 Control: {msg}'
)
class InvalidRetryConfigurationError(BotoCoreError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Cannot provide retry configuration for "{retry_config_option}". '
'Valid retry configuration options are: \'max_attempts\''
)
class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError):
"""Error when invalid retry configuration is specified"""
fmt = (
'Value provided to "max_attempts": {provided_max_attempts} must '
'be an integer greater than or equal to {min_value}.'
)
class InvalidRetryModeError(InvalidRetryConfigurationError):
"""Error when invalid retry mode configuration is specified"""
fmt = (
'Invalid value provided to "mode": "{provided_retry_mode}" must '
'be one of: "legacy", "standard", "adaptive"'
)
class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError):
"""Error for invalid s3 us-east-1 regional endpoints configuration"""
fmt = (
'S3 us-east-1 regional endpoint option '
'{s3_us_east_1_regional_endpoint_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class InvalidSTSRegionalEndpointsConfigError(BotoCoreError):
"""Error when invalid sts regional endpoints configuration is specified"""
fmt = (
'STS regional endpoints option {sts_regional_endpoints_config} is '
'invalid. Valid options are: "legacy", "regional"'
)
class StubResponseError(BotoCoreError):
fmt = 'Error getting response stub for operation {operation_name}: {reason}'
class StubAssertionError(StubResponseError, AssertionError):
pass
class UnStubbedResponseError(StubResponseError):
pass
class InvalidConfigError(BotoCoreError):
fmt = '{error_msg}'
class InfiniteLoopConfigError(InvalidConfigError):
fmt = (
'Infinite loop in credential configuration detected. Attempting to '
'load from profile {source_profile} which has already been visited. '
'Visited profiles: {visited_profiles}'
)
class RefreshWithMFAUnsupportedError(BotoCoreError):
fmt = 'Cannot refresh credentials: MFA token required.'
class MD5UnavailableError(BotoCoreError):
fmt = "This system does not support MD5 generation."
class MetadataRetrievalError(BotoCoreError):
fmt = "Error retrieving metadata: {error_msg}"
class UndefinedModelAttributeError(Exception):
pass
class MissingServiceIdError(UndefinedModelAttributeError):
fmt = (
"The model being used for the service {service_name} is missing the "
"serviceId metadata property, which is required."
)
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
class SSOError(BotoCoreError):
fmt = "An unspecified error happened when resolving SSO credentials"
class SSOTokenLoadError(SSOError):
fmt = "Error loading SSO Token: {error_msg}"
class UnauthorizedSSOTokenError(SSOError):
fmt = (
"The SSO session associated with this profile has expired or is "
"otherwise invalid. To refresh this SSO session run aws sso login "
"with the corresponding profile."
)
class CapacityNotAvailableError(BotoCoreError):
fmt = (
'Insufficient request capacity available.'
)
class InvalidProxiesConfigError(BotoCoreError):
fmt = (
'Invalid configuration value(s) provided for proxies_config.'
)
| 20,137 |
Python
| 29.933948 | 80 | 0.684015 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/signers.py
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import weakref
import json
import base64
import botocore
import botocore.auth
from botocore.compat import six, OrderedDict
from botocore.awsrequest import create_request_object, prepare_request_dict
from botocore.exceptions import UnknownSignatureVersionError
from botocore.exceptions import UnknownClientMethodError
from botocore.exceptions import UnsupportedSignatureVersionError
from botocore.utils import fix_s3_host, datetime2timestamp
class RequestSigner(object):
"""
An object to sign requests before they go out over the wire using
one of the authentication mechanisms defined in ``auth.py``. This
class fires two events scoped to a service and operation name:
* choose-signer: Allows overriding the auth signer name.
* before-sign: Allows mutating the request before signing.
Together these events allow for customization of the request
signing pipeline, including overrides, request path manipulation,
and disabling signing per operation.
:type service_id: botocore.model.ServiceId
:param service_id: The service id for the service, e.g. ``S3``
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:type credentials: :py:class:`~botocore.credentials.Credentials`
:param credentials: User credentials with which to sign requests.
:type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks`
:param event_emitter: Extension mechanism to fire events.
"""
def __init__(self, service_id, region_name, signing_name,
signature_version, credentials, event_emitter):
self._region_name = region_name
self._signing_name = signing_name
self._signature_version = signature_version
self._credentials = credentials
self._service_id = service_id
# We need weakref to prevent leaking memory in Python 2.6 on Linux 2.6
self._event_emitter = weakref.proxy(event_emitter)
@property
def region_name(self):
return self._region_name
@property
def signature_version(self):
return self._signature_version
@property
def signing_name(self):
return self._signing_name
def handler(self, operation_name=None, request=None, **kwargs):
# This is typically hooked up to the "request-created" event
# from a client's event emitter. When a new request is created
# this method is invoked to sign the request.
# Don't call this method directly.
return self.sign(operation_name, request)
def sign(self, operation_name, request, region_name=None,
signing_type='standard', expires_in=None, signing_name=None):
"""Sign a request before it goes out over the wire.
:type operation_name: string
:param operation_name: The name of the current operation, e.g.
``ListBuckets``.
:type request: AWSRequest
:param request: The request object to be sent over the wire.
:type region_name: str
:param region_name: The region to sign the request for.
:type signing_type: str
:param signing_type: The type of signing to perform. This can be one of
three possible values:
* 'standard' - This should be used for most requests.
* 'presign-url' - This should be used when pre-signing a request.
* 'presign-post' - This should be used when pre-signing an S3 post.
:type expires_in: int
:param expires_in: The number of seconds the presigned url is valid
for. This parameter is only valid for signing type 'presign-url'.
:type signing_name: str
:param signing_name: The name to use for the service when signing.
"""
explicit_region_name = region_name
if region_name is None:
region_name = self._region_name
if signing_name is None:
signing_name = self._signing_name
signature_version = self._choose_signer(
operation_name, signing_type, request.context)
# Allow mutating request before signing
self._event_emitter.emit(
'before-sign.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
request=request, signing_name=signing_name,
region_name=self._region_name,
signature_version=signature_version, request_signer=self,
operation_name=operation_name
)
if signature_version != botocore.UNSIGNED:
kwargs = {
'signing_name': signing_name,
'region_name': region_name,
'signature_version': signature_version
}
if expires_in is not None:
kwargs['expires'] = expires_in
signing_context = request.context.get('signing', {})
if not explicit_region_name and signing_context.get('region'):
kwargs['region_name'] = signing_context['region']
if signing_context.get('signing_name'):
kwargs['signing_name'] = signing_context['signing_name']
try:
auth = self.get_auth_instance(**kwargs)
except UnknownSignatureVersionError as e:
if signing_type != 'standard':
raise UnsupportedSignatureVersionError(
signature_version=signature_version)
else:
raise e
auth.add_auth(request)
def _choose_signer(self, operation_name, signing_type, context):
"""
Allow setting the signature version via the choose-signer event.
A value of `botocore.UNSIGNED` means no signing will be performed.
:param operation_name: The operation to sign.
:param signing_type: The type of signing that the signer is to be used
for.
:return: The signature version to sign with.
"""
signing_type_suffix_map = {
'presign-post': '-presign-post',
'presign-url': '-query'
}
suffix = signing_type_suffix_map.get(signing_type, '')
signature_version = self._signature_version
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
handler, response = self._event_emitter.emit_until_response(
'choose-signer.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
signing_name=self._signing_name, region_name=self._region_name,
signature_version=signature_version, context=context)
if response is not None:
signature_version = response
# The suffix needs to be checked again in case we get an improper
# signature version from choose-signer.
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
return signature_version
def get_auth_instance(self, signing_name, region_name,
signature_version=None, **kwargs):
"""
Get an auth instance which can be used to sign a request
using the given signature version.
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:rtype: :py:class:`~botocore.auth.BaseSigner`
:return: Auth instance to sign a request.
"""
if signature_version is None:
signature_version = self._signature_version
cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
if cls is None:
raise UnknownSignatureVersionError(
signature_version=signature_version)
# If there's no credentials provided (i.e credentials is None),
# then we'll pass a value of "None" over to the auth classes,
# which already handle the cases where no credentials have
# been provided.
frozen_credentials = None
if self._credentials is not None:
frozen_credentials = self._credentials.get_frozen_credentials()
kwargs['credentials'] = frozen_credentials
if cls.REQUIRES_REGION:
if self._region_name is None:
raise botocore.exceptions.NoRegionError()
kwargs['region_name'] = region_name
kwargs['service_name'] = signing_name
auth = cls(**kwargs)
return auth
# Alias get_auth for backwards compatibility.
get_auth = get_auth_instance
def generate_presigned_url(self, request_dict, operation_name,
expires_in=3600, region_name=None,
signing_name=None):
"""Generates a presigned url
:type request_dict: dict
:param request_dict: The prepared request dictionary returned by
``botocore.awsrequest.prepare_request_dict()``
:type operation_name: str
:param operation_name: The operation being signed.
:type expires_in: int
:param expires_in: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type region_name: string
:param region_name: The region name to sign the presigned url.
:type signing_name: str
:param signing_name: The name to use for the service when signing.
:returns: The presigned url
"""
request = create_request_object(request_dict)
self.sign(operation_name, request, region_name,
'presign-url', expires_in, signing_name)
request.prepare()
return request.url
class CloudFrontSigner(object):
'''A signer to create a signed CloudFront URL.
First you create a cloudfront signer based on a normalized RSA signer::
import rsa
def rsa_signer(message):
private_key = open('private_key.pem', 'r').read()
return rsa.sign(
message,
rsa.PrivateKey.load_pkcs1(private_key.encode('utf8')),
'SHA-1') # CloudFront requires SHA-1 hash
cf_signer = CloudFrontSigner(key_id, rsa_signer)
To sign with a canned policy::
signed_url = cf_signer.generate_signed_url(
url, date_less_than=datetime(2015, 12, 1))
To sign with a custom policy::
signed_url = cf_signer.generate_signed_url(url, policy=my_policy)
'''
def __init__(self, key_id, rsa_signer):
"""Create a CloudFrontSigner.
:type key_id: str
:param key_id: The CloudFront Key Pair ID
:type rsa_signer: callable
:param rsa_signer: An RSA signer.
Its only input parameter will be the message to be signed,
and its output will be the signed content as a binary string.
The hash algorithm needed by CloudFront is SHA-1.
"""
self.key_id = key_id
self.rsa_signer = rsa_signer
def generate_presigned_url(self, url, date_less_than=None, policy=None):
"""Creates a signed CloudFront URL based on given parameters.
:type url: str
:param url: The URL of the protected object
:type date_less_than: datetime
:param date_less_than: The URL will expire after that date and time
:type policy: str
:param policy: The custom policy, possibly built by self.build_policy()
:rtype: str
:return: The signed URL.
"""
if (date_less_than is not None and policy is not None or
date_less_than is None and policy is None):
e = 'Need to provide either date_less_than or policy, but not both'
raise ValueError(e)
if date_less_than is not None:
# We still need to build a canned policy for signing purpose
policy = self.build_policy(url, date_less_than)
if isinstance(policy, six.text_type):
policy = policy.encode('utf8')
if date_less_than is not None:
params = ['Expires=%s' % int(datetime2timestamp(date_less_than))]
else:
params = ['Policy=%s' % self._url_b64encode(policy).decode('utf8')]
signature = self.rsa_signer(policy)
params.extend([
'Signature=%s' % self._url_b64encode(signature).decode('utf8'),
'Key-Pair-Id=%s' % self.key_id,
])
return self._build_url(url, params)
def _build_url(self, base_url, extra_params):
separator = '&' if '?' in base_url else '?'
return base_url + separator + '&'.join(extra_params)
def build_policy(self, resource, date_less_than,
date_greater_than=None, ip_address=None):
"""A helper to build policy.
:type resource: str
:param resource: The URL or the stream filename of the protected object
:type date_less_than: datetime
:param date_less_than: The URL will expire after the time has passed
:type date_greater_than: datetime
:param date_greater_than: The URL will not be valid until this time
:type ip_address: str
:param ip_address: Use 'x.x.x.x' for an IP, or 'x.x.x.x/x' for a subnet
:rtype: str
:return: The policy in a compact string.
"""
# Note:
# 1. Order in canned policy is significant. Special care has been taken
# to ensure the output will match the order defined by the document.
# There is also a test case to ensure that order.
# SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-policy-statement
# 2. Although the order in the custom policy is not required by CloudFront,
# we still use OrderedDict internally to ensure the result is stable
# and also matches the canned policy requirement.
# SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html
moment = int(datetime2timestamp(date_less_than))
condition = OrderedDict({"DateLessThan": {"AWS:EpochTime": moment}})
if ip_address:
if '/' not in ip_address:
ip_address += '/32'
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
if date_greater_than:
moment = int(datetime2timestamp(date_greater_than))
condition["DateGreaterThan"] = {"AWS:EpochTime": moment}
ordered_payload = [('Resource', resource), ('Condition', condition)]
custom_policy = {"Statement": [OrderedDict(ordered_payload)]}
return json.dumps(custom_policy, separators=(',', ':'))
def _url_b64encode(self, data):
# Required by CloudFront. See also:
# http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-linux-openssl.html
return base64.b64encode(
data).replace(b'+', b'-').replace(b'=', b'_').replace(b'/', b'~')
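# Illustrative sketch (not part of botocore): building a custom policy with
# ``build_policy`` and signing it. The key pair id, key file and URL are
# hypothetical placeholders; the third-party ``rsa`` package is assumed, as
# in the class docstring above.
def _example_cloudfront_custom_policy():
    import datetime
    import rsa
    def rsa_signer(message):
        with open('private_key.pem', 'rb') as f:
            key = rsa.PrivateKey.load_pkcs1(f.read())
        return rsa.sign(message, key, 'SHA-1')  # CloudFront requires SHA-1
    signer = CloudFrontSigner('APKAEXAMPLE', rsa_signer)
    url = 'https://d111111abcdef8.cloudfront.net/image.jpg'
    policy = signer.build_policy(
        url, date_less_than=datetime.datetime(2030, 1, 1),
        ip_address='192.0.2.0/24')
    return signer.generate_presigned_url(url, policy=policy)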
def add_generate_db_auth_token(class_attributes, **kwargs):
class_attributes['generate_db_auth_token'] = generate_db_auth_token
def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
"""Generates an auth token used to connect to a db with IAM credentials.
:type DBHostname: str
:param DBHostname: The hostname of the database to connect to.
:type Port: int
:param Port: The port number the database is listening on.
:type DBUsername: str
:param DBUsername: The username to log in as.
:type Region: str
:param Region: The region the database is in. If None, the client
region will be used.
:return: A presigned url which can be used as an auth token.
"""
region = Region
if region is None:
region = self.meta.region_name
params = {
'Action': 'connect',
'DBUser': DBUsername,
}
request_dict = {
'url_path': '/',
'query_string': '',
'headers': {},
'body': params,
'method': 'GET'
}
# RDS requires that the scheme not be set when sent over. This can cause
# issues when signing because the Python url parsing libraries follow
# RFC 1808 closely, which states that a netloc must be introduced by `//`.
# Otherwise the url is presumed to be relative, and thus the whole
# netloc would be treated as a path component. To work around this we
# introduce https here and remove it once we're done processing it.
scheme = 'https://'
endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port)
prepare_request_dict(request_dict, endpoint_url)
presigned_url = self._request_signer.generate_presigned_url(
operation_name='connect', request_dict=request_dict,
region_name=region, expires_in=900, signing_name='rds-db'
)
return presigned_url[len(scheme):]
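# Illustrative sketch (not part of botocore): once ``generate_db_auth_token``
# is attached to an RDS client, the returned token is used as the password
# for an IAM-authenticated database login. Hostname, port and user below are
# hypothetical; the connection must also use SSL.
def _example_rds_iam_token(rds_client):
    return rds_client.generate_db_auth_token(
        DBHostname='mydb.cluster-abc123.us-east-1.rds.amazonaws.com',
        Port=5432, DBUsername='iam_db_user')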
class S3PostPresigner(object):
def __init__(self, request_signer):
self._request_signer = request_signer
def generate_presigned_post(self, request_dict, fields=None,
conditions=None, expires_in=3600,
region_name=None):
"""Generates the url and the form fields used for a presigned s3 post
:type request_dict: dict
:param request_dict: The prepared request dictionary returned by
``botocore.awsrequest.prepare_request_dict()``
:type fields: dict
:param fields: A dictionary of prefilled form fields to build on top
of.
:type conditions: list
:param conditions: A list of conditions to include in the policy. Each
element can be either a list or a structure. For example:
[
{"acl": "public-read"},
{"bucket": "mybucket"},
["starts-with", "$key", "mykey"]
]
:type expires_in: int
:param expires_in: The number of seconds the presigned post is valid
for.
:type region_name: string
:param region_name: The region name to sign the presigned post to.
:rtype: dict
:returns: A dictionary with two elements: ``url`` and ``fields``.
Url is the url to post to. Fields is a dictionary filled with
the form fields and respective values to use when submitting the
post. For example:
{'url': 'https://mybucket.s3.amazonaws.com',
'fields': {'acl': 'public-read',
'key': 'mykey',
'signature': 'mysignature',
'policy': 'mybase64 encoded policy'}
}
"""
if fields is None:
fields = {}
if conditions is None:
conditions = []
# Create the policy for the post.
policy = {}
# Create an expiration date for the policy
datetime_now = datetime.datetime.utcnow()
expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
# Append all of the conditions that the user supplied.
policy['conditions'] = []
for condition in conditions:
policy['conditions'].append(condition)
# Store the policy and the fields in the request for signing
request = create_request_object(request_dict)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
self._request_signer.sign(
'PutObject', request, region_name, 'presign-post')
# Return the url and the fields for the form to post.
return {'url': request.url, 'fields': fields}
def add_generate_presigned_url(class_attributes, **kwargs):
class_attributes['generate_presigned_url'] = generate_presigned_url
def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600,
HttpMethod=None):
"""Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
:returns: The presigned url
"""
client_method = ClientMethod
params = Params
if params is None:
params = {}
expires_in = ExpiresIn
http_method = HttpMethod
context = {
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
}
request_signer = self._request_signer
serializer = self._serializer
try:
operation_name = self._PY_TO_OP_NAME[client_method]
except KeyError:
raise UnknownClientMethodError(method_name=client_method)
operation_model = self.meta.service_model.operation_model(
operation_name)
params = self._emit_api_params(params, operation_model, context)
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
params, operation_model)
# Switch out the http method if user specified it.
if http_method is not None:
request_dict['method'] = http_method
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url, context=context)
# Generate the presigned url.
return request_signer.generate_presigned_url(
request_dict=request_dict, expires_in=expires_in,
operation_name=operation_name)
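# Illustrative sketch (not part of botocore): presigning an S3 GetObject call
# through the method attached above. Bucket and key names are hypothetical.
def _example_presign_get_object(s3_client):
    return s3_client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': 'my-bucket', 'Key': 'my-key'},
        ExpiresIn=300)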
def add_generate_presigned_post(class_attributes, **kwargs):
class_attributes['generate_presigned_post'] = generate_presigned_post
def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None,
ExpiresIn=3600):
"""Builds the url and the form fields used for a presigned s3 post
:type Bucket: string
:param Bucket: The name of the bucket to presign the post to. Note that
bucket related conditions should not be included in the
``conditions`` parameter.
:type Key: string
:param Key: Key name, optionally add ${filename} to the end to
attach the submitted filename. Note that key related conditions and
fields are filled out for you and should not be included in the
``Fields`` or ``Conditions`` parameter.
:type Fields: dict
:param Fields: A dictionary of prefilled form fields to build on top
of. Elements that may be included are acl, Cache-Control,
Content-Type, Content-Disposition, Content-Encoding, Expires,
success_action_redirect, redirect, success_action_status,
and x-amz-meta-.
Note that if a particular element is included in the fields
dictionary it will not be automatically added to the conditions
list. You must specify a condition for the element as well.
:type Conditions: list
:param Conditions: A list of conditions to include in the policy. Each
element can be either a list or a structure. For example:
[
{"acl": "public-read"},
["content-length-range", 2, 5],
["starts-with", "$success_action_redirect", ""]
]
Conditions that are included may pertain to acl,
content-length-range, Cache-Control, Content-Type,
Content-Disposition, Content-Encoding, Expires,
success_action_redirect, redirect, success_action_status,
and/or x-amz-meta-.
Note that if you include a condition, you must specify
a valid value in the fields dictionary as well. A value will
not be added automatically to the fields dictionary based on the
conditions.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned post
is valid for.
:rtype: dict
:returns: A dictionary with two elements: ``url`` and ``fields``.
Url is the url to post to. Fields is a dictionary filled with
the form fields and respective values to use when submitting the
post. For example:
{'url': 'https://mybucket.s3.amazonaws.com',
'fields': {'acl': 'public-read',
'key': 'mykey',
'signature': 'mysignature',
'policy': 'mybase64 encoded policy'}
}
"""
bucket = Bucket
key = Key
fields = Fields
conditions = Conditions
expires_in = ExpiresIn
if fields is None:
fields = {}
else:
fields = fields.copy()
if conditions is None:
conditions = []
post_presigner = S3PostPresigner(self._request_signer)
serializer = self._serializer
# We choose the CreateBucket operation model because its url gets
# serialized to what a presign post requires.
operation_model = self.meta.service_model.operation_model(
'CreateBucket')
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
{'Bucket': bucket}, operation_model)
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url,
context={
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
},
)
# Append the bucket name to the list of conditions.
conditions.append({'bucket': bucket})
# If the key ends with ${filename}, the only constraint that can be
# imposed is that it starts with the specified prefix.
if key.endswith('${filename}'):
conditions.append(["starts-with", '$key', key[:-len('${filename}')]])
else:
conditions.append({'key': key})
# Add the key to the fields.
fields['key'] = key
return post_presigner.generate_presigned_post(
request_dict=request_dict, fields=fields, conditions=conditions,
expires_in=expires_in)
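# Illustrative sketch (not part of botocore): submitting the generated post
# with the third-party ``requests`` package. Bucket, key and the size limits
# below are hypothetical.
def _example_presigned_post_upload(s3_client, filename):
    import requests  # assumed to be installed; not a botocore dependency
    post = s3_client.generate_presigned_post(
        Bucket='my-bucket', Key='uploads/${filename}',
        Conditions=[['content-length-range', 1, 10485760]])
    with open(filename, 'rb') as f:
        return requests.post(
            post['url'], data=post['fields'], files={'file': f})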
def _should_use_global_endpoint(client):
if client.meta.partition != 'aws':
return False
s3_config = client.meta.config.s3
if s3_config:
if s3_config.get('use_dualstack_endpoint', False):
return False
if s3_config.get('us_east_1_regional_endpoint') == 'regional' and \
client.meta.config.region_name == 'us-east-1':
return False
return True
| 28,234 |
Python
| 37.414966 | 194 | 0.629985 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/hooks.py
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import logging
from collections import defaultdict, deque, namedtuple
from botocore.compat import accepts_kwargs, six
from botocore.utils import EVENT_ALIASES
logger = logging.getLogger(__name__)
_NodeList = namedtuple('NodeList', ['first', 'middle', 'last'])
_FIRST = 0
_MIDDLE = 1
_LAST = 2
class NodeList(_NodeList):
def __copy__(self):
first_copy = copy.copy(self.first)
middle_copy = copy.copy(self.middle)
last_copy = copy.copy(self.last)
copied = NodeList(first_copy, middle_copy, last_copy)
return copied
def first_non_none_response(responses, default=None):
"""Find first non None response in a list of tuples.
This function can be used to find the first non None response from
handlers connected to an event. This is useful if you are interested
in the returned responses from event handlers. Example usage::
print(first_non_none_response([(func1, None), (func2, 'foo'),
(func3, 'bar')]))
# This will print 'foo'
:type responses: list of tuples
:param responses: The responses from the ``EventHooks.emit`` method.
This is a list of tuples, and each tuple is
(handler, handler_response).
:param default: If no non-None responses are found, then this default
value will be returned.
:return: The first non-None response in the list of tuples.
"""
for response in responses:
if response[1] is not None:
return response[1]
return default
class BaseEventHooks(object):
def emit(self, event_name, **kwargs):
"""Call all handlers subscribed to an event.
:type event_name: str
:param event_name: The name of the event to emit.
:type **kwargs: dict
:param **kwargs: Arbitrary kwargs to pass through to the
subscribed handlers. The ``event_name`` will be injected
into the kwargs so it's not necessary to add this to **kwargs.
:rtype: list of tuples
:return: A list of ``(handler_func, handler_func_return_value)``
"""
return []
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler for a given event.
If a ``unique_id`` is given, the handler will not be registered
if a handler with the ``unique_id`` has already been registered.
Handlers are called in the order they have been registered.
Note handlers can also be registered with ``register_first()``
and ``register_last()``. All handlers registered with
``register_first()`` are called before handlers registered
with ``register()`` which are called before handlers registered
with ``register_last()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register,
unique_id_uses_count=unique_id_uses_count)
def register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler to be called first for an event.
All event handlers registered with ``register_first()`` will
be called before handlers registered with ``register()`` and
``register_last()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register_first,
unique_id_uses_count=unique_id_uses_count)
def register_last(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler to be called last for an event.
All event handlers registered with ``register_last()`` will be called
after handlers registered with ``register_first()`` and ``register()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register_last,
unique_id_uses_count=unique_id_uses_count)
def _verify_and_register(self, event_name, handler, unique_id,
register_method, unique_id_uses_count):
self._verify_is_callable(handler)
self._verify_accept_kwargs(handler)
register_method(event_name, handler, unique_id, unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister an event handler for a given event.
If no ``unique_id`` was given during registration, then the
first instance of the event handler is removed (if the event
handler has been registered multiple times).
"""
pass
def _verify_is_callable(self, func):
if not six.callable(func):
raise ValueError("Event handler %s must be callable." % func)
def _verify_accept_kwargs(self, func):
"""Verifies a callable accepts kwargs
:type func: callable
:param func: A callable object.
:returns: True, if ``func`` accepts kwargs, otherwise False.
"""
try:
if not accepts_kwargs(func):
raise ValueError("Event handler %s must accept keyword "
"arguments (**kwargs)" % func)
except TypeError:
return False
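# Illustrative sketch (not part of botocore): ordering guarantees across the
# three registration methods, demonstrated on a HierarchicalEmitter (defined
# below). The event name is hypothetical.
def _example_handler_ordering():
    emitter = HierarchicalEmitter()
    calls = []
    emitter.register('my-event', lambda **kwargs: calls.append('middle'))
    emitter.register_last('my-event', lambda **kwargs: calls.append('last'))
    emitter.register_first('my-event', lambda **kwargs: calls.append('first'))
    emitter.emit('my-event')
    return calls  # ['first', 'middle', 'last']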
class HierarchicalEmitter(BaseEventHooks):
def __init__(self):
# We keep a reference to the handlers for quick
# read only access (we never modify self._handlers).
# A cache of event name to handler list.
self._lookup_cache = {}
self._handlers = _PrefixTrie()
# This is used to ensure that unique_id's are only
# registered once.
self._unique_id_handlers = {}
def _emit(self, event_name, kwargs, stop_on_response=False):
"""
Emit an event with optional keyword arguments.
:type event_name: string
:param event_name: Name of the event
:type kwargs: dict
:param kwargs: Arguments to be passed to the handler functions.
:type stop_on_response: boolean
:param stop_on_response: Whether to stop on the first non-None
response. If False, then all handlers
will be called. This is especially useful
to handlers which mutate data and then
want to stop propagation of the event.
:rtype: list
:return: List of (handler, response) tuples from all processed
handlers.
"""
responses = []
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
handlers_to_call = self._lookup_cache.get(event_name)
if handlers_to_call is None:
handlers_to_call = self._handlers.prefix_search(event_name)
self._lookup_cache[event_name] = handlers_to_call
elif not handlers_to_call:
# Short circuit and return an empty response if we have
# no handlers to call. This is the common case where
# for the majority of signals, nothing is listening.
return []
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('Event %s: calling handler %s', event_name, handler)
response = handler(**kwargs)
responses.append((handler, response))
if stop_on_response and response is not None:
return responses
return responses
def emit(self, event_name, **kwargs):
"""
Emit an event by name with arguments passed as keyword args.
>>> responses = emitter.emit(
... 'my-event.service.operation', arg1='one', arg2='two')
:rtype: list
:return: List of (handler, response) tuples from all processed
handlers.
"""
return self._emit(event_name, kwargs)
def emit_until_response(self, event_name, **kwargs):
"""
Emit an event by name with arguments passed as keyword args,
until the first non-``None`` response is received. This
method prevents subsequent handlers from being invoked.
>>> handler, response = emitter.emit_until_response(
'my-event.service.operation', arg1='one', arg2='two')
:rtype: tuple
:return: The first (handler, response) tuple where the response
is not ``None``, otherwise (``None``, ``None``).
"""
responses = self._emit(event_name, kwargs, stop_on_response=True)
if responses:
return responses[-1]
else:
return (None, None)
def _register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_MIDDLE)
def _register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_FIRST)
def _register_last(self, event_name, handler, unique_id,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_LAST)
def _register_section(self, event_name, handler, unique_id,
unique_id_uses_count, section):
if unique_id is not None:
if unique_id in self._unique_id_handlers:
# We've already registered a handler using this unique_id
# so we don't need to register it again.
count = self._unique_id_handlers[unique_id].get('count', None)
if unique_id_uses_count:
if not count:
raise ValueError(
"Initial registration of unique id %s was "
"specified to use a counter. Subsequent register "
"calls to unique id must specify use of a counter "
"as well." % unique_id)
else:
self._unique_id_handlers[unique_id]['count'] += 1
else:
if count:
raise ValueError(
"Initial registration of unique id %s was "
"specified to not use a counter. Subsequent "
"register calls to unique id must specify not to "
"use a counter as well." % unique_id)
return
else:
# Note that the trie knows nothing about the unique
# id. We track uniqueness in this class via the
# _unique_id_handlers.
self._handlers.append_item(event_name, handler,
section=section)
unique_id_handler_item = {'handler': handler}
if unique_id_uses_count:
unique_id_handler_item['count'] = 1
self._unique_id_handlers[unique_id] = unique_id_handler_item
else:
self._handlers.append_item(event_name, handler, section=section)
# Super simple caching strategy for now: if the registrations change,
# clear the cache. This leaves room for smarter invalidation later.
self._lookup_cache = {}
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
if unique_id is not None:
try:
count = self._unique_id_handlers[unique_id].get('count', None)
except KeyError:
# There's no handler matching that unique_id so we have
# nothing to unregister.
return
if unique_id_uses_count:
if count is None:
raise ValueError(
"Initial registration of unique id %s was specified to "
"use a counter. Subsequent unregister calls to unique "
"id must specify use of a counter as well." % unique_id)
elif count == 1:
handler = self._unique_id_handlers.pop(unique_id)['handler']
else:
self._unique_id_handlers[unique_id]['count'] -= 1
return
else:
if count:
raise ValueError(
"Initial registration of unique id %s was specified "
"to not use a counter. Subsequent unregister calls "
"to unique id must specify not to use a counter as "
"well." % unique_id)
handler = self._unique_id_handlers.pop(unique_id)['handler']
try:
self._handlers.remove_item(event_name, handler)
self._lookup_cache = {}
except ValueError:
pass
def __copy__(self):
new_instance = self.__class__()
new_state = self.__dict__.copy()
new_state['_handlers'] = copy.copy(self._handlers)
new_state['_unique_id_handlers'] = copy.copy(self._unique_id_handlers)
new_instance.__dict__ = new_state
return new_instance
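# Illustrative sketch (not part of botocore): hierarchical dispatch. Handlers
# registered on a prefix or with a '*' wildcard also receive more specific
# events, most specific first. The event names are hypothetical.
def _example_hierarchical_dispatch():
    emitter = HierarchicalEmitter()
    seen = []
    emitter.register('request-created', lambda **kwargs: seen.append('prefix'))
    emitter.register('request-created.s3.*',
                     lambda **kwargs: seen.append('wildcard'))
    emitter.emit('request-created.s3.PutObject')
    return seen  # ['wildcard', 'prefix']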
class EventAliaser(BaseEventHooks):
def __init__(self, event_emitter, event_aliases=None):
self._event_aliases = event_aliases
if event_aliases is None:
self._event_aliases = EVENT_ALIASES
self._emitter = event_emitter
def emit(self, event_name, **kwargs):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.emit(aliased_event_name, **kwargs)
def emit_until_response(self, event_name, **kwargs):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.emit_until_response(aliased_event_name, **kwargs)
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.register(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.register_first(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def register_last(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.register_last(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
aliased_event_name = self._alias_event_name(event_name)
return self._emitter.unregister(
aliased_event_name, handler, unique_id, unique_id_uses_count
)
def _alias_event_name(self, event_name):
for old_part, new_part in self._event_aliases.items():
# We can't simply do a string replace for everything, otherwise we
# might end up translating substrings that we never intended to
# translate. When there aren't any dots in the old event name
# part, then we can quickly replace the item in the list if it's
# there.
event_parts = event_name.split('.')
if '.' not in old_part:
try:
# Theoretically a given event name could have the same part
# repeated, but in practice this doesn't happen
event_parts[event_parts.index(old_part)] = new_part
except ValueError:
continue
# If there's dots in the name, it gets more complicated. Now we
# have to replace multiple sections of the original event.
elif old_part in event_name:
old_parts = old_part.split('.')
self._replace_subsection(event_parts, old_parts, new_part)
else:
continue
new_name = '.'.join(event_parts)
logger.debug("Changing event name from %s to %s" % (
event_name, new_name
))
return new_name
return event_name
def _replace_subsection(self, sections, old_parts, new_part):
for i in range(len(sections)):
if sections[i] == old_parts[0] and \
sections[i:i+len(old_parts)] == old_parts:
sections[i:i+len(old_parts)] = [new_part]
return
def __copy__(self):
return self.__class__(
copy.copy(self._emitter),
copy.copy(self._event_aliases)
)
class _PrefixTrie(object):
"""Specialized prefix trie that handles wildcards.
The prefixes in this case are based on dot separated
names so 'foo.bar.baz' is::
foo -> bar -> baz
Wildcard support just means that having a key such as 'foo.bar.*.baz' will
be matched with a call to ``prefix_search('foo.bar.ANYTHING.baz')``.
You can think of this prefix trie as the equivalent of defaultdict(list),
except that it can do prefix searches:
foo.bar.baz -> A
foo.bar -> B
foo -> C
Calling ``prefix_search('foo.bar.baz')`` will return [A + B + C], from
most specific to least specific.
"""
def __init__(self):
# Each dictionary can be thought of as a node, where a node
# has values associated with the node, and children is a link
# to more nodes. So 'foo.bar' would have a 'foo' node with
# a 'bar' node as a child of foo.
# {'foo': {'children': {'bar': {...}}}}.
self._root = {'chunk': None, 'children': {}, 'values': None}
def append_item(self, key, value, section=_MIDDLE):
"""Add an item to a key.
If a value is already associated with that key, the new
value is appended to the list for the key.
"""
key_parts = key.split('.')
current = self._root
for part in key_parts:
if part not in current['children']:
new_child = {'chunk': part, 'values': None, 'children': {}}
current['children'][part] = new_child
current = new_child
else:
current = current['children'][part]
if current['values'] is None:
current['values'] = NodeList([], [], [])
current['values'][section].append(value)
def prefix_search(self, key):
"""Collect all items that are prefixes of key.
Prefixes in this case are delineated by '.' characters, so
'foo.bar.baz' is a sequence of three "prefixes"
("foo", "bar", and "baz").
"""
collected = deque()
key_parts = key.split('.')
current = self._root
self._get_items(current, key_parts, collected, 0)
return collected
def _get_items(self, starting_node, key_parts, collected, starting_index):
stack = [(starting_node, starting_index)]
key_parts_len = len(key_parts)
# Traverse down the nodes, where at each level we add the
# next part from key_parts as well as the wildcard element '*'.
# This means for each node we see we potentially add two more
# elements to our stack.
while stack:
current_node, index = stack.pop()
if current_node['values']:
# We're using extendleft because we want
# the values associated with the node furthest
# from the root to come before nodes closer
# to the root. extendleft() also adds its items
# in right-left order so .extendleft([1, 2, 3])
# will result in final_list = [3, 2, 1], which is
# why we reverse the lists.
node_list = current_node['values']
complete_order = (node_list.first + node_list.middle +
node_list.last)
collected.extendleft(reversed(complete_order))
if not index == key_parts_len:
children = current_node['children']
directs = children.get(key_parts[index])
wildcard = children.get('*')
next_index = index + 1
if wildcard is not None:
stack.append((wildcard, next_index))
if directs is not None:
stack.append((directs, next_index))
def remove_item(self, key, value):
"""Remove an item associated with a key.
If the value is not associated with the key a ``ValueError``
will be raised. If the key does not exist in the trie, a
``ValueError`` will be raised.
"""
key_parts = key.split('.')
current = self._root
self._remove_item(current, key_parts, value, index=0)
def _remove_item(self, current_node, key_parts, value, index):
if current_node is None:
return
elif index < len(key_parts):
next_node = current_node['children'].get(key_parts[index])
if next_node is not None:
self._remove_item(next_node, key_parts, value, index + 1)
if index == len(key_parts) - 1:
node_list = next_node['values']
if value in node_list.first:
node_list.first.remove(value)
elif value in node_list.middle:
node_list.middle.remove(value)
elif value in node_list.last:
node_list.last.remove(value)
if not next_node['children'] and not next_node['values']:
# Then this is a leaf node with no values so
# we can just delete this link from the parent node.
# This makes subsequent search faster in the case
# where a key does not exist.
del current_node['children'][key_parts[index]]
else:
raise ValueError(
"key is not in trie: %s" % '.'.join(key_parts))
def __copy__(self):
# The fact that we're using a nested dict under the covers
# is an implementation detail, and the user shouldn't have
# to know that they'd normally need a deepcopy so we expose
# __copy__ instead of __deepcopy__.
new_copy = self.__class__()
copied_attrs = self._recursive_copy(self.__dict__)
new_copy.__dict__ = copied_attrs
return new_copy
def _recursive_copy(self, node):
# We can't use copy.deepcopy because we actually only want to copy
# the structure of the trie, not the handlers themselves.
# Each node has a chunk, children, and values.
copied_node = {}
for key, value in node.items():
if isinstance(value, NodeList):
copied_node[key] = copy.copy(value)
elif isinstance(value, dict):
copied_node[key] = self._recursive_copy(value)
else:
copied_node[key] = value
return copied_node
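# Illustrative sketch (not part of botocore): direct use of the prefix trie,
# showing the most-specific-first ordering of prefix_search results.
def _example_prefix_trie():
    trie = _PrefixTrie()
    trie.append_item('foo', 'C')
    trie.append_item('foo.bar', 'B')
    trie.append_item('foo.bar.baz', 'A')
    return list(trie.prefix_search('foo.bar.baz'))  # ['A', 'B', 'C']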
| 24,573 |
Python
| 40.650847 | 80 | 0.568836 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/awsrequest.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import io
import sys
import logging
import functools
import socket
import urllib3.util
from urllib3.connection import VerifiedHTTPSConnection
from urllib3.connection import HTTPConnection
from urllib3.connectionpool import HTTPConnectionPool
from urllib3.connectionpool import HTTPSConnectionPool
import botocore.utils
from botocore.compat import six
from botocore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit, \
urlencode, MutableMapping
from botocore.exceptions import UnseekableStreamError
logger = logging.getLogger(__name__)
class AWSHTTPResponse(HTTPResponse):
# The *args, **kwargs are used because the args are slightly
# different in py2.6 than in py2.7/py3.
def __init__(self, *args, **kwargs):
self._status_tuple = kwargs.pop('status_tuple')
HTTPResponse.__init__(self, *args, **kwargs)
def _read_status(self):
if self._status_tuple is not None:
status_tuple = self._status_tuple
self._status_tuple = None
return status_tuple
else:
return HTTPResponse._read_status(self)
class AWSConnection(object):
"""Mixin for HTTPConnection that supports Expect 100-continue.
When mixed with a subclass of httplib.HTTPConnection (though
technically we subclass from urllib3, which subclasses
httplib.HTTPConnection), this adds support for Expect
100-continue, which we need for S3. As far as I can tell, this is
general purpose enough to not be specific to S3, but I'm being
tentative and keeping it in botocore because I've only tested
this against AWS services.
"""
def __init__(self, *args, **kwargs):
super(AWSConnection, self).__init__(*args, **kwargs)
self._original_response_cls = self.response_class
# We'd ideally hook into httplib's states, but they're all
# __mangled_vars so we use our own state var. This variable is set
# when we receive an early response from the server. If this value is
# set to True, any calls to send() are noops. This value is reset to
# false every time _send_request is called. This is to workaround the
# fact that py2.6 (and only py2.6) has a separate send() call for the
# body in _send_request, as opposed to endheaders(), which is where the
# body is sent in all versions > 2.6.
self._response_received = False
self._expect_header_set = False
def close(self):
super(AWSConnection, self).close()
# Reset all of our instance state we were tracking.
self._response_received = False
self._expect_header_set = False
self.response_class = self._original_response_cls
def _send_request(self, method, url, body, headers, *args, **kwargs):
self._response_received = False
if headers.get('Expect', b'') == b'100-continue':
self._expect_header_set = True
else:
self._expect_header_set = False
self.response_class = self._original_response_cls
rval = super(AWSConnection, self)._send_request(
method, url, body, headers, *args, **kwargs)
self._expect_header_set = False
return rval
def _convert_to_bytes(self, mixed_buffer):
# Take a list of mixed str/bytes and convert it
# all into a single bytestring.
# Any six.text_types will be encoded as utf-8.
bytes_buffer = []
for chunk in mixed_buffer:
if isinstance(chunk, six.text_type):
bytes_buffer.append(chunk.encode('utf-8'))
else:
bytes_buffer.append(chunk)
msg = b"\r\n".join(bytes_buffer)
return msg
def _send_output(self, message_body=None, *args, **kwargs):
self._buffer.extend((b"", b""))
msg = self._convert_to_bytes(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if self._expect_header_set:
# This is our custom behavior. If the Expect header was
# set, it will trigger this custom behavior.
logger.debug("Waiting for 100 Continue response.")
# Wait for 1 second for the server to send a response.
if urllib3.util.wait_for_read(self.sock, 1):
self._handle_expect_response(message_body)
return
else:
# From the RFC:
# Because of the presence of older implementations, the
# protocol allows ambiguous situations in which a client may
# send "Expect: 100-continue" without receiving either a 417
# (Expectation Failed) status or a 100 (Continue) status.
# Therefore, when a client sends this header field to an origin
# server (possibly via a proxy) from which it has never seen a
# 100 (Continue) status, the client SHOULD NOT wait for an
# indefinite period before sending the request body.
logger.debug("No response seen from server, continuing to "
"send the response body.")
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
def _consume_headers(self, fp):
# Most servers (including S3) will just return
# the CRLF after the 100 continue response. However,
# some servers (I've specifically seen this for squid when
# used as a straight HTTP proxy) will also inject a
# Connection: keep-alive header. To account for this
# we'll read until we read '\r\n', and ignore any headers
# that come immediately after the 100 continue response.
current = None
while current != b'\r\n':
current = fp.readline()
def _handle_expect_response(self, message_body):
# This is called when we sent the request headers containing
# an Expect: 100-continue header and received a response.
# We now need to figure out what to do.
fp = self.sock.makefile('rb', 0)
try:
maybe_status_line = fp.readline()
parts = maybe_status_line.split(None, 2)
if self._is_100_continue_status(maybe_status_line):
self._consume_headers(fp)
logger.debug("100 Continue response seen, "
"now sending request body.")
self._send_message_body(message_body)
elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):
# From the RFC:
# Requirements for HTTP/1.1 origin servers:
#
# - Upon receiving a request which includes an Expect
# request-header field with the "100-continue"
# expectation, an origin server MUST either respond with
# 100 (Continue) status and continue to read from the
# input stream, or respond with a final status code.
#
# So if we don't get a 100 Continue response, then
# whatever the server has sent back is the final response
# and don't send the message_body.
logger.debug("Received a non 100 Continue response "
"from the server, NOT sending request body.")
status_tuple = (parts[0].decode('ascii'),
int(parts[1]), parts[2].decode('ascii'))
response_class = functools.partial(
AWSHTTPResponse, status_tuple=status_tuple)
self.response_class = response_class
self._response_received = True
finally:
fp.close()
def _send_message_body(self, message_body):
if message_body is not None:
self.send(message_body)
def send(self, str):
if self._response_received:
logger.debug("send() called, but reseponse already received. "
"Not sending data.")
return
return super(AWSConnection, self).send(str)
def _is_100_continue_status(self, maybe_status_line):
parts = maybe_status_line.split(None, 2)
# Check for HTTP/<version> 100 Continue\r\n
return (
len(parts) >= 3 and parts[0].startswith(b'HTTP/') and
parts[1] == b'100')
class AWSHTTPConnection(AWSConnection, HTTPConnection):
""" An HTTPConnection that supports 100 Continue behavior. """
class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection):
""" An HTTPSConnection that supports 100 Continue behavior. """
class AWSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = AWSHTTPConnection
class AWSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = AWSHTTPSConnection
def prepare_request_dict(request_dict, endpoint_url, context=None,
user_agent=None):
"""
This method prepares a request dict to be created into an
AWSRequestObject. This prepares the request dict by adding the
url and the user agent to the request dict.
:type request_dict: dict
:param request_dict: The request dict (created from the
``serialize`` module).
:type user_agent: string
:param user_agent: The user agent to use for this request.
:type endpoint_url: string
:param endpoint_url: The full endpoint url, which contains at least
the scheme, the hostname, and optionally any path components.
"""
r = request_dict
if user_agent is not None:
headers = r['headers']
headers['User-Agent'] = user_agent
host_prefix = r.get('host_prefix')
url = _urljoin(endpoint_url, r['url_path'], host_prefix)
if r['query_string']:
# NOTE: This is to avoid circular import with utils. This is being
# done to avoid moving classes to different modules so as not to cause
# breaking changes.
percent_encode_sequence = botocore.utils.percent_encode_sequence
encoded_query_string = percent_encode_sequence(r['query_string'])
if '?' not in url:
url += '?%s' % encoded_query_string
else:
url += '&%s' % encoded_query_string
r['url'] = url
r['context'] = context
if context is None:
r['context'] = {}
def create_request_object(request_dict):
"""
This method takes a request dict and creates an AWSRequest object
from it.
:type request_dict: dict
:param request_dict: The request dict (created from the
``prepare_request_dict`` method).
:rtype: ``botocore.awsrequest.AWSRequest``
:return: An AWSRequest object based on the request_dict.
"""
r = request_dict
request_object = AWSRequest(
method=r['method'], url=r['url'], data=r['body'], headers=r['headers'])
request_object.context = r['context']
return request_object
def _urljoin(endpoint_url, url_path, host_prefix):
p = urlsplit(endpoint_url)
# <part> - <index>
# scheme - p[0]
# netloc - p[1]
# path - p[2]
# query - p[3]
# fragment - p[4]
if not url_path or url_path == '/':
# If there's no path component, ensure the URL ends with
# a '/' for backwards compatibility.
if not p[2]:
new_path = '/'
else:
new_path = p[2]
elif p[2].endswith('/') and url_path.startswith('/'):
new_path = p[2][:-1] + url_path
else:
new_path = p[2] + url_path
new_netloc = p[1]
if host_prefix is not None:
new_netloc = host_prefix + new_netloc
reconstructed = urlunsplit((p[0], new_netloc, new_path, p[3], p[4]))
return reconstructed
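# Illustrative sketch (not part of botocore): the typical flow from a request
# dict to an AWSRequest object. Endpoint and parameter values are
# hypothetical.
def _example_request_dict_flow():
    request_dict = {
        'url_path': '/', 'query_string': {'Action': 'DescribeRegions'},
        'headers': {}, 'body': b'', 'method': 'GET',
    }
    prepare_request_dict(
        request_dict, endpoint_url='https://ec2.us-east-1.amazonaws.com')
    return create_request_object(request_dict)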
class AWSRequestPreparer(object):
"""
This class performs preparation on AWSRequest objects similar to that of
the PreparedRequest class does in the requests library. However, the logic
has been boiled down to meet the specific use cases in botocore. Of note
there are the following differences:
This class does not heavily prepare the URL. The requests library performs
many validations and corrections to ensure the URL is properly formatted.
Botocore either performs these validations elsewhere or otherwise
consistently provides well formatted URLs.
This class does not heavily prepare the body. Body preparation is
simple and supports only the cases that we document: bytes and
file-like objects to determine the content-length. It will also
url-encode a body that is a dict into a params string, as some
signers rely on this. Finally, this class does not
support multipart file uploads.
This class does not prepare the method, auth or cookies.
"""
def prepare(self, original):
method = original.method
url = self._prepare_url(original)
body = self._prepare_body(original)
headers = self._prepare_headers(original, body)
stream_output = original.stream_output
return AWSPreparedRequest(method, url, headers, body, stream_output)
def _prepare_url(self, original):
url = original.url
if original.params:
params = urlencode(list(original.params.items()), doseq=True)
url = '%s?%s' % (url, params)
return url
def _prepare_headers(self, original, prepared_body=None):
headers = HeadersDict(original.headers.items())
# If the transfer encoding or content length is already set, use that
if 'Transfer-Encoding' in headers or 'Content-Length' in headers:
return headers
# Ensure we set the content length when it is expected
if original.method not in ('GET', 'HEAD', 'OPTIONS'):
length = self._determine_content_length(prepared_body)
if length is not None:
headers['Content-Length'] = str(length)
else:
# Failed to determine content length, using chunked
# NOTE: This shouldn't ever happen in practice
body_type = type(prepared_body)
logger.debug('Failed to determine length of %s', body_type)
headers['Transfer-Encoding'] = 'chunked'
return headers
def _to_utf8(self, item):
key, value = item
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return key, value
def _prepare_body(self, original):
"""Prepares the given HTTP body data."""
body = original.data
if body == b'':
body = None
if isinstance(body, dict):
params = [self._to_utf8(item) for item in body.items()]
body = urlencode(params, doseq=True)
return body
def _determine_content_length(self, body):
# No body, content length of 0
if not body:
return 0
# Try asking the body for its length
try:
return len(body)
except (AttributeError, TypeError):
pass
# Try getting the length from a seekable stream
if hasattr(body, 'seek') and hasattr(body, 'tell'):
try:
orig_pos = body.tell()
body.seek(0, 2)
end_file_pos = body.tell()
body.seek(orig_pos)
return end_file_pos - orig_pos
except io.UnsupportedOperation:
# in case when body is, for example, io.BufferedIOBase object
# it has "seek" method which throws "UnsupportedOperation"
# exception in such case we want to fall back to "chunked"
# encoding
pass
# Failed to determine the length
return None
class AWSRequest(object):
"""Represents the elements of an HTTP request.
This class was originally inspired by requests.models.Request, but has been
boiled down to meet the specific use cases in botocore. That being said, this
class (even in requests) is effectively a named tuple.
"""
_REQUEST_PREPARER_CLS = AWSRequestPreparer
def __init__(self,
method=None,
url=None,
headers=None,
data=None,
params=None,
auth_path=None,
stream_output=False):
self._request_preparer = self._REQUEST_PREPARER_CLS()
# Default empty dicts for dict params.
params = {} if params is None else params
self.method = method
self.url = url
self.headers = HTTPHeaders()
self.data = data
self.params = params
self.auth_path = auth_path
self.stream_output = stream_output
if headers is not None:
for key, value in headers.items():
self.headers[key] = value
# This is a dictionary to hold information that is used when
# processing the request. What is inside of ``context`` is open-ended.
# For example, it may have a timestamp key that is used for holding
# what the timestamp is when signing the request. Note that none
# of the information that is inside of ``context`` is directly
# sent over the wire; the information is only used to assist in
# creating what is sent over the wire.
self.context = {}
def prepare(self):
"""Constructs a :class:`AWSPreparedRequest <AWSPreparedRequest>`."""
return self._request_preparer.prepare(self)
@property
def body(self):
body = self.prepare().body
if isinstance(body, six.text_type):
body = body.encode('utf-8')
return body
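# Illustrative sketch (not part of botocore): preparing a request by hand; a
# dict body is url-encoded and the Content-Length header is computed for it.
def _example_prepare_request():
    request = AWSRequest(
        method='POST', url='https://example.amazonaws.com/',
        data={'Action': 'ListThings'})
    prepared = request.prepare()
    return prepared.headers['Content-Length'], prepared.body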
class AWSPreparedRequest(object):
"""A data class representing a finalized request to be sent over the wire.
Requests at this stage should be treated as final, and the properties of
the request should not be modified.
:ivar method: The HTTP Method
:ivar url: The full url
:ivar headers: The HTTP headers to send.
:ivar body: The HTTP body.
:ivar stream_output: If the response for this request should be streamed.
"""
def __init__(self, method, url, headers, body, stream_output):
self.method = method
self.url = url
self.headers = headers
self.body = body
self.stream_output = stream_output
def __repr__(self):
fmt = (
'<AWSPreparedRequest stream_output=%s, method=%s, url=%s, '
'headers=%s>'
)
return fmt % (self.stream_output, self.method, self.url, self.headers)
def reset_stream(self):
"""Resets the streaming body to it's initial position.
If the request contains a streaming body (a streamable file-like object)
seek to the object's initial position to ensure the entire contents of
the object is sent. This is a no-op for static bytes-like body types.
"""
# Trying to reset a stream when there is no stream will
# just immediately return. It's not an error, it will produce
# the same result as if we had actually reset the stream (we'll send
# the entire body contents again if we need to).
# Same case if the body is a string/bytes/bytearray type.
non_seekable_types = (six.binary_type, six.text_type, bytearray)
if self.body is None or isinstance(self.body, non_seekable_types):
return
try:
logger.debug("Rewinding stream: %s", self.body)
self.body.seek(0)
except Exception as e:
logger.debug("Unable to rewind stream: %s", e)
raise UnseekableStreamError(stream_object=self.body)
class AWSResponse(object):
"""A data class representing an HTTP response.
This class was originally inspired by requests.models.Response, but has
been boiled down to meet the specific use cases in botocore. This has
effectively been reduced to a named tuple.
:ivar url: The full url.
:ivar status_code: The status code of the HTTP response.
:ivar headers: The HTTP headers received.
:ivar body: The HTTP response body.
"""
def __init__(self, url, status_code, headers, raw):
self.url = url
self.status_code = status_code
self.headers = HeadersDict(headers)
self.raw = raw
self._content = None
@property
def content(self):
"""Content of the response as bytes."""
if self._content is None:
# Read the contents.
# NOTE: requests would attempt to call stream and fall back
# to a custom generator that would call read in a loop, but
# we don't rely on this behavior
self._content = bytes().join(self.raw.stream()) or bytes()
return self._content
@property
def text(self):
"""Content of the response as a proper text type.
Uses the encoding type provided in the response headers to decode the
response content into a proper text type. If the encoding is not
present in the headers, UTF-8 is used as a default.
"""
encoding = botocore.utils.get_encoding_from_headers(self.headers)
if encoding:
return self.content.decode(encoding)
else:
return self.content.decode('utf-8')
class _HeaderKey(object):
def __init__(self, key):
self._key = key
self._lower = key.lower()
def __hash__(self):
return hash(self._lower)
def __eq__(self, other):
return isinstance(other, _HeaderKey) and self._lower == other._lower
def __str__(self):
return self._key
def __repr__(self):
return repr(self._key)
class HeadersDict(MutableMapping):
"""A case-insenseitive dictionary to represent HTTP headers. """
def __init__(self, *args, **kwargs):
self._dict = {}
self.update(*args, **kwargs)
def __setitem__(self, key, value):
self._dict[_HeaderKey(key)] = value
def __getitem__(self, key):
return self._dict[_HeaderKey(key)]
def __delitem__(self, key):
del self._dict[_HeaderKey(key)]
def __iter__(self):
return (str(key) for key in self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return repr(self._dict)
def copy(self):
return HeadersDict(self.items())
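# Illustrative sketch (not part of botocore): lookups are case-insensitive
# while iteration preserves the original key casing.
def _example_headers_dict():
    headers = HeadersDict({'Content-Type': 'application/json'})
    assert headers['content-type'] == 'application/json'
    return list(headers)  # ['Content-Type']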
| 23,613 |
Python
| 36.842949 | 80 | 0.615678 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/history.py
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
HISTORY_RECORDER = None
logger = logging.getLogger(__name__)
class BaseHistoryHandler(object):
def emit(self, event_type, payload, source):
raise NotImplementedError('emit()')
class HistoryRecorder(object):
def __init__(self):
self._enabled = False
self._handlers = []
def enable(self):
self._enabled = True
def disable(self):
self._enabled = False
def add_handler(self, handler):
self._handlers.append(handler)
def record(self, event_type, payload, source='BOTOCORE'):
if self._enabled and self._handlers:
for handler in self._handlers:
try:
handler.emit(event_type, payload, source)
except Exception:
# Never let the process die because we had a failure in
# a record collection handler.
logger.debug("Exception raised in %s.", handler,
exc_info=True)
def get_global_history_recorder():
global HISTORY_RECORDER
if HISTORY_RECORDER is None:
HISTORY_RECORDER = HistoryRecorder()
return HISTORY_RECORDER
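# Illustrative sketch (not part of botocore): a minimal handler that logs
# every recorded event, attached to the global recorder. The payload is
# hypothetical.
class _ExampleLoggingHandler(BaseHistoryHandler):
    def emit(self, event_type, payload, source):
        logger.debug('[%s] %s: %r', source, event_type, payload)
def _example_enable_history():
    recorder = get_global_history_recorder()
    recorder.add_handler(_ExampleLoggingHandler())
    recorder.enable()
    recorder.record('EXAMPLE_EVENT', {'note': 'hypothetical payload'})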
| 1,748 |
Python
| 30.232142 | 75 | 0.644737 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/errorfactory.py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.exceptions import ClientError
from botocore.utils import get_service_module_name
class BaseClientExceptions(object):
ClientError = ClientError
def __init__(self, code_to_exception):
"""Base class for exceptions object on a client
:type code_to_exception: dict
:param code_to_exception: Mapping of error codes (strings) to exception
class that should be raised when encountering a particular
error code.
"""
self._code_to_exception = code_to_exception
def from_code(self, error_code):
"""Retrieves the error class based on the error code
This is helpful for identifying the exception class needing to be
caught based on the ``ClientError.response['Error']['Code']`` value
:type error_code: string
:param error_code: The error code associated to a ClientError exception
:rtype: ClientError or a subclass of ClientError
:returns: The appropriate modeled exception class for that error
code. If the error code does not match any of the known
modeled exceptions then return a generic ClientError.
"""
return self._code_to_exception.get(error_code, self.ClientError)
def __getattr__(self, name):
exception_cls_names = [
exception_cls.__name__ for exception_cls
in self._code_to_exception.values()
]
raise AttributeError(
'%r object has no attribute %r. Valid exceptions are: %s' % (
self, name, ', '.join(exception_cls_names)))
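# Illustrative sketch (not part of botocore): resolving a modeled exception
# class from an error code via a client's ``exceptions`` attribute. The
# client, bucket and key are hypothetical.
def _example_catch_modeled_error(s3_client):
    try:
        return s3_client.get_object(Bucket='my-bucket', Key='missing-key')
    except s3_client.exceptions.from_code('NoSuchKey') as e:
        return e.response['Error']['Code']  # 'NoSuchKey'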
class ClientExceptionsFactory(object):
def __init__(self):
self._client_exceptions_cache = {}
def create_client_exceptions(self, service_model):
"""Creates a ClientExceptions object for the particular service client
:type service_model: botocore.model.ServiceModel
:param service_model: The service model for the client
:rtype: object that subclasses from BaseClientExceptions
:returns: The exceptions object of a client that can be used
to grab the various different modeled exceptions.
"""
service_name = service_model.service_name
if service_name not in self._client_exceptions_cache:
client_exceptions = self._create_client_exceptions(service_model)
self._client_exceptions_cache[service_name] = client_exceptions
return self._client_exceptions_cache[service_name]
def _create_client_exceptions(self, service_model):
cls_props = {}
code_to_exception = {}
for error_shape in service_model.error_shapes:
exception_name = str(error_shape.name)
exception_cls = type(exception_name, (ClientError,), {})
cls_props[exception_name] = exception_cls
code = str(error_shape.error_code)
code_to_exception[code] = exception_cls
cls_name = str(get_service_module_name(service_model) + 'Exceptions')
client_exceptions_cls = type(
cls_name, (BaseClientExceptions,), cls_props)
return client_exceptions_cls(code_to_exception)
| 3,727 |
Python
| 40.88764 | 79 | 0.669708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/args.py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Internal module to help with normalizing botocore client args.
This module (and all functions/classes within this module) should be
considered internal, and *not* a public API.
"""
import copy
import logging
import socket
import botocore.exceptions
import botocore.parsers
import botocore.serialize
import botocore.utils
from botocore.signers import RequestSigner
from botocore.config import Config
from botocore.endpoint import EndpointCreator
logger = logging.getLogger(__name__)
VALID_REGIONAL_ENDPOINTS_CONFIG = [
'legacy',
'regional',
]
LEGACY_GLOBAL_STS_REGIONS = [
'ap-northeast-1',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'aws-global',
'ca-central-1',
'eu-central-1',
'eu-north-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'sa-east-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',
]
class ClientArgsCreator(object):
def __init__(self, event_emitter, user_agent, response_parser_factory,
loader, exceptions_factory, config_store):
self._event_emitter = event_emitter
self._user_agent = user_agent
self._response_parser_factory = response_parser_factory
self._loader = loader
self._exceptions_factory = exceptions_factory
self._config_store = config_store
def get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials, scoped_config,
client_config, endpoint_bridge):
final_args = self.compute_client_args(
service_model, client_config, endpoint_bridge, region_name,
endpoint_url, is_secure, scoped_config)
service_name = final_args['service_name']
parameter_validation = final_args['parameter_validation']
endpoint_config = final_args['endpoint_config']
protocol = final_args['protocol']
config_kwargs = final_args['config_kwargs']
s3_config = final_args['s3_config']
partition = endpoint_config['metadata'].get('partition', None)
socket_options = final_args['socket_options']
signing_region = endpoint_config['signing_region']
endpoint_region_name = endpoint_config['region_name']
event_emitter = copy.copy(self._event_emitter)
signer = RequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
new_config = Config(**config_kwargs)
endpoint_creator = EndpointCreator(event_emitter)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
proxies_config=new_config.proxies_config)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation)
response_parser = botocore.parsers.create_parser(protocol)
return {
'serializer': serializer,
'endpoint': endpoint,
'response_parser': response_parser,
'event_emitter': event_emitter,
'request_signer': signer,
'service_model': service_model,
'loader': self._loader,
'client_config': new_config,
'partition': partition,
'exceptions_factory': self._exceptions_factory
}
def compute_client_args(self, service_model, client_config,
endpoint_bridge, region_name, endpoint_url,
is_secure, scoped_config):
service_name = service_model.endpoint_prefix
protocol = service_model.metadata['protocol']
parameter_validation = True
if client_config and not client_config.parameter_validation:
parameter_validation = False
elif scoped_config:
raw_value = scoped_config.get('parameter_validation')
if raw_value is not None:
parameter_validation = botocore.utils.ensure_boolean(raw_value)
# Override the user agent if specified in the client config.
user_agent = self._user_agent
if client_config is not None:
if client_config.user_agent is not None:
user_agent = client_config.user_agent
if client_config.user_agent_extra is not None:
user_agent += ' %s' % client_config.user_agent_extra
s3_config = self.compute_s3_config(client_config)
endpoint_config = self._compute_endpoint_config(
service_name=service_name,
region_name=region_name,
endpoint_url=endpoint_url,
is_secure=is_secure,
endpoint_bridge=endpoint_bridge,
s3_config=s3_config,
)
# Create a new client config to be passed to the client based
# on the final values. We do not want the user to be able
# to try to modify an existing client with a client config.
config_kwargs = dict(
region_name=endpoint_config['region_name'],
signature_version=endpoint_config['signature_version'],
user_agent=user_agent)
if client_config is not None:
config_kwargs.update(
connect_timeout=client_config.connect_timeout,
read_timeout=client_config.read_timeout,
max_pool_connections=client_config.max_pool_connections,
proxies=client_config.proxies,
proxies_config=client_config.proxies_config,
retries=client_config.retries,
client_cert=client_config.client_cert,
inject_host_prefix=client_config.inject_host_prefix,
)
self._compute_retry_config(config_kwargs)
s3_config = self.compute_s3_config(client_config)
return {
'service_name': service_name,
'parameter_validation': parameter_validation,
'user_agent': user_agent,
'endpoint_config': endpoint_config,
'protocol': protocol,
'config_kwargs': config_kwargs,
's3_config': s3_config,
'socket_options': self._compute_socket_options(scoped_config)
}
def compute_s3_config(self, client_config):
s3_configuration = self._config_store.get_config_variable('s3')
        # Next, specific client config values take precedence over
        # specific values in the scoped config.
if client_config is not None:
if client_config.s3 is not None:
if s3_configuration is None:
s3_configuration = client_config.s3
else:
                    # The current s3_configuration dictionary may come from
                    # a source that should be treated as read-only, so make
                    # a copy before modifying it.
s3_configuration = s3_configuration.copy()
s3_configuration.update(client_config.s3)
return s3_configuration
def _compute_endpoint_config(self, service_name, region_name, endpoint_url,
is_secure, endpoint_bridge, s3_config):
resolve_endpoint_kwargs = {
'service_name': service_name,
'region_name': region_name,
'endpoint_url': endpoint_url,
'is_secure': is_secure,
'endpoint_bridge': endpoint_bridge,
}
if service_name == 's3':
return self._compute_s3_endpoint_config(
s3_config=s3_config, **resolve_endpoint_kwargs)
if service_name == 'sts':
return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
return self._resolve_endpoint(**resolve_endpoint_kwargs)
def _compute_s3_endpoint_config(self, s3_config,
**resolve_endpoint_kwargs):
force_s3_global = self._should_force_s3_global(
resolve_endpoint_kwargs['region_name'], s3_config)
if force_s3_global:
resolve_endpoint_kwargs['region_name'] = None
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
self._set_region_if_custom_s3_endpoint(
endpoint_config, resolve_endpoint_kwargs['endpoint_bridge'])
# For backwards compatibility reasons, we want to make sure the
# client.meta.region_name will remain us-east-1 if we forced the
# endpoint to be the global region. Specifically, if this value
# changes to aws-global, it breaks logic where a user is checking
# for us-east-1 as the global endpoint such as in creating buckets.
if force_s3_global and endpoint_config['region_name'] == 'aws-global':
endpoint_config['region_name'] = 'us-east-1'
return endpoint_config
def _should_force_s3_global(self, region_name, s3_config):
s3_regional_config = 'legacy'
if s3_config and 'us_east_1_regional_endpoint' in s3_config:
s3_regional_config = s3_config['us_east_1_regional_endpoint']
self._validate_s3_regional_config(s3_regional_config)
return (
s3_regional_config == 'legacy' and
region_name in ['us-east-1', None]
)
def _validate_s3_regional_config(self, config_val):
if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG:
raise botocore.exceptions.\
InvalidS3UsEast1RegionalEndpointConfigError(
s3_us_east_1_regional_endpoint_config=config_val)
def _set_region_if_custom_s3_endpoint(self, endpoint_config,
endpoint_bridge):
# If a user is providing a custom URL, the endpoint resolver will
# refuse to infer a signing region. If we want to default to s3v4,
# we have to account for this.
if endpoint_config['signing_region'] is None \
and endpoint_config['region_name'] is None:
endpoint = endpoint_bridge.resolve('s3')
endpoint_config['signing_region'] = endpoint['signing_region']
endpoint_config['region_name'] = endpoint['region_name']
def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs):
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
if self._should_set_global_sts_endpoint(
resolve_endpoint_kwargs['region_name'],
resolve_endpoint_kwargs['endpoint_url']):
self._set_global_sts_endpoint(
endpoint_config, resolve_endpoint_kwargs['is_secure'])
return endpoint_config
def _should_set_global_sts_endpoint(self, region_name, endpoint_url):
if endpoint_url:
return False
return (
self._get_sts_regional_endpoints_config() == 'legacy' and
region_name in LEGACY_GLOBAL_STS_REGIONS
)
def _get_sts_regional_endpoints_config(self):
sts_regional_endpoints_config = self._config_store.get_config_variable(
'sts_regional_endpoints')
if not sts_regional_endpoints_config:
sts_regional_endpoints_config = 'legacy'
if sts_regional_endpoints_config not in \
VALID_REGIONAL_ENDPOINTS_CONFIG:
raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError(
sts_regional_endpoints_config=sts_regional_endpoints_config)
return sts_regional_endpoints_config
def _set_global_sts_endpoint(self, endpoint_config, is_secure):
scheme = 'https' if is_secure else 'http'
endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme
endpoint_config['signing_region'] = 'us-east-1'
def _resolve_endpoint(self, service_name, region_name,
endpoint_url, is_secure, endpoint_bridge):
return endpoint_bridge.resolve(
service_name, region_name, endpoint_url, is_secure)
def _compute_socket_options(self, scoped_config):
        # This disables Nagle's algorithm; it matches the default socket
        # options used by urllib3.
socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
if scoped_config:
# Enables TCP Keepalive if specified in shared config file.
if self._ensure_boolean(scoped_config.get('tcp_keepalive', False)):
socket_options.append(
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
return socket_options
def _compute_retry_config(self, config_kwargs):
self._compute_retry_max_attempts(config_kwargs)
self._compute_retry_mode(config_kwargs)
def _compute_retry_max_attempts(self, config_kwargs):
# There's a pre-existing max_attempts client config value that actually
# means max *retry* attempts. There's also a `max_attempts` we pull
# from the config store that means *total attempts*, which includes the
        # initial request. We can't change what `max_attempts` means in
# client config so we try to normalize everything to a new
# "total_max_attempts" variable. We ensure that after this, the only
# configuration for "max attempts" is the 'total_max_attempts' key.
# An explicitly provided max_attempts in the client config
# overrides everything.
retries = config_kwargs.get('retries')
if retries is not None:
if 'total_max_attempts' in retries:
retries.pop('max_attempts', None)
return
if 'max_attempts' in retries:
value = retries.pop('max_attempts')
# client config max_attempts means total retries so we
# have to add one for 'total_max_attempts' to account
# for the initial request.
retries['total_max_attempts'] = value + 1
return
        # Otherwise we'll check the config store, which checks env vars,
        # config files, etc. There is no default value for max_attempts,
        # so if this returns None we don't set a default value here.
max_attempts = self._config_store.get_config_variable('max_attempts')
if max_attempts is not None:
if retries is None:
retries = {}
config_kwargs['retries'] = retries
retries['total_max_attempts'] = max_attempts
def _compute_retry_mode(self, config_kwargs):
retries = config_kwargs.get('retries')
if retries is None:
retries = {}
config_kwargs['retries'] = retries
elif 'mode' in retries:
# If there's a retry mode explicitly set in the client config
# that overrides everything.
return
retry_mode = self._config_store.get_config_variable('retry_mode')
if retry_mode is None:
retry_mode = 'legacy'
retries['mode'] = retry_mode
def _ensure_boolean(self, val):
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
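# Editorial sketch (not botocore source): a standalone restatement of the
# normalization done in _compute_retry_max_attempts above. A client-config
# 'max_attempts' counts retries, so one is added for the initial request to
# produce 'total_max_attempts'.
def _example_normalize_retries(retries):
    normalized = dict(retries)
    if 'total_max_attempts' in normalized:
        normalized.pop('max_attempts', None)
    elif 'max_attempts' in normalized:
        normalized['total_max_attempts'] = normalized.pop('max_attempts') + 1
    return normalized
# _example_normalize_retries({'max_attempts': 3}) == {'total_max_attempts': 4}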
| 16,199 |
Python
| 42.665768 | 79 | 0.618063 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/__init__.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.20.49'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where an acronym
# name is pluralized, e.g. GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
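# Editorial sketch (not botocore source): expected behaviour of xform_name on
# a few representative inputs, including the pluralized-acronym special case.
def _example_xform_name():
    assert xform_name('DescribeInstances') == 'describe_instances'
    assert xform_name('ListWebACLs') == 'list_web_acls'
    # A name that already contains the separator is returned unchanged.
    assert xform_name('already_transformed') == 'already_transformed'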
| 3,881 |
Python
| 38.212121 | 78 | 0.681783 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/waiter.py
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
import logging
import time
from botocore.utils import get_service_module_name
from botocore.docs.docstring import WaiterDocstring
from .exceptions import WaiterError, ClientError, WaiterConfigError
from . import xform_name
logger = logging.getLogger(__name__)
def create_waiter_with_client(waiter_name, waiter_model, client):
"""
:type waiter_name: str
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiter_model: botocore.waiter.WaiterModel
:param waiter_model: The model for the waiter configuration.
:type client: botocore.client.BaseClient
:param client: The botocore client associated with the service.
:rtype: botocore.waiter.Waiter
:return: The waiter object.
"""
single_waiter_config = waiter_model.get_waiter(waiter_name)
operation_name = xform_name(single_waiter_config.operation)
operation_method = NormalizedOperationMethod(
getattr(client, operation_name))
# Create a new wait method that will serve as a proxy to the underlying
# Waiter.wait method. This is needed to attach a docstring to the
# method.
def wait(self, **kwargs):
Waiter.wait(self, **kwargs)
wait.__doc__ = WaiterDocstring(
waiter_name=waiter_name,
event_emitter=client.meta.events,
service_model=client.meta.service_model,
service_waiter_model=waiter_model,
include_signature=False
)
# Rename the waiter class based on the type of waiter.
waiter_class_name = str('%s.Waiter.%s' % (
get_service_module_name(client.meta.service_model),
waiter_name))
# Create the new waiter class
documented_waiter_cls = type(
waiter_class_name, (Waiter,), {'wait': wait})
# Return an instance of the new waiter class.
return documented_waiter_cls(
waiter_name, single_waiter_config, operation_method
)
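# Editorial usage sketch (not botocore source): client.get_waiter() calls the
# factory above under the hood. A direct call, assuming a botocore S3 client
# and a data loader obtained from a session, would look roughly like:
#
#     waiter_model = WaiterModel(loader.load_service_model('s3', 'waiters-2'))
#     waiter = create_waiter_with_client('BucketExists', waiter_model, client)
#     waiter.wait(Bucket='my-bucket')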
def is_valid_waiter_error(response):
error = response.get('Error')
if isinstance(error, dict) and 'Code' in error:
return True
return False
class NormalizedOperationMethod(object):
def __init__(self, client_method):
self._client_method = client_method
def __call__(self, **kwargs):
try:
return self._client_method(**kwargs)
except ClientError as e:
return e.response
class WaiterModel(object):
SUPPORTED_VERSION = 2
def __init__(self, waiter_config):
"""
Note that the WaiterModel takes ownership of the waiter_config.
It may or may not mutate the waiter_config. If this is a concern,
it is best to make a copy of the waiter config before passing it to
the WaiterModel.
:type waiter_config: dict
:param waiter_config: The loaded waiter config
from the <service>*.waiters.json file. This can be
obtained from a botocore Loader object as well.
"""
self._waiter_config = waiter_config['waiters']
# These are part of the public API. Changing these
# will result in having to update the consuming code,
# so don't change unless you really need to.
version = waiter_config.get('version', 'unknown')
self._verify_supported_version(version)
self.version = version
self.waiter_names = list(sorted(waiter_config['waiters'].keys()))
def _verify_supported_version(self, version):
if version != self.SUPPORTED_VERSION:
raise WaiterConfigError(
error_msg=("Unsupported waiter version, supported version "
"must be: %s, but version of waiter config "
"is: %s" % (self.SUPPORTED_VERSION,
version)))
def get_waiter(self, waiter_name):
try:
single_waiter_config = self._waiter_config[waiter_name]
except KeyError:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return SingleWaiterConfig(single_waiter_config)
class SingleWaiterConfig(object):
"""Represents the waiter configuration for a single waiter.
A single waiter is considered the configuration for a single
value associated with a named waiter (i.e TableExists).
"""
def __init__(self, single_waiter_config):
self._config = single_waiter_config
# These attributes are part of the public API.
self.description = single_waiter_config.get('description', '')
# Per the spec, these three fields are required.
self.operation = single_waiter_config['operation']
self.delay = single_waiter_config['delay']
self.max_attempts = single_waiter_config['maxAttempts']
@property
def acceptors(self):
acceptors = []
for acceptor_config in self._config['acceptors']:
acceptor = AcceptorConfig(acceptor_config)
acceptors.append(acceptor)
return acceptors
class AcceptorConfig(object):
def __init__(self, config):
self.state = config['state']
self.matcher = config['matcher']
self.expected = config['expected']
self.argument = config.get('argument')
self.matcher_func = self._create_matcher_func()
@property
def explanation(self):
if self.matcher == 'path':
return 'For expression "%s" we matched expected path: "%s"' % (self.argument, self.expected)
elif self.matcher == 'pathAll':
return 'For expression "%s" all members matched excepted path: "%s"' % (self.argument, self.expected)
elif self.matcher == 'pathAny':
return 'For expression "%s" we matched expected path: "%s" at least once' % (self.argument, self.expected)
elif self.matcher == 'status':
return 'Matched expected HTTP status code: %s' % self.expected
elif self.matcher == 'error':
return 'Matched expected service error code: %s' % self.expected
else:
return 'No explanation for unknown waiter type: "%s"' % self.matcher
def _create_matcher_func(self):
        # An acceptor function is a callable that takes a single value: the
        # parsed AWS response. Note that the parsed error response is also
        # provided in the case of errors, so it's entirely possible to
        # handle all the available matcher capabilities in the future.
        # There are only a handful of supported matchers, so for now this is
        # all contained in a single method. If this grows, we can expand it
        # out to separate methods or even objects.
if self.matcher == 'path':
return self._create_path_matcher()
elif self.matcher == 'pathAll':
return self._create_path_all_matcher()
elif self.matcher == 'pathAny':
return self._create_path_any_matcher()
elif self.matcher == 'status':
return self._create_status_matcher()
elif self.matcher == 'error':
return self._create_error_matcher()
else:
raise WaiterConfigError(
error_msg="Unknown acceptor: %s" % self.matcher)
def _create_path_matcher(self):
expression = jmespath.compile(self.argument)
expected = self.expected
def acceptor_matches(response):
if is_valid_waiter_error(response):
return
return expression.search(response) == expected
return acceptor_matches
def _create_path_all_matcher(self):
expression = jmespath.compile(self.argument)
expected = self.expected
def acceptor_matches(response):
if is_valid_waiter_error(response):
return
result = expression.search(response)
if not isinstance(result, list) or not result:
# pathAll matcher must result in a list.
# Also we require at least one element in the list,
# that is, an empty list should not result in this
# acceptor match.
return False
for element in result:
if element != expected:
return False
return True
return acceptor_matches
def _create_path_any_matcher(self):
expression = jmespath.compile(self.argument)
expected = self.expected
def acceptor_matches(response):
if is_valid_waiter_error(response):
return
result = expression.search(response)
if not isinstance(result, list) or not result:
# pathAny matcher must result in a list.
# Also we require at least one element in the list,
# that is, an empty list should not result in this
# acceptor match.
return False
for element in result:
if element == expected:
return True
return False
return acceptor_matches
def _create_status_matcher(self):
expected = self.expected
def acceptor_matches(response):
# We don't have any requirements on the expected incoming data
# other than it is a dict, so we don't assume there's
# a ResponseMetadata.HTTPStatusCode.
status_code = response.get('ResponseMetadata', {}).get(
'HTTPStatusCode')
return status_code == expected
return acceptor_matches
def _create_error_matcher(self):
expected = self.expected
def acceptor_matches(response):
# When the client encounters an error, it will normally raise
# an exception. However, the waiter implementation will catch
# this exception, and instead send us the parsed error
# response. So response is still a dictionary, and in the case
# of an error response will contain the "Error" and
# "ResponseMetadata" key.
return response.get("Error", {}).get("Code", "") == expected
return acceptor_matches
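# Editorial sketch (not botocore source): a 'path' acceptor built from a
# hand-written config and run against a made-up parsed response.
def _example_path_acceptor():
    acceptor = AcceptorConfig({
        'state': 'success',
        'matcher': 'path',
        'expected': 'available',
        'argument': 'Table.TableStatus',
    })
    response = {'Table': {'TableStatus': 'available'}}
    # The compiled JMESPath expression is searched against the response.
    return acceptor.matcher_func(response)  # True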
class Waiter(object):
def __init__(self, name, config, operation_method):
"""
:type name: string
:param name: The name of the waiter
:type config: botocore.waiter.SingleWaiterConfig
:param config: The configuration for the waiter.
:type operation_method: callable
:param operation_method: A callable that accepts **kwargs
and returns a response. For example, this can be
a method from a botocore client.
"""
self._operation_method = operation_method
# The two attributes are exposed to allow for introspection
# and documentation.
self.name = name
self.config = config
def wait(self, **kwargs):
acceptors = list(self.config.acceptors)
current_state = 'waiting'
# pop the invocation specific config
config = kwargs.pop('WaiterConfig', {})
sleep_amount = config.get('Delay', self.config.delay)
max_attempts = config.get('MaxAttempts', self.config.max_attempts)
last_matched_acceptor = None
num_attempts = 0
while True:
response = self._operation_method(**kwargs)
num_attempts += 1
for acceptor in acceptors:
if acceptor.matcher_func(response):
last_matched_acceptor = acceptor
current_state = acceptor.state
break
else:
# If none of the acceptors matched, we should
# transition to the failure state if an error
# response was received.
if is_valid_waiter_error(response):
# Transition to a failure state, which we
# can just handle here by raising an exception.
raise WaiterError(
name=self.name,
reason='An error occurred (%s): %s' % (
response['Error'].get('Code', 'Unknown'),
response['Error'].get('Message', 'Unknown'),
),
last_response=response,
)
if current_state == 'success':
logger.debug("Waiting complete, waiter matched the "
"success state.")
return
if current_state == 'failure':
reason = 'Waiter encountered a terminal failure state: %s' % (
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
if num_attempts >= max_attempts:
if last_matched_acceptor is None:
reason = 'Max attempts exceeded'
else:
                    reason = (
                        'Max attempts exceeded. Previously accepted state: '
                        '%s' % last_matched_acceptor.explanation
                    )
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
time.sleep(sleep_amount)
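# Editorial sketch (not botocore source): constructing a WaiterModel from an
# inline config dict; real configs come from a service's waiters-2.json file,
# and 'DescribeThing' is a made-up operation name.
def _example_waiter_model():
    model = WaiterModel({
        'version': 2,
        'waiters': {
            'ThingExists': {
                'operation': 'DescribeThing',
                'delay': 5,
                'maxAttempts': 3,
                'acceptors': [
                    {'state': 'success', 'matcher': 'status', 'expected': 200},
                ],
            },
        },
    })
    config = model.get_waiter('ThingExists')
    return config.operation, config.delay, config.max_attempts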
| 14,070 |
Python
| 37.236413 | 118 | 0.599005 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/configprovider.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains the inteface for controlling how configuration
is loaded.
"""
import logging
import os
from botocore import utils
logger = logging.getLogger(__name__)
#: A default dictionary that maps the logical names for session variables
#: to the specific environment variables and configuration file names
#: that contain the values for these variables.
#: When creating a new Session object, you can pass in your own dictionary
#: to remap the logical names or to add new logical names. You can then
#: get the current value for these variables by using the
#: ``get_config_variable`` method of the :class:`botocore.session.Session`
#: class.
#: These form the keys of the dictionary. The values in the dictionary
#: are tuples of (<config_name>, <environment variable>, <default value>,
#: <conversion func>).
#: The conversion func is a function that takes the configuration value
#: as an argument and returns the converted value. If this value is
#: None, then the configuration value is returned unmodified. This
#: conversion function can be used to type convert config values to
#: values other than the default values of strings.
#: The ``profile`` and ``config_file`` variables should always have a
#: None value for the first entry in the tuple because it doesn't make
#: sense to look inside the config file for the location of the config
#: file or for the default profile to use.
#: The ``config_name`` is the name to look for in the configuration file,
#: the ``env var`` is the OS environment variable (``os.environ``) to
#: use, and ``default_value`` is the value to use if no value is otherwise
#: found.
BOTOCORE_DEFAUT_SESSION_VARIABLES = {
# logical: config_file, env_var, default_value, conversion_func
'profile': (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None),
'region': ('region', 'AWS_DEFAULT_REGION', None, None),
'data_path': ('data_path', 'AWS_DATA_PATH', None, None),
'config_file': (None, 'AWS_CONFIG_FILE', '~/.aws/config', None),
'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE', None, None),
'api_versions': ('api_versions', None, {}, None),
    # This is the credentials file shared among the AWS SDKs.
'credentials_file': (None, 'AWS_SHARED_CREDENTIALS_FILE',
'~/.aws/credentials', None),
# These variables only exist in the config file.
# This is the number of seconds until we time out a request to
# the instance metadata service.
'metadata_service_timeout': (
'metadata_service_timeout',
'AWS_METADATA_SERVICE_TIMEOUT', 1, int),
# This is the number of request attempts we make until we give
# up trying to retrieve data from the instance metadata service.
'metadata_service_num_attempts': (
'metadata_service_num_attempts',
'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int),
'ec2_metadata_service_endpoint': (
'ec2_metadata_service_endpoint',
'AWS_EC2_METADATA_SERVICE_ENDPOINT',
None, None),
'imds_use_ipv6': (
'imds_use_ipv6',
'AWS_IMDS_USE_IPV6',
False, None),
'parameter_validation': ('parameter_validation', None, True, None),
# Client side monitoring configurations.
# Note: These configurations are considered internal to botocore.
# Do not use them until publicly documented.
'csm_enabled': (
'csm_enabled', 'AWS_CSM_ENABLED', False, utils.ensure_boolean),
'csm_host': ('csm_host', 'AWS_CSM_HOST', '127.0.0.1', None),
'csm_port': ('csm_port', 'AWS_CSM_PORT', 31000, int),
'csm_client_id': ('csm_client_id', 'AWS_CSM_CLIENT_ID', '', None),
# Endpoint discovery configuration
'endpoint_discovery_enabled': (
'endpoint_discovery_enabled', 'AWS_ENDPOINT_DISCOVERY_ENABLED',
'auto', None),
'sts_regional_endpoints': (
'sts_regional_endpoints', 'AWS_STS_REGIONAL_ENDPOINTS', 'legacy',
None
),
'retry_mode': ('retry_mode', 'AWS_RETRY_MODE', 'legacy', None),
# We can't have a default here for v1 because we need to defer to
# whatever the defaults are in _retry.json.
'max_attempts': ('max_attempts', 'AWS_MAX_ATTEMPTS', None, int),
}
# A mapping for the s3 specific configuration vars. These are the configuration
# vars that typically go in the s3 section of the config file. This mapping
# follows the same schema as the previous session variable mapping.
DEFAULT_S3_CONFIG_VARS = {
'addressing_style': (
('s3', 'addressing_style'), None, None, None),
'use_accelerate_endpoint': (
('s3', 'use_accelerate_endpoint'), None, None, utils.ensure_boolean
),
'use_dualstack_endpoint': (
('s3', 'use_dualstack_endpoint'), None, None, utils.ensure_boolean
),
'payload_signing_enabled': (
('s3', 'payload_signing_enabled'), None, None, utils.ensure_boolean
),
'use_arn_region': (
['s3_use_arn_region',
('s3', 'use_arn_region')],
'AWS_S3_USE_ARN_REGION', None, utils.ensure_boolean
),
'us_east_1_regional_endpoint': (
['s3_us_east_1_regional_endpoint',
('s3', 'us_east_1_regional_endpoint')],
'AWS_S3_US_EAST_1_REGIONAL_ENDPOINT', None, None
)
}
# A mapping for the proxy specific configuration vars. These are
# used to configure how botocore interacts with proxy setups while
# sending requests.
DEFAULT_PROXIES_CONFIG_VARS = {
'proxy_ca_bundle': ('proxy_ca_bundle', None, None, None),
'proxy_client_cert': ('proxy_client_cert', None, None, None),
'proxy_use_forwarding_for_https': (
'proxy_use_forwarding_for_https', None, None, utils.normalize_boolean),
}
def create_botocore_default_config_mapping(session):
chain_builder = ConfigChainFactory(session=session)
config_mapping = _create_config_chain_mapping(
chain_builder, BOTOCORE_DEFAUT_SESSION_VARIABLES)
config_mapping['s3'] = SectionConfigProvider(
's3', session, _create_config_chain_mapping(
chain_builder, DEFAULT_S3_CONFIG_VARS)
)
config_mapping['proxies_config'] = SectionConfigProvider(
'proxies_config', session, _create_config_chain_mapping(
chain_builder, DEFAULT_PROXIES_CONFIG_VARS)
)
return config_mapping
def _create_config_chain_mapping(chain_builder, config_variables):
mapping = {}
for logical_name, config in config_variables.items():
mapping[logical_name] = chain_builder.create_config_chain(
instance_name=logical_name,
env_var_names=config[1],
config_property_names=config[0],
default=config[2],
conversion_func=config[3]
)
return mapping
class ConfigChainFactory(object):
"""Factory class to create our most common configuration chain case.
This is a convenience class to construct configuration chains that follow
our most common pattern. This is to prevent ordering them incorrectly,
and to make the config chain construction more readable.
"""
def __init__(self, session, environ=None):
"""Initialize a ConfigChainFactory.
:type session: :class:`botocore.session.Session`
:param session: This is the session that should be used to look up
values from the config file.
:type environ: dict
:param environ: A mapping to use for environment variables. If this
is not provided it will default to use os.environ.
"""
self._session = session
if environ is None:
environ = os.environ
self._environ = environ
def create_config_chain(self, instance_name=None, env_var_names=None,
config_property_names=None, default=None,
conversion_func=None):
"""Build a config chain following the standard botocore pattern.
        In botocore most of our config chains follow the precedence:
        session_instance_variables, environment, config_file, default_value.
        This is a convenience function for creating a chain that follows
        that precedence.
:type instance_name: str
:param instance_name: This indicates what session instance variable
corresponds to this config value. If it is None it will not be
added to the chain.
:type env_var_names: str or list of str or None
:param env_var_names: One or more environment variable names to
search for this value. They are searched in order. If it is None
it will not be added to the chain.
:type config_property_names: str/tuple or list of str/tuple or None
        :param config_property_names: One or more strings or tuples
representing the name of the key in the config file for this
config option. They are searched in order. If it is None it will
not be added to the chain.
:type default: Any
:param default: Any constant value to be returned.
:type conversion_func: None or callable
        :param conversion_func: If this value is None then it has no effect on
            the return type. Otherwise, it is treated as a function that will
            convert the provided value.
        :rtype: ChainProvider
        :returns: A ChainProvider that resolves in the order env_var_names ->
            config_property_names -> default. Any values that were None are
            omitted from the chain.
"""
providers = []
if instance_name is not None:
providers.append(
InstanceVarProvider(
instance_var=instance_name,
session=self._session
)
)
if env_var_names is not None:
providers.extend(self._get_env_providers(env_var_names))
if config_property_names is not None:
providers.extend(
self._get_scoped_config_providers(config_property_names)
)
if default is not None:
providers.append(ConstantProvider(value=default))
return ChainProvider(
providers=providers,
conversion_func=conversion_func,
)
def _get_env_providers(self, env_var_names):
env_var_providers = []
if not isinstance(env_var_names, list):
env_var_names = [env_var_names]
for env_var_name in env_var_names:
env_var_providers.append(
EnvironmentProvider(name=env_var_name, env=self._environ)
)
return env_var_providers
def _get_scoped_config_providers(self, config_property_names):
scoped_config_providers = []
if not isinstance(config_property_names, list):
config_property_names = [config_property_names]
for config_property_name in config_property_names:
scoped_config_providers.append(
ScopedConfigProvider(
config_var_name=config_property_name,
session=self._session,
)
)
return scoped_config_providers
class ConfigValueStore(object):
"""The ConfigValueStore object stores configuration values."""
def __init__(self, mapping=None):
"""Initialize a ConfigValueStore.
:type mapping: dict
:param mapping: The mapping parameter is a map of string to a subclass
of BaseProvider. When a config variable is asked for via the
get_config_variable method, the corresponding provider will be
invoked to load the value.
"""
self._overrides = {}
self._mapping = {}
if mapping is not None:
for logical_name, provider in mapping.items():
self.set_config_provider(logical_name, provider)
def get_config_variable(self, logical_name):
"""
        Retrieve the value associated with the specified logical_name
        from the corresponding provider. If no value is found, None will
        be returned.
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to retrieve. This name will be mapped to the
appropriate environment variable name for this session as
well as the appropriate config file entry.
:returns: value of variable or None if not defined.
"""
if logical_name in self._overrides:
return self._overrides[logical_name]
if logical_name not in self._mapping:
return None
provider = self._mapping[logical_name]
return provider.provide()
def set_config_variable(self, logical_name, value):
"""Set a configuration variable to a specific value.
By using this method, you can override the normal lookup
process used in ``get_config_variable`` by explicitly setting
a value. Subsequent calls to ``get_config_variable`` will
use the ``value``. This gives you per-session specific
configuration values.
::
>>> # Assume logical name 'foo' maps to env var 'FOO'
>>> os.environ['FOO'] = 'myvalue'
>>> s.get_config_variable('foo')
'myvalue'
>>> s.set_config_variable('foo', 'othervalue')
>>> s.get_config_variable('foo')
'othervalue'
:type logical_name: str
:param logical_name: The logical name of the session variable
you want to set. These are the keys in ``SESSION_VARIABLES``.
:param value: The value to associate with the config variable.
"""
self._overrides[logical_name] = value
def clear_config_variable(self, logical_name):
"""Remove an override config variable from the session.
:type logical_name: str
:param logical_name: The name of the parameter to clear the override
value from.
"""
self._overrides.pop(logical_name, None)
def set_config_provider(self, logical_name, provider):
"""Set the provider for a config value.
This provides control over how a particular configuration value is
loaded. This replaces the provider for ``logical_name`` with the new
``provider``.
:type logical_name: str
:param logical_name: The name of the config value to change the config
provider for.
:type provider: :class:`botocore.configprovider.BaseProvider`
:param provider: The new provider that should be responsible for
providing a value for the config named ``logical_name``.
"""
self._mapping[logical_name] = provider
class BaseProvider(object):
"""Base class for configuration value providers.
A configuration provider has some method of providing a configuration
value.
"""
def provide(self):
"""Provide a config value."""
raise NotImplementedError('provide')
class ChainProvider(BaseProvider):
"""This provider wraps one or more other providers.
    Each provider in the chain is called; the first one to return a non-None
    value wins, and that value is returned.
"""
def __init__(self, providers=None, conversion_func=None):
"""Initalize a ChainProvider.
:type providers: list
:param providers: The initial list of providers to check for values
when invoked.
:type conversion_func: None or callable
        :param conversion_func: If this value is None then it has no effect on
            the return type. Otherwise, it is treated as a function that will
            transform the provided value.
"""
if providers is None:
providers = []
self._providers = providers
self._conversion_func = conversion_func
def provide(self):
"""Provide the value from the first provider to return non-None.
        Each provider in the chain has its provide method called. The first
        one in the chain to return a non-None value is returned from the
        ChainProvider. When no non-None value is found, None is returned.
"""
for provider in self._providers:
value = provider.provide()
if value is not None:
return self._convert_type(value)
return None
def _convert_type(self, value):
if self._conversion_func is not None:
return self._conversion_func(value)
return value
def __repr__(self):
return '[%s]' % ', '.join([str(p) for p in self._providers])
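# Editorial sketch (not botocore source): the first provider in the chain to
# return a non-None value wins, and the conversion function runs on that
# winner. EnvironmentProvider and ConstantProvider are defined later in this
# module.
def _example_chain_provider():
    chain = ChainProvider(
        providers=[
            EnvironmentProvider(name='MY_VAR', env={}),  # returns None
            EnvironmentProvider(name='MY_VAR', env={'MY_VAR': '10'}),
            ConstantProvider(value='99'),  # never consulted
        ],
        conversion_func=int,
    )
    return chain.provide()  # 10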
class InstanceVarProvider(BaseProvider):
"""This class loads config values from the session instance vars."""
def __init__(self, instance_var, session):
"""Initialize InstanceVarProvider.
:type instance_var: str
:param instance_var: The instance variable to load from the session.
:type session: :class:`botocore.session.Session`
:param session: The botocore session to get the loaded configuration
file variables from.
"""
self._instance_var = instance_var
self._session = session
def provide(self):
"""Provide a config value from the session instance vars."""
instance_vars = self._session.instance_variables()
value = instance_vars.get(self._instance_var)
return value
def __repr__(self):
return 'InstanceVarProvider(instance_var=%s, session=%s)' % (
self._instance_var,
self._session,
)
class ScopedConfigProvider(BaseProvider):
def __init__(self, config_var_name, session):
"""Initialize ScopedConfigProvider.
:type config_var_name: str or tuple
:param config_var_name: The name of the config variable to load from
the configuration file. If the value is a tuple, it must only
consist of two items, where the first item represents the section
and the second item represents the config var name in the section.
:type session: :class:`botocore.session.Session`
:param session: The botocore session to get the loaded configuration
file variables from.
"""
self._config_var_name = config_var_name
self._session = session
def provide(self):
"""Provide a value from a config file property."""
scoped_config = self._session.get_scoped_config()
if isinstance(self._config_var_name, tuple):
section_config = scoped_config.get(self._config_var_name[0])
if not isinstance(section_config, dict):
return None
return section_config.get(self._config_var_name[1])
return scoped_config.get(self._config_var_name)
def __repr__(self):
return 'ScopedConfigProvider(config_var_name=%s, session=%s)' % (
self._config_var_name,
self._session,
)
class EnvironmentProvider(BaseProvider):
"""This class loads config values from environment variables."""
def __init__(self, name, env):
"""Initialize with the keys in the dictionary to check.
:type name: str
:param name: The key with that name will be loaded and returned.
:type env: dict
:param env: Environment variables dictionary to get variables from.
"""
self._name = name
self._env = env
def provide(self):
"""Provide a config value from a source dictionary."""
if self._name in self._env:
return self._env[self._name]
return None
def __repr__(self):
return 'EnvironmentProvider(name=%s, env=%s)' % (self._name, self._env)
class SectionConfigProvider(BaseProvider):
"""Provides a dictionary from a section in the scoped config
This is useful for retrieving scoped config variables (i.e. s3) that have
their own set of config variables and resolving logic.
"""
def __init__(self, section_name, session, override_providers=None):
self._section_name = section_name
self._session = session
self._scoped_config_provider = ScopedConfigProvider(
self._section_name, self._session)
self._override_providers = override_providers
if self._override_providers is None:
self._override_providers = {}
def provide(self):
section_config = self._scoped_config_provider.provide()
if section_config and not isinstance(section_config, dict):
logger.debug("The %s config key is not a dictionary type, "
"ignoring its value of: %s", self._section_name,
section_config)
return None
for section_config_var, provider in self._override_providers.items():
provider_val = provider.provide()
if provider_val is not None:
if section_config is None:
section_config = {}
section_config[section_config_var] = provider_val
return section_config
def __repr__(self):
return (
'SectionConfigProvider(section_name=%s, '
'session=%s, override_providers=%s)' % (
self._section_name, self._session,
self._override_providers,
)
)
class ConstantProvider(BaseProvider):
"""This provider provides a constant value."""
def __init__(self, value):
self._value = value
def provide(self):
"""Provide the constant value given during initialization."""
return self._value
def __repr__(self):
return 'ConstantProvider(value=%s)' % self._value
| 22,235 |
Python
| 38.636364 | 79 | 0.636609 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/endpoint.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
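# Editorial sketch (not botocore source): minimal stand-in objects showing the
# shape convert_to_response_dict produces for a plain, non-streaming response.
def _example_response_dict():
    class _FakeHTTPResponse(object):
        headers = {'content-type': 'application/json'}
        status_code = 200
        content = b'{}'
    class _FakeOperationModel(object):
        name = 'ListBuckets'
        has_event_stream_output = False
        has_streaming_output = False
    # -> {'headers': {...}, 'status_code': 200,
    #     'context': {'operation_name': 'ListBuckets'}, 'body': b'{}'}
    return convert_to_response_dict(_FakeHTTPResponse(), _FakeOperationModel())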
class Endpoint(object):
"""
Represents an endpoint for a particular service in a specific
region. Only an endpoint can make requests.
:ivar service: The Service object that describes this endpoints
service.
:ivar host: The fully qualified endpoint hostname.
:ivar session: The session object.
"""
def __init__(self, host, endpoint_prefix, event_emitter,
response_parser_factory=None, http_session=None):
self._endpoint_prefix = endpoint_prefix
self._event_emitter = event_emitter
self.host = host
self._lock = threading.Lock()
if response_parser_factory is None:
response_parser_factory = parsers.ResponseParserFactory()
self._response_parser_factory = response_parser_factory
self.http_session = http_session
if self.http_session is None:
self.http_session = URLLib3Session()
def __repr__(self):
return '%s(%s)' % (self._endpoint_prefix, self.host)
def make_request(self, operation_model, request_dict):
logger.debug("Making request for %s with params: %s",
operation_model, request_dict)
return self._send_request(request_dict, operation_model)
def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
def _encode_headers(self, headers):
# In place encoding of headers to utf-8 if they are unicode.
for key, value in headers.items():
if isinstance(value, six.text_type):
headers[key] = value.encode('utf-8')
def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
def _send_request(self, request_dict, operation_model):
attempts = 1
request = self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = self.create_request(
request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (service_id, operation_model.name)
responses = self._event_emitter.emit(event_name, request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = self._send(request)
except HTTPClientError as e:
return (None, e)
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return (None, e)
# This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
# Do a second parsing pass to pick up on any modeled error fields
# NOTE: Ideally, we would push this down into the parser classes but
# they currently have no reference to the operation or service model
# The parsers should probably take the operation model instead of
# output shape but we can't change that now
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
def _add_modeled_error_fields(
self, response_dict, parsed_response,
operation_model, parser,
):
error_code = parsed_response.get("Error", {}).get("Code")
if error_code is None:
return
service_model = operation_model.service_model
error_shape = service_model.shape_for_error_code(error_code)
if error_shape is None:
return
modeled_parse = parser.parse(response_dict, error_shape)
# TODO: avoid naming conflicts with ResponseMetadata and Error
parsed_response.update(modeled_parse)
def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
            # Request needs to be retried, and we need to sleep
            # for the specified number of seconds.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
time.sleep(handler_response)
return True
def _send(self, request):
return self.http_session.send(request)
class EndpointCreator(object):
def __init__(self, event_emitter):
self._event_emitter = event_emitter
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=URLLib3Session,
proxies=None,
socket_options=None,
client_cert=None,
proxies_config=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
http_session = http_session_cls(
timeout=timeout,
proxies=proxies,
verify=self._get_verify_value(verify),
max_pool_connections=max_pool_connections,
socket_options=socket_options,
client_cert=client_cert,
proxies_config=proxies_config
)
return Endpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=http_session
)
def _get_proxies(self, url):
# We could also support getting proxies from a config file,
# but for now proxy support is taken from the environment.
return get_environ_proxies(url)
def _get_verify_value(self, verify):
# This is to account for:
# https://github.com/kennethreitz/requests/issues/1436
# where we need to honor REQUESTS_CA_BUNDLE because we're creating our
# own request objects.
# First, if verify is not None, then the user explicitly specified
# a value so this automatically wins.
if verify is not None:
return verify
# Otherwise use the value from REQUESTS_CA_BUNDLE, or default to
# True if the env var does not exist.
return os.environ.get('REQUESTS_CA_BUNDLE', True)
| 13,811 |
Python
| 41.109756 | 81 | 0.627543 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/validate.py
|
"""User input parameter validation.
This module handles user input parameter validation
against a provided input model.
Note that the objects in this module do *not* mutate any
arguments. No type conversion happens here. It is up to another
layer to properly convert arguments to any required types.
Validation Errors
-----------------
"""
from botocore.compat import six
import decimal
import json
from datetime import datetime
from botocore.utils import parse_to_aware_datetime
from botocore.utils import is_json_value_header
from botocore.exceptions import ParamValidationError
def validate_parameters(params, shape):
"""Validates input parameters against a schema.
This is a convenience function that validates parameters against a schema.
You can also instantiate and use the ParamValidator class directly if you
want more control.
If there are any validation errors then a ParamValidationError
will be raised. If there are no validation errors then no exception
is raised and a value of None is returned.
:param params: The user provided input parameters.
:type shape: botocore.model.Shape
:param shape: The schema which the input parameters should
adhere to.
:raise: ParamValidationError
"""
validator = ParamValidator()
report = validator.validate(params, shape)
if report.has_errors():
raise ParamValidationError(report=report.generate_report())
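# Usage sketch (illustrative only; the service and operation names below are
# assumptions, not requirements of this module). Shapes normally come from a
# loaded service model:
#
#     import botocore.session
#     model = botocore.session.get_session().get_service_model('dynamodb')
#     shape = model.operation_model('PutItem').input_shape
#     validate_parameters({'TableName': 'my-table'}, shape)
#     # -> raises ParamValidationError (required 'Item' field is missing)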
def type_check(valid_types):
def _create_type_check_guard(func):
def _on_passes_type_check(self, param, shape, errors, name):
if _type_check(param, errors, name):
return func(self, param, shape, errors, name)
def _type_check(param, errors, name):
if not isinstance(param, valid_types):
valid_type_names = [six.text_type(t) for t in valid_types]
errors.report(name, 'invalid type', param=param,
valid_types=valid_type_names)
return False
return True
return _on_passes_type_check
return _create_type_check_guard
def range_check(name, value, shape, error_type, errors):
failed = False
min_allowed = float('-inf')
if 'min' in shape.metadata:
min_allowed = shape.metadata['min']
if value < min_allowed:
failed = True
elif hasattr(shape, 'serialization'):
# Members that can be bound to the host have an implicit min of 1
if shape.serialization.get('hostLabel'):
min_allowed = 1
if value < min_allowed:
failed = True
if failed:
errors.report(name, error_type, param=value, min_allowed=min_allowed)
class ValidationErrors(object):
def __init__(self):
self._errors = []
def has_errors(self):
if self._errors:
return True
return False
def generate_report(self):
error_messages = []
for error in self._errors:
error_messages.append(self._format_error(error))
return '\n'.join(error_messages)
def _format_error(self, error):
error_type, name, additional = error
name = self._get_name(name)
if error_type == 'missing required field':
return 'Missing required parameter in %s: "%s"' % (
name, additional['required_name'])
elif error_type == 'unknown field':
return 'Unknown parameter in %s: "%s", must be one of: %s' % (
name, additional['unknown_param'],
', '.join(additional['valid_names']))
elif error_type == 'invalid type':
return 'Invalid type for parameter %s, value: %s, type: %s, ' \
'valid types: %s' % (name, additional['param'],
str(type(additional['param'])),
', '.join(additional['valid_types']))
elif error_type == 'invalid range':
min_allowed = additional['min_allowed']
return ('Invalid value for parameter %s, value: %s, '
'valid min value: %s' % (name, additional['param'],
min_allowed))
elif error_type == 'invalid length':
min_allowed = additional['min_allowed']
return ('Invalid length for parameter %s, value: %s, '
'valid min length: %s' % (name, additional['param'],
min_allowed))
elif error_type == 'unable to encode to json':
return 'Invalid parameter %s must be json serializable: %s' \
% (name, additional['type_error'])
def _get_name(self, name):
if not name:
return 'input'
elif name.startswith('.'):
return name[1:]
else:
return name
def report(self, name, reason, **kwargs):
self._errors.append((reason, name, kwargs))
class ParamValidator(object):
"""Validates parameters against a shape model."""
def validate(self, params, shape):
"""Validate parameters against a shape model.
This method will validate the parameters against a provided shape model.
All errors will be collected before returning to the caller. This means
        that this method will not stop at the first error; it will return all
        possible errors.
:param params: User provided dict of parameters
:param shape: A shape model describing the expected input.
        :return: A ``ValidationErrors`` object containing any errors found.
"""
errors = ValidationErrors()
self._validate(params, shape, errors, name='')
return errors
def _check_special_validation_cases(self, shape):
if is_json_value_header(shape):
return self._validate_jsonvalue_string
def _validate(self, params, shape, errors, name):
special_validator = self._check_special_validation_cases(shape)
if special_validator:
special_validator(params, shape, errors, name)
else:
getattr(self, '_validate_%s' % shape.type_name)(
params, shape, errors, name)
def _validate_jsonvalue_string(self, params, shape, errors, name):
# Check to see if a value marked as a jsonvalue can be dumped to
# a json string.
try:
json.dumps(params)
except (ValueError, TypeError) as e:
errors.report(name, 'unable to encode to json', type_error=e)
@type_check(valid_types=(dict,))
def _validate_structure(self, params, shape, errors, name):
# Validate required fields.
for required_member in shape.metadata.get('required', []):
if required_member not in params:
errors.report(name, 'missing required field',
required_name=required_member, user_params=params)
members = shape.members
known_params = []
# Validate known params.
for param in params:
if param not in members:
errors.report(name, 'unknown field', unknown_param=param,
valid_names=list(members))
else:
known_params.append(param)
# Validate structure members.
for param in known_params:
self._validate(params[param], shape.members[param],
errors, '%s.%s' % (name, param))
@type_check(valid_types=six.string_types)
def _validate_string(self, param, shape, errors, name):
        # Validate range. For a string, the min/max constraints
# are of the string length.
# Looks like:
# "WorkflowId":{
# "type":"string",
# "min":1,
# "max":256
# }
range_check(name, len(param), shape, 'invalid length', errors)
@type_check(valid_types=(list, tuple))
def _validate_list(self, param, shape, errors, name):
member_shape = shape.member
range_check(name, len(param), shape, 'invalid length', errors)
for i, item in enumerate(param):
self._validate(item, member_shape, errors, '%s[%s]' % (name, i))
@type_check(valid_types=(dict,))
def _validate_map(self, param, shape, errors, name):
key_shape = shape.key
value_shape = shape.value
for key, value in param.items():
self._validate(key, key_shape, errors, "%s (key: %s)"
% (name, key))
self._validate(value, value_shape, errors, '%s.%s' % (name, key))
@type_check(valid_types=six.integer_types)
def _validate_integer(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
def _validate_blob(self, param, shape, errors, name):
if isinstance(param, (bytes, bytearray, six.text_type)):
return
elif hasattr(param, 'read'):
# File like objects are also allowed for blob types.
return
else:
errors.report(name, 'invalid type', param=param,
valid_types=[str(bytes), str(bytearray),
'file-like object'])
@type_check(valid_types=(bool,))
def _validate_boolean(self, param, shape, errors, name):
pass
@type_check(valid_types=(float, decimal.Decimal) + six.integer_types)
def _validate_double(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
_validate_float = _validate_double
@type_check(valid_types=six.integer_types)
def _validate_long(self, param, shape, errors, name):
range_check(name, param, shape, 'invalid range', errors)
def _validate_timestamp(self, param, shape, errors, name):
# We don't use @type_check because datetimes are a bit
# more flexible. You can either provide a datetime
# object, or a string that parses to a datetime.
is_valid_type = self._type_check_datetime(param)
if not is_valid_type:
valid_type_names = [six.text_type(datetime), 'timestamp-string']
errors.report(name, 'invalid type', param=param,
valid_types=valid_type_names)
def _type_check_datetime(self, value):
try:
parse_to_aware_datetime(value)
return True
except (TypeError, ValueError, AttributeError):
# Yes, dateutil can sometimes raise an AttributeError
# when parsing timestamps.
return False
class ParamValidationDecorator(object):
def __init__(self, param_validator, serializer):
self._param_validator = param_validator
self._serializer = serializer
def serialize_to_request(self, parameters, operation_model):
input_shape = operation_model.input_shape
if input_shape is not None:
report = self._param_validator.validate(parameters,
operation_model.input_shape)
if report.has_errors():
raise ParamValidationError(report=report.generate_report())
return self._serializer.serialize_to_request(parameters,
operation_model)
| 11,323 |
Python
| 37.256757 | 80 | 0.59463 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/utils.py
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import random
import os
import socket
import cgi
import warnings
import dateutil.parser
from dateutil.tz import tzutc
import botocore
import botocore.awsrequest
import botocore.httpsession
from botocore.compat import (
json, quote, zip_longest, urlsplit, urlunsplit, OrderedDict,
six, urlparse, get_tzinfo_options, get_md5, MD5_AVAILABLE
)
from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass
from botocore.exceptions import (
InvalidExpressionError, ConfigNotFound, InvalidDNSNameError, ClientError,
MetadataRetrievalError, EndpointConnectionError, ReadTimeoutError,
ConnectionClosedError, ConnectTimeoutError, UnsupportedS3ArnError,
UnsupportedS3AccesspointConfigurationError, SSOTokenLoadError,
InvalidRegionError, InvalidIMDSEndpointError, UnsupportedOutpostResourceError,
UnsupportedS3ControlConfigurationError, UnsupportedS3ControlArnError,
InvalidHostLabelError, HTTPClientError, UnsupportedS3ConfigurationError,
)
from urllib3.exceptions import LocationParseError
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_BASE_URL = 'http://169.254.169.254/'
METADATA_BASE_URL_IPv6 = 'http://[fe80:ec2::254%eth0]/'
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
RETRYABLE_HTTP_ERRORS = (
ReadTimeoutError, EndpointConnectionError, ConnectionClosedError,
ConnectTimeoutError,
)
S3_ACCELERATE_WHITELIST = ['dualstack']
# In switching events from using service name / endpoint prefix to service
# id, we have to preserve compatibility. This maps the instances where either
# is different than the transformed service id.
EVENT_ALIASES = {
"a4b": "alexa-for-business",
"alexaforbusiness": "alexa-for-business",
"api.mediatailor": "mediatailor",
"api.pricing": "pricing",
"api.sagemaker": "sagemaker",
"apigateway": "api-gateway",
"application-autoscaling": "application-auto-scaling",
"appstream2": "appstream",
"autoscaling": "auto-scaling",
"autoscaling-plans": "auto-scaling-plans",
"ce": "cost-explorer",
"cloudhsmv2": "cloudhsm-v2",
"cloudsearchdomain": "cloudsearch-domain",
"cognito-idp": "cognito-identity-provider",
"config": "config-service",
"cur": "cost-and-usage-report-service",
"data.iot": "iot-data-plane",
"data.jobs.iot": "iot-jobs-data-plane",
"data.mediastore": "mediastore-data",
"datapipeline": "data-pipeline",
"devicefarm": "device-farm",
"devices.iot1click": "iot-1click-devices-service",
"directconnect": "direct-connect",
"discovery": "application-discovery-service",
"dms": "database-migration-service",
"ds": "directory-service",
"dynamodbstreams": "dynamodb-streams",
"elasticbeanstalk": "elastic-beanstalk",
"elasticfilesystem": "efs",
"elasticloadbalancing": "elastic-load-balancing",
"elasticmapreduce": "emr",
"elastictranscoder": "elastic-transcoder",
"elb": "elastic-load-balancing",
"elbv2": "elastic-load-balancing-v2",
"email": "ses",
"entitlement.marketplace": "marketplace-entitlement-service",
"es": "elasticsearch-service",
"events": "eventbridge",
"cloudwatch-events": "eventbridge",
"iot-data": "iot-data-plane",
"iot-jobs-data": "iot-jobs-data-plane",
"iot1click-devices": "iot-1click-devices-service",
"iot1click-projects": "iot-1click-projects",
"kinesisanalytics": "kinesis-analytics",
"kinesisvideo": "kinesis-video",
"lex-models": "lex-model-building-service",
"lex-runtime": "lex-runtime-service",
"logs": "cloudwatch-logs",
"machinelearning": "machine-learning",
"marketplace-entitlement": "marketplace-entitlement-service",
"marketplacecommerceanalytics": "marketplace-commerce-analytics",
"metering.marketplace": "marketplace-metering",
"meteringmarketplace": "marketplace-metering",
"mgh": "migration-hub",
"models.lex": "lex-model-building-service",
"monitoring": "cloudwatch",
"mturk-requester": "mturk",
"opsworks-cm": "opsworkscm",
"projects.iot1click": "iot-1click-projects",
"resourcegroupstaggingapi": "resource-groups-tagging-api",
"route53": "route-53",
"route53domains": "route-53-domains",
"runtime.lex": "lex-runtime-service",
"runtime.sagemaker": "sagemaker-runtime",
"sdb": "simpledb",
"secretsmanager": "secrets-manager",
"serverlessrepo": "serverlessapplicationrepository",
"servicecatalog": "service-catalog",
"states": "sfn",
"stepfunctions": "sfn",
"storagegateway": "storage-gateway",
"streams.dynamodb": "dynamodb-streams",
"tagging": "resource-groups-tagging-api"
}
# Vendoring IPv6 validation regex patterns from urllib3
# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
def ensure_boolean(val):
"""Ensures a boolean value if a string or boolean is provided
For strings, the value for True/False is case insensitive
"""
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
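# For example (illustrative):
#     ensure_boolean(True)    # -> True
#     ensure_boolean('TRUE')  # -> True
#     ensure_boolean('no')    # -> False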
def is_json_value_header(shape):
"""Determines if the provided shape is the special header type jsonvalue.
:type shape: botocore.shape
:param shape: Shape to be inspected for the jsonvalue trait.
:return: True if this type is a jsonvalue, False otherwise
:rtype: Bool
"""
return (hasattr(shape, 'serialization') and
shape.serialization.get('jsonvalue', False) and
shape.serialization.get('location') == 'header' and
shape.type_name == 'string')
def get_service_module_name(service_model):
"""Returns the module name for a service
This is the value used in both the documentation and client class name
"""
name = service_model.metadata.get(
'serviceAbbreviation',
service_model.metadata.get(
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub(r'\W+', '', name)
return name
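# Sketch of the transformation (the stand-in class below is hypothetical,
# mimicking only the attributes of ``botocore.model.ServiceModel`` that this
# function reads):
#
#     class _FakeServiceModel(object):  # hypothetical stand-in
#         metadata = {'serviceFullName': 'AWS Elastic Beanstalk'}
#         service_name = 'elasticbeanstalk'
#
#     get_service_module_name(_FakeServiceModel())  # -> 'ElasticBeanstalk'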
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def normalize_boolean(val):
"""Returns None if val is None, otherwise ensure value
converted to boolean"""
if val is None:
return val
else:
return ensure_boolean(val)
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
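# For example (illustrative):
#     remove_dot_segments('/a/b/../c')  # -> '/a/c'
#     remove_dot_segments('a/./b//c/')  # -> 'a/b/c/' (consecutive slashes dropped)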
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a
# value based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if current_key not in source:
# We've got something in the expression that's not present in the
            # source (new key). If there are any more bits, we'll set the key
# with an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
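# For example (illustrative), intermediate dictionaries are created as needed:
#     d = {}
#     set_value_from_jmespath(d, 'a.b.c', 10)
#     # d is now {'a': {'b': {'c': 10}}}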
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
class BadIMDSRequestError(Exception):
def __init__(self, request):
self.request = request
class IMDSFetcher(object):
_RETRIES_EXCEEDED_ERROR_CLS = _RetriesExceededError
_TOKEN_PATH = 'latest/api/token'
_TOKEN_TTL = '21600'
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, base_url=METADATA_BASE_URL,
env=None, user_agent=None, config=None):
self._timeout = timeout
self._num_attempts = num_attempts
self._base_url = self._select_base_url(base_url, config)
if env is None:
env = os.environ.copy()
self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
self._disabled = self._disabled == 'true'
self._user_agent = user_agent
self._session = botocore.httpsession.URLLib3Session(
timeout=self._timeout,
proxies=get_environ_proxies(self._base_url),
)
def get_base_url(self):
return self._base_url
def _select_base_url(self, base_url, config):
if config is None:
config = {}
requires_ipv6 = ensure_boolean(config.get('imds_use_ipv6', False))
custom_metadata_endpoint = config.get('ec2_metadata_service_endpoint')
if requires_ipv6 and custom_metadata_endpoint:
            logger.warning("Custom endpoint and IMDS_USE_IPV6 are both set. Using custom endpoint.")
chosen_base_url = None
if base_url != METADATA_BASE_URL:
chosen_base_url = base_url
elif custom_metadata_endpoint:
chosen_base_url = custom_metadata_endpoint
elif requires_ipv6:
chosen_base_url = METADATA_BASE_URL_IPv6
else:
chosen_base_url = METADATA_BASE_URL
logger.debug("IMDS ENDPOINT: %s" % chosen_base_url)
if not is_valid_uri(chosen_base_url):
raise InvalidIMDSEndpointError(endpoint=chosen_base_url)
return chosen_base_url
def _fetch_metadata_token(self):
self._assert_enabled()
url = self._base_url + self._TOKEN_PATH
headers = {
'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
}
self._add_user_agent(headers)
request = botocore.awsrequest.AWSRequest(
method='PUT', url=url, headers=headers)
for i in range(self._num_attempts):
try:
response = self._session.send(request.prepare())
if response.status_code == 200:
return response.text
elif response.status_code in (404, 403, 405):
return None
elif response.status_code in (400,):
raise BadIMDSRequestError(request)
except ReadTimeoutError:
return None
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
except HTTPClientError as e:
if isinstance(e.kwargs.get('error'), LocationParseError):
raise InvalidIMDSEndpointError(endpoint=url, error=e)
else:
raise
return None
def _get_request(self, url_path, retry_func, token=None):
"""Make a get request to the Instance Metadata Service.
:type url_path: str
:param url_path: The path component of the URL to make a get request.
This arg is appended to the base_url that was provided in the
initializer.
:type retry_func: callable
:param retry_func: A function that takes the response as an argument
            and determines if it needs to retry. By default, empty and
            non-200 responses are retried.
:type token: str
:param token: Metadata token to send along with GET requests to IMDS.
"""
self._assert_enabled()
if retry_func is None:
retry_func = self._default_retry
url = self._base_url + url_path
headers = {}
if token is not None:
headers['x-aws-ec2-metadata-token'] = token
self._add_user_agent(headers)
for i in range(self._num_attempts):
try:
request = botocore.awsrequest.AWSRequest(
method='GET', url=url, headers=headers)
response = self._session.send(request.prepare())
if not retry_func(response):
return response
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _add_user_agent(self, headers):
if self._user_agent is not None:
headers['User-Agent'] = self._user_agent
def _assert_enabled(self):
if self._disabled:
logger.debug("Access to EC2 metadata has been disabled.")
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _default_retry(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _is_non_ok_response(self, response):
if response.status_code != 200:
self._log_imds_response(response, 'non-200', log_body=True)
return True
return False
def _is_empty(self, response):
if not response.content:
self._log_imds_response(response, 'no body', log_body=True)
return True
return False
def _log_imds_response(self, response, reason_to_log, log_body=False):
statement = (
"Metadata service returned %s response "
"with status code of %s for url: %s"
)
logger_args = [
reason_to_log, response.status_code, response.url
]
if log_body:
statement += ", content body: %s"
logger_args.append(response.content)
logger.debug(statement, *logger_args)
class InstanceMetadataFetcher(IMDSFetcher):
_URL_PATH = 'latest/meta-data/iam/security-credentials/'
_REQUIRED_CREDENTIAL_FIELDS = [
'AccessKeyId', 'SecretAccessKey', 'Token', 'Expiration'
]
def retrieve_iam_role_credentials(self):
try:
token = self._fetch_metadata_token()
role_name = self._get_iam_role(token)
credentials = self._get_credentials(role_name, token)
if self._contains_all_credential_fields(credentials):
return {
'role_name': role_name,
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['Token'],
'expiry_time': credentials['Expiration'],
}
else:
# IMDS can return a 200 response that has a JSON formatted
                # error message (i.e. if ec2 is not a trusted entity for the
# attached role). We do not necessarily want to retry for
# these and we also do not necessarily want to raise a key
# error. So at least log the problematic response and return
# an empty dictionary to signal that it was not able to
                # retrieve credentials. These errors will contain both a
# Code and Message key.
if 'Code' in credentials and 'Message' in credentials:
                    logger.debug('Error response received when retrieving '
                                 'credentials: %s.', credentials)
return {}
except self._RETRIES_EXCEEDED_ERROR_CLS:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
self._num_attempts)
except BadIMDSRequestError as e:
logger.debug("Bad IMDS request: %s", e.request)
return {}
def _get_iam_role(self, token=None):
return self._get_request(
url_path=self._URL_PATH,
retry_func=self._needs_retry_for_role_name,
token=token,
).text
def _get_credentials(self, role_name, token=None):
r = self._get_request(
url_path=self._URL_PATH + role_name,
retry_func=self._needs_retry_for_credentials,
token=token,
)
return json.loads(r.text)
def _is_invalid_json(self, response):
try:
json.loads(response.text)
return False
except ValueError:
self._log_imds_response(response, 'invalid json')
return True
def _needs_retry_for_role_name(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _needs_retry_for_credentials(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response) or
self._is_invalid_json(response)
)
def _contains_all_credential_fields(self, credentials):
for field in self._REQUIRED_CREDENTIAL_FIELDS:
if field not in credentials:
logger.debug(
'Retrieved credentials is missing required field: %s',
field)
return False
return True
def merge_dicts(dict1, dict2, append_lists=False):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
:param append_lists: If true, instead of clobbering a list with the new
value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
                merge_dicts(dict1[key], dict2[key], append_lists)
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(dict2[key])
else:
dict1[key] = dict2[key]
else:
            # For scalar values, the value from dict2 simply
            # overwrites the existing value in dict1.
dict1[key] = dict2[key]
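# For example (illustrative):
#     d1 = {'tags': [1], 'n': 1}
#     d2 = {'tags': [2], 'n': 2}
#     merge_dicts(d1, d2, append_lists=True)
#     # d1 is now {'tags': [1, 2], 'n': 2}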
def lowercase_dict(original):
"""Copies the given dictionary ensuring all keys are lowercase strings. """
copy = {}
for key in original:
copy[key.lower()] = original[key]
return copy
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
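# For example (illustrative), lines without '=' are silently skipped:
#     parse_key_val_file_contents('a = 1\nnot a pair\nb=two')
#     # -> {'a': '1', 'b': 'two'}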
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
If any value in the input ``mapping`` is a list type,
    then each list element will be serialized. This is the equivalent
    of ``urlencode``'s ``doseq=True`` argument.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
if isinstance(value, list):
for element in value:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(element)))
else:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
If given the binary type, will simply URL encode it. If given the
text type, will produce the binary type by UTF-8 encoding the
text. If given something else, will convert it to the text type
first.
"""
    # If it's not a binary or text string, make it a text string.
if not isinstance(input_str, (six.binary_type, six.text_type)):
input_str = six.text_type(input_str)
# If it's not bytes, make it bytes by UTF-8 encoding it.
if not isinstance(input_str, six.binary_type):
input_str = input_str.encode('utf-8')
return quote(input_str, safe=safe)
def _parse_timestamp_with_tzinfo(value, tzinfo):
"""Parse timestamp with pluggable tzinfo options."""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzinfo())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzinfo())
except (TypeError, ValueError):
pass
try:
# In certain cases, a timestamp marked with GMT can be parsed into a
# different time zone, so here we provide a context which will
# enforce that GMT == UTC.
return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
for tzinfo in get_tzinfo_options():
try:
return _parse_timestamp_with_tzinfo(value, tzinfo)
except OSError as e:
logger.debug('Unable to parse timestamp with "%s" timezone info.',
tzinfo.__name__, exc_info=e)
raise RuntimeError('Unable to calculate correct timezone offset for '
'"%s"' % value)
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
        # A case could be made that if no time zone is provided,
        # we should use the local time. However, to preserve backwards
        # compat, the previous behavior was to assume UTC, which is
        # what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
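# For example (illustrative), both calls below return an aware UTC datetime:
#     parse_to_aware_datetime(0)                      # epoch int
#     parse_to_aware_datetime('1970-01-01T00:00:00')  # naive iso8601, UTC assumed
#     # both -> datetime.datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())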
def datetime2timestamp(dt, default_timezone=None):
"""Calculate the timestamp based on the given datetime instance.
:type dt: datetime
:param dt: A datetime object to be converted into timestamp
:type default_timezone: tzinfo
:param default_timezone: If it is provided as None, we treat it as tzutc().
But it is only used when dt is a naive datetime.
:returns: The timestamp
"""
epoch = datetime.datetime(1970, 1, 1)
if dt.tzinfo is None:
if default_timezone is None:
default_timezone = tzutc()
dt = dt.replace(tzinfo=default_timezone)
d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
if hasattr(d, "total_seconds"):
return d.total_seconds() # Works in Python 2.7+
return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6
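# For example (illustrative), naive datetimes are treated as UTC:
#     datetime2timestamp(datetime.datetime(1970, 1, 1))  # -> 0.0
#     datetime2timestamp(datetime.datetime(1970, 1, 2))  # -> 86400.0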
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
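# Usage sketch (illustrative; ``_expensive_lookup`` is a hypothetical helper):
#
#     class _Example(object):
#         @CachedProperty
#         def region(self):
#             return _expensive_lookup()  # runs once; the result then shadows
#                                         # the descriptor in __dict__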
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input/output shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input/output shape.
    The specific values used are placeholder values. For strings, either an
empty string or the member name can be used, for numbers 0 or 0.0 is used.
The intended usage of this class is to generate the *shape* of the input
structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self, use_member_names=False):
self._use_member_names = use_member_names
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack, name=''):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
if self._use_member_names:
return name
if shape.enum:
return random.choice(shape.enum)
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name in ['float', 'double']:
return 0.0
elif shape.type_name == 'boolean':
return True
elif shape.type_name == 'timestamp':
return datetime.datetime(1970, 1, 1, 0, 0, 0)
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
member_shape, stack, name=member_name)
return skeleton
def _generate_type_list(self, shape, stack):
        # For list elements we've arbitrarily decided to
        # return a single element for the skeleton list.
name = ''
if self._use_member_names:
name = shape.member.name
return [
self._generate_skeleton(shape.member, stack, name),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
def is_valid_ipv6_endpoint_url(endpoint_url):
netloc = urlparse(endpoint_url).netloc
return IPV6_ADDRZ_RE.match(netloc) is not None
def is_valid_endpoint_url(endpoint_url):
"""Verify the endpoint_url is valid.
:type endpoint_url: string
:param endpoint_url: An endpoint_url. Must have at least a scheme
and a hostname.
:return: True if the endpoint url is valid. False otherwise.
"""
parts = urlsplit(endpoint_url)
hostname = parts.hostname
if hostname is None:
return False
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)
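# For example (illustrative):
#     is_valid_endpoint_url('https://s3.us-west-2.amazonaws.com')  # truthy match
#     is_valid_endpoint_url('not a url')                           # -> False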
def is_valid_uri(endpoint_url):
return is_valid_endpoint_url(endpoint_url) or is_valid_ipv6_endpoint_url(endpoint_url)
def validate_region_name(region_name):
"""Provided region_name must be a valid host label."""
if region_name is None:
return
valid_host_label = re.compile(r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{,63}(?<!-)$')
valid = valid_host_label.match(region_name)
if not valid:
raise InvalidRegionError(region_name=region_name)
def check_dns_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
Even though "." characters are perfectly valid in this DNS
naming scheme, we are going to punt on any name containing a
"." character because these will cause SSL cert validation
problems if we try to use virtual-hosting style addressing.
"""
if '.' in bucket_name:
return False
n = len(bucket_name)
if n < 3 or n > 63:
# Wrong length
return False
match = LABEL_RE.match(bucket_name)
if match is None or match.end() != len(bucket_name):
return False
return True
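# For example (illustrative):
#     check_dns_name('my-bucket-1')  # -> True: usable with virtual hosting
#     check_dns_name('my.bucket')    # -> False: dots break SSL cert matching
#     check_dns_name('ab')           # -> False: shorter than 3 characters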
def fix_s3_host(request, signature_version, region_name,
default_endpoint_url=None, **kwargs):
"""
This handler looks at S3 requests just before they are signed.
If there is a bucket name on the path (true for everything except
ListAllBuckets) it checks to see if that bucket name conforms to
the DNS naming conventions. If it does, it alters the request to
use ``virtual hosting`` style addressing rather than ``path-style``
addressing.
"""
if request.context.get('use_global_endpoint', False):
default_endpoint_url = 's3.amazonaws.com'
try:
switch_to_virtual_host_style(
request, signature_version, default_endpoint_url)
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
This is a handler to force virtual host style s3 addressing no matter
the signature version (which is taken in consideration for the default
case). If the bucket is not DNS compatible an InvalidDNSName is thrown.
:param request: A AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
# customization again.
return
elif _is_get_bucket_location_request(request):
# For the GetBucketLocation response, we should not be using
# the virtual host style addressing so we can avoid any sigv4
# issues.
logger.debug("Request is GetBucketLocation operation, not checking "
"for DNS compatibility.")
return
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
    # Retrieve the endpoint that we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc
if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not used you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def instance_cache(func):
"""Method decorator for caching method calls to a single instance.
**This is not a general purpose caching decorator.**
In order to use this, you *must* provide an ``_instance_cache``
attribute on the instance.
This decorator is used to cache method calls. The cache is only
scoped to a single instance though such that multiple instances
will maintain their own cache. In order to keep things simple,
this decorator requires that you provide an ``_instance_cache``
attribute on your instance.
"""
func_name = func.__name__
@functools.wraps(func)
def _cache_guard(self, *args, **kwargs):
cache_key = (func_name, args)
if kwargs:
kwarg_items = tuple(sorted(kwargs.items()))
cache_key = (func_name, args, kwarg_items)
result = self._instance_cache.get(cache_key)
if result is not None:
return result
result = func(self, *args, **kwargs)
self._instance_cache[cache_key] = result
return result
return _cache_guard
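# Usage sketch (illustrative; ``_do_lookup`` is a hypothetical helper):
#
#     class Resolver(object):
#         def __init__(self):
#             self._instance_cache = {}   # required by the decorator
#
#         @instance_cache
#         def resolve(self, name):
#             return _do_lookup(name)     # evaluated once per (name,) key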
def switch_host_s3_accelerate(request, operation_name, **kwargs):
"""Switches the current s3 endpoint with an S3 Accelerate endpoint"""
    # Note that when registered, the switching of the s3 host happens
    # before it gets changed to virtual. So we are not concerned with ensuring
# that the bucket name is translated to the virtual style here and we
# can hard code the Accelerate endpoint.
parts = urlsplit(request.url).netloc.split('.')
parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
endpoint = 'https://s3-accelerate.'
if len(parts) > 0:
endpoint += '.'.join(parts) + '.'
endpoint += 'amazonaws.com'
if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
return
_switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
"""Switches the host using a parameter value from a JSON request body"""
request_json = json.loads(request.data.decode('utf-8'))
if request_json.get(param_name):
new_endpoint = request_json[param_name]
_switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
final_endpoint = _get_new_endpoint(
request.url, new_endpoint, use_new_scheme)
request.url = final_endpoint
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
new_endpoint_components = urlsplit(new_endpoint)
original_endpoint_components = urlsplit(original_endpoint)
scheme = original_endpoint_components.scheme
if use_new_scheme:
scheme = new_endpoint_components.scheme
final_endpoint_components = (
scheme,
new_endpoint_components.netloc,
original_endpoint_components.path,
original_endpoint_components.query,
''
)
final_endpoint = urlunsplit(final_endpoint_components)
logger.debug('Updating URI from %s to %s' % (
original_endpoint, final_endpoint))
return final_endpoint
def deep_merge(base, extra):
"""Deeply two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
for key in extra:
# If the key represents a dict on both given dicts, merge the sub-dicts
if key in base and isinstance(base[key], dict)\
and isinstance(extra[key], dict):
deep_merge(base[key], extra[key])
continue
# Otherwise, set the key on the base to be the value of the extra.
base[key] = extra[key]
def hyphenize_service_id(service_id):
"""Translate the form used for event emitters.
:param service_id: The service_id to convert.
"""
return service_id.replace(' ', '-').lower()
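# For example (illustrative): hyphenize_service_id('API Gateway') -> 'api-gateway'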
class S3RegionRedirector(object):
def __init__(self, endpoint_bridge, client, cache=None):
self._endpoint_resolver = endpoint_bridge
self._cache = cache
if self._cache is None:
self._cache = {}
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
def register(self, event_emitter=None):
emitter = event_emitter or self._client.meta.events
emitter.register('needs-retry.s3', self.redirect_from_error)
emitter.register('before-call.s3', self.set_request_url)
emitter.register('before-parameter-build.s3',
self.redirect_from_cache)
def redirect_from_error(self, request_dict, response, operation, **kwargs):
"""
An S3 request sent to the wrong region will return an error that
contains the endpoint the request should be sent to. This handler
will add the redirect information to the signing context and then
redirect the request.
"""
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
if self._is_s3_accesspoint(request_dict.get('context', {})):
logger.debug(
'S3 request was previously to an accesspoint, not redirecting.'
)
return
if request_dict.get('context', {}).get('s3_redirected'):
logger.debug(
'S3 request was previously redirected, not redirecting.')
return
error = response[1].get('Error', {})
error_code = error.get('Code')
response_metadata = response[1].get('ResponseMetadata', {})
# We have to account for 400 responses because
# if we sign a Head* request with the wrong region,
# we'll get a 400 Bad Request but we won't get a
# body saying it's an "AuthorizationHeaderMalformed".
is_special_head_object = (
error_code in ['301', '400'] and
operation.name == 'HeadObject'
)
is_special_head_bucket = (
error_code in ['301', '400'] and
operation.name == 'HeadBucket' and
'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
)
is_wrong_signing_region = (
error_code == 'AuthorizationHeaderMalformed' and
'Region' in error
)
is_redirect_status = response[0] is not None and \
response[0].status_code in [301, 302, 307]
is_permanent_redirect = error_code == 'PermanentRedirect'
if not any([is_special_head_object, is_wrong_signing_region,
is_permanent_redirect, is_special_head_bucket,
is_redirect_status]):
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
request_dict['context']['s3_redirected'] = True
# Return 0 so it doesn't wait to retry
return 0
def get_bucket_region(self, bucket, response):
"""
There are multiple potential sources for the new region to redirect to,
but they aren't all universally available for use. This will try to
        find the region from response elements, but will fall back to calling
HEAD on the bucket if all else fails.
:param bucket: The bucket to find the region for. This is necessary if
the region is not available in the error response.
:param response: A response representing a service request that failed
due to incorrect region configuration.
"""
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
def set_request_url(self, params, context, **kwargs):
endpoint = context.get('signing', {}).get('endpoint', None)
if endpoint is not None:
params['url'] = _get_new_endpoint(params['url'], endpoint, False)
def redirect_from_cache(self, params, context, **kwargs):
"""
This handler retrieves a given bucket's signing context from the cache
and adds it into the request context.
"""
if self._is_s3_accesspoint(context):
return
bucket = params.get('Bucket')
signing_context = self._cache.get(bucket)
if signing_context is not None:
context['signing'] = signing_context
else:
context['signing'] = {'bucket': bucket}
def _is_s3_accesspoint(self, context):
return 's3_accesspoint' in context
class InvalidArnException(ValueError):
pass
class ArnParser(object):
def parse_arn(self, arn):
arn_parts = arn.split(':', 5)
if len(arn_parts) < 6:
raise InvalidArnException(
'Provided ARN: %s must be of the format: '
'arn:partition:service:region:account:resource' % arn
)
return {
'partition': arn_parts[1],
'service': arn_parts[2],
'region': arn_parts[3],
'account': arn_parts[4],
'resource': arn_parts[5],
}
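# For example (illustrative):
#     ArnParser().parse_arn('arn:aws:s3:us-west-2:123456789012:accesspoint/myap')
#     # -> {'partition': 'aws', 'service': 's3', 'region': 'us-west-2',
#     #     'account': '123456789012', 'resource': 'accesspoint/myap'}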
class S3ArnParamHandler(object):
_RESOURCE_REGEX = re.compile(
r'^(?P<resource_type>accesspoint|outpost)[/:](?P<resource_name>.+)$'
)
_OUTPOST_RESOURCE_REGEX = re.compile(
r'^(?P<outpost_name>[a-zA-Z0-9\-]{1,63})[/:]accesspoint[/:]'
r'(?P<accesspoint_name>[a-zA-Z0-9\-]{1,63}$)'
)
_BLACKLISTED_OPERATIONS = [
'CreateBucket'
]
def __init__(self, arn_parser=None):
self._arn_parser = arn_parser
if arn_parser is None:
self._arn_parser = ArnParser()
def register(self, event_emitter):
event_emitter.register('before-parameter-build.s3', self.handle_arn)
def handle_arn(self, params, model, context, **kwargs):
if model.name in self._BLACKLISTED_OPERATIONS:
return
arn_details = self._get_arn_details_from_bucket_param(params)
if arn_details is None:
return
if arn_details['resource_type'] == 'accesspoint':
self._store_accesspoint(params, context, arn_details)
elif arn_details['resource_type'] == 'outpost':
self._store_outpost(params, context, arn_details)
def _get_arn_details_from_bucket_param(self, params):
if 'Bucket' in params:
try:
arn = params['Bucket']
arn_details = self._arn_parser.parse_arn(arn)
self._add_resource_type_and_name(arn, arn_details)
return arn_details
except InvalidArnException:
pass
return None
def _add_resource_type_and_name(self, arn, arn_details):
match = self._RESOURCE_REGEX.match(arn_details['resource'])
if match:
arn_details['resource_type'] = match.group('resource_type')
arn_details['resource_name'] = match.group('resource_name')
else:
raise UnsupportedS3ArnError(arn=arn)
def _store_accesspoint(self, params, context, arn_details):
# Ideally the access-point would be stored as a parameter in the
# request where the serializer would then know how to serialize it,
# but access-points are not modeled in S3 operations so it would fail
# validation. Instead, we set the access-point to the bucket parameter
# to have some value set when serializing the request and additional
# information on the context from the arn to use in forming the
# access-point endpoint.
params['Bucket'] = arn_details['resource_name']
context['s3_accesspoint'] = {
'name': arn_details['resource_name'],
'account': arn_details['account'],
'partition': arn_details['partition'],
'region': arn_details['region'],
'service': arn_details['service'],
}
def _store_outpost(self, params, context, arn_details):
resource_name = arn_details['resource_name']
match = self._OUTPOST_RESOURCE_REGEX.match(resource_name)
if not match:
raise UnsupportedOutpostResourceError(resource_name=resource_name)
# Because we need to set the bucket name to something to pass
# validation we're going to use the access point name to be consistent
# with normal access point arns.
accesspoint_name = match.group('accesspoint_name')
params['Bucket'] = accesspoint_name
context['s3_accesspoint'] = {
'outpost_name': match.group('outpost_name'),
'name': accesspoint_name,
'account': arn_details['account'],
'partition': arn_details['partition'],
'region': arn_details['region'],
'service': arn_details['service'],
}
class S3EndpointSetter(object):
_DEFAULT_PARTITION = 'aws'
_DEFAULT_DNS_SUFFIX = 'amazonaws.com'
def __init__(self, endpoint_resolver, region=None,
s3_config=None, endpoint_url=None, partition=None):
self._endpoint_resolver = endpoint_resolver
self._region = region
self._s3_config = s3_config
if s3_config is None:
self._s3_config = {}
self._endpoint_url = endpoint_url
self._partition = partition
if partition is None:
self._partition = self._DEFAULT_PARTITION
def register(self, event_emitter):
event_emitter.register('before-sign.s3', self.set_endpoint)
event_emitter.register(
'before-call.s3.WriteGetObjectResponse',
self.update_endpoint_to_s3_object_lambda
)
def update_endpoint_to_s3_object_lambda(self, params, context, **kwargs):
if self._use_accelerate_endpoint:
            raise UnsupportedS3ConfigurationError(
                msg=('S3 client does not support accelerate endpoints '
                     'for S3 Object Lambda operations'),
            )
self._override_signing_name(context, 's3-object-lambda')
if self._endpoint_url:
# Only update the url if an explicit url was not provided
return
resolver = self._endpoint_resolver
resolved = resolver.construct_endpoint('s3-object-lambda', self._region)
# Ideally we would be able to replace the endpoint before
# serialization but there's no event to do that currently
new_endpoint = 'https://{host_prefix}{hostname}'.format(
host_prefix=params['host_prefix'],
hostname=resolved['hostname'],
)
params['url'] = _get_new_endpoint(params['url'], new_endpoint, False)
def set_endpoint(self, request, **kwargs):
if self._use_accesspoint_endpoint(request):
self._validate_accesspoint_supported(request)
region_name = self._resolve_region_for_accesspoint_endpoint(
request)
self._resolve_signing_name_for_accesspoint_endpoint(
request)
self._switch_to_accesspoint_endpoint(request, region_name)
return
if self._use_accelerate_endpoint:
switch_host_s3_accelerate(request=request, **kwargs)
if self._s3_addressing_handler:
self._s3_addressing_handler(request=request, **kwargs)
def _use_accesspoint_endpoint(self, request):
return 's3_accesspoint' in request.context
def _validate_accesspoint_supported(self, request):
if self._use_accelerate_endpoint:
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 accelerate configuration '
'when an access-point ARN is specified.'
)
)
request_partition = request.context['s3_accesspoint']['partition']
if request_partition != self._partition:
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client is configured for "%s" partition, but access-point'
                    ' ARN provided is for "%s" partition. The client and'
                    ' access-point partition must be the same.' % (
self._partition, request_partition)
)
)
s3_service = request.context['s3_accesspoint'].get('service')
if s3_service == 's3-object-lambda' and self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an S3 Object Lambda access point ARN is specified.'
)
)
outpost_name = request.context['s3_accesspoint'].get('outpost_name')
if outpost_name and self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3AccesspointConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an outpost ARN is specified.'
)
)
def _resolve_region_for_accesspoint_endpoint(self, request):
if self._s3_config.get('use_arn_region', True):
accesspoint_region = request.context['s3_accesspoint']['region']
# If we are using the region from the access point,
# we will also want to make sure that we set it as the
# signing region as well
self._override_signing_region(request, accesspoint_region)
return accesspoint_region
return self._region
def _resolve_signing_name_for_accesspoint_endpoint(self, request):
accesspoint_service = request.context['s3_accesspoint']['service']
self._override_signing_name(request.context, accesspoint_service)
def _switch_to_accesspoint_endpoint(self, request, region_name):
original_components = urlsplit(request.url)
accesspoint_endpoint = urlunsplit((
original_components.scheme,
self._get_accesspoint_netloc(request.context, region_name),
self._get_accesspoint_path(
original_components.path, request.context),
original_components.query,
''
))
logger.debug(
'Updating URI from %s to %s' % (request.url, accesspoint_endpoint))
request.url = accesspoint_endpoint
def _get_accesspoint_netloc(self, request_context, region_name):
s3_accesspoint = request_context['s3_accesspoint']
accesspoint_netloc_components = [
'%s-%s' % (s3_accesspoint['name'], s3_accesspoint['account']),
]
outpost_name = s3_accesspoint.get('outpost_name')
if self._endpoint_url:
if outpost_name:
accesspoint_netloc_components.append(outpost_name)
endpoint_url_netloc = urlsplit(self._endpoint_url).netloc
accesspoint_netloc_components.append(endpoint_url_netloc)
else:
if outpost_name:
outpost_host = [outpost_name, 's3-outposts']
accesspoint_netloc_components.extend(outpost_host)
elif s3_accesspoint['service'] == 's3-object-lambda':
accesspoint_netloc_components.append('s3-object-lambda')
else:
accesspoint_netloc_components.append('s3-accesspoint')
if self._s3_config.get('use_dualstack_endpoint'):
accesspoint_netloc_components.append('dualstack')
accesspoint_netloc_components.extend(
[
region_name,
self._get_dns_suffix(region_name)
]
)
return '.'.join(accesspoint_netloc_components)
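    # A sketch with hypothetical values: an access point named 'myap' under
    # account '123456789012' in 'us-west-2' (no custom endpoint_url, no
    # dualstack, default 'amazonaws.com' suffix) joins into
    # 'myap-123456789012.s3-accesspoint.us-west-2.amazonaws.com'.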
def _get_accesspoint_path(self, original_path, request_context):
# The Bucket parameter was substituted with the access-point name as
# some value was required in serializing the bucket name. Now that
# we are making the request directly to the access point, we will
# want to remove that access-point name from the path.
name = request_context['s3_accesspoint']['name']
# All S3 operations require at least a / in their path.
return original_path.replace('/' + name, '', 1) or '/'
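    # For example (hypothetical values): with an access point named 'myap',
    # '/myap/mykey' becomes '/mykey', and '/myap' alone collapses to '/'.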
def _get_dns_suffix(self, region_name):
resolved = self._endpoint_resolver.construct_endpoint(
's3', region_name)
dns_suffix = self._DEFAULT_DNS_SUFFIX
if resolved and 'dnsSuffix' in resolved:
dns_suffix = resolved['dnsSuffix']
return dns_suffix
def _override_signing_region(self, request, region_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['region'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['region'] = region_name
request.context['signing'] = signing_context
def _override_signing_name(self, context, signing_name):
signing_context = context.get('signing', {})
# S3SigV4Auth will use the context['signing']['signing_name'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['signing_name'] = signing_name
context['signing'] = signing_context
@CachedProperty
def _use_accelerate_endpoint(self):
        # Enable accelerate if the configuration is set to true or the
# endpoint being used matches one of the accelerate endpoints.
# Accelerate has been explicitly configured.
if self._s3_config.get('use_accelerate_endpoint'):
return True
# Accelerate mode is turned on automatically if an endpoint url is
# provided that matches the accelerate scheme.
if self._endpoint_url is None:
return False
# Accelerate is only valid for Amazon endpoints.
netloc = urlsplit(self._endpoint_url).netloc
if not netloc.endswith('amazonaws.com'):
return False
# The first part of the url should always be s3-accelerate.
parts = netloc.split('.')
if parts[0] != 's3-accelerate':
return False
        # The url parts between 's3-accelerate' and 'amazonaws.com' represent
        # different url features.
feature_parts = parts[1:-2]
# There should be no duplicate url parts.
if len(feature_parts) != len(set(feature_parts)):
return False
# Remaining parts must all be in the whitelist.
return all(p in S3_ACCELERATE_WHITELIST for p in feature_parts)
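    # A sketch of the matching logic, assuming 'dualstack' is in
    # S3_ACCELERATE_WHITELIST: 'https://s3-accelerate.dualstack.amazonaws.com'
    # yields feature_parts == ['dualstack'] and enables accelerate, while
    # 'https://s3-accelerate.dualstack.dualstack.amazonaws.com' is rejected
    # because of the duplicated part.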
@CachedProperty
def _addressing_style(self):
# Use virtual host style addressing if accelerate is enabled or if
# the given endpoint url is an accelerate endpoint.
if self._use_accelerate_endpoint:
return 'virtual'
# If a particular addressing style is configured, use it.
configured_addressing_style = self._s3_config.get('addressing_style')
if configured_addressing_style:
return configured_addressing_style
@CachedProperty
def _s3_addressing_handler(self):
# If virtual host style was configured, use it regardless of whether
# or not the bucket looks dns compatible.
if self._addressing_style == 'virtual':
logger.debug("Using S3 virtual host style addressing.")
return switch_to_virtual_host_style
# If path style is configured, no additional steps are needed. If
# endpoint_url was specified, don't default to virtual. We could
# potentially default provided endpoint urls to virtual hosted
# style, but for now it is avoided.
if self._addressing_style == 'path' or self._endpoint_url is not None:
logger.debug("Using S3 path style addressing.")
return None
logger.debug("Defaulting to S3 virtual host style addressing with "
"path style addressing fallback.")
# By default, try to use virtual style with path fallback.
return fix_s3_host
class S3ControlEndpointSetter(object):
_DEFAULT_PARTITION = 'aws'
_DEFAULT_DNS_SUFFIX = 'amazonaws.com'
_HOST_LABEL_REGEX = re.compile(r'^[a-zA-Z0-9\-]{1,63}$')
def __init__(self, endpoint_resolver, region=None,
s3_config=None, endpoint_url=None, partition=None):
self._endpoint_resolver = endpoint_resolver
self._region = region
self._s3_config = s3_config
if s3_config is None:
self._s3_config = {}
self._endpoint_url = endpoint_url
self._partition = partition
if partition is None:
self._partition = self._DEFAULT_PARTITION
def register(self, event_emitter):
event_emitter.register('before-sign.s3-control', self.set_endpoint)
def set_endpoint(self, request, **kwargs):
if self._use_endpoint_from_arn_details(request):
self._validate_endpoint_from_arn_details_supported(request)
region_name = self._resolve_region_from_arn_details(request)
self._resolve_signing_name_from_arn_details(request)
self._resolve_endpoint_from_arn_details(request, region_name)
self._add_headers_from_arn_details(request)
elif self._use_endpoint_from_outpost_id(request):
self._validate_outpost_redirection_valid(request)
self._override_signing_name(request, 's3-outposts')
new_netloc = self._construct_outpost_endpoint(self._region)
self._update_request_netloc(request, new_netloc)
def _use_endpoint_from_arn_details(self, request):
return 'arn_details' in request.context
def _use_endpoint_from_outpost_id(self, request):
return 'outpost_id' in request.context
def _validate_endpoint_from_arn_details_supported(self, request):
if not self._s3_config.get('use_arn_region', False):
arn_region = request.context['arn_details']['region']
if arn_region != self._region:
error_msg = (
'The use_arn_region configuration is disabled but '
'received arn for "%s" when the client is configured '
'to use "%s"'
) % (arn_region, self._region)
raise UnsupportedS3ControlConfigurationError(msg=error_msg)
        request_partition = request.context['arn_details']['partition']
        if request_partition != self._partition:
            raise UnsupportedS3ControlConfigurationError(
                msg=(
                    'Client is configured for "%s" partition, but arn '
                    'provided is for "%s" partition. The client and '
                    'arn partition must be the same.' % (
                        self._partition, request_partition)
)
)
if self._s3_config.get('use_accelerate_endpoint'):
raise UnsupportedS3ControlConfigurationError(
msg='S3 control client does not support accelerate endpoints',
)
if 'outpost_name' in request.context['arn_details']:
self._validate_outpost_redirection_valid(request)
def _validate_outpost_redirection_valid(self, request):
if self._s3_config.get('use_dualstack_endpoint'):
raise UnsupportedS3ControlConfigurationError(
msg=(
'Client does not support s3 dualstack configuration '
'when an outpost is specified.'
)
)
def _resolve_region_from_arn_details(self, request):
if self._s3_config.get('use_arn_region', False):
arn_region = request.context['arn_details']['region']
# If we are using the region from the expanded arn, we will also
# want to make sure that we set it as the signing region as well
self._override_signing_region(request, arn_region)
return arn_region
return self._region
def _resolve_signing_name_from_arn_details(self, request):
arn_service = request.context['arn_details']['service']
self._override_signing_name(request, arn_service)
return arn_service
def _resolve_endpoint_from_arn_details(self, request, region_name):
new_netloc = self._resolve_netloc_from_arn_details(request, region_name)
self._update_request_netloc(request, new_netloc)
def _update_request_netloc(self, request, new_netloc):
original_components = urlsplit(request.url)
arn_details_endpoint = urlunsplit((
original_components.scheme,
new_netloc,
original_components.path,
original_components.query,
''
))
logger.debug(
'Updating URI from %s to %s' % (request.url, arn_details_endpoint)
)
request.url = arn_details_endpoint
def _resolve_netloc_from_arn_details(self, request, region_name):
arn_details = request.context['arn_details']
if 'outpost_name' in arn_details:
return self._construct_outpost_endpoint(region_name)
account = arn_details['account']
return self._construct_s3_control_endpoint(region_name, account)
def _is_valid_host_label(self, label):
return self._HOST_LABEL_REGEX.match(label)
def _validate_host_labels(self, *labels):
for label in labels:
if not self._is_valid_host_label(label):
raise InvalidHostLabelError(label=label)
def _construct_s3_control_endpoint(self, region_name, account):
self._validate_host_labels(region_name, account)
if self._endpoint_url:
endpoint_url_netloc = urlsplit(self._endpoint_url).netloc
netloc = [account, endpoint_url_netloc]
else:
netloc = [
account,
's3-control',
]
self._add_dualstack(netloc)
dns_suffix = self._get_dns_suffix(region_name)
netloc.extend([region_name, dns_suffix])
return self._construct_netloc(netloc)
def _construct_outpost_endpoint(self, region_name):
self._validate_host_labels(region_name)
if self._endpoint_url:
return urlsplit(self._endpoint_url).netloc
else:
netloc = [
's3-outposts',
region_name,
self._get_dns_suffix(region_name),
]
return self._construct_netloc(netloc)
def _construct_netloc(self, netloc):
return '.'.join(netloc)
def _add_dualstack(self, netloc):
if self._s3_config.get('use_dualstack_endpoint'):
netloc.append('dualstack')
def _get_dns_suffix(self, region_name):
resolved = self._endpoint_resolver.construct_endpoint(
's3', region_name)
dns_suffix = self._DEFAULT_DNS_SUFFIX
if resolved and 'dnsSuffix' in resolved:
dns_suffix = resolved['dnsSuffix']
return dns_suffix
def _override_signing_region(self, request, region_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['region'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['region'] = region_name
request.context['signing'] = signing_context
def _override_signing_name(self, request, signing_name):
signing_context = request.context.get('signing', {})
# S3SigV4Auth will use the context['signing']['signing_name'] value to
# sign with if present. This is used by the Bucket redirector
# as well but we should be fine because the redirector is never
# used in combination with the accesspoint setting logic.
signing_context['signing_name'] = signing_name
request.context['signing'] = signing_context
def _add_headers_from_arn_details(self, request):
arn_details = request.context['arn_details']
outpost_name = arn_details.get('outpost_name')
if outpost_name:
self._add_outpost_id_header(request, outpost_name)
def _add_outpost_id_header(self, request, outpost_name):
request.headers['x-amz-outpost-id'] = outpost_name
class S3ControlArnParamHandler(object):
_RESOURCE_SPLIT_REGEX = re.compile(r'[/:]')
def __init__(self, arn_parser=None):
self._arn_parser = arn_parser
if arn_parser is None:
self._arn_parser = ArnParser()
def register(self, event_emitter):
event_emitter.register(
'before-parameter-build.s3-control',
self.handle_arn,
)
def handle_arn(self, params, model, context, **kwargs):
if model.name in ('CreateBucket', 'ListRegionalBuckets'):
# CreateBucket and ListRegionalBuckets are special cases that do
            # not obey ARN based redirection but will redirect based on the
# presence of the OutpostId parameter
self._handle_outpost_id_param(params, model, context)
else:
self._handle_name_param(params, model, context)
self._handle_bucket_param(params, model, context)
def _get_arn_details_from_param(self, params, param_name):
if param_name not in params:
return None
try:
arn = params[param_name]
arn_details = self._arn_parser.parse_arn(arn)
arn_details['original'] = arn
arn_details['resources'] = self._split_resource(arn_details)
return arn_details
except InvalidArnException:
return None
def _split_resource(self, arn_details):
return self._RESOURCE_SPLIT_REGEX.split(arn_details['resource'])
def _override_account_id_param(self, params, arn_details):
account_id = arn_details['account']
if 'AccountId' in params and params['AccountId'] != account_id:
error_msg = (
'Account ID in arn does not match the AccountId parameter '
'provided: "%s"'
) % params['AccountId']
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
params['AccountId'] = account_id
def _handle_outpost_id_param(self, params, model, context):
if 'OutpostId' not in params:
return
context['outpost_id'] = params['OutpostId']
def _handle_name_param(self, params, model, context):
# CreateAccessPoint is a special case that does not expand Name
if model.name == 'CreateAccessPoint':
return
arn_details = self._get_arn_details_from_param(params, 'Name')
if arn_details is None:
return
if self._is_outpost_accesspoint(arn_details):
self._store_outpost_accesspoint(params, context, arn_details)
else:
error_msg = 'The Name parameter does not support the provided ARN'
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
def _is_outpost_accesspoint(self, arn_details):
if arn_details['service'] != 's3-outposts':
return False
resources = arn_details['resources']
if len(resources) != 4:
return False
# Resource must be of the form outpost/op-123/accesspoint/name
return resources[0] == 'outpost' and resources[2] == 'accesspoint'
def _store_outpost_accesspoint(self, params, context, arn_details):
self._override_account_id_param(params, arn_details)
accesspoint_name = arn_details['resources'][3]
params['Name'] = accesspoint_name
arn_details['accesspoint_name'] = accesspoint_name
arn_details['outpost_name'] = arn_details['resources'][1]
context['arn_details'] = arn_details
def _handle_bucket_param(self, params, model, context):
arn_details = self._get_arn_details_from_param(params, 'Bucket')
if arn_details is None:
return
if self._is_outpost_bucket(arn_details):
self._store_outpost_bucket(params, context, arn_details)
else:
error_msg = (
'The Bucket parameter does not support the provided ARN'
)
raise UnsupportedS3ControlArnError(
arn=arn_details['original'],
msg=error_msg,
)
def _is_outpost_bucket(self, arn_details):
if arn_details['service'] != 's3-outposts':
return False
resources = arn_details['resources']
if len(resources) != 4:
return False
# Resource must be of the form outpost/op-123/bucket/name
return resources[0] == 'outpost' and resources[2] == 'bucket'
def _store_outpost_bucket(self, params, context, arn_details):
self._override_account_id_param(params, arn_details)
bucket_name = arn_details['resources'][3]
params['Bucket'] = bucket_name
arn_details['bucket_name'] = bucket_name
arn_details['outpost_name'] = arn_details['resources'][1]
context['arn_details'] = arn_details
class ContainerMetadataFetcher(object):
TIMEOUT_SECONDS = 2
RETRY_ATTEMPTS = 3
SLEEP_TIME = 1
IP_ADDRESS = '169.254.170.2'
_ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']
def __init__(self, session=None, sleep=time.sleep):
if session is None:
session = botocore.httpsession.URLLib3Session(
timeout=self.TIMEOUT_SECONDS
)
self._session = session
self._sleep = sleep
def retrieve_full_uri(self, full_url, headers=None):
"""Retrieve JSON metadata from container metadata.
:type full_url: str
:param full_url: The full URL of the metadata service.
This should include the scheme as well, e.g
"http://localhost:123/foo"
"""
self._validate_allowed_url(full_url)
return self._retrieve_credentials(full_url, headers)
def _validate_allowed_url(self, full_url):
parsed = botocore.compat.urlparse(full_url)
is_whitelisted_host = self._check_if_whitelisted_host(
parsed.hostname)
if not is_whitelisted_host:
raise ValueError(
"Unsupported host '%s'. Can only "
"retrieve metadata from these hosts: %s" %
(parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))
def _check_if_whitelisted_host(self, host):
if host in self._ALLOWED_HOSTS:
return True
return False
def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return self._retrieve_credentials(full_url)
def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return self._get_response(
full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
def _get_response(self, full_url, headers, timeout):
try:
AWSRequest = botocore.awsrequest.AWSRequest
request = AWSRequest(method='GET', url=full_url, headers=headers)
response = self._session.send(request.prepare())
response_text = response.content.decode('utf-8')
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%s) from ECS metadata: %s"
) % (response.status_code, response_text))
try:
return json.loads(response_text)
except ValueError:
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, response_text)
raise MetadataRetrievalError(error_msg=error_msg)
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
def full_url(self, relative_uri):
return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
def get_environ_proxies(url):
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
    # NOTE: requests allows ip/cidr entries in the no_proxy env var, which we
    # don't currently support, as urllib only checks the DNS suffix
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
if proxy_bypass(urlparse(url).netloc):
return True
except (TypeError, socket.gaierror):
pass
return False
def get_encoding_from_headers(headers, default='ISO-8859-1'):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:param default: default encoding if the content-type is text
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return default
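# For example, a header dict like {'content-type': 'text/html; charset=utf-8'}
# yields 'utf-8', while a bare 'text/html' falls back to the default encoding.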
def calculate_md5(body, **kwargs):
if isinstance(body, (bytes, bytearray)):
binary_md5 = _calculate_md5_from_bytes(body)
else:
binary_md5 = _calculate_md5_from_file(body)
return base64.b64encode(binary_md5).decode('ascii')
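# As a quick sanity check, an empty body hashes to the well-known empty-MD5
# value: calculate_md5(b'') == '1B2M2Y8AsgTpgAmY7PhCfg=='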
def _calculate_md5_from_bytes(body_bytes):
md5 = get_md5(body_bytes)
return md5.digest()
def _calculate_md5_from_file(fileobj):
start_position = fileobj.tell()
md5 = get_md5()
for chunk in iter(lambda: fileobj.read(1024 * 1024), b''):
md5.update(chunk)
fileobj.seek(start_position)
return md5.digest()
def conditionally_calculate_md5(params, **kwargs):
"""Only add a Content-MD5 if the system supports it."""
headers = params['headers']
body = params['body']
if MD5_AVAILABLE and body is not None and 'Content-MD5' not in headers:
md5_digest = calculate_md5(body, **kwargs)
params['headers']['Content-MD5'] = md5_digest
class FileWebIdentityTokenLoader(object):
def __init__(self, web_identity_token_path, _open=open):
self._web_identity_token_path = web_identity_token_path
self._open = _open
def __call__(self):
with self._open(self._web_identity_token_path) as token_file:
return token_file.read()
class SSOTokenLoader(object):
def __init__(self, cache=None):
if cache is None:
cache = {}
self._cache = cache
def _generate_cache_key(self, start_url):
return hashlib.sha1(start_url.encode('utf-8')).hexdigest()
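    # Illustrative sketch (hypothetical start URL): the cache key for
    # 'https://d-abc123.awsapps.com/start' is the 40-character hex SHA-1
    # digest of that string, which is typically also the filename of the
    # cached token on disk.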
def __call__(self, start_url):
cache_key = self._generate_cache_key(start_url)
try:
token = self._cache[cache_key]
return token['accessToken']
except KeyError:
logger.debug('Failed to load SSO token:', exc_info=True)
error_msg = (
'The SSO access token has either expired or is otherwise '
'invalid.'
)
raise SSOTokenLoadError(error_msg=error_msg)
| 88,236 |
Python
| 37.414018 | 102 | 0.612437 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/serialize.py
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various AWS protocol types.
These classes essentially take user input and a model object that
represents what the expected input should look like, and they return
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
import calendar
import datetime
from xml.etree import ElementTree
from botocore.compat import six
from botocore.compat import json, formatdate
from botocore.utils import parse_to_aware_datetime
from botocore.utils import percent_encode
from botocore.utils import is_json_value_header
from botocore.utils import conditionally_calculate_md5
from botocore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (i.e OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
More specifically, this method returns information about
        parts of the HTTP request; it does not enforce a particular
interface or standard for an HTTP request. It instead returns
a dictionary of:
* 'url_path'
* 'host_prefix'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'host_prefix': 'value.',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': {},
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
if isinstance(value, datetime.datetime):
value = self._timestamp_unixtimestamp(value)
return formatdate(value, usegmt=True)
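    # For one fixed instant, the three formats look like (illustration):
    #   iso8601       -> '2014-01-01T00:00:00Z'
    #   unixtimestamp -> 1388534400
    #   rfc822        -> 'Wed, 01 Jan 2014 00:00:00 GMT'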
def _convert_timestamp_to_str(self, value, timestamp_format=None):
if timestamp_format is None:
timestamp_format = self.TIMESTAMP_FORMAT
timestamp_format = timestamp_format.lower()
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % timestamp_format)
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
# Returns the base64-encoded version of value, handling
# both strings and bytes. The returned value is a string
# via the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
def _expand_host_prefix(self, parameters, operation_model):
operation_endpoint = operation_model.endpoint
if operation_endpoint is None:
return None
host_prefix_expression = operation_endpoint['hostPrefix']
input_members = operation_model.input_shape.members
host_labels = [
member for member, shape in input_members.items()
if shape.serialization.get('hostLabel')
]
format_kwargs = dict((name, parameters[name]) for name in host_labels)
return host_prefix_expression.format(**format_kwargs)
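    # Sketch with a hypothetical model: given an operation endpoint of
    # {'hostPrefix': '{AccountId}.'} and an input member AccountId carrying
    # the 'hostLabel' trait, parameters {'AccountId': '123'} expand the
    # prefix to '123.'.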
def _prepare_additional_traits(self, request, operation_model):
"""Determine if additional traits are required for given model"""
if operation_model.http_checksum_required:
conditionally_calculate_md5(request)
return request
class QuerySerializer(Serializer):
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
# The query serializer only deals with body params so
# that's what we hand off the _serialize_* methods.
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
serialized['body'] = body_params
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
# prefix: The incrementally built up prefix for the serialized
# key (i.e Foo.bar.members.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
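    # For illustration: serializing {'Foo': {'Bar': 'baz'}} against a nested
    # structure shape produces the single flat key {'Foo.Bar': 'baz'}.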
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
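    # For example, a non-flattened list ['a', 'b'] under prefix 'Values'
    # serializes to {'Values.member.1': 'a', 'Values.member.2': 'b'};
    # a flattened list drops the '.member' segment ('Values.1', 'Values.2').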
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
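    # For example, {'Color': 'red'} under prefix 'Attributes' (not flattened,
    # default key/value names) becomes
    # {'Attributes.entry.1.key': 'Color', 'Attributes.entry.1.value': 'red'}.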
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(
value, shape.serialization.get('timestampFormat'))
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
    The EC2 protocol is almost, but not exactly, the same as the query
    protocol. This class encapsulates those differences. The model
    will be marked with a ``protocol`` of ``ec2``, so you don't need
    to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
class JSONSerializer(Serializer):
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
json_version = operation_model.metadata['jsonVersion']
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/x-amz-json-%s' % json_version,
}
body = self.MAP_TYPE()
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(
value, shape.serialization.get('timestampFormat'))
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only variance between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
are the same and logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
QUERY_STRING_TIMESTAMP_FORMAT = 'iso8601'
HEADER_TIMESTAMP_FORMAT = 'rfc822'
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
        # While the ``serialized`` dict holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
host_prefix = self._expand_host_prefix(parameters, operation_model)
if host_prefix is not None:
serialized['host_prefix'] = host_prefix
serialized = self._prepare_additional_traits(serialized,
operation_model)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
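    # For example (hypothetical params): rendering '/{Bucket}/{Key+}' with
    # {'Bucket': 'my bucket', 'Key': 'a/b c'} percent-encodes the plain
    # label fully ('my%20bucket') but keeps '/' in the greedy label,
    # yielding '/my%20bucket/a/b%20c'.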
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
            # If there's a payload member, we serialize that
            # member to the body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
        # This takes a user provided input parameter (``param_name`` and
        # ``param_value``) and figures out where it goes in the request dict.
# Some params are HTTP headers, some are used in the URI, some
# are in the request body. This method deals with this.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
elif isinstance(param_value, bool):
partitioned['query_string_kwargs'][
key_name] = str(param_value).lower()
elif member.type_name == 'timestamp':
timestamp_format = member.serialization.get(
'timestampFormat', self.QUERY_STRING_TIMESTAMP_FORMAT)
partitioned['query_string_kwargs'][
key_name] = self._convert_timestamp_to_str(
param_value, timestamp_format
)
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
            # 'headers' is a bit of an oddball. The ``key_name``
            # is really a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
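    # For example (as with S3 object metadata): a header_prefix of
    # 'x-amz-meta-' and user input {'md5chksum': 'abc'} produces the single
    # header 'x-amz-meta-md5chksum: abc'.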
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
timestamp_format = shape.serialization.get(
'timestampFormat', self.HEADER_TIMESTAMP_FORMAT)
return self._convert_timestamp_to_str(timestamp, timestamp_format)
elif is_json_value_header(shape):
# Serialize with no spaces after separators to save space in
# the header.
return self._get_base64(json.dumps(value, separators=(',', ':')))
else:
return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
            if value is None:
                # Don't serialize any param whose value is None.
                continue
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
# For scalar types, the 'params' attr is actually just a scalar
# value representing the data we need to serialize as a boolean.
# It will either be 'true' or 'false'
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(
params, shape.serialization.get('timestampFormat'))
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = six.text_type(params)
SERIALIZERS = {
'ec2': EC2Serializer,
'query': QuerySerializer,
'json': JSONSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
}
| 30,430 |
Python
| 42.164539 | 79 | 0.616891 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/model.py
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Abstractions to interact with service models."""
from collections import defaultdict
from botocore.utils import CachedProperty, instance_cache, hyphenize_service_id
from botocore.compat import OrderedDict
from botocore.exceptions import MissingServiceIdError
from botocore.exceptions import UndefinedModelAttributeError
NOT_SET = object()
class NoShapeFoundError(Exception):
pass
class InvalidShapeError(Exception):
pass
class OperationNotFoundError(Exception):
pass
class InvalidShapeReferenceError(Exception):
pass
class ServiceId(str):
def hyphenize(self):
return hyphenize_service_id(self)
class Shape(object):
"""Object representing a shape from the service model."""
# To simplify serialization logic, all shape params that are
# related to serialization are moved from the top level hash into
# a 'serialization' hash. This list below contains the names of all
# the attributes that should be moved.
SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
'payload', 'streaming', 'timestampFormat',
'xmlNamespace', 'resultWrapper', 'xmlAttribute',
'eventstream', 'event', 'eventheader', 'eventpayload',
'jsonvalue', 'timestampFormat', 'hostLabel']
METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum',
'idempotencyToken', 'error', 'exception',
'endpointdiscoveryid', 'retryable']
MAP_TYPE = OrderedDict
def __init__(self, shape_name, shape_model, shape_resolver=None):
"""
:type shape_name: string
:param shape_name: The name of the shape.
:type shape_model: dict
:param shape_model: The shape model. This would be the value
associated with the key in the "shapes" dict of the
service model (i.e ``model['shapes'][shape_name]``)
:type shape_resolver: botocore.model.ShapeResolver
:param shape_resolver: A shape resolver object. This is used to
resolve references to other shapes. For scalar shape types
(string, integer, boolean, etc.), this argument is not
required. If a shape_resolver is not provided for a complex
type, then a ``ValueError`` will be raised when an attempt
to resolve a shape is made.
"""
self.name = shape_name
self.type_name = shape_model['type']
self.documentation = shape_model.get('documentation', '')
self._shape_model = shape_model
if shape_resolver is None:
# If a shape_resolver is not provided, we create an object
# that will throw errors if you attempt to resolve
# a shape. This is actually ok for scalar shapes
# because they don't need to resolve shapes and shouldn't
# be required to provide an object they won't use.
shape_resolver = UnresolvableShapeMap()
self._shape_resolver = shape_resolver
self._cache = {}
@CachedProperty
def serialization(self):
"""Serialization information about the shape.
This contains information that may be needed for input serialization
or response parsing. This can include:
* name
* queryName
* flattened
* location
* payload
* streaming
* xmlNamespace
* resultWrapper
* xmlAttribute
* jsonvalue
* timestampFormat
:rtype: dict
:return: Serialization information about the shape.
"""
model = self._shape_model
serialization = {}
for attr in self.SERIALIZED_ATTRS:
if attr in self._shape_model:
serialization[attr] = model[attr]
# For consistency, locationName is renamed to just 'name'.
if 'locationName' in serialization:
serialization['name'] = serialization.pop('locationName')
return serialization
@CachedProperty
def metadata(self):
"""Metadata about the shape.
        This contains optional information about the shape, including:
* min
* max
* enum
* sensitive
* required
* idempotencyToken
:rtype: dict
:return: Metadata about the shape.
"""
model = self._shape_model
metadata = {}
for attr in self.METADATA_ATTRS:
if attr in self._shape_model:
metadata[attr] = model[attr]
return metadata
@CachedProperty
def required_members(self):
"""A list of members that are required.
A structure shape can define members that are required.
        This property returns a list of the required members. If there
        are no required members, an empty list is returned.
"""
return self.metadata.get('required', [])
def _resolve_shape_ref(self, shape_ref):
return self._shape_resolver.resolve_shape_ref(shape_ref)
def __repr__(self):
return "<%s(%s)>" % (self.__class__.__name__,
self.name)
@property
def event_stream_name(self):
return None
class StructureShape(Shape):
@CachedProperty
def members(self):
members = self._shape_model['members']
# The members dict looks like:
# 'members': {
# 'MemberName': {'shape': 'shapeName'},
# 'MemberName2': {'shape': 'shapeName'},
# }
# We return a dict of member name to Shape object.
shape_members = self.MAP_TYPE()
for name, shape_ref in members.items():
shape_members[name] = self._resolve_shape_ref(shape_ref)
return shape_members
@CachedProperty
def event_stream_name(self):
for member_name, member in self.members.items():
if member.serialization.get('eventstream'):
return member_name
return None
@CachedProperty
def error_code(self):
if not self.metadata.get('exception', False):
return None
error_metadata = self.metadata.get("error", {})
code = error_metadata.get("code")
if code:
return code
# Use the exception name if there is no explicit code modeled
return self.name
class ListShape(Shape):
@CachedProperty
def member(self):
return self._resolve_shape_ref(self._shape_model['member'])
class MapShape(Shape):
@CachedProperty
def key(self):
return self._resolve_shape_ref(self._shape_model['key'])
@CachedProperty
def value(self):
return self._resolve_shape_ref(self._shape_model['value'])
class StringShape(Shape):
@CachedProperty
def enum(self):
return self.metadata.get('enum', [])
class ServiceModel(object):
"""
:ivar service_description: The parsed service description dictionary.
"""
def __init__(self, service_description, service_name=None):
"""
:type service_description: dict
:param service_description: The service description model. This value
is obtained from a botocore.loader.Loader, or from directly loading
the file yourself::
service_description = json.load(
open('/path/to/service-description-model.json'))
model = ServiceModel(service_description)
:type service_name: str
:param service_name: The name of the service. Normally this is
the endpoint prefix defined in the service_description. However,
you can override this value to provide a more convenient name.
This is done in a few places in botocore (ses instead of email,
emr instead of elasticmapreduce). If this value is not provided,
it will default to the endpointPrefix defined in the model.
"""
self._service_description = service_description
# We want clients to be able to access metadata directly.
self.metadata = service_description.get('metadata', {})
self._shape_resolver = ShapeResolver(
service_description.get('shapes', {}))
self._signature_version = NOT_SET
self._service_name = service_name
self._instance_cache = {}
def shape_for(self, shape_name, member_traits=None):
return self._shape_resolver.get_shape_by_name(
shape_name, member_traits)
def shape_for_error_code(self, error_code):
return self._error_code_cache.get(error_code, None)
@CachedProperty
def _error_code_cache(self):
error_code_cache = {}
for error_shape in self.error_shapes:
code = error_shape.error_code
error_code_cache[code] = error_shape
return error_code_cache
def resolve_shape_ref(self, shape_ref):
return self._shape_resolver.resolve_shape_ref(shape_ref)
@CachedProperty
def shape_names(self):
return list(self._service_description.get('shapes', {}))
@CachedProperty
def error_shapes(self):
error_shapes = []
for shape_name in self.shape_names:
error_shape = self.shape_for(shape_name)
if error_shape.metadata.get('exception', False):
error_shapes.append(error_shape)
return error_shapes
@instance_cache
def operation_model(self, operation_name):
try:
model = self._service_description['operations'][operation_name]
except KeyError:
raise OperationNotFoundError(operation_name)
return OperationModel(model, self, operation_name)
@CachedProperty
def documentation(self):
return self._service_description.get('documentation', '')
@CachedProperty
def operation_names(self):
return list(self._service_description.get('operations', []))
@CachedProperty
def service_name(self):
"""The name of the service.
This defaults to the endpointPrefix defined in the service model.
        However, this value can be overridden when a ``ServiceModel`` is
created. If a service_name was not provided when the ``ServiceModel``
was created and if there is no endpointPrefix defined in the
service model, then an ``UndefinedModelAttributeError`` exception
will be raised.
"""
if self._service_name is not None:
return self._service_name
else:
return self.endpoint_prefix
@CachedProperty
def service_id(self):
try:
return ServiceId(self._get_metadata_property('serviceId'))
except UndefinedModelAttributeError:
raise MissingServiceIdError(
service_name=self._service_name
)
@CachedProperty
def signing_name(self):
"""The name to use when computing signatures.
If the model does not define a signing name, this
value will be the endpoint prefix defined in the model.
"""
signing_name = self.metadata.get('signingName')
if signing_name is None:
signing_name = self.endpoint_prefix
return signing_name
@CachedProperty
def api_version(self):
return self._get_metadata_property('apiVersion')
@CachedProperty
def protocol(self):
return self._get_metadata_property('protocol')
@CachedProperty
def endpoint_prefix(self):
return self._get_metadata_property('endpointPrefix')
@CachedProperty
def endpoint_discovery_operation(self):
for operation in self.operation_names:
model = self.operation_model(operation)
if model.is_endpoint_discovery_operation:
return model
@CachedProperty
def endpoint_discovery_required(self):
for operation in self.operation_names:
model = self.operation_model(operation)
if (model.endpoint_discovery is not None and
model.endpoint_discovery.get('required')):
return True
return False
def _get_metadata_property(self, name):
try:
return self.metadata[name]
except KeyError:
raise UndefinedModelAttributeError(
'"%s" not defined in the metadata of the model: %s' %
(name, self))
# Signature version is one of the rare properties
    # that can be modified so a CachedProperty is not used here.
@property
def signature_version(self):
if self._signature_version is NOT_SET:
signature_version = self.metadata.get('signatureVersion')
self._signature_version = signature_version
return self._signature_version
@signature_version.setter
def signature_version(self, value):
self._signature_version = value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.service_name)
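# Illustrative usage, assuming ``service_description`` was loaded as shown
# in the ``__init__`` docstring above (all names here are hypothetical):
#
#     >>> service_model = ServiceModel(service_description)
#     >>> service_model.operation_names
#     ['CreateThing', 'DeleteThing']
#     >>> service_model.operation_model('CreateThing').http
#     {'method': 'POST', 'requestUri': '/'}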
class OperationModel(object):
def __init__(self, operation_model, service_model, name=None):
"""
:type operation_model: dict
:param operation_model: The operation model. This comes from the
service model, and is the value associated with the operation
            name in the service model (i.e. ``model['operations'][op_name]``).
:type service_model: botocore.model.ServiceModel
:param service_model: The service model associated with the operation.
:type name: string
:param name: The operation name. This is the operation name exposed to
the users of this model. This can potentially be different from
the "wire_name", which is the operation name that *must* by
provided over the wire. For example, given::
"CreateCloudFrontOriginAccessIdentity":{
"name":"CreateCloudFrontOriginAccessIdentity2014_11_06",
...
}
The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``,
but the ``self.wire_name`` would be
``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the
value we must send in the corresponding HTTP request.
"""
self._operation_model = operation_model
self._service_model = service_model
self._api_name = name
# Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metadata of the service.
self._wire_name = operation_model.get('name')
self.metadata = service_model.metadata
self.http = operation_model.get('http', {})
@CachedProperty
def name(self):
if self._api_name is not None:
return self._api_name
else:
return self.wire_name
@property
def wire_name(self):
"""The wire name of the operation.
        In many situations this is the same value as
        ``name``, but in some services the operation name
        exposed to the user is different from the operation name
        we send across the wire (e.g. cloudfront).
Any serialization code should use ``wire_name``.
"""
return self._operation_model.get('name')
@property
def service_model(self):
return self._service_model
@CachedProperty
def documentation(self):
return self._operation_model.get('documentation', '')
@CachedProperty
def deprecated(self):
return self._operation_model.get('deprecated', False)
@CachedProperty
def endpoint_discovery(self):
# Explicit None default. An empty dictionary for this trait means it is
# enabled but not required to be used.
return self._operation_model.get('endpointdiscovery', None)
@CachedProperty
def is_endpoint_discovery_operation(self):
return self._operation_model.get('endpointoperation', False)
@CachedProperty
def input_shape(self):
if 'input' not in self._operation_model:
# Some operations do not accept any input and do not define an
# input shape.
return None
return self._service_model.resolve_shape_ref(
self._operation_model['input'])
@CachedProperty
def output_shape(self):
if 'output' not in self._operation_model:
# Some operations do not define an output shape,
# in which case we return None to indicate the
# operation has no expected output.
return None
return self._service_model.resolve_shape_ref(
self._operation_model['output'])
@CachedProperty
def idempotent_members(self):
input_shape = self.input_shape
if not input_shape:
return []
return [name for (name, shape) in input_shape.members.items()
if 'idempotencyToken' in shape.metadata and
shape.metadata['idempotencyToken']]
@CachedProperty
def auth_type(self):
return self._operation_model.get('authtype')
@CachedProperty
def error_shapes(self):
shapes = self._operation_model.get("errors", [])
return list(self._service_model.resolve_shape_ref(s) for s in shapes)
@CachedProperty
def endpoint(self):
return self._operation_model.get('endpoint')
@CachedProperty
def http_checksum_required(self):
return self._operation_model.get('httpChecksumRequired', False)
@CachedProperty
def has_event_stream_input(self):
return self.get_event_stream_input() is not None
@CachedProperty
def has_event_stream_output(self):
return self.get_event_stream_output() is not None
def get_event_stream_input(self):
return self._get_event_stream(self.input_shape)
def get_event_stream_output(self):
return self._get_event_stream(self.output_shape)
def _get_event_stream(self, shape):
"""Returns the event stream member's shape if any or None otherwise."""
if shape is None:
return None
event_name = shape.event_stream_name
if event_name:
return shape.members[event_name]
return None
@CachedProperty
def has_streaming_input(self):
return self.get_streaming_input() is not None
@CachedProperty
def has_streaming_output(self):
return self.get_streaming_output() is not None
def get_streaming_input(self):
return self._get_streaming_body(self.input_shape)
def get_streaming_output(self):
return self._get_streaming_body(self.output_shape)
def _get_streaming_body(self, shape):
"""Returns the streaming member's shape if any; or None otherwise."""
if shape is None:
return None
payload = shape.serialization.get('payload')
if payload is not None:
payload_shape = shape.members[payload]
if payload_shape.type_name == 'blob':
return payload_shape
return None
def __repr__(self):
return '%s(name=%s)' % (self.__class__.__name__, self.name)
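# Sketch of typical introspection on an operation model (the operation
# name here is hypothetical):
#
#     >>> op = service_model.operation_model('PutThing')
#     >>> list(op.input_shape.members)   # member names of the input structure
#     >>> op.has_streaming_input         # True when the payload member is a blob
#     >>> op.error_shapes                # StructureShapes this operation can raise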
class ShapeResolver(object):
"""Resolves shape references."""
# Any type not in this mapping will default to the Shape class.
SHAPE_CLASSES = {
'structure': StructureShape,
'list': ListShape,
'map': MapShape,
'string': StringShape
}
def __init__(self, shape_map):
self._shape_map = shape_map
self._shape_cache = {}
def get_shape_by_name(self, shape_name, member_traits=None):
try:
shape_model = self._shape_map[shape_name]
except KeyError:
raise NoShapeFoundError(shape_name)
try:
shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
except KeyError:
raise InvalidShapeError("Shape is missing required key 'type': %s"
% shape_model)
if member_traits:
shape_model = shape_model.copy()
shape_model.update(member_traits)
result = shape_cls(shape_name, shape_model, self)
return result
def resolve_shape_ref(self, shape_ref):
# A shape_ref is a dict that has a 'shape' key that
# refers to a shape name as well as any additional
# member traits that are then merged over the shape
# definition. For example:
# {"shape": "StringType", "locationName": "Foobar"}
if len(shape_ref) == 1 and 'shape' in shape_ref:
# It's just a shape ref with no member traits, we can avoid
# a .copy(). This is the common case so it's specifically
# called out here.
return self.get_shape_by_name(shape_ref['shape'])
else:
member_traits = shape_ref.copy()
try:
shape_name = member_traits.pop('shape')
except KeyError:
raise InvalidShapeReferenceError(
"Invalid model, missing shape reference: %s" % shape_ref)
return self.get_shape_by_name(shape_name, member_traits)
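# For example, a ref carrying an extra member trait merges that trait into
# a copy of the target shape's model (hypothetical one-entry shape map):
#
#     >>> resolver = ShapeResolver({'StringType': {'type': 'string'}})
#     >>> shape = resolver.resolve_shape_ref(
#     ...     {'shape': 'StringType', 'locationName': 'Foobar'})
#     >>> shape.serialization['name']
#     'Foobar'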
class UnresolvableShapeMap(object):
"""A ShapeResolver that will throw ValueErrors when shapes are resolved.
"""
def get_shape_by_name(self, shape_name, member_traits=None):
raise ValueError("Attempted to lookup shape '%s', but no shape "
"map was provided.")
def resolve_shape_ref(self, shape_ref):
raise ValueError("Attempted to resolve shape '%s', but no shape "
"map was provided.")
class DenormalizedStructureBuilder(object):
"""Build a StructureShape from a denormalized model.
This is a convenience builder class that makes it easy to construct
``StructureShape``s based on a denormalized model.
It will handle the details of creating unique shape names and creating
the appropriate shape map needed by the ``StructureShape`` class.
Example usage::
builder = DenormalizedStructureBuilder()
shape = builder.with_members({
'A': {
'type': 'structure',
'members': {
'B': {
'type': 'structure',
'members': {
'C': {
'type': 'string',
}
}
}
}
}
}).build_model()
# ``shape`` is now an instance of botocore.model.StructureShape
    :type name: str
    :param name: The name to use for the top level structure shape. If
                 not provided, a unique shape name is generated
                 automatically.
    """
def __init__(self, name=None):
self.members = OrderedDict()
self._name_generator = ShapeNameGenerator()
        if name is None:
            name = self._name_generator.new_shape_name('structure')
        self.name = name
def with_members(self, members):
"""
:type members: dict
:param members: The denormalized members.
:return: self
"""
self._members = members
return self
def build_model(self):
"""Build the model based on the provided members.
:rtype: botocore.model.StructureShape
:return: The built StructureShape object.
"""
shapes = OrderedDict()
denormalized = {
'type': 'structure',
'members': self._members,
}
self._build_model(denormalized, shapes, self.name)
resolver = ShapeResolver(shape_map=shapes)
return StructureShape(shape_name=self.name,
shape_model=shapes[self.name],
shape_resolver=resolver)
def _build_model(self, model, shapes, shape_name):
if model['type'] == 'structure':
shapes[shape_name] = self._build_structure(model, shapes)
elif model['type'] == 'list':
shapes[shape_name] = self._build_list(model, shapes)
elif model['type'] == 'map':
shapes[shape_name] = self._build_map(model, shapes)
elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float',
'timestamp', 'long', 'double', 'char']:
shapes[shape_name] = self._build_scalar(model)
else:
raise InvalidShapeError("Unknown shape type: %s" % model['type'])
def _build_structure(self, model, shapes):
members = OrderedDict()
shape = self._build_initial_shape(model)
shape['members'] = members
for name, member_model in model['members'].items():
member_shape_name = self._get_shape_name(member_model)
members[name] = {'shape': member_shape_name}
self._build_model(member_model, shapes, member_shape_name)
return shape
def _build_list(self, model, shapes):
member_shape_name = self._get_shape_name(model)
shape = self._build_initial_shape(model)
shape['member'] = {'shape': member_shape_name}
self._build_model(model['member'], shapes, member_shape_name)
return shape
def _build_map(self, model, shapes):
key_shape_name = self._get_shape_name(model['key'])
value_shape_name = self._get_shape_name(model['value'])
shape = self._build_initial_shape(model)
shape['key'] = {'shape': key_shape_name}
shape['value'] = {'shape': value_shape_name}
self._build_model(model['key'], shapes, key_shape_name)
self._build_model(model['value'], shapes, value_shape_name)
return shape
def _build_initial_shape(self, model):
shape = {
'type': model['type'],
}
if 'documentation' in model:
shape['documentation'] = model['documentation']
for attr in Shape.METADATA_ATTRS:
if attr in model:
shape[attr] = model[attr]
return shape
def _build_scalar(self, model):
return self._build_initial_shape(model)
def _get_shape_name(self, model):
if 'shape_name' in model:
return model['shape_name']
else:
return self._name_generator.new_shape_name(model['type'])
class ShapeNameGenerator(object):
"""Generate unique shape names for a type.
This class can be used in conjunction with the DenormalizedStructureBuilder
to generate unique shape names for a given type.
"""
def __init__(self):
self._name_cache = defaultdict(int)
def new_shape_name(self, type_name):
"""Generate a unique shape name.
This method will guarantee a unique shape name each time it is
called with the same type.
::
>>> s = ShapeNameGenerator()
>>> s.new_shape_name('structure')
'StructureType1'
>>> s.new_shape_name('structure')
'StructureType2'
>>> s.new_shape_name('list')
'ListType1'
>>> s.new_shape_name('list')
'ListType2'
:type type_name: string
:param type_name: The type name (structure, list, map, string, etc.)
:rtype: string
:return: A unique shape name for the given type
"""
self._name_cache[type_name] += 1
current_index = self._name_cache[type_name]
return '%sType%s' % (type_name.capitalize(),
current_index)
| 28,352 |
Python
| 33.367273 | 79 | 0.60553 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/retryhandler.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import functools
import logging
from binascii import crc32
from botocore.exceptions import (
ChecksumError, EndpointConnectionError, ReadTimeoutError,
ConnectionError, ConnectionClosedError,
)
logger = logging.getLogger(__name__)
# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
EXCEPTION_MAP = {
'GENERAL_CONNECTION_ERROR': [
ConnectionError, ConnectionClosedError, ReadTimeoutError,
EndpointConnectionError
],
}
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: %s" % base)
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
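# Worked example: with base=1 and growth_factor=2, attempts 1, 2 and 3
# sleep for 1 * 2**0 == 1, 1 * 2**1 == 2 and 1 * 2**2 == 4 seconds:
#
#     >>> delay_exponential(base=1, growth_factor=2, attempts=3)
#     4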
def create_exponential_delay_function(base, growth_factor):
"""Create an exponential delay function based on the attempts.
This is used so that you only have to pass it the attempts
parameter to calculate the delay.
"""
return functools.partial(
delay_exponential, base=base, growth_factor=growth_factor)
def create_retry_handler(config, operation_name=None):
checker = create_checker_from_retry_config(
config, operation_name=operation_name)
action = create_retry_action_from_config(
config, operation_name=operation_name)
return RetryHandler(checker=checker, action=action)
def create_retry_action_from_config(config, operation_name=None):
# The spec has the possibility of supporting per policy
# actions, but right now, we assume this comes from the
# default section, which means that delay functions apply
# for every policy in the retry config (per service).
delay_config = config['__default__']['delay']
if delay_config['type'] == 'exponential':
return create_exponential_delay_function(
base=delay_config['base'],
growth_factor=delay_config['growth_factor'])
def create_checker_from_retry_config(config, operation_name=None):
checkers = []
max_attempts = None
retryable_exceptions = []
if '__default__' in config:
policies = config['__default__'].get('policies', [])
max_attempts = config['__default__']['max_attempts']
for key in policies:
current_config = policies[key]
checkers.append(_create_single_checker(current_config))
retry_exception = _extract_retryable_exception(current_config)
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if operation_name is not None and config.get(operation_name) is not None:
operation_policies = config[operation_name]['policies']
for key in operation_policies:
checkers.append(_create_single_checker(operation_policies[key]))
retry_exception = _extract_retryable_exception(
operation_policies[key])
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if len(checkers) == 1:
# Don't need to use a MultiChecker
return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
else:
multi_checker = MultiChecker(checkers)
return MaxAttemptsDecorator(
multi_checker, max_attempts=max_attempts,
retryable_exceptions=tuple(retryable_exceptions))
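# A minimal, hand-written sketch of the retry config these factories
# consume (mirroring the layout of botocore's bundled _retry.json):
#
#     {
#         "__default__": {
#             "max_attempts": 5,
#             "delay": {"type": "exponential",
#                       "base": "rand",
#                       "growth_factor": 2},
#             "policies": {
#                 "general_socket_errors": {
#                     "applies_when": {
#                         "socket_errors": ["GENERAL_CONNECTION_ERROR"]}},
#                 "general_server_error": {
#                     "applies_when": {
#                         "response": {"http_status_code": 500}}}
#             }
#         }
#     }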
def _create_single_checker(config):
if 'response' in config['applies_when']:
return _create_single_response_checker(
config['applies_when']['response'])
elif 'socket_errors' in config['applies_when']:
return ExceptionRaiser()
def _create_single_response_checker(response):
if 'service_error_code' in response:
checker = ServiceErrorCodeChecker(
status_code=response['http_status_code'],
error_code=response['service_error_code'])
elif 'http_status_code' in response:
checker = HTTPStatusCodeChecker(
status_code=response['http_status_code'])
elif 'crc32body' in response:
checker = CRC32Checker(header=response['crc32body'])
else:
# TODO: send a signal.
raise ValueError("Unknown retry policy: %s" % config)
return checker
def _extract_retryable_exception(config):
applies_when = config['applies_when']
if 'crc32body' in applies_when.get('response', {}):
return [ChecksumError]
elif 'socket_errors' in applies_when:
exceptions = []
for name in applies_when['socket_errors']:
exceptions.extend(EXCEPTION_MAP[name])
return exceptions
class RetryHandler(object):
"""Retry handler.
The retry handler takes two params, ``checker`` object
and an ``action`` object.
    The ``checker`` object must be a callable that, based on a response
    and an attempt number, determines whether or not sufficient criteria for
    a retry have been met. If this is the case then the ``action`` object
(which also is a callable) determines what needs to happen in the event
of a retry.
"""
def __init__(self, checker, action):
self._checker = checker
self._action = action
def __call__(self, attempts, response, caught_exception, **kwargs):
"""Handler for a retry.
Intended to be hooked up to an event handler (hence the **kwargs),
this will process retries appropriately.
"""
if self._checker(attempts, response, caught_exception):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
logger.debug("No retry needed.")
class BaseChecker(object):
"""Base class for retry checkers.
Each class is responsible for checking a single criteria that determines
    whether or not a retry should happen.
"""
def __call__(self, attempt_number, response, caught_exception):
"""Determine if retry criteria matches.
Note that either ``response`` is not None and ``caught_exception`` is
None or ``response`` is None and ``caught_exception`` is not None.
:type attempt_number: int
:param attempt_number: The total number of times we've attempted
to send the request.
:param response: The HTTP response (if one was received).
:type caught_exception: Exception
:param caught_exception: Any exception that was caught while trying to
send the HTTP response.
        :return: True, if the retry criteria matches (and therefore a retry
            should occur). False if the criteria does not match.
"""
# The default implementation allows subclasses to not have to check
# whether or not response is None or not.
if response is not None:
return self._check_response(attempt_number, response)
elif caught_exception is not None:
return self._check_caught_exception(
attempt_number, caught_exception)
else:
raise ValueError("Both response and caught_exception are None.")
def _check_response(self, attempt_number, response):
pass
def _check_caught_exception(self, attempt_number, caught_exception):
pass
class MaxAttemptsDecorator(BaseChecker):
"""Allow retries up to a maximum number of attempts.
This will pass through calls to the decorated retry checker, provided
that the number of attempts does not exceed max_attempts. It will
also catch any retryable_exceptions passed in. Once max_attempts has
been exceeded, then False will be returned or the retryable_exceptions
that was previously being caught will be raised.
"""
def __init__(self, checker, max_attempts, retryable_exceptions=None):
self._checker = checker
self._max_attempts = max_attempts
self._retryable_exceptions = retryable_exceptions
def __call__(self, attempt_number, response, caught_exception):
should_retry = self._should_retry(attempt_number, response,
caught_exception)
if should_retry:
if attempt_number >= self._max_attempts:
# explicitly set MaxAttemptsReached
if response is not None and 'ResponseMetadata' in response[1]:
response[1]['ResponseMetadata']['MaxAttemptsReached'] = True
logger.debug("Reached the maximum number of retry "
"attempts: %s", attempt_number)
return False
else:
return should_retry
else:
return False
def _should_retry(self, attempt_number, response, caught_exception):
if self._retryable_exceptions and \
attempt_number < self._max_attempts:
try:
return self._checker(attempt_number, response, caught_exception)
except self._retryable_exceptions as e:
logger.debug("retry needed, retryable exception caught: %s",
e, exc_info=True)
return True
else:
# If we've exceeded the max attempts we just let the exception
            # propagate if one has occurred.
return self._checker(attempt_number, response, caught_exception)
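# A hand-assembled sketch of how the checker pieces compose; in practice
# create_retry_handler() wires this up from the JSON retry config:
#
#     >>> checker = MaxAttemptsDecorator(
#     ...     HTTPStatusCodeChecker(status_code=500), max_attempts=5)
#     >>> action = create_exponential_delay_function(base=1, growth_factor=2)
#     >>> handler = RetryHandler(checker=checker, action=action)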
class HTTPStatusCodeChecker(BaseChecker):
def __init__(self, status_code):
self._status_code = status_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
logger.debug(
"retry needed: retryable HTTP status code received: %s",
self._status_code)
return True
else:
return False
class ServiceErrorCodeChecker(BaseChecker):
def __init__(self, status_code, error_code):
self._status_code = status_code
self._error_code = error_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
actual_error_code = response[1].get('Error', {}).get('Code')
if actual_error_code == self._error_code:
logger.debug(
"retry needed: matching HTTP status and error code seen: "
"%s, %s", self._status_code, self._error_code)
return True
return False
class MultiChecker(BaseChecker):
def __init__(self, checkers):
self._checkers = checkers
def __call__(self, attempt_number, response, caught_exception):
for checker in self._checkers:
checker_response = checker(attempt_number, response,
caught_exception)
if checker_response:
return checker_response
return False
class CRC32Checker(BaseChecker):
def __init__(self, header):
# The header where the expected crc32 is located.
self._header_name = header
def _check_response(self, attempt_number, response):
http_response = response[0]
expected_crc = http_response.headers.get(self._header_name)
if expected_crc is None:
logger.debug("crc32 check skipped, the %s header is not "
"in the http response.", self._header_name)
else:
actual_crc32 = crc32(response[0].content) & 0xffffffff
if not actual_crc32 == int(expected_crc):
logger.debug(
"retry needed: crc32 check failed, expected != actual: "
"%s != %s", int(expected_crc), actual_crc32)
raise ChecksumError(checksum_type='crc32',
expected_checksum=int(expected_crc),
actual_checksum=actual_crc32)
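# The 0xffffffff mask above keeps the computed checksum a non-negative
# 32-bit value so it compares cleanly with the integer header value:
#
#     >>> from binascii import crc32
#     >>> crc32(b'hello') & 0xffffffff
#     907060870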
class ExceptionRaiser(BaseChecker):
"""Raise any caught exceptions.
This class will raise any non None ``caught_exception``.
"""
def _check_caught_exception(self, attempt_number, caught_exception):
# This is implementation specific, but this class is useful by
# coordinating with the MaxAttemptsDecorator.
# The MaxAttemptsDecorator has a list of exceptions it should catch
# and retry, but something needs to come along and actually raise the
# caught_exception. That's what this class is being used for. If
# the MaxAttemptsDecorator is not interested in retrying the exception
        # then this exception just propagates out past the retry code.
raise caught_exception
| 13,781 |
Python
| 37.283333 | 80 | 0.642261 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/credentials.py
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
import threading
import json
import subprocess
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc
import botocore.configloader
import botocore.compat
from botocore import UNSIGNED
from botocore.compat import total_seconds
from botocore.compat import compat_shell_split
from botocore.config import Config
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.exceptions import InvalidConfigError
from botocore.exceptions import InfiniteLoopConfigError
from botocore.exceptions import RefreshWithMFAUnsupportedError
from botocore.exceptions import MetadataRetrievalError
from botocore.exceptions import CredentialRetrievalError
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
from botocore.utils import ContainerMetadataFetcher
from botocore.utils import FileWebIdentityTokenLoader
from botocore.utils import SSOTokenLoader
logger = logging.getLogger(__name__)
ReadOnlyCredentials = namedtuple('ReadOnlyCredentials',
['access_key', 'secret_key', 'token'])
def create_credential_resolver(session, cache=None, region_name=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
disable_env_vars = session.instance_variables().get('profile') is not None
imds_config = {
'ec2_metadata_service_endpoint': session.get_config_variable(
'ec2_metadata_service_endpoint'),
'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
}
if cache is None:
cache = {}
env_provider = EnvProvider()
container_provider = ContainerProvider()
instance_metadata_provider = InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts,
user_agent=session.user_agent(),
config=imds_config)
)
profile_provider_builder = ProfileProviderBuilder(
session, cache=cache, region_name=region_name)
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=_get_client_creator(session, region_name),
cache=cache,
profile_name=profile_name,
credential_sourcer=CanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
]),
profile_provider_builder=profile_provider_builder,
)
pre_profile = [
env_provider,
assume_role_provider,
]
profile_providers = profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
post_profile = [
OriginalEC2Provider(),
BotoProvider(),
container_provider,
instance_metadata_provider,
]
providers = pre_profile + profile_providers + post_profile
if disable_env_vars:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
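# Typical usage sketch; ``get_session`` is the standard entry point:
#
#     >>> import botocore.session
#     >>> session = botocore.session.get_session()
#     >>> resolver = create_credential_resolver(session)
#     >>> creds = resolver.load_credentials()  # a Credentials object or None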
class ProfileProviderBuilder(object):
"""This class handles the creation of profile based providers.
NOTE: This class is only intended for internal use.
This class handles the creation and ordering of the various credential
    providers that primarily source their configuration from the shared config.
This is needed to enable sharing between the default credential chain and
the source profile chain created by the assume role provider.
"""
def __init__(self, session, cache=None, region_name=None,
sso_token_cache=None):
self._session = session
self._cache = cache
self._region_name = region_name
self._sso_token_cache = sso_token_cache
def providers(self, profile_name, disable_env_vars=False):
return [
self._create_web_identity_provider(
profile_name, disable_env_vars,
),
self._create_sso_provider(profile_name),
self._create_shared_credential_provider(profile_name),
self._create_process_provider(profile_name),
self._create_config_provider(profile_name),
]
def _create_process_provider(self, profile_name):
return ProcessProvider(
profile_name=profile_name,
load_config=lambda: self._session.full_config,
)
def _create_shared_credential_provider(self, profile_name):
credential_file = self._session.get_config_variable('credentials_file')
return SharedCredentialProvider(
profile_name=profile_name,
creds_filename=credential_file,
)
def _create_config_provider(self, profile_name):
config_file = self._session.get_config_variable('config_file')
return ConfigProvider(
profile_name=profile_name,
config_filename=config_file,
)
def _create_web_identity_provider(self, profile_name, disable_env_vars):
return AssumeRoleWithWebIdentityProvider(
load_config=lambda: self._session.full_config,
client_creator=_get_client_creator(
self._session, self._region_name),
cache=self._cache,
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
def _create_sso_provider(self, profile_name):
return SSOProvider(
load_config=lambda: self._session.full_config,
client_creator=self._session.create_client,
profile_name=profile_name,
cache=self._cache,
token_cache=self._sso_token_cache,
)
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
def _local_now():
return datetime.datetime.now(tzlocal())
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value, iso=False):
if isinstance(value, datetime.datetime):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
def _get_client_creator(session, region_name):
def client_creator(service_name, **kwargs):
create_client_kwargs = {
'region_name': region_name
}
create_client_kwargs.update(**kwargs)
return session.create_client(service_name, **create_client_kwargs)
return client_creator
def create_assume_role_refresher(client, params):
def refresh():
response = client.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
def create_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
def __call__(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return self._refresh()
return _Refresher(actual_refresh)
class JSONFileCache(object):
"""JSON file cache.
This provides a dict like interface that stores JSON serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
"""
CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache'))
def __init__(self, working_dir=CACHE_DIR, dumps_func=None):
self._working_dir = working_dir
if dumps_func is None:
dumps_func = self._default_dumps
self._dumps = dumps_func
def _default_dumps(self, obj):
return json.dumps(obj, default=_serialize_if_needed)
def __contains__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
return os.path.isfile(actual_key)
def __getitem__(self, cache_key):
"""Retrieve value from a cache key."""
actual_key = self._convert_cache_key(cache_key)
try:
with open(actual_key) as f:
return json.load(f)
except (OSError, ValueError, IOError):
raise KeyError(cache_key)
def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
file_content = self._dumps(value)
except (TypeError, ValueError):
raise ValueError("Value cannot be cached, must be "
"JSON serializable: %s" % value)
if not os.path.isdir(self._working_dir):
os.makedirs(self._working_dir)
with os.fdopen(os.open(full_key,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.truncate()
f.write(file_content)
def _convert_cache_key(self, cache_key):
full_path = os.path.join(self._working_dir, cache_key + '.json')
return full_path
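# Illustrative usage (the working directory here is hypothetical):
#
#     >>> cache = JSONFileCache(working_dir='/tmp/example-cache')
#     >>> cache['assumed-role'] = {'access_key': 'foo'}
#     >>> 'assumed-role' in cache
#     True
#     >>> cache['assumed-role']
#     {'access_key': 'foo'}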
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
# We explicitly convert them into unicode to avoid such error.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = botocore.compat.ensure_unicode(self.access_key)
self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
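# Example (the key material is obviously fake):
#
#     >>> creds = Credentials('AKIDEXAMPLE', 'wJalrXUtnFEMI/EXAMPLE')
#     >>> creds.method
#     'explicit'
#     >>> creds.get_frozen_credentials()
#     ReadOnlyCredentials(access_key='AKIDEXAMPLE', secret_key='wJalrXUtnFEMI/EXAMPLE', token=None)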
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
# The time at which we'll attempt to refresh, but not
# block if someone else is refreshing.
_advisory_refresh_timeout = 15 * 60
# The time at which all threads will block waiting for
# refreshed credentials.
_mandatory_refresh_timeout = 10 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = ReadOnlyCredentials(
access_key, secret_key, token)
self._normalize()
def _normalize(self):
self._access_key = botocore.compat.ensure_unicode(self._access_key)
self._secret_key = botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self, refresh_in=None):
"""Check if a refresh is needed.
A refresh is needed if the expiry time associated
with the temporary credentials is less than the
        provided ``refresh_in``. If ``refresh_in`` is not
        provided, ``self._advisory_refresh_timeout`` will be used.
For example, if your temporary credentials expire
in 10 minutes and the provided ``refresh_in`` is
``15 * 60``, then this function will return ``True``.
:type refresh_in: int
:param refresh_in: The number of seconds before the
credentials expire in which refresh attempts should
be made.
:return: True if refresh needed, False otherwise.
"""
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
if refresh_in is None:
refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # within the ``refresh_in`` window.
if self._seconds_remaining() >= refresh_in:
# There's enough time left. Don't refresh.
return False
logger.debug("Credentials need to be refreshed.")
return True
def _is_expired(self):
# Checks if the current credentials are expired.
return self.refresh_needed(refresh_in=0)
def _refresh(self):
# In the common case where we don't need a refresh, we
# can immediately exit and not require acquiring the
# refresh lock.
if not self.refresh_needed(self._advisory_refresh_timeout):
return
# acquire() doesn't accept kwargs, but False is indicating
# that we should not block if we can't acquire the lock.
# If we aren't able to acquire the lock, we'll trigger
        # the elif clause.
if self._refresh_lock.acquire(False):
try:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
finally:
self._refresh_lock.release()
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're within the mandatory refresh window,
# we must block until we get refreshed credentials.
with self._refresh_lock:
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
self._protected_refresh(is_mandatory=True)
def _protected_refresh(self, is_mandatory):
# precondition: this method should only be called if you've acquired
# the self._refresh_lock.
try:
metadata = self._refresh_using()
except Exception as e:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
if self._is_expired():
# We successfully refreshed credentials but for whatever
# reason, our refreshing function returned credentials
# that are still expired. In this scenario, the only
# thing we can do is let the user know and raise
# an exception.
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
def _set_from_data(self, data):
expected_keys = ['access_key', 'secret_key', 'token', 'expiry_time']
if not data:
missing_keys = expected_keys
else:
missing_keys = [k for k in expected_keys if k not in data]
if missing_keys:
message = "Credential refresh failed, response did not contain: %s"
raise CredentialRetrievalError(
provider=self.method,
error_msg=message % ', '.join(missing_keys),
)
self.access_key = data['access_key']
self.secret_key = data['secret_key']
self.token = data['token']
self._expiry_time = parse(data['expiry_time'])
logger.debug("Retrieved credentials will expire at: %s",
self._expiry_time)
self._normalize()
def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
self._refresh()
return self._frozen_credentials
class DeferredRefreshableCredentials(RefreshableCredentials):
"""Refreshable credentials that don't require initial credentials.
refresh_using will be called upon first access.
"""
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if self._frozen_credentials is None:
return True
return super(DeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class CachedCredentialFetcher(object):
DEFAULT_EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, cache=None, expiry_window_seconds=None):
if cache is None:
cache = {}
self._cache = cache
self._cache_key = self._create_cache_key()
if expiry_window_seconds is None:
expiry_window_seconds = self.DEFAULT_EXPIRY_WINDOW_SECONDS
self._expiry_window_seconds = expiry_window_seconds
def _create_cache_key(self):
raise NotImplementedError('_create_cache_key()')
def _make_file_safe(self, filename):
        # Replace ':', the OS path separator, and '/' so the string is
        # safe to use as a filename.
filename = filename.replace(':', '_').replace(os.path.sep, '_')
return filename.replace('/', '_')
def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
def fetch_credentials(self):
return self._get_cached_credentials()
def _get_cached_credentials(self):
"""Get up-to-date credentials.
        This will check the cache for up-to-date credentials, fetching
        new ones via ``_get_credentials()`` if none are available.
"""
response = self._load_from_cache()
if response is None:
response = self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
def _load_from_cache(self):
if self._cache_key in self._cache:
creds = deepcopy(self._cache[self._cache_key])
if not self._is_expired(creds):
return creds
else:
logger.debug(
"Credentials were found in cache, but they are expired."
)
return None
def _write_to_cache(self, response):
self._cache[self._cache_key] = deepcopy(response)
def _is_expired(self, credentials):
"""Check if credentials are expired."""
end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
seconds = total_seconds(end_time - _local_now())
return seconds < self._expiry_window_seconds
class BaseAssumeRoleCredentialFetcher(CachedCredentialFetcher):
def __init__(self, client_creator, role_arn, extra_args=None,
cache=None, expiry_window_seconds=None):
self._client_creator = client_creator
self._role_arn = role_arn
if extra_args is None:
self._assume_kwargs = {}
else:
self._assume_kwargs = deepcopy(extra_args)
self._assume_kwargs['RoleArn'] = self._role_arn
self._role_session_name = self._assume_kwargs.get('RoleSessionName')
self._using_default_session_name = False
if not self._role_session_name:
self._generate_assume_role_name()
super(BaseAssumeRoleCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _generate_assume_role_name(self):
self._role_session_name = 'botocore-session-%s' % (int(time.time()))
self._assume_kwargs['RoleSessionName'] = self._role_session_name
self._using_default_session_name = True
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = deepcopy(self._assume_kwargs)
        # The role session name is auto-generated from the current time,
        # so we don't want it in the hash.
if self._using_default_session_name:
del args['RoleSessionName']
if 'Policy' in args:
# To have a predictable hash, the keys of the policy must be
# sorted, so we have to load it here to make sure it gets sorted
# later on.
args['Policy'] = json.loads(args['Policy'])
args = json.dumps(args, sort_keys=True)
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
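# Because the assume-role kwargs are JSON-dumped with sort_keys=True before
# hashing, key order does not affect the cache key. A quick sketch with a
# hypothetical role ARN:
#
#     >>> a = json.dumps({'RoleArn': 'arn:aws:iam::123456789012:role/demo',
#     ...                 'ExternalId': 'x'}, sort_keys=True)
#     >>> b = json.dumps({'ExternalId': 'x',
#     ...                 'RoleArn': 'arn:aws:iam::123456789012:role/demo'},
#     ...                sort_keys=True)
#     >>> sha1(a.encode('utf-8')).hexdigest() == sha1(b.encode('utf-8')).hexdigest()
#     True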
class AssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher):
def __init__(self, client_creator, source_credentials, role_arn,
extra_args=None, mfa_prompter=None, cache=None,
expiry_window_seconds=None):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type source_credentials: Credentials
:param source_credentials: The credentials to use to create the
client for the call to AssumeRole.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type mfa_prompter: callable
:param mfa_prompter: A callable that returns input provided by the
user (i.e raw_input, getpass.getpass, etc.).
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the stated credential expiration at which the cached
            credentials are treated as expired and refreshed.
        """
self._source_credentials = source_credentials
self._mfa_prompter = mfa_prompter
if self._mfa_prompter is None:
self._mfa_prompter = getpass.getpass
super(AssumeRoleCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = self._create_client()
return client.assume_role(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
mfa_serial = assume_role_kwargs.get('SerialNumber')
if mfa_serial is not None:
prompt = 'Enter MFA code for %s: ' % mfa_serial
token_code = self._mfa_prompter(prompt)
assume_role_kwargs['TokenCode'] = token_code
duration_seconds = assume_role_kwargs.get('DurationSeconds')
if duration_seconds is not None:
assume_role_kwargs['DurationSeconds'] = duration_seconds
return assume_role_kwargs
def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
class AssumeRoleWithWebIdentityCredentialFetcher(
BaseAssumeRoleCredentialFetcher
):
def __init__(self, client_creator, web_identity_token_loader, role_arn,
extra_args=None, cache=None, expiry_window_seconds=None):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type web_identity_token_loader: callable
:param web_identity_token_loader: A callable that takes no arguments
and returns a web identity token str.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the stated credential expiration at which the cached
            credentials are treated as expired and refreshed.
        """
self._web_identity_token_loader = web_identity_token_loader
super(AssumeRoleWithWebIdentityCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
        # Assume role with web identity does not require credentials other
        # than the token, so we explicitly configure the client not to
        # sign requests.
config = Config(signature_version=UNSIGNED)
client = self._client_creator('sts', config=config)
return client.assume_role_with_web_identity(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
identity_token = self._web_identity_token_loader()
assume_role_kwargs['WebIdentityToken'] = identity_token
return assume_role_kwargs
class CredentialProvider(object):
# A short name to identify the provider within botocore.
METHOD = None
# A name to identify the provider for use in cross-sdk features like
# assume role's `credential_source` configuration option. These names
# are to be treated in a case-insensitive way. NOTE: any providers not
# implemented in botocore MUST prefix their canonical names with
# 'custom' or we DO NOT guarantee that it will work with any features
# that this provides.
CANONICAL_NAME = None
def __init__(self, session=None):
self.session = session
def load(self):
"""
Loads the credentials from their source & sets them on the object.
Subclasses should implement this method (by reading from disk, the
environment, the network or wherever), returning ``True`` if they were
found & loaded.
        If not found, this method should return ``False``, indicating that the
``CredentialResolver`` should fall back to the next available method.
The default implementation does nothing, assuming the user has set the
``access_key/secret_key/token`` themselves.
:returns: Whether credentials were found & set
:rtype: Credentials
"""
return True
def _extract_creds_from_mapping(self, mapping, *key_names):
found = []
for key_name in key_names:
try:
found.append(mapping[key_name])
except KeyError:
raise PartialCredentialsError(provider=self.METHOD,
cred_var=key_name)
return found
class ProcessProvider(CredentialProvider):
METHOD = 'custom-process'
def __init__(self, profile_name, load_config, popen=subprocess.Popen):
self._profile_name = profile_name
self._load_config = load_config
self._loaded_config = None
self._popen = popen
def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return RefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return Credentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
process_list = compat_shell_split(credential_process)
p = self._popen(process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
@property
def _credential_process(self):
if self._loaded_config is None:
self._loaded_config = self._load_config()
profile_config = self._loaded_config.get(
'profiles', {}).get(self._profile_name, {})
return profile_config.get('credential_process')
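# --- Illustrative sketch (not part of botocore): the version-1 JSON document
# a ``credential_process`` command is expected to print to stdout, matching
# the keys parsed in ProcessProvider._retrieve_credentials_using above. The
# profile name and command path are hypothetical.
#
#   [profile dev]
#   credential_process = /usr/local/bin/fetch-creds
#
#   {
#       "Version": 1,
#       "AccessKeyId": "AKIAEXAMPLE",
#       "SecretAccessKey": "example-secret",
#       "SessionToken": "example-token",        # optional
#       "Expiration": "2030-01-01T00:00:00Z"    # optional; enables refresh
#   }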
class InstanceMetadataProvider(CredentialProvider):
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
def __init__(self, iam_role_fetcher):
self._role_fetcher = iam_role_fetcher
def load(self):
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
creds = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class EnvProvider(CredentialProvider):
METHOD = 'env'
CANONICAL_NAME = 'Environment'
ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
    # The token can come from either of these env vars.
# AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'
def __init__(self, environ=None, mapping=None):
"""
:param environ: The environment variables (defaults to
``os.environ`` if no value is provided).
:param mapping: An optional mapping of variable names to
environment variable names. Use this if you want to
change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
            The dict can have up to 4 keys: ``access_key``, ``secret_key``,
            ``token``, ``expiry_time``.
"""
if environ is None:
environ = os.environ
self.environ = environ
self._mapping = self._build_mapping(mapping)
def _build_mapping(self, mapping):
# Mapping of variable name to env var name.
var_mapping = {}
if mapping is None:
# Use the class var default.
var_mapping['access_key'] = self.ACCESS_KEY
var_mapping['secret_key'] = self.SECRET_KEY
var_mapping['token'] = self.TOKENS
var_mapping['expiry_time'] = self.EXPIRY_TIME
else:
var_mapping['access_key'] = mapping.get(
'access_key', self.ACCESS_KEY)
var_mapping['secret_key'] = mapping.get(
'secret_key', self.SECRET_KEY)
var_mapping['token'] = mapping.get(
'token', self.TOKENS)
if not isinstance(var_mapping['token'], list):
var_mapping['token'] = [var_mapping['token']]
var_mapping['expiry_time'] = mapping.get(
'expiry_time', self.EXPIRY_TIME)
return var_mapping
def load(self):
"""
Search for credentials in explicit environment variables.
"""
access_key = self.environ.get(self._mapping['access_key'], '')
if access_key:
logger.info('Found credentials in environment variables.')
fetcher = self._create_credentials_fetcher()
credentials = fetcher(require_expiry=False)
expiry_time = credentials['expiry_time']
if expiry_time is not None:
expiry_time = parse(expiry_time)
return RefreshableCredentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], expiry_time,
refresh_using=fetcher, method=self.METHOD
)
return Credentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], method=self.METHOD
)
else:
return None
def _create_credentials_fetcher(self):
mapping = self._mapping
method = self.METHOD
environ = self.environ
def fetch_credentials(require_expiry=True):
credentials = {}
access_key = environ.get(mapping['access_key'], '')
if not access_key:
raise PartialCredentialsError(
provider=method, cred_var=mapping['access_key'])
credentials['access_key'] = access_key
secret_key = environ.get(mapping['secret_key'], '')
if not secret_key:
raise PartialCredentialsError(
provider=method, cred_var=mapping['secret_key'])
credentials['secret_key'] = secret_key
credentials['token'] = None
for token_env_var in mapping['token']:
token = environ.get(token_env_var, '')
if token:
credentials['token'] = token
break
credentials['expiry_time'] = None
expiry_time = environ.get(mapping['expiry_time'], '')
if expiry_time:
credentials['expiry_time'] = expiry_time
if require_expiry and not expiry_time:
raise PartialCredentialsError(
provider=method, cred_var=mapping['expiry_time'])
return credentials
return fetch_credentials
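# --- Illustrative sketch (not part of botocore): using EnvProvider's
# ``mapping`` argument to read credentials from custom variable names.
# The FOO_* names and the ``environ`` dict are hypothetical.
def _example_env_provider():
    environ = {'FOO_ACCESS_KEY': 'akid', 'FOO_SECRET_KEY': 'skid'}
    provider = EnvProvider(
        environ=environ,
        mapping={'access_key': 'FOO_ACCESS_KEY',
                 'secret_key': 'FOO_SECRET_KEY'},
    )
    # Returns a Credentials instance built from the mapped variables.
    return provider.load()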
class OriginalEC2Provider(CredentialProvider):
METHOD = 'ec2-credentials-file'
CANONICAL_NAME = 'Ec2Config'
CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
ACCESS_KEY = 'AWSAccessKeyId'
SECRET_KEY = 'AWSSecretKey'
def __init__(self, environ=None, parser=None):
if environ is None:
environ = os.environ
if parser is None:
parser = parse_key_val_file
self._environ = environ
self._parser = parser
def load(self):
"""
Search for a credential file used by original EC2 CLI tools.
"""
        if self.CRED_FILE_ENV in self._environ:
            full_path = os.path.expanduser(
                self._environ[self.CRED_FILE_ENV])
creds = self._parser(full_path)
if self.ACCESS_KEY in creds:
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
access_key = creds[self.ACCESS_KEY]
secret_key = creds[self.SECRET_KEY]
# EC2 creds file doesn't support session tokens.
return Credentials(access_key, secret_key, method=self.METHOD)
else:
return None
class SharedCredentialProvider(CredentialProvider):
METHOD = 'shared-credentials-file'
CANONICAL_NAME = 'SharedCredentials'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, creds_filename, profile_name=None, ini_parser=None):
self._creds_filename = creds_filename
if profile_name is None:
profile_name = 'default'
self._profile_name = profile_name
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._ini_parser = ini_parser
def load(self):
try:
available_creds = self._ini_parser(self._creds_filename)
except ConfigNotFound:
return None
if self._profile_name in available_creds:
config = available_creds[self._profile_name]
if self.ACCESS_KEY in config:
logger.info("Found credentials in shared credentials file: %s",
self._creds_filename)
access_key, secret_key = self._extract_creds_from_mapping(
config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
def _get_session_token(self, config):
for token_envvar in self.TOKENS:
if token_envvar in config:
return config[token_envvar]
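# --- Illustrative sketch (not part of botocore): the ~/.aws/credentials
# entry SharedCredentialProvider reads for a profile named "dev" (the
# profile name and values are hypothetical):
#
#   [dev]
#   aws_access_key_id = AKIAEXAMPLE
#   aws_secret_access_key = example-secret
#   aws_session_token = example-token   # optional; aws_security_token also works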
class ConfigProvider(CredentialProvider):
"""INI based config provider with profile sections."""
METHOD = 'config-file'
CANONICAL_NAME = 'SharedConfig'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, config_filename, profile_name, config_parser=None):
"""
        :param config_filename: The full path to the configuration file to
            parse for profile data.
:param profile_name: The name of the current profile.
:param config_parser: A config parser callable.
"""
self._config_filename = config_filename
self._profile_name = profile_name
if config_parser is None:
config_parser = botocore.configloader.load_config
self._config_parser = config_parser
def load(self):
"""
        If there are credentials in the configuration associated with
the session, use those.
"""
try:
full_config = self._config_parser(self._config_filename)
except ConfigNotFound:
return None
if self._profile_name in full_config['profiles']:
profile_config = full_config['profiles'][self._profile_name]
if self.ACCESS_KEY in profile_config:
logger.info("Credentials found in config file: %s",
self._config_filename)
access_key, secret_key = self._extract_creds_from_mapping(
profile_config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(profile_config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, profile_config):
for token_name in self.TOKENS:
if token_name in profile_config:
return profile_config[token_name]
class BotoProvider(CredentialProvider):
METHOD = 'boto-config'
CANONICAL_NAME = 'Boto2Config'
BOTO_CONFIG_ENV = 'BOTO_CONFIG'
DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
def __init__(self, environ=None, ini_parser=None):
if environ is None:
environ = os.environ
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._environ = environ
self._ini_parser = ini_parser
def load(self):
"""
Look for credentials in boto config file.
"""
if self.BOTO_CONFIG_ENV in self._environ:
potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
else:
potential_locations = self.DEFAULT_CONFIG_FILENAMES
for filename in potential_locations:
try:
config = self._ini_parser(filename)
except ConfigNotFound:
# Move on to the next potential config file name.
continue
if 'Credentials' in config:
credentials = config['Credentials']
if self.ACCESS_KEY in credentials:
logger.info("Found credentials in boto config file: %s",
filename)
access_key, secret_key = self._extract_creds_from_mapping(
credentials, self.ACCESS_KEY, self.SECRET_KEY)
return Credentials(access_key, secret_key,
method=self.METHOD)
class AssumeRoleProvider(CredentialProvider):
METHOD = 'assume-role'
# The AssumeRole provider is logically part of the SharedConfig and
# SharedCredentials providers. Since the purpose of the canonical name
# is to provide cross-sdk compatibility, calling code will need to be
# aware that either of those providers should be tied to the AssumeRole
# provider as much as possible.
CANONICAL_NAME = None
ROLE_CONFIG_VAR = 'role_arn'
WEB_IDENTITY_TOKE_FILE_VAR = 'web_identity_token_file'
# Credentials are considered expired (and will be refreshed) once the total
    # remaining time left until the credentials expire is less than the
# EXPIRY_WINDOW.
EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, load_config, client_creator, cache, profile_name,
prompter=getpass.getpass, credential_sourcer=None,
profile_provider_builder=None):
"""
:type load_config: callable
:param load_config: A function that accepts no arguments, and
when called, will return the full configuration dictionary
for the session (``session.full_config``).
:type client_creator: callable
:param client_creator: A factory function that will create
a client when called. Has the same interface as
``botocore.session.Session.create_client``.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example
of this is the ``JSONFileCache`` class in the CLI.
:type profile_name: str
:param profile_name: The name of the profile.
:type prompter: callable
:param prompter: A callable that returns input provided
            by the user (e.g. ``input``, ``getpass.getpass``, etc.).
:type credential_sourcer: CanonicalNameCredentialSourcer
:param credential_sourcer: A credential provider that takes a
configuration, which is used to provide the source credentials
for the STS call.
"""
#: The cache used to first check for assumed credentials.
#: This is checked before making the AssumeRole API
#: calls and can be useful if you have short lived
#: scripts and you'd like to avoid calling AssumeRole
#: until the credentials are expired.
self.cache = cache
self._load_config = load_config
        # client_creator is a callable that creates a client when called.
# It's basically session.create_client
self._client_creator = client_creator
self._profile_name = profile_name
self._prompter = prompter
# The _loaded_config attribute will be populated from the
# load_config() function once the configuration is actually
# loaded. The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer configuration loading until we actually try
# to load credentials (as opposed to when the object is
# instantiated).
self._loaded_config = {}
self._credential_sourcer = credential_sourcer
self._profile_provider_builder = profile_provider_builder
self._visited_profiles = [self._profile_name]
def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return self._load_creds_via_assume_role(self._profile_name)
def _has_assume_role_config_vars(self, profile):
return (
self.ROLE_CONFIG_VAR in profile and
# We need to ensure this provider doesn't look at a profile when
# the profile has configuration for web identity. Simply relying on
# the order in the credential chain is insufficient as it doesn't
# prevent the case when we're doing an assume role chain.
self.WEB_IDENTITY_TOKE_FILE_VAR not in profile
)
def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
duration_seconds = role_config.get('duration_seconds')
if duration_seconds is not None:
extra_args['DurationSeconds'] = duration_seconds
fetcher = AssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
def _get_role_config(self, profile_name):
"""Retrieves and validates the role configuration for the profile."""
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
source_profile = profile.get('source_profile')
role_arn = profile['role_arn']
credential_source = profile.get('credential_source')
mfa_serial = profile.get('mfa_serial')
external_id = profile.get('external_id')
role_session_name = profile.get('role_session_name')
duration_seconds = profile.get('duration_seconds')
role_config = {
'role_arn': role_arn,
'external_id': external_id,
'mfa_serial': mfa_serial,
'role_session_name': role_session_name,
'source_profile': source_profile,
'credential_source': credential_source
}
if duration_seconds is not None:
try:
role_config['duration_seconds'] = int(duration_seconds)
except ValueError:
pass
# Either the credential source or the source profile must be
# specified, but not both.
if credential_source is not None and source_profile is not None:
raise InvalidConfigError(
error_msg=(
'The profile "%s" contains both source_profile and '
'credential_source.' % profile_name
)
)
elif credential_source is None and source_profile is None:
raise PartialCredentialsError(
provider=self.METHOD,
cred_var='source_profile or credential_source'
)
elif credential_source is not None:
self._validate_credential_source(
profile_name, credential_source)
else:
self._validate_source_profile(profile_name, source_profile)
return role_config
def _validate_credential_source(self, parent_profile, credential_source):
if self._credential_sourcer is None:
raise InvalidConfigError(error_msg=(
'The credential_source "%s" is specified in profile "%s", '
'but no source provider was configured.' % (
credential_source, parent_profile)
))
if not self._credential_sourcer.is_supported(credential_source):
raise InvalidConfigError(error_msg=(
'The credential source "%s" referenced in profile "%s" is not '
'valid.' % (credential_source, parent_profile)
))
def _source_profile_has_credentials(self, profile):
return any([
self._has_static_credentials(profile),
self._has_assume_role_config_vars(profile),
])
def _validate_source_profile(self, parent_profile_name,
source_profile_name):
profiles = self._loaded_config.get('profiles', {})
if source_profile_name not in profiles:
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" referenced in '
'the profile "%s" does not exist.' % (
source_profile_name, parent_profile_name)
)
)
source_profile = profiles[source_profile_name]
# Make sure we aren't going into an infinite loop. If we haven't
# visited the profile yet, we're good.
if source_profile_name not in self._visited_profiles:
return
# If we have visited the profile and the profile isn't simply
# referencing itself, that's an infinite loop.
if source_profile_name != parent_profile_name:
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
# A profile is allowed to reference itself so that it can source
# static credentials and have configuration all in the same
# profile. This will only ever work for the top level assume
# role because the static credentials will otherwise take
# precedence.
if not self._has_static_credentials(source_profile):
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
def _has_static_credentials(self, profile):
static_keys = ['aws_secret_access_key', 'aws_access_key_id']
return any(static_key in profile for static_key in static_keys)
def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return self._resolve_credentials_from_profile(source_profile)
def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile) and \
not self._profile_provider_builder:
# This is only here for backwards compatibility. If this provider
            # isn't given a profile provider builder we still want to be
            # able to handle the basic static credential case as we would
            # before the profile provider builder parameter was added.
return self._resolve_static_credentials_from_profile(profile)
elif self._has_static_credentials(profile) or \
not self._has_assume_role_config_vars(profile):
profile_providers = self._profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=True,
)
profile_chain = CredentialResolver(profile_providers)
credentials = profile_chain.load_credentials()
if credentials is None:
error_message = (
'The source profile "%s" must have credentials.'
)
raise InvalidConfigError(
error_msg=error_message % profile_name,
)
return credentials
return self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return Credentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
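# --- Illustrative sketch (not part of botocore): a shared config layout
# resolved by AssumeRoleProvider. Profile names, account id and role name
# are hypothetical; exactly one of source_profile / credential_source may
# be set.
#
#   [profile base]
#   aws_access_key_id = AKIAEXAMPLE
#   aws_secret_access_key = example-secret
#
#   [profile admin]
#   role_arn = arn:aws:iam::123456789012:role/Admin
#   source_profile = base             # or: credential_source = Environment
#   mfa_serial = arn:aws:iam::123456789012:mfa/user   # optional
#   duration_seconds = 3600                           # optional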
class AssumeRoleWithWebIdentityProvider(CredentialProvider):
METHOD = 'assume-role-with-web-identity'
CANONICAL_NAME = None
_CONFIG_TO_ENV_VAR = {
'web_identity_token_file': 'AWS_WEB_IDENTITY_TOKEN_FILE',
'role_session_name': 'AWS_ROLE_SESSION_NAME',
'role_arn': 'AWS_ROLE_ARN',
}
def __init__(
self,
load_config,
client_creator,
profile_name,
cache=None,
disable_env_vars=False,
token_loader_cls=None,
):
self.cache = cache
self._load_config = load_config
self._client_creator = client_creator
self._profile_name = profile_name
self._profile_config = None
self._disable_env_vars = disable_env_vars
if token_loader_cls is None:
token_loader_cls = FileWebIdentityTokenLoader
self._token_loader_cls = token_loader_cls
def load(self):
return self._assume_role_with_web_identity()
def _get_profile_config(self, key):
if self._profile_config is None:
loaded_config = self._load_config()
profiles = loaded_config.get('profiles', {})
self._profile_config = profiles.get(self._profile_name, {})
return self._profile_config.get(key)
def _get_env_config(self, key):
if self._disable_env_vars:
return None
env_key = self._CONFIG_TO_ENV_VAR.get(key)
if env_key and env_key in os.environ:
return os.environ[env_key]
return None
def _get_config(self, key):
env_value = self._get_env_config(key)
if env_value is not None:
return env_value
return self._get_profile_config(key)
def _assume_role_with_web_identity(self):
token_path = self._get_config('web_identity_token_file')
if not token_path:
return None
token_loader = self._token_loader_cls(token_path)
role_arn = self._get_config('role_arn')
if not role_arn:
error_msg = (
'The provided profile or the current environment is '
'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var '
                'is set.'
)
raise InvalidConfigError(error_msg=error_msg)
extra_args = {}
role_session_name = self._get_config('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
fetcher = AssumeRoleWithWebIdentityCredentialFetcher(
client_creator=self._client_creator,
web_identity_token_loader=token_loader,
role_arn=role_arn,
extra_args=extra_args,
cache=self.cache,
)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=fetcher.fetch_credentials,
)
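# --- Illustrative sketch (not part of botocore): the environment variables
# that AssumeRoleWithWebIdentityProvider reads via _CONFIG_TO_ENV_VAR
# (all values hypothetical):
#
#   AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/token
#   AWS_ROLE_ARN=arn:aws:iam::123456789012:role/pod-role
#   AWS_ROLE_SESSION_NAME=my-session   # optional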
class CanonicalNameCredentialSourcer(object):
def __init__(self, providers):
self._providers = providers
def is_supported(self, source_name):
"""Validates a given source name.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: bool
:returns: True if the credential provider is supported,
False otherwise.
"""
return source_name in [p.CANONICAL_NAME for p in self._providers]
def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
"""
source = self._get_provider(source_name)
if isinstance(source, CredentialResolver):
return source.load_credentials()
return source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return CredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
def _get_provider_by_canonical_name(self, canonical_name):
"""Return a credential provider by its canonical name.
        This function is strict; it does not attempt to address
compatibility issues.
"""
for provider in self._providers:
name = provider.CANONICAL_NAME
# Canonical names are case-insensitive
if name and name.lower() == canonical_name.lower():
return provider
def _get_provider_by_method(self, method):
"""Return a credential provider by its METHOD name."""
for provider in self._providers:
if provider.METHOD == method:
return provider
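# --- Illustrative sketch (not part of botocore): resolving source
# credentials by canonical name, as assume-role's ``credential_source``
# option does.
def _example_sourcer():
    sourcer = CanonicalNameCredentialSourcer(providers=[EnvProvider()])
    if sourcer.is_supported('Environment'):
        # Looks the provider up by CANONICAL_NAME and calls its load().
        return sourcer.source_credentials('Environment')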
class ContainerProvider(CredentialProvider):
METHOD = 'container-role'
CANONICAL_NAME = 'EcsContainer'
ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'
def __init__(self, environ=None, fetcher=None):
if environ is None:
environ = os.environ
if fetcher is None:
fetcher = ContainerMetadataFetcher()
self._environ = environ
self._fetcher = fetcher
def load(self):
# This cred provider is only triggered if the self.ENV_VAR is set,
# which only happens if you opt into this feature.
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return self._retrieve_or_fail()
def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = fetcher()
return RefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
    def _build_headers(self):
        auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
        if auth_token is not None:
            return {'Authorization': auth_token}
def _create_fetcher(self, full_uri, headers):
def fetch_creds():
try:
response = self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
def _provided_relative_uri(self):
return self.ENV_VAR in self._environ
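# --- Illustrative sketch (not part of botocore): the opt-in environment
# variables that trigger ContainerProvider (URIs and token hypothetical):
#
#   AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=/v2/credentials/uuid
#   # ...or a full endpoint, optionally with an Authorization header:
#   AWS_CONTAINER_CREDENTIALS_FULL_URI=http://localhost:8080/creds
#   AWS_CONTAINER_AUTHORIZATION_TOKEN=example-token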
class CredentialResolver(object):
def __init__(self, providers):
"""
:param providers: A list of ``CredentialProvider`` instances.
"""
self.providers = providers
def insert_before(self, name, credential_provider):
"""
Inserts a new instance of ``CredentialProvider`` into the chain that
will be tried before an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials before. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
try:
offset = [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
self.providers.insert(offset, credential_provider)
def insert_after(self, name, credential_provider):
"""
        Inserts a new instance of ``CredentialProvider`` into the chain that
        will be tried after an existing one.
:param name: The short name of the credentials you'd like to insert the
new credentials after. (ex. ``env`` or ``config``). Existing names
& ordering can be discovered via ``self.available_methods``.
:type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
offset = self._get_provider_offset(name)
self.providers.insert(offset + 1, credential_provider)
def remove(self, name):
"""
        Removes a given ``CredentialProvider`` instance from the chain.
:param name: The short name of the credentials instance to remove.
:type name: string
"""
available_methods = [p.METHOD for p in self.providers]
if name not in available_methods:
# It's not present. Fail silently.
return
offset = available_methods.index(name)
self.providers.pop(offset)
def get_provider(self, name):
"""Return a credential provider by name.
:type name: str
:param name: The name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
return self.providers[self._get_provider_offset(name)]
def _get_provider_offset(self, name):
try:
return [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
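# --- Illustrative sketch (not part of botocore): composing a resolver from
# the providers above and reordering the chain by METHOD name. The
# credentials file path and profile are hypothetical.
def _example_resolver():
    resolver = CredentialResolver(providers=[
        EnvProvider(),
        SharedCredentialProvider(creds_filename='~/.aws/credentials'),
    ])
    # Try a credential_process-based provider before the environment.
    resolver.insert_before('env', ProcessProvider(
        profile_name='default', load_config=lambda: {}))
    return resolver.load_credentials()  # first non-None result wins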
class SSOCredentialFetcher(CachedCredentialFetcher):
_UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, start_url, sso_region, role_name, account_id,
client_creator, token_loader=None, cache=None,
expiry_window_seconds=None):
self._client_creator = client_creator
self._sso_region = sso_region
self._role_name = role_name
self._account_id = account_id
self._start_url = start_url
self._token_loader = token_loader
super(SSOCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = {
'startUrl': self._start_url,
'roleName': self._role_name,
'accountId': self._account_id,
}
# NOTE: It would be good to hoist this cache key construction logic
# into the CachedCredentialFetcher class as we should be consistent.
        # Unfortunately, the current assume role fetchers that subclass don't
# pass separators resulting in non-minified JSON. In the long term,
# all fetchers should use the below caching scheme.
args = json.dumps(args, sort_keys=True, separators=(',', ':'))
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _parse_timestamp(self, timestamp_ms):
# fromtimestamp expects seconds so: milliseconds / 1000 = seconds
timestamp_seconds = timestamp_ms / 1000.0
timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
return timestamp.strftime(self._UTC_DATE_FORMAT)
def _get_credentials(self):
"""Get credentials by calling SSO get role credentials."""
config = Config(
signature_version=UNSIGNED,
region_name=self._sso_region,
)
client = self._client_creator('sso', config=config)
kwargs = {
'roleName': self._role_name,
'accountId': self._account_id,
'accessToken': self._token_loader(self._start_url),
}
try:
response = client.get_role_credentials(**kwargs)
except client.exceptions.UnauthorizedException:
raise UnauthorizedSSOTokenError()
credentials = response['roleCredentials']
credentials = {
'ProviderType': 'sso',
'Credentials': {
'AccessKeyId': credentials['accessKeyId'],
'SecretAccessKey': credentials['secretAccessKey'],
'SessionToken': credentials['sessionToken'],
'Expiration': self._parse_timestamp(credentials['expiration']),
}
}
return credentials
class SSOProvider(CredentialProvider):
METHOD = 'sso'
_SSO_TOKEN_CACHE_DIR = os.path.expanduser(
os.path.join('~', '.aws', 'sso', 'cache')
)
_SSO_CONFIG_VARS = [
'sso_start_url',
'sso_region',
'sso_role_name',
'sso_account_id',
]
def __init__(self, load_config, client_creator, profile_name,
cache=None, token_cache=None):
if token_cache is None:
token_cache = JSONFileCache(self._SSO_TOKEN_CACHE_DIR)
self._token_cache = token_cache
if cache is None:
cache = {}
self.cache = cache
self._load_config = load_config
self._client_creator = client_creator
self._profile_name = profile_name
def _load_sso_config(self):
loaded_config = self._load_config()
profiles = loaded_config.get('profiles', {})
profile_name = self._profile_name
profile_config = profiles.get(self._profile_name, {})
if all(c not in profile_config for c in self._SSO_CONFIG_VARS):
return None
config = {}
missing_config_vars = []
for config_var in self._SSO_CONFIG_VARS:
if config_var in profile_config:
config[config_var] = profile_config[config_var]
else:
missing_config_vars.append(config_var)
if missing_config_vars:
missing = ', '.join(missing_config_vars)
raise InvalidConfigError(
error_msg=(
'The profile "%s" is configured to use SSO but is missing '
'required configuration: %s' % (profile_name, missing)
)
)
return config
def load(self):
sso_config = self._load_sso_config()
if not sso_config:
return None
sso_fetcher = SSOCredentialFetcher(
sso_config['sso_start_url'],
sso_config['sso_region'],
sso_config['sso_role_name'],
sso_config['sso_account_id'],
self._client_creator,
token_loader=SSOTokenLoader(cache=self._token_cache),
cache=self.cache,
)
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=sso_fetcher.fetch_credentials,
)
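# --- Illustrative sketch (not part of botocore): the profile configuration
# required by SSOProvider; all four _SSO_CONFIG_VARS must be present
# (values hypothetical):
#
#   [profile sso-dev]
#   sso_start_url = https://example.awsapps.com/start
#   sso_region = us-east-1
#   sso_account_id = 123456789012
#   sso_role_name = ReadOnly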
| 81,776 |
Python
| 37.356942 | 79 | 0.610424 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/loaders.py
|
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Module for loading various model files.
This module provides the classes that are used to load models used
by botocore. This can include:
* Service models (e.g. the model for EC2, S3, DynamoDB, etc.)
* Service model extras which customize the service models
* Other models associated with a service (pagination, waiters)
* Non service-specific config (Endpoint data, retry config)
Loading a model is broken down into several steps:
* Determining the path to load
* Search the data_path for files to load
* The mechanics of loading the file
* Searching for extras and applying them to the loaded file
The last item is used so that other, faster loading mechanisms
besides the default JSON loader can be used.
The Search Path
===============
Similar to how the PATH environment variable is to finding executables
and the PYTHONPATH environment variable is to finding python modules
to import, the botocore loaders have the concept of a data path exposed
through AWS_DATA_PATH.
This enables end users to provide additional search paths where we
will attempt to load models outside of the models we ship with
botocore. When you create a ``Loader``, there are two paths
automatically added to the model search path:
* <botocore root>/data/
* ~/.aws/models
The first value is the path where all the model files shipped with
botocore are located.
The second path is so that users can just drop new model files in
``~/.aws/models`` without having to mess around with the AWS_DATA_PATH.
The AWS_DATA_PATH uses the platform-specific path separator to
separate entries (typically ``:`` on Linux and ``;`` on Windows).
Directory Layout
================
The Loader expects a particular directory layout. In order for any
directory specified in AWS_DATA_PATH to be considered, it must have
this structure for service models::
<root>
|
|-- servicename1
| |-- 2012-10-25
| |-- service-2.json
|-- ec2
| |-- 2014-01-01
| | |-- paginators-1.json
| | |-- service-2.json
| | |-- waiters-2.json
| |-- 2015-03-01
| |-- paginators-1.json
| |-- service-2.json
| |-- waiters-2.json
| |-- service-2.sdk-extras.json
That is:
* The root directory contains sub directories that are the names
  of the services.
* Within each service directory, there's a sub directory for each
available API version.
* Within each API version, there are model specific files, including
(but not limited to): service-2.json, waiters-2.json, paginators-1.json
The ``-1`` and ``-2`` suffix at the end of the model files denote which version
schema is used within the model. Even though this information is available in
the ``version`` key within the model, this version is also part of the filename
so that code does not need to load the JSON model in order to determine which
version to use.
The ``sdk-extras`` and similar files represent extra data that needs to be
applied to the model after it is loaded. Data in these files might represent
information that doesn't quite fit in the original models, but is still needed
for the sdk. For instance, additional operation parameters might be added here
which don't represent the actual service api.
"""
import os
import logging
from botocore import BOTOCORE_ROOT
from botocore.compat import json
from botocore.compat import OrderedDict
from botocore.exceptions import DataNotFoundError, UnknownServiceError
from botocore.utils import deep_merge
logger = logging.getLogger(__name__)
def instance_cache(func):
"""Cache the result of a method on a per instance basis.
This is not a general purpose caching decorator. In order
for this to be used, it must be used on methods on an
instance, and that instance *must* provide a
``self._cache`` dictionary.
"""
def _wrapper(self, *args, **kwargs):
key = (func.__name__,) + args
for pair in sorted(kwargs.items()):
key += pair
if key in self._cache:
return self._cache[key]
data = func(self, *args, **kwargs)
self._cache[key] = data
return data
return _wrapper
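# --- Illustrative sketch (not part of botocore): a class satisfying the
# ``self._cache`` contract required by instance_cache. The class and
# method names are hypothetical.
class _ExampleCached(object):
    def __init__(self):
        self._cache = {}
    @instance_cache
    def lookup(self, name):
        # Computed once per distinct ``name``; later calls hit the cache.
        return name.upper()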
class JSONFileLoader(object):
"""Loader JSON files.
This class can load the default format of models, which is a JSON file.
"""
def exists(self, file_path):
"""Checks if the file exists.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: True if file path exists, False otherwise.
"""
return os.path.isfile(file_path + '.json')
def load_file(self, file_path):
"""Attempt to load the file path.
:type file_path: str
:param file_path: The full path to the file to load without
the '.json' extension.
:return: The loaded data if it exists, otherwise None.
"""
full_path = file_path + '.json'
if not os.path.isfile(full_path):
return
# By default the file will be opened with locale encoding on Python 3.
# We specify "utf8" here to ensure the correct behavior.
with open(full_path, 'rb') as fp:
payload = fp.read().decode('utf-8')
logger.debug("Loading JSON file: %s", full_path)
return json.loads(payload, object_pairs_hook=OrderedDict)
def create_loader(search_path_string=None):
"""Create a Loader class.
    This factory function creates a loader given a search path string.
    :type search_path_string: str
    :param search_path_string: The AWS_DATA_PATH value. A string
of data path values separated by the ``os.path.pathsep`` value,
which is typically ``:`` on POSIX platforms and ``;`` on
windows.
:return: A ``Loader`` instance.
"""
if search_path_string is None:
return Loader()
paths = []
extra_paths = search_path_string.split(os.pathsep)
for path in extra_paths:
path = os.path.expanduser(os.path.expandvars(path))
paths.append(path)
return Loader(extra_search_paths=paths)
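# --- Illustrative sketch (not part of botocore): building a loader with
# extra search paths and loading the latest model for a service. The
# directory paths are hypothetical.
def _example_loader():
    loader = create_loader('/opt/models' + os.pathsep + '~/custom/models')
    api_version = loader.determine_latest_version('ec2', 'service-2')
    return loader.load_service_model('ec2', 'service-2', api_version)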
class Loader(object):
"""Find and load data models.
This class will handle searching for and loading data models.
The main method used here is ``load_service_model``, which is a
convenience method over ``load_data`` and ``determine_latest_version``.
"""
FILE_LOADER_CLASS = JSONFileLoader
# The included models in botocore/data/ that we ship with botocore.
BUILTIN_DATA_PATH = os.path.join(BOTOCORE_ROOT, 'data')
# For convenience we automatically add ~/.aws/models to the data path.
CUSTOMER_DATA_PATH = os.path.join(os.path.expanduser('~'),
'.aws', 'models')
BUILTIN_EXTRAS_TYPES = ['sdk']
def __init__(self, extra_search_paths=None, file_loader=None,
cache=None, include_default_search_paths=True,
include_default_extras=True):
self._cache = {}
if file_loader is None:
file_loader = self.FILE_LOADER_CLASS()
self.file_loader = file_loader
if extra_search_paths is not None:
self._search_paths = extra_search_paths
else:
self._search_paths = []
if include_default_search_paths:
self._search_paths.extend([self.CUSTOMER_DATA_PATH,
self.BUILTIN_DATA_PATH])
self._extras_types = []
if include_default_extras:
self._extras_types.extend(self.BUILTIN_EXTRAS_TYPES)
self._extras_processor = ExtrasProcessor()
@property
def search_paths(self):
return self._search_paths
@property
def extras_types(self):
return self._extras_types
@instance_cache
def list_available_services(self, type_name):
"""List all known services.
This will traverse the search path and look for all known
services.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the list of available services depends on the service
type. For example, the latest API version available for
            a resource-1.json file may not be the latest API version
            available for a service-2.json file.
:return: A list of all services. The list of services will
be sorted.
"""
services = set()
for possible_path in self._potential_locations():
# Any directory in the search path is potentially a service.
# We'll collect any initial list of potential services,
# but we'll then need to further process these directories
# by searching for the corresponding type_name in each
# potential directory.
possible_services = [
d for d in os.listdir(possible_path)
if os.path.isdir(os.path.join(possible_path, d))]
for service_name in possible_services:
full_dirname = os.path.join(possible_path, service_name)
api_versions = os.listdir(full_dirname)
for api_version in api_versions:
full_load_path = os.path.join(full_dirname,
api_version,
type_name)
if self.file_loader.exists(full_load_path):
services.add(service_name)
break
return sorted(services)
@instance_cache
def determine_latest_version(self, service_name, type_name):
"""Find the latest API version available for a service.
:type service_name: str
:param service_name: The name of the service.
:type type_name: str
:param type_name: The type of the service (service-2,
paginators-1, waiters-2, etc). This is needed because
the latest API version available can depend on the service
type. For example, the latest API version available for
            a resource-1.json file may not be the latest API version
            available for a service-2.json file.
:rtype: str
:return: The latest API version. If the service does not exist
or does not have any available API data, then a
``DataNotFoundError`` exception will be raised.
"""
return max(self.list_api_versions(service_name, type_name))
@instance_cache
def list_api_versions(self, service_name, type_name):
"""List all API versions available for a particular service type
:type service_name: str
:param service_name: The name of the service
:type type_name: str
:param type_name: The type name for the service (i.e service-2,
paginators-1, etc.)
:rtype: list
:return: A list of API version strings in sorted order.
"""
known_api_versions = set()
for possible_path in self._potential_locations(service_name,
must_exist=True,
is_dir=True):
for dirname in os.listdir(possible_path):
full_path = os.path.join(possible_path, dirname, type_name)
# Only add to the known_api_versions if the directory
# contains a service-2, paginators-1, etc. file corresponding
# to the type_name passed in.
if self.file_loader.exists(full_path):
known_api_versions.add(dirname)
if not known_api_versions:
raise DataNotFoundError(data_path=service_name)
return sorted(known_api_versions)
@instance_cache
def load_service_model(self, service_name, type_name, api_version=None):
"""Load a botocore service model
This is the main method for loading botocore models (e.g. a service
model, pagination configs, waiter configs, etc.).
:type service_name: str
:param service_name: The name of the service (e.g ``ec2``, ``s3``).
:type type_name: str
:param type_name: The model type. Valid types include, but are not
limited to: ``service-2``, ``paginators-1``, ``waiters-2``.
:type api_version: str
:param api_version: The API version to load. If this is not
provided, then the latest API version will be used.
:raises: UnknownServiceError if there is no known service with
the provided service_name.
:raises: DataNotFoundError if no data could be found for the
service_name/type_name/api_version.
:return: The loaded data, as a python type (e.g. dict, list, etc).
"""
# Wrapper around the load_data. This will calculate the path
# to call load_data with.
known_services = self.list_available_services(type_name)
if service_name not in known_services:
raise UnknownServiceError(
service_name=service_name,
known_service_names=', '.join(sorted(known_services)))
if api_version is None:
api_version = self.determine_latest_version(
service_name, type_name)
full_path = os.path.join(service_name, api_version, type_name)
model = self.load_data(full_path)
# Load in all the extras
extras_data = self._find_extras(service_name, type_name, api_version)
self._extras_processor.process(model, extras_data)
return model
def _find_extras(self, service_name, type_name, api_version):
"""Creates an iterator over all the extras data."""
for extras_type in self.extras_types:
extras_name = '%s.%s-extras' % (type_name, extras_type)
full_path = os.path.join(service_name, api_version, extras_name)
try:
yield self.load_data(full_path)
except DataNotFoundError:
pass
@instance_cache
def load_data(self, name):
"""Load data given a data path.
This is a low level method that will search through the various
search paths until it's able to load a value. This is typically
only needed to load *non* model files (such as _endpoints and
_retry). If you need to load model files, you should prefer
``load_service_model``.
:type name: str
        :param name: The data path, e.g. ``ec2/2015-03-01/service-2``.
:return: The loaded data. If no data could be found then
a DataNotFoundError is raised.
"""
for possible_path in self._potential_locations(name):
found = self.file_loader.load_file(possible_path)
if found is not None:
return found
# We didn't find anything that matched on any path.
raise DataNotFoundError(data_path=name)
def _potential_locations(self, name=None, must_exist=False,
is_dir=False):
# Will give an iterator over the full path of potential locations
# according to the search path.
for path in self.search_paths:
if os.path.isdir(path):
full_path = path
if name is not None:
full_path = os.path.join(path, name)
if not must_exist:
yield full_path
else:
if is_dir and os.path.isdir(full_path):
yield full_path
elif os.path.exists(full_path):
yield full_path
class ExtrasProcessor(object):
"""Processes data from extras files into service models."""
def process(self, original_model, extra_models):
"""Processes data from a list of loaded extras files into a model
:type original_model: dict
:param original_model: The service model to load all the extras into.
:type extra_models: iterable of dict
:param extra_models: A list of loaded extras models.
"""
for extras in extra_models:
self._process(original_model, extras)
def _process(self, model, extra_model):
"""Process a single extras model into a service model."""
if 'merge' in extra_model:
deep_merge(model, extra_model['merge'])
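# --- Illustrative sketch (not part of botocore): the shape of a
# service-2.sdk-extras.json file consumed by ExtrasProcessor. Everything
# under "merge" is deep-merged into the loaded service model; the shape
# name is hypothetical.
#
#   {
#       "merge": {
#           "shapes": {
#               "ExampleShape": {"type": "string"}
#           }
#       }
#   }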
| 17,355 |
Python
| 36.567099 | 79 | 0.623855 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/stub.py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from collections import deque
from pprint import pformat
from botocore.validate import validate_parameters
from botocore.exceptions import ParamValidationError, \
StubResponseError, StubAssertionError, UnStubbedResponseError
from botocore.awsrequest import AWSResponse
class _ANY(object):
"""
A helper object that compares equal to everything. Copied from
unittest.mock
"""
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
class Stubber(object):
"""
This class will allow you to stub out requests so you don't have to hit
an endpoint to write tests. Responses are returned first in, first out.
If operations are called out of order, or are called with no remaining
queued responses, an error will be raised.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': 'test-bucket'}
stubber.add_response('list_objects', response, expected_params)
stubber.activate()
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
This class can also be called as a context manager, which will handle
activation / deactivation for you.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber
s3 = botocore.session.get_session().create_client('s3')
response = {
"Owner": {
"ID": "foo",
"DisplayName": "bar"
},
"Buckets": [{
"CreationDate": datetime.datetime(2016, 1, 20, 22, 9),
"Name": "baz"
}]
}
with Stubber(s3) as stubber:
stubber.add_response('list_buckets', response, {})
service_response = s3.list_buckets()
assert service_response == response
If you have an input parameter that is a randomly generated value, or you
otherwise don't care about its value, you can use ``stub.ANY`` to ignore
it in validation.
**Example:**
::
import datetime
import botocore.session
from botocore.stub import Stubber, ANY
s3 = botocore.session.get_session().create_client('s3')
stubber = Stubber(s3)
response = {
'IsTruncated': False,
'Name': 'test-bucket',
'MaxKeys': 1000, 'Prefix': '',
'Contents': [{
'Key': 'test.txt',
'ETag': '"abc123"',
'StorageClass': 'STANDARD',
'LastModified': datetime.datetime(2016, 1, 20, 22, 9),
'Owner': {'ID': 'abc123', 'DisplayName': 'myname'},
'Size': 14814
}],
'EncodingType': 'url',
'ResponseMetadata': {
'RequestId': 'abc123',
'HTTPStatusCode': 200,
'HostId': 'abc123'
},
'Marker': ''
}
expected_params = {'Bucket': ANY}
stubber.add_response('list_objects', response, expected_params)
with stubber:
service_response = s3.list_objects(Bucket='test-bucket')
assert service_response == response
"""
def __init__(self, client):
"""
:param client: The client to add your stubs to.
"""
self.client = client
self._event_id = 'boto_stubber'
self._expected_params_event_id = 'boto_stubber_expected_params'
self._queue = deque()
def __enter__(self):
self.activate()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deactivate()
def activate(self):
"""
Activates the stubber on the client
"""
self.client.meta.events.register_first(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.register(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def deactivate(self):
"""
Deactivates the stubber on the client
"""
self.client.meta.events.unregister(
'before-parameter-build.*.*',
self._assert_expected_params,
unique_id=self._expected_params_event_id)
self.client.meta.events.unregister(
'before-call.*.*',
self._get_response_handler,
unique_id=self._event_id)
def add_response(self, method, service_response, expected_params=None):
"""
Adds a service response to the response queue. This will be validated
against the service model to ensure correctness. It should be noted,
however, that while missing attributes are often considered correct,
your code may not function properly if you leave them out. Therefore
you should always fill in every value you see in a typical response for
your particular request.
:param method: The name of the client method to stub.
:type method: str
:param service_response: A dict response stub. Provided parameters will
be validated against the service model.
:type service_response: dict
:param expected_params: A dictionary of the expected parameters to
be called for the provided service response. The parameters match
the names of keyword arguments passed to that client call. If
any of the parameters differ a ``StubResponseError`` is thrown.
You can use stub.ANY to indicate a particular parameter to ignore
in validation. stub.ANY is only valid for top level params.
"""
self._add_response(method, service_response, expected_params)
def _add_response(self, method, service_response, expected_params):
if not hasattr(self.client, method):
raise ValueError(
"Client %s does not have method: %s"
% (self.client.meta.service_model.service_name, method))
# Create a successful http response
http_response = AWSResponse(None, 200, {}, None)
operation_name = self.client.meta.method_to_api_mapping.get(method)
self._validate_response(operation_name, service_response)
# Add the service_response to the queue for returning responses
response = {
'operation_name': operation_name,
'response': (http_response, service_response),
'expected_params': expected_params
}
self._queue.append(response)
def add_client_error(self, method, service_error_code='',
service_message='', http_status_code=400,
service_error_meta=None, expected_params=None,
response_meta=None):
"""
Adds a ``ClientError`` to the response queue.
:param method: The name of the service method to return the error on.
:type method: str
:param service_error_code: The service error code to return,
e.g. ``NoSuchBucket``
:type service_error_code: str
:param service_message: The service message to return, e.g.
'The specified bucket does not exist.'
:type service_message: str
        :param http_status_code: The HTTP status code to return, e.g. ``404``
:type http_status_code: int
:param service_error_meta: Additional keys to be added to the
service Error
:type service_error_meta: dict
        :param expected_params: A dictionary of the parameters expected to be
            used in the call that maps to the provided service response. The
            parameters match the names of keyword arguments passed to that
            client call. If any of the parameters differ, a
            ``StubResponseError`` is raised. You can use stub.ANY to indicate
            a particular parameter to ignore in validation.
:param response_meta: Additional keys to be added to the
response's ResponseMetadata
:type response_meta: dict
"""
http_response = AWSResponse(None, http_status_code, {}, None)
# We don't look to the model to build this because the caller would
# need to know the details of what the HTTP body would need to
# look like.
parsed_response = {
'ResponseMetadata': {'HTTPStatusCode': http_status_code},
'Error': {
'Message': service_message,
'Code': service_error_code
}
}
if service_error_meta is not None:
parsed_response['Error'].update(service_error_meta)
if response_meta is not None:
parsed_response['ResponseMetadata'].update(response_meta)
operation_name = self.client.meta.method_to_api_mapping.get(method)
# Note that we do not allow for expected_params while
# adding errors into the queue yet.
response = {
'operation_name': operation_name,
'response': (http_response, parsed_response),
'expected_params': expected_params,
}
self._queue.append(response)
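    # Hedged sketch (the client and bucket are hypothetical; assumes
    # ``from botocore.exceptions import ClientError``): stub a 404 error and
    # assert the raised ClientError carries the stubbed code.
    #
    #   stubber = Stubber(s3)
    #   stubber.add_client_error(
    #       'head_bucket', service_error_code='NoSuchBucket',
    #       service_message='The specified bucket does not exist.',
    #       http_status_code=404)
    #   with stubber:
    #       try:
    #           s3.head_bucket(Bucket='missing-bucket')
    #       except ClientError as e:
    #           assert e.response['Error']['Code'] == 'NoSuchBucket'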
def assert_no_pending_responses(self):
"""
Asserts that all expected calls were made.
"""
remaining = len(self._queue)
if remaining != 0:
raise AssertionError(
"%d responses remaining in queue." % remaining)
def _assert_expected_call_order(self, model, params):
if not self._queue:
raise UnStubbedResponseError(
operation_name=model.name,
reason=(
                'Unexpected API Call: A call was made, but no additional '
                'calls were expected. Either the API call was not stubbed '
                'or it was called multiple times.'
)
)
name = self._queue[0]['operation_name']
if name != model.name:
raise StubResponseError(
operation_name=model.name,
reason='Operation mismatch: found response for %s.' % name)
def _get_response_handler(self, model, params, context, **kwargs):
self._assert_expected_call_order(model, params)
# Pop off the entire response once everything has been validated
return self._queue.popleft()['response']
def _assert_expected_params(self, model, params, context, **kwargs):
if self._should_not_stub(context):
return
self._assert_expected_call_order(model, params)
expected_params = self._queue[0]['expected_params']
if expected_params is None:
return
# Validate the parameters are equal
for param, value in expected_params.items():
if param not in params or expected_params[param] != params[param]:
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
# Ensure there are no extra params hanging around
if sorted(expected_params.keys()) != sorted(params.keys()):
raise StubAssertionError(
operation_name=model.name,
reason='Expected parameters:\n%s,\nbut received:\n%s' % (
pformat(expected_params), pformat(params)))
def _should_not_stub(self, context):
# Do not include presign requests when processing stubbed client calls
# as a presign request will never have an HTTP request sent over the
# wire for it and therefore not receive a response back.
if context and context.get('is_presign_request'):
return True
def _validate_response(self, operation_name, service_response):
service_model = self.client.meta.service_model
operation_model = service_model.operation_model(operation_name)
output_shape = operation_model.output_shape
# Remove ResponseMetadata so that the validator doesn't attempt to
# perform validation on it.
response = service_response
if 'ResponseMetadata' in response:
response = copy.copy(service_response)
del response['ResponseMetadata']
if output_shape is not None:
validate_parameters(response, output_shape)
elif response:
# If the output shape is None, that means the response should be
# empty apart from ResponseMetadata
raise ParamValidationError(
report=(
"Service response should only contain ResponseMetadata."))
| 14,361 |
Python
| 35.359494 | 97 | 0.590906 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/paginate.py
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from itertools import tee
from botocore.compat import six
import jmespath
import json
import base64
import logging
from botocore.exceptions import PaginationError
from botocore.compat import zip
from botocore.utils import set_value_from_jmespath, merge_dicts
log = logging.getLogger(__name__)
class TokenEncoder(object):
"""Encodes dictionaries into opaque strings.
    This is, for the most part, json dumps + base64 encoding, but it also
    supports having bytes in the dictionary, in addition to the types that
    json can handle by default.

    This is intended for use in encoding pagination tokens, which in some
    cases can be complex structures and/or contain bytes.
"""
def encode(self, token):
"""Encodes a dictionary to an opaque string.
:type token: dict
:param token: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
:rtype: str
:returns: An opaque string
"""
try:
# Try just using json dumps first to avoid having to traverse
# and encode the dict. In 99.9999% of cases this will work.
json_string = json.dumps(token)
except (TypeError, UnicodeDecodeError):
# If normal dumping failed, go through and base64 encode all bytes.
encoded_token, encoded_keys = self._encode(token, [])
# Save the list of all the encoded key paths. We can safely
# assume that no service will ever use this key.
encoded_token['boto_encoded_keys'] = encoded_keys
# Now that the bytes are all encoded, dump the json.
json_string = json.dumps(encoded_token)
# base64 encode the json string to produce an opaque token string.
return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')
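    # Illustrative round trip (the token values are made up): a dict
    # containing bytes still encodes, because each byte value is
    # base64-encoded individually and its path is recorded under
    # 'boto_encoded_keys' before the final json + base64 pass.
    #
    #   encoder = TokenEncoder()
    #   opaque = encoder.encode({'NextMarker': b'\x00\x01', 'Page': 2})
    #   # 'opaque' is a plain base64 string, safe to hand back to callers.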
def _encode(self, data, path):
"""Encode bytes in given data, keeping track of the path traversed."""
if isinstance(data, dict):
return self._encode_dict(data, path)
elif isinstance(data, list):
return self._encode_list(data, path)
elif isinstance(data, six.binary_type):
return self._encode_bytes(data, path)
else:
return data, []
def _encode_list(self, data, path):
"""Encode any bytes in a list, noting the index of what is encoded."""
new_data = []
encoded = []
for i, value in enumerate(data):
new_path = path + [i]
new_value, new_encoded = self._encode(value, new_path)
new_data.append(new_value)
encoded.extend(new_encoded)
return new_data, encoded
def _encode_dict(self, data, path):
"""Encode any bytes in a dict, noting the index of what is encoded."""
new_data = {}
encoded = []
for key, value in data.items():
new_path = path + [key]
new_value, new_encoded = self._encode(value, new_path)
new_data[key] = new_value
encoded.extend(new_encoded)
return new_data, encoded
def _encode_bytes(self, data, path):
"""Base64 encode a byte string."""
return base64.b64encode(data).decode('utf-8'), [path]
class TokenDecoder(object):
"""Decodes token strings back into dictionaries.
    This performs the inverse operation to the TokenEncoder, accepting
    opaque strings and decoding them into a usable form.
"""
def decode(self, token):
"""Decodes an opaque string to a dictionary.
:type token: str
:param token: A token string given by the botocore pagination
interface.
:rtype: dict
:returns: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
"""
json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8')
decoded_token = json.loads(json_string)
# Remove the encoding metadata as it is read since it will no longer
# be needed.
encoded_keys = decoded_token.pop('boto_encoded_keys', None)
if encoded_keys is None:
return decoded_token
else:
return self._decode(decoded_token, encoded_keys)
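    # Sketch of the inverse operation (continuing the hypothetical token
    # from the TokenEncoder example above):
    #
    #   decoder = TokenDecoder()
    #   assert decoder.decode(opaque) == {'NextMarker': b'\x00\x01', 'Page': 2}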
def _decode(self, token, encoded_keys):
"""Find each encoded value and decode it."""
for key in encoded_keys:
encoded = self._path_get(token, key)
decoded = base64.b64decode(encoded.encode('utf-8'))
self._path_set(token, key, decoded)
return token
def _path_get(self, data, path):
"""Return the nested data at the given path.
For instance:
data = {'foo': ['bar', 'baz']}
path = ['foo', 0]
==> 'bar'
"""
# jmespath isn't used here because it would be difficult to actually
# create the jmespath query when taking all of the unknowns of key
# structure into account. Gross though this is, it is simple and not
# very error prone.
d = data
for step in path:
d = d[step]
return d
def _path_set(self, data, path, value):
"""Set the value of a key in the given data.
Example:
data = {'foo': ['bar', 'baz']}
path = ['foo', 1]
value = 'bin'
==> data = {'foo': ['bar', 'bin']}
"""
container = self._path_get(data, path[:-1])
container[path[-1]] = value
class PaginatorModel(object):
def __init__(self, paginator_config):
self._paginator_config = paginator_config['pagination']
def get_paginator(self, operation_name):
try:
single_paginator_config = self._paginator_config[operation_name]
except KeyError:
raise ValueError("Paginator for operation does not exist: %s"
% operation_name)
return single_paginator_config
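# Hedged sketch (the config dict below is a minimal, made-up fragment of the
# pagination JSON shipped with each service model):
#
#   model = PaginatorModel({'pagination': {
#       'ListObjects': {'input_token': 'Marker',
#                       'output_token': 'NextMarker',
#                       'result_key': 'Contents'}}})
#   cfg = model.get_paginator('ListObjects')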
class PageIterator(object):
def __init__(self, method, input_token, output_token, more_results,
result_keys, non_aggregate_keys, limit_key, max_items,
starting_token, page_size, op_kwargs):
self._method = method
self._input_token = input_token
self._output_token = output_token
self._more_results = more_results
self._result_keys = result_keys
self._max_items = max_items
self._limit_key = limit_key
self._starting_token = starting_token
self._page_size = page_size
self._op_kwargs = op_kwargs
self._resume_token = None
self._non_aggregate_key_exprs = non_aggregate_keys
self._non_aggregate_part = {}
self._token_encoder = TokenEncoder()
self._token_decoder = TokenDecoder()
@property
def result_keys(self):
return self._result_keys
@property
def resume_token(self):
"""Token to specify to resume pagination."""
return self._resume_token
@resume_token.setter
def resume_token(self, value):
if not isinstance(value, dict):
raise ValueError("Bad starting token: %s" % value)
if 'boto_truncate_amount' in value:
token_keys = sorted(self._input_token + ['boto_truncate_amount'])
else:
token_keys = sorted(self._input_token)
dict_keys = sorted(value.keys())
if token_keys == dict_keys:
self._resume_token = self._token_encoder.encode(value)
else:
raise ValueError("Bad starting token: %s" % value)
@property
def non_aggregate_part(self):
return self._non_aggregate_part
def __iter__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def search(self, expression):
"""Applies a JMESPath expression to a paginator
Each page of results is searched using the provided JMESPath
expression. If the result is not a list, it is yielded
directly. If the result is a list, each element in the result
is yielded individually (essentially implementing a flatmap in
which the JMESPath search is the mapping function).
:type expression: str
:param expression: JMESPath expression to apply to each page.
:return: Returns an iterator that yields the individual
elements of applying a JMESPath expression to each page of
results.
"""
compiled = jmespath.compile(expression)
for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
# Yield result directly if it is not a list.
yield results
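    # Hedged usage sketch (the 's3' client, bucket, and expression are
    # hypothetical): flatten every Key under Contents across all pages.
    #
    #   paginator = s3.get_paginator('list_objects')
    #   page_iterator = paginator.paginate(Bucket='test-bucket')
    #   for key in page_iterator.search('Contents[].Key'):
    #       print(key)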
def _make_request(self, current_kwargs):
return self._method(**current_kwargs)
def _extract_parsed_response(self, response):
return response
def _record_non_aggregate_key_values(self, response):
non_aggregate_keys = {}
for expression in self._non_aggregate_key_exprs:
result = expression.search(response)
set_value_from_jmespath(non_aggregate_keys,
expression.expression,
result)
self._non_aggregate_part = non_aggregate_keys
def _inject_starting_params(self, op_kwargs):
# If the user has specified a starting token we need to
# inject that into the operation's kwargs.
if self._starting_token is not None:
# Don't need to do anything special if there is no starting
# token specified.
next_token = self._parse_starting_token()[0]
self._inject_token_into_kwargs(op_kwargs, next_token)
if self._page_size is not None:
# Pass the page size as the parameter name for limiting
# page size, also known as the limit_key.
op_kwargs[self._limit_key] = self._page_size
def _inject_token_into_kwargs(self, op_kwargs, next_token):
for name, token in next_token.items():
if (token is not None) and (token != 'None'):
op_kwargs[name] = token
elif name in op_kwargs:
del op_kwargs[name]
def _handle_first_request(self, parsed, primary_result_key,
starting_truncation):
# If the payload is an array or string, we need to slice into it
# and only return the truncated amount.
starting_truncation = self._parse_starting_token()[1]
all_data = primary_result_key.search(parsed)
if isinstance(all_data, (list, six.string_types)):
data = all_data[starting_truncation:]
else:
data = None
set_value_from_jmespath(
parsed,
primary_result_key.expression,
data
)
# We also need to truncate any secondary result keys
# because they were not truncated in the previous last
# response.
for token in self.result_keys:
if token == primary_result_key:
continue
sample = token.search(parsed)
if isinstance(sample, list):
empty_value = []
elif isinstance(sample, six.string_types):
empty_value = ''
elif isinstance(sample, (int, float)):
empty_value = 0
else:
empty_value = None
set_value_from_jmespath(parsed, token.expression, empty_value)
return starting_truncation
def _truncate_response(self, parsed, primary_result_key, truncate_amount,
starting_truncation, next_token):
original = primary_result_key.search(parsed)
if original is None:
original = []
amount_to_keep = len(original) - truncate_amount
truncated = original[:amount_to_keep]
set_value_from_jmespath(
parsed,
primary_result_key.expression,
truncated
)
# The issue here is that even though we know how much we've truncated
# we need to account for this globally including any starting
# left truncation. For example:
# Raw response: [0,1,2,3]
# Starting index: 1
# Max items: 1
# Starting left truncation: [1, 2, 3]
# End right truncation for max items: [1]
# However, even though we only kept 1, this is post
# left truncation so the next starting index should be 2, not 1
# (left_truncation + amount_to_keep).
next_token['boto_truncate_amount'] = \
amount_to_keep + starting_truncation
self.resume_token = next_token
def _get_next_token(self, parsed):
if self._more_results is not None:
if not self._more_results.search(parsed):
return {}
next_tokens = {}
for output_token, input_key in \
zip(self._output_token, self._input_token):
next_token = output_token.search(parsed)
# We do not want to include any empty strings as actual tokens.
# Treat them as None.
if next_token:
next_tokens[input_key] = next_token
else:
next_tokens[input_key] = None
return next_tokens
def result_key_iters(self):
teed_results = tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
def build_full_result(self):
complete_result = {}
for response in self:
page = response
            # We want to try to catch operation object pagination
            # and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_response).
            # We want the parsed_response, as that is what the page iterator
            # uses. We can remove this once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
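    # Sketch of resuming with MaxItems (all values hypothetical): when more
    # results remain after truncation, the aggregated dict carries a
    # 'NextToken' that can be fed back in as StartingToken on a later call.
    #
    #   first = paginator.paginate(
    #       Bucket='test-bucket',
    #       PaginationConfig={'MaxItems': 10}).build_full_result()
    #   if 'NextToken' in first:
    #       resumed = paginator.paginate(
    #           Bucket='test-bucket',
    #           PaginationConfig={'StartingToken': first['NextToken']})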
def _parse_starting_token(self):
if self._starting_token is None:
return None
# The starting token is a dict passed as a base64 encoded string.
next_token = self._starting_token
try:
next_token = self._token_decoder.decode(next_token)
index = 0
if 'boto_truncate_amount' in next_token:
index = next_token.get('boto_truncate_amount')
del next_token['boto_truncate_amount']
except (ValueError, TypeError):
next_token, index = self._parse_starting_token_deprecated()
return next_token, index
def _parse_starting_token_deprecated(self):
"""
This handles parsing of old style starting tokens, and attempts to
coerce them into the new style.
"""
log.debug("Attempting to fall back to old starting token parser. For "
"token: %s" % self._starting_token)
if self._starting_token is None:
return None
parts = self._starting_token.split('___')
next_token = []
index = 0
if len(parts) == len(self._input_token) + 1:
try:
index = int(parts.pop())
except ValueError:
# This doesn't look like a valid old-style token, so we're
# passing it along as an opaque service token.
parts = [self._starting_token]
for part in parts:
if part == 'None':
next_token.append(None)
else:
next_token.append(part)
return self._convert_deprecated_starting_token(next_token), index
def _convert_deprecated_starting_token(self, deprecated_token):
"""
This attempts to convert a deprecated starting token into the new
style.
"""
len_deprecated_token = len(deprecated_token)
len_input_token = len(self._input_token)
if len_deprecated_token > len_input_token:
raise ValueError("Bad starting token: %s" % self._starting_token)
elif len_deprecated_token < len_input_token:
log.debug("Old format starting token does not contain all input "
"tokens. Setting the rest, in order, as None.")
for i in range(len_input_token - len_deprecated_token):
deprecated_token.append(None)
return dict(zip(self._input_token, deprecated_token))
class Paginator(object):
PAGE_ITERATOR_CLS = PageIterator
def __init__(self, method, pagination_config, model):
self._model = model
self._method = method
self._pagination_cfg = pagination_config
self._output_token = self._get_output_tokens(self._pagination_cfg)
self._input_token = self._get_input_tokens(self._pagination_cfg)
self._more_results = self._get_more_results_token(self._pagination_cfg)
self._non_aggregate_keys = self._get_non_aggregate_keys(
self._pagination_cfg)
self._result_keys = self._get_result_keys(self._pagination_cfg)
self._limit_key = self._get_limit_key(self._pagination_cfg)
@property
def result_keys(self):
return self._result_keys
def _get_non_aggregate_keys(self, config):
keys = []
for key in config.get('non_aggregate_keys', []):
keys.append(jmespath.compile(key))
return keys
def _get_output_tokens(self, config):
output = []
output_token = config['output_token']
if not isinstance(output_token, list):
output_token = [output_token]
for config in output_token:
output.append(jmespath.compile(config))
return output
def _get_input_tokens(self, config):
input_token = self._pagination_cfg['input_token']
if not isinstance(input_token, list):
input_token = [input_token]
return input_token
def _get_more_results_token(self, config):
more_results = config.get('more_results')
if more_results is not None:
return jmespath.compile(more_results)
def _get_result_keys(self, config):
result_key = config.get('result_key')
if result_key is not None:
if not isinstance(result_key, list):
result_key = [result_key]
result_key = [jmespath.compile(rk) for rk in result_key]
return result_key
def _get_limit_key(self, config):
return config.get('limit_key')
def paginate(self, **kwargs):
"""Create paginator object for an operation.
This returns an iterable object. Iterating over
this object will yield a single page of a response
at a time.
"""
page_params = self._extract_paging_params(kwargs)
return self.PAGE_ITERATOR_CLS(
self._method, self._input_token,
self._output_token, self._more_results,
self._result_keys, self._non_aggregate_keys,
self._limit_key,
page_params['MaxItems'],
page_params['StartingToken'],
page_params['PageSize'],
kwargs)
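    # A minimal page-by-page sketch (client and operation hypothetical):
    #
    #   paginator = client.get_paginator('list_objects')
    #   for page in paginator.paginate(Bucket='test-bucket',
    #                                  PaginationConfig={'PageSize': 100}):
    #       process(page.get('Contents', []))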
def _extract_paging_params(self, kwargs):
pagination_config = kwargs.pop('PaginationConfig', {})
max_items = pagination_config.get('MaxItems', None)
if max_items is not None:
max_items = int(max_items)
page_size = pagination_config.get('PageSize', None)
if page_size is not None:
if self._limit_key is None:
raise PaginationError(
message="PageSize parameter is not supported for the "
"pagination interface for this operation.")
input_members = self._model.input_shape.members
limit_key_shape = input_members.get(self._limit_key)
if limit_key_shape.type_name == 'string':
if not isinstance(page_size, six.string_types):
page_size = str(page_size)
else:
page_size = int(page_size)
return {
'MaxItems': max_items,
'StartingToken': pagination_config.get('StartingToken', None),
'PageSize': page_size,
}
class ResultKeyIterator(object):
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __iter__(self):
for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
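# Hedged example of per-result-key iteration (names hypothetical): each
# iterator yields the individual elements for its own result key.
#
#   pages = paginator.paginate(Bucket='test-bucket')
#   first_key_iter = pages.result_key_iters()[0]
#   for element in first_key_iter:
#       print(element)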
| 27,128 |
Python
| 39.013274 | 79 | 0.585373 |