file_path | content |
---|---|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_types.py | """
Tests on the new type interface. The actual correctness of the type checking
is handled in test_jsonschema_test_suite; these tests check that TypeChecker
functions correctly and can facilitate extensions to type checking
"""
from collections import namedtuple
from unittest import TestCase
from jsonschema import ValidationError, _validators
from jsonschema._types import TypeChecker
from jsonschema.exceptions import UndefinedTypeCheck
from jsonschema.validators import Draft4Validator, extend
def equals_2(checker, instance):
return instance == 2
def is_namedtuple(instance):
return isinstance(instance, tuple) and getattr(instance, "_fields", None)
def is_object_or_named_tuple(checker, instance):
if Draft4Validator.TYPE_CHECKER.is_type(instance, "object"):
return True
return is_namedtuple(instance)
def coerce_named_tuple(fn):
def coerced(validator, value, instance, schema):
if is_namedtuple(instance):
instance = instance._asdict()
return fn(validator, value, instance, schema)
return coerced
required = coerce_named_tuple(_validators.required)
properties = coerce_named_tuple(_validators.properties)
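# The wrapper above is the whole coercion trick: any draft validator that
# expects a mapping can be made namedtuple-aware by converting the instance
# with _asdict() before delegating. The same pattern would extend to other
# mapping-oriented validators (a sketch; minProperties is just one example):
#
#     minProperties = coerce_named_tuple(_validators.minProperties)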
class TestTypeChecker(TestCase):
def test_is_type(self):
checker = TypeChecker({"two": equals_2})
self.assertEqual(
(
checker.is_type(instance=2, type="two"),
checker.is_type(instance="bar", type="two"),
),
(True, False),
)
def test_is_unknown_type(self):
with self.assertRaises(UndefinedTypeCheck) as context:
TypeChecker().is_type(4, "foobar")
self.assertIn("foobar", str(context.exception))
def test_checks_can_be_added_at_init(self):
checker = TypeChecker({"two": equals_2})
self.assertEqual(checker, TypeChecker().redefine("two", equals_2))
def test_redefine_existing_type(self):
self.assertEqual(
TypeChecker().redefine("two", object()).redefine("two", equals_2),
TypeChecker().redefine("two", equals_2),
)
def test_remove(self):
self.assertEqual(
TypeChecker({"two": equals_2}).remove("two"),
TypeChecker(),
)
def test_remove_unknown_type(self):
with self.assertRaises(UndefinedTypeCheck) as context:
TypeChecker().remove("foobar")
self.assertIn("foobar", str(context.exception))
def test_redefine_many(self):
self.assertEqual(
TypeChecker().redefine_many({"foo": int, "bar": str}),
TypeChecker().redefine("foo", int).redefine("bar", str),
)
def test_remove_multiple(self):
self.assertEqual(
TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
TypeChecker(),
)
def test_type_check_can_raise_key_error(self):
"""
Make sure no one writes:
try:
self._type_checkers[type](...)
except KeyError:
ignoring the fact that the function itself can raise that.
"""
error = KeyError("Stuff")
def raises_keyerror(checker, instance):
raise error
with self.assertRaises(KeyError) as context:
TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
self.assertIs(context.exception, error)
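# The docstring above warns against wrapping the dispatch-and-call in a
# try/except KeyError. A sketch of the safe shape (separate the lookup from
# the call so a KeyError raised *by* the check function propagates;
# hypothetical, not the library's actual source):
#
#     try:
#         fn = self._type_checkers[type]
#     except KeyError:
#         raise UndefinedTypeCheck(type)
#     return fn(self, instance)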
class TestCustomTypes(TestCase):
def test_simple_type_can_be_extended(self):
def int_or_str_int(checker, instance):
if not isinstance(instance, (int, str)):
return False
try:
int(instance)
except ValueError:
return False
return True
CustomValidator = extend(
Draft4Validator,
type_checker=Draft4Validator.TYPE_CHECKER.redefine(
"integer", int_or_str_int,
),
)
validator = CustomValidator({"type": "integer"})
validator.validate(4)
validator.validate("4")
with self.assertRaises(ValidationError):
validator.validate(4.4)
def test_object_can_be_extended(self):
schema = {"type": "object"}
Point = namedtuple("Point", ["x", "y"])
type_checker = Draft4Validator.TYPE_CHECKER.redefine(
u"object", is_object_or_named_tuple,
)
CustomValidator = extend(Draft4Validator, type_checker=type_checker)
validator = CustomValidator(schema)
validator.validate(Point(x=4, y=5))
def test_object_extensions_require_custom_validators(self):
schema = {"type": "object", "required": ["x"]}
type_checker = Draft4Validator.TYPE_CHECKER.redefine(
u"object", is_object_or_named_tuple,
)
CustomValidator = extend(Draft4Validator, type_checker=type_checker)
validator = CustomValidator(schema)
Point = namedtuple("Point", ["x", "y"])
# Cannot handle required
with self.assertRaises(ValidationError):
validator.validate(Point(x=4, y=5))
def test_object_extensions_can_handle_custom_validators(self):
schema = {
"type": "object",
"required": ["x"],
"properties": {"x": {"type": "integer"}},
}
type_checker = Draft4Validator.TYPE_CHECKER.redefine(
u"object", is_object_or_named_tuple,
)
CustomValidator = extend(
Draft4Validator,
type_checker=type_checker,
validators={"required": required, "properties": properties},
)
validator = CustomValidator(schema)
Point = namedtuple("Point", ["x", "y"])
# Can now process required and properties
validator.validate(Point(x=4, y=5))
with self.assertRaises(ValidationError):
validator.validate(Point(x="not an integer", y=5))
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_validators.py | from collections import deque
from contextlib import contextmanager
from decimal import Decimal
from io import BytesIO
from unittest import TestCase
import json
import os
import sys
import tempfile
import unittest
from twisted.trial.unittest import SynchronousTestCase
import attr
from jsonschema import FormatChecker, TypeChecker, exceptions, validators
from jsonschema.compat import PY3, pathname2url
from jsonschema.tests._helpers import bug
def startswith(validator, startswith, instance, schema):
if not instance.startswith(startswith):
yield exceptions.ValidationError(u"Whoops!")
class TestCreateAndExtend(SynchronousTestCase):
def setUp(self):
self.addCleanup(
self.assertEqual,
validators.meta_schemas,
dict(validators.meta_schemas),
)
self.meta_schema = {u"$id": "some://meta/schema"}
self.validators = {u"startswith": startswith}
self.type_checker = TypeChecker()
self.Validator = validators.create(
meta_schema=self.meta_schema,
validators=self.validators,
type_checker=self.type_checker,
)
def test_attrs(self):
self.assertEqual(
(
self.Validator.VALIDATORS,
self.Validator.META_SCHEMA,
self.Validator.TYPE_CHECKER,
), (
self.validators,
self.meta_schema,
self.type_checker,
),
)
def test_init(self):
schema = {u"startswith": u"foo"}
self.assertEqual(self.Validator(schema).schema, schema)
def test_iter_errors(self):
schema = {u"startswith": u"hel"}
iter_errors = self.Validator(schema).iter_errors
errors = list(iter_errors(u"hello"))
self.assertEqual(errors, [])
expected_error = exceptions.ValidationError(
u"Whoops!",
instance=u"goodbye",
schema=schema,
validator=u"startswith",
validator_value=u"hel",
schema_path=deque([u"startswith"]),
)
errors = list(iter_errors(u"goodbye"))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]._contents(), expected_error._contents())
def test_if_a_version_is_provided_it_is_registered(self):
Validator = validators.create(
meta_schema={u"$id": "something"},
version="my version",
)
self.addCleanup(validators.meta_schemas.pop, "something")
self.assertEqual(Validator.__name__, "MyVersionValidator")
def test_if_a_version_is_not_provided_it_is_not_registered(self):
original = dict(validators.meta_schemas)
validators.create(meta_schema={u"id": "id"})
self.assertEqual(validators.meta_schemas, original)
def test_validates_registers_meta_schema_id(self):
meta_schema_key = "meta schema id"
my_meta_schema = {u"id": meta_schema_key}
validators.create(
meta_schema=my_meta_schema,
version="my version",
id_of=lambda s: s.get("id", ""),
)
self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
self.assertIn(meta_schema_key, validators.meta_schemas)
def test_validates_registers_meta_schema_draft6_id(self):
meta_schema_key = "meta schema $id"
my_meta_schema = {u"$id": meta_schema_key}
validators.create(
meta_schema=my_meta_schema,
version="my version",
)
self.addCleanup(validators.meta_schemas.pop, meta_schema_key)
self.assertIn(meta_schema_key, validators.meta_schemas)
def test_create_default_types(self):
Validator = validators.create(meta_schema={}, validators=())
self.assertTrue(
all(
Validator({}).is_type(instance=instance, type=type)
for type, instance in [
(u"array", []),
(u"boolean", True),
(u"integer", 12),
(u"null", None),
(u"number", 12.0),
(u"object", {}),
(u"string", u"foo"),
]
),
)
def test_extend(self):
original = dict(self.Validator.VALIDATORS)
new = object()
Extended = validators.extend(
self.Validator,
validators={u"new": new},
)
self.assertEqual(
(
Extended.VALIDATORS,
Extended.META_SCHEMA,
Extended.TYPE_CHECKER,
self.Validator.VALIDATORS,
), (
dict(original, new=new),
self.Validator.META_SCHEMA,
self.Validator.TYPE_CHECKER,
original,
),
)
def test_extend_idof(self):
"""
Extending a validator preserves its notion of schema IDs.
"""
def id_of(schema):
return schema.get(u"__test__", self.Validator.ID_OF(schema))
correct_id = "the://correct/id/"
meta_schema = {
u"$id": "the://wrong/id/",
u"__test__": correct_id,
}
Original = validators.create(
meta_schema=meta_schema,
validators=self.validators,
type_checker=self.type_checker,
id_of=id_of,
)
self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id)
Derived = validators.extend(Original)
self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id)
class TestLegacyTypeChecking(SynchronousTestCase):
def test_create_default_types(self):
Validator = validators.create(meta_schema={}, validators=())
self.assertEqual(
set(Validator.DEFAULT_TYPES), {
u"array",
u"boolean",
u"integer",
u"null",
u"number",
u"object", u"string",
},
)
self.flushWarnings()
def test_extend(self):
Validator = validators.create(meta_schema={}, validators=())
original = dict(Validator.VALIDATORS)
new = object()
Extended = validators.extend(
Validator,
validators={u"new": new},
)
self.assertEqual(
(
Extended.VALIDATORS,
Extended.META_SCHEMA,
Extended.TYPE_CHECKER,
Validator.VALIDATORS,
Extended.DEFAULT_TYPES,
Extended({}).DEFAULT_TYPES,
self.flushWarnings()[0]["message"],
), (
dict(original, new=new),
Validator.META_SCHEMA,
Validator.TYPE_CHECKER,
original,
Validator.DEFAULT_TYPES,
Validator.DEFAULT_TYPES,
self.flushWarnings()[0]["message"],
),
)
def test_types_redefines_the_validators_type_checker(self):
schema = {"type": "string"}
self.assertFalse(validators.Draft7Validator(schema).is_valid(12))
validator = validators.Draft7Validator(
schema,
types={"string": (str, int)},
)
self.assertTrue(validator.is_valid(12))
self.flushWarnings()
def test_providing_default_types_warns(self):
self.assertWarns(
category=DeprecationWarning,
message=(
"The default_types argument is deprecated. "
"Use the type_checker argument instead."
),
# https://tm.tl/9363 :'(
filename=sys.modules[self.assertWarns.__module__].__file__,
f=validators.create,
meta_schema={},
validators={},
default_types={"foo": object},
)
def test_cannot_ask_for_default_types_with_non_default_type_checker(self):
"""
We raise an error when you ask a validator with non-default
type checker for its DEFAULT_TYPES.
The type checker argument is new, so no one but this library
itself should be trying to use it, and doing so while then
asking for DEFAULT_TYPES makes no sense (not to mention is
deprecated), since type checkers are not strictly about Python
type.
"""
Validator = validators.create(
meta_schema={},
validators={},
type_checker=TypeChecker(),
)
with self.assertRaises(validators._DontDoThat) as e:
Validator.DEFAULT_TYPES
self.assertIn(
"DEFAULT_TYPES cannot be used on Validators using TypeCheckers",
str(e.exception),
)
with self.assertRaises(validators._DontDoThat):
Validator({}).DEFAULT_TYPES
self.assertFalse(self.flushWarnings())
def test_providing_explicit_type_checker_does_not_warn(self):
Validator = validators.create(
meta_schema={},
validators={},
type_checker=TypeChecker(),
)
self.assertFalse(self.flushWarnings())
Validator({})
self.assertFalse(self.flushWarnings())
def test_providing_neither_does_not_warn(self):
Validator = validators.create(meta_schema={}, validators={})
self.assertFalse(self.flushWarnings())
Validator({})
self.assertFalse(self.flushWarnings())
def test_providing_default_types_with_type_checker_errors(self):
with self.assertRaises(TypeError) as e:
validators.create(
meta_schema={},
validators={},
default_types={"foo": object},
type_checker=TypeChecker(),
)
self.assertIn(
"Do not specify default_types when providing a type checker",
str(e.exception),
)
self.assertFalse(self.flushWarnings())
def test_extending_a_legacy_validator_with_a_type_checker_errors(self):
Validator = validators.create(
meta_schema={},
validators={},
default_types={u"array": list}
)
with self.assertRaises(TypeError) as e:
validators.extend(
Validator,
validators={},
type_checker=TypeChecker(),
)
self.assertIn(
(
"Cannot extend a validator created with default_types "
"with a type_checker. Update the validator to use a "
"type_checker when created."
),
str(e.exception),
)
self.flushWarnings()
def test_extending_a_legacy_validator_does_not_rewarn(self):
Validator = validators.create(meta_schema={}, default_types={})
self.assertTrue(self.flushWarnings())
validators.extend(Validator)
self.assertFalse(self.flushWarnings())
def test_accessing_default_types_warns(self):
Validator = validators.create(meta_schema={}, validators={})
self.assertFalse(self.flushWarnings())
self.assertWarns(
DeprecationWarning,
(
"The DEFAULT_TYPES attribute is deprecated. "
"See the type checker attached to this validator instead."
),
# https://tm.tl/9363 :'(
sys.modules[self.assertWarns.__module__].__file__,
getattr,
Validator,
"DEFAULT_TYPES",
)
def test_accessing_default_types_on_the_instance_warns(self):
Validator = validators.create(meta_schema={}, validators={})
self.assertFalse(self.flushWarnings())
self.assertWarns(
DeprecationWarning,
(
"The DEFAULT_TYPES attribute is deprecated. "
"See the type checker attached to this validator instead."
),
# https://tm.tl/9363 :'(
sys.modules[self.assertWarns.__module__].__file__,
getattr,
Validator({}),
"DEFAULT_TYPES",
)
def test_providing_types_to_init_warns(self):
Validator = validators.create(meta_schema={}, validators={})
self.assertFalse(self.flushWarnings())
self.assertWarns(
category=DeprecationWarning,
message=(
"The types argument is deprecated. "
"Provide a type_checker to jsonschema.validators.extend "
"instead."
),
# https://tm.tl/9363 :'(
filename=sys.modules[self.assertWarns.__module__].__file__,
f=Validator,
schema={},
types={"bar": object},
)
class TestIterErrors(TestCase):
def setUp(self):
self.validator = validators.Draft3Validator({})
def test_iter_errors(self):
instance = [1, 2]
schema = {
u"disallow": u"array",
u"enum": [["a", "b", "c"], ["d", "e", "f"]],
u"minItems": 3,
}
got = (e.message for e in self.validator.iter_errors(instance, schema))
expected = [
"%r is disallowed for [1, 2]" % (schema["disallow"],),
"[1, 2] is too short",
"[1, 2] is not one of %r" % (schema["enum"],),
]
self.assertEqual(sorted(got), sorted(expected))
def test_iter_errors_multiple_failures_one_validator(self):
instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
schema = {
u"properties": {
"foo": {u"type": "string"},
"bar": {u"minItems": 2},
"baz": {u"maximum": 10, u"enum": [2, 4, 6, 8]},
},
}
errors = list(self.validator.iter_errors(instance, schema))
self.assertEqual(len(errors), 4)
class TestValidationErrorMessages(TestCase):
def message_for(self, instance, schema, *args, **kwargs):
kwargs.setdefault("cls", validators.Draft3Validator)
with self.assertRaises(exceptions.ValidationError) as e:
validators.validate(instance, schema, *args, **kwargs)
return e.exception.message
def test_single_type_failure(self):
message = self.message_for(instance=1, schema={u"type": u"string"})
self.assertEqual(message, "1 is not of type %r" % u"string")
def test_single_type_list_failure(self):
message = self.message_for(instance=1, schema={u"type": [u"string"]})
self.assertEqual(message, "1 is not of type %r" % u"string")
def test_multiple_type_failure(self):
types = u"string", u"object"
message = self.message_for(instance=1, schema={u"type": list(types)})
self.assertEqual(message, "1 is not of type %r, %r" % types)
def test_object_without_title_type_failure(self):
type = {u"type": [{u"minimum": 3}]}
message = self.message_for(instance=1, schema={u"type": [type]})
self.assertEqual(message, "1 is less than the minimum of 3")
def test_object_with_named_type_failure(self):
schema = {u"type": [{u"name": "Foo", u"minimum": 3}]}
message = self.message_for(instance=1, schema=schema)
self.assertEqual(message, "1 is less than the minimum of 3")
def test_minimum(self):
message = self.message_for(instance=1, schema={"minimum": 2})
self.assertEqual(message, "1 is less than the minimum of 2")
def test_maximum(self):
message = self.message_for(instance=1, schema={"maximum": 0})
self.assertEqual(message, "1 is greater than the maximum of 0")
def test_dependencies_single_element(self):
depend, on = "bar", "foo"
schema = {u"dependencies": {depend: on}}
message = self.message_for(
instance={"bar": 2},
schema=schema,
cls=validators.Draft3Validator,
)
self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
def test_dependencies_list_draft3(self):
depend, on = "bar", "foo"
schema = {u"dependencies": {depend: [on]}}
message = self.message_for(
instance={"bar": 2},
schema=schema,
cls=validators.Draft3Validator,
)
self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
def test_dependencies_list_draft7(self):
depend, on = "bar", "foo"
schema = {u"dependencies": {depend: [on]}}
message = self.message_for(
instance={"bar": 2},
schema=schema,
cls=validators.Draft7Validator,
)
self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
def test_additionalItems_single_failure(self):
message = self.message_for(
instance=[2],
schema={u"items": [], u"additionalItems": False},
)
self.assertIn("(2 was unexpected)", message)
def test_additionalItems_multiple_failures(self):
message = self.message_for(
instance=[1, 2, 3],
schema={u"items": [], u"additionalItems": False}
)
self.assertIn("(1, 2, 3 were unexpected)", message)
def test_additionalProperties_single_failure(self):
additional = "foo"
schema = {u"additionalProperties": False}
message = self.message_for(instance={additional: 2}, schema=schema)
self.assertIn("(%r was unexpected)" % (additional,), message)
def test_additionalProperties_multiple_failures(self):
schema = {u"additionalProperties": False}
message = self.message_for(
instance=dict.fromkeys(["foo", "bar"]),
schema=schema,
)
self.assertIn(repr("foo"), message)
self.assertIn(repr("bar"), message)
self.assertIn("were unexpected)", message)
def test_const(self):
schema = {u"const": 12}
message = self.message_for(
instance={"foo": "bar"},
schema=schema,
cls=validators.Draft6Validator,
)
self.assertIn("12 was expected", message)
def test_contains(self):
schema = {u"contains": {u"const": 12}}
message = self.message_for(
instance=[2, {}, []],
schema=schema,
cls=validators.Draft6Validator,
)
self.assertIn(
"None of [2, {}, []] are valid under the given schema",
message,
)
def test_invalid_format_default_message(self):
checker = FormatChecker(formats=())
checker.checks(u"thing")(lambda value: False)
schema = {u"format": u"thing"}
message = self.message_for(
instance="bla",
schema=schema,
format_checker=checker,
)
self.assertIn(repr("bla"), message)
self.assertIn(repr("thing"), message)
self.assertIn("is not a", message)
def test_additionalProperties_false_patternProperties(self):
schema = {u"type": u"object",
u"additionalProperties": False,
u"patternProperties": {
u"^abc$": {u"type": u"string"},
u"^def$": {u"type": u"string"},
}}
message = self.message_for(
instance={u"zebra": 123},
schema=schema,
cls=validators.Draft4Validator,
)
self.assertEqual(
message,
"{} does not match any of the regexes: {}, {}".format(
repr(u"zebra"), repr(u"^abc$"), repr(u"^def$"),
),
)
message = self.message_for(
instance={u"zebra": 123, u"fish": 456},
schema=schema,
cls=validators.Draft4Validator,
)
self.assertEqual(
message,
"{}, {} do not match any of the regexes: {}, {}".format(
repr(u"fish"), repr(u"zebra"), repr(u"^abc$"), repr(u"^def$")
),
)
def test_False_schema(self):
message = self.message_for(
instance="something",
schema=False,
cls=validators.Draft7Validator,
)
self.assertIn("False schema does not allow 'something'", message)
class TestValidationErrorDetails(TestCase):
# TODO: These really need unit tests for each individual validator, rather
# than just these higher level tests.
def test_anyOf(self):
instance = 5
schema = {
"anyOf": [
{"minimum": 20},
{"type": "string"},
],
}
validator = validators.Draft4Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "anyOf")
self.assertEqual(e.validator_value, schema["anyOf"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.schema_path, deque(["anyOf"]))
self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "minimum")
self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["anyOf"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.schema_path, deque([0, "minimum"]))
self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
self.assertEqual(
e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
)
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "type")
self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
self.assertEqual(e2.instance, instance)
self.assertEqual(e2.schema, schema["anyOf"][1])
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque([]))
self.assertEqual(e2.relative_path, deque([]))
self.assertEqual(e2.absolute_path, deque([]))
self.assertEqual(e2.schema_path, deque([1, "type"]))
self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
self.assertEqual(len(e2.context), 0)
def test_type(self):
instance = {"foo": 1}
schema = {
"type": [
{"type": "integer"},
{
"type": "object",
"properties": {"foo": {"enum": [2]}},
},
],
}
validator = validators.Draft3Validator(schema)
errors = list(validator.iter_errors(instance))
self.assertEqual(len(errors), 1)
e = errors[0]
self.assertEqual(e.validator, "type")
self.assertEqual(e.validator_value, schema["type"])
self.assertEqual(e.instance, instance)
self.assertEqual(e.schema, schema)
self.assertIsNone(e.parent)
self.assertEqual(e.path, deque([]))
self.assertEqual(e.relative_path, deque([]))
self.assertEqual(e.absolute_path, deque([]))
self.assertEqual(e.schema_path, deque(["type"]))
self.assertEqual(e.relative_schema_path, deque(["type"]))
self.assertEqual(e.absolute_schema_path, deque(["type"]))
self.assertEqual(len(e.context), 2)
e1, e2 = sorted_errors(e.context)
self.assertEqual(e1.validator, "type")
self.assertEqual(e1.validator_value, schema["type"][0]["type"])
self.assertEqual(e1.instance, instance)
self.assertEqual(e1.schema, schema["type"][0])
self.assertIs(e1.parent, e)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e1.relative_path, deque([]))
self.assertEqual(e1.absolute_path, deque([]))
self.assertEqual(e1.schema_path, deque([0, "type"]))
self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
self.assertFalse(e1.context)
self.assertEqual(e2.validator, "enum")
self.assertEqual(e2.validator_value, [2])
self.assertEqual(e2.instance, 1)
self.assertEqual(e2.schema, {u"enum": [2]})
self.assertIs(e2.parent, e)
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e2.relative_path, deque(["foo"]))
self.assertEqual(e2.absolute_path, deque(["foo"]))
self.assertEqual(
e2.schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
)
self.assertEqual(
e2.absolute_schema_path,
deque(["type", 1, "properties", "foo", "enum"]),
)
self.assertFalse(e2.context)
def test_single_nesting(self):
instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"}
schema = {
"properties": {
"foo": {"type": "string"},
"bar": {"minItems": 2},
"baz": {"maximum": 10, "enum": [2, 4, 6, 8]},
},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["baz"]))
self.assertEqual(e3.path, deque(["baz"]))
self.assertEqual(e4.path, deque(["foo"]))
self.assertEqual(e1.relative_path, deque(["bar"]))
self.assertEqual(e2.relative_path, deque(["baz"]))
self.assertEqual(e3.relative_path, deque(["baz"]))
self.assertEqual(e4.relative_path, deque(["foo"]))
self.assertEqual(e1.absolute_path, deque(["bar"]))
self.assertEqual(e2.absolute_path, deque(["baz"]))
self.assertEqual(e3.absolute_path, deque(["baz"]))
self.assertEqual(e4.absolute_path, deque(["foo"]))
self.assertEqual(e1.validator, "minItems")
self.assertEqual(e2.validator, "enum")
self.assertEqual(e3.validator, "maximum")
self.assertEqual(e4.validator, "type")
def test_multiple_nesting(self):
instance = [1, {"foo": 2, "bar": {"baz": [1]}}, "quux"]
schema = {
"type": "string",
"items": {
"type": ["string", "object"],
"properties": {
"foo": {"enum": [1, 3]},
"bar": {
"type": "array",
"properties": {
"bar": {"required": True},
"baz": {"minItems": 2},
},
},
},
},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
self.assertEqual(e1.path, deque([]))
self.assertEqual(e2.path, deque([0]))
self.assertEqual(e3.path, deque([1, "bar"]))
self.assertEqual(e4.path, deque([1, "bar", "bar"]))
self.assertEqual(e5.path, deque([1, "bar", "baz"]))
self.assertEqual(e6.path, deque([1, "foo"]))
self.assertEqual(e1.schema_path, deque(["type"]))
self.assertEqual(e2.schema_path, deque(["items", "type"]))
self.assertEqual(
list(e3.schema_path), ["items", "properties", "bar", "type"],
)
self.assertEqual(
list(e4.schema_path),
["items", "properties", "bar", "properties", "bar", "required"],
)
self.assertEqual(
list(e5.schema_path),
["items", "properties", "bar", "properties", "baz", "minItems"]
)
self.assertEqual(
list(e6.schema_path), ["items", "properties", "foo", "enum"],
)
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "type")
self.assertEqual(e3.validator, "type")
self.assertEqual(e4.validator, "required")
self.assertEqual(e5.validator, "minItems")
self.assertEqual(e6.validator, "enum")
def test_recursive(self):
schema = {
"definitions": {
"node": {
"anyOf": [{
"type": "object",
"required": ["name", "children"],
"properties": {
"name": {
"type": "string",
},
"children": {
"type": "object",
"patternProperties": {
"^.*$": {
"$ref": "#/definitions/node",
},
},
},
},
}],
},
},
"type": "object",
"required": ["root"],
"properties": {"root": {"$ref": "#/definitions/node"}},
}
instance = {
"root": {
"name": "root",
"children": {
"a": {
"name": "a",
"children": {
"ab": {
"name": "ab",
# missing "children"
},
},
},
},
},
}
validator = validators.Draft4Validator(schema)
e, = validator.iter_errors(instance)
self.assertEqual(e.absolute_path, deque(["root"]))
self.assertEqual(
e.absolute_schema_path, deque(["properties", "root", "anyOf"]),
)
e1, = e.context
self.assertEqual(e1.absolute_path, deque(["root", "children", "a"]))
self.assertEqual(
e1.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
],
),
)
e2, = e1.context
self.assertEqual(
e2.absolute_path, deque(
["root", "children", "a", "children", "ab"],
),
)
self.assertEqual(
e2.absolute_schema_path, deque(
[
"properties",
"root",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
0,
"properties",
"children",
"patternProperties",
"^.*$",
"anyOf",
],
),
)
def test_additionalProperties(self):
instance = {"bar": "bar", "foo": 2}
schema = {"additionalProperties": {"type": "integer", "minimum": 5}}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_patternProperties(self):
instance = {"bar": 1, "foo": 2}
schema = {
"patternProperties": {
"bar": {"type": "string"},
"foo": {"minimum": 5},
},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque(["bar"]))
self.assertEqual(e2.path, deque(["foo"]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems(self):
instance = ["foo", 1]
schema = {
"items": [],
"additionalItems": {"type": "integer", "minimum": 5},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([0]))
self.assertEqual(e2.path, deque([1]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_additionalItems_with_items(self):
instance = ["foo", "bar", 1]
schema = {
"items": [{}],
"additionalItems": {"type": "integer", "minimum": 5},
}
validator = validators.Draft3Validator(schema)
errors = validator.iter_errors(instance)
e1, e2 = sorted_errors(errors)
self.assertEqual(e1.path, deque([1]))
self.assertEqual(e2.path, deque([2]))
self.assertEqual(e1.validator, "type")
self.assertEqual(e2.validator, "minimum")
def test_propertyNames(self):
instance = {"foo": 12}
schema = {"propertyNames": {"not": {"const": "foo"}}}
validator = validators.Draft7Validator(schema)
error, = validator.iter_errors(instance)
self.assertEqual(error.validator, "not")
self.assertEqual(
error.message,
"%r is not allowed for %r" % ({"const": "foo"}, "foo"),
)
self.assertEqual(error.path, deque([]))
self.assertEqual(error.schema_path, deque(["propertyNames", "not"]))
def test_if_then(self):
schema = {
"if": {"const": 12},
"then": {"const": 13},
}
validator = validators.Draft7Validator(schema)
error, = validator.iter_errors(12)
self.assertEqual(error.validator, "const")
self.assertEqual(error.message, "13 was expected")
self.assertEqual(error.path, deque([]))
self.assertEqual(error.schema_path, deque(["if", "then", "const"]))
def test_if_else(self):
schema = {
"if": {"const": 12},
"else": {"const": 13},
}
validator = validators.Draft7Validator(schema)
error, = validator.iter_errors(15)
self.assertEqual(error.validator, "const")
self.assertEqual(error.message, "13 was expected")
self.assertEqual(error.path, deque([]))
self.assertEqual(error.schema_path, deque(["if", "else", "const"]))
def test_boolean_schema_False(self):
validator = validators.Draft7Validator(False)
error, = validator.iter_errors(12)
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.schema,
error.schema_path,
),
(
"False schema does not allow 12",
None,
None,
12,
False,
deque([]),
),
)
def test_ref(self):
ref, schema = "someRef", {"additionalProperties": {"type": "integer"}}
validator = validators.Draft7Validator(
{"$ref": ref},
resolver=validators.RefResolver("", {}, store={ref: schema}),
)
error, = validator.iter_errors({"foo": "notAnInteger"})
self.assertEqual(
(
error.message,
error.validator,
error.validator_value,
error.instance,
error.absolute_path,
error.schema,
error.schema_path,
),
(
"'notAnInteger' is not of type 'integer'",
"type",
"integer",
"notAnInteger",
deque(["foo"]),
{"type": "integer"},
deque(["additionalProperties", "type"]),
),
)
class MetaSchemaTestsMixin(object):
# TODO: These all belong upstream
def test_invalid_properties(self):
with self.assertRaises(exceptions.SchemaError):
self.Validator.check_schema({"properties": {"test": object()}})
def test_minItems_invalid_string(self):
with self.assertRaises(exceptions.SchemaError):
# needs to be an integer
self.Validator.check_schema({"minItems": "1"})
def test_enum_allows_empty_arrays(self):
"""
Technically, all the spec says is they SHOULD have elements, not MUST.
See https://github.com/Julian/jsonschema/issues/529.
"""
self.Validator.check_schema({"enum": []})
def test_enum_allows_non_unique_items(self):
"""
Technically, all the spec says is they SHOULD be unique, not MUST.
See https://github.com/Julian/jsonschema/issues/529.
"""
self.Validator.check_schema({"enum": [12, 12]})
class ValidatorTestMixin(MetaSchemaTestsMixin, object):
def test_valid_instances_are_valid(self):
schema, instance = self.valid
self.assertTrue(self.Validator(schema).is_valid(instance))
def test_invalid_instances_are_not_valid(self):
schema, instance = self.invalid
self.assertFalse(self.Validator(schema).is_valid(instance))
def test_non_existent_properties_are_ignored(self):
self.Validator({object(): object()}).validate(instance=object())
def test_it_creates_a_ref_resolver_if_not_provided(self):
self.assertIsInstance(
self.Validator({}).resolver,
validators.RefResolver,
)
def test_it_delegates_to_a_ref_resolver(self):
ref, schema = "someCoolRef", {"type": "integer"}
resolver = validators.RefResolver("", {}, store={ref: schema})
validator = self.Validator({"$ref": ref}, resolver=resolver)
with self.assertRaises(exceptions.ValidationError):
validator.validate(None)
def test_it_delegates_to_a_legacy_ref_resolver(self):
"""
Legacy RefResolvers support only the context manager form of
resolution.
"""
class LegacyRefResolver(object):
@contextmanager
def resolving(this, ref):
self.assertEqual(ref, "the ref")
yield {"type": "integer"}
resolver = LegacyRefResolver()
schema = {"$ref": "the ref"}
with self.assertRaises(exceptions.ValidationError):
self.Validator(schema, resolver=resolver).validate(None)
def test_is_type_is_true_for_valid_type(self):
self.assertTrue(self.Validator({}).is_type("foo", "string"))
def test_is_type_is_false_for_invalid_type(self):
self.assertFalse(self.Validator({}).is_type("foo", "array"))
def test_is_type_evades_bool_inheriting_from_int(self):
self.assertFalse(self.Validator({}).is_type(True, "integer"))
self.assertFalse(self.Validator({}).is_type(True, "number"))
@unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
def test_string_a_bytestring_is_a_string(self):
self.Validator({"type": "string"}).validate(b"foo")
def test_patterns_can_be_native_strings(self):
"""
See https://github.com/Julian/jsonschema/issues/611.
"""
self.Validator({"pattern": "foo"}).validate("foo")
def test_it_can_validate_with_decimals(self):
schema = {"items": {"type": "number"}}
Validator = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
"number",
lambda checker, thing: isinstance(
thing, (int, float, Decimal),
) and not isinstance(thing, bool),
)
)
validator = Validator(schema)
validator.validate([1, 1.1, Decimal(1) / Decimal(8)])
invalid = ["foo", {}, [], True, None]
self.assertEqual(
[error.instance for error in validator.iter_errors(invalid)],
invalid,
)
def test_it_returns_true_for_formats_it_does_not_know_about(self):
validator = self.Validator(
{"format": "carrot"}, format_checker=FormatChecker(),
)
validator.validate("bugs")
def test_it_does_not_validate_formats_by_default(self):
validator = self.Validator({})
self.assertIsNone(validator.format_checker)
def test_it_validates_formats_if_a_checker_is_provided(self):
checker = FormatChecker()
bad = ValueError("Bad!")
@checker.checks("foo", raises=ValueError)
def check(value):
if value == "good":
return True
elif value == "bad":
raise bad
else: # pragma: no cover
self.fail("What is {}? [Baby Don't Hurt Me]".format(value))
validator = self.Validator(
{"format": "foo"}, format_checker=checker,
)
validator.validate("good")
with self.assertRaises(exceptions.ValidationError) as cm:
validator.validate("bad")
# Make sure original cause is attached
self.assertIs(cm.exception.cause, bad)
def test_non_string_custom_type(self):
non_string_type = object()
schema = {"type": [non_string_type]}
Crazy = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
non_string_type,
lambda checker, thing: isinstance(thing, int),
)
)
Crazy(schema).validate(15)
def test_it_properly_formats_tuples_in_errors(self):
"""
A tuple instance properly formats validation errors for uniqueItems.
See https://github.com/Julian/jsonschema/pull/224
"""
TupleValidator = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
"array",
lambda checker, thing: isinstance(thing, tuple),
)
)
with self.assertRaises(exceptions.ValidationError) as e:
TupleValidator({"uniqueItems": True}).validate((1, 1))
self.assertIn("(1, 1) has non-unique elements", str(e.exception))
class AntiDraft6LeakMixin(object):
"""
Make sure functionality from draft 6 doesn't leak backwards in time.
"""
def test_True_is_not_a_schema(self):
with self.assertRaises(exceptions.SchemaError) as e:
self.Validator.check_schema(True)
self.assertIn("True is not of type", str(e.exception))
def test_False_is_not_a_schema(self):
with self.assertRaises(exceptions.SchemaError) as e:
self.Validator.check_schema(False)
self.assertIn("False is not of type", str(e.exception))
@unittest.skip(bug(523))
def test_True_is_not_a_schema_even_if_you_forget_to_check(self):
resolver = validators.RefResolver("", {})
with self.assertRaises(Exception) as e:
self.Validator(True, resolver=resolver).validate(12)
self.assertNotIsInstance(e.exception, exceptions.ValidationError)
@unittest.skip(bug(523))
def test_False_is_not_a_schema_even_if_you_forget_to_check(self):
resolver = validators.RefResolver("", {})
with self.assertRaises(Exception) as e:
self.Validator(False, resolver=resolver).validate(12)
self.assertNotIsInstance(e.exception, exceptions.ValidationError)
class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase):
Validator = validators.Draft3Validator
valid = {}, {}
invalid = {"type": "integer"}, "foo"
def test_any_type_is_valid_for_type_any(self):
validator = self.Validator({"type": "any"})
validator.validate(object())
def test_any_type_is_redefinable(self):
"""
Sigh, because why not.
"""
Crazy = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
"any", lambda checker, thing: isinstance(thing, int),
)
)
validator = Crazy({"type": "any"})
validator.validate(12)
with self.assertRaises(exceptions.ValidationError):
validator.validate("foo")
def test_is_type_is_true_for_any_type(self):
self.assertTrue(self.Validator({}).is_valid(object(), {"type": "any"}))
def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
self.assertTrue(self.Validator({}).is_type(True, "boolean"))
self.assertTrue(self.Validator({}).is_valid(True, {"type": "any"}))
class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase):
Validator = validators.Draft4Validator
valid = {}, {}
invalid = {"type": "integer"}, "foo"
class TestDraft6Validator(ValidatorTestMixin, TestCase):
Validator = validators.Draft6Validator
valid = {}, {}
invalid = {"type": "integer"}, "foo"
class TestDraft7Validator(ValidatorTestMixin, TestCase):
Validator = validators.Draft7Validator
valid = {}, {}
invalid = {"type": "integer"}, "foo"
class TestValidatorFor(SynchronousTestCase):
def test_draft_3(self):
schema = {"$schema": "http://json-schema.org/draft-03/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft3Validator,
)
schema = {"$schema": "http://json-schema.org/draft-03/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft3Validator,
)
def test_draft_4(self):
schema = {"$schema": "http://json-schema.org/draft-04/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft4Validator,
)
schema = {"$schema": "http://json-schema.org/draft-04/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft4Validator,
)
def test_draft_6(self):
schema = {"$schema": "http://json-schema.org/draft-06/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft6Validator,
)
schema = {"$schema": "http://json-schema.org/draft-06/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft6Validator,
)
def test_draft_7(self):
schema = {"$schema": "http://json-schema.org/draft-07/schema"}
self.assertIs(
validators.validator_for(schema),
validators.Draft7Validator,
)
schema = {"$schema": "http://json-schema.org/draft-07/schema#"}
self.assertIs(
validators.validator_for(schema),
validators.Draft7Validator,
)
def test_True(self):
self.assertIs(
validators.validator_for(True),
validators._LATEST_VERSION,
)
def test_False(self):
self.assertIs(
validators.validator_for(False),
validators._LATEST_VERSION,
)
def test_custom_validator(self):
Validator = validators.create(
meta_schema={"id": "meta schema id"},
version="12",
id_of=lambda s: s.get("id", ""),
)
schema = {"$schema": "meta schema id"}
self.assertIs(
validators.validator_for(schema),
Validator,
)
def test_custom_validator_draft6(self):
Validator = validators.create(
meta_schema={"$id": "meta schema $id"},
version="13",
)
schema = {"$schema": "meta schema $id"}
self.assertIs(
validators.validator_for(schema),
Validator,
)
def test_validator_for_jsonschema_default(self):
self.assertIs(validators.validator_for({}), validators._LATEST_VERSION)
def test_validator_for_custom_default(self):
self.assertIs(validators.validator_for({}, default=None), None)
def test_warns_if_meta_schema_specified_was_not_found(self):
self.assertWarns(
category=DeprecationWarning,
message=(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
# https://tm.tl/9363 :'(
filename=sys.modules[self.assertWarns.__module__].__file__,
f=validators.validator_for,
schema={u"$schema": "unknownSchema"},
default={},
)
def test_does_not_warn_if_meta_schema_is_unspecified(self):
validators.validator_for(schema={}, default={})
self.assertFalse(self.flushWarnings())
class TestValidate(SynchronousTestCase):
def assertUses(self, schema, Validator):
result = []
self.patch(Validator, "check_schema", result.append)
validators.validate({}, schema)
self.assertEqual(result, [schema])
def test_draft3_validator_is_chosen(self):
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-03/schema#"},
Validator=validators.Draft3Validator,
)
# Make sure it works without the empty fragment
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-03/schema"},
Validator=validators.Draft3Validator,
)
def test_draft4_validator_is_chosen(self):
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-04/schema#"},
Validator=validators.Draft4Validator,
)
# Make sure it works without the empty fragment
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-04/schema"},
Validator=validators.Draft4Validator,
)
def test_draft6_validator_is_chosen(self):
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-06/schema#"},
Validator=validators.Draft6Validator,
)
# Make sure it works without the empty fragment
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-06/schema"},
Validator=validators.Draft6Validator,
)
def test_draft7_validator_is_chosen(self):
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-07/schema#"},
Validator=validators.Draft7Validator,
)
# Make sure it works without the empty fragment
self.assertUses(
schema={"$schema": "http://json-schema.org/draft-07/schema"},
Validator=validators.Draft7Validator,
)
def test_draft7_validator_is_the_default(self):
self.assertUses(schema={}, Validator=validators.Draft7Validator)
def test_validation_error_message(self):
with self.assertRaises(exceptions.ValidationError) as e:
validators.validate(12, {"type": "string"})
self.assertRegexpMatches(
str(e.exception),
"(?s)Failed validating u?'.*' in schema.*On instance",
)
def test_schema_error_message(self):
with self.assertRaises(exceptions.SchemaError) as e:
validators.validate(12, {"type": 12})
self.assertRegexpMatches(
str(e.exception),
"(?s)Failed validating u?'.*' in metaschema.*On schema",
)
def test_it_uses_best_match(self):
# This is a schema that best_match will recurse into
schema = {"oneOf": [{"type": "string"}, {"type": "array"}]}
with self.assertRaises(exceptions.ValidationError) as e:
validators.validate(12, schema)
self.assertIn("12 is not of type", str(e.exception))
class TestRefResolver(SynchronousTestCase):
base_uri = ""
stored_uri = "foo://stored"
stored_schema = {"stored": "schema"}
def setUp(self):
self.referrer = {}
self.store = {self.stored_uri: self.stored_schema}
self.resolver = validators.RefResolver(
self.base_uri, self.referrer, self.store,
)
def test_it_does_not_retrieve_schema_urls_from_the_network(self):
ref = validators.Draft3Validator.META_SCHEMA["id"]
self.patch(
self.resolver,
"resolve_remote",
lambda *args, **kwargs: self.fail("Should not have been called!"),
)
with self.resolver.resolving(ref) as resolved:
pass
self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA)
def test_it_resolves_local_refs(self):
ref = "#/properties/foo"
self.referrer["properties"] = {"foo": object()}
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, self.referrer["properties"]["foo"])
def test_it_resolves_local_refs_with_id(self):
schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}}
resolver = validators.RefResolver.from_schema(
schema,
id_of=lambda schema: schema.get(u"id", u""),
)
with resolver.resolving("#/a") as resolved:
self.assertEqual(resolved, schema["a"])
with resolver.resolving("http://bar/schema#/a") as resolved:
self.assertEqual(resolved, schema["a"])
def test_it_retrieves_stored_refs(self):
with self.resolver.resolving(self.stored_uri) as resolved:
self.assertIs(resolved, self.stored_schema)
self.resolver.store["cached_ref"] = {"foo": 12}
with self.resolver.resolving("cached_ref#/foo") as resolved:
self.assertEqual(resolved, 12)
def test_it_retrieves_unstored_refs_via_requests(self):
ref = "http://bar#baz"
schema = {"baz": 12}
if "requests" in sys.modules:
self.addCleanup(
sys.modules.__setitem__, "requests", sys.modules["requests"],
)
sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema})
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, 12)
def test_it_retrieves_unstored_refs_via_urlopen(self):
ref = "http://bar#baz"
schema = {"baz": 12}
if "requests" in sys.modules:
self.addCleanup(
sys.modules.__setitem__, "requests", sys.modules["requests"],
)
sys.modules["requests"] = None
@contextmanager
def fake_urlopen(url):
self.assertEqual(url, "http://bar")
yield BytesIO(json.dumps(schema).encode("utf8"))
self.addCleanup(setattr, validators, "urlopen", validators.urlopen)
validators.urlopen = fake_urlopen
with self.resolver.resolving(ref) as resolved:
pass
self.assertEqual(resolved, 12)
def test_it_retrieves_local_refs_via_urlopen(self):
with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf:
self.addCleanup(os.remove, tempf.name)
json.dump({"foo": "bar"}, tempf)
ref = "file://{}#foo".format(pathname2url(tempf.name))
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, "bar")
def test_it_can_construct_a_base_uri_from_a_schema(self):
schema = {"id": "foo"}
resolver = validators.RefResolver.from_schema(
schema,
id_of=lambda schema: schema.get(u"id", u""),
)
self.assertEqual(resolver.base_uri, "foo")
self.assertEqual(resolver.resolution_scope, "foo")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo#") as resolved:
self.assertEqual(resolved, schema)
def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
schema = {}
resolver = validators.RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "")
self.assertEqual(resolver.resolution_scope, "")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
def test_custom_uri_scheme_handlers(self):
def handler(url):
self.assertEqual(url, ref)
return schema
schema = {"foo": "bar"}
ref = "foo://bar"
resolver = validators.RefResolver("", {}, handlers={"foo": handler})
with resolver.resolving(ref) as resolved:
self.assertEqual(resolved, schema)
def test_cache_remote_on(self):
response = [object()]
def handler(url):
try:
return response.pop()
except IndexError: # pragma: no cover
self.fail("Response must not have been cached!")
ref = "foo://bar"
resolver = validators.RefResolver(
"", {}, cache_remote=True, handlers={"foo": handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
def test_cache_remote_off(self):
response = [object()]
def handler(url):
try:
return response.pop()
except IndexError: # pragma: no cover
self.fail("Handler called twice!")
ref = "foo://bar"
resolver = validators.RefResolver(
"", {}, cache_remote=False, handlers={"foo": handler},
)
with resolver.resolving(ref):
pass
def test_if_you_give_it_junk_you_get_a_resolution_error(self):
error = ValueError("Oh no! What's this?")
def handler(url):
raise error
ref = "foo://bar"
resolver = validators.RefResolver("", {}, handlers={"foo": handler})
with self.assertRaises(exceptions.RefResolutionError) as err:
with resolver.resolving(ref):
self.fail("Shouldn't get this far!") # pragma: no cover
self.assertEqual(err.exception, exceptions.RefResolutionError(error))
def test_helpful_error_message_on_failed_pop_scope(self):
resolver = validators.RefResolver("", {})
resolver.pop_scope()
with self.assertRaises(exceptions.RefResolutionError) as exc:
resolver.pop_scope()
self.assertIn("Failed to pop the scope", str(exc.exception))
def sorted_errors(errors):
def key(error):
return (
[str(e) for e in error.path],
[str(e) for e in error.schema_path],
)
return sorted(errors, key=key)
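# str() is applied because error paths mix integers (array indices) with
# strings (property names), which cannot be ordered against each other in
# Python 3; stringifying gives a stable, comparable sort key.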
@attr.s
class ReallyFakeRequests(object):
_responses = attr.ib()
def get(self, url):
response = self._responses.get(url)
if response is None: # pragma: no cover
raise ValueError("Unknown URL: " + repr(url))
return _ReallyFakeJSONResponse(json.dumps(response))
@attr.s
class _ReallyFakeJSONResponse(object):
_response = attr.ib()
def json(self):
return json.loads(self._response)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/benchmarks/issue232.py | #!/usr/bin/env python
"""
A performance benchmark using the example from issue #232.
See https://github.com/Julian/jsonschema/pull/232.
"""
from twisted.python.filepath import FilePath
from pyperf import Runner
from pyrsistent import m
from jsonschema.tests._suite import Version
import jsonschema
issue232 = Version(
path=FilePath(__file__).sibling("issue232"),
remotes=m(),
name="issue232",
)
if __name__ == "__main__":
issue232.benchmark(
runner=Runner(),
Validator=jsonschema.Draft4Validator,
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/benchmarks/__init__.py | """
Benchmarks for validation.
This package is *not* public API.
"""
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/benchmarks/json_schema_test_suite.py | #!/usr/bin/env python
"""
A performance benchmark using the official test suite.
This benchmarks jsonschema using every valid example in the
JSON-Schema-Test-Suite. It will take some time to complete.
"""
from pyperf import Runner
from jsonschema.tests._suite import Suite
if __name__ == "__main__":
Suite().benchmark(runner=Runner())
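# pyperf's Runner reads its options from the command line, so this module is
# intended to be run directly, e.g. (standard pyperf flags; shown as an
# assumption about your setup):
#
#     python json_schema_test_suite.py -o results.json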
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/_helpers.pyi | from typing import Any
class reify:
def __init__(self, wrapped: Any) -> None: ...
def __get__(self, inst: Any, owner: Any) -> Any: ...
def __set__(self, inst: Any, value: Any) -> None: ...
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/connector.py | import asyncio
import functools
import random
import sys
import traceback
import warnings
from collections import defaultdict, deque
from contextlib import suppress
from http.cookies import SimpleCookie
from itertools import cycle, islice
from time import monotonic
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
DefaultDict,
Dict,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
import attr
from . import hdrs, helpers
from .abc import AbstractResolver
from .client_exceptions import (
ClientConnectionError,
ClientConnectorCertificateError,
ClientConnectorError,
ClientConnectorSSLError,
ClientHttpProxyError,
ClientProxyConnectionError,
ServerFingerprintMismatch,
UnixClientConnectorError,
cert_errors,
ssl_errors,
)
from .client_proto import ResponseHandler
from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params
from .helpers import (
PY_36,
ceil_timeout,
get_running_loop,
is_ip_address,
noop,
sentinel,
)
from .http import RESPONSES
from .locks import EventResultOrError
from .resolver import DefaultResolver
try:
import ssl
SSLContext = ssl.SSLContext
except ImportError: # pragma: no cover
ssl = None # type: ignore[assignment]
SSLContext = object # type: ignore[misc,assignment]
__all__ = ("BaseConnector", "TCPConnector", "UnixConnector", "NamedPipeConnector")
if TYPE_CHECKING: # pragma: no cover
from .client import ClientTimeout
from .client_reqrep import ConnectionKey
from .tracing import Trace
class _DeprecationWaiter:
__slots__ = ("_awaitable", "_awaited")
def __init__(self, awaitable: Awaitable[Any]) -> None:
self._awaitable = awaitable
self._awaited = False
def __await__(self) -> Any:
self._awaited = True
return self._awaitable.__await__()
def __del__(self) -> None:
if not self._awaited:
warnings.warn(
"Connector.close() is a coroutine, "
"please use await connector.close()",
DeprecationWarning,
)
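# A sketch of the intended call pattern (hypothetical caller): returning the
# waiter keeps close() awaitable while warning when the result is dropped.
#
#     def close(self) -> Awaitable[None]:
#         return _DeprecationWaiter(self._do_close())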
class Connection:
_source_traceback = None
_transport = None
def __init__(
self,
connector: "BaseConnector",
key: "ConnectionKey",
protocol: ResponseHandler,
loop: asyncio.AbstractEventLoop,
) -> None:
self._key = key
self._connector = connector
self._loop = loop
self._protocol: Optional[ResponseHandler] = protocol
self._callbacks: List[Callable[[], None]] = []
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def __repr__(self) -> str:
return f"Connection<{self._key}>"
def __del__(self, _warnings: Any = warnings) -> None:
if self._protocol is not None:
if PY_36:
kwargs = {"source": self}
else:
kwargs = {}
_warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs)
if self._loop.is_closed():
return
self._connector._release(self._key, self._protocol, should_close=True)
context = {"client_connection": self, "message": "Unclosed connection"}
if self._source_traceback is not None:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
@property
def loop(self) -> asyncio.AbstractEventLoop:
warnings.warn(
"connector.loop property is deprecated", DeprecationWarning, stacklevel=2
)
return self._loop
@property
def transport(self) -> Optional[asyncio.Transport]:
if self._protocol is None:
return None
return self._protocol.transport
@property
def protocol(self) -> Optional[ResponseHandler]:
return self._protocol
def add_callback(self, callback: Callable[[], None]) -> None:
if callback is not None:
self._callbacks.append(callback)
def _notify_release(self) -> None:
callbacks, self._callbacks = self._callbacks[:], []
for cb in callbacks:
with suppress(Exception):
cb()
def close(self) -> None:
self._notify_release()
if self._protocol is not None:
self._connector._release(self._key, self._protocol, should_close=True)
self._protocol = None
def release(self) -> None:
self._notify_release()
if self._protocol is not None:
self._connector._release(
self._key, self._protocol, should_close=self._protocol.should_close
)
self._protocol = None
@property
def closed(self) -> bool:
return self._protocol is None or not self._protocol.is_connected()
class _TransportPlaceholder:
"""placeholder for BaseConnector.connect function"""
def close(self) -> None:
pass
class BaseConnector:
"""Base connector class.
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
enable_cleanup_closed - Enables cleanup of closed SSL transports.
Disabled by default.
loop - Optional event loop.
"""
_closed = True # prevent AttributeError in __del__ if the constructor failed
_source_traceback = None
# abort transport after 2 seconds (cleanup broken connections)
_cleanup_closed_period = 2.0
def __init__(
self,
*,
keepalive_timeout: Union[object, None, float] = sentinel,
force_close: bool = False,
limit: int = 100,
limit_per_host: int = 0,
enable_cleanup_closed: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
if force_close:
if keepalive_timeout is not None and keepalive_timeout is not sentinel:
raise ValueError(
"keepalive_timeout cannot " "be set if force_close is True"
)
else:
if keepalive_timeout is sentinel:
keepalive_timeout = 15.0
loop = get_running_loop(loop)
self._closed = False
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self._conns: Dict[ConnectionKey, List[Tuple[ResponseHandler, float]]] = {}
self._limit = limit
self._limit_per_host = limit_per_host
self._acquired: Set[ResponseHandler] = set()
self._acquired_per_host: DefaultDict[
ConnectionKey, Set[ResponseHandler]
] = defaultdict(set)
self._keepalive_timeout = cast(float, keepalive_timeout)
self._force_close = force_close
# {host_key: FIFO list of waiters}
self._waiters = defaultdict(deque) # type: ignore[var-annotated]
self._loop = loop
self._factory = functools.partial(ResponseHandler, loop=loop)
self.cookies: SimpleCookie[str] = SimpleCookie()
# start keep-alive connection cleanup task
self._cleanup_handle: Optional[asyncio.TimerHandle] = None
# start cleanup closed transports task
self._cleanup_closed_handle: Optional[asyncio.TimerHandle] = None
self._cleanup_closed_disabled = not enable_cleanup_closed
self._cleanup_closed_transports: List[Optional[asyncio.Transport]] = []
self._cleanup_closed()
def __del__(self, _warnings: Any = warnings) -> None:
if self._closed:
return
if not self._conns:
return
conns = [repr(c) for c in self._conns.values()]
self._close()
if PY_36:
kwargs = {"source": self}
else:
kwargs = {}
_warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs)
context = {
"connector": self,
"connections": conns,
"message": "Unclosed connector",
}
if self._source_traceback is not None:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
def __enter__(self) -> "BaseConnector":
warnings.warn(
'"with Connector():" is deprecated, '
'use "async with Connector():" instead',
DeprecationWarning,
)
return self
def __exit__(self, *exc: Any) -> None:
self._close()
async def __aenter__(self) -> "BaseConnector":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]] = None,
exc_value: Optional[BaseException] = None,
exc_traceback: Optional[TracebackType] = None,
) -> None:
await self.close()
@property
def force_close(self) -> bool:
"""Ultimately close connection on releasing if True."""
return self._force_close
@property
def limit(self) -> int:
"""The total number for simultaneous connections.
If limit is 0 the connector has no limit.
The default limit size is 100.
"""
return self._limit
@property
def limit_per_host(self) -> int:
"""The limit for simultaneous connections to the same endpoint.
        Endpoints are the same if they have an equal
        (host, port, is_ssl) triple.
"""
return self._limit_per_host
def _cleanup(self) -> None:
"""Cleanup unused transports."""
if self._cleanup_handle:
self._cleanup_handle.cancel()
# _cleanup_handle should be unset, otherwise _release() will not
# recreate it ever!
self._cleanup_handle = None
now = self._loop.time()
timeout = self._keepalive_timeout
if self._conns:
connections = {}
deadline = now - timeout
for key, conns in self._conns.items():
alive = []
for proto, use_time in conns:
if proto.is_connected():
if use_time - deadline < 0:
transport = proto.transport
proto.close()
if key.is_ssl and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
else:
alive.append((proto, use_time))
else:
transport = proto.transport
proto.close()
if key.is_ssl and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
if alive:
connections[key] = alive
self._conns = connections
if self._conns:
self._cleanup_handle = helpers.weakref_handle(
self, "_cleanup", timeout, self._loop
)
def _drop_acquired_per_host(
self, key: "ConnectionKey", val: ResponseHandler
) -> None:
acquired_per_host = self._acquired_per_host
if key not in acquired_per_host:
return
conns = acquired_per_host[key]
conns.remove(val)
if not conns:
del self._acquired_per_host[key]
def _cleanup_closed(self) -> None:
"""Double confirmation for transport close.
        Some broken ssl servers may leave the socket open without a proper close.
"""
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
self._cleanup_closed_transports = []
if not self._cleanup_closed_disabled:
self._cleanup_closed_handle = helpers.weakref_handle(
self, "_cleanup_closed", self._cleanup_closed_period, self._loop
)
def close(self) -> Awaitable[None]:
"""Close all opened transports."""
self._close()
return _DeprecationWaiter(noop())
def _close(self) -> None:
if self._closed:
return
self._closed = True
try:
if self._loop.is_closed():
return
# cancel cleanup task
if self._cleanup_handle:
self._cleanup_handle.cancel()
# cancel cleanup close task
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for data in self._conns.values():
for proto, t0 in data:
proto.close()
for proto in self._acquired:
proto.close()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
finally:
self._conns.clear()
self._acquired.clear()
self._waiters.clear()
self._cleanup_handle = None
self._cleanup_closed_transports.clear()
self._cleanup_closed_handle = None
@property
def closed(self) -> bool:
"""Is connector closed.
A readonly property.
"""
return self._closed
def _available_connections(self, key: "ConnectionKey") -> int:
"""
        Return the number of available connections.
        The limit, limit_per_host and the connection key are taken into account.
        A result of less than 1 means that no connections are
        available.
"""
if self._limit:
            # calculate the total number of available connections
available = self._limit - len(self._acquired)
# check limit per host
if (
self._limit_per_host
and available > 0
and key in self._acquired_per_host
):
acquired = self._acquired_per_host.get(key)
assert acquired is not None
available = self._limit_per_host - len(acquired)
elif self._limit_per_host and key in self._acquired_per_host:
# check limit per host
acquired = self._acquired_per_host.get(key)
assert acquired is not None
available = self._limit_per_host - len(acquired)
else:
available = 1
return available
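    # Worked example (an editorial addition): with limit=100 and 98 acquired
    # connections overall, ``available`` starts at 100 - 98 = 2; if
    # limit_per_host=5 and one connection is already acquired for this key,
    # it is recomputed as 5 - 1 = 4. The per-host figure replaces, rather
    # than caps, the global one while the global limit is not exhausted.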
async def connect(
self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
) -> Connection:
"""Get from pool or create new connection."""
key = req.connection_key
available = self._available_connections(key)
# Wait if there are no available connections or if there are/were
# waiters (i.e. don't steal connection from a waiter about to wake up)
if available <= 0 or key in self._waiters:
fut = self._loop.create_future()
# This connection will now count towards the limit.
self._waiters[key].append(fut)
if traces:
for trace in traces:
await trace.send_connection_queued_start()
try:
await fut
except BaseException as e:
if key in self._waiters:
# remove a waiter even if it was cancelled, normally it's
# removed when it's notified
try:
self._waiters[key].remove(fut)
except ValueError: # fut may no longer be in list
pass
raise e
finally:
if key in self._waiters and not self._waiters[key]:
del self._waiters[key]
if traces:
for trace in traces:
await trace.send_connection_queued_end()
proto = self._get(key)
if proto is None:
placeholder = cast(ResponseHandler, _TransportPlaceholder())
self._acquired.add(placeholder)
self._acquired_per_host[key].add(placeholder)
if traces:
for trace in traces:
await trace.send_connection_create_start()
try:
proto = await self._create_connection(req, traces, timeout)
if self._closed:
proto.close()
raise ClientConnectionError("Connector is closed.")
except BaseException:
if not self._closed:
self._acquired.remove(placeholder)
self._drop_acquired_per_host(key, placeholder)
self._release_waiter()
raise
else:
if not self._closed:
self._acquired.remove(placeholder)
self._drop_acquired_per_host(key, placeholder)
if traces:
for trace in traces:
await trace.send_connection_create_end()
else:
if traces:
# Acquire the connection to prevent race conditions with limits
placeholder = cast(ResponseHandler, _TransportPlaceholder())
self._acquired.add(placeholder)
self._acquired_per_host[key].add(placeholder)
for trace in traces:
await trace.send_connection_reuseconn()
self._acquired.remove(placeholder)
self._drop_acquired_per_host(key, placeholder)
self._acquired.add(proto)
self._acquired_per_host[key].add(proto)
return Connection(self, key, proto, self._loop)
def _get(self, key: "ConnectionKey") -> Optional[ResponseHandler]:
try:
conns = self._conns[key]
except KeyError:
return None
t1 = self._loop.time()
while conns:
proto, t0 = conns.pop()
if proto.is_connected():
if t1 - t0 > self._keepalive_timeout:
transport = proto.transport
proto.close()
# only for SSL transports
if key.is_ssl and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
else:
if not conns:
# The very last connection was reclaimed: drop the key
del self._conns[key]
return proto
else:
transport = proto.transport
proto.close()
if key.is_ssl and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
# No more connections: drop the key
del self._conns[key]
return None
def _release_waiter(self) -> None:
"""
Iterates over all waiters until one to be released is found.
        The one to be released is not finished and
belongs to a host that has available connections.
"""
if not self._waiters:
return
        # Shuffle the keys so that we do not iterate over them
        # in the same order on every call.
queues = list(self._waiters.keys())
random.shuffle(queues)
for key in queues:
if self._available_connections(key) < 1:
continue
waiters = self._waiters[key]
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
return
def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None:
if self._closed:
# acquired connection is already released on connector closing
return
try:
self._acquired.remove(proto)
self._drop_acquired_per_host(key, proto)
except KeyError: # pragma: no cover
            # this may be a result of the non-deterministic order of object
            # finalization due to garbage collection.
pass
else:
self._release_waiter()
def _release(
self,
key: "ConnectionKey",
protocol: ResponseHandler,
*,
should_close: bool = False,
) -> None:
if self._closed:
# acquired connection is already released on connector closing
return
self._release_acquired(key, protocol)
if self._force_close:
should_close = True
if should_close or protocol.should_close:
transport = protocol.transport
protocol.close()
if key.is_ssl and not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transport)
else:
conns = self._conns.get(key)
if conns is None:
conns = self._conns[key] = []
conns.append((protocol, self._loop.time()))
if self._cleanup_handle is None:
self._cleanup_handle = helpers.weakref_handle(
self, "_cleanup", self._keepalive_timeout, self._loop
)
async def _create_connection(
self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
) -> ResponseHandler:
raise NotImplementedError()
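# A minimal subclassing sketch (an editorial addition, not part of the
# original module): concrete connectors only need to provide
# ``_create_connection()``; pooling, limits and keep-alive cleanup are
# inherited from BaseConnector. The loopback host and port are hypothetical.
#
#     class LoopbackConnector(BaseConnector):
#         async def _create_connection(self, req, traces, timeout):
#             _, proto = await self._loop.create_connection(
#                 self._factory, "127.0.0.1", 8080
#             )
#             return cast(ResponseHandler, proto)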
class _DNSCacheTable:
def __init__(self, ttl: Optional[float] = None) -> None:
self._addrs_rr: Dict[Tuple[str, int], Tuple[Iterator[Dict[str, Any]], int]] = {}
self._timestamps: Dict[Tuple[str, int], float] = {}
self._ttl = ttl
def __contains__(self, host: object) -> bool:
return host in self._addrs_rr
def add(self, key: Tuple[str, int], addrs: List[Dict[str, Any]]) -> None:
self._addrs_rr[key] = (cycle(addrs), len(addrs))
if self._ttl:
self._timestamps[key] = monotonic()
def remove(self, key: Tuple[str, int]) -> None:
self._addrs_rr.pop(key, None)
if self._ttl:
self._timestamps.pop(key, None)
def clear(self) -> None:
self._addrs_rr.clear()
self._timestamps.clear()
def next_addrs(self, key: Tuple[str, int]) -> List[Dict[str, Any]]:
loop, length = self._addrs_rr[key]
addrs = list(islice(loop, length))
# Consume one more element to shift internal state of `cycle`
next(loop)
return addrs
def expired(self, key: Tuple[str, int]) -> bool:
if self._ttl is None:
return False
return self._timestamps[key] + self._ttl < monotonic()
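def _example_dns_cache_round_robin() -> None:
    # A minimal runnable sketch (an editorial addition, not part of the
    # original module) of the round-robin rotation above; the addresses
    # are made up.
    table = _DNSCacheTable(ttl=10)
    key = ("example.com", 80)
    table.add(key, [{"host": "10.0.0.1"}, {"host": "10.0.0.2"}])
    assert table.next_addrs(key)[0]["host"] == "10.0.0.1"
    # next_addrs() consumes one extra element of the underlying ``cycle``,
    # so each call starts one address further along.
    assert table.next_addrs(key)[0]["host"] == "10.0.0.2"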
class TCPConnector(BaseConnector):
"""TCP connector.
    verify_ssl - Set to True to check ssl certificates.
fingerprint - Pass the binary sha256
digest of the expected certificate in DER format to verify
that the certificate the server presents matches. See also
https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
resolver - Enable DNS lookups and use this
resolver
use_dns_cache - Use memory cache for DNS lookups.
    ttl_dns_cache - Max seconds to keep a cached DNS entry, None means forever.
family - socket address family
local_addr - local tuple of (host, port) to bind socket to
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
    enable_cleanup_closed - Enables cleanup of closed SSL transports.
                            Disabled by default.
loop - Optional event loop.
"""
def __init__(
self,
*,
verify_ssl: bool = True,
fingerprint: Optional[bytes] = None,
use_dns_cache: bool = True,
ttl_dns_cache: Optional[int] = 10,
family: int = 0,
ssl_context: Optional[SSLContext] = None,
ssl: Union[None, bool, Fingerprint, SSLContext] = None,
local_addr: Optional[Tuple[str, int]] = None,
resolver: Optional[AbstractResolver] = None,
keepalive_timeout: Union[None, float, object] = sentinel,
force_close: bool = False,
limit: int = 100,
limit_per_host: int = 0,
enable_cleanup_closed: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
):
super().__init__(
keepalive_timeout=keepalive_timeout,
force_close=force_close,
limit=limit,
limit_per_host=limit_per_host,
enable_cleanup_closed=enable_cleanup_closed,
loop=loop,
)
self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
if resolver is None:
resolver = DefaultResolver(loop=self._loop)
self._resolver = resolver
self._use_dns_cache = use_dns_cache
self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
self._throttle_dns_events: Dict[Tuple[str, int], EventResultOrError] = {}
self._family = family
self._local_addr = local_addr
def close(self) -> Awaitable[None]:
"""Close all ongoing DNS calls."""
for ev in self._throttle_dns_events.values():
ev.cancel()
return super().close()
@property
def family(self) -> int:
"""Socket family like AF_INET."""
return self._family
@property
def use_dns_cache(self) -> bool:
"""True if local DNS caching is enabled."""
return self._use_dns_cache
def clear_dns_cache(
self, host: Optional[str] = None, port: Optional[int] = None
) -> None:
"""Remove specified host/port or clear all dns local cache."""
if host is not None and port is not None:
self._cached_hosts.remove((host, port))
elif host is not None or port is not None:
raise ValueError("either both host and port " "or none of them are allowed")
else:
self._cached_hosts.clear()
async def _resolve_host(
self, host: str, port: int, traces: Optional[List["Trace"]] = None
) -> List[Dict[str, Any]]:
if is_ip_address(host):
return [
{
"hostname": host,
"host": host,
"port": port,
"family": self._family,
"proto": 0,
"flags": 0,
}
]
if not self._use_dns_cache:
if traces:
for trace in traces:
await trace.send_dns_resolvehost_start(host)
res = await self._resolver.resolve(host, port, family=self._family)
if traces:
for trace in traces:
await trace.send_dns_resolvehost_end(host)
return res
key = (host, port)
if (key in self._cached_hosts) and (not self._cached_hosts.expired(key)):
# get result early, before any await (#4014)
result = self._cached_hosts.next_addrs(key)
if traces:
for trace in traces:
await trace.send_dns_cache_hit(host)
return result
if key in self._throttle_dns_events:
# get event early, before any await (#4014)
event = self._throttle_dns_events[key]
if traces:
for trace in traces:
await trace.send_dns_cache_hit(host)
await event.wait()
else:
# update dict early, before any await (#4014)
self._throttle_dns_events[key] = EventResultOrError(self._loop)
if traces:
for trace in traces:
await trace.send_dns_cache_miss(host)
try:
if traces:
for trace in traces:
await trace.send_dns_resolvehost_start(host)
addrs = await self._resolver.resolve(host, port, family=self._family)
if traces:
for trace in traces:
await trace.send_dns_resolvehost_end(host)
self._cached_hosts.add(key, addrs)
self._throttle_dns_events[key].set()
except BaseException as e:
                # any DNS exception is set for the waiters to raise the
                # same exception, independently of the resolver implementation.
self._throttle_dns_events[key].set(exc=e)
raise
finally:
self._throttle_dns_events.pop(key)
return self._cached_hosts.next_addrs(key)
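    # Note (an editorial addition): concurrent lookups for the same
    # (host, port) key are coalesced above; the first caller resolves and
    # populates the cache while later callers wait on the shared
    # EventResultOrError and then read the result via next_addrs().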
async def _create_connection(
self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
) -> ResponseHandler:
"""Create connection.
        Has the same keyword arguments as BaseEventLoop.create_connection.
"""
if req.proxy:
_, proto = await self._create_proxy_connection(req, traces, timeout)
else:
_, proto = await self._create_direct_connection(req, traces, timeout)
return proto
@staticmethod
@functools.lru_cache(None)
def _make_ssl_context(verified: bool) -> SSLContext:
if verified:
return ssl.create_default_context()
else:
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.check_hostname = False
sslcontext.verify_mode = ssl.CERT_NONE
try:
sslcontext.options |= ssl.OP_NO_COMPRESSION
except AttributeError as attr_err:
warnings.warn(
"{!s}: The Python interpreter is compiled "
"against OpenSSL < 1.0.0. Ref: "
"https://docs.python.org/3/library/ssl.html"
"#ssl.OP_NO_COMPRESSION".format(attr_err),
)
sslcontext.set_default_verify_paths()
return sslcontext
def _get_ssl_context(self, req: "ClientRequest") -> Optional[SSLContext]:
"""Logic to get the correct SSL context
0. if req.ssl is false, return None
1. if ssl_context is specified in req, use it
2. if _ssl_context is specified in self, use it
3. otherwise:
1. if verify_ssl is not specified in req, use self.ssl_context
(will generate a default context according to self.verify_ssl)
2. if verify_ssl is True in req, generate a default SSL context
3. if verify_ssl is False in req, generate a SSL context that
won't verify
"""
if req.is_ssl():
if ssl is None: # pragma: no cover
raise RuntimeError("SSL is not supported.")
sslcontext = req.ssl
if isinstance(sslcontext, ssl.SSLContext):
return sslcontext
if sslcontext is not None:
# not verified or fingerprinted
return self._make_ssl_context(False)
sslcontext = self._ssl
if isinstance(sslcontext, ssl.SSLContext):
return sslcontext
if sslcontext is not None:
# not verified or fingerprinted
return self._make_ssl_context(False)
return self._make_ssl_context(True)
else:
return None
def _get_fingerprint(self, req: "ClientRequest") -> Optional["Fingerprint"]:
ret = req.ssl
if isinstance(ret, Fingerprint):
return ret
ret = self._ssl
if isinstance(ret, Fingerprint):
return ret
return None
async def _wrap_create_connection(
self,
*args: Any,
req: "ClientRequest",
timeout: "ClientTimeout",
client_error: Type[Exception] = ClientConnectorError,
**kwargs: Any,
) -> Tuple[asyncio.Transport, ResponseHandler]:
try:
async with ceil_timeout(timeout.sock_connect):
return await self._loop.create_connection(*args, **kwargs) # type: ignore[return-value] # noqa
except cert_errors as exc:
raise ClientConnectorCertificateError(req.connection_key, exc) from exc
except ssl_errors as exc:
raise ClientConnectorSSLError(req.connection_key, exc) from exc
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise client_error(req.connection_key, exc) from exc
def _fail_on_no_start_tls(self, req: "ClientRequest") -> None:
"""Raise a :py:exc:`RuntimeError` on missing ``start_tls()``.
One case is that :py:meth:`asyncio.loop.start_tls` is not yet
implemented under Python 3.6. It is necessary for TLS-in-TLS so
that it is possible to send HTTPS queries through HTTPS proxies.
This doesn't affect regular HTTP requests, though.
"""
if not req.is_ssl():
return
proxy_url = req.proxy
assert proxy_url is not None
if proxy_url.scheme != "https":
return
self._check_loop_for_start_tls()
def _check_loop_for_start_tls(self) -> None:
try:
self._loop.start_tls
except AttributeError as attr_exc:
raise RuntimeError(
"An HTTPS request is being sent through an HTTPS proxy. "
"This needs support for TLS in TLS but it is not implemented "
"in your runtime for the stdlib asyncio.\n\n"
"Please upgrade to Python 3.7 or higher. For more details, "
"please see:\n"
"* https://bugs.python.org/issue37179\n"
"* https://github.com/python/cpython/pull/28073\n"
"* https://docs.aiohttp.org/en/stable/"
"client_advanced.html#proxy-support\n"
"* https://github.com/aio-libs/aiohttp/discussions/6044\n",
) from attr_exc
def _loop_supports_start_tls(self) -> bool:
try:
self._check_loop_for_start_tls()
except RuntimeError:
return False
else:
return True
def _warn_about_tls_in_tls(
self,
underlying_transport: asyncio.Transport,
req: "ClientRequest",
) -> None:
"""Issue a warning if the requested URL has HTTPS scheme."""
if req.request_info.url.scheme != "https":
return
asyncio_supports_tls_in_tls = getattr(
underlying_transport,
"_start_tls_compatible",
False,
)
if asyncio_supports_tls_in_tls:
return
warnings.warn(
"An HTTPS request is being sent through an HTTPS proxy. "
"This support for TLS in TLS is known to be disabled "
"in the stdlib asyncio. This is why you'll probably see "
"an error in the log below.\n\n"
"It is possible to enable it via monkeypatching under "
"Python 3.7 or higher. For more details, see:\n"
"* https://bugs.python.org/issue37179\n"
"* https://github.com/python/cpython/pull/28073\n\n"
"You can temporarily patch this as follows:\n"
"* https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support\n"
"* https://github.com/aio-libs/aiohttp/discussions/6044\n",
RuntimeWarning,
source=self,
            # Why `3`? At least 2 of the calls in the stack originate
            # from the methods in this class.
stacklevel=3,
)
async def _start_tls_connection(
self,
underlying_transport: asyncio.Transport,
req: "ClientRequest",
timeout: "ClientTimeout",
client_error: Type[Exception] = ClientConnectorError,
) -> Tuple[asyncio.BaseTransport, ResponseHandler]:
"""Wrap the raw TCP transport with TLS."""
tls_proto = self._factory() # Create a brand new proto for TLS
# Safety of the `cast()` call here is based on the fact that
# internally `_get_ssl_context()` only returns `None` when
        # `req.is_ssl()` evaluates to `False`, which never happens
        # in this code path. Of course, it's rather fragile
# maintainability-wise but this is to be solved separately.
sslcontext = cast(ssl.SSLContext, self._get_ssl_context(req))
try:
async with ceil_timeout(timeout.sock_connect):
try:
tls_transport = await self._loop.start_tls(
underlying_transport,
tls_proto,
sslcontext,
server_hostname=req.host,
ssl_handshake_timeout=timeout.total,
)
except BaseException:
# We need to close the underlying transport since
# `start_tls()` probably failed before it had a
# chance to do this:
underlying_transport.close()
raise
except cert_errors as exc:
raise ClientConnectorCertificateError(req.connection_key, exc) from exc
except ssl_errors as exc:
raise ClientConnectorSSLError(req.connection_key, exc) from exc
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise client_error(req.connection_key, exc) from exc
except TypeError as type_err:
# Example cause looks like this:
# TypeError: transport <asyncio.sslproto._SSLProtocolTransport
# object at 0x7f760615e460> is not supported by start_tls()
raise ClientConnectionError(
"Cannot initialize a TLS-in-TLS connection to host "
f"{req.host!s}:{req.port:d} through an underlying connection "
f"to an HTTPS proxy {req.proxy!s} ssl:{req.ssl or 'default'} "
f"[{type_err!s}]"
) from type_err
else:
tls_proto.connection_made(
tls_transport
) # Kick the state machine of the new TLS protocol
return tls_transport, tls_proto
async def _create_direct_connection(
self,
req: "ClientRequest",
traces: List["Trace"],
timeout: "ClientTimeout",
*,
client_error: Type[Exception] = ClientConnectorError,
) -> Tuple[asyncio.Transport, ResponseHandler]:
sslcontext = self._get_ssl_context(req)
fingerprint = self._get_fingerprint(req)
host = req.url.raw_host
assert host is not None
port = req.port
assert port is not None
host_resolved = asyncio.ensure_future(
self._resolve_host(host, port, traces=traces), loop=self._loop
)
try:
# Cancelling this lookup should not cancel the underlying lookup
# or else the cancel event will get broadcast to all the waiters
# across all connections.
hosts = await asyncio.shield(host_resolved)
except asyncio.CancelledError:
def drop_exception(fut: "asyncio.Future[List[Dict[str, Any]]]") -> None:
with suppress(Exception, asyncio.CancelledError):
fut.result()
host_resolved.add_done_callback(drop_exception)
raise
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
            # in the case of a proxy this is not a ClientProxyConnectionError,
            # it is a problem with resolving the proxy IP itself
raise ClientConnectorError(req.connection_key, exc) from exc
last_exc: Optional[Exception] = None
for hinfo in hosts:
host = hinfo["host"]
port = hinfo["port"]
try:
transp, proto = await self._wrap_create_connection(
self._factory,
host,
port,
timeout=timeout,
ssl=sslcontext,
family=hinfo["family"],
proto=hinfo["proto"],
flags=hinfo["flags"],
server_hostname=hinfo["hostname"] if sslcontext else None,
local_addr=self._local_addr,
req=req,
client_error=client_error,
)
except ClientConnectorError as exc:
last_exc = exc
continue
if req.is_ssl() and fingerprint:
try:
fingerprint.check(transp)
except ServerFingerprintMismatch as exc:
transp.close()
if not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transp)
last_exc = exc
continue
return transp, proto
else:
assert last_exc is not None
raise last_exc
async def _create_proxy_connection(
self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
) -> Tuple[asyncio.BaseTransport, ResponseHandler]:
self._fail_on_no_start_tls(req)
runtime_has_start_tls = self._loop_supports_start_tls()
headers: Dict[str, str] = {}
if req.proxy_headers is not None:
headers = req.proxy_headers # type: ignore[assignment]
headers[hdrs.HOST] = req.headers[hdrs.HOST]
url = req.proxy
assert url is not None
proxy_req = ClientRequest(
hdrs.METH_GET,
url,
headers=headers,
auth=req.proxy_auth,
loop=self._loop,
ssl=req.ssl,
)
# create connection to proxy server
transport, proto = await self._create_direct_connection(
proxy_req, [], timeout, client_error=ClientProxyConnectionError
)
        # Many HTTP proxies have buggy keep-alive support. Let's not
        # reuse the connection but close it after processing every
        # response.
proto.force_close()
auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
if auth is not None:
if not req.is_ssl():
req.headers[hdrs.PROXY_AUTHORIZATION] = auth
else:
proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth
if req.is_ssl():
if runtime_has_start_tls:
self._warn_about_tls_in_tls(transport, req)
# For HTTPS requests over HTTP proxy
# we must notify proxy to tunnel connection
# so we send CONNECT command:
# CONNECT www.python.org:443 HTTP/1.1
# Host: www.python.org
#
# next we must do TLS handshake and so on
# to do this we must wrap raw socket into secure one
# asyncio handles this perfectly
proxy_req.method = hdrs.METH_CONNECT
proxy_req.url = req.url
key = attr.evolve(
req.connection_key, proxy=None, proxy_auth=None, proxy_headers_hash=None
)
conn = Connection(self, key, proto, self._loop)
proxy_resp = await proxy_req.send(conn)
try:
protocol = conn._protocol
assert protocol is not None
# read_until_eof=True will ensure the connection isn't closed
# once the response is received and processed allowing
# START_TLS to work on the connection below.
protocol.set_response_params(read_until_eof=runtime_has_start_tls)
resp = await proxy_resp.start(conn)
except BaseException:
proxy_resp.close()
conn.close()
raise
else:
conn._protocol = None
conn._transport = None
try:
if resp.status != 200:
message = resp.reason
if message is None:
message = RESPONSES[resp.status][0]
raise ClientHttpProxyError(
proxy_resp.request_info,
resp.history,
status=resp.status,
message=message,
headers=resp.headers,
)
if not runtime_has_start_tls:
rawsock = transport.get_extra_info("socket", default=None)
if rawsock is None:
raise RuntimeError(
"Transport does not expose socket instance"
)
                        # Duplicate the socket, so now we can close the proxy transport
rawsock = rawsock.dup()
except BaseException:
# It shouldn't be closed in `finally` because it's fed to
# `loop.start_tls()` and the docs say not to touch it after
# passing there.
transport.close()
raise
finally:
if not runtime_has_start_tls:
transport.close()
if not runtime_has_start_tls:
# HTTP proxy with support for upgrade to HTTPS
sslcontext = self._get_ssl_context(req)
return await self._wrap_create_connection(
self._factory,
timeout=timeout,
ssl=sslcontext,
sock=rawsock,
server_hostname=req.host,
req=req,
)
return await self._start_tls_connection(
# Access the old transport for the last time before it's
# closed and forgotten forever:
transport,
req=req,
timeout=timeout,
)
finally:
proxy_resp.close()
return transport, proto
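# Illustrative usage sketch (an editorial addition, not part of the original
# module); the URL and tuning values are arbitrary:
#
#     connector = TCPConnector(limit=50, limit_per_host=10, ttl_dns_cache=300)
#     async with aiohttp.ClientSession(connector=connector) as session:
#         async with session.get("https://example.com") as resp:
#             body = await resp.read()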
class UnixConnector(BaseConnector):
"""Unix socket connector.
path - Unix socket path.
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
loop - Optional event loop.
"""
def __init__(
self,
path: str,
force_close: bool = False,
keepalive_timeout: Union[object, float, None] = sentinel,
limit: int = 100,
limit_per_host: int = 0,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(
force_close=force_close,
keepalive_timeout=keepalive_timeout,
limit=limit,
limit_per_host=limit_per_host,
loop=loop,
)
self._path = path
@property
def path(self) -> str:
"""Path to unix socket."""
return self._path
async def _create_connection(
self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
) -> ResponseHandler:
try:
async with ceil_timeout(timeout.sock_connect):
_, proto = await self._loop.create_unix_connection(
self._factory, self._path
)
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise UnixClientConnectorError(self.path, req.connection_key, exc) from exc
return cast(ResponseHandler, proto)
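# Illustrative usage sketch (an editorial addition, not part of the original
# module); the socket path is arbitrary, and the URL host is effectively
# ignored because every connection goes through the configured Unix socket:
#
#     connector = UnixConnector(path="/tmp/app.sock")
#     async with aiohttp.ClientSession(connector=connector) as session:
#         async with session.get("http://localhost/ping") as resp:
#             assert resp.status == 200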
class NamedPipeConnector(BaseConnector):
"""Named pipe connector.
Only supported by the proactor event loop.
See also: https://docs.python.org/3.7/library/asyncio-eventloop.html
path - Windows named pipe path.
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
loop - Optional event loop.
"""
def __init__(
self,
path: str,
force_close: bool = False,
keepalive_timeout: Union[object, float, None] = sentinel,
limit: int = 100,
limit_per_host: int = 0,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(
force_close=force_close,
keepalive_timeout=keepalive_timeout,
limit=limit,
limit_per_host=limit_per_host,
loop=loop,
)
if not isinstance(
self._loop, asyncio.ProactorEventLoop # type: ignore[attr-defined]
):
raise RuntimeError(
"Named Pipes only available in proactor " "loop under windows"
)
self._path = path
@property
def path(self) -> str:
"""Path to the named pipe."""
return self._path
async def _create_connection(
self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
) -> ResponseHandler:
try:
async with ceil_timeout(timeout.sock_connect):
_, proto = await self._loop.create_pipe_connection( # type: ignore[attr-defined] # noqa: E501
self._factory, self._path
)
                # the drain is required so that connection_made() is called
                # and the transport is set; otherwise it is not set before the
                # `assert conn.transport is not None`
                # in client.py's _request method
                await asyncio.sleep(0)
                # another option is to manually set the transport, e.g.
                # `proto.transport = trans`
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise ClientConnectorError(req.connection_key, exc) from exc
return cast(ResponseHandler, proto)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_middlewares.py | import re
from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar
from .typedefs import Handler
from .web_exceptions import HTTPPermanentRedirect, _HTTPMove
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import SystemRoute
__all__ = (
"middleware",
"normalize_path_middleware",
)
if TYPE_CHECKING: # pragma: no cover
from .web_app import Application
_Func = TypeVar("_Func")
async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
alt_request = request.clone(rel_url=path)
match_info = await request.app.router.resolve(alt_request)
alt_request._match_info = match_info
if match_info.http_exception is None:
return True, alt_request
return False, request
def middleware(f: _Func) -> _Func:
f.__middleware_version__ = 1 # type: ignore[attr-defined]
return f
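# Illustrative usage sketch (an editorial addition, not part of the original
# module): the decorator only tags the handler with __middleware_version__
# so the framework treats it as a new-style middleware.
#
#     @middleware
#     async def server_header_middleware(request: Request, handler: Handler):
#         response = await handler(request)
#         response.headers["Server"] = "demo"
#         return response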
_Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]
def normalize_path_middleware(
*,
append_slash: bool = True,
remove_slash: bool = False,
merge_slashes: bool = True,
redirect_class: Type[_HTTPMove] = HTTPPermanentRedirect,
) -> _Middleware:
"""Factory for producing a middleware that normalizes the path of a request.
Normalizing means:
- Add or remove a trailing slash to the path.
- Double slashes are replaced by one.
The middleware returns as soon as it finds a path that resolves
    correctly. The order, if both merge and append/remove are enabled, is:
    1) merge slashes
    2) append/remove slash
    3) both merge slashes and append/remove slash.
    If the path resolves with at least one of those conditions, it will
    redirect to the new path.
    Only one of `append_slash` and `remove_slash` can be enabled. If both
    are `True` the factory will raise an assertion error.
    If `append_slash` is `True` the middleware will append a slash when
    needed. If a resource is defined with a trailing slash and the request
    comes without it, it will append it automatically.
    If `remove_slash` is `True`, `append_slash` must be `False`. When enabled,
    the middleware will remove trailing slashes and redirect if the resource
    is defined.
If merge_slashes is True, merge multiple consecutive slashes in the
path into one.
"""
correct_configuration = not (append_slash and remove_slash)
assert correct_configuration, "Cannot both remove and append slash"
@middleware
async def impl(request: Request, handler: Handler) -> StreamResponse:
if isinstance(request.match_info.route, SystemRoute):
paths_to_check = []
if "?" in request.raw_path:
path, query = request.raw_path.split("?", 1)
query = "?" + query
else:
query = ""
path = request.raw_path
if merge_slashes:
paths_to_check.append(re.sub("//+", "/", path))
if append_slash and not request.path.endswith("/"):
paths_to_check.append(path + "/")
if remove_slash and request.path.endswith("/"):
paths_to_check.append(path[:-1])
if merge_slashes and append_slash:
paths_to_check.append(re.sub("//+", "/", path + "/"))
if merge_slashes and remove_slash:
merged_slashes = re.sub("//+", "/", path)
paths_to_check.append(merged_slashes[:-1])
for path in paths_to_check:
path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg
resolves, request = await _check_request_resolves(request, path)
if resolves:
raise redirect_class(request.raw_path + query)
return await handler(request)
return impl
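# Illustrative usage sketch (an editorial addition, not part of the original
# module); ``web`` stands for ``aiohttp.web``:
#
#     app = web.Application(
#         middlewares=[normalize_path_middleware(append_slash=True)]
#     )
#
# With a route registered at "/path/", requests for "/path" or "//path" are
# answered with a permanent redirect to the normalized URL.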
def _fix_request_current_app(app: "Application") -> _Middleware:
@middleware
async def impl(request: Request, handler: Handler) -> StreamResponse:
with request.match_info.set_current_app(app):
return await handler(request)
return impl
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/tcp_helpers.py | """Helper methods to tune a TCP connection"""
import asyncio
import socket
from contextlib import suppress
from typing import Optional # noqa
__all__ = ("tcp_keepalive", "tcp_nodelay")
if hasattr(socket, "SO_KEEPALIVE"):
def tcp_keepalive(transport: asyncio.Transport) -> None:
sock = transport.get_extra_info("socket")
if sock is not None:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
else:
def tcp_keepalive(transport: asyncio.Transport) -> None: # pragma: no cover
pass
def tcp_nodelay(transport: asyncio.Transport, value: bool) -> None:
sock = transport.get_extra_info("socket")
if sock is None:
return
if sock.family not in (socket.AF_INET, socket.AF_INET6):
return
value = bool(value)
    # the socket may already be closed; on Windows an OSError is raised
with suppress(OSError):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, value)
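# Illustrative usage sketch (an editorial addition, not part of the original
# module): both helpers silently do nothing when the transport exposes no
# raw socket, and tcp_nodelay additionally skips non-INET address families.
#
#     tcp_keepalive(transport)       # enable SO_KEEPALIVE where supported
#     tcp_nodelay(transport, True)   # disable Nagle's algorithm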
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/formdata.py | import io
from typing import Any, Iterable, List, Optional
from urllib.parse import urlencode
from multidict import MultiDict, MultiDictProxy
from . import hdrs, multipart, payload
from .helpers import guess_filename
from .payload import Payload
__all__ = ("FormData",)
class FormData:
"""Helper class for form body generation.
Supports multipart/form-data and application/x-www-form-urlencoded.
"""
def __init__(
self,
fields: Iterable[Any] = (),
quote_fields: bool = True,
charset: Optional[str] = None,
) -> None:
self._writer = multipart.MultipartWriter("form-data")
self._fields: List[Any] = []
self._is_multipart = False
self._is_processed = False
self._quote_fields = quote_fields
self._charset = charset
if isinstance(fields, dict):
fields = list(fields.items())
elif not isinstance(fields, (list, tuple)):
fields = (fields,)
self.add_fields(*fields)
@property
def is_multipart(self) -> bool:
return self._is_multipart
def add_field(
self,
name: str,
value: Any,
*,
content_type: Optional[str] = None,
filename: Optional[str] = None,
content_transfer_encoding: Optional[str] = None,
) -> None:
if isinstance(value, io.IOBase):
self._is_multipart = True
elif isinstance(value, (bytes, bytearray, memoryview)):
if filename is None and content_transfer_encoding is None:
filename = name
type_options: MultiDict[str] = MultiDict({"name": name})
if filename is not None and not isinstance(filename, str):
raise TypeError(
"filename must be an instance of str. " "Got: %s" % filename
)
if filename is None and isinstance(value, io.IOBase):
filename = guess_filename(value, name)
if filename is not None:
type_options["filename"] = filename
self._is_multipart = True
headers = {}
if content_type is not None:
if not isinstance(content_type, str):
raise TypeError(
"content_type must be an instance of str. " "Got: %s" % content_type
)
headers[hdrs.CONTENT_TYPE] = content_type
self._is_multipart = True
if content_transfer_encoding is not None:
if not isinstance(content_transfer_encoding, str):
raise TypeError(
"content_transfer_encoding must be an instance"
" of str. Got: %s" % content_transfer_encoding
)
headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
self._is_multipart = True
self._fields.append((type_options, headers, value))
def add_fields(self, *fields: Any) -> None:
to_add = list(fields)
while to_add:
rec = to_add.pop(0)
if isinstance(rec, io.IOBase):
k = guess_filename(rec, "unknown")
self.add_field(k, rec) # type: ignore[arg-type]
elif isinstance(rec, (MultiDictProxy, MultiDict)):
to_add.extend(rec.items())
elif isinstance(rec, (list, tuple)) and len(rec) == 2:
k, fp = rec
self.add_field(k, fp) # type: ignore[arg-type]
else:
raise TypeError(
"Only io.IOBase, multidict and (name, file) "
"pairs allowed, use .add_field() for passing "
"more complex parameters, got {!r}".format(rec)
)
def _gen_form_urlencoded(self) -> payload.BytesPayload:
# form data (x-www-form-urlencoded)
data = []
for type_options, _, value in self._fields:
data.append((type_options["name"], value))
charset = self._charset if self._charset is not None else "utf-8"
if charset == "utf-8":
content_type = "application/x-www-form-urlencoded"
else:
content_type = "application/x-www-form-urlencoded; " "charset=%s" % charset
return payload.BytesPayload(
urlencode(data, doseq=True, encoding=charset).encode(),
content_type=content_type,
)
def _gen_form_data(self) -> multipart.MultipartWriter:
"""Encode a list of fields using the multipart/form-data MIME format"""
if self._is_processed:
raise RuntimeError("Form data has been processed already")
for dispparams, headers, value in self._fields:
try:
if hdrs.CONTENT_TYPE in headers:
part = payload.get_payload(
value,
content_type=headers[hdrs.CONTENT_TYPE],
headers=headers,
encoding=self._charset,
)
else:
part = payload.get_payload(
value, headers=headers, encoding=self._charset
)
except Exception as exc:
raise TypeError(
"Can not serialize value type: %r\n "
"headers: %r\n value: %r" % (type(value), headers, value)
) from exc
if dispparams:
part.set_content_disposition(
"form-data", quote_fields=self._quote_fields, **dispparams
)
                # FIXME cgi.FieldStorage doesn't like body parts with
# Content-Length which were sent via chunked transfer encoding
assert part.headers is not None
part.headers.popall(hdrs.CONTENT_LENGTH, None)
self._writer.append_payload(part)
self._is_processed = True
return self._writer
def __call__(self) -> Payload:
if self._is_multipart:
return self._gen_form_data()
else:
return self._gen_form_urlencoded()
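# Illustrative usage sketch (an editorial addition, not part of the original
# module); the field names and file are arbitrary. Adding an io.IOBase value,
# a filename or an explicit content type switches the form to multipart:
#
#     form = FormData()
#     form.add_field("name", "value")  # still x-www-form-urlencoded
#     form.add_field(
#         "file",
#         open("report.pdf", "rb"),
#         filename="report.pdf",
#         content_type="application/pdf",
#     )  # now multipart/form-data
#     payload = form()  # returns a MultipartWriter for multipart forms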
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_routedef.py | import abc
import os # noqa
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Type,
Union,
overload,
)
import attr
from . import hdrs
from .abc import AbstractView
from .typedefs import Handler, PathLike
if TYPE_CHECKING: # pragma: no cover
from .web_request import Request
from .web_response import StreamResponse
from .web_urldispatcher import AbstractRoute, UrlDispatcher
else:
Request = StreamResponse = UrlDispatcher = AbstractRoute = None
__all__ = (
"AbstractRouteDef",
"RouteDef",
"StaticDef",
"RouteTableDef",
"head",
"options",
"get",
"post",
"patch",
"put",
"delete",
"route",
"view",
"static",
)
class AbstractRouteDef(abc.ABC):
@abc.abstractmethod
def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
pass # pragma: no cover
_HandlerType = Union[Type[AbstractView], Handler]
@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
class RouteDef(AbstractRouteDef):
method: str
path: str
handler: _HandlerType
kwargs: Dict[str, Any]
def __repr__(self) -> str:
info = []
for name, value in sorted(self.kwargs.items()):
info.append(f", {name}={value!r}")
return "<RouteDef {method} {path} -> {handler.__name__!r}" "{info}>".format(
method=self.method, path=self.path, handler=self.handler, info="".join(info)
)
def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
if self.method in hdrs.METH_ALL:
reg = getattr(router, "add_" + self.method.lower())
return [reg(self.path, self.handler, **self.kwargs)]
else:
return [
router.add_route(self.method, self.path, self.handler, **self.kwargs)
]
@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
class StaticDef(AbstractRouteDef):
prefix: str
path: PathLike
kwargs: Dict[str, Any]
def __repr__(self) -> str:
info = []
for name, value in sorted(self.kwargs.items()):
info.append(f", {name}={value!r}")
return "<StaticDef {prefix} -> {path}" "{info}>".format(
prefix=self.prefix, path=self.path, info="".join(info)
)
def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
resource = router.add_static(self.prefix, self.path, **self.kwargs)
routes = resource.get_info().get("routes", {})
return list(routes.values())
def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return RouteDef(method, path, handler, kwargs)
def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return route(hdrs.METH_HEAD, path, handler, **kwargs)
def options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return route(hdrs.METH_OPTIONS, path, handler, **kwargs)
def get(
path: str,
handler: _HandlerType,
*,
name: Optional[str] = None,
allow_head: bool = True,
**kwargs: Any,
) -> RouteDef:
return route(
hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs
)
def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return route(hdrs.METH_POST, path, handler, **kwargs)
def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return route(hdrs.METH_PUT, path, handler, **kwargs)
def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return route(hdrs.METH_PATCH, path, handler, **kwargs)
def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
return route(hdrs.METH_DELETE, path, handler, **kwargs)
def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef:
return route(hdrs.METH_ANY, path, handler, **kwargs)
def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef:
return StaticDef(prefix, path, kwargs)
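# Illustrative usage sketch (an editorial addition, not part of the original
# module); ``web`` stands for ``aiohttp.web`` and the handlers are
# hypothetical:
#
#     app.add_routes([
#         web.get("/", handle_index),
#         web.post("/submit", handle_submit),
#         web.static("/static", "./static"),
#     ])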
_Deco = Callable[[_HandlerType], _HandlerType]
class RouteTableDef(Sequence[AbstractRouteDef]):
"""Route definition table"""
def __init__(self) -> None:
self._items: List[AbstractRouteDef] = []
def __repr__(self) -> str:
return f"<RouteTableDef count={len(self._items)}>"
@overload
def __getitem__(self, index: int) -> AbstractRouteDef:
...
@overload
def __getitem__(self, index: slice) -> List[AbstractRouteDef]:
...
def __getitem__(self, index): # type: ignore[no-untyped-def]
return self._items[index]
def __iter__(self) -> Iterator[AbstractRouteDef]:
return iter(self._items)
def __len__(self) -> int:
return len(self._items)
def __contains__(self, item: object) -> bool:
return item in self._items
def route(self, method: str, path: str, **kwargs: Any) -> _Deco:
def inner(handler: _HandlerType) -> _HandlerType:
self._items.append(RouteDef(method, path, handler, kwargs))
return handler
return inner
def head(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_HEAD, path, **kwargs)
def get(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_GET, path, **kwargs)
def post(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_POST, path, **kwargs)
def put(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_PUT, path, **kwargs)
def patch(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_PATCH, path, **kwargs)
def delete(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_DELETE, path, **kwargs)
def options(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_OPTIONS, path, **kwargs)
def view(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_ANY, path, **kwargs)
def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None:
self._items.append(StaticDef(prefix, path, kwargs))
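# Illustrative usage sketch (an editorial addition, not part of the original
# module): the decorator form appends RouteDef entries to the table, which
# is registered on an application afterwards.
#
#     routes = web.RouteTableDef()
#
#     @routes.get("/ping")
#     async def ping(request):
#         return web.Response(text="pong")
#
#     app = web.Application()
#     app.add_routes(routes)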
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http.py | import http.server
import sys
from typing import Mapping, Tuple
from . import __version__
from .http_exceptions import HttpProcessingError as HttpProcessingError
from .http_parser import (
HeadersParser as HeadersParser,
HttpParser as HttpParser,
HttpRequestParser as HttpRequestParser,
HttpResponseParser as HttpResponseParser,
RawRequestMessage as RawRequestMessage,
RawResponseMessage as RawResponseMessage,
)
from .http_websocket import (
WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE,
WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE,
WS_KEY as WS_KEY,
WebSocketError as WebSocketError,
WebSocketReader as WebSocketReader,
WebSocketWriter as WebSocketWriter,
WSCloseCode as WSCloseCode,
WSMessage as WSMessage,
WSMsgType as WSMsgType,
ws_ext_gen as ws_ext_gen,
ws_ext_parse as ws_ext_parse,
)
from .http_writer import (
HttpVersion as HttpVersion,
HttpVersion10 as HttpVersion10,
HttpVersion11 as HttpVersion11,
StreamWriter as StreamWriter,
)
__all__ = (
"HttpProcessingError",
"RESPONSES",
"SERVER_SOFTWARE",
# .http_writer
"StreamWriter",
"HttpVersion",
"HttpVersion10",
"HttpVersion11",
# .http_parser
"HeadersParser",
"HttpParser",
"HttpRequestParser",
"HttpResponseParser",
"RawRequestMessage",
"RawResponseMessage",
# .http_websocket
"WS_CLOSED_MESSAGE",
"WS_CLOSING_MESSAGE",
"WS_KEY",
"WebSocketReader",
"WebSocketWriter",
"ws_ext_gen",
"ws_ext_parse",
"WSMessage",
"WebSocketError",
"WSMsgType",
"WSCloseCode",
)
SERVER_SOFTWARE: str = "Python/{0[0]}.{0[1]} aiohttp/{1}".format(
sys.version_info, __version__
)
RESPONSES: Mapping[int, Tuple[str, str]] = http.server.BaseHTTPRequestHandler.responses
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/helpers.py | """Various helper functions"""
import asyncio
import base64
import binascii
import datetime
import functools
import inspect
import netrc
import os
import platform
import re
import sys
import time
import warnings
import weakref
from collections import namedtuple
from contextlib import suppress
from email.parser import HeaderParser
from email.utils import parsedate
from math import ceil
from pathlib import Path
from types import TracebackType
from typing import (
Any,
Callable,
ContextManager,
Dict,
Generator,
Generic,
Iterable,
Iterator,
List,
Mapping,
Optional,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from urllib.parse import quote
from urllib.request import getproxies, proxy_bypass
import async_timeout
import attr
from multidict import MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs
from .log import client_logger, internal_logger
from .typedefs import PathLike, Protocol # noqa
__all__ = ("BasicAuth", "ChainMapProxy", "ETag")
IS_MACOS = platform.system() == "Darwin"
IS_WINDOWS = platform.system() == "Windows"
PY_36 = sys.version_info >= (3, 6)
PY_37 = sys.version_info >= (3, 7)
PY_38 = sys.version_info >= (3, 8)
PY_310 = sys.version_info >= (3, 10)
if sys.version_info < (3, 7):
import idna_ssl
idna_ssl.patch_match_hostname()
def all_tasks(
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> Set["asyncio.Task[Any]"]:
tasks = list(asyncio.Task.all_tasks(loop))
return {t for t in tasks if not t.done()}
else:
all_tasks = asyncio.all_tasks
_T = TypeVar("_T")
_S = TypeVar("_S")
sentinel: Any = object()
NO_EXTENSIONS: bool = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS"))
# N.B. sys.flags.dev_mode is available on Python 3.7+, use getattr
# for compatibility with older versions
DEBUG: bool = getattr(sys.flags, "dev_mode", False) or (
not sys.flags.ignore_environment and bool(os.environ.get("PYTHONASYNCIODEBUG"))
)
CHAR = {chr(i) for i in range(0, 128)}
CTL = {chr(i) for i in range(0, 32)} | {
chr(127),
}
SEPARATORS = {
"(",
")",
"<",
">",
"@",
",",
";",
":",
"\\",
'"',
"/",
"[",
"]",
"?",
"=",
"{",
"}",
" ",
chr(9),
}
TOKEN = CHAR ^ CTL ^ SEPARATORS
class noop:
def __await__(self) -> Generator[None, None, None]:
yield
class BasicAuth(namedtuple("BasicAuth", ["login", "password", "encoding"])):
"""Http basic authentication helper."""
def __new__(
cls, login: str, password: str = "", encoding: str = "latin1"
) -> "BasicAuth":
if login is None:
raise ValueError("None is not allowed as login value")
if password is None:
raise ValueError("None is not allowed as password value")
if ":" in login:
raise ValueError('A ":" is not allowed in login (RFC 1945#section-11.1)')
return super().__new__(cls, login, password, encoding)
@classmethod
def decode(cls, auth_header: str, encoding: str = "latin1") -> "BasicAuth":
"""Create a BasicAuth object from an Authorization HTTP header."""
try:
auth_type, encoded_credentials = auth_header.split(" ", 1)
except ValueError:
raise ValueError("Could not parse authorization header.")
if auth_type.lower() != "basic":
raise ValueError("Unknown authorization method %s" % auth_type)
try:
decoded = base64.b64decode(
encoded_credentials.encode("ascii"), validate=True
).decode(encoding)
except binascii.Error:
raise ValueError("Invalid base64 encoding.")
try:
# RFC 2617 HTTP Authentication
# https://www.ietf.org/rfc/rfc2617.txt
# the colon must be present, but the username and password may be
# otherwise blank.
username, password = decoded.split(":", 1)
except ValueError:
raise ValueError("Invalid credentials.")
return cls(username, password, encoding=encoding)
@classmethod
def from_url(cls, url: URL, *, encoding: str = "latin1") -> Optional["BasicAuth"]:
"""Create BasicAuth from url."""
if not isinstance(url, URL):
raise TypeError("url should be yarl.URL instance")
if url.user is None:
return None
return cls(url.user, url.password or "", encoding=encoding)
def encode(self) -> str:
"""Encode credentials."""
creds = (f"{self.login}:{self.password}").encode(self.encoding)
return "Basic %s" % base64.b64encode(creds).decode(self.encoding)
def strip_auth_from_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]:
auth = BasicAuth.from_url(url)
if auth is None:
return url, None
else:
return url.with_user(None), auth
def netrc_from_env() -> Optional[netrc.netrc]:
"""Load netrc from file.
Attempt to load it from the path specified by the env-var
NETRC or in the default location in the user's home directory.
Returns None if it couldn't be found or fails to parse.
"""
netrc_env = os.environ.get("NETRC")
if netrc_env is not None:
netrc_path = Path(netrc_env)
else:
try:
home_dir = Path.home()
except RuntimeError as e: # pragma: no cover
# if pathlib can't resolve home, it may raise a RuntimeError
client_logger.debug(
"Could not resolve home directory when "
"trying to look for .netrc file: %s",
e,
)
return None
netrc_path = home_dir / ("_netrc" if IS_WINDOWS else ".netrc")
try:
return netrc.netrc(str(netrc_path))
except netrc.NetrcParseError as e:
client_logger.warning("Could not parse .netrc file: %s", e)
except OSError as e:
# we couldn't read the file (doesn't exist, permissions, etc.)
if netrc_env or netrc_path.is_file():
# only warn if the environment wanted us to load it,
# or it appears like the default file does actually exist
client_logger.warning("Could not read .netrc file: %s", e)
return None
@attr.s(auto_attribs=True, frozen=True, slots=True)
class ProxyInfo:
proxy: URL
proxy_auth: Optional[BasicAuth]
def proxies_from_env() -> Dict[str, ProxyInfo]:
proxy_urls = {
k: URL(v)
for k, v in getproxies().items()
if k in ("http", "https", "ws", "wss")
}
netrc_obj = netrc_from_env()
stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
ret = {}
for proto, val in stripped.items():
proxy, auth = val
if proxy.scheme in ("https", "wss"):
client_logger.warning(
"%s proxies %s are not supported, ignoring", proxy.scheme.upper(), proxy
)
continue
if netrc_obj and auth is None:
auth_from_netrc = None
if proxy.host is not None:
auth_from_netrc = netrc_obj.authenticators(proxy.host)
if auth_from_netrc is not None:
                # auth_from_netrc is a (`user`, `account`, `password`) tuple;
                # `user` and `account` can both hold the username,
                # so if `user` is None, use `account`
*logins, password = auth_from_netrc
login = logins[0] if logins[0] else logins[-1]
auth = BasicAuth(cast(str, login), cast(str, password))
ret[proto] = ProxyInfo(proxy, auth)
return ret
def current_task(
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> "Optional[asyncio.Task[Any]]":
if sys.version_info >= (3, 7):
return asyncio.current_task(loop=loop)
else:
return asyncio.Task.current_task(loop=loop)
def get_running_loop(
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> asyncio.AbstractEventLoop:
if loop is None:
loop = asyncio.get_event_loop()
if not loop.is_running():
warnings.warn(
"The object should be created within an async function",
DeprecationWarning,
stacklevel=3,
)
if loop.get_debug():
internal_logger.warning(
"The object should be created within an async function", stack_info=True
)
return loop
def isasyncgenfunction(obj: Any) -> bool:
func = getattr(inspect, "isasyncgenfunction", None)
if func is not None:
return func(obj) # type: ignore[no-any-return]
else:
return False
def get_env_proxy_for_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]:
"""Get a permitted proxy for the given URL from the env."""
if url.host is not None and proxy_bypass(url.host):
raise LookupError(f"Proxying is disallowed for `{url.host!r}`")
proxies_in_env = proxies_from_env()
try:
proxy_info = proxies_in_env[url.scheme]
except KeyError:
raise LookupError(f"No proxies found for `{url!s}` in the env")
else:
return proxy_info.proxy, proxy_info.proxy_auth
@attr.s(auto_attribs=True, frozen=True, slots=True)
class MimeType:
type: str
subtype: str
suffix: str
parameters: "MultiDictProxy[str]"
@functools.lru_cache(maxsize=56)
def parse_mimetype(mimetype: str) -> MimeType:
"""Parses a MIME type into its components.
mimetype is a MIME type string.
Returns a MimeType object.
Example:
>>> parse_mimetype('text/html; charset=utf-8')
MimeType(type='text', subtype='html', suffix='',
parameters={'charset': 'utf-8'})
"""
if not mimetype:
return MimeType(
type="", subtype="", suffix="", parameters=MultiDictProxy(MultiDict())
)
parts = mimetype.split(";")
params: MultiDict[str] = MultiDict()
for item in parts[1:]:
if not item:
continue
key, value = cast(
Tuple[str, str], item.split("=", 1) if "=" in item else (item, "")
)
params.add(key.lower().strip(), value.strip(' "'))
fulltype = parts[0].strip().lower()
if fulltype == "*":
fulltype = "*/*"
mtype, stype = (
cast(Tuple[str, str], fulltype.split("/", 1))
if "/" in fulltype
else (fulltype, "")
)
stype, suffix = (
cast(Tuple[str, str], stype.split("+", 1)) if "+" in stype else (stype, "")
)
return MimeType(
type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params)
)
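# Hypothetical demo (not part of the original module): structured-syntax
# suffixes such as "+json" are split into the ``suffix`` field.
def _example_parse_mimetype() -> None:
    mt = parse_mimetype("application/hal+json; charset=utf-8")
    assert (mt.type, mt.subtype, mt.suffix) == ("application", "hal", "json")
    assert mt.parameters["charset"] == "utf-8"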
def guess_filename(obj: Any, default: Optional[str] = None) -> Optional[str]:
name = getattr(obj, "name", None)
if name and isinstance(name, str) and name[0] != "<" and name[-1] != ">":
return Path(name).name
return default
not_qtext_re = re.compile(r"[^\041\043-\133\135-\176]")
QCONTENT = {chr(i) for i in range(0x20, 0x7F)} | {"\t"}
def quoted_string(content: str) -> str:
"""Return 7-bit content as quoted-string.
Format content into a quoted-string as defined in RFC5322 for
Internet Message Format. Notice that this is not the 8-bit HTTP
    format, but the 7-bit email format. Content must be US-ASCII;
    otherwise a ValueError is raised.
"""
if not (QCONTENT > set(content)):
raise ValueError(f"bad content for quoted-string {content!r}")
return not_qtext_re.sub(lambda x: "\\" + x.group(0), content)
def content_disposition_header(
disptype: str, quote_fields: bool = True, _charset: str = "utf-8", **params: str
) -> str:
"""Sets ``Content-Disposition`` header for MIME.
    This is the MIME payload Content-Disposition header from RFC 2183
    and RFC 7578 section 4.2, not the HTTP Content-Disposition from
    RFC 6266.
disptype is a disposition type: inline, attachment, form-data.
Should be valid extension token (see RFC 2183)
quote_fields performs value quoting to 7-bit MIME headers
    according to RFC 7578. Set quote_fields to False if the recipient
    can take 8-bit file names and field values.
_charset specifies the charset to use when quote_fields is True.
params is a dict with disposition params.
"""
if not disptype or not (TOKEN > set(disptype)):
        raise ValueError("bad content disposition type {!r}".format(disptype))
value = disptype
if params:
lparams = []
for key, val in params.items():
if not key or not (TOKEN > set(key)):
                raise ValueError(
                    "bad content disposition parameter {!r}={!r}".format(key, val)
                )
if quote_fields:
if key.lower() == "filename":
qval = quote(val, "", encoding=_charset)
lparams.append((key, '"%s"' % qval))
else:
try:
qval = quoted_string(val)
except ValueError:
qval = "".join(
(_charset, "''", quote(val, "", encoding=_charset))
)
lparams.append((key + "*", qval))
else:
lparams.append((key, '"%s"' % qval))
else:
qval = val.replace("\\", "\\\\").replace('"', '\\"')
lparams.append((key, '"%s"' % qval))
sparams = "; ".join("=".join(pair) for pair in lparams)
value = "; ".join((value, sparams))
return value
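# Hypothetical demo (not part of the original module): plain ASCII params are
# quoted in place, and the function returns the ready-to-send header value.
def _example_content_disposition() -> None:
    header = content_disposition_header("attachment", filename="report.csv")
    assert header == 'attachment; filename="report.csv"'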
class _TSelf(Protocol, Generic[_T]):
_cache: Dict[str, _T]
class reify(Generic[_T]):
"""Use as a class method decorator.
It operates almost exactly like
the Python `@property` decorator, but it puts the result of the
method it decorates into the instance dict after the first call,
effectively replacing the function it decorates with an instance
variable. It is, in Python parlance, a data descriptor.
"""
def __init__(self, wrapped: Callable[..., _T]) -> None:
self.wrapped = wrapped
self.__doc__ = wrapped.__doc__
self.name = wrapped.__name__
def __get__(self, inst: _TSelf[_T], owner: Optional[Type[Any]] = None) -> _T:
try:
try:
return inst._cache[self.name]
except KeyError:
val = self.wrapped(inst)
inst._cache[self.name] = val
return val
except AttributeError:
if inst is None:
return self
raise
def __set__(self, inst: _TSelf[_T], value: _T) -> None:
raise AttributeError("reified property is read-only")
reify_py = reify
try:
from ._helpers import reify as reify_c
if not NO_EXTENSIONS:
reify = reify_c # type: ignore[misc,assignment]
except ImportError:
pass
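# Hypothetical demo (not part of the original module): the decorated method
# runs once per instance; later reads hit the value cached in ``_cache``.
def _example_reify_usage() -> None:
    class _Box:
        def __init__(self) -> None:
            self._cache: Dict[str, Any] = {}
            self.calls = 0

        @reify
        def answer(self) -> int:
            self.calls += 1
            return 42

    box = _Box()
    assert box.answer == 42 and box.answer == 42
    assert box.calls == 1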
_ipv4_pattern = (
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
)
_ipv6_pattern = (
r"^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}"
r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)"
r"((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})"
r"(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}"
r"(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}"
r"[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)"
r"(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}"
r":|:(:[A-F0-9]{1,4}){7})$"
)
_ipv4_regex = re.compile(_ipv4_pattern)
_ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE)
_ipv4_regexb = re.compile(_ipv4_pattern.encode("ascii"))
_ipv6_regexb = re.compile(_ipv6_pattern.encode("ascii"), flags=re.IGNORECASE)
def _is_ip_address(
regex: Pattern[str], regexb: Pattern[bytes], host: Optional[Union[str, bytes]]
) -> bool:
if host is None:
return False
if isinstance(host, str):
return bool(regex.match(host))
elif isinstance(host, (bytes, bytearray, memoryview)):
return bool(regexb.match(host))
else:
raise TypeError(f"{host} [{type(host)}] is not a str or bytes")
is_ipv4_address = functools.partial(_is_ip_address, _ipv4_regex, _ipv4_regexb)
is_ipv6_address = functools.partial(_is_ip_address, _ipv6_regex, _ipv6_regexb)
def is_ip_address(host: Optional[Union[str, bytes, bytearray, memoryview]]) -> bool:
return is_ipv4_address(host) or is_ipv6_address(host)
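# Hypothetical demo (not part of the original module): both str and bytes
# hosts are accepted; bracketed IPv6 literals (URL form) do not match.
def _example_is_ip_address() -> None:
    assert is_ipv4_address("127.0.0.1")
    assert is_ipv6_address(b"::1")
    assert not is_ip_address("[::1]")
    assert not is_ip_address(None)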
def next_whole_second() -> datetime.datetime:
    """Return current time rounded up to the next whole second."""
    now = datetime.datetime.now(datetime.timezone.utc)
    if now.microsecond:
        # Truncate the sub-second part, then step to the next whole second.
        now = now.replace(microsecond=0) + datetime.timedelta(seconds=1)
    return now
_cached_current_datetime: Optional[int] = None
_cached_formatted_datetime = ""
def rfc822_formatted_time() -> str:
global _cached_current_datetime
global _cached_formatted_datetime
now = int(time.time())
if now != _cached_current_datetime:
# Weekday and month names for HTTP date/time formatting;
# always English!
        # Tuples are constants stored in the code object!
_weekdayname = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
_monthname = (
"", # Dummy so we can use 1-based month numbers
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
)
year, month, day, hh, mm, ss, wd, *tail = time.gmtime(now)
_cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
_weekdayname[wd],
day,
_monthname[month],
year,
hh,
mm,
ss,
)
_cached_current_datetime = now
return _cached_formatted_datetime
def _weakref_handle(info: "Tuple[weakref.ref[object], str]") -> None:
ref, name = info
ob = ref()
if ob is not None:
with suppress(Exception):
getattr(ob, name)()
def weakref_handle(
ob: object, name: str, timeout: float, loop: asyncio.AbstractEventLoop
) -> Optional[asyncio.TimerHandle]:
if timeout is not None and timeout > 0:
when = loop.time() + timeout
if timeout >= 5:
when = ceil(when)
return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name))
return None
def call_later(
cb: Callable[[], Any], timeout: float, loop: asyncio.AbstractEventLoop
) -> Optional[asyncio.TimerHandle]:
if timeout is not None and timeout > 0:
when = loop.time() + timeout
if timeout > 5:
when = ceil(when)
return loop.call_at(when, cb)
return None
class TimeoutHandle:
"""Timeout handle"""
def __init__(
self, loop: asyncio.AbstractEventLoop, timeout: Optional[float]
) -> None:
self._timeout = timeout
self._loop = loop
self._callbacks: List[
Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]
] = []
def register(
self, callback: Callable[..., None], *args: Any, **kwargs: Any
) -> None:
self._callbacks.append((callback, args, kwargs))
def close(self) -> None:
self._callbacks.clear()
def start(self) -> Optional[asyncio.Handle]:
timeout = self._timeout
if timeout is not None and timeout > 0:
when = self._loop.time() + timeout
if timeout >= 5:
when = ceil(when)
return self._loop.call_at(when, self.__call__)
else:
return None
def timer(self) -> "BaseTimerContext":
if self._timeout is not None and self._timeout > 0:
timer = TimerContext(self._loop)
self.register(timer.timeout)
return timer
else:
return TimerNoop()
def __call__(self) -> None:
for cb, args, kwargs in self._callbacks:
with suppress(Exception):
cb(*args, **kwargs)
self._callbacks.clear()
class BaseTimerContext(ContextManager["BaseTimerContext"]):
pass
class TimerNoop(BaseTimerContext):
def __enter__(self) -> BaseTimerContext:
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
return
class TimerContext(BaseTimerContext):
"""Low resolution timeout context manager"""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._tasks: List[asyncio.Task[Any]] = []
self._cancelled = False
def __enter__(self) -> BaseTimerContext:
task = current_task(loop=self._loop)
if task is None:
raise RuntimeError(
"Timeout context manager should be used " "inside a task"
)
if self._cancelled:
raise asyncio.TimeoutError from None
self._tasks.append(task)
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
if self._tasks:
self._tasks.pop()
if exc_type is asyncio.CancelledError and self._cancelled:
raise asyncio.TimeoutError from None
return None
def timeout(self) -> None:
if not self._cancelled:
for task in set(self._tasks):
task.cancel()
self._cancelled = True
def ceil_timeout(delay: Optional[float]) -> async_timeout.Timeout:
if delay is None or delay <= 0:
return async_timeout.timeout(None)
loop = get_running_loop()
now = loop.time()
when = now + delay
if delay > 5:
when = ceil(when)
return async_timeout.timeout_at(when)
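# Illustrative note (not part of the original module): rounding long
# deadlines up to whole seconds lets the event loop coalesce many timers
# into the same call_at() slot, reducing wakeups.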
class HeadersMixin:
ATTRS = frozenset(["_content_type", "_content_dict", "_stored_content_type"])
_content_type: Optional[str] = None
_content_dict: Optional[Dict[str, str]] = None
_stored_content_type = sentinel
    def _parse_content_type(self, raw: Optional[str]) -> None:
self._stored_content_type = raw
if raw is None:
# default value according to RFC 2616
self._content_type = "application/octet-stream"
self._content_dict = {}
else:
msg = HeaderParser().parsestr("Content-Type: " + raw)
self._content_type = msg.get_content_type()
params = msg.get_params()
self._content_dict = dict(params[1:]) # First element is content type again
@property
def content_type(self) -> str:
"""The value of content part for Content-Type HTTP header."""
raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]
if self._stored_content_type != raw:
self._parse_content_type(raw)
return self._content_type # type: ignore[return-value]
@property
def charset(self) -> Optional[str]:
"""The value of charset part for Content-Type HTTP header."""
raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]
if self._stored_content_type != raw:
self._parse_content_type(raw)
return self._content_dict.get("charset") # type: ignore[union-attr]
@property
def content_length(self) -> Optional[int]:
"""The value of Content-Length HTTP header."""
content_length = self._headers.get( # type: ignore[attr-defined]
hdrs.CONTENT_LENGTH
)
if content_length is not None:
return int(content_length)
else:
return None
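# Hypothetical demo (not part of the original module): any class exposing a
# ``_headers`` mapping gets lazy Content-Type/charset parsing for free.
def _example_headers_mixin() -> None:
    class _Msg(HeadersMixin):
        def __init__(self) -> None:
            self._headers = {hdrs.CONTENT_TYPE: "text/html; charset=utf-8"}

    msg = _Msg()
    assert msg.content_type == "text/html"
    assert msg.charset == "utf-8"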
def set_result(fut: "asyncio.Future[_T]", result: _T) -> None:
if not fut.done():
fut.set_result(result)
def set_exception(fut: "asyncio.Future[_T]", exc: BaseException) -> None:
if not fut.done():
fut.set_exception(exc)
class ChainMapProxy(Mapping[str, Any]):
__slots__ = ("_maps",)
def __init__(self, maps: Iterable[Mapping[str, Any]]) -> None:
self._maps = tuple(maps)
def __init_subclass__(cls) -> None:
raise TypeError(
"Inheritance class {} from ChainMapProxy "
"is forbidden".format(cls.__name__)
)
def __getitem__(self, key: str) -> Any:
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def get(self, key: str, default: Any = None) -> Any:
return self[key] if key in self else default
def __len__(self) -> int:
# reuses stored hash values if possible
return len(set().union(*self._maps)) # type: ignore[arg-type]
def __iter__(self) -> Iterator[str]:
d: Dict[str, Any] = {}
for mapping in reversed(self._maps):
# reuses stored hash values if possible
d.update(mapping)
return iter(d)
def __contains__(self, key: object) -> bool:
return any(key in m for m in self._maps)
def __bool__(self) -> bool:
return any(self._maps)
def __repr__(self) -> str:
content = ", ".join(map(repr, self._maps))
return f"ChainMapProxy({content})"
# https://tools.ietf.org/html/rfc7232#section-2.3
_ETAGC = r"[!#-}\x80-\xff]+"
_ETAGC_RE = re.compile(_ETAGC)
_QUOTED_ETAG = rf'(W/)?"({_ETAGC})"'
QUOTED_ETAG_RE = re.compile(_QUOTED_ETAG)
LIST_QUOTED_ETAG_RE = re.compile(rf"({_QUOTED_ETAG})(?:\s*,\s*|$)|(.)")
ETAG_ANY = "*"
@attr.s(auto_attribs=True, frozen=True, slots=True)
class ETag:
value: str
is_weak: bool = False
def validate_etag_value(value: str) -> None:
if value != ETAG_ANY and not _ETAGC_RE.fullmatch(value):
raise ValueError(
f"Value {value!r} is not a valid etag. Maybe it contains '\"'?"
)
def parse_http_date(date_str: Optional[str]) -> Optional[datetime.datetime]:
"""Process a date string, return a datetime object"""
if date_str is not None:
timetuple = parsedate(date_str)
if timetuple is not None:
with suppress(ValueError):
return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc)
return None
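# Hypothetical demo (not part of the original module): RFC 822/1123 dates
# parse to timezone-aware UTC datetimes; unparseable input yields None.
def _example_parse_http_date() -> None:
    dt = parse_http_date("Wed, 21 Oct 2015 07:28:00 GMT")
    assert dt is not None and dt.tzinfo is datetime.timezone.utc
    assert parse_http_date("not a date") is None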
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_response.py | import asyncio
import collections.abc
import datetime
import enum
import json
import math
import time
import warnings
import zlib
from concurrent.futures import Executor
from http.cookies import Morsel, SimpleCookie
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterator,
Mapping,
MutableMapping,
Optional,
Tuple,
Union,
cast,
)
from multidict import CIMultiDict, istr
from . import hdrs, payload
from .abc import AbstractStreamWriter
from .helpers import (
ETAG_ANY,
PY_38,
QUOTED_ETAG_RE,
ETag,
HeadersMixin,
parse_http_date,
rfc822_formatted_time,
sentinel,
validate_etag_value,
)
from .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11
from .payload import Payload
from .typedefs import JSONEncoder, LooseHeaders
__all__ = ("ContentCoding", "StreamResponse", "Response", "json_response")
if TYPE_CHECKING: # pragma: no cover
from .web_request import BaseRequest
BaseClass = MutableMapping[str, Any]
else:
BaseClass = collections.abc.MutableMapping
if not PY_38:
# allow samesite to be used in python < 3.8
# already permitted in python 3.8, see https://bugs.python.org/issue29613
Morsel._reserved["samesite"] = "SameSite" # type: ignore[attr-defined]
class ContentCoding(enum.Enum):
# The content codings that we have support for.
#
# Additional registered codings are listed at:
# https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
deflate = "deflate"
gzip = "gzip"
identity = "identity"
############################################################
# HTTP Response classes
############################################################
class StreamResponse(BaseClass, HeadersMixin):
_length_check = True
def __init__(
self,
*,
status: int = 200,
reason: Optional[str] = None,
headers: Optional[LooseHeaders] = None,
) -> None:
self._body = None
self._keep_alive: Optional[bool] = None
self._chunked = False
self._compression = False
self._compression_force: Optional[ContentCoding] = None
self._cookies: SimpleCookie[str] = SimpleCookie()
self._req: Optional[BaseRequest] = None
self._payload_writer: Optional[AbstractStreamWriter] = None
self._eof_sent = False
self._body_length = 0
self._state: Dict[str, Any] = {}
if headers is not None:
self._headers: CIMultiDict[str] = CIMultiDict(headers)
else:
self._headers = CIMultiDict()
self.set_status(status, reason)
@property
def prepared(self) -> bool:
return self._payload_writer is not None
@property
def task(self) -> "Optional[asyncio.Task[None]]":
if self._req:
return self._req.task
else:
return None
@property
def status(self) -> int:
return self._status
@property
def chunked(self) -> bool:
return self._chunked
@property
def compression(self) -> bool:
return self._compression
@property
def reason(self) -> str:
return self._reason
def set_status(
self,
status: int,
reason: Optional[str] = None,
_RESPONSES: Mapping[int, Tuple[str, str]] = RESPONSES,
) -> None:
assert not self.prepared, (
"Cannot change the response status code after " "the headers have been sent"
)
self._status = int(status)
if reason is None:
try:
reason = _RESPONSES[self._status][0]
except Exception:
reason = ""
self._reason = reason
@property
def keep_alive(self) -> Optional[bool]:
return self._keep_alive
def force_close(self) -> None:
self._keep_alive = False
@property
def body_length(self) -> int:
return self._body_length
@property
def output_length(self) -> int:
warnings.warn("output_length is deprecated", DeprecationWarning)
assert self._payload_writer
return self._payload_writer.buffer_size
def enable_chunked_encoding(self, chunk_size: Optional[int] = None) -> None:
"""Enables automatic chunked transfer encoding."""
self._chunked = True
if hdrs.CONTENT_LENGTH in self._headers:
raise RuntimeError(
"You can't enable chunked encoding when " "a content length is set"
)
if chunk_size is not None:
warnings.warn("Chunk size is deprecated #1615", DeprecationWarning)
def enable_compression(
self, force: Optional[Union[bool, ContentCoding]] = None
) -> None:
"""Enables response compression encoding."""
# Backwards compatibility for when force was a bool <0.17.
if type(force) == bool:
force = ContentCoding.deflate if force else ContentCoding.identity
warnings.warn(
"Using boolean for force is deprecated #3318", DeprecationWarning
)
elif force is not None:
            assert isinstance(force, ContentCoding), (
                "force should be one of None, bool or ContentCoding"
            )
self._compression = True
self._compression_force = force
@property
def headers(self) -> "CIMultiDict[str]":
return self._headers
@property
def cookies(self) -> "SimpleCookie[str]":
return self._cookies
def set_cookie(
self,
name: str,
value: str,
*,
expires: Optional[str] = None,
domain: Optional[str] = None,
max_age: Optional[Union[int, str]] = None,
path: str = "/",
secure: Optional[bool] = None,
httponly: Optional[bool] = None,
version: Optional[str] = None,
samesite: Optional[str] = None,
) -> None:
"""Set or update response cookie.
        Sets a new cookie or updates an existing one with a new value.
        Only params which are not None are updated.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == "":
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c["expires"] = expires
elif c.get("expires") == "Thu, 01 Jan 1970 00:00:00 GMT":
del c["expires"]
if domain is not None:
c["domain"] = domain
if max_age is not None:
c["max-age"] = str(max_age)
elif "max-age" in c:
del c["max-age"]
c["path"] = path
if secure is not None:
c["secure"] = secure
if httponly is not None:
c["httponly"] = httponly
if version is not None:
c["version"] = version
if samesite is not None:
c["samesite"] = samesite
def del_cookie(
self, name: str, *, domain: Optional[str] = None, path: str = "/"
) -> None:
"""Delete cookie.
Creates new empty expired cookie.
"""
# TODO: do we need domain/path here?
self._cookies.pop(name, None)
self.set_cookie(
name,
"",
max_age=0,
expires="Thu, 01 Jan 1970 00:00:00 GMT",
domain=domain,
path=path,
)
@property
def content_length(self) -> Optional[int]:
# Just a placeholder for adding setter
return super().content_length
@content_length.setter
def content_length(self, value: Optional[int]) -> None:
if value is not None:
value = int(value)
if self._chunked:
                raise RuntimeError(
                    "You can't set the content length when "
                    "chunked encoding is enabled"
                )
self._headers[hdrs.CONTENT_LENGTH] = str(value)
else:
self._headers.pop(hdrs.CONTENT_LENGTH, None)
@property
def content_type(self) -> str:
# Just a placeholder for adding setter
return super().content_type
@content_type.setter
def content_type(self, value: str) -> None:
self.content_type # read header values if needed
self._content_type = str(value)
self._generate_content_type_header()
@property
def charset(self) -> Optional[str]:
# Just a placeholder for adding setter
return super().charset
@charset.setter
def charset(self, value: Optional[str]) -> None:
ctype = self.content_type # read header values if needed
if ctype == "application/octet-stream":
raise RuntimeError(
"Setting charset for application/octet-stream "
"doesn't make sense, setup content_type first"
)
assert self._content_dict is not None
if value is None:
self._content_dict.pop("charset", None)
else:
self._content_dict["charset"] = str(value).lower()
self._generate_content_type_header()
@property
def last_modified(self) -> Optional[datetime.datetime]:
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self._headers.get(hdrs.LAST_MODIFIED))
@last_modified.setter
def last_modified(
self, value: Optional[Union[int, float, datetime.datetime, str]]
) -> None:
if value is None:
self._headers.pop(hdrs.LAST_MODIFIED, None)
elif isinstance(value, (int, float)):
self._headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value))
)
elif isinstance(value, datetime.datetime):
self._headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple()
)
elif isinstance(value, str):
self._headers[hdrs.LAST_MODIFIED] = value
@property
def etag(self) -> Optional[ETag]:
quoted_value = self._headers.get(hdrs.ETAG)
if not quoted_value:
return None
elif quoted_value == ETAG_ANY:
return ETag(value=ETAG_ANY)
match = QUOTED_ETAG_RE.fullmatch(quoted_value)
if not match:
return None
is_weak, value = match.group(1, 2)
return ETag(
is_weak=bool(is_weak),
value=value,
)
@etag.setter
def etag(self, value: Optional[Union[ETag, str]]) -> None:
if value is None:
self._headers.pop(hdrs.ETAG, None)
elif (isinstance(value, str) and value == ETAG_ANY) or (
isinstance(value, ETag) and value.value == ETAG_ANY
):
self._headers[hdrs.ETAG] = ETAG_ANY
elif isinstance(value, str):
validate_etag_value(value)
self._headers[hdrs.ETAG] = f'"{value}"'
elif isinstance(value, ETag) and isinstance(value.value, str):
validate_etag_value(value.value)
hdr_value = f'W/"{value.value}"' if value.is_weak else f'"{value.value}"'
self._headers[hdrs.ETAG] = hdr_value
else:
raise ValueError(
f"Unsupported etag type: {type(value)}. "
f"etag must be str, ETag or None"
)
def _generate_content_type_header(
self, CONTENT_TYPE: istr = hdrs.CONTENT_TYPE
) -> None:
assert self._content_dict is not None
assert self._content_type is not None
params = "; ".join(f"{k}={v}" for k, v in self._content_dict.items())
if params:
ctype = self._content_type + "; " + params
else:
ctype = self._content_type
self._headers[CONTENT_TYPE] = ctype
async def _do_start_compression(self, coding: ContentCoding) -> None:
if coding != ContentCoding.identity:
assert self._payload_writer is not None
self._headers[hdrs.CONTENT_ENCODING] = coding.value
self._payload_writer.enable_compression(coding.value)
# Compressed payload may have different content length,
# remove the header
self._headers.popall(hdrs.CONTENT_LENGTH, None)
async def _start_compression(self, request: "BaseRequest") -> None:
if self._compression_force:
await self._do_start_compression(self._compression_force)
else:
accept_encoding = request.headers.get(hdrs.ACCEPT_ENCODING, "").lower()
for coding in ContentCoding:
if coding.value in accept_encoding:
await self._do_start_compression(coding)
return
async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
if self._eof_sent:
return None
if self._payload_writer is not None:
return self._payload_writer
return await self._start(request)
async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
self._req = request
writer = self._payload_writer = request._payload_writer
await self._prepare_headers()
await request._prepare_hook(self)
await self._write_headers()
return writer
async def _prepare_headers(self) -> None:
request = self._req
assert request is not None
writer = self._payload_writer
assert writer is not None
keep_alive = self._keep_alive
if keep_alive is None:
keep_alive = request.keep_alive
self._keep_alive = keep_alive
version = request.version
headers = self._headers
for cookie in self._cookies.values():
value = cookie.output(header="")[1:]
headers.add(hdrs.SET_COOKIE, value)
if self._compression:
await self._start_compression(request)
if self._chunked:
if version != HttpVersion11:
raise RuntimeError(
"Using chunked encoding is forbidden "
"for HTTP/{0.major}.{0.minor}".format(request.version)
)
writer.enable_chunking()
headers[hdrs.TRANSFER_ENCODING] = "chunked"
if hdrs.CONTENT_LENGTH in headers:
del headers[hdrs.CONTENT_LENGTH]
elif self._length_check:
writer.length = self.content_length
if writer.length is None:
if version >= HttpVersion11 and self.status != 204:
writer.enable_chunking()
headers[hdrs.TRANSFER_ENCODING] = "chunked"
if hdrs.CONTENT_LENGTH in headers:
del headers[hdrs.CONTENT_LENGTH]
else:
keep_alive = False
# HTTP 1.1: https://tools.ietf.org/html/rfc7230#section-3.3.2
# HTTP 1.0: https://tools.ietf.org/html/rfc1945#section-10.4
elif version >= HttpVersion11 and self.status in (100, 101, 102, 103, 204):
del headers[hdrs.CONTENT_LENGTH]
if self.status not in (204, 304):
headers.setdefault(hdrs.CONTENT_TYPE, "application/octet-stream")
headers.setdefault(hdrs.DATE, rfc822_formatted_time())
headers.setdefault(hdrs.SERVER, SERVER_SOFTWARE)
# connection header
if hdrs.CONNECTION not in headers:
if keep_alive:
if version == HttpVersion10:
headers[hdrs.CONNECTION] = "keep-alive"
else:
if version == HttpVersion11:
headers[hdrs.CONNECTION] = "close"
async def _write_headers(self) -> None:
request = self._req
assert request is not None
writer = self._payload_writer
assert writer is not None
# status line
version = request.version
status_line = "HTTP/{}.{} {} {}".format(
version[0], version[1], self._status, self._reason
)
await writer.write_headers(status_line, self._headers)
async def write(self, data: bytes) -> None:
assert isinstance(
data, (bytes, bytearray, memoryview)
), "data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
raise RuntimeError("Cannot call write() after write_eof()")
if self._payload_writer is None:
raise RuntimeError("Cannot call write() before prepare()")
await self._payload_writer.write(data)
async def drain(self) -> None:
assert not self._eof_sent, "EOF has already been sent"
assert self._payload_writer is not None, "Response has not been started"
warnings.warn(
"drain method is deprecated, use await resp.write()",
DeprecationWarning,
stacklevel=2,
)
await self._payload_writer.drain()
async def write_eof(self, data: bytes = b"") -> None:
assert isinstance(
data, (bytes, bytearray, memoryview)
), "data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
return
assert self._payload_writer is not None, "Response has not been started"
await self._payload_writer.write_eof(data)
self._eof_sent = True
self._req = None
self._body_length = self._payload_writer.output_size
self._payload_writer = None
def __repr__(self) -> str:
if self._eof_sent:
info = "eof"
elif self.prepared:
assert self._req is not None
info = f"{self._req.method} {self._req.path} "
else:
info = "not prepared"
return f"<{self.__class__.__name__} {self.reason} {info}>"
def __getitem__(self, key: str) -> Any:
return self._state[key]
def __setitem__(self, key: str, value: Any) -> None:
self._state[key] = value
def __delitem__(self, key: str) -> None:
del self._state[key]
def __len__(self) -> int:
return len(self._state)
def __iter__(self) -> Iterator[str]:
return iter(self._state)
def __hash__(self) -> int:
return hash(id(self))
def __eq__(self, other: object) -> bool:
return self is other
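# Hypothetical demo (not part of the original module): set_cookie fills the
# response's SimpleCookie and del_cookie replaces it with an expired value.
def _example_cookie_roundtrip() -> None:
    resp = StreamResponse()
    resp.set_cookie("session", "abc", max_age=3600, httponly=True)
    assert resp.cookies["session"].value == "abc"
    resp.del_cookie("session")
    assert resp.cookies["session"].value == ""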
class Response(StreamResponse):
def __init__(
self,
*,
body: Any = None,
status: int = 200,
reason: Optional[str] = None,
text: Optional[str] = None,
headers: Optional[LooseHeaders] = None,
content_type: Optional[str] = None,
charset: Optional[str] = None,
zlib_executor_size: Optional[int] = None,
zlib_executor: Optional[Executor] = None,
) -> None:
if body is not None and text is not None:
raise ValueError("body and text are not allowed together")
if headers is None:
real_headers: CIMultiDict[str] = CIMultiDict()
elif not isinstance(headers, CIMultiDict):
real_headers = CIMultiDict(headers)
else:
real_headers = headers # = cast('CIMultiDict[str]', headers)
if content_type is not None and "charset" in content_type:
raise ValueError("charset must not be in content_type " "argument")
if text is not None:
if hdrs.CONTENT_TYPE in real_headers:
if content_type or charset:
raise ValueError(
"passing both Content-Type header and "
"content_type or charset params "
"is forbidden"
)
else:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError("text argument must be str (%r)" % type(text))
if content_type is None:
content_type = "text/plain"
if charset is None:
charset = "utf-8"
real_headers[hdrs.CONTENT_TYPE] = content_type + "; charset=" + charset
body = text.encode(charset)
text = None
else:
if hdrs.CONTENT_TYPE in real_headers:
if content_type is not None or charset is not None:
raise ValueError(
"passing both Content-Type header and "
"content_type or charset params "
"is forbidden"
)
else:
if content_type is not None:
if charset is not None:
content_type += "; charset=" + charset
real_headers[hdrs.CONTENT_TYPE] = content_type
super().__init__(status=status, reason=reason, headers=real_headers)
if text is not None:
self.text = text
else:
self.body = body
self._compressed_body: Optional[bytes] = None
self._zlib_executor_size = zlib_executor_size
self._zlib_executor = zlib_executor
@property
def body(self) -> Optional[Union[bytes, Payload]]:
return self._body
@body.setter
def body(
self,
body: bytes,
CONTENT_TYPE: istr = hdrs.CONTENT_TYPE,
CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH,
) -> None:
if body is None:
self._body: Optional[bytes] = None
self._body_payload: bool = False
elif isinstance(body, (bytes, bytearray)):
self._body = body
self._body_payload = False
else:
try:
self._body = body = payload.PAYLOAD_REGISTRY.get(body)
except payload.LookupError:
raise ValueError("Unsupported body type %r" % type(body))
self._body_payload = True
headers = self._headers
# set content-length header if needed
if not self._chunked and CONTENT_LENGTH not in headers:
size = body.size
if size is not None:
headers[CONTENT_LENGTH] = str(size)
# set content-type
if CONTENT_TYPE not in headers:
headers[CONTENT_TYPE] = body.content_type
# copy payload headers
if body.headers:
for (key, value) in body.headers.items():
if key not in headers:
headers[key] = value
self._compressed_body = None
@property
def text(self) -> Optional[str]:
if self._body is None:
return None
return self._body.decode(self.charset or "utf-8")
@text.setter
def text(self, text: str) -> None:
assert text is None or isinstance(
text, str
), "text argument must be str (%r)" % type(text)
if self.content_type == "application/octet-stream":
self.content_type = "text/plain"
if self.charset is None:
self.charset = "utf-8"
self._body = text.encode(self.charset)
self._body_payload = False
self._compressed_body = None
@property
def content_length(self) -> Optional[int]:
if self._chunked:
return None
if hdrs.CONTENT_LENGTH in self._headers:
return super().content_length
if self._compressed_body is not None:
# Return length of the compressed body
return len(self._compressed_body)
elif self._body_payload:
# A payload without content length, or a compressed payload
return None
elif self._body is not None:
return len(self._body)
else:
return 0
@content_length.setter
def content_length(self, value: Optional[int]) -> None:
raise RuntimeError("Content length is set automatically")
async def write_eof(self, data: bytes = b"") -> None:
if self._eof_sent:
return
if self._compressed_body is None:
body: Optional[Union[bytes, Payload]] = self._body
else:
body = self._compressed_body
assert not data, f"data arg is not supported, got {data!r}"
assert self._req is not None
assert self._payload_writer is not None
if body is not None:
if self._req._method == hdrs.METH_HEAD or self._status in [204, 304]:
await super().write_eof()
elif self._body_payload:
payload = cast(Payload, body)
await payload.write(self._payload_writer)
await super().write_eof()
else:
await super().write_eof(cast(bytes, body))
else:
await super().write_eof()
async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:
if not self._body_payload:
if self._body is not None:
self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))
else:
self._headers[hdrs.CONTENT_LENGTH] = "0"
return await super()._start(request)
def _compress_body(self, zlib_mode: int) -> None:
assert zlib_mode > 0
compressobj = zlib.compressobj(wbits=zlib_mode)
body_in = self._body
assert body_in is not None
self._compressed_body = compressobj.compress(body_in) + compressobj.flush()
async def _do_start_compression(self, coding: ContentCoding) -> None:
if self._body_payload or self._chunked:
return await super()._do_start_compression(coding)
if coding != ContentCoding.identity:
# Instead of using _payload_writer.enable_compression,
# compress the whole body
zlib_mode = (
16 + zlib.MAX_WBITS if coding == ContentCoding.gzip else zlib.MAX_WBITS
)
body_in = self._body
assert body_in is not None
if (
self._zlib_executor_size is not None
and len(body_in) > self._zlib_executor_size
):
await asyncio.get_event_loop().run_in_executor(
self._zlib_executor, self._compress_body, zlib_mode
)
else:
self._compress_body(zlib_mode)
body_out = self._compressed_body
assert body_out is not None
self._headers[hdrs.CONTENT_ENCODING] = coding.value
self._headers[hdrs.CONTENT_LENGTH] = str(len(body_out))
def json_response(
data: Any = sentinel,
*,
text: Optional[str] = None,
body: Optional[bytes] = None,
status: int = 200,
reason: Optional[str] = None,
headers: Optional[LooseHeaders] = None,
content_type: str = "application/json",
dumps: JSONEncoder = json.dumps,
) -> Response:
if data is not sentinel:
if text or body:
raise ValueError("only one of data, text, or body should be specified")
else:
text = dumps(data)
return Response(
text=text,
body=body,
status=status,
reason=reason,
headers=headers,
content_type=content_type,
)
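# Hypothetical demo (not part of the original module): data is serialized
# with ``dumps``; Content-Type defaults to application/json and the charset
# to utf-8.
def _example_json_response() -> None:
    resp = json_response({"ok": True})
    assert resp.text == '{"ok": true}'
    assert resp.content_type == "application/json"
    assert resp.charset == "utf-8"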
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/typedefs.py | import json
import os
import sys
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Iterable,
Mapping,
Tuple,
Union,
)
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy, istr
from yarl import URL
# These are for other modules to use (to avoid repeating the conditional import).
if sys.version_info >= (3, 8):
from typing import Final as Final, Protocol as Protocol, TypedDict as TypedDict
else:
from typing_extensions import ( # noqa: F401
Final,
Protocol as Protocol,
TypedDict as TypedDict,
)
DEFAULT_JSON_ENCODER = json.dumps
DEFAULT_JSON_DECODER = json.loads
if TYPE_CHECKING: # pragma: no cover
_CIMultiDict = CIMultiDict[str]
_CIMultiDictProxy = CIMultiDictProxy[str]
_MultiDict = MultiDict[str]
_MultiDictProxy = MultiDictProxy[str]
from http.cookies import BaseCookie, Morsel
from .web import Request, StreamResponse
else:
_CIMultiDict = CIMultiDict
_CIMultiDictProxy = CIMultiDictProxy
_MultiDict = MultiDict
_MultiDictProxy = MultiDictProxy
Byteish = Union[bytes, bytearray, memoryview]
JSONEncoder = Callable[[Any], str]
JSONDecoder = Callable[[str], Any]
LooseHeaders = Union[Mapping[Union[str, istr], str], _CIMultiDict, _CIMultiDictProxy]
RawHeaders = Tuple[Tuple[bytes, bytes], ...]
StrOrURL = Union[str, URL]
LooseCookiesMappings = Mapping[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]]
LooseCookiesIterables = Iterable[
Tuple[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]]
]
LooseCookies = Union[
LooseCookiesMappings,
LooseCookiesIterables,
"BaseCookie[str]",
]
Handler = Callable[["Request"], Awaitable["StreamResponse"]]
PathLike = Union[str, "os.PathLike[str]"]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/tracing.py | from types import SimpleNamespace
from typing import TYPE_CHECKING, Awaitable, Optional, Type, TypeVar
import attr
from aiosignal import Signal
from multidict import CIMultiDict
from yarl import URL
from .client_reqrep import ClientResponse
if TYPE_CHECKING: # pragma: no cover
from .client import ClientSession
from .typedefs import Protocol
_ParamT_contra = TypeVar("_ParamT_contra", contravariant=True)
class _SignalCallback(Protocol[_ParamT_contra]):
def __call__(
self,
__client_session: ClientSession,
__trace_config_ctx: SimpleNamespace,
__params: _ParamT_contra,
) -> Awaitable[None]:
...
__all__ = (
"TraceConfig",
"TraceRequestStartParams",
"TraceRequestEndParams",
"TraceRequestExceptionParams",
"TraceConnectionQueuedStartParams",
"TraceConnectionQueuedEndParams",
"TraceConnectionCreateStartParams",
"TraceConnectionCreateEndParams",
"TraceConnectionReuseconnParams",
"TraceDnsResolveHostStartParams",
"TraceDnsResolveHostEndParams",
"TraceDnsCacheHitParams",
"TraceDnsCacheMissParams",
"TraceRequestRedirectParams",
"TraceRequestChunkSentParams",
"TraceResponseChunkReceivedParams",
"TraceRequestHeadersSentParams",
)
class TraceConfig:
"""First-class used to trace requests launched via ClientSession objects."""
def __init__(
self, trace_config_ctx_factory: Type[SimpleNamespace] = SimpleNamespace
) -> None:
self._on_request_start: Signal[
_SignalCallback[TraceRequestStartParams]
] = Signal(self)
self._on_request_chunk_sent: Signal[
_SignalCallback[TraceRequestChunkSentParams]
] = Signal(self)
self._on_response_chunk_received: Signal[
_SignalCallback[TraceResponseChunkReceivedParams]
] = Signal(self)
self._on_request_end: Signal[_SignalCallback[TraceRequestEndParams]] = Signal(
self
)
self._on_request_exception: Signal[
_SignalCallback[TraceRequestExceptionParams]
] = Signal(self)
self._on_request_redirect: Signal[
_SignalCallback[TraceRequestRedirectParams]
] = Signal(self)
self._on_connection_queued_start: Signal[
_SignalCallback[TraceConnectionQueuedStartParams]
] = Signal(self)
self._on_connection_queued_end: Signal[
_SignalCallback[TraceConnectionQueuedEndParams]
] = Signal(self)
self._on_connection_create_start: Signal[
_SignalCallback[TraceConnectionCreateStartParams]
] = Signal(self)
self._on_connection_create_end: Signal[
_SignalCallback[TraceConnectionCreateEndParams]
] = Signal(self)
self._on_connection_reuseconn: Signal[
_SignalCallback[TraceConnectionReuseconnParams]
] = Signal(self)
self._on_dns_resolvehost_start: Signal[
_SignalCallback[TraceDnsResolveHostStartParams]
] = Signal(self)
self._on_dns_resolvehost_end: Signal[
_SignalCallback[TraceDnsResolveHostEndParams]
] = Signal(self)
self._on_dns_cache_hit: Signal[
_SignalCallback[TraceDnsCacheHitParams]
] = Signal(self)
self._on_dns_cache_miss: Signal[
_SignalCallback[TraceDnsCacheMissParams]
] = Signal(self)
self._on_request_headers_sent: Signal[
_SignalCallback[TraceRequestHeadersSentParams]
] = Signal(self)
self._trace_config_ctx_factory = trace_config_ctx_factory
def trace_config_ctx(
self, trace_request_ctx: Optional[SimpleNamespace] = None
) -> SimpleNamespace:
"""Return a new trace_config_ctx instance"""
return self._trace_config_ctx_factory(trace_request_ctx=trace_request_ctx)
def freeze(self) -> None:
self._on_request_start.freeze()
self._on_request_chunk_sent.freeze()
self._on_response_chunk_received.freeze()
self._on_request_end.freeze()
self._on_request_exception.freeze()
self._on_request_redirect.freeze()
self._on_connection_queued_start.freeze()
self._on_connection_queued_end.freeze()
self._on_connection_create_start.freeze()
self._on_connection_create_end.freeze()
self._on_connection_reuseconn.freeze()
self._on_dns_resolvehost_start.freeze()
self._on_dns_resolvehost_end.freeze()
self._on_dns_cache_hit.freeze()
self._on_dns_cache_miss.freeze()
self._on_request_headers_sent.freeze()
@property
def on_request_start(self) -> "Signal[_SignalCallback[TraceRequestStartParams]]":
return self._on_request_start
@property
def on_request_chunk_sent(
self,
) -> "Signal[_SignalCallback[TraceRequestChunkSentParams]]":
return self._on_request_chunk_sent
@property
def on_response_chunk_received(
self,
) -> "Signal[_SignalCallback[TraceResponseChunkReceivedParams]]":
return self._on_response_chunk_received
@property
def on_request_end(self) -> "Signal[_SignalCallback[TraceRequestEndParams]]":
return self._on_request_end
@property
def on_request_exception(
self,
) -> "Signal[_SignalCallback[TraceRequestExceptionParams]]":
return self._on_request_exception
@property
def on_request_redirect(
self,
) -> "Signal[_SignalCallback[TraceRequestRedirectParams]]":
return self._on_request_redirect
@property
def on_connection_queued_start(
self,
) -> "Signal[_SignalCallback[TraceConnectionQueuedStartParams]]":
return self._on_connection_queued_start
@property
def on_connection_queued_end(
self,
) -> "Signal[_SignalCallback[TraceConnectionQueuedEndParams]]":
return self._on_connection_queued_end
@property
def on_connection_create_start(
self,
) -> "Signal[_SignalCallback[TraceConnectionCreateStartParams]]":
return self._on_connection_create_start
@property
def on_connection_create_end(
self,
) -> "Signal[_SignalCallback[TraceConnectionCreateEndParams]]":
return self._on_connection_create_end
@property
def on_connection_reuseconn(
self,
) -> "Signal[_SignalCallback[TraceConnectionReuseconnParams]]":
return self._on_connection_reuseconn
@property
def on_dns_resolvehost_start(
self,
) -> "Signal[_SignalCallback[TraceDnsResolveHostStartParams]]":
return self._on_dns_resolvehost_start
@property
def on_dns_resolvehost_end(
self,
) -> "Signal[_SignalCallback[TraceDnsResolveHostEndParams]]":
return self._on_dns_resolvehost_end
@property
def on_dns_cache_hit(self) -> "Signal[_SignalCallback[TraceDnsCacheHitParams]]":
return self._on_dns_cache_hit
@property
def on_dns_cache_miss(self) -> "Signal[_SignalCallback[TraceDnsCacheMissParams]]":
return self._on_dns_cache_miss
@property
def on_request_headers_sent(
self,
) -> "Signal[_SignalCallback[TraceRequestHeadersSentParams]]":
return self._on_request_headers_sent
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceRequestStartParams:
"""Parameters sent by the `on_request_start` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceRequestChunkSentParams:
"""Parameters sent by the `on_request_chunk_sent` signal"""
method: str
url: URL
chunk: bytes
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceResponseChunkReceivedParams:
"""Parameters sent by the `on_response_chunk_received` signal"""
method: str
url: URL
chunk: bytes
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceRequestEndParams:
"""Parameters sent by the `on_request_end` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
response: ClientResponse
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceRequestExceptionParams:
"""Parameters sent by the `on_request_exception` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
exception: BaseException
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceRequestRedirectParams:
"""Parameters sent by the `on_request_redirect` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
response: ClientResponse
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceConnectionQueuedStartParams:
"""Parameters sent by the `on_connection_queued_start` signal"""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceConnectionQueuedEndParams:
"""Parameters sent by the `on_connection_queued_end` signal"""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceConnectionCreateStartParams:
"""Parameters sent by the `on_connection_create_start` signal"""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceConnectionCreateEndParams:
"""Parameters sent by the `on_connection_create_end` signal"""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceConnectionReuseconnParams:
"""Parameters sent by the `on_connection_reuseconn` signal"""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceDnsResolveHostStartParams:
"""Parameters sent by the `on_dns_resolvehost_start` signal"""
host: str
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceDnsResolveHostEndParams:
"""Parameters sent by the `on_dns_resolvehost_end` signal"""
host: str
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceDnsCacheHitParams:
"""Parameters sent by the `on_dns_cache_hit` signal"""
host: str
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceDnsCacheMissParams:
"""Parameters sent by the `on_dns_cache_miss` signal"""
host: str
@attr.s(auto_attribs=True, frozen=True, slots=True)
class TraceRequestHeadersSentParams:
"""Parameters sent by the `on_request_headers_sent` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
class Trace:
"""Internal dependency holder class.
    Keeps together the main dependencies used
    at the moment a signal is sent.
"""
def __init__(
self,
session: "ClientSession",
trace_config: TraceConfig,
trace_config_ctx: SimpleNamespace,
) -> None:
self._trace_config = trace_config
self._trace_config_ctx = trace_config_ctx
self._session = session
async def send_request_start(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
return await self._trace_config.on_request_start.send(
self._session,
self._trace_config_ctx,
TraceRequestStartParams(method, url, headers),
)
async def send_request_chunk_sent(
self, method: str, url: URL, chunk: bytes
) -> None:
return await self._trace_config.on_request_chunk_sent.send(
self._session,
self._trace_config_ctx,
TraceRequestChunkSentParams(method, url, chunk),
)
async def send_response_chunk_received(
self, method: str, url: URL, chunk: bytes
) -> None:
return await self._trace_config.on_response_chunk_received.send(
self._session,
self._trace_config_ctx,
TraceResponseChunkReceivedParams(method, url, chunk),
)
async def send_request_end(
self,
method: str,
url: URL,
headers: "CIMultiDict[str]",
response: ClientResponse,
) -> None:
return await self._trace_config.on_request_end.send(
self._session,
self._trace_config_ctx,
TraceRequestEndParams(method, url, headers, response),
)
async def send_request_exception(
self,
method: str,
url: URL,
headers: "CIMultiDict[str]",
exception: BaseException,
) -> None:
return await self._trace_config.on_request_exception.send(
self._session,
self._trace_config_ctx,
TraceRequestExceptionParams(method, url, headers, exception),
)
async def send_request_redirect(
self,
method: str,
url: URL,
headers: "CIMultiDict[str]",
response: ClientResponse,
) -> None:
        return await self._trace_config.on_request_redirect.send(
self._session,
self._trace_config_ctx,
TraceRequestRedirectParams(method, url, headers, response),
)
async def send_connection_queued_start(self) -> None:
return await self._trace_config.on_connection_queued_start.send(
self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams()
)
async def send_connection_queued_end(self) -> None:
return await self._trace_config.on_connection_queued_end.send(
self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams()
)
async def send_connection_create_start(self) -> None:
return await self._trace_config.on_connection_create_start.send(
self._session, self._trace_config_ctx, TraceConnectionCreateStartParams()
)
async def send_connection_create_end(self) -> None:
return await self._trace_config.on_connection_create_end.send(
self._session, self._trace_config_ctx, TraceConnectionCreateEndParams()
)
async def send_connection_reuseconn(self) -> None:
return await self._trace_config.on_connection_reuseconn.send(
self._session, self._trace_config_ctx, TraceConnectionReuseconnParams()
)
async def send_dns_resolvehost_start(self, host: str) -> None:
return await self._trace_config.on_dns_resolvehost_start.send(
self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host)
)
async def send_dns_resolvehost_end(self, host: str) -> None:
return await self._trace_config.on_dns_resolvehost_end.send(
self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host)
)
async def send_dns_cache_hit(self, host: str) -> None:
return await self._trace_config.on_dns_cache_hit.send(
self._session, self._trace_config_ctx, TraceDnsCacheHitParams(host)
)
async def send_dns_cache_miss(self, host: str) -> None:
return await self._trace_config.on_dns_cache_miss.send(
self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host)
)
async def send_request_headers(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
        return await self._trace_config.on_request_headers_sent.send(
self._session,
self._trace_config_ctx,
TraceRequestHeadersSentParams(method, url, headers),
)
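# Hypothetical demo (not part of the original module): wire a callback into
# on_request_start; pass the config via ClientSession(trace_configs=[cfg]).
def _example_trace_config() -> TraceConfig:
    async def _on_start(
        session: "ClientSession",
        ctx: SimpleNamespace,
        params: TraceRequestStartParams,
    ) -> None:
        print("starting request:", params.method, params.url)

    cfg = TraceConfig()
    cfg.on_request_start.append(_on_start)
    return cfg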
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/abc.py | import asyncio
import logging
from abc import ABC, abstractmethod
from collections.abc import Sized
from http.cookies import BaseCookie, Morsel
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
)
from multidict import CIMultiDict
from yarl import URL
from .helpers import get_running_loop
from .typedefs import LooseCookies
if TYPE_CHECKING: # pragma: no cover
from .web_app import Application
from .web_exceptions import HTTPException
from .web_request import BaseRequest, Request
from .web_response import StreamResponse
else:
BaseRequest = Request = Application = StreamResponse = None
HTTPException = None
class AbstractRouter(ABC):
def __init__(self) -> None:
self._frozen = False
def post_init(self, app: Application) -> None:
"""Post init stage.
        Not an abstract method for the sake of backward compatibility,
but if the router wants to be aware of the application
it can override this.
"""
@property
def frozen(self) -> bool:
return self._frozen
def freeze(self) -> None:
"""Freeze router."""
self._frozen = True
@abstractmethod
async def resolve(self, request: Request) -> "AbstractMatchInfo":
"""Return MATCH_INFO for given request"""
class AbstractMatchInfo(ABC):
@property # pragma: no branch
@abstractmethod
def handler(self) -> Callable[[Request], Awaitable[StreamResponse]]:
"""Execute matched request handler"""
@property
@abstractmethod
def expect_handler(self) -> Callable[[Request], Awaitable[None]]:
"""Expect handler for 100-continue processing"""
@property # pragma: no branch
@abstractmethod
def http_exception(self) -> Optional[HTTPException]:
"""HTTPException instance raised on router's resolving, or None"""
@abstractmethod # pragma: no branch
def get_info(self) -> Dict[str, Any]:
"""Return a dict with additional info useful for introspection"""
@property # pragma: no branch
@abstractmethod
def apps(self) -> Tuple[Application, ...]:
"""Stack of nested applications.
Top level application is left-most element.
"""
@abstractmethod
def add_app(self, app: Application) -> None:
"""Add application to the nested apps stack."""
@abstractmethod
def freeze(self) -> None:
"""Freeze the match info.
The method is called after route resolution.
After the call .add_app() is forbidden.
"""
class AbstractView(ABC):
"""Abstract class based view."""
def __init__(self, request: Request) -> None:
self._request = request
@property
def request(self) -> Request:
"""Request instance."""
return self._request
@abstractmethod
def __await__(self) -> Generator[Any, None, StreamResponse]:
"""Execute the view handler."""
class AbstractResolver(ABC):
"""Abstract DNS resolver."""
@abstractmethod
async def resolve(self, host: str, port: int, family: int) -> List[Dict[str, Any]]:
"""Return IP address for given hostname"""
@abstractmethod
async def close(self) -> None:
"""Release resolver"""
if TYPE_CHECKING: # pragma: no cover
IterableBase = Iterable[Morsel[str]]
else:
IterableBase = Iterable
ClearCookiePredicate = Callable[["Morsel[str]"], bool]
class AbstractCookieJar(Sized, IterableBase):
"""Abstract Cookie Jar."""
def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
self._loop = get_running_loop(loop)
@abstractmethod
def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None:
"""Clear all cookies if no predicate is passed."""
@abstractmethod
def clear_domain(self, domain: str) -> None:
"""Clear all cookies for domain and all subdomains."""
@abstractmethod
def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
"""Update cookies."""
@abstractmethod
def filter_cookies(self, request_url: URL) -> "BaseCookie[str]":
"""Return the jar's cookies filtered by their attributes."""
class AbstractStreamWriter(ABC):
"""Abstract stream writer."""
buffer_size = 0
output_size = 0
length: Optional[int] = 0
@abstractmethod
async def write(self, chunk: bytes) -> None:
"""Write chunk into stream."""
@abstractmethod
async def write_eof(self, chunk: bytes = b"") -> None:
"""Write last chunk."""
@abstractmethod
async def drain(self) -> None:
"""Flush the write buffer."""
@abstractmethod
def enable_compression(self, encoding: str = "deflate") -> None:
"""Enable HTTP body compression"""
@abstractmethod
def enable_chunking(self) -> None:
"""Enable HTTP chunked mode"""
@abstractmethod
async def write_headers(
self, status_line: str, headers: "CIMultiDict[str]"
) -> None:
"""Write HTTP headers"""
class AbstractAccessLogger(ABC):
"""Abstract writer to access log."""
def __init__(self, logger: logging.Logger, log_format: str) -> None:
self.logger = logger
self.log_format = log_format
@abstractmethod
def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None:
"""Emit log to logger."""
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http_exceptions.py | """Low-level http related exceptions."""
from typing import Optional, Union
from .typedefs import _CIMultiDict
__all__ = ("HttpProcessingError",)
class HttpProcessingError(Exception):
"""HTTP error.
Shortcut for raising HTTP errors with custom code, message and headers.
code: HTTP Error code.
message: (optional) Error message.
headers: (optional) Headers to be sent in response, a list of pairs
"""
code = 0
message = ""
headers = None
def __init__(
self,
*,
code: Optional[int] = None,
message: str = "",
headers: Optional[_CIMultiDict] = None,
) -> None:
if code is not None:
self.code = code
self.headers = headers
self.message = message
def __str__(self) -> str:
return f"{self.code}, message={self.message!r}"
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {self}>"
class BadHttpMessage(HttpProcessingError):
code = 400
message = "Bad Request"
def __init__(self, message: str, *, headers: Optional[_CIMultiDict] = None) -> None:
super().__init__(message=message, headers=headers)
self.args = (message,)
class HttpBadRequest(BadHttpMessage):
code = 400
message = "Bad Request"
class PayloadEncodingError(BadHttpMessage):
"""Base class for payload errors"""
class ContentEncodingError(PayloadEncodingError):
"""Content encoding error."""
class TransferEncodingError(PayloadEncodingError):
"""transfer encoding error."""
class ContentLengthError(PayloadEncodingError):
"""Not enough data for satisfy content length header."""
class LineTooLong(BadHttpMessage):
def __init__(
self, line: str, limit: str = "Unknown", actual_size: str = "Unknown"
) -> None:
super().__init__(
f"Got more than {limit} bytes ({actual_size}) when reading {line}."
)
self.args = (line, limit, actual_size)
class InvalidHeader(BadHttpMessage):
def __init__(self, hdr: Union[bytes, str]) -> None:
if isinstance(hdr, bytes):
hdr = hdr.decode("utf-8", "surrogateescape")
super().__init__(f"Invalid HTTP Header: {hdr}")
self.hdr = hdr
self.args = (hdr,)
class BadStatusLine(BadHttpMessage):
def __init__(self, line: str = "") -> None:
if not isinstance(line, str):
line = repr(line)
super().__init__(f"Bad status line {line!r}")
self.args = (line,)
self.line = line
class InvalidURLError(BadHttpMessage):
pass
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_fileresponse.py | import asyncio
import mimetypes
import os
import pathlib
import sys
from typing import ( # noqa
IO,
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Iterator,
List,
Optional,
Tuple,
Union,
cast,
)
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import ETAG_ANY, ETag
from .typedefs import Final, LooseHeaders
from .web_exceptions import (
HTTPNotModified,
HTTPPartialContent,
HTTPPreconditionFailed,
HTTPRequestRangeNotSatisfiable,
)
from .web_response import StreamResponse
__all__ = ("FileResponse",)
if TYPE_CHECKING: # pragma: no cover
from .web_request import BaseRequest
_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]
NOSENDFILE: Final[bool] = bool(os.environ.get("AIOHTTP_NOSENDFILE"))
class FileResponse(StreamResponse):
"""A response object can be used to send files."""
def __init__(
self,
path: Union[str, pathlib.Path],
chunk_size: int = 256 * 1024,
status: int = 200,
reason: Optional[str] = None,
headers: Optional[LooseHeaders] = None,
) -> None:
super().__init__(status=status, reason=reason, headers=headers)
if isinstance(path, str):
path = pathlib.Path(path)
self._path = path
self._chunk_size = chunk_size
async def _sendfile_fallback(
self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int
) -> AbstractStreamWriter:
        # To keep memory usage low, fobj is transferred in chunks
# controlled by the constructor's chunk_size argument.
chunk_size = self._chunk_size
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, fobj.seek, offset)
chunk = await loop.run_in_executor(None, fobj.read, chunk_size)
while chunk:
await writer.write(chunk)
count = count - chunk_size
if count <= 0:
break
chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count))
await writer.drain()
return writer
async def _sendfile(
self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int
) -> AbstractStreamWriter:
writer = await super().prepare(request)
assert writer is not None
if NOSENDFILE or sys.version_info < (3, 7) or self.compression:
return await self._sendfile_fallback(writer, fobj, offset, count)
loop = request._loop
transport = request.transport
assert transport is not None
try:
await loop.sendfile(transport, fobj, offset, count)
except NotImplementedError:
return await self._sendfile_fallback(writer, fobj, offset, count)
await super().write_eof()
return writer
@staticmethod
def _strong_etag_match(etag_value: str, etags: Tuple[ETag, ...]) -> bool:
if len(etags) == 1 and etags[0].value == ETAG_ANY:
return True
return any(etag.value == etag_value for etag in etags if not etag.is_weak)
async def _not_modified(
self, request: "BaseRequest", etag_value: str, last_modified: float
) -> Optional[AbstractStreamWriter]:
self.set_status(HTTPNotModified.status_code)
self._length_check = False
self.etag = etag_value # type: ignore[assignment]
self.last_modified = last_modified # type: ignore[assignment]
        # Drop any user-provided Content-Length header; an HTTP 304
        # response should always have an empty body.
return await super().prepare(request)
async def _precondition_failed(
self, request: "BaseRequest"
) -> Optional[AbstractStreamWriter]:
self.set_status(HTTPPreconditionFailed.status_code)
self.content_length = 0
return await super().prepare(request)
async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
filepath = self._path
gzip = False
if "gzip" in request.headers.get(hdrs.ACCEPT_ENCODING, ""):
gzip_path = filepath.with_name(filepath.name + ".gz")
if gzip_path.is_file():
filepath = gzip_path
gzip = True
loop = asyncio.get_event_loop()
st: os.stat_result = await loop.run_in_executor(None, filepath.stat)
etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}"
last_modified = st.st_mtime
# https://tools.ietf.org/html/rfc7232#section-6
ifmatch = request.if_match
if ifmatch is not None and not self._strong_etag_match(etag_value, ifmatch):
return await self._precondition_failed(request)
unmodsince = request.if_unmodified_since
if (
unmodsince is not None
and ifmatch is None
and st.st_mtime > unmodsince.timestamp()
):
return await self._precondition_failed(request)
ifnonematch = request.if_none_match
if ifnonematch is not None and self._strong_etag_match(etag_value, ifnonematch):
return await self._not_modified(request, etag_value, last_modified)
modsince = request.if_modified_since
if (
modsince is not None
and ifnonematch is None
and st.st_mtime <= modsince.timestamp()
):
return await self._not_modified(request, etag_value, last_modified)
if hdrs.CONTENT_TYPE not in self.headers:
ct, encoding = mimetypes.guess_type(str(filepath))
if not ct:
ct = "application/octet-stream"
should_set_ct = True
else:
encoding = "gzip" if gzip else None
should_set_ct = False
status = self._status
file_size = st.st_size
count = file_size
start = None
ifrange = request.if_range
if ifrange is None or st.st_mtime <= ifrange.timestamp():
            # If-Range header check: the condition is
            # "cached date >= last modification date". If it holds, the
            # Range header is processed and 206 is returned (200 if the
            # Range header is missing); if it does not hold, the Range
            # header is ignored and 200 is returned.
try:
rng = request.http_range
start = rng.start
end = rng.stop
except ValueError:
# https://tools.ietf.org/html/rfc7233:
# A server generating a 416 (Range Not Satisfiable) response to
# a byte-range request SHOULD send a Content-Range header field
# with an unsatisfied-range value.
# The complete-length in a 416 response indicates the current
# length of the selected representation.
#
# Will do the same below. Many servers ignore this and do not
                # send a Content-Range header with HTTP 416.
self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
return await super().prepare(request)
# If a range request has been made, convert start, end slice
# notation into file pointer offset and count
if start is not None or end is not None:
if start < 0 and end is None: # return tail of file
start += file_size
if start < 0:
                        # e.g. the request carries "Range: bytes=-1000" but the
                        # file is only 200 bytes; clamp to the start of the file.
start = 0
count = file_size - start
else:
                    # rfc7233: If the last-byte-pos value is
# absent, or if the value is greater than or equal to
# the current length of the representation data,
# the byte range is interpreted as the remainder
# of the representation (i.e., the server replaces the
# value of last-byte-pos with a value that is one less than
# the current length of the selected representation).
count = (
min(end if end is not None else file_size, file_size) - start
)
if start >= file_size:
# HTTP 416 should be returned in this case.
#
# According to https://tools.ietf.org/html/rfc7233:
# If a valid byte-range-set includes at least one
# byte-range-spec with a first-byte-pos that is less than
# the current length of the representation, or at least one
# suffix-byte-range-spec with a non-zero suffix-length,
# then the byte-range-set is satisfiable. Otherwise, the
# byte-range-set is unsatisfiable.
self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
return await super().prepare(request)
status = HTTPPartialContent.status_code
                # Even when the whole file is sent, a Range request must
                # still be answered with an HTTP 206.
self.set_status(status)
if should_set_ct:
self.content_type = ct # type: ignore[assignment]
if encoding:
self.headers[hdrs.CONTENT_ENCODING] = encoding
if gzip:
self.headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING
self.etag = etag_value # type: ignore[assignment]
self.last_modified = st.st_mtime # type: ignore[assignment]
self.content_length = count
self.headers[hdrs.ACCEPT_RANGES] = "bytes"
real_start = cast(int, start)
if status == HTTPPartialContent.status_code:
self.headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format(
real_start, real_start + count - 1, file_size
)
        # If we are sending 0 bytes, calling sendfile() would raise a ValueError.
if count == 0 or request.method == hdrs.METH_HEAD or self.status in [204, 304]:
return await super().prepare(request)
fobj = await loop.run_in_executor(None, filepath.open, "rb")
        if start:  # start may be None or 0 here; both mean offset 0.
offset = start
else:
offset = 0
try:
return await self._sendfile(request, fobj, offset, count)
finally:
await loop.run_in_executor(None, fobj.close)
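# --- Illustrative sketch (not part of aiohttp) ------------------------------
# Typical use of FileResponse from a request handler; the path and handler
# name are hypothetical. Conditional requests (ETag, If-Modified-Since) and
# byte ranges are handled by prepare() above.
async def _serve_report(request: "BaseRequest") -> FileResponse:
    # chunk_size only matters for the fallback path when sendfile() is
    # unavailable or compression is enabled.
    return FileResponse(path="/var/data/report.pdf", chunk_size=64 * 1024)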
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/hdrs.py | """HTTP Headers constants."""
# After changing the file content call ./tools/gen.py
# to regenerate the headers parser
import sys
from typing import Set
from multidict import istr
if sys.version_info >= (3, 8):
from typing import Final
else:
from typing_extensions import Final
METH_ANY: Final[str] = "*"
METH_CONNECT: Final[str] = "CONNECT"
METH_HEAD: Final[str] = "HEAD"
METH_GET: Final[str] = "GET"
METH_DELETE: Final[str] = "DELETE"
METH_OPTIONS: Final[str] = "OPTIONS"
METH_PATCH: Final[str] = "PATCH"
METH_POST: Final[str] = "POST"
METH_PUT: Final[str] = "PUT"
METH_TRACE: Final[str] = "TRACE"
METH_ALL: Final[Set[str]] = {
METH_CONNECT,
METH_HEAD,
METH_GET,
METH_DELETE,
METH_OPTIONS,
METH_PATCH,
METH_POST,
METH_PUT,
METH_TRACE,
}
ACCEPT: Final[istr] = istr("Accept")
ACCEPT_CHARSET: Final[istr] = istr("Accept-Charset")
ACCEPT_ENCODING: Final[istr] = istr("Accept-Encoding")
ACCEPT_LANGUAGE: Final[istr] = istr("Accept-Language")
ACCEPT_RANGES: Final[istr] = istr("Accept-Ranges")
ACCESS_CONTROL_MAX_AGE: Final[istr] = istr("Access-Control-Max-Age")
ACCESS_CONTROL_ALLOW_CREDENTIALS: Final[istr] = istr("Access-Control-Allow-Credentials")
ACCESS_CONTROL_ALLOW_HEADERS: Final[istr] = istr("Access-Control-Allow-Headers")
ACCESS_CONTROL_ALLOW_METHODS: Final[istr] = istr("Access-Control-Allow-Methods")
ACCESS_CONTROL_ALLOW_ORIGIN: Final[istr] = istr("Access-Control-Allow-Origin")
ACCESS_CONTROL_EXPOSE_HEADERS: Final[istr] = istr("Access-Control-Expose-Headers")
ACCESS_CONTROL_REQUEST_HEADERS: Final[istr] = istr("Access-Control-Request-Headers")
ACCESS_CONTROL_REQUEST_METHOD: Final[istr] = istr("Access-Control-Request-Method")
AGE: Final[istr] = istr("Age")
ALLOW: Final[istr] = istr("Allow")
AUTHORIZATION: Final[istr] = istr("Authorization")
CACHE_CONTROL: Final[istr] = istr("Cache-Control")
CONNECTION: Final[istr] = istr("Connection")
CONTENT_DISPOSITION: Final[istr] = istr("Content-Disposition")
CONTENT_ENCODING: Final[istr] = istr("Content-Encoding")
CONTENT_LANGUAGE: Final[istr] = istr("Content-Language")
CONTENT_LENGTH: Final[istr] = istr("Content-Length")
CONTENT_LOCATION: Final[istr] = istr("Content-Location")
CONTENT_MD5: Final[istr] = istr("Content-MD5")
CONTENT_RANGE: Final[istr] = istr("Content-Range")
CONTENT_TRANSFER_ENCODING: Final[istr] = istr("Content-Transfer-Encoding")
CONTENT_TYPE: Final[istr] = istr("Content-Type")
COOKIE: Final[istr] = istr("Cookie")
DATE: Final[istr] = istr("Date")
DESTINATION: Final[istr] = istr("Destination")
DIGEST: Final[istr] = istr("Digest")
ETAG: Final[istr] = istr("Etag")
EXPECT: Final[istr] = istr("Expect")
EXPIRES: Final[istr] = istr("Expires")
FORWARDED: Final[istr] = istr("Forwarded")
FROM: Final[istr] = istr("From")
HOST: Final[istr] = istr("Host")
IF_MATCH: Final[istr] = istr("If-Match")
IF_MODIFIED_SINCE: Final[istr] = istr("If-Modified-Since")
IF_NONE_MATCH: Final[istr] = istr("If-None-Match")
IF_RANGE: Final[istr] = istr("If-Range")
IF_UNMODIFIED_SINCE: Final[istr] = istr("If-Unmodified-Since")
KEEP_ALIVE: Final[istr] = istr("Keep-Alive")
LAST_EVENT_ID: Final[istr] = istr("Last-Event-ID")
LAST_MODIFIED: Final[istr] = istr("Last-Modified")
LINK: Final[istr] = istr("Link")
LOCATION: Final[istr] = istr("Location")
MAX_FORWARDS: Final[istr] = istr("Max-Forwards")
ORIGIN: Final[istr] = istr("Origin")
PRAGMA: Final[istr] = istr("Pragma")
PROXY_AUTHENTICATE: Final[istr] = istr("Proxy-Authenticate")
PROXY_AUTHORIZATION: Final[istr] = istr("Proxy-Authorization")
RANGE: Final[istr] = istr("Range")
REFERER: Final[istr] = istr("Referer")
RETRY_AFTER: Final[istr] = istr("Retry-After")
SEC_WEBSOCKET_ACCEPT: Final[istr] = istr("Sec-WebSocket-Accept")
SEC_WEBSOCKET_VERSION: Final[istr] = istr("Sec-WebSocket-Version")
SEC_WEBSOCKET_PROTOCOL: Final[istr] = istr("Sec-WebSocket-Protocol")
SEC_WEBSOCKET_EXTENSIONS: Final[istr] = istr("Sec-WebSocket-Extensions")
SEC_WEBSOCKET_KEY: Final[istr] = istr("Sec-WebSocket-Key")
SEC_WEBSOCKET_KEY1: Final[istr] = istr("Sec-WebSocket-Key1")
SERVER: Final[istr] = istr("Server")
SET_COOKIE: Final[istr] = istr("Set-Cookie")
TE: Final[istr] = istr("TE")
TRAILER: Final[istr] = istr("Trailer")
TRANSFER_ENCODING: Final[istr] = istr("Transfer-Encoding")
UPGRADE: Final[istr] = istr("Upgrade")
URI: Final[istr] = istr("URI")
USER_AGENT: Final[istr] = istr("User-Agent")
VARY: Final[istr] = istr("Vary")
VIA: Final[istr] = istr("Via")
WANT_DIGEST: Final[istr] = istr("Want-Digest")
WARNING: Final[istr] = istr("Warning")
WWW_AUTHENTICATE: Final[istr] = istr("WWW-Authenticate")
X_FORWARDED_FOR: Final[istr] = istr("X-Forwarded-For")
X_FORWARDED_HOST: Final[istr] = istr("X-Forwarded-Host")
X_FORWARDED_PROTO: Final[istr] = istr("X-Forwarded-Proto")
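# --- Illustrative sketch (not part of aiohttp) ------------------------------
# The constants above are istr (case-insensitive str) instances, so header
# lookups match regardless of the casing used on the wire:
def _demo_istr_lookup() -> None:  # hypothetical helper
    from multidict import CIMultiDict
    headers = CIMultiDict({"content-type": "text/plain"})
    # CONTENT_TYPE is istr("Content-Type"); the lookup still matches.
    assert headers[CONTENT_TYPE] == "text/plain"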
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/client_ws.py | """WebSocket client for asyncio."""
import asyncio
from typing import Any, Optional, cast
import async_timeout
from .client_exceptions import ClientError
from .client_reqrep import ClientResponse
from .helpers import call_later, set_result
from .http import (
WS_CLOSED_MESSAGE,
WS_CLOSING_MESSAGE,
WebSocketError,
WSCloseCode,
WSMessage,
WSMsgType,
)
from .http_websocket import WebSocketWriter # WSMessage
from .streams import EofStream, FlowControlDataQueue
from .typedefs import (
DEFAULT_JSON_DECODER,
DEFAULT_JSON_ENCODER,
JSONDecoder,
JSONEncoder,
)
class ClientWebSocketResponse:
def __init__(
self,
reader: "FlowControlDataQueue[WSMessage]",
writer: WebSocketWriter,
protocol: Optional[str],
response: ClientResponse,
timeout: float,
autoclose: bool,
autoping: bool,
loop: asyncio.AbstractEventLoop,
*,
receive_timeout: Optional[float] = None,
heartbeat: Optional[float] = None,
compress: int = 0,
client_notakeover: bool = False,
) -> None:
self._response = response
self._conn = response.connection
self._writer = writer
self._reader = reader
self._protocol = protocol
self._closed = False
self._closing = False
self._close_code: Optional[int] = None
self._timeout = timeout
self._receive_timeout = receive_timeout
self._autoclose = autoclose
self._autoping = autoping
self._heartbeat = heartbeat
self._heartbeat_cb: Optional[asyncio.TimerHandle] = None
if heartbeat is not None:
self._pong_heartbeat = heartbeat / 2.0
self._pong_response_cb: Optional[asyncio.TimerHandle] = None
self._loop = loop
self._waiting: Optional[asyncio.Future[bool]] = None
self._exception: Optional[BaseException] = None
self._compress = compress
self._client_notakeover = client_notakeover
self._reset_heartbeat()
def _cancel_heartbeat(self) -> None:
if self._pong_response_cb is not None:
self._pong_response_cb.cancel()
self._pong_response_cb = None
if self._heartbeat_cb is not None:
self._heartbeat_cb.cancel()
self._heartbeat_cb = None
def _reset_heartbeat(self) -> None:
self._cancel_heartbeat()
if self._heartbeat is not None:
self._heartbeat_cb = call_later(
self._send_heartbeat, self._heartbeat, self._loop
)
def _send_heartbeat(self) -> None:
if self._heartbeat is not None and not self._closed:
            # Firing off a fire-and-forget task is not perfect, but it is
            # acceptable for sending a ping; the alternative would be a
            # long-lived heartbeat task on the class.
self._loop.create_task(self._writer.ping())
if self._pong_response_cb is not None:
self._pong_response_cb.cancel()
self._pong_response_cb = call_later(
self._pong_not_received, self._pong_heartbeat, self._loop
)
def _pong_not_received(self) -> None:
if not self._closed:
self._closed = True
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = asyncio.TimeoutError()
self._response.close()
@property
def closed(self) -> bool:
return self._closed
@property
def close_code(self) -> Optional[int]:
return self._close_code
@property
def protocol(self) -> Optional[str]:
return self._protocol
@property
def compress(self) -> int:
return self._compress
@property
def client_notakeover(self) -> bool:
return self._client_notakeover
def get_extra_info(self, name: str, default: Any = None) -> Any:
"""extra info from connection transport"""
conn = self._response.connection
if conn is None:
return default
transport = conn.transport
if transport is None:
return default
return transport.get_extra_info(name, default)
def exception(self) -> Optional[BaseException]:
return self._exception
async def ping(self, message: bytes = b"") -> None:
await self._writer.ping(message)
async def pong(self, message: bytes = b"") -> None:
await self._writer.pong(message)
async def send_str(self, data: str, compress: Optional[int] = None) -> None:
if not isinstance(data, str):
raise TypeError("data argument must be str (%r)" % type(data))
await self._writer.send(data, binary=False, compress=compress)
async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None:
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError("data argument must be byte-ish (%r)" % type(data))
await self._writer.send(data, binary=True, compress=compress)
async def send_json(
self,
data: Any,
compress: Optional[int] = None,
*,
dumps: JSONEncoder = DEFAULT_JSON_ENCODER,
) -> None:
await self.send_str(dumps(data), compress=compress)
async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool:
        # We need to break the `receive()` cycle first, since
        # `close()` may be called from a different task.
if self._waiting is not None and not self._closed:
self._reader.feed_data(WS_CLOSING_MESSAGE, 0)
await self._waiting
if not self._closed:
self._cancel_heartbeat()
self._closed = True
try:
await self._writer.close(code, message)
except asyncio.CancelledError:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._response.close()
raise
except Exception as exc:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = exc
self._response.close()
return True
if self._closing:
self._response.close()
return True
while True:
try:
async with async_timeout.timeout(self._timeout):
msg = await self._reader.read()
except asyncio.CancelledError:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._response.close()
raise
except Exception as exc:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = exc
self._response.close()
return True
if msg.type == WSMsgType.CLOSE:
self._close_code = msg.data
self._response.close()
return True
else:
return False
async def receive(self, timeout: Optional[float] = None) -> WSMessage:
while True:
if self._waiting is not None:
raise RuntimeError("Concurrent call to receive() is not allowed")
if self._closed:
return WS_CLOSED_MESSAGE
elif self._closing:
await self.close()
return WS_CLOSED_MESSAGE
try:
self._waiting = self._loop.create_future()
try:
async with async_timeout.timeout(timeout or self._receive_timeout):
msg = await self._reader.read()
self._reset_heartbeat()
finally:
waiter = self._waiting
self._waiting = None
set_result(waiter, True)
except (asyncio.CancelledError, asyncio.TimeoutError):
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
raise
except EofStream:
self._close_code = WSCloseCode.OK
await self.close()
return WSMessage(WSMsgType.CLOSED, None, None)
except ClientError:
self._closed = True
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
return WS_CLOSED_MESSAGE
except WebSocketError as exc:
self._close_code = exc.code
await self.close(code=exc.code)
return WSMessage(WSMsgType.ERROR, exc, None)
except Exception as exc:
self._exception = exc
self._closing = True
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
await self.close()
return WSMessage(WSMsgType.ERROR, exc, None)
if msg.type == WSMsgType.CLOSE:
self._closing = True
self._close_code = msg.data
if not self._closed and self._autoclose:
await self.close()
elif msg.type == WSMsgType.CLOSING:
self._closing = True
elif msg.type == WSMsgType.PING and self._autoping:
await self.pong(msg.data)
continue
elif msg.type == WSMsgType.PONG and self._autoping:
continue
return msg
async def receive_str(self, *, timeout: Optional[float] = None) -> str:
msg = await self.receive(timeout)
if msg.type != WSMsgType.TEXT:
raise TypeError(f"Received message {msg.type}:{msg.data!r} is not str")
return cast(str, msg.data)
async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
msg = await self.receive(timeout)
if msg.type != WSMsgType.BINARY:
raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
return cast(bytes, msg.data)
async def receive_json(
self,
*,
loads: JSONDecoder = DEFAULT_JSON_DECODER,
timeout: Optional[float] = None,
) -> Any:
data = await self.receive_str(timeout=timeout)
return loads(data)
def __aiter__(self) -> "ClientWebSocketResponse":
return self
async def __anext__(self) -> WSMessage:
msg = await self.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
raise StopAsyncIteration
return msg
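# --- Illustrative sketch (not part of aiohttp) ------------------------------
# ClientWebSocketResponse objects are created by ClientSession.ws_connect();
# the heartbeat argument drives the ping/pong bookkeeping above. The URL is
# hypothetical.
async def _demo_ws_echo() -> None:
    import aiohttp
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("wss://example.org/ws", heartbeat=30.0) as ws:
            await ws.send_str("hello")
            async for msg in ws:  # PING/PONG frames are handled by autoping
                if msg.type == WSMsgType.TEXT:
                    print(msg.data)
                    break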
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/multipart.py | import base64
import binascii
import json
import re
import uuid
import warnings
import zlib
from collections import deque
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib.parse import parse_qsl, unquote, urlencode
from multidict import CIMultiDict, CIMultiDictProxy, MultiMapping
from .hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE,
)
from .helpers import CHAR, TOKEN, parse_mimetype, reify
from .http import HeadersParser
from .payload import (
JsonPayload,
LookupError,
Order,
Payload,
StringPayload,
get_payload,
payload_type,
)
from .streams import StreamReader
__all__ = (
"MultipartReader",
"MultipartWriter",
"BodyPartReader",
"BadContentDispositionHeader",
"BadContentDispositionParam",
"parse_content_disposition",
"content_disposition_filename",
)
if TYPE_CHECKING: # pragma: no cover
from .client_reqrep import ClientResponse
class BadContentDispositionHeader(RuntimeWarning):
pass
class BadContentDispositionParam(RuntimeWarning):
pass
def parse_content_disposition(
header: Optional[str],
) -> Tuple[Optional[str], Dict[str, str]]:
def is_token(string: str) -> bool:
return bool(string) and TOKEN >= set(string)
def is_quoted(string: str) -> bool:
return string[0] == string[-1] == '"'
def is_rfc5987(string: str) -> bool:
return is_token(string) and string.count("'") == 2
def is_extended_param(string: str) -> bool:
return string.endswith("*")
def is_continuous_param(string: str) -> bool:
pos = string.find("*") + 1
if not pos:
return False
substring = string[pos:-1] if string.endswith("*") else string[pos:]
return substring.isdigit()
def unescape(text: str, *, chars: str = "".join(map(re.escape, CHAR))) -> str:
return re.sub(f"\\\\([{chars}])", "\\1", text)
if not header:
return None, {}
disptype, *parts = header.split(";")
if not is_token(disptype):
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params: Dict[str, str] = {}
while parts:
item = parts.pop(0)
if "=" not in item:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
key, value = item.split("=", 1)
key = key.lower().strip()
value = value.lstrip()
if key in params:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
if not is_token(key):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_continuous_param(key):
if is_quoted(value):
value = unescape(value[1:-1])
elif not is_token(value):
warnings.warn(BadContentDispositionParam(item))
continue
elif is_extended_param(key):
if is_rfc5987(value):
encoding, _, value = value.split("'", 2)
encoding = encoding or "utf-8"
else:
warnings.warn(BadContentDispositionParam(item))
continue
try:
value = unquote(value, encoding, "strict")
except UnicodeDecodeError: # pragma: nocover
warnings.warn(BadContentDispositionParam(item))
continue
else:
failed = True
if is_quoted(value):
failed = False
value = unescape(value[1:-1].lstrip("\\/"))
elif is_token(value):
failed = False
elif parts:
                # The value may simply contain a ';' (e.g. in a filename).
                # This fixes only that one case; a proper fix would require
                # redesigning the parser.
_value = f"{value};{parts[0]}"
if is_quoted(_value):
parts.pop(0)
value = unescape(_value[1:-1].lstrip("\\/"))
failed = False
if failed:
warnings.warn(BadContentDispositionHeader(header))
return None, {}
params[key] = value
return disptype.lower(), params
def content_disposition_filename(
params: Mapping[str, str], name: str = "filename"
) -> Optional[str]:
name_suf = "%s*" % name
if not params:
return None
elif name_suf in params:
return params[name_suf]
elif name in params:
return params[name]
else:
parts = []
fnparams = sorted(
(key, value) for key, value in params.items() if key.startswith(name_suf)
)
for num, (key, value) in enumerate(fnparams):
_, tail = key.split("*", 1)
if tail.endswith("*"):
tail = tail[:-1]
if tail == str(num):
parts.append(value)
else:
break
if not parts:
return None
value = "".join(parts)
if "'" in value:
encoding, _, value = value.split("'", 2)
encoding = encoding or "utf-8"
return unquote(value, encoding, "strict")
return value
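# --- Illustrative sketch (not part of aiohttp) ------------------------------
# How the two helpers above combine: parse_content_disposition() yields the
# disposition type and a parameter dict, and content_disposition_filename()
# resolves filename / filename* / filename*N* variants from that dict.
def _demo_content_disposition() -> None:  # hypothetical helper
    disptype, params = parse_content_disposition('attachment; filename="report.pdf"')
    assert disptype == "attachment"
    assert content_disposition_filename(params) == "report.pdf"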
class MultipartResponseWrapper:
"""Wrapper around the MultipartReader.
    It takes care of the underlying connection and closes it when needed.
"""
def __init__(
self,
resp: "ClientResponse",
stream: "MultipartReader",
) -> None:
self.resp = resp
self.stream = stream
def __aiter__(self) -> "MultipartResponseWrapper":
return self
async def __anext__(
self,
) -> Union["MultipartReader", "BodyPartReader"]:
part = await self.next()
if part is None:
raise StopAsyncIteration
return part
def at_eof(self) -> bool:
"""Returns True when all response data had been read."""
return self.resp.content.at_eof()
async def next(
self,
) -> Optional[Union["MultipartReader", "BodyPartReader"]]:
"""Emits next multipart reader object."""
item = await self.stream.next()
if self.stream.at_eof():
await self.release()
return item
async def release(self) -> None:
"""Release the connection gracefully.
All remaining content is read to the void.
"""
await self.resp.release()
class BodyPartReader:
"""Multipart reader for single body part."""
chunk_size = 8192
def __init__(
self, boundary: bytes, headers: "CIMultiDictProxy[str]", content: StreamReader
) -> None:
self.headers = headers
self._boundary = boundary
self._content = content
self._at_eof = False
length = self.headers.get(CONTENT_LENGTH, None)
self._length = int(length) if length is not None else None
self._read_bytes = 0
        # TODO: typing.Deque is not supported by Python 3.5
self._unread: Deque[bytes] = deque()
self._prev_chunk: Optional[bytes] = None
self._content_eof = 0
self._cache: Dict[str, Any] = {}
def __aiter__(self) -> AsyncIterator["BodyPartReader"]:
return self # type: ignore[return-value]
async def __anext__(self) -> bytes:
part = await self.next()
if part is None:
raise StopAsyncIteration
return part
async def next(self) -> Optional[bytes]:
item = await self.read()
if not item:
return None
return item
async def read(self, *, decode: bool = False) -> bytes:
"""Reads body part data.
        decode: if True, decodes the data according to the
        Content-Encoding header; if the header is missing,
        the data remains untouched.
"""
if self._at_eof:
return b""
data = bytearray()
while not self._at_eof:
data.extend(await self.read_chunk(self.chunk_size))
if decode:
return self.decode(data)
return data
async def read_chunk(self, size: int = chunk_size) -> bytes:
"""Reads body part content chunk of the specified size.
size: chunk size
"""
if self._at_eof:
return b""
if self._length:
chunk = await self._read_chunk_from_length(size)
else:
chunk = await self._read_chunk_from_stream(size)
self._read_bytes += len(chunk)
if self._read_bytes == self._length:
self._at_eof = True
if self._at_eof:
            crlf = await self._content.readline()
            assert (
                b"\r\n" == crlf
            ), "reader did not read all the data or it is malformed"
return chunk
async def _read_chunk_from_length(self, size: int) -> bytes:
# Reads body part content chunk of the specified size.
        # The body part must have a Content-Length header with a proper value.
assert self._length is not None, "Content-Length required for chunked read"
chunk_size = min(size, self._length - self._read_bytes)
chunk = await self._content.read(chunk_size)
return chunk
async def _read_chunk_from_stream(self, size: int) -> bytes:
# Reads content chunk of body part with unknown length.
# The Content-Length header for body part is not necessary.
assert (
size >= len(self._boundary) + 2
), "Chunk size must be greater or equal than boundary length + 2"
first_chunk = self._prev_chunk is None
if first_chunk:
self._prev_chunk = await self._content.read(size)
chunk = await self._content.read(size)
self._content_eof += int(self._content.at_eof())
assert self._content_eof < 3, "Reading after EOF"
assert self._prev_chunk is not None
window = self._prev_chunk + chunk
sub = b"\r\n" + self._boundary
if first_chunk:
idx = window.find(sub)
else:
idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
if idx >= 0:
# pushing boundary back to content
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
self._content.unread_data(window[idx:])
if size > idx:
self._prev_chunk = self._prev_chunk[:idx]
chunk = window[len(self._prev_chunk) : idx]
if not chunk:
self._at_eof = True
result = self._prev_chunk
self._prev_chunk = chunk
return result
async def readline(self) -> bytes:
"""Reads body part by line by line."""
if self._at_eof:
return b""
if self._unread:
line = self._unread.popleft()
else:
line = await self._content.readline()
if line.startswith(self._boundary):
            # The very last boundary may not come with \r\n,
            # so handle all boundaries uniformly.
sline = line.rstrip(b"\r\n")
boundary = self._boundary
last_boundary = self._boundary + b"--"
            # ensure that we read exactly the boundary, not something that merely starts with it
if sline == boundary or sline == last_boundary:
self._at_eof = True
self._unread.append(line)
return b""
else:
next_line = await self._content.readline()
if next_line.startswith(self._boundary):
line = line[:-2] # strip CRLF but only once
self._unread.append(next_line)
return line
async def release(self) -> None:
"""Like read(), but reads all the data to the void."""
if self._at_eof:
return
while not self._at_eof:
await self.read_chunk(self.chunk_size)
async def text(self, *, encoding: Optional[str] = None) -> str:
"""Like read(), but assumes that body part contains text data."""
data = await self.read(decode=True)
# see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA
# and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA
encoding = encoding or self.get_charset(default="utf-8")
return data.decode(encoding)
async def json(self, *, encoding: Optional[str] = None) -> Optional[Dict[str, Any]]:
"""Like read(), but assumes that body parts contains JSON data."""
data = await self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default="utf-8")
return cast(Dict[str, Any], json.loads(data.decode(encoding)))
async def form(self, *, encoding: Optional[str] = None) -> List[Tuple[str, str]]:
"""Like read(), but assumes that body parts contain form urlencoded data."""
data = await self.read(decode=True)
if not data:
return []
if encoding is not None:
real_encoding = encoding
else:
real_encoding = self.get_charset(default="utf-8")
return parse_qsl(
data.rstrip().decode(real_encoding),
keep_blank_values=True,
encoding=real_encoding,
)
def at_eof(self) -> bool:
"""Returns True if the boundary was reached or False otherwise."""
return self._at_eof
def decode(self, data: bytes) -> bytes:
"""Decodes data.
        Decoding is done according to the specified Content-Encoding
        or Content-Transfer-Encoding header value.
"""
if CONTENT_TRANSFER_ENCODING in self.headers:
data = self._decode_content_transfer(data)
if CONTENT_ENCODING in self.headers:
return self._decode_content(data)
return data
def _decode_content(self, data: bytes) -> bytes:
encoding = self.headers.get(CONTENT_ENCODING, "").lower()
if encoding == "deflate":
return zlib.decompress(data, -zlib.MAX_WBITS)
elif encoding == "gzip":
return zlib.decompress(data, 16 + zlib.MAX_WBITS)
elif encoding == "identity":
return data
else:
raise RuntimeError(f"unknown content encoding: {encoding}")
def _decode_content_transfer(self, data: bytes) -> bytes:
encoding = self.headers.get(CONTENT_TRANSFER_ENCODING, "").lower()
if encoding == "base64":
return base64.b64decode(data)
elif encoding == "quoted-printable":
return binascii.a2b_qp(data)
elif encoding in ("binary", "8bit", "7bit"):
return data
else:
            raise RuntimeError(
                f"unknown content transfer encoding: {encoding}"
            )
def get_charset(self, default: str) -> str:
"""Returns charset parameter from Content-Type header or default."""
ctype = self.headers.get(CONTENT_TYPE, "")
mimetype = parse_mimetype(ctype)
return mimetype.parameters.get("charset", default)
@reify
def name(self) -> Optional[str]:
"""Returns name specified in Content-Disposition header.
If the header is missing or malformed, returns None.
"""
_, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params, "name")
@reify
def filename(self) -> Optional[str]:
"""Returns filename specified in Content-Disposition header.
Returns None if the header is missing or malformed.
"""
_, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION))
return content_disposition_filename(params, "filename")
@payload_type(BodyPartReader, order=Order.try_first)
class BodyPartReaderPayload(Payload):
def __init__(self, value: BodyPartReader, *args: Any, **kwargs: Any) -> None:
super().__init__(value, *args, **kwargs)
params: Dict[str, str] = {}
if value.name is not None:
params["name"] = value.name
if value.filename is not None:
params["filename"] = value.filename
if params:
self.set_content_disposition("attachment", True, **params)
async def write(self, writer: Any) -> None:
field = self._value
chunk = await field.read_chunk(size=2**16)
while chunk:
await writer.write(field.decode(chunk))
chunk = await field.read_chunk(size=2**16)
class MultipartReader:
"""Multipart body reader."""
    #: Response wrapper, used when the multipart reader is constructed from a response.
response_wrapper_cls = MultipartResponseWrapper
#: Multipart reader class, used to handle multipart/* body parts.
    #: None means type(self).
multipart_reader_cls = None
#: Body part reader class for non multipart/* content types.
part_reader_cls = BodyPartReader
def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None:
self.headers = headers
self._boundary = ("--" + self._get_boundary()).encode()
self._content = content
self._last_part: Optional[Union["MultipartReader", BodyPartReader]] = None
self._at_eof = False
self._at_bof = True
self._unread: List[bytes] = []
def __aiter__(
self,
) -> AsyncIterator["BodyPartReader"]:
return self # type: ignore[return-value]
async def __anext__(
self,
) -> Optional[Union["MultipartReader", BodyPartReader]]:
part = await self.next()
if part is None:
raise StopAsyncIteration
return part
@classmethod
def from_response(
cls,
response: "ClientResponse",
) -> MultipartResponseWrapper:
"""Constructs reader instance from HTTP response.
:param response: :class:`~aiohttp.client.ClientResponse` instance
"""
obj = cls.response_wrapper_cls(
response, cls(response.headers, response.content)
)
return obj
def at_eof(self) -> bool:
"""Returns True if the final boundary was reached, false otherwise."""
return self._at_eof
async def next(
self,
) -> Optional[Union["MultipartReader", BodyPartReader]]:
"""Emits the next multipart body part."""
        # If we are at BOF, we need to skip data up to the first boundary.
if self._at_eof:
return None
await self._maybe_release_last_part()
if self._at_bof:
await self._read_until_first_boundary()
self._at_bof = False
else:
await self._read_boundary()
if self._at_eof: # we just read the last boundary, nothing to do there
return None
self._last_part = await self.fetch_next_part()
return self._last_part
async def release(self) -> None:
"""Reads all the body parts to the void till the final boundary."""
while not self._at_eof:
item = await self.next()
if item is None:
break
await item.release()
async def fetch_next_part(
self,
) -> Union["MultipartReader", BodyPartReader]:
"""Returns the next body part reader."""
headers = await self._read_headers()
return self._get_part_reader(headers)
def _get_part_reader(
self,
headers: "CIMultiDictProxy[str]",
) -> Union["MultipartReader", BodyPartReader]:
"""Dispatches the response by the `Content-Type` header.
Returns a suitable reader instance.
:param dict headers: Response headers
"""
ctype = headers.get(CONTENT_TYPE, "")
mimetype = parse_mimetype(ctype)
if mimetype.type == "multipart":
if self.multipart_reader_cls is None:
return type(self)(headers, self._content)
return self.multipart_reader_cls(headers, self._content)
else:
return self.part_reader_cls(self._boundary, headers, self._content)
def _get_boundary(self) -> str:
mimetype = parse_mimetype(self.headers[CONTENT_TYPE])
assert mimetype.type == "multipart", "multipart/* content type expected"
if "boundary" not in mimetype.parameters:
raise ValueError(
"boundary missed for Content-Type: %s" % self.headers[CONTENT_TYPE]
)
boundary = mimetype.parameters["boundary"]
if len(boundary) > 70:
raise ValueError("boundary %r is too long (70 chars max)" % boundary)
return boundary
async def _readline(self) -> bytes:
if self._unread:
return self._unread.pop()
return await self._content.readline()
async def _read_until_first_boundary(self) -> None:
while True:
chunk = await self._readline()
if chunk == b"":
raise ValueError(
"Could not find starting boundary %r" % (self._boundary)
)
chunk = chunk.rstrip()
if chunk == self._boundary:
return
elif chunk == self._boundary + b"--":
self._at_eof = True
return
async def _read_boundary(self) -> None:
chunk = (await self._readline()).rstrip()
if chunk == self._boundary:
pass
elif chunk == self._boundary + b"--":
self._at_eof = True
epilogue = await self._readline()
next_line = await self._readline()
            # An epilogue is expected, followed by either the end of input
            # or the parent multipart boundary; if the parent boundary is
            # found, it should be marked as unread and handed to the parent
            # for processing.
if next_line[:2] == b"--":
self._unread.append(next_line)
# otherwise the request is likely missing an epilogue and both
# lines should be passed to the parent for processing
# (this handles the old behavior gracefully)
else:
self._unread.extend([next_line, epilogue])
else:
raise ValueError(f"Invalid boundary {chunk!r}, expected {self._boundary!r}")
async def _read_headers(self) -> "CIMultiDictProxy[str]":
lines = [b""]
while True:
chunk = await self._content.readline()
chunk = chunk.strip()
lines.append(chunk)
if not chunk:
break
parser = HeadersParser()
headers, raw_headers = parser.parse_headers(lines)
return headers
async def _maybe_release_last_part(self) -> None:
"""Ensures that the last read body part is read completely."""
if self._last_part is not None:
if not self._last_part.at_eof():
await self._last_part.release()
self._unread.extend(self._last_part._unread)
self._last_part = None
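# --- Illustrative sketch (not part of aiohttp) ------------------------------
# Consuming a multipart response with the reader above; the wrapper returned
# by from_response() releases the connection after the final boundary.
async def _demo_read_parts(resp: "ClientResponse") -> None:  # hypothetical helper
    reader = MultipartReader.from_response(resp)
    async for part in reader:
        if isinstance(part, BodyPartReader):
            data = await part.read(decode=True)  # honours Content-Encoding
            print(part.name, len(data))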
_Part = Tuple[Payload, str, str]
class MultipartWriter(Payload):
"""Multipart body writer."""
def __init__(self, subtype: str = "mixed", boundary: Optional[str] = None) -> None:
boundary = boundary if boundary is not None else uuid.uuid4().hex
        # The underlying Payload API demands a str (utf-8), not bytes,
        # so to make sure nothing is lost during the conversion we
        # require the boundary to be ASCII-only.
try:
self._boundary = boundary.encode("ascii")
except UnicodeEncodeError:
raise ValueError("boundary should contain ASCII only chars") from None
ctype = f"multipart/{subtype}; boundary={self._boundary_value}"
super().__init__(None, content_type=ctype)
self._parts: List[_Part] = []
def __enter__(self) -> "MultipartWriter":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
pass
def __iter__(self) -> Iterator[_Part]:
return iter(self._parts)
def __len__(self) -> int:
return len(self._parts)
def __bool__(self) -> bool:
return True
_valid_tchar_regex = re.compile(rb"\A[!#$%&'*+\-.^_`|~\w]+\Z")
_invalid_qdtext_char_regex = re.compile(rb"[\x00-\x08\x0A-\x1F\x7F]")
@property
def _boundary_value(self) -> str:
"""Wrap boundary parameter value in quotes, if necessary.
        Reads self.boundary and returns a unicode string.
"""
# Refer to RFCs 7231, 7230, 5234.
#
# parameter = token "=" ( token / quoted-string )
# token = 1*tchar
# quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
# qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
# obs-text = %x80-FF
# quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
# VCHAR = %x21-7E
value = self._boundary
if re.match(self._valid_tchar_regex, value):
return value.decode("ascii") # cannot fail
if re.search(self._invalid_qdtext_char_regex, value):
raise ValueError("boundary value contains invalid characters")
# escape %x5C and %x22
quoted_value_content = value.replace(b"\\", b"\\\\")
quoted_value_content = quoted_value_content.replace(b'"', b'\\"')
return '"' + quoted_value_content.decode("ascii") + '"'
@property
def boundary(self) -> str:
return self._boundary.decode("ascii")
def append(self, obj: Any, headers: Optional[MultiMapping[str]] = None) -> Payload:
if headers is None:
headers = CIMultiDict()
if isinstance(obj, Payload):
obj.headers.update(headers)
return self.append_payload(obj)
else:
try:
payload = get_payload(obj, headers=headers)
except LookupError:
raise TypeError("Cannot create payload from %r" % obj)
else:
return self.append_payload(payload)
def append_payload(self, payload: Payload) -> Payload:
"""Adds a new body part to multipart writer."""
# compression
encoding: Optional[str] = payload.headers.get(
CONTENT_ENCODING,
"",
).lower()
if encoding and encoding not in ("deflate", "gzip", "identity"):
raise RuntimeError(f"unknown content encoding: {encoding}")
if encoding == "identity":
encoding = None
# te encoding
te_encoding: Optional[str] = payload.headers.get(
CONTENT_TRANSFER_ENCODING,
"",
).lower()
if te_encoding not in ("", "base64", "quoted-printable", "binary"):
            raise RuntimeError(
                f"unknown content transfer encoding: {te_encoding}"
            )
if te_encoding == "binary":
te_encoding = None
# size
size = payload.size
if size is not None and not (encoding or te_encoding):
payload.headers[CONTENT_LENGTH] = str(size)
self._parts.append((payload, encoding, te_encoding)) # type: ignore[arg-type]
return payload
def append_json(
self, obj: Any, headers: Optional[MultiMapping[str]] = None
) -> Payload:
"""Helper to append JSON part."""
if headers is None:
headers = CIMultiDict()
return self.append_payload(JsonPayload(obj, headers=headers))
def append_form(
self,
obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]],
headers: Optional[MultiMapping[str]] = None,
) -> Payload:
"""Helper to append form urlencoded part."""
assert isinstance(obj, (Sequence, Mapping))
if headers is None:
headers = CIMultiDict()
if isinstance(obj, Mapping):
obj = list(obj.items())
data = urlencode(obj, doseq=True)
return self.append_payload(
StringPayload(
data, headers=headers, content_type="application/x-www-form-urlencoded"
)
)
@property
def size(self) -> Optional[int]:
"""Size of the payload."""
total = 0
for part, encoding, te_encoding in self._parts:
if encoding or te_encoding or part.size is None:
return None
total += int(
2
+ len(self._boundary)
+ 2
+ part.size # b'--'+self._boundary+b'\r\n'
+ len(part._binary_headers)
+ 2 # b'\r\n'
)
total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n'
return total
async def write(self, writer: Any, close_boundary: bool = True) -> None:
"""Write body."""
for part, encoding, te_encoding in self._parts:
await writer.write(b"--" + self._boundary + b"\r\n")
await writer.write(part._binary_headers)
if encoding or te_encoding:
w = MultipartPayloadWriter(writer)
if encoding:
w.enable_compression(encoding)
if te_encoding:
w.enable_encoding(te_encoding)
await part.write(w) # type: ignore[arg-type]
await w.write_eof()
else:
await part.write(writer)
await writer.write(b"\r\n")
if close_boundary:
await writer.write(b"--" + self._boundary + b"--\r\n")
class MultipartPayloadWriter:
def __init__(self, writer: Any) -> None:
self._writer = writer
self._encoding: Optional[str] = None
self._compress: Any = None
self._encoding_buffer: Optional[bytearray] = None
def enable_encoding(self, encoding: str) -> None:
if encoding == "base64":
self._encoding = encoding
self._encoding_buffer = bytearray()
elif encoding == "quoted-printable":
self._encoding = "quoted-printable"
def enable_compression(
self, encoding: str = "deflate", strategy: int = zlib.Z_DEFAULT_STRATEGY
) -> None:
zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else -zlib.MAX_WBITS
self._compress = zlib.compressobj(wbits=zlib_mode, strategy=strategy)
async def write_eof(self) -> None:
if self._compress is not None:
chunk = self._compress.flush()
if chunk:
self._compress = None
await self.write(chunk)
if self._encoding == "base64":
if self._encoding_buffer:
await self._writer.write(base64.b64encode(self._encoding_buffer))
async def write(self, chunk: bytes) -> None:
if self._compress is not None:
if chunk:
chunk = self._compress.compress(chunk)
if not chunk:
return
if self._encoding == "base64":
buf = self._encoding_buffer
assert buf is not None
buf.extend(chunk)
if buf:
div, mod = divmod(len(buf), 3)
enc_chunk, self._encoding_buffer = (buf[: div * 3], buf[div * 3 :])
if enc_chunk:
b64chunk = base64.b64encode(enc_chunk)
await self._writer.write(b64chunk)
elif self._encoding == "quoted-printable":
await self._writer.write(binascii.b2a_qp(chunk))
else:
await self._writer.write(chunk)
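# --- Illustrative sketch (not part of aiohttp) ------------------------------
# Why write() above keeps a 3-byte-aligned buffer for base64: encoding only
# complete 3-byte groups per chunk means no '=' padding appears mid-stream;
# write_eof() encodes (and pads) whatever remains.
def _demo_base64_alignment() -> None:  # hypothetical helper
    head, tail = b"hello"[:3], b"hello"[3:]  # divmod(5, 3) == (1, 2)
    assert base64.b64encode(head) == b"aGVs"  # no padding mid-stream
    assert base64.b64encode(tail) == b"bG8="  # padding only at the end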
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/client_reqrep.py | import asyncio
import codecs
import functools
import io
import re
import sys
import traceback
import warnings
from hashlib import md5, sha1, sha256
from http.cookies import CookieError, Morsel, SimpleCookie
from types import MappingProxyType, TracebackType
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
cast,
)
import attr
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, helpers, http, multipart, payload
from .abc import AbstractStreamWriter
from .client_exceptions import (
ClientConnectionError,
ClientOSError,
ClientResponseError,
ContentTypeError,
InvalidURL,
ServerFingerprintMismatch,
)
from .formdata import FormData
from .helpers import (
PY_36,
BaseTimerContext,
BasicAuth,
HeadersMixin,
TimerNoop,
noop,
reify,
set_result,
)
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter
from .log import client_logger
from .streams import StreamReader
from .typedefs import (
DEFAULT_JSON_DECODER,
JSONDecoder,
LooseCookies,
LooseHeaders,
RawHeaders,
)
try:
import ssl
from ssl import SSLContext
except ImportError: # pragma: no cover
ssl = None # type: ignore[assignment]
SSLContext = object # type: ignore[misc,assignment]
try:
import cchardet as chardet
except ImportError: # pragma: no cover
import charset_normalizer as chardet # type: ignore[no-redef]
__all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint")
if TYPE_CHECKING: # pragma: no cover
from .client import ClientSession
from .connector import Connection
from .tracing import Trace
json_re = re.compile(r"^application/(?:[\w.+-]+?\+)?json")
@attr.s(auto_attribs=True, frozen=True, slots=True)
class ContentDisposition:
type: Optional[str]
parameters: "MappingProxyType[str, str]"
filename: Optional[str]
@attr.s(auto_attribs=True, frozen=True, slots=True)
class RequestInfo:
url: URL
method: str
headers: "CIMultiDictProxy[str]"
real_url: URL = attr.ib()
@real_url.default
def real_url_default(self) -> URL:
return self.url
class Fingerprint:
HASHFUNC_BY_DIGESTLEN = {
16: md5,
20: sha1,
32: sha256,
}
def __init__(self, fingerprint: bytes) -> None:
digestlen = len(fingerprint)
hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)
if not hashfunc:
raise ValueError("fingerprint has invalid length")
elif hashfunc is md5 or hashfunc is sha1:
            raise ValueError(
                "md5 and sha1 are insecure and not supported. Use sha256."
            )
self._hashfunc = hashfunc
self._fingerprint = fingerprint
@property
def fingerprint(self) -> bytes:
return self._fingerprint
def check(self, transport: asyncio.Transport) -> None:
if not transport.get_extra_info("sslcontext"):
return
sslobj = transport.get_extra_info("ssl_object")
cert = sslobj.getpeercert(binary_form=True)
got = self._hashfunc(cert).digest()
if got != self._fingerprint:
host, port, *_ = transport.get_extra_info("peername")
raise ServerFingerprintMismatch(self._fingerprint, got, host, port)
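# --- Illustrative sketch (not part of aiohttp) ------------------------------
# Pinning a server certificate: pass the SHA-256 digest of the expected DER
# certificate as the ssl= argument of a request. The certificate bytes below
# are placeholders.
def _demo_fingerprint() -> Fingerprint:  # hypothetical helper
    expected = sha256(b"<DER-encoded certificate bytes>").digest()  # 32 bytes
    return Fingerprint(expected)  # usable as ssl=... in ClientSession.request()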
if ssl is not None:
SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))
else: # pragma: no cover
SSL_ALLOWED_TYPES = type(None)
def _merge_ssl_params(
ssl: Union["SSLContext", bool, Fingerprint, None],
verify_ssl: Optional[bool],
ssl_context: Optional["SSLContext"],
fingerprint: Optional[bytes],
) -> Union["SSLContext", bool, Fingerprint, None]:
if verify_ssl is not None and not verify_ssl:
warnings.warn(
"verify_ssl is deprecated, use ssl=False instead",
DeprecationWarning,
stacklevel=3,
)
if ssl is not None:
raise ValueError(
"verify_ssl, ssl_context, fingerprint and ssl "
"parameters are mutually exclusive"
)
else:
ssl = False
if ssl_context is not None:
warnings.warn(
"ssl_context is deprecated, use ssl=context instead",
DeprecationWarning,
stacklevel=3,
)
if ssl is not None:
raise ValueError(
"verify_ssl, ssl_context, fingerprint and ssl "
"parameters are mutually exclusive"
)
else:
ssl = ssl_context
if fingerprint is not None:
warnings.warn(
"fingerprint is deprecated, " "use ssl=Fingerprint(fingerprint) instead",
DeprecationWarning,
stacklevel=3,
)
if ssl is not None:
raise ValueError(
"verify_ssl, ssl_context, fingerprint and ssl "
"parameters are mutually exclusive"
)
else:
ssl = Fingerprint(fingerprint)
if not isinstance(ssl, SSL_ALLOWED_TYPES):
raise TypeError(
"ssl should be SSLContext, bool, Fingerprint or None, "
"got {!r} instead.".format(ssl)
)
return ssl
@attr.s(auto_attribs=True, slots=True, frozen=True)
class ConnectionKey:
    # The key should contain information about the proxy / TLS in use
    # to prevent reusing the wrong connections from a pool.
host: str
port: Optional[int]
is_ssl: bool
ssl: Union[SSLContext, None, bool, Fingerprint]
proxy: Optional[URL]
proxy_auth: Optional[BasicAuth]
proxy_headers_hash: Optional[int] # hash(CIMultiDict)
def _is_expected_content_type(
response_content_type: str, expected_content_type: str
) -> bool:
if expected_content_type == "application/json":
return json_re.match(response_content_type) is not None
return expected_content_type in response_content_type
class ClientRequest:
GET_METHODS = {
hdrs.METH_GET,
hdrs.METH_HEAD,
hdrs.METH_OPTIONS,
hdrs.METH_TRACE,
}
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
DEFAULT_HEADERS = {
hdrs.ACCEPT: "*/*",
hdrs.ACCEPT_ENCODING: "gzip, deflate",
}
body = b""
auth = None
response = None
_writer = None # async task for streaming data
_continue = None # waiter future for '100 Continue' response
    # N.B.
    # Adding a __del__ method that closes self._writer makes no sense:
    # _writer runs an instance method as a task, so it keeps a reference
    # to self, and the finalizer will not be called until the writer has
    # finished.
def __init__(
self,
method: str,
url: URL,
*,
params: Optional[Mapping[str, str]] = None,
headers: Optional[LooseHeaders] = None,
skip_auto_headers: Iterable[str] = frozenset(),
data: Any = None,
cookies: Optional[LooseCookies] = None,
auth: Optional[BasicAuth] = None,
version: http.HttpVersion = http.HttpVersion11,
compress: Optional[str] = None,
chunked: Optional[bool] = None,
expect100: bool = False,
loop: Optional[asyncio.AbstractEventLoop] = None,
response_class: Optional[Type["ClientResponse"]] = None,
proxy: Optional[URL] = None,
proxy_auth: Optional[BasicAuth] = None,
timer: Optional[BaseTimerContext] = None,
session: Optional["ClientSession"] = None,
ssl: Union[SSLContext, bool, Fingerprint, None] = None,
proxy_headers: Optional[LooseHeaders] = None,
traces: Optional[List["Trace"]] = None,
):
if loop is None:
loop = asyncio.get_event_loop()
assert isinstance(url, URL), url
assert isinstance(proxy, (URL, type(None))), proxy
# FIXME: session is None in tests only, need to fix tests
# assert session is not None
self._session = cast("ClientSession", session)
if params:
q = MultiDict(url.query)
url2 = url.with_query(params)
q.extend(url2.query)
url = url.with_query(q)
self.original_url = url
self.url = url.with_fragment(None)
self.method = method.upper()
self.chunked = chunked
self.compress = compress
self.loop = loop
self.length = None
if response_class is None:
real_response_class = ClientResponse
else:
real_response_class = response_class
self.response_class: Type[ClientResponse] = real_response_class
self._timer = timer if timer is not None else TimerNoop()
self._ssl = ssl
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self.update_version(version)
self.update_host(url)
self.update_headers(headers)
self.update_auto_headers(skip_auto_headers)
self.update_cookies(cookies)
self.update_content_encoding(data)
self.update_auth(auth)
self.update_proxy(proxy, proxy_auth, proxy_headers)
self.update_body_from_data(data)
if data is not None or self.method not in self.GET_METHODS:
self.update_transfer_encoding()
self.update_expect_continue(expect100)
if traces is None:
traces = []
self._traces = traces
def is_ssl(self) -> bool:
return self.url.scheme in ("https", "wss")
@property
def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]:
return self._ssl
@property
def connection_key(self) -> ConnectionKey:
proxy_headers = self.proxy_headers
if proxy_headers:
h: Optional[int] = hash(tuple((k, v) for k, v in proxy_headers.items()))
else:
h = None
return ConnectionKey(
self.host,
self.port,
self.is_ssl(),
self.ssl,
self.proxy,
self.proxy_auth,
h,
)
@property
def host(self) -> str:
ret = self.url.raw_host
assert ret is not None
return ret
@property
def port(self) -> Optional[int]:
return self.url.port
@property
def request_info(self) -> RequestInfo:
headers: CIMultiDictProxy[str] = CIMultiDictProxy(self.headers)
return RequestInfo(self.url, self.method, headers, self.original_url)
def update_host(self, url: URL) -> None:
"""Update destination host, port and connection type (ssl)."""
# get host/port
if not url.raw_host:
raise InvalidURL(url)
# basic auth info
username, password = url.user, url.password
if username:
self.auth = helpers.BasicAuth(username, password or "")
def update_version(self, version: Union[http.HttpVersion, str]) -> None:
"""Convert request version to two elements tuple.
parser HTTP version '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [part.strip() for part in version.split(".", 1)]
try:
version = http.HttpVersion(int(v[0]), int(v[1]))
except ValueError:
raise ValueError(
f"Can not parse http version number: {version}"
) from None
self.version = version
def update_headers(self, headers: Optional[LooseHeaders]) -> None:
"""Update request headers."""
self.headers: CIMultiDict[str] = CIMultiDict()
# add host
netloc = cast(str, self.url.raw_host)
if helpers.is_ipv6_address(netloc):
netloc = f"[{netloc}]"
if self.url.port is not None and not self.url.is_default_port():
netloc += ":" + str(self.url.port)
self.headers[hdrs.HOST] = netloc
if headers:
if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
headers = headers.items() # type: ignore[assignment]
for key, value in headers: # type: ignore[misc]
# A special case for Host header
if key.lower() == "host":
self.headers[key] = value
else:
self.headers.add(key, value)
def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:
self.skip_auto_headers = CIMultiDict(
(hdr, None) for hdr in sorted(skip_auto_headers)
)
used_headers = self.headers.copy()
used_headers.extend(self.skip_auto_headers) # type: ignore[arg-type]
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in used_headers:
self.headers.add(hdr, val)
if hdrs.USER_AGENT not in used_headers:
self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
"""Update request cookies header."""
if not cookies:
return
c: SimpleCookie[str] = SimpleCookie()
if hdrs.COOKIE in self.headers:
c.load(self.headers.get(hdrs.COOKIE, ""))
del self.headers[hdrs.COOKIE]
if isinstance(cookies, Mapping):
iter_cookies = cookies.items()
else:
iter_cookies = cookies # type: ignore[assignment]
for name, value in iter_cookies:
if isinstance(value, Morsel):
# Preserve coded_value
mrsl_val = value.get(value.key, Morsel())
mrsl_val.set(value.key, value.value, value.coded_value)
c[name] = mrsl_val
else:
c[name] = value # type: ignore[assignment]
self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
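    # Example (illustrative sketch, not part of the original source): given
    # cookies {"session": "abc"}, the code above produces the header
    #
    #     Cookie: session=abc
    #
    # Morsel values passed in keep their original coded_value.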
def update_content_encoding(self, data: Any) -> None:
"""Set request content encoding."""
if data is None:
return
enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower()
if enc:
if self.compress:
raise ValueError(
"compress can not be set " "if Content-Encoding header is set"
)
elif self.compress:
if not isinstance(self.compress, str):
self.compress = "deflate"
self.headers[hdrs.CONTENT_ENCODING] = self.compress
self.chunked = True # enable chunked, no need to deal with length
def update_transfer_encoding(self) -> None:
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower()
if "chunked" in te:
if self.chunked:
raise ValueError(
"chunked can not be set "
'if "Transfer-Encoding: chunked" header is set'
)
elif self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
raise ValueError(
"chunked can not be set " "if Content-Length header is set"
)
self.headers[hdrs.TRANSFER_ENCODING] = "chunked"
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_auth(self, auth: Optional[BasicAuth]) -> None:
"""Set basic auth."""
if auth is None:
auth = self.auth
if auth is None:
return
if not isinstance(auth, helpers.BasicAuth):
raise TypeError("BasicAuth() tuple is required instead")
self.headers[hdrs.AUTHORIZATION] = auth.encode()
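    # Example (illustrative sketch): helpers.BasicAuth("user", "pass").encode()
    # returns "Basic dXNlcjpwYXNz" (base64 of "user:pass"), which becomes the
    # value of the Authorization header.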
def update_body_from_data(self, body: Any) -> None:
if body is None:
return
# FormData
if isinstance(body, FormData):
body = body()
try:
body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
except payload.LookupError:
body = FormData(body)()
self.body = body
# enable chunked encoding if needed
if not self.chunked:
if hdrs.CONTENT_LENGTH not in self.headers:
size = body.size
if size is None:
self.chunked = True
                else:
                    # CONTENT_LENGTH was already checked absent above; set it directly.
                    self.headers[hdrs.CONTENT_LENGTH] = str(size)
# copy payload headers
assert body.headers
for (key, value) in body.headers.items():
if key in self.headers:
continue
if key in self.skip_auto_headers:
continue
self.headers[key] = value
def update_expect_continue(self, expect: bool = False) -> None:
if expect:
self.headers[hdrs.EXPECT] = "100-continue"
elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue":
expect = True
if expect:
self._continue = self.loop.create_future()
def update_proxy(
self,
proxy: Optional[URL],
proxy_auth: Optional[BasicAuth],
proxy_headers: Optional[LooseHeaders],
) -> None:
if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
raise ValueError("proxy_auth must be None or BasicAuth() tuple")
self.proxy = proxy
self.proxy_auth = proxy_auth
self.proxy_headers = proxy_headers
def keep_alive(self) -> bool:
if self.version < HttpVersion10:
# keep alive not supported at all
return False
if self.version == HttpVersion10:
if self.headers.get(hdrs.CONNECTION) == "keep-alive":
return True
else: # no headers means we close for Http 1.0
return False
elif self.headers.get(hdrs.CONNECTION) == "close":
return False
return True
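    # Decision summary for keep_alive() (added for clarity):
    #   HTTP/0.9          -> False (keep-alive not supported at all)
    #   HTTP/1.0          -> True only with "Connection: keep-alive"
    #   HTTP/1.1 and up   -> True unless "Connection: close"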
async def write_bytes(
self, writer: AbstractStreamWriter, conn: "Connection"
) -> None:
"""Support coroutines that yields bytes objects."""
# 100 response
if self._continue is not None:
await writer.drain()
await self._continue
protocol = conn.protocol
assert protocol is not None
try:
if isinstance(self.body, payload.Payload):
await self.body.write(writer)
else:
if isinstance(self.body, (bytes, bytearray)):
self.body = (self.body,) # type: ignore[assignment]
for chunk in self.body:
await writer.write(chunk) # type: ignore[arg-type]
await writer.write_eof()
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
protocol.set_exception(exc)
else:
new_exc = ClientOSError(
exc.errno, "Can not write request body for %s" % self.url
)
new_exc.__context__ = exc
new_exc.__cause__ = exc
protocol.set_exception(new_exc)
except asyncio.CancelledError as exc:
if not conn.closed:
protocol.set_exception(exc)
except Exception as exc:
protocol.set_exception(exc)
finally:
self._writer = None
async def send(self, conn: "Connection") -> "ClientResponse":
# Specify request target:
# - CONNECT request must send authority form URI
# - not CONNECT proxy must send absolute form URI
# - most common is origin form URI
if self.method == hdrs.METH_CONNECT:
connect_host = self.url.raw_host
assert connect_host is not None
if helpers.is_ipv6_address(connect_host):
connect_host = f"[{connect_host}]"
path = f"{connect_host}:{self.url.port}"
elif self.proxy and not self.is_ssl():
path = str(self.url)
else:
path = self.url.raw_path
if self.url.raw_query_string:
path += "?" + self.url.raw_query_string
protocol = conn.protocol
assert protocol is not None
writer = StreamWriter(
protocol,
self.loop,
on_chunk_sent=functools.partial(
self._on_chunk_request_sent, self.method, self.url
),
on_headers_sent=functools.partial(
self._on_headers_request_sent, self.method, self.url
),
)
if self.compress:
writer.enable_compression(self.compress)
if self.chunked is not None:
writer.enable_chunking()
# set default content-type
if (
self.method in self.POST_METHODS
and hdrs.CONTENT_TYPE not in self.skip_auto_headers
and hdrs.CONTENT_TYPE not in self.headers
):
self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream"
# set the connection header
connection = self.headers.get(hdrs.CONNECTION)
if not connection:
if self.keep_alive():
if self.version == HttpVersion10:
connection = "keep-alive"
else:
if self.version == HttpVersion11:
connection = "close"
if connection is not None:
self.headers[hdrs.CONNECTION] = connection
# status + headers
status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format(
self.method, path, self.version
)
await writer.write_headers(status_line, self.headers)
self._writer = self.loop.create_task(self.write_bytes(writer, conn))
response_class = self.response_class
assert response_class is not None
self.response = response_class(
self.method,
self.original_url,
writer=self._writer,
continue100=self._continue,
timer=self._timer,
request_info=self.request_info,
traces=self._traces,
loop=self.loop,
session=self._session,
)
return self.response
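    # Request-target forms chosen above (added for clarity, RFC 7230 section 5.3):
    #   CONNECT                  -> "example.com:443"      (authority-form)
    #   proxied, non-TLS request -> "http://example.com/a" (absolute-form)
    #   everything else          -> "/a?x=1"               (origin-form)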
async def close(self) -> None:
if self._writer is not None:
try:
await self._writer
finally:
self._writer = None
def terminate(self) -> None:
if self._writer is not None:
if not self.loop.is_closed():
self._writer.cancel()
self._writer = None
async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None:
for trace in self._traces:
await trace.send_request_chunk_sent(method, url, chunk)
async def _on_headers_request_sent(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
for trace in self._traces:
await trace.send_request_headers(method, url, headers)
class ClientResponse(HeadersMixin):
# from the Status-Line of the response
version = None # HTTP-Version
status: int = None # type: ignore[assignment] # Status-Code
reason = None # Reason-Phrase
content: StreamReader = None # type: ignore[assignment] # Payload stream
_headers: "CIMultiDictProxy[str]" = None # type: ignore[assignment]
_raw_headers: RawHeaders = None # type: ignore[assignment] # Response raw headers
_connection = None # current connection
_source_traceback = None
    # set up by ClientRequest after ClientResponse object creation;
    # the post-init stage avoids changing the ctor signature
    _closed = True  # allow __del__ for a response that was never properly initialized
_released = False
def __init__(
self,
method: str,
url: URL,
*,
writer: "asyncio.Task[None]",
continue100: Optional["asyncio.Future[bool]"],
timer: BaseTimerContext,
request_info: RequestInfo,
traces: List["Trace"],
loop: asyncio.AbstractEventLoop,
session: "ClientSession",
) -> None:
assert isinstance(url, URL)
self.method = method
self.cookies: SimpleCookie[str] = SimpleCookie()
self._real_url = url
self._url = url.with_fragment(None)
self._body: Any = None
self._writer: Optional[asyncio.Task[None]] = writer
self._continue = continue100 # None by default
self._closed = True
self._history: Tuple[ClientResponse, ...] = ()
self._request_info = request_info
self._timer = timer if timer is not None else TimerNoop()
self._cache: Dict[str, Any] = {}
self._traces = traces
self._loop = loop
# store a reference to session #1985
self._session: Optional[ClientSession] = session
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
@reify
def url(self) -> URL:
return self._url
@reify
def url_obj(self) -> URL:
warnings.warn("Deprecated, use .url #1654", DeprecationWarning, stacklevel=2)
return self._url
@reify
def real_url(self) -> URL:
return self._real_url
@reify
def host(self) -> str:
assert self._url.host is not None
return self._url.host
@reify
def headers(self) -> "CIMultiDictProxy[str]":
return self._headers
@reify
def raw_headers(self) -> RawHeaders:
return self._raw_headers
@reify
def request_info(self) -> RequestInfo:
return self._request_info
@reify
def content_disposition(self) -> Optional[ContentDisposition]:
raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
if raw is None:
return None
disposition_type, params_dct = multipart.parse_content_disposition(raw)
params = MappingProxyType(params_dct)
filename = multipart.content_disposition_filename(params)
return ContentDisposition(disposition_type, params, filename)
def __del__(self, _warnings: Any = warnings) -> None:
if self._closed:
return
if self._connection is not None:
self._connection.release()
self._cleanup_writer()
if self._loop.get_debug():
if PY_36:
kwargs = {"source": self}
else:
kwargs = {}
_warnings.warn(f"Unclosed response {self!r}", ResourceWarning, **kwargs)
context = {"client_response": self, "message": "Unclosed response"}
if self._source_traceback:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
def __repr__(self) -> str:
out = io.StringIO()
ascii_encodable_url = str(self.url)
if self.reason:
ascii_encodable_reason = self.reason.encode(
"ascii", "backslashreplace"
).decode("ascii")
else:
ascii_encodable_reason = self.reason
print(
"<ClientResponse({}) [{} {}]>".format(
ascii_encodable_url, self.status, ascii_encodable_reason
),
file=out,
)
print(self.headers, file=out)
return out.getvalue()
@property
def connection(self) -> Optional["Connection"]:
return self._connection
@reify
def history(self) -> Tuple["ClientResponse", ...]:
"""A sequence of of responses, if redirects occurred."""
return self._history
@reify
def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]":
links_str = ", ".join(self.headers.getall("link", []))
if not links_str:
return MultiDictProxy(MultiDict())
links: MultiDict[MultiDictProxy[Union[str, URL]]] = MultiDict()
for val in re.split(r",(?=\s*<)", links_str):
match = re.match(r"\s*<(.*)>(.*)", val)
if match is None: # pragma: no cover
# the check exists to suppress mypy error
continue
url, params_str = match.groups()
params = params_str.split(";")[1:]
link: MultiDict[Union[str, URL]] = MultiDict()
for param in params:
match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M)
if match is None: # pragma: no cover
# the check exists to suppress mypy error
continue
key, _, value, _ = match.groups()
link.add(key, value)
key = link.get("rel", url) # type: ignore[assignment]
link.add("url", self.url.join(URL(url)))
links.add(key, MultiDictProxy(link))
return MultiDictProxy(links)
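    # Example (illustrative sketch): for a response carrying
    #
    #     Link: <https://example.com/page/2>; rel="next"
    #
    # resp.links["next"]["url"] would be URL("https://example.com/page/2").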
async def start(self, connection: "Connection") -> "ClientResponse":
"""Start response processing."""
self._closed = False
self._protocol = connection.protocol
self._connection = connection
with self._timer:
while True:
# read response
try:
protocol = self._protocol
message, payload = await protocol.read() # type: ignore[union-attr]
except http.HttpProcessingError as exc:
raise ClientResponseError(
self.request_info,
self.history,
status=exc.code,
message=exc.message,
headers=exc.headers,
) from exc
if message.code < 100 or message.code > 199 or message.code == 101:
break
if self._continue is not None:
set_result(self._continue, True)
self._continue = None
# payload eof handler
payload.on_eof(self._response_eof)
# response status
self.version = message.version
self.status = message.code
self.reason = message.reason
# headers
self._headers = message.headers # type is CIMultiDictProxy
self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]
# payload
self.content = payload
# cookies
for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
try:
self.cookies.load(hdr)
except CookieError as exc:
client_logger.warning("Can not load response cookies: %s", exc)
return self
def _response_eof(self) -> None:
if self._closed:
return
if self._connection is not None:
            # For WebSocket, the protocol could be None because
            # the connection could be detached.
if (
self._connection.protocol is not None
and self._connection.protocol.upgraded
):
return
self._connection.release()
self._connection = None
self._closed = True
self._cleanup_writer()
@property
def closed(self) -> bool:
return self._closed
def close(self) -> None:
if not self._released:
self._notify_content()
if self._closed:
return
self._closed = True
if self._loop is None or self._loop.is_closed():
return
if self._connection is not None:
self._connection.close()
self._connection = None
self._cleanup_writer()
def release(self) -> Any:
if not self._released:
self._notify_content()
if self._closed:
return noop()
self._closed = True
if self._connection is not None:
self._connection.release()
self._connection = None
self._cleanup_writer()
return noop()
@property
def ok(self) -> bool:
"""Returns ``True`` if ``status`` is less than ``400``, ``False`` if not.
This is **not** a check for ``200 OK`` but a check that the response
status is under 400.
"""
        return self.status < 400
def raise_for_status(self) -> None:
if not self.ok:
            # reason is always set for a started response
assert self.reason is not None
self.release()
raise ClientResponseError(
self.request_info,
self.history,
status=self.status,
message=self.reason,
headers=self.headers,
)
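    # Example (illustrative sketch), assuming an existing ClientSession
    # named `session`:
    #
    #     resp = await session.get("https://example.com/missing")
    #     resp.raise_for_status()  # raises ClientResponseError for status >= 400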
def _cleanup_writer(self) -> None:
if self._writer is not None:
self._writer.cancel()
self._writer = None
self._session = None
def _notify_content(self) -> None:
content = self.content
if content and content.exception() is None:
content.set_exception(ClientConnectionError("Connection closed"))
self._released = True
async def wait_for_close(self) -> None:
if self._writer is not None:
try:
await self._writer
finally:
self._writer = None
self.release()
async def read(self) -> bytes:
"""Read response payload."""
if self._body is None:
try:
self._body = await self.content.read()
for trace in self._traces:
await trace.send_response_chunk_received(
self.method, self.url, self._body
)
except BaseException:
self.close()
raise
elif self._released:
raise ClientConnectionError("Connection closed")
return self._body # type: ignore[no-any-return]
def get_encoding(self) -> str:
ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
mimetype = helpers.parse_mimetype(ctype)
encoding = mimetype.parameters.get("charset")
if encoding:
try:
codecs.lookup(encoding)
except LookupError:
encoding = None
if not encoding:
if mimetype.type == "application" and (
mimetype.subtype == "json" or mimetype.subtype == "rdap"
):
# RFC 7159 states that the default encoding is UTF-8.
# RFC 7483 defines application/rdap+json
encoding = "utf-8"
elif self._body is None:
raise RuntimeError(
"Cannot guess the encoding of " "a not yet read body"
)
else:
encoding = chardet.detect(self._body)["encoding"]
if not encoding:
encoding = "utf-8"
return encoding
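    # Resolution order used above (added for clarity): a valid charset from
    # the Content-Type header wins; application/json and application/rdap+json
    # default to UTF-8 per RFC 7159 / RFC 7483; otherwise chardet inspects the
    # already-read body, falling back to UTF-8.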
async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
"""Read response payload and decode."""
if self._body is None:
await self.read()
if encoding is None:
encoding = self.get_encoding()
return self._body.decode( # type: ignore[no-any-return,union-attr]
encoding, errors=errors
)
async def json(
self,
*,
encoding: Optional[str] = None,
loads: JSONDecoder = DEFAULT_JSON_DECODER,
content_type: Optional[str] = "application/json",
) -> Any:
"""Read and decodes JSON response."""
if self._body is None:
await self.read()
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
if not _is_expected_content_type(ctype, content_type):
raise ContentTypeError(
self.request_info,
self.history,
message=(
"Attempt to decode JSON with " "unexpected mimetype: %s" % ctype
),
headers=self.headers,
)
stripped = self._body.strip() # type: ignore[union-attr]
if not stripped:
return None
if encoding is None:
encoding = self.get_encoding()
return loads(stripped.decode(encoding))
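    # Example (illustrative sketch), assuming an existing ClientSession
    # named `session`:
    #
    #     async with session.get("https://example.com/api") as resp:
    #         data = await resp.json()  # validates the mimetype by default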
async def __aenter__(self) -> "ClientResponse":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
# similar to _RequestContextManager, we do not need to check
# for exceptions, response object can close connection
# if state is broken
self.release()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/log.py | import logging
access_logger = logging.getLogger("aiohttp.access")
client_logger = logging.getLogger("aiohttp.client")
internal_logger = logging.getLogger("aiohttp.internal")
server_logger = logging.getLogger("aiohttp.server")
web_logger = logging.getLogger("aiohttp.web")
ws_logger = logging.getLogger("aiohttp.websocket")
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/client_exceptions.py | """HTTP related errors."""
import asyncio
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
from .http_parser import RawResponseMessage
from .typedefs import LooseHeaders
try:
import ssl
SSLContext = ssl.SSLContext
except ImportError: # pragma: no cover
ssl = SSLContext = None # type: ignore[assignment]
if TYPE_CHECKING: # pragma: no cover
from .client_reqrep import ClientResponse, ConnectionKey, Fingerprint, RequestInfo
else:
RequestInfo = ClientResponse = ConnectionKey = None
__all__ = (
"ClientError",
"ClientConnectionError",
"ClientOSError",
"ClientConnectorError",
"ClientProxyConnectionError",
"ClientSSLError",
"ClientConnectorSSLError",
"ClientConnectorCertificateError",
"ServerConnectionError",
"ServerTimeoutError",
"ServerDisconnectedError",
"ServerFingerprintMismatch",
"ClientResponseError",
"ClientHttpProxyError",
"WSServerHandshakeError",
"ContentTypeError",
"ClientPayloadError",
"InvalidURL",
)
class ClientError(Exception):
"""Base class for client connection errors."""
class ClientResponseError(ClientError):
"""Connection error during reading response.
request_info: instance of RequestInfo
"""
def __init__(
self,
request_info: RequestInfo,
history: Tuple[ClientResponse, ...],
*,
code: Optional[int] = None,
status: Optional[int] = None,
message: str = "",
headers: Optional[LooseHeaders] = None,
) -> None:
self.request_info = request_info
if code is not None:
if status is not None:
raise ValueError(
"Both code and status arguments are provided; "
"code is deprecated, use status instead"
)
warnings.warn(
"code argument is deprecated, use status instead",
DeprecationWarning,
stacklevel=2,
)
if status is not None:
self.status = status
elif code is not None:
self.status = code
else:
self.status = 0
self.message = message
self.headers = headers
self.history = history
self.args = (request_info, history)
def __str__(self) -> str:
return "{}, message={!r}, url={!r}".format(
self.status,
self.message,
self.request_info.real_url,
)
def __repr__(self) -> str:
args = f"{self.request_info!r}, {self.history!r}"
if self.status != 0:
args += f", status={self.status!r}"
if self.message != "":
args += f", message={self.message!r}"
if self.headers is not None:
args += f", headers={self.headers!r}"
return f"{type(self).__name__}({args})"
@property
def code(self) -> int:
warnings.warn(
"code property is deprecated, use status instead",
DeprecationWarning,
stacklevel=2,
)
return self.status
@code.setter
def code(self, value: int) -> None:
warnings.warn(
"code property is deprecated, use status instead",
DeprecationWarning,
stacklevel=2,
)
self.status = value
class ContentTypeError(ClientResponseError):
"""ContentType found is not valid."""
class WSServerHandshakeError(ClientResponseError):
"""websocket server handshake error."""
class ClientHttpProxyError(ClientResponseError):
"""HTTP proxy error.
Raised in :class:`aiohttp.connector.TCPConnector` if
    the proxy responds with a status other than ``200 OK``
    to a ``CONNECT`` request.
"""
class TooManyRedirects(ClientResponseError):
"""Client was redirected too many times."""
class ClientConnectionError(ClientError):
"""Base class for client socket errors."""
class ClientOSError(ClientConnectionError, OSError):
"""OSError error."""
class ClientConnectorError(ClientOSError):
"""Client connector error.
Raised in :class:`aiohttp.connector.TCPConnector` if
    a connection cannot be established.
"""
def __init__(self, connection_key: ConnectionKey, os_error: OSError) -> None:
self._conn_key = connection_key
self._os_error = os_error
super().__init__(os_error.errno, os_error.strerror)
self.args = (connection_key, os_error)
@property
def os_error(self) -> OSError:
return self._os_error
@property
def host(self) -> str:
return self._conn_key.host
@property
def port(self) -> Optional[int]:
return self._conn_key.port
@property
def ssl(self) -> Union[SSLContext, None, bool, "Fingerprint"]:
return self._conn_key.ssl
def __str__(self) -> str:
return "Cannot connect to host {0.host}:{0.port} ssl:{1} [{2}]".format(
self, self.ssl if self.ssl is not None else "default", self.strerror
)
    # OSError.__reduce__ does too much black magic
__reduce__ = BaseException.__reduce__
class ClientProxyConnectionError(ClientConnectorError):
"""Proxy connection error.
Raised in :class:`aiohttp.connector.TCPConnector` if
    the connection to the proxy cannot be established.
"""
class UnixClientConnectorError(ClientConnectorError):
"""Unix connector error.
Raised in :py:class:`aiohttp.connector.UnixConnector`
    if the connection to a Unix socket cannot be established.
"""
def __init__(
self, path: str, connection_key: ConnectionKey, os_error: OSError
) -> None:
self._path = path
super().__init__(connection_key, os_error)
@property
def path(self) -> str:
return self._path
def __str__(self) -> str:
return "Cannot connect to unix socket {0.path} ssl:{1} [{2}]".format(
self, self.ssl if self.ssl is not None else "default", self.strerror
)
class ServerConnectionError(ClientConnectionError):
"""Server connection errors."""
class ServerDisconnectedError(ServerConnectionError):
"""Server disconnected."""
def __init__(self, message: Union[RawResponseMessage, str, None] = None) -> None:
if message is None:
message = "Server disconnected"
self.args = (message,)
self.message = message
class ServerTimeoutError(ServerConnectionError, asyncio.TimeoutError):
"""Server timeout error."""
class ServerFingerprintMismatch(ServerConnectionError):
"""SSL certificate does not match expected fingerprint."""
def __init__(self, expected: bytes, got: bytes, host: str, port: int) -> None:
self.expected = expected
self.got = got
self.host = host
self.port = port
self.args = (expected, got, host, port)
def __repr__(self) -> str:
return "<{} expected={!r} got={!r} host={!r} port={!r}>".format(
self.__class__.__name__, self.expected, self.got, self.host, self.port
)
class ClientPayloadError(ClientError):
"""Response payload error."""
class InvalidURL(ClientError, ValueError):
"""Invalid URL.
    The URL used for fetching is malformed, e.g. it does not contain the
    host part.
"""
# Derive from ValueError for backward compatibility
def __init__(self, url: Any) -> None:
# The type of url is not yarl.URL because the exception can be raised
# on URL(url) call
super().__init__(url)
@property
def url(self) -> Any:
return self.args[0]
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.url}>"
class ClientSSLError(ClientConnectorError):
"""Base error for ssl.*Errors."""
if ssl is not None:
cert_errors = (ssl.CertificateError,)
cert_errors_bases = (
ClientSSLError,
ssl.CertificateError,
)
ssl_errors = (ssl.SSLError,)
ssl_error_bases = (ClientSSLError, ssl.SSLError)
else: # pragma: no cover
cert_errors = tuple()
cert_errors_bases = (
ClientSSLError,
ValueError,
)
ssl_errors = tuple()
ssl_error_bases = (ClientSSLError,)
class ClientConnectorSSLError(*ssl_error_bases): # type: ignore[misc]
"""Response ssl error."""
class ClientConnectorCertificateError(*cert_errors_bases): # type: ignore[misc]
"""Response certificate error."""
def __init__(
self, connection_key: ConnectionKey, certificate_error: Exception
) -> None:
self._conn_key = connection_key
self._certificate_error = certificate_error
self.args = (connection_key, certificate_error)
@property
def certificate_error(self) -> Exception:
return self._certificate_error
@property
def host(self) -> str:
return self._conn_key.host
@property
def port(self) -> Optional[int]:
return self._conn_key.port
@property
def ssl(self) -> bool:
return self._conn_key.is_ssl
def __str__(self) -> str:
return (
"Cannot connect to host {0.host}:{0.port} ssl:{0.ssl} "
"[{0.certificate_error.__class__.__name__}: "
"{0.certificate_error.args}]".format(self)
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http_websocket.py | """WebSocket protocol versions 13 and 8."""
import asyncio
import collections
import json
import random
import re
import sys
import zlib
from enum import IntEnum
from struct import Struct
from typing import Any, Callable, List, Optional, Pattern, Set, Tuple, Union, cast
from .base_protocol import BaseProtocol
from .helpers import NO_EXTENSIONS
from .streams import DataQueue
from .typedefs import Final
__all__ = (
"WS_CLOSED_MESSAGE",
"WS_CLOSING_MESSAGE",
"WS_KEY",
"WebSocketReader",
"WebSocketWriter",
"WSMessage",
"WebSocketError",
"WSMsgType",
"WSCloseCode",
)
class WSCloseCode(IntEnum):
OK = 1000
GOING_AWAY = 1001
PROTOCOL_ERROR = 1002
UNSUPPORTED_DATA = 1003
ABNORMAL_CLOSURE = 1006
INVALID_TEXT = 1007
POLICY_VIOLATION = 1008
MESSAGE_TOO_BIG = 1009
MANDATORY_EXTENSION = 1010
INTERNAL_ERROR = 1011
SERVICE_RESTART = 1012
TRY_AGAIN_LATER = 1013
BAD_GATEWAY = 1014
ALLOWED_CLOSE_CODES: Final[Set[int]] = {int(i) for i in WSCloseCode}
class WSMsgType(IntEnum):
# websocket spec types
CONTINUATION = 0x0
TEXT = 0x1
BINARY = 0x2
PING = 0x9
PONG = 0xA
CLOSE = 0x8
# aiohttp specific types
CLOSING = 0x100
CLOSED = 0x101
ERROR = 0x102
text = TEXT
binary = BINARY
ping = PING
pong = PONG
close = CLOSE
closing = CLOSING
closed = CLOSED
error = ERROR
WS_KEY: Final[bytes] = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
UNPACK_LEN2 = Struct("!H").unpack_from
UNPACK_LEN3 = Struct("!Q").unpack_from
UNPACK_CLOSE_CODE = Struct("!H").unpack
PACK_LEN1 = Struct("!BB").pack
PACK_LEN2 = Struct("!BBH").pack
PACK_LEN3 = Struct("!BBQ").pack
PACK_CLOSE_CODE = Struct("!H").pack
MSG_SIZE: Final[int] = 2**14
DEFAULT_LIMIT: Final[int] = 2**16
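# Example (illustrative sketch): a final, unmasked 5-byte text frame packs
# its two header bytes as FIN|opcode and mask-bit|length:
#
#     PACK_LEN1(0x80 | WSMsgType.TEXT, 5)  # -> b"\x81\x05"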
_WSMessageBase = collections.namedtuple("_WSMessageBase", ["type", "data", "extra"])
class WSMessage(_WSMessageBase):
def json(self, *, loads: Callable[[Any], Any] = json.loads) -> Any:
"""Return parsed JSON data.
.. versionadded:: 0.22
"""
return loads(self.data)
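# Example (illustrative sketch): decoding a TEXT message carrying JSON:
#
#     msg = WSMessage(WSMsgType.TEXT, '{"op": "ping"}', None)
#     msg.json()  # -> {"op": "ping"}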
WS_CLOSED_MESSAGE = WSMessage(WSMsgType.CLOSED, None, None)
WS_CLOSING_MESSAGE = WSMessage(WSMsgType.CLOSING, None, None)
class WebSocketError(Exception):
"""WebSocket protocol parser error."""
def __init__(self, code: int, message: str) -> None:
self.code = code
super().__init__(code, message)
def __str__(self) -> str:
return cast(str, self.args[1])
class WSHandshakeError(Exception):
"""WebSocket protocol handshake error."""
native_byteorder: Final[str] = sys.byteorder
# Used by _websocket_mask_python
_XOR_TABLE: Final[List[bytes]] = [bytes(a ^ b for a in range(256)) for b in range(256)]
def _websocket_mask_python(mask: bytes, data: bytearray) -> None:
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytearray`
object of any length. The contents of `data` are masked with `mask`,
as specified in section 5.3 of RFC 6455.
Note that this function mutates the `data` argument.
This pure-python implementation may be replaced by an optimized
version when available.
"""
assert isinstance(data, bytearray), data
assert len(mask) == 4, mask
if data:
a, b, c, d = (_XOR_TABLE[n] for n in mask)
data[::4] = data[::4].translate(a)
data[1::4] = data[1::4].translate(b)
data[2::4] = data[2::4].translate(c)
data[3::4] = data[3::4].translate(d)
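# Example (illustrative sketch): masking is a per-byte XOR, so applying the
# same mask twice restores the original payload:
#
#     data = bytearray(b"hello")
#     _websocket_mask_python(b"\x01\x02\x03\x04", data)
#     _websocket_mask_python(b"\x01\x02\x03\x04", data)
#     assert data == bytearray(b"hello")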
if NO_EXTENSIONS: # pragma: no cover
_websocket_mask = _websocket_mask_python
else:
try:
from ._websocket import _websocket_mask_cython # type: ignore[import]
_websocket_mask = _websocket_mask_cython
except ImportError: # pragma: no cover
_websocket_mask = _websocket_mask_python
_WS_DEFLATE_TRAILING: Final[bytes] = bytes([0x00, 0x00, 0xFF, 0xFF])
_WS_EXT_RE: Final[Pattern[str]] = re.compile(
r"^(?:;\s*(?:"
r"(server_no_context_takeover)|"
r"(client_no_context_takeover)|"
r"(server_max_window_bits(?:=(\d+))?)|"
r"(client_max_window_bits(?:=(\d+))?)))*$"
)
_WS_EXT_RE_SPLIT: Final[Pattern[str]] = re.compile(r"permessage-deflate([^,]+)?")
def ws_ext_parse(extstr: Optional[str], isserver: bool = False) -> Tuple[int, bool]:
if not extstr:
return 0, False
compress = 0
notakeover = False
for ext in _WS_EXT_RE_SPLIT.finditer(extstr):
defext = ext.group(1)
        # Use compress = 15 when a bare `permessage-deflate` is received
if not defext:
compress = 15
break
match = _WS_EXT_RE.match(defext)
if match:
compress = 15
if isserver:
                # The server never fails to detect the compress handshake.
                # The server does not need to send max window bits to the client.
if match.group(4):
compress = int(match.group(4))
                    # Group 3 must match if group 4 matches.
                    # zlib does not support a window size of 8 bits.
                    # If the compression level is not supported,
                    # CONTINUE to the next extension.
if compress > 15 or compress < 9:
compress = 0
continue
if match.group(1):
notakeover = True
# Ignore regex group 5 & 6 for client_max_window_bits
break
else:
if match.group(6):
compress = int(match.group(6))
                    # Group 5 must match if group 6 matches.
                    # zlib does not support a window size of 8 bits.
                    # If the compression level is not supported,
                    # FAIL the parse process.
if compress > 15 or compress < 9:
raise WSHandshakeError("Invalid window size")
if match.group(2):
notakeover = True
# Ignore regex group 5 & 6 for client_max_window_bits
break
        # Fail if on the client side and the extension does not match.
elif not isserver:
raise WSHandshakeError("Extension for deflate not supported" + ext.group(1))
return compress, notakeover
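# Example (illustrative sketch) of the parse results:
#
#     ws_ext_parse("permessage-deflate")                                   # -> (15, False)
#     ws_ext_parse("permessage-deflate; server_max_window_bits=10", True)  # -> (10, False)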
def ws_ext_gen(
compress: int = 15, isserver: bool = False, server_notakeover: bool = False
) -> str:
    # client_notakeover=False is not used for the server side.
    # zlib does not support a window size of 8 bits.
if compress < 9 or compress > 15:
raise ValueError(
"Compress wbits must between 9 and 15, " "zlib does not support wbits=8"
)
enabledext = ["permessage-deflate"]
if not isserver:
enabledext.append("client_max_window_bits")
if compress < 15:
enabledext.append("server_max_window_bits=" + str(compress))
if server_notakeover:
enabledext.append("server_no_context_takeover")
# if client_notakeover:
# enabledext.append('client_no_context_takeover')
return "; ".join(enabledext)
class WSParserState(IntEnum):
READ_HEADER = 1
READ_PAYLOAD_LENGTH = 2
READ_PAYLOAD_MASK = 3
READ_PAYLOAD = 4
class WebSocketReader:
def __init__(
self, queue: DataQueue[WSMessage], max_msg_size: int, compress: bool = True
) -> None:
self.queue = queue
self._max_msg_size = max_msg_size
self._exc: Optional[BaseException] = None
self._partial = bytearray()
self._state = WSParserState.READ_HEADER
self._opcode: Optional[int] = None
self._frame_fin = False
self._frame_opcode: Optional[int] = None
self._frame_payload = bytearray()
self._tail = b""
self._has_mask = False
self._frame_mask: Optional[bytes] = None
self._payload_length = 0
self._payload_length_flag = 0
self._compressed: Optional[bool] = None
self._decompressobj: Any = None # zlib.decompressobj actually
self._compress = compress
def feed_eof(self) -> None:
self.queue.feed_eof()
def feed_data(self, data: bytes) -> Tuple[bool, bytes]:
if self._exc:
return True, data
try:
return self._feed_data(data)
except Exception as exc:
self._exc = exc
self.queue.set_exception(exc)
return True, b""
def _feed_data(self, data: bytes) -> Tuple[bool, bytes]:
for fin, opcode, payload, compressed in self.parse_frame(data):
if compressed and not self._decompressobj:
self._decompressobj = zlib.decompressobj(wbits=-zlib.MAX_WBITS)
if opcode == WSMsgType.CLOSE:
if len(payload) >= 2:
close_code = UNPACK_CLOSE_CODE(payload[:2])[0]
if close_code < 3000 and close_code not in ALLOWED_CLOSE_CODES:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
f"Invalid close code: {close_code}",
)
try:
close_message = payload[2:].decode("utf-8")
except UnicodeDecodeError as exc:
raise WebSocketError(
WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message"
) from exc
msg = WSMessage(WSMsgType.CLOSE, close_code, close_message)
elif payload:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
f"Invalid close frame: {fin} {opcode} {payload!r}",
)
else:
msg = WSMessage(WSMsgType.CLOSE, 0, "")
self.queue.feed_data(msg, 0)
elif opcode == WSMsgType.PING:
self.queue.feed_data(
WSMessage(WSMsgType.PING, payload, ""), len(payload)
)
elif opcode == WSMsgType.PONG:
self.queue.feed_data(
WSMessage(WSMsgType.PONG, payload, ""), len(payload)
)
elif (
opcode not in (WSMsgType.TEXT, WSMsgType.BINARY)
and self._opcode is None
):
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR, f"Unexpected opcode={opcode!r}"
)
else:
# load text/binary
if not fin:
# got partial frame payload
if opcode != WSMsgType.CONTINUATION:
self._opcode = opcode
self._partial.extend(payload)
if self._max_msg_size and len(self._partial) >= self._max_msg_size:
raise WebSocketError(
WSCloseCode.MESSAGE_TOO_BIG,
"Message size {} exceeds limit {}".format(
len(self._partial), self._max_msg_size
),
)
else:
# previous frame was non finished
# we should get continuation opcode
if self._partial:
if opcode != WSMsgType.CONTINUATION:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
"The opcode in non-fin frame is expected "
"to be zero, got {!r}".format(opcode),
)
if opcode == WSMsgType.CONTINUATION:
assert self._opcode is not None
opcode = self._opcode
self._opcode = None
self._partial.extend(payload)
if self._max_msg_size and len(self._partial) >= self._max_msg_size:
raise WebSocketError(
WSCloseCode.MESSAGE_TOO_BIG,
"Message size {} exceeds limit {}".format(
len(self._partial), self._max_msg_size
),
)
                    # Decompression must be performed after all packets
                    # have been received.
if compressed:
self._partial.extend(_WS_DEFLATE_TRAILING)
payload_merged = self._decompressobj.decompress(
self._partial, self._max_msg_size
)
if self._decompressobj.unconsumed_tail:
left = len(self._decompressobj.unconsumed_tail)
raise WebSocketError(
WSCloseCode.MESSAGE_TOO_BIG,
"Decompressed message size {} exceeds limit {}".format(
self._max_msg_size + left, self._max_msg_size
),
)
else:
payload_merged = bytes(self._partial)
self._partial.clear()
if opcode == WSMsgType.TEXT:
try:
text = payload_merged.decode("utf-8")
self.queue.feed_data(
WSMessage(WSMsgType.TEXT, text, ""), len(text)
)
except UnicodeDecodeError as exc:
raise WebSocketError(
WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message"
) from exc
else:
self.queue.feed_data(
WSMessage(WSMsgType.BINARY, payload_merged, ""),
len(payload_merged),
)
return False, b""
def parse_frame(
self, buf: bytes
) -> List[Tuple[bool, Optional[int], bytearray, Optional[bool]]]:
"""Return the next frame from the socket."""
frames = []
if self._tail:
buf, self._tail = self._tail + buf, b""
start_pos = 0
buf_length = len(buf)
while True:
# read header
if self._state == WSParserState.READ_HEADER:
if buf_length - start_pos >= 2:
data = buf[start_pos : start_pos + 2]
start_pos += 2
first_byte, second_byte = data
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xF
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
# frame-rsv1 = %x0 ;
# 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ;
# 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ;
# 1 bit, MUST be 0 unless negotiated otherwise
#
# Remove rsv1 from this test for deflate development
if rsv2 or rsv3 or (rsv1 and not self._compress):
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
"Received frame with non-zero reserved bits",
)
if opcode > 0x7 and fin == 0:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
"Received fragmented control frame",
)
has_mask = (second_byte >> 7) & 1
length = second_byte & 0x7F
# Control frames MUST have a payload
# length of 125 bytes or less
if opcode > 0x7 and length > 125:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
"Control frame payload cannot be " "larger than 125 bytes",
)
                    # Set the compress status if the previous frame was final (FIN),
                    # or if this is the first fragment.
                    # Raise an error for a non-first fragment with rsv1 = 0x1.
if self._frame_fin or self._compressed is None:
self._compressed = True if rsv1 else False
elif rsv1:
raise WebSocketError(
WSCloseCode.PROTOCOL_ERROR,
"Received frame with non-zero reserved bits",
)
self._frame_fin = bool(fin)
self._frame_opcode = opcode
self._has_mask = bool(has_mask)
self._payload_length_flag = length
self._state = WSParserState.READ_PAYLOAD_LENGTH
else:
break
# read payload length
if self._state == WSParserState.READ_PAYLOAD_LENGTH:
length = self._payload_length_flag
if length == 126:
if buf_length - start_pos >= 2:
data = buf[start_pos : start_pos + 2]
start_pos += 2
length = UNPACK_LEN2(data)[0]
self._payload_length = length
self._state = (
WSParserState.READ_PAYLOAD_MASK
if self._has_mask
else WSParserState.READ_PAYLOAD
)
else:
break
elif length > 126:
if buf_length - start_pos >= 8:
data = buf[start_pos : start_pos + 8]
start_pos += 8
length = UNPACK_LEN3(data)[0]
self._payload_length = length
self._state = (
WSParserState.READ_PAYLOAD_MASK
if self._has_mask
else WSParserState.READ_PAYLOAD
)
else:
break
else:
self._payload_length = length
self._state = (
WSParserState.READ_PAYLOAD_MASK
if self._has_mask
else WSParserState.READ_PAYLOAD
)
# read payload mask
if self._state == WSParserState.READ_PAYLOAD_MASK:
if buf_length - start_pos >= 4:
self._frame_mask = buf[start_pos : start_pos + 4]
start_pos += 4
self._state = WSParserState.READ_PAYLOAD
else:
break
if self._state == WSParserState.READ_PAYLOAD:
length = self._payload_length
payload = self._frame_payload
chunk_len = buf_length - start_pos
if length >= chunk_len:
self._payload_length = length - chunk_len
payload.extend(buf[start_pos:])
start_pos = buf_length
else:
self._payload_length = 0
payload.extend(buf[start_pos : start_pos + length])
start_pos = start_pos + length
if self._payload_length == 0:
if self._has_mask:
assert self._frame_mask is not None
_websocket_mask(self._frame_mask, payload)
frames.append(
(self._frame_fin, self._frame_opcode, payload, self._compressed)
)
self._frame_payload = bytearray()
self._state = WSParserState.READ_HEADER
else:
break
self._tail = buf[start_pos:]
return frames
class WebSocketWriter:
def __init__(
self,
protocol: BaseProtocol,
transport: asyncio.Transport,
*,
use_mask: bool = False,
limit: int = DEFAULT_LIMIT,
random: Any = random.Random(),
compress: int = 0,
notakeover: bool = False,
) -> None:
self.protocol = protocol
self.transport = transport
self.use_mask = use_mask
self.randrange = random.randrange
self.compress = compress
self.notakeover = notakeover
self._closing = False
self._limit = limit
self._output_size = 0
self._compressobj: Any = None # actually compressobj
async def _send_frame(
self, message: bytes, opcode: int, compress: Optional[int] = None
) -> None:
"""Send a frame over the websocket with message as its payload."""
if self._closing and not (opcode & WSMsgType.CLOSE):
raise ConnectionResetError("Cannot write to closing transport")
rsv = 0
        # Only compress larger packets (currently disabled).
        # Do small packets need to be compressed?
        # if self.compress and opcode < 8 and len(message) > 124:
if (compress or self.compress) and opcode < 8:
if compress:
                # Do not set self._compressobj if compression is only for this frame
compressobj = zlib.compressobj(level=zlib.Z_BEST_SPEED, wbits=-compress)
else: # self.compress
if not self._compressobj:
self._compressobj = zlib.compressobj(
level=zlib.Z_BEST_SPEED, wbits=-self.compress
)
compressobj = self._compressobj
message = compressobj.compress(message)
message = message + compressobj.flush(
zlib.Z_FULL_FLUSH if self.notakeover else zlib.Z_SYNC_FLUSH
)
if message.endswith(_WS_DEFLATE_TRAILING):
message = message[:-4]
rsv = rsv | 0x40
msg_length = len(message)
use_mask = self.use_mask
if use_mask:
mask_bit = 0x80
else:
mask_bit = 0
if msg_length < 126:
header = PACK_LEN1(0x80 | rsv | opcode, msg_length | mask_bit)
elif msg_length < (1 << 16):
header = PACK_LEN2(0x80 | rsv | opcode, 126 | mask_bit, msg_length)
else:
header = PACK_LEN3(0x80 | rsv | opcode, 127 | mask_bit, msg_length)
if use_mask:
mask = self.randrange(0, 0xFFFFFFFF)
mask = mask.to_bytes(4, "big")
message = bytearray(message)
_websocket_mask(mask, message)
self._write(header + mask + message)
self._output_size += len(header) + len(mask) + len(message)
else:
if len(message) > MSG_SIZE:
self._write(header)
self._write(message)
else:
self._write(header + message)
self._output_size += len(header) + len(message)
if self._output_size > self._limit:
self._output_size = 0
await self.protocol._drain_helper()
def _write(self, data: bytes) -> None:
if self.transport is None or self.transport.is_closing():
raise ConnectionResetError("Cannot write to closing transport")
self.transport.write(data)
async def pong(self, message: bytes = b"") -> None:
"""Send pong message."""
if isinstance(message, str):
message = message.encode("utf-8")
await self._send_frame(message, WSMsgType.PONG)
async def ping(self, message: bytes = b"") -> None:
"""Send ping message."""
if isinstance(message, str):
message = message.encode("utf-8")
await self._send_frame(message, WSMsgType.PING)
async def send(
self,
message: Union[str, bytes],
binary: bool = False,
compress: Optional[int] = None,
) -> None:
"""Send a frame over the websocket with message as its payload."""
if isinstance(message, str):
message = message.encode("utf-8")
if binary:
await self._send_frame(message, WSMsgType.BINARY, compress)
else:
await self._send_frame(message, WSMsgType.TEXT, compress)
async def close(self, code: int = 1000, message: bytes = b"") -> None:
"""Close the websocket, sending the specified code and message."""
if isinstance(message, str):
message = message.encode("utf-8")
try:
await self._send_frame(
PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE
)
finally:
self._closing = True
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/__init__.py | __version__ = "3.8.3"
from typing import Tuple
from . import hdrs as hdrs
from .client import (
BaseConnector as BaseConnector,
ClientConnectionError as ClientConnectionError,
ClientConnectorCertificateError as ClientConnectorCertificateError,
ClientConnectorError as ClientConnectorError,
ClientConnectorSSLError as ClientConnectorSSLError,
ClientError as ClientError,
ClientHttpProxyError as ClientHttpProxyError,
ClientOSError as ClientOSError,
ClientPayloadError as ClientPayloadError,
ClientProxyConnectionError as ClientProxyConnectionError,
ClientRequest as ClientRequest,
ClientResponse as ClientResponse,
ClientResponseError as ClientResponseError,
ClientSession as ClientSession,
ClientSSLError as ClientSSLError,
ClientTimeout as ClientTimeout,
ClientWebSocketResponse as ClientWebSocketResponse,
ContentTypeError as ContentTypeError,
Fingerprint as Fingerprint,
InvalidURL as InvalidURL,
NamedPipeConnector as NamedPipeConnector,
RequestInfo as RequestInfo,
ServerConnectionError as ServerConnectionError,
ServerDisconnectedError as ServerDisconnectedError,
ServerFingerprintMismatch as ServerFingerprintMismatch,
ServerTimeoutError as ServerTimeoutError,
TCPConnector as TCPConnector,
TooManyRedirects as TooManyRedirects,
UnixConnector as UnixConnector,
WSServerHandshakeError as WSServerHandshakeError,
request as request,
)
from .cookiejar import CookieJar as CookieJar, DummyCookieJar as DummyCookieJar
from .formdata import FormData as FormData
from .helpers import BasicAuth, ChainMapProxy, ETag
from .http import (
HttpVersion as HttpVersion,
HttpVersion10 as HttpVersion10,
HttpVersion11 as HttpVersion11,
WebSocketError as WebSocketError,
WSCloseCode as WSCloseCode,
WSMessage as WSMessage,
WSMsgType as WSMsgType,
)
from .multipart import (
BadContentDispositionHeader as BadContentDispositionHeader,
BadContentDispositionParam as BadContentDispositionParam,
BodyPartReader as BodyPartReader,
MultipartReader as MultipartReader,
MultipartWriter as MultipartWriter,
content_disposition_filename as content_disposition_filename,
parse_content_disposition as parse_content_disposition,
)
from .payload import (
PAYLOAD_REGISTRY as PAYLOAD_REGISTRY,
AsyncIterablePayload as AsyncIterablePayload,
BufferedReaderPayload as BufferedReaderPayload,
BytesIOPayload as BytesIOPayload,
BytesPayload as BytesPayload,
IOBasePayload as IOBasePayload,
JsonPayload as JsonPayload,
Payload as Payload,
StringIOPayload as StringIOPayload,
StringPayload as StringPayload,
TextIOPayload as TextIOPayload,
get_payload as get_payload,
payload_type as payload_type,
)
from .payload_streamer import streamer as streamer
from .resolver import (
AsyncResolver as AsyncResolver,
DefaultResolver as DefaultResolver,
ThreadedResolver as ThreadedResolver,
)
from .streams import (
EMPTY_PAYLOAD as EMPTY_PAYLOAD,
DataQueue as DataQueue,
EofStream as EofStream,
FlowControlDataQueue as FlowControlDataQueue,
StreamReader as StreamReader,
)
from .tracing import (
TraceConfig as TraceConfig,
TraceConnectionCreateEndParams as TraceConnectionCreateEndParams,
TraceConnectionCreateStartParams as TraceConnectionCreateStartParams,
TraceConnectionQueuedEndParams as TraceConnectionQueuedEndParams,
TraceConnectionQueuedStartParams as TraceConnectionQueuedStartParams,
TraceConnectionReuseconnParams as TraceConnectionReuseconnParams,
TraceDnsCacheHitParams as TraceDnsCacheHitParams,
TraceDnsCacheMissParams as TraceDnsCacheMissParams,
TraceDnsResolveHostEndParams as TraceDnsResolveHostEndParams,
TraceDnsResolveHostStartParams as TraceDnsResolveHostStartParams,
TraceRequestChunkSentParams as TraceRequestChunkSentParams,
TraceRequestEndParams as TraceRequestEndParams,
TraceRequestExceptionParams as TraceRequestExceptionParams,
TraceRequestRedirectParams as TraceRequestRedirectParams,
TraceRequestStartParams as TraceRequestStartParams,
TraceResponseChunkReceivedParams as TraceResponseChunkReceivedParams,
)
__all__: Tuple[str, ...] = (
"hdrs",
# client
"BaseConnector",
"ClientConnectionError",
"ClientConnectorCertificateError",
"ClientConnectorError",
"ClientConnectorSSLError",
"ClientError",
"ClientHttpProxyError",
"ClientOSError",
"ClientPayloadError",
"ClientProxyConnectionError",
"ClientResponse",
"ClientRequest",
"ClientResponseError",
"ClientSSLError",
"ClientSession",
"ClientTimeout",
"ClientWebSocketResponse",
"ContentTypeError",
"Fingerprint",
"InvalidURL",
"RequestInfo",
"ServerConnectionError",
"ServerDisconnectedError",
"ServerFingerprintMismatch",
"ServerTimeoutError",
"TCPConnector",
"TooManyRedirects",
"UnixConnector",
"NamedPipeConnector",
"WSServerHandshakeError",
"request",
# cookiejar
"CookieJar",
"DummyCookieJar",
# formdata
"FormData",
# helpers
"BasicAuth",
"ChainMapProxy",
"ETag",
# http
"HttpVersion",
"HttpVersion10",
"HttpVersion11",
"WSMsgType",
"WSCloseCode",
"WSMessage",
"WebSocketError",
# multipart
"BadContentDispositionHeader",
"BadContentDispositionParam",
"BodyPartReader",
"MultipartReader",
"MultipartWriter",
"content_disposition_filename",
"parse_content_disposition",
# payload
"AsyncIterablePayload",
"BufferedReaderPayload",
"BytesIOPayload",
"BytesPayload",
"IOBasePayload",
"JsonPayload",
"PAYLOAD_REGISTRY",
"Payload",
"StringIOPayload",
"StringPayload",
"TextIOPayload",
"get_payload",
"payload_type",
# payload_streamer
"streamer",
# resolver
"AsyncResolver",
"DefaultResolver",
"ThreadedResolver",
# streams
"DataQueue",
"EMPTY_PAYLOAD",
"EofStream",
"FlowControlDataQueue",
"StreamReader",
# tracing
"TraceConfig",
"TraceConnectionCreateEndParams",
"TraceConnectionCreateStartParams",
"TraceConnectionQueuedEndParams",
"TraceConnectionQueuedStartParams",
"TraceConnectionReuseconnParams",
"TraceDnsCacheHitParams",
"TraceDnsCacheMissParams",
"TraceDnsResolveHostEndParams",
"TraceDnsResolveHostStartParams",
"TraceRequestChunkSentParams",
"TraceRequestEndParams",
"TraceRequestExceptionParams",
"TraceRequestRedirectParams",
"TraceRequestStartParams",
"TraceResponseChunkReceivedParams",
)
try:
from .worker import GunicornUVLoopWebWorker, GunicornWebWorker
__all__ += ("GunicornWebWorker", "GunicornUVLoopWebWorker")
except ImportError: # pragma: no cover
pass
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_exceptions.py | import warnings
from typing import Any, Dict, Iterable, List, Optional, Set # noqa
from yarl import URL
from .typedefs import LooseHeaders, StrOrURL
from .web_response import Response
__all__ = (
"HTTPException",
"HTTPError",
"HTTPRedirection",
"HTTPSuccessful",
"HTTPOk",
"HTTPCreated",
"HTTPAccepted",
"HTTPNonAuthoritativeInformation",
"HTTPNoContent",
"HTTPResetContent",
"HTTPPartialContent",
"HTTPMultipleChoices",
"HTTPMovedPermanently",
"HTTPFound",
"HTTPSeeOther",
"HTTPNotModified",
"HTTPUseProxy",
"HTTPTemporaryRedirect",
"HTTPPermanentRedirect",
"HTTPClientError",
"HTTPBadRequest",
"HTTPUnauthorized",
"HTTPPaymentRequired",
"HTTPForbidden",
"HTTPNotFound",
"HTTPMethodNotAllowed",
"HTTPNotAcceptable",
"HTTPProxyAuthenticationRequired",
"HTTPRequestTimeout",
"HTTPConflict",
"HTTPGone",
"HTTPLengthRequired",
"HTTPPreconditionFailed",
"HTTPRequestEntityTooLarge",
"HTTPRequestURITooLong",
"HTTPUnsupportedMediaType",
"HTTPRequestRangeNotSatisfiable",
"HTTPExpectationFailed",
"HTTPMisdirectedRequest",
"HTTPUnprocessableEntity",
"HTTPFailedDependency",
"HTTPUpgradeRequired",
"HTTPPreconditionRequired",
"HTTPTooManyRequests",
"HTTPRequestHeaderFieldsTooLarge",
"HTTPUnavailableForLegalReasons",
"HTTPServerError",
"HTTPInternalServerError",
"HTTPNotImplemented",
"HTTPBadGateway",
"HTTPServiceUnavailable",
"HTTPGatewayTimeout",
"HTTPVersionNotSupported",
"HTTPVariantAlsoNegotiates",
"HTTPInsufficientStorage",
"HTTPNotExtended",
"HTTPNetworkAuthenticationRequired",
)
############################################################
# HTTP Exceptions
############################################################
class HTTPException(Response, Exception):
# You should set in subclasses:
    # status_code = 200
status_code = -1
empty_body = False
__http_exception__ = True
def __init__(
self,
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
body: Any = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
if body is not None:
warnings.warn(
"body argument is deprecated for http web exceptions",
DeprecationWarning,
)
Response.__init__(
self,
status=self.status_code,
headers=headers,
reason=reason,
body=body,
text=text,
content_type=content_type,
)
Exception.__init__(self, self.reason)
if self.body is None and not self.empty_body:
self.text = f"{self.status}: {self.reason}"
def __bool__(self) -> bool:
return True
class HTTPError(HTTPException):
"""Base class for exceptions with status codes in the 400s and 500s."""
class HTTPRedirection(HTTPException):
"""Base class for exceptions with status codes in the 300s."""
class HTTPSuccessful(HTTPException):
"""Base class for exceptions with status codes in the 200s."""
class HTTPOk(HTTPSuccessful):
status_code = 200
class HTTPCreated(HTTPSuccessful):
status_code = 201
class HTTPAccepted(HTTPSuccessful):
status_code = 202
class HTTPNonAuthoritativeInformation(HTTPSuccessful):
status_code = 203
class HTTPNoContent(HTTPSuccessful):
status_code = 204
empty_body = True
class HTTPResetContent(HTTPSuccessful):
status_code = 205
empty_body = True
class HTTPPartialContent(HTTPSuccessful):
status_code = 206
############################################################
# 3xx redirection
############################################################
class _HTTPMove(HTTPRedirection):
def __init__(
self,
location: StrOrURL,
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
body: Any = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
if not location:
raise ValueError("HTTP redirects need a location to redirect to.")
super().__init__(
headers=headers,
reason=reason,
body=body,
text=text,
content_type=content_type,
)
self.headers["Location"] = str(URL(location))
self.location = location
class HTTPMultipleChoices(_HTTPMove):
status_code = 300
class HTTPMovedPermanently(_HTTPMove):
status_code = 301
class HTTPFound(_HTTPMove):
status_code = 302
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(_HTTPMove):
status_code = 303
class HTTPNotModified(HTTPRedirection):
# FIXME: this should include a date or etag header
status_code = 304
empty_body = True
class HTTPUseProxy(_HTTPMove):
# Not a move, but looks a little like one
status_code = 305
class HTTPTemporaryRedirect(_HTTPMove):
status_code = 307
class HTTPPermanentRedirect(_HTTPMove):
status_code = 308
############################################################
# 4xx client error
############################################################
class HTTPClientError(HTTPError):
pass
class HTTPBadRequest(HTTPClientError):
status_code = 400
class HTTPUnauthorized(HTTPClientError):
status_code = 401
class HTTPPaymentRequired(HTTPClientError):
status_code = 402
class HTTPForbidden(HTTPClientError):
status_code = 403
class HTTPNotFound(HTTPClientError):
status_code = 404
class HTTPMethodNotAllowed(HTTPClientError):
status_code = 405
def __init__(
self,
method: str,
allowed_methods: Iterable[str],
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
body: Any = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
allow = ",".join(sorted(allowed_methods))
super().__init__(
headers=headers,
reason=reason,
body=body,
text=text,
content_type=content_type,
)
self.headers["Allow"] = allow
self.allowed_methods: Set[str] = set(allowed_methods)
self.method = method.upper()
class HTTPNotAcceptable(HTTPClientError):
status_code = 406
class HTTPProxyAuthenticationRequired(HTTPClientError):
status_code = 407
class HTTPRequestTimeout(HTTPClientError):
status_code = 408
class HTTPConflict(HTTPClientError):
status_code = 409
class HTTPGone(HTTPClientError):
status_code = 410
class HTTPLengthRequired(HTTPClientError):
status_code = 411
class HTTPPreconditionFailed(HTTPClientError):
status_code = 412
class HTTPRequestEntityTooLarge(HTTPClientError):
status_code = 413
def __init__(self, max_size: float, actual_size: float, **kwargs: Any) -> None:
kwargs.setdefault(
"text",
"Maximum request body size {} exceeded, "
"actual body size {}".format(max_size, actual_size),
)
super().__init__(**kwargs)
class HTTPRequestURITooLong(HTTPClientError):
status_code = 414
class HTTPUnsupportedMediaType(HTTPClientError):
status_code = 415
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
status_code = 416
class HTTPExpectationFailed(HTTPClientError):
status_code = 417
class HTTPMisdirectedRequest(HTTPClientError):
status_code = 421
class HTTPUnprocessableEntity(HTTPClientError):
status_code = 422
class HTTPFailedDependency(HTTPClientError):
status_code = 424
class HTTPUpgradeRequired(HTTPClientError):
status_code = 426
class HTTPPreconditionRequired(HTTPClientError):
status_code = 428
class HTTPTooManyRequests(HTTPClientError):
status_code = 429
class HTTPRequestHeaderFieldsTooLarge(HTTPClientError):
status_code = 431
class HTTPUnavailableForLegalReasons(HTTPClientError):
status_code = 451
def __init__(
self,
link: str,
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
body: Any = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
super().__init__(
headers=headers,
reason=reason,
body=body,
text=text,
content_type=content_type,
)
self.headers["Link"] = '<%s>; rel="blocked-by"' % link
self.link = link
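# Usage sketch (an illustrative addition, not part of the original module):
# handlers signal errors and redirects by raising these exceptions, which
# aiohttp converts into responses. The handler below is hypothetical and
# assumes ``request`` behaves like aiohttp.web.Request.
async def _example_handler(request: Any) -> None:  # pragma: no cover
    if request.method not in ("GET", "HEAD"):
        # 405 with a proper Allow header, built by HTTPMethodNotAllowed
        raise HTTPMethodNotAllowed(request.method, ["GET", "HEAD"])
    # 303: redirect the client to another resource, fetched with GET
    raise HTTPSeeOther(location="/new-location")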
############################################################
# 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
class HTTPServerError(HTTPError):
pass
class HTTPInternalServerError(HTTPServerError):
status_code = 500
class HTTPNotImplemented(HTTPServerError):
status_code = 501
class HTTPBadGateway(HTTPServerError):
status_code = 502
class HTTPServiceUnavailable(HTTPServerError):
status_code = 503
class HTTPGatewayTimeout(HTTPServerError):
status_code = 504
class HTTPVersionNotSupported(HTTPServerError):
status_code = 505
class HTTPVariantAlsoNegotiates(HTTPServerError):
status_code = 506
class HTTPInsufficientStorage(HTTPServerError):
status_code = 507
class HTTPNotExtended(HTTPServerError):
status_code = 510
class HTTPNetworkAuthenticationRequired(HTTPServerError):
status_code = 511
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_server.py | """Low level HTTP server."""
import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa
from .abc import AbstractStreamWriter
from .helpers import get_running_loop
from .http_parser import RawRequestMessage
from .streams import StreamReader
from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler
from .web_request import BaseRequest
__all__ = ("Server",)
class Server:
def __init__(
self,
handler: _RequestHandler,
*,
request_factory: Optional[_RequestFactory] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
**kwargs: Any
) -> None:
self._loop = get_running_loop(loop)
self._connections: Dict[RequestHandler, asyncio.Transport] = {}
self._kwargs = kwargs
self.requests_count = 0
self.request_handler = handler
self.request_factory = request_factory or self._make_request
@property
def connections(self) -> List[RequestHandler]:
return list(self._connections.keys())
def connection_made(
self, handler: RequestHandler, transport: asyncio.Transport
) -> None:
self._connections[handler] = transport
def connection_lost(
self, handler: RequestHandler, exc: Optional[BaseException] = None
) -> None:
if handler in self._connections:
del self._connections[handler]
def _make_request(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: RequestHandler,
writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
) -> BaseRequest:
return BaseRequest(message, payload, protocol, writer, task, self._loop)
async def shutdown(self, timeout: Optional[float] = None) -> None:
coros = [conn.shutdown(timeout) for conn in self._connections]
await asyncio.gather(*coros)
self._connections.clear()
def __call__(self) -> RequestHandler:
return RequestHandler(self, loop=self._loop, **self._kwargs)
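# Usage sketch (an illustrative addition, not part of the original module):
# Server is a protocol factory, so it can be handed straight to
# loop.create_server(); the handler below is hypothetical.
async def _example_serve() -> None:  # pragma: no cover
    from .web_response import Response  # local import to avoid an import cycle
    async def handler(request: BaseRequest) -> Response:
        return Response(text="OK")
    server = Server(handler)
    loop = asyncio.get_event_loop()
    await loop.create_server(server, "127.0.0.1", 8080)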
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web.py | import asyncio
import logging
import socket
import sys
from argparse import ArgumentParser
from collections.abc import Iterable
from importlib import import_module
from typing import (
Any,
Awaitable,
Callable,
Iterable as TypingIterable,
List,
Optional,
Set,
Type,
Union,
cast,
)
from .abc import AbstractAccessLogger
from .helpers import all_tasks
from .log import access_logger
from .web_app import Application as Application, CleanupError as CleanupError
from .web_exceptions import (
HTTPAccepted as HTTPAccepted,
HTTPBadGateway as HTTPBadGateway,
HTTPBadRequest as HTTPBadRequest,
HTTPClientError as HTTPClientError,
HTTPConflict as HTTPConflict,
HTTPCreated as HTTPCreated,
HTTPError as HTTPError,
HTTPException as HTTPException,
HTTPExpectationFailed as HTTPExpectationFailed,
HTTPFailedDependency as HTTPFailedDependency,
HTTPForbidden as HTTPForbidden,
HTTPFound as HTTPFound,
HTTPGatewayTimeout as HTTPGatewayTimeout,
HTTPGone as HTTPGone,
HTTPInsufficientStorage as HTTPInsufficientStorage,
HTTPInternalServerError as HTTPInternalServerError,
HTTPLengthRequired as HTTPLengthRequired,
HTTPMethodNotAllowed as HTTPMethodNotAllowed,
HTTPMisdirectedRequest as HTTPMisdirectedRequest,
HTTPMovedPermanently as HTTPMovedPermanently,
HTTPMultipleChoices as HTTPMultipleChoices,
HTTPNetworkAuthenticationRequired as HTTPNetworkAuthenticationRequired,
HTTPNoContent as HTTPNoContent,
HTTPNonAuthoritativeInformation as HTTPNonAuthoritativeInformation,
HTTPNotAcceptable as HTTPNotAcceptable,
HTTPNotExtended as HTTPNotExtended,
HTTPNotFound as HTTPNotFound,
HTTPNotImplemented as HTTPNotImplemented,
HTTPNotModified as HTTPNotModified,
HTTPOk as HTTPOk,
HTTPPartialContent as HTTPPartialContent,
HTTPPaymentRequired as HTTPPaymentRequired,
HTTPPermanentRedirect as HTTPPermanentRedirect,
HTTPPreconditionFailed as HTTPPreconditionFailed,
HTTPPreconditionRequired as HTTPPreconditionRequired,
HTTPProxyAuthenticationRequired as HTTPProxyAuthenticationRequired,
HTTPRedirection as HTTPRedirection,
HTTPRequestEntityTooLarge as HTTPRequestEntityTooLarge,
HTTPRequestHeaderFieldsTooLarge as HTTPRequestHeaderFieldsTooLarge,
HTTPRequestRangeNotSatisfiable as HTTPRequestRangeNotSatisfiable,
HTTPRequestTimeout as HTTPRequestTimeout,
HTTPRequestURITooLong as HTTPRequestURITooLong,
HTTPResetContent as HTTPResetContent,
HTTPSeeOther as HTTPSeeOther,
HTTPServerError as HTTPServerError,
HTTPServiceUnavailable as HTTPServiceUnavailable,
HTTPSuccessful as HTTPSuccessful,
HTTPTemporaryRedirect as HTTPTemporaryRedirect,
HTTPTooManyRequests as HTTPTooManyRequests,
HTTPUnauthorized as HTTPUnauthorized,
HTTPUnavailableForLegalReasons as HTTPUnavailableForLegalReasons,
HTTPUnprocessableEntity as HTTPUnprocessableEntity,
HTTPUnsupportedMediaType as HTTPUnsupportedMediaType,
HTTPUpgradeRequired as HTTPUpgradeRequired,
HTTPUseProxy as HTTPUseProxy,
HTTPVariantAlsoNegotiates as HTTPVariantAlsoNegotiates,
HTTPVersionNotSupported as HTTPVersionNotSupported,
)
from .web_fileresponse import FileResponse as FileResponse
from .web_log import AccessLogger
from .web_middlewares import (
middleware as middleware,
normalize_path_middleware as normalize_path_middleware,
)
from .web_protocol import (
PayloadAccessError as PayloadAccessError,
RequestHandler as RequestHandler,
RequestPayloadError as RequestPayloadError,
)
from .web_request import (
BaseRequest as BaseRequest,
FileField as FileField,
Request as Request,
)
from .web_response import (
ContentCoding as ContentCoding,
Response as Response,
StreamResponse as StreamResponse,
json_response as json_response,
)
from .web_routedef import (
AbstractRouteDef as AbstractRouteDef,
RouteDef as RouteDef,
RouteTableDef as RouteTableDef,
StaticDef as StaticDef,
delete as delete,
get as get,
head as head,
options as options,
patch as patch,
post as post,
put as put,
route as route,
static as static,
view as view,
)
from .web_runner import (
AppRunner as AppRunner,
BaseRunner as BaseRunner,
BaseSite as BaseSite,
GracefulExit as GracefulExit,
NamedPipeSite as NamedPipeSite,
ServerRunner as ServerRunner,
SockSite as SockSite,
TCPSite as TCPSite,
UnixSite as UnixSite,
)
from .web_server import Server as Server
from .web_urldispatcher import (
AbstractResource as AbstractResource,
AbstractRoute as AbstractRoute,
DynamicResource as DynamicResource,
PlainResource as PlainResource,
PrefixedSubAppResource as PrefixedSubAppResource,
Resource as Resource,
ResourceRoute as ResourceRoute,
StaticResource as StaticResource,
UrlDispatcher as UrlDispatcher,
UrlMappingMatchInfo as UrlMappingMatchInfo,
View as View,
)
from .web_ws import (
WebSocketReady as WebSocketReady,
WebSocketResponse as WebSocketResponse,
WSMsgType as WSMsgType,
)
__all__ = (
# web_app
"Application",
"CleanupError",
# web_exceptions
"HTTPAccepted",
"HTTPBadGateway",
"HTTPBadRequest",
"HTTPClientError",
"HTTPConflict",
"HTTPCreated",
"HTTPError",
"HTTPException",
"HTTPExpectationFailed",
"HTTPFailedDependency",
"HTTPForbidden",
"HTTPFound",
"HTTPGatewayTimeout",
"HTTPGone",
"HTTPInsufficientStorage",
"HTTPInternalServerError",
"HTTPLengthRequired",
"HTTPMethodNotAllowed",
"HTTPMisdirectedRequest",
"HTTPMovedPermanently",
"HTTPMultipleChoices",
"HTTPNetworkAuthenticationRequired",
"HTTPNoContent",
"HTTPNonAuthoritativeInformation",
"HTTPNotAcceptable",
"HTTPNotExtended",
"HTTPNotFound",
"HTTPNotImplemented",
"HTTPNotModified",
"HTTPOk",
"HTTPPartialContent",
"HTTPPaymentRequired",
"HTTPPermanentRedirect",
"HTTPPreconditionFailed",
"HTTPPreconditionRequired",
"HTTPProxyAuthenticationRequired",
"HTTPRedirection",
"HTTPRequestEntityTooLarge",
"HTTPRequestHeaderFieldsTooLarge",
"HTTPRequestRangeNotSatisfiable",
"HTTPRequestTimeout",
"HTTPRequestURITooLong",
"HTTPResetContent",
"HTTPSeeOther",
"HTTPServerError",
"HTTPServiceUnavailable",
"HTTPSuccessful",
"HTTPTemporaryRedirect",
"HTTPTooManyRequests",
"HTTPUnauthorized",
"HTTPUnavailableForLegalReasons",
"HTTPUnprocessableEntity",
"HTTPUnsupportedMediaType",
"HTTPUpgradeRequired",
"HTTPUseProxy",
"HTTPVariantAlsoNegotiates",
"HTTPVersionNotSupported",
# web_fileresponse
"FileResponse",
# web_middlewares
"middleware",
"normalize_path_middleware",
# web_protocol
"PayloadAccessError",
"RequestHandler",
"RequestPayloadError",
# web_request
"BaseRequest",
"FileField",
"Request",
# web_response
"ContentCoding",
"Response",
"StreamResponse",
"json_response",
# web_routedef
"AbstractRouteDef",
"RouteDef",
"RouteTableDef",
"StaticDef",
"delete",
"get",
"head",
"options",
"patch",
"post",
"put",
"route",
"static",
"view",
# web_runner
"AppRunner",
"BaseRunner",
"BaseSite",
"GracefulExit",
"ServerRunner",
"SockSite",
"TCPSite",
"UnixSite",
"NamedPipeSite",
# web_server
"Server",
# web_urldispatcher
"AbstractResource",
"AbstractRoute",
"DynamicResource",
"PlainResource",
"PrefixedSubAppResource",
"Resource",
"ResourceRoute",
"StaticResource",
"UrlDispatcher",
"UrlMappingMatchInfo",
"View",
# web_ws
"WebSocketReady",
"WebSocketResponse",
"WSMsgType",
# web
"run_app",
)
try:
from ssl import SSLContext
except ImportError: # pragma: no cover
SSLContext = Any # type: ignore[misc,assignment]
HostSequence = TypingIterable[str]
async def _run_app(
app: Union[Application, Awaitable[Application]],
*,
host: Optional[Union[str, HostSequence]] = None,
port: Optional[int] = None,
path: Optional[str] = None,
sock: Optional[Union[socket.socket, TypingIterable[socket.socket]]] = None,
shutdown_timeout: float = 60.0,
keepalive_timeout: float = 75.0,
ssl_context: Optional[SSLContext] = None,
print: Callable[..., None] = print,
backlog: int = 128,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
access_log_format: str = AccessLogger.LOG_FORMAT,
access_log: Optional[logging.Logger] = access_logger,
handle_signals: bool = True,
reuse_address: Optional[bool] = None,
reuse_port: Optional[bool] = None,
) -> None:
    # An internal function that does all the dirty work of running an application
if asyncio.iscoroutine(app):
app = await app # type: ignore[misc]
app = cast(Application, app)
runner = AppRunner(
app,
handle_signals=handle_signals,
access_log_class=access_log_class,
access_log_format=access_log_format,
access_log=access_log,
keepalive_timeout=keepalive_timeout,
)
await runner.setup()
sites: List[BaseSite] = []
try:
if host is not None:
if isinstance(host, (str, bytes, bytearray, memoryview)):
sites.append(
TCPSite(
runner,
host,
port,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
reuse_address=reuse_address,
reuse_port=reuse_port,
)
)
else:
for h in host:
sites.append(
TCPSite(
runner,
h,
port,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
reuse_address=reuse_address,
reuse_port=reuse_port,
)
)
        elif (path is None and sock is None) or port is not None:
sites.append(
TCPSite(
runner,
port=port,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
reuse_address=reuse_address,
reuse_port=reuse_port,
)
)
if path is not None:
if isinstance(path, (str, bytes, bytearray, memoryview)):
sites.append(
UnixSite(
runner,
path,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
)
else:
for p in path:
sites.append(
UnixSite(
runner,
p,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
)
if sock is not None:
if not isinstance(sock, Iterable):
sites.append(
SockSite(
runner,
sock,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
)
else:
for s in sock:
sites.append(
SockSite(
runner,
s,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
)
for site in sites:
await site.start()
if print: # pragma: no branch
names = sorted(str(s.name) for s in runner.sites)
print(
"======== Running on {} ========\n"
"(Press CTRL+C to quit)".format(", ".join(names))
)
        # sleep forever in 1 hour intervals;
        # on Windows before Python 3.8, wake up every second so that
        # Ctrl+C is handled smoothly
if sys.platform == "win32" and sys.version_info < (3, 8):
delay = 1
else:
delay = 3600
while True:
await asyncio.sleep(delay)
finally:
await runner.cleanup()
def _cancel_tasks(
to_cancel: Set["asyncio.Task[Any]"], loop: asyncio.AbstractEventLoop
) -> None:
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during asyncio.run() shutdown",
"exception": task.exception(),
"task": task,
}
)
def run_app(
app: Union[Application, Awaitable[Application]],
*,
host: Optional[Union[str, HostSequence]] = None,
port: Optional[int] = None,
path: Optional[str] = None,
sock: Optional[Union[socket.socket, TypingIterable[socket.socket]]] = None,
shutdown_timeout: float = 60.0,
keepalive_timeout: float = 75.0,
ssl_context: Optional[SSLContext] = None,
print: Callable[..., None] = print,
backlog: int = 128,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
access_log_format: str = AccessLogger.LOG_FORMAT,
access_log: Optional[logging.Logger] = access_logger,
handle_signals: bool = True,
reuse_address: Optional[bool] = None,
reuse_port: Optional[bool] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
"""Run an app locally"""
if loop is None:
loop = asyncio.new_event_loop()
# Configure if and only if in debugging mode and using the default logger
if loop.get_debug() and access_log and access_log.name == "aiohttp.access":
if access_log.level == logging.NOTSET:
access_log.setLevel(logging.DEBUG)
if not access_log.hasHandlers():
access_log.addHandler(logging.StreamHandler())
main_task = loop.create_task(
_run_app(
app,
host=host,
port=port,
path=path,
sock=sock,
shutdown_timeout=shutdown_timeout,
keepalive_timeout=keepalive_timeout,
ssl_context=ssl_context,
print=print,
backlog=backlog,
access_log_class=access_log_class,
access_log_format=access_log_format,
access_log=access_log,
handle_signals=handle_signals,
reuse_address=reuse_address,
reuse_port=reuse_port,
)
)
try:
asyncio.set_event_loop(loop)
loop.run_until_complete(main_task)
except (GracefulExit, KeyboardInterrupt): # pragma: no cover
pass
finally:
_cancel_tasks({main_task}, loop)
_cancel_tasks(all_tasks(loop), loop)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
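# Usage sketch (an illustrative addition, not part of the original module):
# the typical high-level entry point; the route and handler are hypothetical.
def _example_run() -> None:  # pragma: no cover
    async def hello(request: Request) -> Response:
        return Response(text="Hello, world")
    app = Application()
    app.router.add_get("/", hello)
    run_app(app, host="127.0.0.1", port=8080)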
def main(argv: List[str]) -> None:
arg_parser = ArgumentParser(
description="aiohttp.web Application server", prog="aiohttp.web"
)
arg_parser.add_argument(
"entry_func",
help=(
"Callable returning the `aiohttp.web.Application` instance to "
"run. Should be specified in the 'module:function' syntax."
),
metavar="entry-func",
)
arg_parser.add_argument(
"-H",
"--hostname",
help="TCP/IP hostname to serve on (default: %(default)r)",
default="localhost",
)
arg_parser.add_argument(
"-P",
"--port",
help="TCP/IP port to serve on (default: %(default)r)",
type=int,
default="8080",
)
arg_parser.add_argument(
"-U",
"--path",
help="Unix file system path to serve on. Specifying a path will cause "
"hostname and port arguments to be ignored.",
)
args, extra_argv = arg_parser.parse_known_args(argv)
# Import logic
mod_str, _, func_str = args.entry_func.partition(":")
if not func_str or not mod_str:
arg_parser.error("'entry-func' not in 'module:function' syntax")
if mod_str.startswith("."):
arg_parser.error("relative module names not supported")
try:
module = import_module(mod_str)
except ImportError as ex:
arg_parser.error(f"unable to import {mod_str}: {ex}")
try:
func = getattr(module, func_str)
except AttributeError:
arg_parser.error(f"module {mod_str!r} has no attribute {func_str!r}")
# Compatibility logic
if args.path is not None and not hasattr(socket, "AF_UNIX"):
        arg_parser.error(
            "file system paths not supported by your operating environment"
        )
logging.basicConfig(level=logging.DEBUG)
app = func(extra_argv)
run_app(app, host=args.hostname, port=args.port, path=args.path)
arg_parser.exit(message="Stopped\n")
if __name__ == "__main__": # pragma: no branch
main(sys.argv[1:]) # pragma: no cover
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/locks.py | import asyncio
import collections
from typing import Any, Deque, Optional
class EventResultOrError:
"""Event asyncio lock helper class.
    Wraps asyncio.Event, allowing the waiting tasks to be woken up
    either normally or by raising an exception.
    Thanks to @vorpalsmith for the simple design.
"""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._exc: Optional[BaseException] = None
self._event = asyncio.Event()
self._waiters: Deque[asyncio.Future[Any]] = collections.deque()
def set(self, exc: Optional[BaseException] = None) -> None:
self._exc = exc
self._event.set()
async def wait(self) -> Any:
waiter = self._loop.create_task(self._event.wait())
self._waiters.append(waiter)
try:
val = await waiter
finally:
self._waiters.remove(waiter)
if self._exc is not None:
raise self._exc
return val
def cancel(self) -> None:
"""Cancel all waiters"""
for waiter in self._waiters:
waiter.cancel()
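# Usage sketch (an illustrative addition, not part of the original module):
# one task blocks on wait() while another wakes it either normally or
# with an exception.
async def _example_event() -> None:  # pragma: no cover
    loop = asyncio.get_event_loop()
    ev = EventResultOrError(loop)
    waiter = loop.create_task(ev.wait())
    await asyncio.sleep(0)  # let the waiter subscribe
    ev.set()  # ev.set(RuntimeError("boom")) would make wait() raise instead
    await waiter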
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_runner.py | import asyncio
import signal
import socket
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Set
from yarl import URL
from .web_app import Application
from .web_server import Server
try:
from ssl import SSLContext
except ImportError:
SSLContext = object # type: ignore[misc,assignment]
__all__ = (
"BaseSite",
"TCPSite",
"UnixSite",
"NamedPipeSite",
"SockSite",
"BaseRunner",
"AppRunner",
"ServerRunner",
"GracefulExit",
)
class GracefulExit(SystemExit):
code = 1
def _raise_graceful_exit() -> None:
raise GracefulExit()
class BaseSite(ABC):
__slots__ = ("_runner", "_shutdown_timeout", "_ssl_context", "_backlog", "_server")
def __init__(
self,
runner: "BaseRunner",
*,
shutdown_timeout: float = 60.0,
ssl_context: Optional[SSLContext] = None,
backlog: int = 128,
) -> None:
if runner.server is None:
raise RuntimeError("Call runner.setup() before making a site")
self._runner = runner
self._shutdown_timeout = shutdown_timeout
self._ssl_context = ssl_context
self._backlog = backlog
self._server: Optional[asyncio.AbstractServer] = None
@property
@abstractmethod
def name(self) -> str:
pass # pragma: no cover
@abstractmethod
async def start(self) -> None:
self._runner._reg_site(self)
async def stop(self) -> None:
self._runner._check_site(self)
if self._server is None:
self._runner._unreg_site(self)
return # not started yet
self._server.close()
        # named pipes do not have a wait_closed() method
if hasattr(self._server, "wait_closed"):
await self._server.wait_closed()
await self._runner.shutdown()
assert self._runner.server
await self._runner.server.shutdown(self._shutdown_timeout)
self._runner._unreg_site(self)
class TCPSite(BaseSite):
__slots__ = ("_host", "_port", "_reuse_address", "_reuse_port")
def __init__(
self,
runner: "BaseRunner",
host: Optional[str] = None,
port: Optional[int] = None,
*,
shutdown_timeout: float = 60.0,
ssl_context: Optional[SSLContext] = None,
backlog: int = 128,
reuse_address: Optional[bool] = None,
reuse_port: Optional[bool] = None,
) -> None:
super().__init__(
runner,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
self._host = host
if port is None:
port = 8443 if self._ssl_context else 8080
self._port = port
self._reuse_address = reuse_address
self._reuse_port = reuse_port
@property
def name(self) -> str:
scheme = "https" if self._ssl_context else "http"
host = "0.0.0.0" if self._host is None else self._host
return str(URL.build(scheme=scheme, host=host, port=self._port))
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
self._server = await loop.create_server(
server,
self._host,
self._port,
ssl=self._ssl_context,
backlog=self._backlog,
reuse_address=self._reuse_address,
reuse_port=self._reuse_port,
)
class UnixSite(BaseSite):
__slots__ = ("_path",)
def __init__(
self,
runner: "BaseRunner",
path: str,
*,
shutdown_timeout: float = 60.0,
ssl_context: Optional[SSLContext] = None,
backlog: int = 128,
) -> None:
super().__init__(
runner,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
self._path = path
@property
def name(self) -> str:
scheme = "https" if self._ssl_context else "http"
return f"{scheme}://unix:{self._path}:"
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
self._server = await loop.create_unix_server(
server, self._path, ssl=self._ssl_context, backlog=self._backlog
)
class NamedPipeSite(BaseSite):
__slots__ = ("_path",)
def __init__(
self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0
) -> None:
loop = asyncio.get_event_loop()
if not isinstance(
loop, asyncio.ProactorEventLoop # type: ignore[attr-defined]
):
raise RuntimeError(
"Named Pipes only available in proactor" "loop under windows"
)
super().__init__(runner, shutdown_timeout=shutdown_timeout)
self._path = path
@property
def name(self) -> str:
return self._path
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
_server = await loop.start_serving_pipe( # type: ignore[attr-defined]
server, self._path
)
self._server = _server[0]
class SockSite(BaseSite):
__slots__ = ("_sock", "_name")
def __init__(
self,
runner: "BaseRunner",
sock: socket.socket,
*,
shutdown_timeout: float = 60.0,
ssl_context: Optional[SSLContext] = None,
backlog: int = 128,
) -> None:
super().__init__(
runner,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
self._sock = sock
scheme = "https" if self._ssl_context else "http"
if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX:
name = f"{scheme}://unix:{sock.getsockname()}:"
else:
host, port = sock.getsockname()[:2]
name = str(URL.build(scheme=scheme, host=host, port=port))
self._name = name
@property
def name(self) -> str:
return self._name
async def start(self) -> None:
await super().start()
loop = asyncio.get_event_loop()
server = self._runner.server
assert server is not None
self._server = await loop.create_server(
server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog
)
class BaseRunner(ABC):
__slots__ = ("_handle_signals", "_kwargs", "_server", "_sites")
def __init__(self, *, handle_signals: bool = False, **kwargs: Any) -> None:
self._handle_signals = handle_signals
self._kwargs = kwargs
self._server: Optional[Server] = None
self._sites: List[BaseSite] = []
@property
def server(self) -> Optional[Server]:
return self._server
@property
def addresses(self) -> List[Any]:
ret: List[Any] = []
for site in self._sites:
server = site._server
if server is not None:
sockets = server.sockets
if sockets is not None:
for sock in sockets:
ret.append(sock.getsockname())
return ret
@property
def sites(self) -> Set[BaseSite]:
return set(self._sites)
async def setup(self) -> None:
loop = asyncio.get_event_loop()
if self._handle_signals:
try:
loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit)
loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit)
except NotImplementedError: # pragma: no cover
# add_signal_handler is not implemented on Windows
pass
self._server = await self._make_server()
@abstractmethod
async def shutdown(self) -> None:
pass # pragma: no cover
async def cleanup(self) -> None:
loop = asyncio.get_event_loop()
        # The loop over sites is intentional: an exception on gather()
        # would leave self._sites in an unpredictable state.
        # The loop guarantees that a site is either removed on success or
        # still present on failure.
for site in list(self._sites):
await site.stop()
await self._cleanup_server()
self._server = None
if self._handle_signals:
try:
loop.remove_signal_handler(signal.SIGINT)
loop.remove_signal_handler(signal.SIGTERM)
except NotImplementedError: # pragma: no cover
# remove_signal_handler is not implemented on Windows
pass
@abstractmethod
async def _make_server(self) -> Server:
pass # pragma: no cover
@abstractmethod
async def _cleanup_server(self) -> None:
pass # pragma: no cover
def _reg_site(self, site: BaseSite) -> None:
if site in self._sites:
raise RuntimeError(f"Site {site} is already registered in runner {self}")
self._sites.append(site)
def _check_site(self, site: BaseSite) -> None:
if site not in self._sites:
raise RuntimeError(f"Site {site} is not registered in runner {self}")
def _unreg_site(self, site: BaseSite) -> None:
if site not in self._sites:
raise RuntimeError(f"Site {site} is not registered in runner {self}")
self._sites.remove(site)
class ServerRunner(BaseRunner):
"""Low-level web server runner"""
__slots__ = ("_web_server",)
def __init__(
self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any
) -> None:
super().__init__(handle_signals=handle_signals, **kwargs)
self._web_server = web_server
async def shutdown(self) -> None:
pass
async def _make_server(self) -> Server:
return self._web_server
async def _cleanup_server(self) -> None:
pass
class AppRunner(BaseRunner):
"""Web Application runner"""
__slots__ = ("_app",)
def __init__(
self, app: Application, *, handle_signals: bool = False, **kwargs: Any
) -> None:
super().__init__(handle_signals=handle_signals, **kwargs)
if not isinstance(app, Application):
raise TypeError(
"The first argument should be web.Application "
"instance, got {!r}".format(app)
)
self._app = app
@property
def app(self) -> Application:
return self._app
async def shutdown(self) -> None:
await self._app.shutdown()
async def _make_server(self) -> Server:
loop = asyncio.get_event_loop()
self._app._set_loop(loop)
self._app.on_startup.freeze()
await self._app.startup()
self._app.freeze()
return self._app._make_handler(loop=loop, **self._kwargs)
async def _cleanup_server(self) -> None:
await self._app.cleanup()
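# Usage sketch (an illustrative addition, not part of the original module):
# the canonical runner/site sequence for embedding aiohttp in an existing
# event loop; host, port and lifetime below are hypothetical.
async def _example_runner() -> None:  # pragma: no cover
    app = Application()
    runner = AppRunner(app)
    await runner.setup()
    site = TCPSite(runner, "127.0.0.1", 8080)
    await site.start()
    try:
        await asyncio.sleep(3600)  # serve for a while
    finally:
        await runner.cleanup()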
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/test_utils.py | """Utilities shared by tests."""
import asyncio
import contextlib
import gc
import inspect
import ipaddress
import os
import socket
import sys
import warnings
from abc import ABC, abstractmethod
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterator,
List,
Optional,
Type,
Union,
cast,
)
from unittest import mock
from aiosignal import Signal
from multidict import CIMultiDict, CIMultiDictProxy
from yarl import URL
import aiohttp
from aiohttp.client import _RequestContextManager, _WSRequestContextManager
from . import ClientSession, hdrs
from .abc import AbstractCookieJar
from .client_reqrep import ClientResponse
from .client_ws import ClientWebSocketResponse
from .helpers import PY_38, sentinel
from .http import HttpVersion, RawRequestMessage
from .web import (
Application,
AppRunner,
BaseRunner,
Request,
Server,
ServerRunner,
SockSite,
UrlMappingMatchInfo,
)
from .web_protocol import _RequestHandler
if TYPE_CHECKING: # pragma: no cover
from ssl import SSLContext
else:
SSLContext = None
if PY_38:
from unittest import IsolatedAsyncioTestCase as TestCase
else:
from asynctest import TestCase # type: ignore[no-redef]
REUSE_ADDRESS = os.name == "posix" and sys.platform != "cygwin"
def get_unused_port_socket(
host: str, family: socket.AddressFamily = socket.AF_INET
) -> socket.socket:
return get_port_socket(host, 0, family)
def get_port_socket(
host: str, port: int, family: socket.AddressFamily
) -> socket.socket:
s = socket.socket(family, socket.SOCK_STREAM)
if REUSE_ADDRESS:
# Windows has different semantics for SO_REUSEADDR,
# so don't set it. Ref:
# https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
return s
def unused_port() -> int:
"""Return a port that is unused on the current host."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
return cast(int, s.getsockname()[1])
class BaseTestServer(ABC):
__test__ = False
def __init__(
self,
*,
scheme: Union[str, object] = sentinel,
loop: Optional[asyncio.AbstractEventLoop] = None,
host: str = "127.0.0.1",
port: Optional[int] = None,
skip_url_asserts: bool = False,
socket_factory: Callable[
[str, int, socket.AddressFamily], socket.socket
] = get_port_socket,
**kwargs: Any,
) -> None:
self._loop = loop
self.runner: Optional[BaseRunner] = None
self._root: Optional[URL] = None
self.host = host
self.port = port
self._closed = False
self.scheme = scheme
self.skip_url_asserts = skip_url_asserts
self.socket_factory = socket_factory
async def start_server(
self, loop: Optional[asyncio.AbstractEventLoop] = None, **kwargs: Any
) -> None:
if self.runner:
return
self._loop = loop
self._ssl = kwargs.pop("ssl", None)
self.runner = await self._make_runner(**kwargs)
await self.runner.setup()
if not self.port:
self.port = 0
try:
version = ipaddress.ip_address(self.host).version
except ValueError:
version = 4
family = socket.AF_INET6 if version == 6 else socket.AF_INET
_sock = self.socket_factory(self.host, self.port, family)
self.host, self.port = _sock.getsockname()[:2]
site = SockSite(self.runner, sock=_sock, ssl_context=self._ssl)
await site.start()
server = site._server
assert server is not None
sockets = server.sockets
assert sockets is not None
self.port = sockets[0].getsockname()[1]
if self.scheme is sentinel:
if self._ssl:
scheme = "https"
else:
scheme = "http"
self.scheme = scheme
self._root = URL(f"{self.scheme}://{self.host}:{self.port}")
@abstractmethod # pragma: no cover
async def _make_runner(self, **kwargs: Any) -> BaseRunner:
pass
def make_url(self, path: str) -> URL:
assert self._root is not None
url = URL(path)
if not self.skip_url_asserts:
assert not url.is_absolute()
return self._root.join(url)
else:
return URL(str(self._root) + path)
@property
def started(self) -> bool:
return self.runner is not None
@property
def closed(self) -> bool:
return self._closed
@property
def handler(self) -> Server:
# for backward compatibility
# web.Server instance
runner = self.runner
assert runner is not None
assert runner.server is not None
return runner.server
async def close(self) -> None:
"""Close all fixtures created by the test client.
After that point, the TestClient is no longer usable.
This is an idempotent function: running close multiple times
will not have any additional effects.
close is also run when the object is garbage collected, and on
exit when used as a context manager.
"""
if self.started and not self.closed:
assert self.runner is not None
await self.runner.cleanup()
self._root = None
self.port = None
self._closed = True
def __enter__(self) -> None:
raise TypeError("Use async with instead")
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
# __exit__ should exist in pair with __enter__ but never executed
pass # pragma: no cover
async def __aenter__(self) -> "BaseTestServer":
await self.start_server(loop=self._loop)
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
await self.close()
class TestServer(BaseTestServer):
def __init__(
self,
app: Application,
*,
scheme: Union[str, object] = sentinel,
host: str = "127.0.0.1",
port: Optional[int] = None,
**kwargs: Any,
):
self.app = app
super().__init__(scheme=scheme, host=host, port=port, **kwargs)
async def _make_runner(self, **kwargs: Any) -> BaseRunner:
return AppRunner(self.app, **kwargs)
class RawTestServer(BaseTestServer):
def __init__(
self,
handler: _RequestHandler,
*,
scheme: Union[str, object] = sentinel,
host: str = "127.0.0.1",
port: Optional[int] = None,
**kwargs: Any,
) -> None:
self._handler = handler
super().__init__(scheme=scheme, host=host, port=port, **kwargs)
async def _make_runner(self, debug: bool = True, **kwargs: Any) -> ServerRunner:
srv = Server(self._handler, loop=self._loop, debug=debug, **kwargs)
return ServerRunner(srv, debug=debug, **kwargs)
class TestClient:
"""
    A test client implementation for writing
    functional tests against aiohttp-based servers.
"""
__test__ = False
def __init__(
self,
server: BaseTestServer,
*,
cookie_jar: Optional[AbstractCookieJar] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
**kwargs: Any,
) -> None:
if not isinstance(server, BaseTestServer):
raise TypeError(
"server must be TestServer " "instance, found type: %r" % type(server)
)
self._server = server
self._loop = loop
if cookie_jar is None:
cookie_jar = aiohttp.CookieJar(unsafe=True, loop=loop)
self._session = ClientSession(loop=loop, cookie_jar=cookie_jar, **kwargs)
self._closed = False
self._responses: List[ClientResponse] = []
self._websockets: List[ClientWebSocketResponse] = []
async def start_server(self) -> None:
await self._server.start_server(loop=self._loop)
@property
def host(self) -> str:
return self._server.host
@property
def port(self) -> Optional[int]:
return self._server.port
@property
def server(self) -> BaseTestServer:
return self._server
@property
def app(self) -> Optional[Application]:
return cast(Optional[Application], getattr(self._server, "app", None))
@property
def session(self) -> ClientSession:
"""An internal aiohttp.ClientSession.
Unlike the methods on the TestClient, client session requests
do not automatically include the host in the url queried, and
will require an absolute path to the resource.
"""
return self._session
def make_url(self, path: str) -> URL:
return self._server.make_url(path)
async def _request(self, method: str, path: str, **kwargs: Any) -> ClientResponse:
resp = await self._session.request(method, self.make_url(path), **kwargs)
# save it to close later
self._responses.append(resp)
return resp
def request(self, method: str, path: str, **kwargs: Any) -> _RequestContextManager:
"""Routes a request to tested http server.
The interface is identical to aiohttp.ClientSession.request,
except the loop kwarg is overridden by the instance used by the
test server.
"""
return _RequestContextManager(self._request(method, path, **kwargs))
def get(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP GET request."""
return _RequestContextManager(self._request(hdrs.METH_GET, path, **kwargs))
def post(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP POST request."""
return _RequestContextManager(self._request(hdrs.METH_POST, path, **kwargs))
def options(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP OPTIONS request."""
return _RequestContextManager(self._request(hdrs.METH_OPTIONS, path, **kwargs))
def head(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP HEAD request."""
return _RequestContextManager(self._request(hdrs.METH_HEAD, path, **kwargs))
def put(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP PUT request."""
return _RequestContextManager(self._request(hdrs.METH_PUT, path, **kwargs))
def patch(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP PATCH request."""
return _RequestContextManager(self._request(hdrs.METH_PATCH, path, **kwargs))
def delete(self, path: str, **kwargs: Any) -> _RequestContextManager:
"""Perform an HTTP PATCH request."""
return _RequestContextManager(self._request(hdrs.METH_DELETE, path, **kwargs))
def ws_connect(self, path: str, **kwargs: Any) -> _WSRequestContextManager:
"""Initiate websocket connection.
The api corresponds to aiohttp.ClientSession.ws_connect.
"""
return _WSRequestContextManager(self._ws_connect(path, **kwargs))
async def _ws_connect(self, path: str, **kwargs: Any) -> ClientWebSocketResponse:
ws = await self._session.ws_connect(self.make_url(path), **kwargs)
self._websockets.append(ws)
return ws
async def close(self) -> None:
"""Close all fixtures created by the test client.
After that point, the TestClient is no longer usable.
This is an idempotent function: running close multiple times
will not have any additional effects.
        close is also run on exit when used as an (asynchronous)
context manager.
"""
if not self._closed:
for resp in self._responses:
resp.close()
for ws in self._websockets:
await ws.close()
await self._session.close()
await self._server.close()
self._closed = True
def __enter__(self) -> None:
raise TypeError("Use async with instead")
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
# __exit__ should exist in pair with __enter__ but never executed
pass # pragma: no cover
async def __aenter__(self) -> "TestClient":
await self.start_server()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
await self.close()
class AioHTTPTestCase(TestCase):
"""A base class to allow for unittest web applications using aiohttp.
Provides the following:
* self.client (aiohttp.test_utils.TestClient): an aiohttp test client.
* self.loop (asyncio.BaseEventLoop): the event loop in which the
application and server are running.
* self.app (aiohttp.web.Application): the application returned by
self.get_application()
Note that the TestClient's methods are asynchronous: you have to
    execute functions on the test client using asynchronous methods.
"""
async def get_application(self) -> Application:
"""Get application.
This method should be overridden
to return the aiohttp.web.Application
object to test.
"""
return self.get_app()
def get_app(self) -> Application:
"""Obsolete method used to constructing web application.
Use .get_application() coroutine instead.
"""
raise RuntimeError("Did you forget to define get_application()?")
def setUp(self) -> None:
if not PY_38:
asyncio.get_event_loop().run_until_complete(self.asyncSetUp())
async def asyncSetUp(self) -> None:
try:
self.loop = asyncio.get_running_loop()
except (AttributeError, RuntimeError): # AttributeError->py36
self.loop = asyncio.get_event_loop_policy().get_event_loop()
return await self.setUpAsync()
async def setUpAsync(self) -> None:
self.app = await self.get_application()
self.server = await self.get_server(self.app)
self.client = await self.get_client(self.server)
await self.client.start_server()
def tearDown(self) -> None:
if not PY_38:
self.loop.run_until_complete(self.asyncTearDown())
async def asyncTearDown(self) -> None:
return await self.tearDownAsync()
async def tearDownAsync(self) -> None:
await self.client.close()
async def get_server(self, app: Application) -> TestServer:
"""Return a TestServer instance."""
return TestServer(app, loop=self.loop)
async def get_client(self, server: TestServer) -> TestClient:
"""Return a TestClient instance."""
return TestClient(server, loop=self.loop)
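# Usage sketch (an illustrative addition, not part of the original module):
# a minimal AioHTTPTestCase subclass; the handler and assertions below are
# hypothetical.
class _ExampleAioHTTPTestCase(AioHTTPTestCase):  # pragma: no cover
    async def get_application(self) -> Application:
        async def hello(request: Request) -> Any:
            from .web import Response  # local import to avoid an import cycle
            return Response(text="ok")
        app = Application()
        app.router.add_get("/", hello)
        return app
    async def test_hello(self) -> None:
        async with self.client.get("/") as resp:
            assert resp.status == 200
            assert await resp.text() == "ok"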
def unittest_run_loop(func: Any, *args: Any, **kwargs: Any) -> Any:
"""
    A decorator for use with asynchronous AioHTTPTestCase test methods.
    In aiohttp 3.8+, this does nothing.
"""
warnings.warn(
"Decorator `@unittest_run_loop` is no longer needed in aiohttp 3.8+",
DeprecationWarning,
stacklevel=2,
)
return func
_LOOP_FACTORY = Callable[[], asyncio.AbstractEventLoop]
@contextlib.contextmanager
def loop_context(
loop_factory: _LOOP_FACTORY = asyncio.new_event_loop, fast: bool = False
) -> Iterator[asyncio.AbstractEventLoop]:
"""A contextmanager that creates an event_loop, for test purposes.
Handles the creation and cleanup of a test loop.
"""
loop = setup_test_loop(loop_factory)
yield loop
teardown_test_loop(loop, fast=fast)
def setup_test_loop(
loop_factory: _LOOP_FACTORY = asyncio.new_event_loop,
) -> asyncio.AbstractEventLoop:
"""Create and return an asyncio.BaseEventLoop instance.
The caller should also call teardown_test_loop,
once they are done with the loop.
"""
loop = loop_factory()
try:
module = loop.__class__.__module__
skip_watcher = "uvloop" in module
except AttributeError: # pragma: no cover
# Just in case
skip_watcher = True
asyncio.set_event_loop(loop)
if sys.platform != "win32" and not skip_watcher:
policy = asyncio.get_event_loop_policy()
watcher: asyncio.AbstractChildWatcher
try: # Python >= 3.8
# Refs:
# * https://github.com/pytest-dev/pytest-xdist/issues/620
# * https://stackoverflow.com/a/58614689/595220
# * https://bugs.python.org/issue35621
# * https://github.com/python/cpython/pull/14344
watcher = asyncio.ThreadedChildWatcher()
except AttributeError: # Python < 3.8
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(loop)
with contextlib.suppress(NotImplementedError):
policy.set_child_watcher(watcher)
return loop
def teardown_test_loop(loop: asyncio.AbstractEventLoop, fast: bool = False) -> None:
"""Teardown and cleanup an event_loop created by setup_test_loop."""
closed = loop.is_closed()
if not closed:
loop.call_soon(loop.stop)
loop.run_forever()
loop.close()
if not fast:
gc.collect()
asyncio.set_event_loop(None)
def _create_app_mock() -> mock.MagicMock:
def get_dict(app: Any, key: str) -> Any:
return app.__app_dict[key]
def set_dict(app: Any, key: str, value: Any) -> None:
app.__app_dict[key] = value
app = mock.MagicMock(spec=Application)
app.__app_dict = {}
app.__getitem__ = get_dict
app.__setitem__ = set_dict
app._debug = False
app.on_response_prepare = Signal(app)
app.on_response_prepare.freeze()
return app
def _create_transport(sslcontext: Optional[SSLContext] = None) -> mock.Mock:
transport = mock.Mock()
def get_extra_info(key: str) -> Optional[SSLContext]:
if key == "sslcontext":
return sslcontext
else:
return None
transport.get_extra_info.side_effect = get_extra_info
return transport
def make_mocked_request(
method: str,
path: str,
headers: Any = None,
*,
match_info: Any = sentinel,
version: HttpVersion = HttpVersion(1, 1),
closing: bool = False,
app: Any = None,
writer: Any = sentinel,
protocol: Any = sentinel,
transport: Any = sentinel,
payload: Any = sentinel,
sslcontext: Optional[SSLContext] = None,
client_max_size: int = 1024**2,
loop: Any = ...,
) -> Request:
"""Creates mocked web.Request testing purposes.
Useful in unit tests, when spinning full web server is overkill or
specific conditions and errors are hard to trigger.
"""
task = mock.Mock()
if loop is ...:
loop = mock.Mock()
loop.create_future.return_value = ()
if version < HttpVersion(1, 1):
closing = True
if headers:
headers = CIMultiDictProxy(CIMultiDict(headers))
raw_hdrs = tuple(
(k.encode("utf-8"), v.encode("utf-8")) for k, v in headers.items()
)
else:
headers = CIMultiDictProxy(CIMultiDict())
raw_hdrs = ()
chunked = "chunked" in headers.get(hdrs.TRANSFER_ENCODING, "").lower()
message = RawRequestMessage(
method,
path,
version,
headers,
raw_hdrs,
closing,
None,
False,
chunked,
URL(path),
)
if app is None:
app = _create_app_mock()
if transport is sentinel:
transport = _create_transport(sslcontext)
if protocol is sentinel:
protocol = mock.Mock()
protocol.transport = transport
if writer is sentinel:
writer = mock.Mock()
writer.write_headers = make_mocked_coro(None)
writer.write = make_mocked_coro(None)
writer.write_eof = make_mocked_coro(None)
writer.drain = make_mocked_coro(None)
writer.transport = transport
protocol.transport = transport
protocol.writer = writer
if payload is sentinel:
payload = mock.Mock()
req = Request(
message, payload, protocol, writer, task, loop, client_max_size=client_max_size
)
match_info = UrlMappingMatchInfo(
{} if match_info is sentinel else match_info, mock.Mock()
)
match_info.add_app(app)
req._match_info = match_info
return req
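# Usage sketch (an illustrative addition, not part of the original module):
# building and inspecting a fake request without starting a server; the
# header name is hypothetical.
def _example_mocked_request() -> Request:  # pragma: no cover
    req = make_mocked_request("GET", "/path", headers={"token": "x"})
    assert req.method == "GET"
    assert req.headers["token"] == "x"
    return req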
def make_mocked_coro(
return_value: Any = sentinel, raise_exception: Any = sentinel
) -> Any:
"""Creates a coroutine mock."""
async def mock_coro(*args: Any, **kwargs: Any) -> Any:
if raise_exception is not sentinel:
raise raise_exception
if not inspect.isawaitable(return_value):
return return_value
await return_value
return mock.Mock(wraps=mock_coro)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/client_proto.py | import asyncio
from contextlib import suppress
from typing import Any, Optional, Tuple
from .base_protocol import BaseProtocol
from .client_exceptions import (
ClientOSError,
ClientPayloadError,
ServerDisconnectedError,
ServerTimeoutError,
)
from .helpers import BaseTimerContext
from .http import HttpResponseParser, RawResponseMessage
from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader
class ResponseHandler(BaseProtocol, DataQueue[Tuple[RawResponseMessage, StreamReader]]):
"""Helper class to adapt between Protocol and StreamReader."""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
BaseProtocol.__init__(self, loop=loop)
DataQueue.__init__(self, loop)
self._should_close = False
self._payload: Optional[StreamReader] = None
self._skip_payload = False
self._payload_parser = None
self._timer = None
self._tail = b""
self._upgraded = False
self._parser: Optional[HttpResponseParser] = None
self._read_timeout: Optional[float] = None
self._read_timeout_handle: Optional[asyncio.TimerHandle] = None
@property
def upgraded(self) -> bool:
return self._upgraded
@property
def should_close(self) -> bool:
        if (self._payload is not None and not self._payload.is_eof()) or self._upgraded:
return True
return (
self._should_close
or self._upgraded
or self.exception() is not None
or self._payload_parser is not None
or len(self) > 0
or bool(self._tail)
)
def force_close(self) -> None:
self._should_close = True
def close(self) -> None:
transport = self.transport
if transport is not None:
transport.close()
self.transport = None
self._payload = None
self._drop_timeout()
def is_connected(self) -> bool:
return self.transport is not None and not self.transport.is_closing()
def connection_lost(self, exc: Optional[BaseException]) -> None:
self._drop_timeout()
if self._payload_parser is not None:
with suppress(Exception):
self._payload_parser.feed_eof()
uncompleted = None
if self._parser is not None:
try:
uncompleted = self._parser.feed_eof()
except Exception:
if self._payload is not None:
self._payload.set_exception(
ClientPayloadError("Response payload is not completed")
)
if not self.is_eof():
if isinstance(exc, OSError):
exc = ClientOSError(*exc.args)
if exc is None:
exc = ServerDisconnectedError(uncompleted)
            # assigns self._should_close = True as a side effect;
            # we do it explicitly below anyway
self.set_exception(exc)
self._should_close = True
self._parser = None
self._payload = None
self._payload_parser = None
self._reading_paused = False
super().connection_lost(exc)
def eof_received(self) -> None:
# should call parser.feed_eof() most likely
self._drop_timeout()
def pause_reading(self) -> None:
super().pause_reading()
self._drop_timeout()
def resume_reading(self) -> None:
super().resume_reading()
self._reschedule_timeout()
def set_exception(self, exc: BaseException) -> None:
self._should_close = True
self._drop_timeout()
super().set_exception(exc)
def set_parser(self, parser: Any, payload: Any) -> None:
# TODO: actual types are:
# parser: WebSocketReader
# payload: FlowControlDataQueue
        # but they are not generic enough
# Need an ABC for both types
self._payload = payload
self._payload_parser = parser
self._drop_timeout()
if self._tail:
data, self._tail = self._tail, b""
self.data_received(data)
def set_response_params(
self,
*,
timer: Optional[BaseTimerContext] = None,
skip_payload: bool = False,
read_until_eof: bool = False,
auto_decompress: bool = True,
read_timeout: Optional[float] = None,
read_bufsize: int = 2**16,
) -> None:
self._skip_payload = skip_payload
self._read_timeout = read_timeout
self._reschedule_timeout()
self._parser = HttpResponseParser(
self,
self._loop,
read_bufsize,
timer=timer,
payload_exception=ClientPayloadError,
response_with_body=not skip_payload,
read_until_eof=read_until_eof,
auto_decompress=auto_decompress,
)
if self._tail:
data, self._tail = self._tail, b""
self.data_received(data)
def _drop_timeout(self) -> None:
if self._read_timeout_handle is not None:
self._read_timeout_handle.cancel()
self._read_timeout_handle = None
def _reschedule_timeout(self) -> None:
timeout = self._read_timeout
if self._read_timeout_handle is not None:
self._read_timeout_handle.cancel()
if timeout:
self._read_timeout_handle = self._loop.call_later(
timeout, self._on_read_timeout
)
else:
self._read_timeout_handle = None
def _on_read_timeout(self) -> None:
exc = ServerTimeoutError("Timeout on reading data from socket")
self.set_exception(exc)
if self._payload is not None:
self._payload.set_exception(exc)
def data_received(self, data: bytes) -> None:
self._reschedule_timeout()
if not data:
return
# custom payload parser
if self._payload_parser is not None:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self._payload = None
self._payload_parser = None
if tail:
self.data_received(tail)
return
else:
if self._upgraded or self._parser is None:
# i.e. websocket connection, websocket parser is not set yet
self._tail += data
else:
# parse http messages
try:
messages, upgraded, tail = self._parser.feed_data(data)
except BaseException as exc:
if self.transport is not None:
                        # connection.release() could be called BEFORE
                        # data_received(); in that case the transport
                        # is already closed
self.transport.close()
# should_close is True after the call
self.set_exception(exc)
return
self._upgraded = upgraded
payload: Optional[StreamReader] = None
for message, payload in messages:
if message.should_close:
self._should_close = True
self._payload = payload
if self._skip_payload or message.code in (204, 304):
self.feed_data((message, EMPTY_PAYLOAD), 0)
else:
self.feed_data((message, payload), 0)
if payload is not None:
                # new message(s) were processed;
                # unsubscribe the timeout handler either on
                # end-of-stream or immediately for EMPTY_PAYLOAD
if payload is not EMPTY_PAYLOAD:
payload.on_eof(self._drop_timeout)
else:
self._drop_timeout()
if tail:
if upgraded:
self.data_received(tail)
else:
self._tail = tail
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/pytest_plugin.py | import asyncio
import contextlib
import warnings
from typing import Any, Awaitable, Callable, Dict, Generator, Optional, Union
import pytest
from aiohttp.helpers import PY_37, isasyncgenfunction
from aiohttp.web import Application
from .test_utils import (
BaseTestServer,
RawTestServer,
TestClient,
TestServer,
loop_context,
setup_test_loop,
teardown_test_loop,
unused_port as _unused_port,
)
try:
import uvloop
except ImportError: # pragma: no cover
uvloop = None
try:
import tokio
except ImportError: # pragma: no cover
tokio = None
AiohttpClient = Callable[[Union[Application, BaseTestServer]], Awaitable[TestClient]]
def pytest_addoption(parser): # type: ignore[no-untyped-def]
parser.addoption(
"--aiohttp-fast",
action="store_true",
default=False,
help="run tests faster by disabling extra checks",
)
parser.addoption(
"--aiohttp-loop",
action="store",
default="pyloop",
help="run tests with specific loop: pyloop, uvloop, tokio or all",
)
parser.addoption(
"--aiohttp-enable-loop-debug",
action="store_true",
default=False,
help="enable event loop debug mode",
)
def pytest_fixture_setup(fixturedef): # type: ignore[no-untyped-def]
"""Set up pytest fixture.
Allow fixtures to be coroutines. Run coroutine fixtures in an event loop.
"""
func = fixturedef.func
if isasyncgenfunction(func):
# async generator fixture
is_async_gen = True
elif asyncio.iscoroutinefunction(func):
# regular async fixture
is_async_gen = False
else:
# not an async fixture, nothing to do
return
strip_request = False
if "request" not in fixturedef.argnames:
fixturedef.argnames += ("request",)
strip_request = True
def wrapper(*args, **kwargs): # type: ignore[no-untyped-def]
request = kwargs["request"]
if strip_request:
del kwargs["request"]
# if neither the fixture nor the test use the 'loop' fixture,
# 'getfixturevalue' will fail because the test is not parameterized
# (this can be removed someday if 'loop' is no longer parameterized)
if "loop" not in request.fixturenames:
raise Exception(
"Asynchronous fixtures must depend on the 'loop' fixture or "
"be used in tests depending from it."
)
_loop = request.getfixturevalue("loop")
if is_async_gen:
# for async generators, we need to advance the generator once,
# then advance it again in a finalizer
gen = func(*args, **kwargs)
def finalizer(): # type: ignore[no-untyped-def]
try:
return _loop.run_until_complete(gen.__anext__())
except StopAsyncIteration:
pass
request.addfinalizer(finalizer)
return _loop.run_until_complete(gen.__anext__())
else:
return _loop.run_until_complete(func(*args, **kwargs))
fixturedef.func = wrapper
@pytest.fixture
def fast(request): # type: ignore[no-untyped-def]
"""--fast config option"""
return request.config.getoption("--aiohttp-fast")
@pytest.fixture
def loop_debug(request): # type: ignore[no-untyped-def]
"""--enable-loop-debug config option"""
return request.config.getoption("--aiohttp-enable-loop-debug")
@contextlib.contextmanager
def _runtime_warning_context(): # type: ignore[no-untyped-def]
"""Context manager which checks for RuntimeWarnings.
This exists specifically to
avoid "coroutine 'X' was never awaited" warnings being missed.
If RuntimeWarnings occur in the context a RuntimeError is raised.
"""
with warnings.catch_warnings(record=True) as _warnings:
yield
rw = [
"{w.filename}:{w.lineno}:{w.message}".format(w=w)
for w in _warnings
if w.category == RuntimeWarning
]
if rw:
raise RuntimeError(
"{} Runtime Warning{},\n{}".format(
len(rw), "" if len(rw) == 1 else "s", "\n".join(rw)
)
)
@contextlib.contextmanager
def _passthrough_loop_context(loop, fast=False): # type: ignore[no-untyped-def]
"""Passthrough loop context.
    Sets up and tears down a loop unless one is passed in via the loop
    argument, in which case it is passed straight through.
"""
if loop:
# loop already exists, pass it straight through
yield loop
else:
# this shadows loop_context's standard behavior
loop = setup_test_loop()
yield loop
teardown_test_loop(loop, fast=fast)
def pytest_pycollect_makeitem(collector, name, obj): # type: ignore[no-untyped-def]
"""Fix pytest collecting for coroutines."""
if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj):
return list(collector._genfunctions(name, obj))
def pytest_pyfunc_call(pyfuncitem): # type: ignore[no-untyped-def]
"""Run coroutines in an event loop instead of a normal function call."""
fast = pyfuncitem.config.getoption("--aiohttp-fast")
if asyncio.iscoroutinefunction(pyfuncitem.function):
existing_loop = pyfuncitem.funcargs.get(
"proactor_loop"
) or pyfuncitem.funcargs.get("loop", None)
with _runtime_warning_context():
with _passthrough_loop_context(existing_loop, fast=fast) as _loop:
testargs = {
arg: pyfuncitem.funcargs[arg]
for arg in pyfuncitem._fixtureinfo.argnames
}
_loop.run_until_complete(pyfuncitem.obj(**testargs))
return True
def pytest_generate_tests(metafunc): # type: ignore[no-untyped-def]
if "loop_factory" not in metafunc.fixturenames:
return
loops = metafunc.config.option.aiohttp_loop
avail_factories = {"pyloop": asyncio.DefaultEventLoopPolicy}
if uvloop is not None: # pragma: no cover
avail_factories["uvloop"] = uvloop.EventLoopPolicy
if tokio is not None: # pragma: no cover
avail_factories["tokio"] = tokio.EventLoopPolicy
if loops == "all":
loops = "pyloop,uvloop?,tokio?"
factories = {} # type: ignore[var-annotated]
for name in loops.split(","):
required = not name.endswith("?")
name = name.strip(" ?")
if name not in avail_factories: # pragma: no cover
if required:
raise ValueError(
"Unknown loop '%s', available loops: %s"
                    % (name, list(avail_factories.keys()))
)
else:
continue
factories[name] = avail_factories[name]
metafunc.parametrize(
"loop_factory", list(factories.values()), ids=list(factories.keys())
)
@pytest.fixture
def loop(loop_factory, fast, loop_debug): # type: ignore[no-untyped-def]
"""Return an instance of the event loop."""
policy = loop_factory()
asyncio.set_event_loop_policy(policy)
with loop_context(fast=fast) as _loop:
if loop_debug:
_loop.set_debug(True) # pragma: no cover
asyncio.set_event_loop(_loop)
yield _loop
@pytest.fixture
def proactor_loop(): # type: ignore[no-untyped-def]
if not PY_37:
policy = asyncio.get_event_loop_policy()
policy._loop_factory = asyncio.ProactorEventLoop # type: ignore[attr-defined]
else:
policy = asyncio.WindowsProactorEventLoopPolicy() # type: ignore[attr-defined]
asyncio.set_event_loop_policy(policy)
with loop_context(policy.new_event_loop) as _loop:
asyncio.set_event_loop(_loop)
yield _loop
@pytest.fixture
def unused_port(aiohttp_unused_port): # type: ignore[no-untyped-def] # pragma: no cover
warnings.warn(
"Deprecated, use aiohttp_unused_port fixture instead",
DeprecationWarning,
stacklevel=2,
)
return aiohttp_unused_port
@pytest.fixture
def aiohttp_unused_port(): # type: ignore[no-untyped-def]
"""Return a port that is unused on the current host."""
return _unused_port
@pytest.fixture
def aiohttp_server(loop): # type: ignore[no-untyped-def]
"""Factory to create a TestServer instance, given an app.
aiohttp_server(app, **kwargs)
"""
servers = []
async def go(app, *, port=None, **kwargs): # type: ignore[no-untyped-def]
server = TestServer(app, port=port)
await server.start_server(loop=loop, **kwargs)
servers.append(server)
return server
yield go
async def finalize() -> None:
while servers:
await servers.pop().close()
loop.run_until_complete(finalize())
@pytest.fixture
def test_server(aiohttp_server): # type: ignore[no-untyped-def] # pragma: no cover
warnings.warn(
"Deprecated, use aiohttp_server fixture instead",
DeprecationWarning,
stacklevel=2,
)
return aiohttp_server
@pytest.fixture
def aiohttp_raw_server(loop): # type: ignore[no-untyped-def]
"""Factory to create a RawTestServer instance, given a web handler.
aiohttp_raw_server(handler, **kwargs)
"""
servers = []
async def go(handler, *, port=None, **kwargs): # type: ignore[no-untyped-def]
server = RawTestServer(handler, port=port)
await server.start_server(loop=loop, **kwargs)
servers.append(server)
return server
yield go
async def finalize() -> None:
while servers:
await servers.pop().close()
loop.run_until_complete(finalize())
@pytest.fixture
def raw_test_server( # type: ignore[no-untyped-def] # pragma: no cover
aiohttp_raw_server,
):
warnings.warn(
"Deprecated, use aiohttp_raw_server fixture instead",
DeprecationWarning,
stacklevel=2,
)
return aiohttp_raw_server
@pytest.fixture
def aiohttp_client(
loop: asyncio.AbstractEventLoop,
) -> Generator[AiohttpClient, None, None]:
"""Factory to create a TestClient instance.
aiohttp_client(app, **kwargs)
aiohttp_client(server, **kwargs)
aiohttp_client(raw_server, **kwargs)
"""
clients = []
async def go(
__param: Union[Application, BaseTestServer],
*args: Any,
server_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any
) -> TestClient:
if isinstance(__param, Callable) and not isinstance( # type: ignore[arg-type]
__param, (Application, BaseTestServer)
):
__param = __param(loop, *args, **kwargs)
kwargs = {}
else:
assert not args, "args should be empty"
if isinstance(__param, Application):
server_kwargs = server_kwargs or {}
server = TestServer(__param, loop=loop, **server_kwargs)
client = TestClient(server, loop=loop, **kwargs)
elif isinstance(__param, BaseTestServer):
client = TestClient(__param, loop=loop, **kwargs)
else:
raise ValueError("Unknown argument type: %r" % type(__param))
await client.start_server()
clients.append(client)
return client
yield go
async def finalize() -> None:
while clients:
await clients.pop().close()
loop.run_until_complete(finalize())
@pytest.fixture
def test_client(aiohttp_client): # type: ignore[no-untyped-def] # pragma: no cover
warnings.warn(
"Deprecated, use aiohttp_client fixture instead",
DeprecationWarning,
stacklevel=2,
)
return aiohttp_client
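# Illustrative usage sketch (not part of this module): a minimal test built
# on the fixtures above. The handler and route are hypothetical.
#
#     from aiohttp import web
#
#     async def hello(request):
#         return web.Response(text="Hello")
#
#     async def test_hello(aiohttp_client):
#         app = web.Application()
#         app.router.add_get("/", hello)
#         client = await aiohttp_client(app)
#         resp = await client.get("/")
#         assert resp.status == 200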
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_app.py | import asyncio
import logging
import warnings
from functools import partial, update_wrapper
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Awaitable,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from aiosignal import Signal
from frozenlist import FrozenList
from . import hdrs
from .abc import (
AbstractAccessLogger,
AbstractMatchInfo,
AbstractRouter,
AbstractStreamWriter,
)
from .helpers import DEBUG
from .http_parser import RawRequestMessage
from .log import web_logger
from .streams import StreamReader
from .web_log import AccessLogger
from .web_middlewares import _fix_request_current_app
from .web_protocol import RequestHandler
from .web_request import Request
from .web_response import StreamResponse
from .web_routedef import AbstractRouteDef
from .web_server import Server
from .web_urldispatcher import (
AbstractResource,
AbstractRoute,
Domain,
MaskDomain,
MatchedSubAppResource,
PrefixedSubAppResource,
UrlDispatcher,
)
__all__ = ("Application", "CleanupError")
if TYPE_CHECKING: # pragma: no cover
from .typedefs import Handler
_AppSignal = Signal[Callable[["Application"], Awaitable[None]]]
_RespPrepareSignal = Signal[Callable[[Request, StreamResponse], Awaitable[None]]]
_Middleware = Union[
Callable[[Request, Handler], Awaitable[StreamResponse]],
Callable[["Application", Handler], Awaitable[Handler]], # old-style
]
_Middlewares = FrozenList[_Middleware]
_MiddlewaresHandlers = Optional[Sequence[Tuple[_Middleware, bool]]]
_Subapps = List["Application"]
else:
# No type checker mode, skip types
_AppSignal = Signal
_RespPrepareSignal = Signal
_Middleware = Callable
_Middlewares = FrozenList
_MiddlewaresHandlers = Optional[Sequence]
_Subapps = List
class Application(MutableMapping[str, Any]):
ATTRS = frozenset(
[
"logger",
"_debug",
"_router",
"_loop",
"_handler_args",
"_middlewares",
"_middlewares_handlers",
"_run_middlewares",
"_state",
"_frozen",
"_pre_frozen",
"_subapps",
"_on_response_prepare",
"_on_startup",
"_on_shutdown",
"_on_cleanup",
"_client_max_size",
"_cleanup_ctx",
]
)
def __init__(
self,
*,
logger: logging.Logger = web_logger,
router: Optional[UrlDispatcher] = None,
middlewares: Iterable[_Middleware] = (),
handler_args: Optional[Mapping[str, Any]] = None,
client_max_size: int = 1024**2,
loop: Optional[asyncio.AbstractEventLoop] = None,
debug: Any = ..., # mypy doesn't support ellipsis
) -> None:
if router is None:
router = UrlDispatcher()
else:
warnings.warn(
"router argument is deprecated", DeprecationWarning, stacklevel=2
)
assert isinstance(router, AbstractRouter), router
if loop is not None:
warnings.warn(
"loop argument is deprecated", DeprecationWarning, stacklevel=2
)
if debug is not ...:
warnings.warn(
"debug argument is deprecated", DeprecationWarning, stacklevel=2
)
self._debug = debug
self._router: UrlDispatcher = router
self._loop = loop
self._handler_args = handler_args
self.logger = logger
self._middlewares: _Middlewares = FrozenList(middlewares)
# initialized on freezing
self._middlewares_handlers: _MiddlewaresHandlers = None
# initialized on freezing
self._run_middlewares: Optional[bool] = None
self._state: Dict[str, Any] = {}
self._frozen = False
self._pre_frozen = False
self._subapps: _Subapps = []
self._on_response_prepare: _RespPrepareSignal = Signal(self)
self._on_startup: _AppSignal = Signal(self)
self._on_shutdown: _AppSignal = Signal(self)
self._on_cleanup: _AppSignal = Signal(self)
self._cleanup_ctx = CleanupContext()
self._on_startup.append(self._cleanup_ctx._on_startup)
self._on_cleanup.append(self._cleanup_ctx._on_cleanup)
self._client_max_size = client_max_size
def __init_subclass__(cls: Type["Application"]) -> None:
warnings.warn(
"Inheritance class {} from web.Application "
"is discouraged".format(cls.__name__),
DeprecationWarning,
stacklevel=2,
)
if DEBUG: # pragma: no cover
def __setattr__(self, name: str, val: Any) -> None:
if name not in self.ATTRS:
warnings.warn(
"Setting custom web.Application.{} attribute "
"is discouraged".format(name),
DeprecationWarning,
stacklevel=2,
)
super().__setattr__(name, val)
# MutableMapping API
def __eq__(self, other: object) -> bool:
return self is other
def __getitem__(self, key: str) -> Any:
return self._state[key]
def _check_frozen(self) -> None:
if self._frozen:
warnings.warn(
"Changing state of started or joined " "application is deprecated",
DeprecationWarning,
stacklevel=3,
)
def __setitem__(self, key: str, value: Any) -> None:
self._check_frozen()
self._state[key] = value
def __delitem__(self, key: str) -> None:
self._check_frozen()
del self._state[key]
def __len__(self) -> int:
return len(self._state)
def __iter__(self) -> Iterator[str]:
return iter(self._state)
########
@property
def loop(self) -> asyncio.AbstractEventLoop:
        # Technically the loop can be None
        # but we mask it with an explicit type cast
        # to provide a more convenient type annotation
warnings.warn("loop property is deprecated", DeprecationWarning, stacklevel=2)
return cast(asyncio.AbstractEventLoop, self._loop)
def _set_loop(self, loop: Optional[asyncio.AbstractEventLoop]) -> None:
if loop is None:
loop = asyncio.get_event_loop()
if self._loop is not None and self._loop is not loop:
raise RuntimeError(
"web.Application instance initialized with different loop"
)
self._loop = loop
# set loop debug
if self._debug is ...:
self._debug = loop.get_debug()
# set loop to sub applications
for subapp in self._subapps:
subapp._set_loop(loop)
@property
def pre_frozen(self) -> bool:
return self._pre_frozen
def pre_freeze(self) -> None:
if self._pre_frozen:
return
self._pre_frozen = True
self._middlewares.freeze()
self._router.freeze()
self._on_response_prepare.freeze()
self._cleanup_ctx.freeze()
self._on_startup.freeze()
self._on_shutdown.freeze()
self._on_cleanup.freeze()
self._middlewares_handlers = tuple(self._prepare_middleware())
        # If neither the current app nor any subapp has middlewares, avoid
        # running the middleware-dispatch code path entirely. That path
        # hardcodes one middleware per app to set up the current_app
        # attribute; when no middlewares are configured, the handler receives
        # the proper current_app without any of that machinery.
        self._run_middlewares = bool(self.middlewares)
for subapp in self._subapps:
subapp.pre_freeze()
self._run_middlewares = self._run_middlewares or subapp._run_middlewares
@property
def frozen(self) -> bool:
return self._frozen
def freeze(self) -> None:
if self._frozen:
return
self.pre_freeze()
self._frozen = True
for subapp in self._subapps:
subapp.freeze()
@property
def debug(self) -> bool:
warnings.warn("debug property is deprecated", DeprecationWarning, stacklevel=2)
return self._debug # type: ignore[no-any-return]
def _reg_subapp_signals(self, subapp: "Application") -> None:
def reg_handler(signame: str) -> None:
subsig = getattr(subapp, signame)
async def handler(app: "Application") -> None:
await subsig.send(subapp)
appsig = getattr(self, signame)
appsig.append(handler)
reg_handler("on_startup")
reg_handler("on_shutdown")
reg_handler("on_cleanup")
def add_subapp(self, prefix: str, subapp: "Application") -> AbstractResource:
if not isinstance(prefix, str):
raise TypeError("Prefix must be str")
prefix = prefix.rstrip("/")
if not prefix:
raise ValueError("Prefix cannot be empty")
factory = partial(PrefixedSubAppResource, prefix, subapp)
return self._add_subapp(factory, subapp)
def _add_subapp(
self, resource_factory: Callable[[], AbstractResource], subapp: "Application"
) -> AbstractResource:
if self.frozen:
raise RuntimeError("Cannot add sub application to frozen application")
if subapp.frozen:
raise RuntimeError("Cannot add frozen application")
resource = resource_factory()
self.router.register_resource(resource)
self._reg_subapp_signals(subapp)
self._subapps.append(subapp)
subapp.pre_freeze()
if self._loop is not None:
subapp._set_loop(self._loop)
return resource
def add_domain(self, domain: str, subapp: "Application") -> AbstractResource:
if not isinstance(domain, str):
raise TypeError("Domain must be str")
elif "*" in domain:
rule: Domain = MaskDomain(domain)
else:
rule = Domain(domain)
factory = partial(MatchedSubAppResource, rule, subapp)
return self._add_subapp(factory, subapp)
def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]:
return self.router.add_routes(routes)
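    # Illustrative sketch (not part of the original source): wiring sub
    # applications with the helpers above. All names are hypothetical.
    #
    #     admin = Application()
    #     admin.router.add_get("/dashboard", dashboard_handler)
    #     app.add_subapp("/admin", admin)          # prefix-based routing
    #     app.add_domain("api.example.com", api)   # virtual-host routing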
@property
def on_response_prepare(self) -> _RespPrepareSignal:
return self._on_response_prepare
@property
def on_startup(self) -> _AppSignal:
return self._on_startup
@property
def on_shutdown(self) -> _AppSignal:
return self._on_shutdown
@property
def on_cleanup(self) -> _AppSignal:
return self._on_cleanup
@property
def cleanup_ctx(self) -> "CleanupContext":
return self._cleanup_ctx
@property
def router(self) -> UrlDispatcher:
return self._router
@property
def middlewares(self) -> _Middlewares:
return self._middlewares
def _make_handler(
self,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
**kwargs: Any,
) -> Server:
if not issubclass(access_log_class, AbstractAccessLogger):
raise TypeError(
"access_log_class must be subclass of "
"aiohttp.abc.AbstractAccessLogger, got {}".format(access_log_class)
)
self._set_loop(loop)
self.freeze()
kwargs["debug"] = self._debug
kwargs["access_log_class"] = access_log_class
if self._handler_args:
for k, v in self._handler_args.items():
kwargs[k] = v
return Server(
self._handle, # type: ignore[arg-type]
request_factory=self._make_request,
loop=self._loop,
**kwargs,
)
def make_handler(
self,
*,
loop: Optional[asyncio.AbstractEventLoop] = None,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
**kwargs: Any,
) -> Server:
warnings.warn(
"Application.make_handler(...) is deprecated, " "use AppRunner API instead",
DeprecationWarning,
stacklevel=2,
)
return self._make_handler(
loop=loop, access_log_class=access_log_class, **kwargs
)
async def startup(self) -> None:
"""Causes on_startup signal
Should be called in the event loop along with the request handler.
"""
await self.on_startup.send(self)
async def shutdown(self) -> None:
"""Causes on_shutdown signal
Should be called before cleanup()
"""
await self.on_shutdown.send(self)
async def cleanup(self) -> None:
"""Causes on_cleanup signal
Should be called after shutdown()
"""
if self.on_cleanup.frozen:
await self.on_cleanup.send(self)
else:
# If an exception occurs in startup, ensure cleanup contexts are completed.
await self._cleanup_ctx._on_cleanup(self)
def _make_request(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: RequestHandler,
writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
_cls: Type[Request] = Request,
) -> Request:
return _cls(
message,
payload,
protocol,
writer,
task,
self._loop,
client_max_size=self._client_max_size,
)
def _prepare_middleware(self) -> Iterator[Tuple[_Middleware, bool]]:
for m in reversed(self._middlewares):
if getattr(m, "__middleware_version__", None) == 1:
yield m, True
else:
warnings.warn(
                    'old-style middleware {!r} deprecated, see #2252'.format(m),
DeprecationWarning,
stacklevel=2,
)
yield m, False
yield _fix_request_current_app(self), True
async def _handle(self, request: Request) -> StreamResponse:
loop = asyncio.get_event_loop()
debug = loop.get_debug()
match_info = await self._router.resolve(request)
if debug: # pragma: no cover
if not isinstance(match_info, AbstractMatchInfo):
raise TypeError(
"match_info should be AbstractMatchInfo "
"instance, not {!r}".format(match_info)
)
match_info.add_app(self)
match_info.freeze()
resp = None
request._match_info = match_info
expect = request.headers.get(hdrs.EXPECT)
if expect:
resp = await match_info.expect_handler(request)
await request.writer.drain()
if resp is None:
handler = match_info.handler
if self._run_middlewares:
for app in match_info.apps[::-1]:
for m, new_style in app._middlewares_handlers: # type: ignore[union-attr] # noqa
if new_style:
handler = update_wrapper(
partial(m, handler=handler), handler
)
else:
handler = await m(app, handler) # type: ignore[arg-type]
resp = await handler(request)
return resp
def __call__(self) -> "Application":
"""gunicorn compatibility"""
return self
def __repr__(self) -> str:
return f"<Application 0x{id(self):x}>"
def __bool__(self) -> bool:
return True
class CleanupError(RuntimeError):
@property
def exceptions(self) -> List[BaseException]:
return cast(List[BaseException], self.args[1])
if TYPE_CHECKING: # pragma: no cover
_CleanupContextBase = FrozenList[Callable[[Application], AsyncIterator[None]]]
else:
_CleanupContextBase = FrozenList
class CleanupContext(_CleanupContextBase):
def __init__(self) -> None:
super().__init__()
self._exits: List[AsyncIterator[None]] = []
async def _on_startup(self, app: Application) -> None:
for cb in self:
it = cb(app).__aiter__()
await it.__anext__()
self._exits.append(it)
async def _on_cleanup(self, app: Application) -> None:
errors = []
for it in reversed(self._exits):
try:
await it.__anext__()
except StopAsyncIteration:
pass
except Exception as exc:
errors.append(exc)
else:
errors.append(RuntimeError(f"{it!r} has more than one 'yield'"))
if errors:
if len(errors) == 1:
raise errors[0]
else:
raise CleanupError("Multiple errors on cleanup stage", errors)
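# Illustrative sketch (not part of the original source): the cleanup-context
# protocol enforced by CleanupContext above. Each callback is an async
# generator with exactly one yield; code before the yield runs on startup,
# code after it runs on cleanup. The database helpers are hypothetical.
#
#     async def database_ctx(app: Application) -> AsyncIterator[None]:
#         app["db"] = await connect_db()   # startup phase
#         yield
#         await app["db"].close()          # cleanup phase
#
#     app = Application()
#     app.cleanup_ctx.append(database_ctx)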
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http_writer.py | """Http related parsers and protocol."""
import asyncio
import zlib
from typing import Any, Awaitable, Callable, NamedTuple, Optional, Union # noqa
from multidict import CIMultiDict
from .abc import AbstractStreamWriter
from .base_protocol import BaseProtocol
from .helpers import NO_EXTENSIONS
__all__ = ("StreamWriter", "HttpVersion", "HttpVersion10", "HttpVersion11")
class HttpVersion(NamedTuple):
major: int
minor: int
HttpVersion10 = HttpVersion(1, 0)
HttpVersion11 = HttpVersion(1, 1)
_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]
_T_OnHeadersSent = Optional[Callable[["CIMultiDict[str]"], Awaitable[None]]]
class StreamWriter(AbstractStreamWriter):
def __init__(
self,
protocol: BaseProtocol,
loop: asyncio.AbstractEventLoop,
on_chunk_sent: _T_OnChunkSent = None,
on_headers_sent: _T_OnHeadersSent = None,
) -> None:
self._protocol = protocol
self._transport = protocol.transport
self.loop = loop
self.length = None
self.chunked = False
self.buffer_size = 0
self.output_size = 0
self._eof = False
self._compress: Any = None
self._drain_waiter = None
self._on_chunk_sent: _T_OnChunkSent = on_chunk_sent
self._on_headers_sent: _T_OnHeadersSent = on_headers_sent
@property
def transport(self) -> Optional[asyncio.Transport]:
return self._transport
@property
def protocol(self) -> BaseProtocol:
return self._protocol
def enable_chunking(self) -> None:
self.chunked = True
def enable_compression(
self, encoding: str = "deflate", strategy: int = zlib.Z_DEFAULT_STRATEGY
) -> None:
zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
self._compress = zlib.compressobj(wbits=zlib_mode, strategy=strategy)
def _write(self, chunk: bytes) -> None:
size = len(chunk)
self.buffer_size += size
self.output_size += size
if self._transport is None or self._transport.is_closing():
raise ConnectionResetError("Cannot write to closing transport")
self._transport.write(chunk)
async def write(
self, chunk: bytes, *, drain: bool = True, LIMIT: int = 0x10000
) -> None:
"""Writes chunk of data to a stream.
write_eof() indicates end of stream.
writer can't be used after write_eof() method being called.
write() return drain future.
"""
if self._on_chunk_sent is not None:
await self._on_chunk_sent(chunk)
if isinstance(chunk, memoryview):
if chunk.nbytes != len(chunk):
# just reshape it
chunk = chunk.cast("c")
if self._compress is not None:
chunk = self._compress.compress(chunk)
if not chunk:
return
if self.length is not None:
chunk_len = len(chunk)
if self.length >= chunk_len:
self.length = self.length - chunk_len
else:
chunk = chunk[: self.length]
self.length = 0
if not chunk:
return
if chunk:
if self.chunked:
chunk_len_pre = ("%x\r\n" % len(chunk)).encode("ascii")
chunk = chunk_len_pre + chunk + b"\r\n"
self._write(chunk)
if self.buffer_size > LIMIT and drain:
self.buffer_size = 0
await self.drain()
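    # Worked example (illustrative, not from the original source): with
    # chunking enabled, ``await writer.write(b"hello")`` puts the
    # length-prefixed frame on the wire:
    #
    #     b"5\r\nhello\r\n"
    #
    # and a later write_eof() terminates the stream with b"0\r\n\r\n".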
async def write_headers(
self, status_line: str, headers: "CIMultiDict[str]"
) -> None:
"""Write request/response status and headers."""
if self._on_headers_sent is not None:
await self._on_headers_sent(headers)
# status + headers
buf = _serialize_headers(status_line, headers)
self._write(buf)
async def write_eof(self, chunk: bytes = b"") -> None:
if self._eof:
return
if chunk and self._on_chunk_sent is not None:
await self._on_chunk_sent(chunk)
if self._compress:
if chunk:
chunk = self._compress.compress(chunk)
chunk = chunk + self._compress.flush()
if chunk and self.chunked:
chunk_len = ("%x\r\n" % len(chunk)).encode("ascii")
chunk = chunk_len + chunk + b"\r\n0\r\n\r\n"
else:
if self.chunked:
if chunk:
chunk_len = ("%x\r\n" % len(chunk)).encode("ascii")
chunk = chunk_len + chunk + b"\r\n0\r\n\r\n"
else:
chunk = b"0\r\n\r\n"
if chunk:
self._write(chunk)
await self.drain()
self._eof = True
self._transport = None
async def drain(self) -> None:
"""Flush the write buffer.
The intended use is to write
await w.write(data)
await w.drain()
"""
if self._protocol.transport is not None:
await self._protocol._drain_helper()
def _safe_header(string: str) -> str:
if "\r" in string or "\n" in string:
raise ValueError(
"Newline or carriage return detected in headers. "
"Potential header injection attack."
)
return string
def _py_serialize_headers(status_line: str, headers: "CIMultiDict[str]") -> bytes:
headers_gen = (_safe_header(k) + ": " + _safe_header(v) for k, v in headers.items())
line = status_line + "\r\n" + "\r\n".join(headers_gen) + "\r\n\r\n"
return line.encode("utf-8")
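# Worked example (illustrative, not from the original source): for a status
# line and two headers, _py_serialize_headers produces the CRLF-joined
# HTTP/1.1 header block, terminated by a blank line:
#
#     _py_serialize_headers(
#         "HTTP/1.1 200 OK",
#         CIMultiDict({"Content-Type": "text/plain", "Content-Length": "5"}),
#     )
#     # -> b"HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n"
#     #    b"Content-Length: 5\r\n\r\n"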
_serialize_headers = _py_serialize_headers
try:
import aiohttp._http_writer as _http_writer # type: ignore[import]
_c_serialize_headers = _http_writer._serialize_headers
if not NO_EXTENSIONS:
_serialize_headers = _c_serialize_headers
except ImportError:
pass
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/base_protocol.py | import asyncio
from typing import Optional, cast
from .tcp_helpers import tcp_nodelay
class BaseProtocol(asyncio.Protocol):
__slots__ = (
"_loop",
"_paused",
"_drain_waiter",
"_connection_lost",
"_reading_paused",
"transport",
)
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop: asyncio.AbstractEventLoop = loop
self._paused = False
self._drain_waiter: Optional[asyncio.Future[None]] = None
self._connection_lost = False
self._reading_paused = False
self.transport: Optional[asyncio.Transport] = None
def pause_writing(self) -> None:
assert not self._paused
self._paused = True
def resume_writing(self) -> None:
assert self._paused
self._paused = False
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
def pause_reading(self) -> None:
if not self._reading_paused and self.transport is not None:
try:
self.transport.pause_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = True
def resume_reading(self) -> None:
if self._reading_paused and self.transport is not None:
try:
self.transport.resume_reading()
except (AttributeError, NotImplementedError, RuntimeError):
pass
self._reading_paused = False
def connection_made(self, transport: asyncio.BaseTransport) -> None:
tr = cast(asyncio.Transport, transport)
tcp_nodelay(tr, True)
self.transport = tr
def connection_lost(self, exc: Optional[BaseException]) -> None:
self._connection_lost = True
# Wake up the writer if currently paused.
self.transport = None
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
return
self._drain_waiter = None
if waiter.done():
return
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
async def _drain_helper(self) -> None:
if self._connection_lost:
raise ConnectionResetError("Connection lost")
if not self._paused:
return
waiter = self._drain_waiter
if waiter is None:
waiter = self._loop.create_future()
self._drain_waiter = waiter
await asyncio.shield(waiter)
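# Illustrative sketch (not part of the original source): the write
# flow-control handshake implemented above. asyncio calls pause_writing()
# when the transport buffer passes its high-water mark; a writer that then
# awaits _drain_helper() blocks on the _drain_waiter future until
# resume_writing() (or connection_lost()) resolves it.
#
#     proto = BaseProtocol(loop)
#     proto.pause_writing()                           # buffer full
#     task = loop.create_task(proto._drain_helper())  # writer blocks
#     proto.resume_writing()                          # buffer drained
#     loop.run_until_complete(task)                   # writer resumes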
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_protocol.py | import asyncio
import asyncio.streams
import traceback
import warnings
from collections import deque
from contextlib import suppress
from html import escape as html_escape
from http import HTTPStatus
from logging import Logger
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Deque,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import attr
import yarl
from .abc import AbstractAccessLogger, AbstractStreamWriter
from .base_protocol import BaseProtocol
from .helpers import ceil_timeout
from .http import (
HttpProcessingError,
HttpRequestParser,
HttpVersion10,
RawRequestMessage,
StreamWriter,
)
from .log import access_logger, server_logger
from .streams import EMPTY_PAYLOAD, StreamReader
from .tcp_helpers import tcp_keepalive
from .web_exceptions import HTTPException
from .web_log import AccessLogger
from .web_request import BaseRequest
from .web_response import Response, StreamResponse
__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError")
if TYPE_CHECKING: # pragma: no cover
from .web_server import Server
_RequestFactory = Callable[
[
RawRequestMessage,
StreamReader,
"RequestHandler",
AbstractStreamWriter,
"asyncio.Task[None]",
],
BaseRequest,
]
_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
ERROR = RawRequestMessage(
"UNKNOWN",
"/",
HttpVersion10,
{}, # type: ignore[arg-type]
{}, # type: ignore[arg-type]
True,
None,
False,
False,
yarl.URL("/"),
)
class RequestPayloadError(Exception):
"""Payload parsing error."""
class PayloadAccessError(Exception):
"""Payload was accessed after response was sent."""
@attr.s(auto_attribs=True, frozen=True, slots=True)
class _ErrInfo:
status: int
exc: BaseException
message: str
_MsgType = Tuple[Union[RawRequestMessage, _ErrInfo], StreamReader]
class RequestHandler(BaseProtocol):
"""HTTP protocol implementation.
    RequestHandler handles incoming HTTP requests. It reads the request line,
    request headers and request payload, then calls the handle_request()
    method. By default it always returns a 404 response.
    RequestHandler handles errors in the incoming request, like a bad
    status line, bad headers or an incomplete payload. If any error occurs,
    the connection gets closed.
keepalive_timeout -- number of seconds before closing
keep-alive connection
tcp_keepalive -- TCP keep-alive is on, default is on
debug -- enable debug mode
logger -- custom logger object
access_log_class -- custom class for access_logger
access_log -- custom logging object
access_log_format -- access log format string
loop -- Optional event loop
max_line_size -- Optional maximum header line size
max_field_size -- Optional maximum header field size
max_headers -- Optional maximum header size
"""
KEEPALIVE_RESCHEDULE_DELAY = 1
__slots__ = (
"_request_count",
"_keepalive",
"_manager",
"_request_handler",
"_request_factory",
"_tcp_keepalive",
"_keepalive_time",
"_keepalive_handle",
"_keepalive_timeout",
"_lingering_time",
"_messages",
"_message_tail",
"_waiter",
"_task_handler",
"_upgrade",
"_payload_parser",
"_request_parser",
"_reading_paused",
"logger",
"debug",
"access_log",
"access_logger",
"_close",
"_force_close",
"_current_request",
)
def __init__(
self,
manager: "Server",
*,
loop: asyncio.AbstractEventLoop,
keepalive_timeout: float = 75.0, # NGINX default is 75 secs
tcp_keepalive: bool = True,
logger: Logger = server_logger,
access_log_class: Type[AbstractAccessLogger] = AccessLogger,
access_log: Logger = access_logger,
access_log_format: str = AccessLogger.LOG_FORMAT,
debug: bool = False,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
lingering_time: float = 10.0,
read_bufsize: int = 2**16,
auto_decompress: bool = True,
):
super().__init__(loop)
self._request_count = 0
self._keepalive = False
self._current_request: Optional[BaseRequest] = None
self._manager: Optional[Server] = manager
self._request_handler: Optional[_RequestHandler] = manager.request_handler
self._request_factory: Optional[_RequestFactory] = manager.request_factory
self._tcp_keepalive = tcp_keepalive
# placeholder to be replaced on keepalive timeout setup
self._keepalive_time = 0.0
self._keepalive_handle: Optional[asyncio.Handle] = None
self._keepalive_timeout = keepalive_timeout
self._lingering_time = float(lingering_time)
self._messages: Deque[_MsgType] = deque()
self._message_tail = b""
self._waiter: Optional[asyncio.Future[None]] = None
self._task_handler: Optional[asyncio.Task[None]] = None
self._upgrade = False
self._payload_parser: Any = None
self._request_parser: Optional[HttpRequestParser] = HttpRequestParser(
self,
loop,
read_bufsize,
max_line_size=max_line_size,
max_field_size=max_field_size,
max_headers=max_headers,
payload_exception=RequestPayloadError,
auto_decompress=auto_decompress,
)
self.logger = logger
self.debug = debug
self.access_log = access_log
if access_log:
self.access_logger: Optional[AbstractAccessLogger] = access_log_class(
access_log, access_log_format
)
else:
self.access_logger = None
self._close = False
self._force_close = False
def __repr__(self) -> str:
return "<{} {}>".format(
self.__class__.__name__,
"connected" if self.transport is not None else "disconnected",
)
@property
def keepalive_timeout(self) -> float:
return self._keepalive_timeout
async def shutdown(self, timeout: Optional[float] = 15.0) -> None:
"""Do worker process exit preparations.
We need to clean up everything and stop accepting requests.
It is especially important for keep-alive connections.
"""
self._force_close = True
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._waiter:
self._waiter.cancel()
# wait for handlers
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
async with ceil_timeout(timeout):
if self._current_request is not None:
self._current_request._cancel(asyncio.CancelledError())
if self._task_handler is not None and not self._task_handler.done():
await self._task_handler
# force-close non-idle handler
if self._task_handler is not None:
self._task_handler.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
super().connection_made(transport)
real_transport = cast(asyncio.Transport, transport)
if self._tcp_keepalive:
tcp_keepalive(real_transport)
self._task_handler = self._loop.create_task(self.start())
assert self._manager is not None
self._manager.connection_made(self, real_transport)
def connection_lost(self, exc: Optional[BaseException]) -> None:
if self._manager is None:
return
self._manager.connection_lost(self, exc)
super().connection_lost(exc)
self._manager = None
self._force_close = True
self._request_factory = None
self._request_handler = None
self._request_parser = None
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._current_request is not None:
if exc is None:
exc = ConnectionResetError("Connection lost")
self._current_request._cancel(exc)
if self._waiter is not None:
self._waiter.cancel()
self._task_handler = None
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
def set_parser(self, parser: Any) -> None:
# Actual type is WebReader
assert self._payload_parser is None
self._payload_parser = parser
if self._message_tail:
self._payload_parser.feed_data(self._message_tail)
self._message_tail = b""
def eof_received(self) -> None:
pass
def data_received(self, data: bytes) -> None:
if self._force_close or self._close:
return
# parse http messages
messages: Sequence[_MsgType]
if self._payload_parser is None and not self._upgrade:
assert self._request_parser is not None
try:
messages, upgraded, tail = self._request_parser.feed_data(data)
except HttpProcessingError as exc:
messages = [
(_ErrInfo(status=400, exc=exc, message=exc.message), EMPTY_PAYLOAD)
]
upgraded = False
tail = b""
for msg, payload in messages or ():
self._request_count += 1
self._messages.append((msg, payload))
waiter = self._waiter
if messages and waiter is not None and not waiter.done():
# don't set result twice
waiter.set_result(None)
self._upgrade = upgraded
if upgraded and tail:
self._message_tail = tail
# no parser, just store
elif self._payload_parser is None and self._upgrade and data:
self._message_tail += data
# feed payload
elif data:
eof, tail = self._payload_parser.feed_data(data)
if eof:
self.close()
def keep_alive(self, val: bool) -> None:
"""Set keep-alive connection mode.
:param bool val: new state.
"""
self._keepalive = val
if self._keepalive_handle:
self._keepalive_handle.cancel()
self._keepalive_handle = None
def close(self) -> None:
"""Close connection.
Stop accepting new pipelining messages and close
connection when handlers done processing messages.
"""
self._close = True
if self._waiter:
self._waiter.cancel()
def force_close(self) -> None:
"""Forcefully close connection."""
self._force_close = True
if self._waiter:
self._waiter.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None
def log_access(
self, request: BaseRequest, response: StreamResponse, time: float
) -> None:
if self.access_logger is not None:
self.access_logger.log(request, response, self._loop.time() - time)
def log_debug(self, *args: Any, **kw: Any) -> None:
if self.debug:
self.logger.debug(*args, **kw)
def log_exception(self, *args: Any, **kw: Any) -> None:
self.logger.exception(*args, **kw)
def _process_keepalive(self) -> None:
if self._force_close or not self._keepalive:
return
        next_time = self._keepalive_time + self._keepalive_timeout
        # handler in idle state
        if self._waiter:
            if self._loop.time() > next_time:
self.force_close()
return
# not all request handlers are done,
# reschedule itself to next second
self._keepalive_handle = self._loop.call_later(
self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive
)
async def _handle_request(
self,
request: BaseRequest,
start_time: float,
request_handler: Callable[[BaseRequest], Awaitable[StreamResponse]],
) -> Tuple[StreamResponse, bool]:
assert self._request_handler is not None
try:
try:
self._current_request = request
resp = await request_handler(request)
finally:
self._current_request = None
except HTTPException as exc:
resp = exc
reset = await self.finish_response(request, resp, start_time)
except asyncio.CancelledError:
raise
except asyncio.TimeoutError as exc:
self.log_debug("Request handler timed out.", exc_info=exc)
resp = self.handle_error(request, 504)
reset = await self.finish_response(request, resp, start_time)
except Exception as exc:
resp = self.handle_error(request, 500, exc)
reset = await self.finish_response(request, resp, start_time)
else:
# Deprecation warning (See #2415)
if getattr(resp, "__http_exception__", False):
warnings.warn(
"returning HTTPException object is deprecated "
"(#2415) and will be removed, "
"please raise the exception instead",
DeprecationWarning,
)
reset = await self.finish_response(request, resp, start_time)
return resp, reset
async def start(self) -> None:
"""Process incoming request.
        It reads the request line, request headers and request payload, then
        calls the handle_request() method. Subclasses have to override
        handle_request(). start() handles various exceptions in request
        or response handling. The connection is always closed unless
        keep_alive(True) is specified.
"""
loop = self._loop
handler = self._task_handler
assert handler is not None
manager = self._manager
assert manager is not None
keepalive_timeout = self._keepalive_timeout
resp = None
assert self._request_factory is not None
assert self._request_handler is not None
while not self._force_close:
if not self._messages:
try:
# wait for next request
self._waiter = loop.create_future()
await self._waiter
except asyncio.CancelledError:
break
finally:
self._waiter = None
message, payload = self._messages.popleft()
start = loop.time()
manager.requests_count += 1
writer = StreamWriter(self, loop)
if isinstance(message, _ErrInfo):
# make request_factory work
request_handler = self._make_error_handler(message)
message = ERROR
else:
request_handler = self._request_handler
request = self._request_factory(message, payload, self, writer, handler)
try:
# a new task is used for copy context vars (#3406)
task = self._loop.create_task(
self._handle_request(request, start, request_handler)
)
try:
resp, reset = await task
except (asyncio.CancelledError, ConnectionError):
self.log_debug("Ignored premature client disconnection")
break
# Drop the processed task from asyncio.Task.all_tasks() early
del task
if reset:
self.log_debug("Ignored premature client disconnection 2")
break
# notify server about keep-alive
self._keepalive = bool(resp.keep_alive)
# check payload
if not payload.is_eof():
lingering_time = self._lingering_time
if not self._force_close and lingering_time:
self.log_debug(
"Start lingering close timer for %s sec.", lingering_time
)
now = loop.time()
end_t = now + lingering_time
with suppress(asyncio.TimeoutError, asyncio.CancelledError):
while not payload.is_eof() and now < end_t:
async with ceil_timeout(end_t - now):
# read and ignore
await payload.readany()
now = loop.time()
# if payload still uncompleted
if not payload.is_eof() and not self._force_close:
self.log_debug("Uncompleted request.")
self.close()
payload.set_exception(PayloadAccessError())
except asyncio.CancelledError:
self.log_debug("Ignored premature client disconnection ")
break
except RuntimeError as exc:
if self.debug:
self.log_exception("Unhandled runtime exception", exc_info=exc)
self.force_close()
except Exception as exc:
self.log_exception("Unhandled exception", exc_info=exc)
self.force_close()
finally:
if self.transport is None and resp is not None:
self.log_debug("Ignored premature client disconnection.")
elif not self._force_close:
if self._keepalive and not self._close:
# start keep-alive timer
if keepalive_timeout is not None:
now = self._loop.time()
self._keepalive_time = now
if self._keepalive_handle is None:
self._keepalive_handle = loop.call_at(
now + keepalive_timeout, self._process_keepalive
)
else:
break
# remove handler, close transport if no handlers left
if not self._force_close:
self._task_handler = None
if self.transport is not None:
self.transport.close()
async def finish_response(
self, request: BaseRequest, resp: StreamResponse, start_time: float
) -> bool:
"""Prepare the response and write_eof, then log access.
This has to
be called within the context of any exception so the access logger
can get exception information. Returns True if the client disconnects
prematurely.
"""
if self._request_parser is not None:
self._request_parser.set_upgraded(False)
self._upgrade = False
if self._message_tail:
self._request_parser.feed_data(self._message_tail)
self._message_tail = b""
try:
prepare_meth = resp.prepare
except AttributeError:
if resp is None:
raise RuntimeError("Missing return " "statement on request handler")
else:
raise RuntimeError(
"Web-handler should return "
"a response instance, "
"got {!r}".format(resp)
)
try:
await prepare_meth(request)
await resp.write_eof()
except ConnectionError:
self.log_access(request, resp, start_time)
return True
else:
self.log_access(request, resp, start_time)
return False
def handle_error(
self,
request: BaseRequest,
status: int = 500,
exc: Optional[BaseException] = None,
message: Optional[str] = None,
) -> StreamResponse:
"""Handle errors.
        Returns an HTTP response with a specific status code. Logs additional
        information. It always closes the current connection.
"""
self.log_exception("Error handling request", exc_info=exc)
# some data already got sent, connection is broken
if request.writer.output_size > 0:
raise ConnectionError(
"Response is sent already, cannot send another response "
"with the error message"
)
ct = "text/plain"
if status == HTTPStatus.INTERNAL_SERVER_ERROR:
title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR)
msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
tb = None
if self.debug:
with suppress(Exception):
tb = traceback.format_exc()
if "text/html" in request.headers.get("Accept", ""):
if tb:
tb = html_escape(tb)
msg = f"<h2>Traceback:</h2>\n<pre>{tb}</pre>"
message = (
"<html><head>"
"<title>{title}</title>"
"</head><body>\n<h1>{title}</h1>"
"\n{msg}\n</body></html>\n"
).format(title=title, msg=msg)
ct = "text/html"
else:
if tb:
msg = tb
message = title + "\n\n" + msg
resp = Response(status=status, text=message, content_type=ct)
resp.force_close()
return resp
def _make_error_handler(
self, err_info: _ErrInfo
) -> Callable[[BaseRequest], Awaitable[StreamResponse]]:
async def handler(request: BaseRequest) -> StreamResponse:
return self.handle_error(
request, err_info.status, err_info.exc, err_info.message
)
return handler
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/streams.py | import asyncio
import collections
import warnings
from typing import Awaitable, Callable, Deque, Generic, List, Optional, Tuple, TypeVar
from .base_protocol import BaseProtocol
from .helpers import BaseTimerContext, set_exception, set_result
from .log import internal_logger
from .typedefs import Final
__all__ = (
"EMPTY_PAYLOAD",
"EofStream",
"StreamReader",
"DataQueue",
"FlowControlDataQueue",
)
_T = TypeVar("_T")
class EofStream(Exception):
"""eof stream indication."""
class AsyncStreamIterator(Generic[_T]):
def __init__(self, read_func: Callable[[], Awaitable[_T]]) -> None:
self.read_func = read_func
def __aiter__(self) -> "AsyncStreamIterator[_T]":
return self
async def __anext__(self) -> _T:
try:
rv = await self.read_func()
except EofStream:
raise StopAsyncIteration
if rv == b"":
raise StopAsyncIteration
return rv
class ChunkTupleAsyncStreamIterator:
def __init__(self, stream: "StreamReader") -> None:
self._stream = stream
def __aiter__(self) -> "ChunkTupleAsyncStreamIterator":
return self
async def __anext__(self) -> Tuple[bytes, bool]:
rv = await self._stream.readchunk()
if rv == (b"", False):
raise StopAsyncIteration
return rv
class AsyncStreamReaderMixin:
def __aiter__(self) -> AsyncStreamIterator[bytes]:
return AsyncStreamIterator(self.readline) # type: ignore[attr-defined]
def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:
"""Returns an asynchronous iterator that yields chunks of size n.
        Available for Python 3.5+ only.
"""
return AsyncStreamIterator(
lambda: self.read(n) # type: ignore[attr-defined,no-any-return]
)
def iter_any(self) -> AsyncStreamIterator[bytes]:
"""Yield all available data as soon as it is received.
        Available for Python 3.5+ only.
"""
return AsyncStreamIterator(self.readany) # type: ignore[attr-defined]
def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:
"""Yield chunks of data as they are received by the server.
The yielded objects are tuples
of (bytes, bool) as returned by the StreamReader.readchunk method.
        Available for Python 3.5+ only.
"""
return ChunkTupleAsyncStreamIterator(self) # type: ignore[arg-type]
class StreamReader(AsyncStreamReaderMixin):
"""An enhancement of asyncio.StreamReader.
Supports asynchronous iteration by line, chunk or as available::
async for line in reader:
...
async for chunk in reader.iter_chunked(1024):
...
async for slice in reader.iter_any():
...
"""
total_bytes = 0
def __init__(
self,
protocol: BaseProtocol,
limit: int,
*,
timer: Optional[BaseTimerContext] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
self._protocol = protocol
self._low_water = limit
self._high_water = limit * 2
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
self._size = 0
self._cursor = 0
self._http_chunk_splits: Optional[List[int]] = None
self._buffer: Deque[bytes] = collections.deque()
self._buffer_offset = 0
self._eof = False
self._waiter: Optional[asyncio.Future[None]] = None
self._eof_waiter: Optional[asyncio.Future[None]] = None
self._exception: Optional[BaseException] = None
self._timer = timer
self._eof_callbacks: List[Callable[[], None]] = []
def __repr__(self) -> str:
info = [self.__class__.__name__]
if self._size:
info.append("%d bytes" % self._size)
if self._eof:
info.append("eof")
if self._low_water != 2**16: # default limit
info.append("low=%d high=%d" % (self._low_water, self._high_water))
if self._waiter:
info.append("w=%r" % self._waiter)
if self._exception:
info.append("e=%r" % self._exception)
return "<%s>" % " ".join(info)
def get_read_buffer_limits(self) -> Tuple[int, int]:
return (self._low_water, self._high_water)
def exception(self) -> Optional[BaseException]:
return self._exception
def set_exception(self, exc: BaseException) -> None:
self._exception = exc
self._eof_callbacks.clear()
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_exception(waiter, exc)
waiter = self._eof_waiter
if waiter is not None:
self._eof_waiter = None
set_exception(waiter, exc)
def on_eof(self, callback: Callable[[], None]) -> None:
if self._eof:
try:
callback()
except Exception:
internal_logger.exception("Exception in eof callback")
else:
self._eof_callbacks.append(callback)
def feed_eof(self) -> None:
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, None)
waiter = self._eof_waiter
if waiter is not None:
self._eof_waiter = None
set_result(waiter, None)
for cb in self._eof_callbacks:
try:
cb()
except Exception:
internal_logger.exception("Exception in eof callback")
self._eof_callbacks.clear()
def is_eof(self) -> bool:
"""Return True if 'feed_eof' was called."""
return self._eof
def at_eof(self) -> bool:
"""Return True if the buffer is empty and 'feed_eof' was called."""
return self._eof and not self._buffer
async def wait_eof(self) -> None:
if self._eof:
return
assert self._eof_waiter is None
self._eof_waiter = self._loop.create_future()
try:
await self._eof_waiter
finally:
self._eof_waiter = None
def unread_data(self, data: bytes) -> None:
"""rollback reading some data from stream, inserting it to buffer head."""
warnings.warn(
"unread_data() is deprecated "
"and will be removed in future releases (#3260)",
DeprecationWarning,
stacklevel=2,
)
if not data:
return
if self._buffer_offset:
self._buffer[0] = self._buffer[0][self._buffer_offset :]
self._buffer_offset = 0
self._size += len(data)
self._cursor -= len(data)
self._buffer.appendleft(data)
self._eof_counter = 0
# TODO: size is ignored, remove the param later
def feed_data(self, data: bytes, size: int = 0) -> None:
assert not self._eof, "feed_data after feed_eof"
if not data:
return
self._size += len(data)
self._buffer.append(data)
self.total_bytes += len(data)
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, None)
if self._size > self._high_water and not self._protocol._reading_paused:
self._protocol.pause_reading()
def begin_http_chunk_receiving(self) -> None:
if self._http_chunk_splits is None:
if self.total_bytes:
raise RuntimeError(
"Called begin_http_chunk_receiving when" "some data was already fed"
)
self._http_chunk_splits = []
def end_http_chunk_receiving(self) -> None:
if self._http_chunk_splits is None:
raise RuntimeError(
"Called end_chunk_receiving without calling "
"begin_chunk_receiving first"
)
        # self._http_chunk_splits contains logical byte offsets from the start
        # of the body transfer. Each offset is the offset of the end of a
        # chunk. "Logical" means bytes accessible to the user.
        # If no chunks containing logical data were received, the current
        # position is definitely zero.
pos = self._http_chunk_splits[-1] if self._http_chunk_splits else 0
if self.total_bytes == pos:
# We should not add empty chunks here. So we check for that.
# Note, when chunked + gzip is used, we can receive a chunk
# of compressed data, but that data may not be enough for gzip FSM
# to yield any uncompressed data. That's why current position may
# not change after receiving a chunk.
return
self._http_chunk_splits.append(self.total_bytes)
# wake up readchunk when end of http chunk received
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, None)
async def _wait(self, func_name: str) -> None:
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
# which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError(
"%s() called while another coroutine is "
"already waiting for incoming data" % func_name
)
waiter = self._waiter = self._loop.create_future()
try:
if self._timer:
with self._timer:
await waiter
else:
await waiter
finally:
self._waiter = None
async def readline(self) -> bytes:
return await self.readuntil()
async def readuntil(self, separator: bytes = b"\n") -> bytes:
seplen = len(separator)
if seplen == 0:
raise ValueError("Separator should be at least one-byte string")
if self._exception is not None:
raise self._exception
chunk = b""
chunk_size = 0
not_enough = True
while not_enough:
while self._buffer and not_enough:
offset = self._buffer_offset
ichar = self._buffer[0].find(separator, offset) + 1
# Read from current offset to found separator or to the end.
data = self._read_nowait_chunk(ichar - offset if ichar else -1)
chunk += data
chunk_size += len(data)
if ichar:
not_enough = False
if chunk_size > self._high_water:
raise ValueError("Chunk too big")
if self._eof:
break
if not_enough:
await self._wait("readuntil")
return chunk
async def read(self, n: int = -1) -> bytes:
if self._exception is not None:
raise self._exception
        # Migration problem: with DataQueue you have to catch the EofStream
        # exception, so a common pattern is to run payload.read() inside an
        # infinite loop, which can cause a real infinite loop with
        # StreamReader. Let's keep this check for one more major release.
if __debug__:
if self._eof and not self._buffer:
self._eof_counter = getattr(self, "_eof_counter", 0) + 1
if self._eof_counter > 5:
internal_logger.warning(
"Multiple access to StreamReader in eof state, "
"might be infinite loop.",
stack_info=True,
)
if not n:
return b""
if n < 0:
# This used to just loop creating a new waiter hoping to
# collect everything in self._buffer, but that would
# deadlock if the subprocess sends more than self.limit
# bytes. So just call self.readany() until EOF.
blocks = []
while True:
block = await self.readany()
if not block:
break
blocks.append(block)
return b"".join(blocks)
# TODO: should be `if` instead of `while`
        # because the waiter may be triggered on chunk end,
# without feeding any data
while not self._buffer and not self._eof:
await self._wait("read")
return self._read_nowait(n)
async def readany(self) -> bytes:
if self._exception is not None:
raise self._exception
# TODO: should be `if` instead of `while`
        # because the waiter may be triggered on chunk end,
# without feeding any data
while not self._buffer and not self._eof:
await self._wait("readany")
return self._read_nowait(-1)
async def readchunk(self) -> Tuple[bytes, bool]:
"""Returns a tuple of (data, end_of_http_chunk).
When chunked transfer
encoding is used, end_of_http_chunk is a boolean indicating if the end
        of the data corresponds to the end of an HTTP chunk; otherwise it is
always False.
"""
while True:
if self._exception is not None:
raise self._exception
while self._http_chunk_splits:
pos = self._http_chunk_splits.pop(0)
if pos == self._cursor:
return (b"", True)
if pos > self._cursor:
return (self._read_nowait(pos - self._cursor), True)
internal_logger.warning(
"Skipping HTTP chunk end due to data "
"consumption beyond chunk boundary"
)
if self._buffer:
return (self._read_nowait_chunk(-1), False)
# return (self._read_nowait(-1), False)
if self._eof:
# Special case for signifying EOF.
# (b'', True) is not a final return value actually.
return (b"", False)
await self._wait("readchunk")
async def readexactly(self, n: int) -> bytes:
if self._exception is not None:
raise self._exception
blocks: List[bytes] = []
while n > 0:
block = await self.read(n)
if not block:
partial = b"".join(blocks)
raise asyncio.IncompleteReadError(partial, len(partial) + n)
blocks.append(block)
n -= len(block)
return b"".join(blocks)
def read_nowait(self, n: int = -1) -> bytes:
        # The default was changed to be consistent with .read(-1).
        #
        # I believe most users don't know about this method,
        # so they are not affected.
if self._exception is not None:
raise self._exception
if self._waiter and not self._waiter.done():
raise RuntimeError(
"Called while some coroutine is waiting for incoming data."
)
return self._read_nowait(n)
def _read_nowait_chunk(self, n: int) -> bytes:
first_buffer = self._buffer[0]
offset = self._buffer_offset
if n != -1 and len(first_buffer) - offset > n:
data = first_buffer[offset : offset + n]
self._buffer_offset += n
elif offset:
self._buffer.popleft()
data = first_buffer[offset:]
self._buffer_offset = 0
else:
data = self._buffer.popleft()
self._size -= len(data)
self._cursor += len(data)
chunk_splits = self._http_chunk_splits
# Prevent memory leak: drop useless chunk splits
while chunk_splits and chunk_splits[0] < self._cursor:
chunk_splits.pop(0)
if self._size < self._low_water and self._protocol._reading_paused:
self._protocol.resume_reading()
return data
def _read_nowait(self, n: int) -> bytes:
"""Read not more than n bytes, or whole buffer if n == -1"""
chunks = []
while self._buffer:
chunk = self._read_nowait_chunk(n)
chunks.append(chunk)
if n != -1:
n -= len(chunk)
if n == 0:
break
return b"".join(chunks) if chunks else b""
class EmptyStreamReader(StreamReader): # lgtm [py/missing-call-to-init]
def __init__(self) -> None:
pass
def exception(self) -> Optional[BaseException]:
return None
def set_exception(self, exc: BaseException) -> None:
pass
def on_eof(self, callback: Callable[[], None]) -> None:
try:
callback()
except Exception:
internal_logger.exception("Exception in eof callback")
def feed_eof(self) -> None:
pass
def is_eof(self) -> bool:
return True
def at_eof(self) -> bool:
return True
async def wait_eof(self) -> None:
return
def feed_data(self, data: bytes, n: int = 0) -> None:
pass
async def readline(self) -> bytes:
return b""
async def read(self, n: int = -1) -> bytes:
return b""
# TODO add async def readuntil
async def readany(self) -> bytes:
return b""
async def readchunk(self) -> Tuple[bytes, bool]:
return (b"", True)
async def readexactly(self, n: int) -> bytes:
raise asyncio.IncompleteReadError(b"", n)
def read_nowait(self, n: int = -1) -> bytes:
return b""
EMPTY_PAYLOAD: Final[StreamReader] = EmptyStreamReader()
class DataQueue(Generic[_T]):
"""DataQueue is a general-purpose blocking queue with one reader."""
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self._eof = False
self._waiter: Optional[asyncio.Future[None]] = None
self._exception: Optional[BaseException] = None
self._size = 0
self._buffer: Deque[Tuple[_T, int]] = collections.deque()
def __len__(self) -> int:
return len(self._buffer)
def is_eof(self) -> bool:
return self._eof
def at_eof(self) -> bool:
return self._eof and not self._buffer
def exception(self) -> Optional[BaseException]:
return self._exception
def set_exception(self, exc: BaseException) -> None:
self._eof = True
self._exception = exc
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_exception(waiter, exc)
def feed_data(self, data: _T, size: int = 0) -> None:
self._size += size
self._buffer.append((data, size))
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, None)
def feed_eof(self) -> None:
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
set_result(waiter, None)
async def read(self) -> _T:
if not self._buffer and not self._eof:
assert not self._waiter
self._waiter = self._loop.create_future()
try:
await self._waiter
except (asyncio.CancelledError, asyncio.TimeoutError):
self._waiter = None
raise
if self._buffer:
data, size = self._buffer.popleft()
self._size -= size
return data
else:
if self._exception is not None:
raise self._exception
else:
raise EofStream
def __aiter__(self) -> AsyncStreamIterator[_T]:
return AsyncStreamIterator(self.read)
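# Illustrative sketch (not part of the original source): DataQueue's
# feed/read cycle; the size accounting is left to the caller.
#
#     queue: DataQueue[bytes] = DataQueue(loop)
#     queue.feed_data(b"payload", size=7)
#     queue.feed_eof()
#     data = loop.run_until_complete(queue.read())  # b"payload"
#     # a further read() raises EofStream once the buffer is drained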
class FlowControlDataQueue(DataQueue[_T]):
"""FlowControlDataQueue resumes and pauses an underlying stream.
It is a destination for parsed data.
"""
def __init__(
self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop
) -> None:
super().__init__(loop=loop)
self._protocol = protocol
self._limit = limit * 2
def feed_data(self, data: _T, size: int = 0) -> None:
super().feed_data(data, size)
if self._size > self._limit and not self._protocol._reading_paused:
self._protocol.pause_reading()
async def read(self) -> _T:
try:
return await super().read()
finally:
if self._size < self._limit and self._protocol._reading_paused:
self._protocol.resume_reading()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/resolver.py | import asyncio
import socket
from typing import Any, Dict, List, Optional, Type, Union
from .abc import AbstractResolver
from .helpers import get_running_loop
__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver")
try:
import aiodns
# aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
except ImportError: # pragma: no cover
aiodns = None
aiodns_default = False
class ThreadedResolver(AbstractResolver):
"""Threaded resolver.
Uses an Executor for synchronous getaddrinfo() calls.
concurrent.futures.ThreadPoolExecutor is used by default.
"""
def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
self._loop = get_running_loop(loop)
async def resolve(
self, hostname: str, port: int = 0, family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
infos = await self._loop.getaddrinfo(
hostname,
port,
type=socket.SOCK_STREAM,
family=family,
flags=socket.AI_ADDRCONFIG,
)
hosts = []
for family, _, proto, _, address in infos:
if family == socket.AF_INET6:
if len(address) < 3:
# IPv6 is not supported by Python build,
# or IPv6 is not enabled in the host
continue
if address[3]: # type: ignore[misc]
                    # This is essential for link-local IPv6 addresses.
                    # LL IPv6 is a VERY rare case. Strictly speaking, we
                    # should use getnameinfo() unconditionally, but skipping
                    # it for the common case is a worthwhile performance win.
host, _port = socket.getnameinfo(
address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
)
port = int(_port)
else:
host, port = address[:2]
else: # IPv4
assert family == socket.AF_INET
host, port = address # type: ignore[misc]
hosts.append(
{
"hostname": hostname,
"host": host,
"port": port,
"family": family,
"proto": proto,
"flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
}
)
return hosts
async def close(self) -> None:
pass
class AsyncResolver(AbstractResolver):
"""Use the `aiodns` package to make asynchronous DNS lookups"""
def __init__(
self,
loop: Optional[asyncio.AbstractEventLoop] = None,
*args: Any,
**kwargs: Any
) -> None:
if aiodns is None:
raise RuntimeError("Resolver requires aiodns library")
self._loop = get_running_loop(loop)
self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)
if not hasattr(self._resolver, "gethostbyname"):
# aiodns 1.1 is not available, fallback to DNSResolver.query
self.resolve = self._resolve_with_query # type: ignore
async def resolve(
self, host: str, port: int = 0, family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
try:
resp = await self._resolver.gethostbyname(host, family)
except aiodns.error.DNSError as exc:
            msg = exc.args[1] if len(exc.args) >= 2 else "DNS lookup failed"
raise OSError(msg) from exc
hosts = []
for address in resp.addresses:
hosts.append(
{
"hostname": host,
"host": address,
"port": port,
"family": family,
"proto": 0,
"flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
}
)
if not hosts:
raise OSError("DNS lookup failed")
return hosts
async def _resolve_with_query(
self, host: str, port: int = 0, family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
if family == socket.AF_INET6:
qtype = "AAAA"
else:
qtype = "A"
try:
resp = await self._resolver.query(host, qtype)
except aiodns.error.DNSError as exc:
            msg = exc.args[1] if len(exc.args) >= 2 else "DNS lookup failed"
raise OSError(msg) from exc
hosts = []
for rr in resp:
hosts.append(
{
"hostname": host,
"host": rr.host,
"port": port,
"family": family,
"proto": 0,
"flags": socket.AI_NUMERICHOST,
}
)
if not hosts:
raise OSError("DNS lookup failed")
return hosts
async def close(self) -> None:
self._resolver.cancel()
_DefaultType = Type[Union[AsyncResolver, ThreadedResolver]]
DefaultResolver: _DefaultType = AsyncResolver if aiodns_default else ThreadedResolver
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/cookiejar.py | import asyncio
import contextlib
import datetime
import os # noqa
import pathlib
import pickle
import re
from collections import defaultdict
from http.cookies import BaseCookie, Morsel, SimpleCookie
from typing import ( # noqa
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
cast,
)
from yarl import URL
from .abc import AbstractCookieJar, ClearCookiePredicate
from .helpers import is_ip_address, next_whole_second
from .typedefs import LooseCookies, PathLike, StrOrURL
__all__ = ("CookieJar", "DummyCookieJar")
CookieItem = Union[str, "Morsel[str]"]
class CookieJar(AbstractCookieJar):
"""Implements cookie storage adhering to RFC 6265."""
DATE_TOKENS_RE = re.compile(
r"[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*"
r"(?P<token>[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)"
)
DATE_HMS_TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})")
DATE_DAY_OF_MONTH_RE = re.compile(r"(\d{1,2})")
DATE_MONTH_RE = re.compile(
"(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|" "(aug)|(sep)|(oct)|(nov)|(dec)",
re.I,
)
DATE_YEAR_RE = re.compile(r"(\d{2,4})")
MAX_TIME = datetime.datetime.max.replace(tzinfo=datetime.timezone.utc)
MAX_32BIT_TIME = datetime.datetime.utcfromtimestamp(2**31 - 1)
def __init__(
self,
*,
unsafe: bool = False,
quote_cookie: bool = True,
treat_as_secure_origin: Union[StrOrURL, List[StrOrURL], None] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
super().__init__(loop=loop)
self._cookies: DefaultDict[str, SimpleCookie[str]] = defaultdict(SimpleCookie)
self._host_only_cookies: Set[Tuple[str, str]] = set()
self._unsafe = unsafe
self._quote_cookie = quote_cookie
if treat_as_secure_origin is None:
treat_as_secure_origin = []
elif isinstance(treat_as_secure_origin, URL):
treat_as_secure_origin = [treat_as_secure_origin.origin()]
elif isinstance(treat_as_secure_origin, str):
treat_as_secure_origin = [URL(treat_as_secure_origin).origin()]
else:
treat_as_secure_origin = [
URL(url).origin() if isinstance(url, str) else url.origin()
for url in treat_as_secure_origin
]
self._treat_as_secure_origin = treat_as_secure_origin
self._next_expiration = next_whole_second()
self._expirations: Dict[Tuple[str, str], datetime.datetime] = {}
# #4515: datetime.max may not be representable on 32-bit platforms
self._max_time = self.MAX_TIME
try:
self._max_time.timestamp()
except OverflowError:
self._max_time = self.MAX_32BIT_TIME
def save(self, file_path: PathLike) -> None:
file_path = pathlib.Path(file_path)
with file_path.open(mode="wb") as f:
pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)
def load(self, file_path: PathLike) -> None:
file_path = pathlib.Path(file_path)
with file_path.open(mode="rb") as f:
self._cookies = pickle.load(f)
def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None:
if predicate is None:
self._next_expiration = next_whole_second()
self._cookies.clear()
self._host_only_cookies.clear()
self._expirations.clear()
return
to_del = []
now = datetime.datetime.now(datetime.timezone.utc)
for domain, cookie in self._cookies.items():
for name, morsel in cookie.items():
key = (domain, name)
if (
key in self._expirations and self._expirations[key] <= now
) or predicate(morsel):
to_del.append(key)
for domain, name in to_del:
key = (domain, name)
self._host_only_cookies.discard(key)
if key in self._expirations:
del self._expirations[(domain, name)]
self._cookies[domain].pop(name, None)
next_expiration = min(self._expirations.values(), default=self._max_time)
try:
self._next_expiration = next_expiration.replace(
microsecond=0
) + datetime.timedelta(seconds=1)
except OverflowError:
self._next_expiration = self._max_time
def clear_domain(self, domain: str) -> None:
self.clear(lambda x: self._is_domain_match(domain, x["domain"]))
def __iter__(self) -> "Iterator[Morsel[str]]":
self._do_expiration()
for val in self._cookies.values():
yield from val.values()
def __len__(self) -> int:
return sum(1 for i in self)
def _do_expiration(self) -> None:
self.clear(lambda x: False)
def _expire_cookie(self, when: datetime.datetime, domain: str, name: str) -> None:
self._next_expiration = min(self._next_expiration, when)
self._expirations[(domain, name)] = when
def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
"""Update cookies."""
hostname = response_url.raw_host
if not self._unsafe and is_ip_address(hostname):
# Don't accept cookies from IPs
return
if isinstance(cookies, Mapping):
cookies = cookies.items()
for name, cookie in cookies:
if not isinstance(cookie, Morsel):
tmp: SimpleCookie[str] = SimpleCookie()
tmp[name] = cookie # type: ignore[assignment]
cookie = tmp[name]
domain = cookie["domain"]
# ignore domains with trailing dots
if domain.endswith("."):
domain = ""
del cookie["domain"]
if not domain and hostname is not None:
# Set the cookie's domain to the response hostname
# and set its host-only-flag
self._host_only_cookies.add((hostname, name))
domain = cookie["domain"] = hostname
if domain.startswith("."):
# Remove leading dot
domain = domain[1:]
cookie["domain"] = domain
if hostname and not self._is_domain_match(domain, hostname):
# Setting cookies for different domains is not allowed
continue
path = cookie["path"]
if not path or not path.startswith("/"):
# Set the cookie's path to the response path
path = response_url.path
if not path.startswith("/"):
path = "/"
else:
# Cut everything from the last slash to the end
path = "/" + path[1 : path.rfind("/")]
cookie["path"] = path
max_age = cookie["max-age"]
if max_age:
try:
delta_seconds = int(max_age)
try:
max_age_expiration = datetime.datetime.now(
datetime.timezone.utc
) + datetime.timedelta(seconds=delta_seconds)
except OverflowError:
max_age_expiration = self._max_time
self._expire_cookie(max_age_expiration, domain, name)
except ValueError:
cookie["max-age"] = ""
else:
expires = cookie["expires"]
if expires:
expire_time = self._parse_date(expires)
if expire_time:
self._expire_cookie(expire_time, domain, name)
else:
cookie["expires"] = ""
self._cookies[domain][name] = cookie
self._do_expiration()
def filter_cookies(
self, request_url: URL = URL()
) -> Union["BaseCookie[str]", "SimpleCookie[str]"]:
"""Returns this jar's cookies filtered by their attributes."""
self._do_expiration()
request_url = URL(request_url)
filtered: Union["SimpleCookie[str]", "BaseCookie[str]"] = (
SimpleCookie() if self._quote_cookie else BaseCookie()
)
hostname = request_url.raw_host or ""
request_origin = URL()
with contextlib.suppress(ValueError):
request_origin = request_url.origin()
is_not_secure = (
request_url.scheme not in ("https", "wss")
and request_origin not in self._treat_as_secure_origin
)
for cookie in self:
name = cookie.key
domain = cookie["domain"]
# Send shared cookies
if not domain:
filtered[name] = cookie.value
continue
if not self._unsafe and is_ip_address(hostname):
continue
if (domain, name) in self._host_only_cookies:
if domain != hostname:
continue
elif not self._is_domain_match(domain, hostname):
continue
if not self._is_path_match(request_url.path, cookie["path"]):
continue
if is_not_secure and cookie["secure"]:
continue
# It's critical we use the Morsel so the coded_value
# (based on cookie version) is preserved
mrsl_val = cast("Morsel[str]", cookie.get(cookie.key, Morsel()))
mrsl_val.set(cookie.key, cookie.value, cookie.coded_value)
filtered[name] = mrsl_val
return filtered
@staticmethod
def _is_domain_match(domain: str, hostname: str) -> bool:
"""Implements domain matching adhering to RFC 6265."""
if hostname == domain:
return True
if not hostname.endswith(domain):
return False
non_matching = hostname[: -len(domain)]
if not non_matching.endswith("."):
return False
return not is_ip_address(hostname)
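    # Worked examples of the RFC 6265 domain-match rule above (illustrative):
    #   _is_domain_match("example.com", "example.com")     -> True  (equal)
    #   _is_domain_match("example.com", "www.example.com") -> True  (suffix
    #       preceded by a dot, hostname is not an IP address)
    #   _is_domain_match("ample.com", "example.com")       -> False (suffix
    #       not preceded by a dot)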
@staticmethod
def _is_path_match(req_path: str, cookie_path: str) -> bool:
"""Implements path matching adhering to RFC 6265."""
if not req_path.startswith("/"):
req_path = "/"
if req_path == cookie_path:
return True
if not req_path.startswith(cookie_path):
return False
if cookie_path.endswith("/"):
return True
non_matching = req_path[len(cookie_path) :]
return non_matching.startswith("/")
@classmethod
def _parse_date(cls, date_str: str) -> Optional[datetime.datetime]:
"""Implements date string parsing adhering to RFC 6265."""
if not date_str:
return None
found_time = False
found_day = False
found_month = False
found_year = False
hour = minute = second = 0
day = 0
month = 0
year = 0
for token_match in cls.DATE_TOKENS_RE.finditer(date_str):
token = token_match.group("token")
if not found_time:
time_match = cls.DATE_HMS_TIME_RE.match(token)
if time_match:
found_time = True
hour, minute, second = (int(s) for s in time_match.groups())
continue
if not found_day:
day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)
if day_match:
found_day = True
day = int(day_match.group())
continue
if not found_month:
month_match = cls.DATE_MONTH_RE.match(token)
if month_match:
found_month = True
assert month_match.lastindex is not None
month = month_match.lastindex
continue
if not found_year:
year_match = cls.DATE_YEAR_RE.match(token)
if year_match:
found_year = True
year = int(year_match.group())
if 70 <= year <= 99:
year += 1900
elif 0 <= year <= 69:
year += 2000
        if not (found_day and found_month and found_year and found_time):
return None
if not 1 <= day <= 31:
return None
if year < 1601 or hour > 23 or minute > 59 or second > 59:
return None
return datetime.datetime(
year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc
)
class DummyCookieJar(AbstractCookieJar):
"""Implements a dummy cookie storage.
It can be used with the ClientSession when no cookie processing is needed.
"""
def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
super().__init__(loop=loop)
def __iter__(self) -> "Iterator[Morsel[str]]":
while False:
yield None
def __len__(self) -> int:
return 0
def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None:
pass
def clear_domain(self, domain: str) -> None:
pass
def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
pass
def filter_cookies(self, request_url: URL) -> "BaseCookie[str]":
return SimpleCookie()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/client.py | """HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import json
import os
import sys
import traceback
import warnings
from contextlib import suppress
from types import SimpleNamespace, TracebackType
from typing import (
Any,
Awaitable,
Callable,
Coroutine,
FrozenSet,
Generator,
Generic,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import attr
from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr
from yarl import URL
from . import hdrs, http, payload
from .abc import AbstractCookieJar
from .client_exceptions import (
ClientConnectionError as ClientConnectionError,
ClientConnectorCertificateError as ClientConnectorCertificateError,
ClientConnectorError as ClientConnectorError,
ClientConnectorSSLError as ClientConnectorSSLError,
ClientError as ClientError,
ClientHttpProxyError as ClientHttpProxyError,
ClientOSError as ClientOSError,
ClientPayloadError as ClientPayloadError,
ClientProxyConnectionError as ClientProxyConnectionError,
ClientResponseError as ClientResponseError,
ClientSSLError as ClientSSLError,
ContentTypeError as ContentTypeError,
InvalidURL as InvalidURL,
ServerConnectionError as ServerConnectionError,
ServerDisconnectedError as ServerDisconnectedError,
ServerFingerprintMismatch as ServerFingerprintMismatch,
ServerTimeoutError as ServerTimeoutError,
TooManyRedirects as TooManyRedirects,
WSServerHandshakeError as WSServerHandshakeError,
)
from .client_reqrep import (
ClientRequest as ClientRequest,
ClientResponse as ClientResponse,
Fingerprint as Fingerprint,
RequestInfo as RequestInfo,
_merge_ssl_params,
)
from .client_ws import ClientWebSocketResponse as ClientWebSocketResponse
from .connector import (
BaseConnector as BaseConnector,
NamedPipeConnector as NamedPipeConnector,
TCPConnector as TCPConnector,
UnixConnector as UnixConnector,
)
from .cookiejar import CookieJar
from .helpers import (
DEBUG,
PY_36,
BasicAuth,
TimeoutHandle,
ceil_timeout,
get_env_proxy_for_url,
get_running_loop,
sentinel,
strip_auth_from_url,
)
from .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter
from .http_websocket import WSHandshakeError, WSMessage, ws_ext_gen, ws_ext_parse
from .streams import FlowControlDataQueue
from .tracing import Trace, TraceConfig
from .typedefs import Final, JSONEncoder, LooseCookies, LooseHeaders, StrOrURL
__all__ = (
# client_exceptions
"ClientConnectionError",
"ClientConnectorCertificateError",
"ClientConnectorError",
"ClientConnectorSSLError",
"ClientError",
"ClientHttpProxyError",
"ClientOSError",
"ClientPayloadError",
"ClientProxyConnectionError",
"ClientResponseError",
"ClientSSLError",
"ContentTypeError",
"InvalidURL",
"ServerConnectionError",
"ServerDisconnectedError",
"ServerFingerprintMismatch",
"ServerTimeoutError",
"TooManyRedirects",
"WSServerHandshakeError",
# client_reqrep
"ClientRequest",
"ClientResponse",
"Fingerprint",
"RequestInfo",
# connector
"BaseConnector",
"TCPConnector",
"UnixConnector",
"NamedPipeConnector",
# client_ws
"ClientWebSocketResponse",
# client
"ClientSession",
"ClientTimeout",
"request",
)
try:
from ssl import SSLContext
except ImportError: # pragma: no cover
SSLContext = object # type: ignore[misc,assignment]
@attr.s(auto_attribs=True, frozen=True, slots=True)
class ClientTimeout:
total: Optional[float] = None
connect: Optional[float] = None
sock_read: Optional[float] = None
sock_connect: Optional[float] = None
# pool_queue_timeout: Optional[float] = None
# dns_resolution_timeout: Optional[float] = None
# socket_connect_timeout: Optional[float] = None
# connection_acquiring_timeout: Optional[float] = None
# new_connection_timeout: Optional[float] = None
# http_header_timeout: Optional[float] = None
# response_body_timeout: Optional[float] = None
    # To create a timeout specific to a single request, either
    # - create a completely new ClientTimeout to override the default, or
    # - use attr.evolve (http://www.attrs.org/en/stable/api.html#attr.evolve)
    #   to override selected defaults.
# 5 Minute default read timeout
DEFAULT_TIMEOUT: Final[ClientTimeout] = ClientTimeout(total=5 * 60)
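# Illustrative per-request override via attr.evolve (sketch; the values and
# the `session`/`url` names are hypothetical):
#
#   timeout = attr.evolve(DEFAULT_TIMEOUT, connect=30, sock_read=60)
#   async with session.get(url, timeout=timeout) as resp:
#       ...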
_RetType = TypeVar("_RetType")
class ClientSession:
"""First-class interface for making HTTP requests."""
ATTRS = frozenset(
[
"_base_url",
"_source_traceback",
"_connector",
"requote_redirect_url",
"_loop",
"_cookie_jar",
"_connector_owner",
"_default_auth",
"_version",
"_json_serialize",
"_requote_redirect_url",
"_timeout",
"_raise_for_status",
"_auto_decompress",
"_trust_env",
"_default_headers",
"_skip_auto_headers",
"_request_class",
"_response_class",
"_ws_response_class",
"_trace_configs",
"_read_bufsize",
]
)
_source_traceback = None # type: Optional[traceback.StackSummary]
_connector = None # type: Optional[BaseConnector]
def __init__(
self,
base_url: Optional[StrOrURL] = None,
*,
connector: Optional[BaseConnector] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
cookies: Optional[LooseCookies] = None,
headers: Optional[LooseHeaders] = None,
skip_auto_headers: Optional[Iterable[str]] = None,
auth: Optional[BasicAuth] = None,
json_serialize: JSONEncoder = json.dumps,
request_class: Type[ClientRequest] = ClientRequest,
response_class: Type[ClientResponse] = ClientResponse,
ws_response_class: Type[ClientWebSocketResponse] = ClientWebSocketResponse,
version: HttpVersion = http.HttpVersion11,
cookie_jar: Optional[AbstractCookieJar] = None,
connector_owner: bool = True,
raise_for_status: bool = False,
read_timeout: Union[float, object] = sentinel,
conn_timeout: Optional[float] = None,
timeout: Union[object, ClientTimeout] = sentinel,
auto_decompress: bool = True,
trust_env: bool = False,
requote_redirect_url: bool = True,
trace_configs: Optional[List[TraceConfig]] = None,
read_bufsize: int = 2**16,
) -> None:
if loop is None:
if connector is not None:
loop = connector._loop
loop = get_running_loop(loop)
if base_url is None or isinstance(base_url, URL):
self._base_url: Optional[URL] = base_url
else:
self._base_url = URL(base_url)
assert (
self._base_url.origin() == self._base_url
), "Only absolute URLs without path part are supported"
if connector is None:
connector = TCPConnector(loop=loop)
if connector._loop is not loop:
raise RuntimeError("Session and connector has to use same event loop")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
if cookie_jar is None:
cookie_jar = CookieJar(loop=loop)
self._cookie_jar = cookie_jar
if cookies is not None:
self._cookie_jar.update_cookies(cookies)
self._connector = connector
self._connector_owner = connector_owner
self._default_auth = auth
self._version = version
self._json_serialize = json_serialize
if timeout is sentinel:
self._timeout = DEFAULT_TIMEOUT
if read_timeout is not sentinel:
warnings.warn(
"read_timeout is deprecated, " "use timeout argument instead",
DeprecationWarning,
stacklevel=2,
)
self._timeout = attr.evolve(self._timeout, total=read_timeout)
if conn_timeout is not None:
self._timeout = attr.evolve(self._timeout, connect=conn_timeout)
warnings.warn(
"conn_timeout is deprecated, " "use timeout argument instead",
DeprecationWarning,
stacklevel=2,
)
else:
self._timeout = timeout # type: ignore[assignment]
if read_timeout is not sentinel:
raise ValueError(
"read_timeout and timeout parameters "
"conflict, please setup "
"timeout.read"
)
if conn_timeout is not None:
raise ValueError(
"conn_timeout and timeout parameters "
"conflict, please setup "
"timeout.connect"
)
self._raise_for_status = raise_for_status
self._auto_decompress = auto_decompress
self._trust_env = trust_env
self._requote_redirect_url = requote_redirect_url
self._read_bufsize = read_bufsize
# Convert to list of tuples
if headers:
real_headers: CIMultiDict[str] = CIMultiDict(headers)
else:
real_headers = CIMultiDict()
self._default_headers: CIMultiDict[str] = real_headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset(istr(i) for i in skip_auto_headers)
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
self._trace_configs = trace_configs or []
for trace_config in self._trace_configs:
trace_config.freeze()
def __init_subclass__(cls: Type["ClientSession"]) -> None:
warnings.warn(
"Inheritance class {} from ClientSession "
"is discouraged".format(cls.__name__),
DeprecationWarning,
stacklevel=2,
)
if DEBUG:
def __setattr__(self, name: str, val: Any) -> None:
if name not in self.ATTRS:
warnings.warn(
"Setting custom ClientSession.{} attribute "
"is discouraged".format(name),
DeprecationWarning,
stacklevel=2,
)
super().__setattr__(name, val)
def __del__(self, _warnings: Any = warnings) -> None:
if not self.closed:
if PY_36:
kwargs = {"source": self}
else:
kwargs = {}
_warnings.warn(
f"Unclosed client session {self!r}", ResourceWarning, **kwargs
)
context = {"client_session": self, "message": "Unclosed client session"}
if self._source_traceback is not None:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
def request(
self, method: str, url: StrOrURL, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP request."""
return _RequestContextManager(self._request(method, url, **kwargs))
def _build_url(self, str_or_url: StrOrURL) -> URL:
url = URL(str_or_url)
if self._base_url is None:
return url
else:
assert not url.is_absolute() and url.path.startswith("/")
return self._base_url.join(url)
async def _request(
self,
method: str,
str_or_url: StrOrURL,
*,
params: Optional[Mapping[str, str]] = None,
data: Any = None,
json: Any = None,
cookies: Optional[LooseCookies] = None,
headers: Optional[LooseHeaders] = None,
skip_auto_headers: Optional[Iterable[str]] = None,
auth: Optional[BasicAuth] = None,
allow_redirects: bool = True,
max_redirects: int = 10,
compress: Optional[str] = None,
chunked: Optional[bool] = None,
expect100: bool = False,
raise_for_status: Optional[bool] = None,
read_until_eof: bool = True,
proxy: Optional[StrOrURL] = None,
proxy_auth: Optional[BasicAuth] = None,
timeout: Union[ClientTimeout, object] = sentinel,
verify_ssl: Optional[bool] = None,
fingerprint: Optional[bytes] = None,
ssl_context: Optional[SSLContext] = None,
ssl: Optional[Union[SSLContext, bool, Fingerprint]] = None,
proxy_headers: Optional[LooseHeaders] = None,
trace_request_ctx: Optional[SimpleNamespace] = None,
read_bufsize: Optional[int] = None,
) -> ClientResponse:
# NOTE: timeout clamps existing connect and read timeouts. We cannot
# set the default to None because we need to detect if the user wants
# to use the existing timeouts by setting timeout to None.
if self.closed:
raise RuntimeError("Session is closed")
ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
if data is not None and json is not None:
raise ValueError(
"data and json parameters can not be used at the same time"
)
elif json is not None:
data = payload.JsonPayload(json, dumps=self._json_serialize)
if not isinstance(chunked, bool) and chunked is not None:
warnings.warn("Chunk size is deprecated #1615", DeprecationWarning)
redirects = 0
history = []
version = self._version
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
proxy_headers = self._prepare_headers(proxy_headers)
try:
url = self._build_url(str_or_url)
except ValueError as e:
raise InvalidURL(str_or_url) from e
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(istr(i))
if proxy is not None:
try:
proxy = URL(proxy)
except ValueError as e:
raise InvalidURL(proxy) from e
if timeout is sentinel:
real_timeout: ClientTimeout = self._timeout
else:
if not isinstance(timeout, ClientTimeout):
real_timeout = ClientTimeout(total=timeout) # type: ignore[arg-type]
else:
real_timeout = timeout
# timeout is cumulative for all request operations
# (request, redirects, responses, data consuming)
tm = TimeoutHandle(self._loop, real_timeout.total)
handle = tm.start()
if read_bufsize is None:
read_bufsize = self._read_bufsize
traces = [
Trace(
self,
trace_config,
trace_config.trace_config_ctx(trace_request_ctx=trace_request_ctx),
)
for trace_config in self._trace_configs
]
for trace in traces:
await trace.send_request_start(method, url.update_query(params), headers)
timer = tm.timer()
try:
with timer:
while True:
url, auth_from_url = strip_auth_from_url(url)
if auth and auth_from_url:
raise ValueError(
"Cannot combine AUTH argument with "
"credentials encoded in URL"
)
if auth is None:
auth = auth_from_url
if auth is None:
auth = self._default_auth
                    # Supporting an explicit Authorization header together
                    # with the auth argument would be confusing, so it is
                    # rejected.
if (
headers is not None
and auth is not None
and hdrs.AUTHORIZATION in headers
):
raise ValueError(
"Cannot combine AUTHORIZATION header "
"with AUTH argument or credentials "
"encoded in URL"
)
all_cookies = self._cookie_jar.filter_cookies(url)
if cookies is not None:
tmp_cookie_jar = CookieJar()
tmp_cookie_jar.update_cookies(cookies)
req_cookies = tmp_cookie_jar.filter_cookies(url)
if req_cookies:
all_cookies.load(req_cookies)
if proxy is not None:
proxy = URL(proxy)
elif self._trust_env:
with suppress(LookupError):
proxy, proxy_auth = get_env_proxy_for_url(url)
req = self._request_class(
method,
url,
params=params,
headers=headers,
skip_auto_headers=skip_headers,
data=data,
cookies=all_cookies,
auth=auth,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
loop=self._loop,
response_class=self._response_class,
proxy=proxy,
proxy_auth=proxy_auth,
timer=timer,
session=self,
ssl=ssl,
proxy_headers=proxy_headers,
traces=traces,
)
# connection timeout
try:
async with ceil_timeout(real_timeout.connect):
assert self._connector is not None
conn = await self._connector.connect(
req, traces=traces, timeout=real_timeout
)
except asyncio.TimeoutError as exc:
raise ServerTimeoutError(
"Connection timeout " "to host {}".format(url)
) from exc
assert conn.transport is not None
assert conn.protocol is not None
conn.protocol.set_response_params(
timer=timer,
skip_payload=method.upper() == "HEAD",
read_until_eof=read_until_eof,
auto_decompress=self._auto_decompress,
read_timeout=real_timeout.sock_read,
read_bufsize=read_bufsize,
)
try:
try:
resp = await req.send(conn)
try:
await resp.start(conn)
except BaseException:
resp.close()
raise
except BaseException:
conn.close()
raise
except ClientError:
raise
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise ClientOSError(*exc.args) from exc
self._cookie_jar.update_cookies(resp.cookies, resp.url)
# redirects
if resp.status in (301, 302, 303, 307, 308) and allow_redirects:
for trace in traces:
await trace.send_request_redirect(
method, url.update_query(params), headers, resp
)
redirects += 1
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
raise TooManyRedirects(
history[0].request_info, tuple(history)
)
                        # For 301 and 302, mimic IE behaviour; this has since
                        # been codified in the RFC.
                        # https://github.com/kennethreitz/requests/pull/269
if (resp.status == 303 and resp.method != hdrs.METH_HEAD) or (
resp.status in (301, 302) and resp.method == hdrs.METH_POST
):
method = hdrs.METH_GET
data = None
if headers.get(hdrs.CONTENT_LENGTH):
headers.pop(hdrs.CONTENT_LENGTH)
r_url = resp.headers.get(hdrs.LOCATION) or resp.headers.get(
hdrs.URI
)
if r_url is None:
# see github.com/aio-libs/aiohttp/issues/2022
break
else:
                            # reading the body of the redirect response is
                            # forbidden; release the connection instead
resp.release()
try:
parsed_url = URL(
r_url, encoded=not self._requote_redirect_url
)
except ValueError as e:
raise InvalidURL(r_url) from e
scheme = parsed_url.scheme
if scheme not in ("http", "https", ""):
resp.close()
raise ValueError("Can redirect only to http or https")
elif not scheme:
parsed_url = url.join(parsed_url)
if url.origin() != parsed_url.origin():
auth = None
headers.pop(hdrs.AUTHORIZATION, None)
url = parsed_url
params = None
resp.release()
continue
break
# check response status
if raise_for_status is None:
raise_for_status = self._raise_for_status
if raise_for_status:
resp.raise_for_status()
# register connection
if handle is not None:
if resp.connection is not None:
resp.connection.add_callback(handle.cancel)
else:
handle.cancel()
resp._history = tuple(history)
for trace in traces:
await trace.send_request_end(
method, url.update_query(params), headers, resp
)
return resp
except BaseException as e:
# cleanup timer
tm.close()
if handle:
handle.cancel()
handle = None
for trace in traces:
await trace.send_request_exception(
method, url.update_query(params), headers, e
)
raise
def ws_connect(
self,
url: StrOrURL,
*,
method: str = hdrs.METH_GET,
protocols: Iterable[str] = (),
timeout: float = 10.0,
receive_timeout: Optional[float] = None,
autoclose: bool = True,
autoping: bool = True,
heartbeat: Optional[float] = None,
auth: Optional[BasicAuth] = None,
origin: Optional[str] = None,
params: Optional[Mapping[str, str]] = None,
headers: Optional[LooseHeaders] = None,
proxy: Optional[StrOrURL] = None,
proxy_auth: Optional[BasicAuth] = None,
ssl: Union[SSLContext, bool, None, Fingerprint] = None,
verify_ssl: Optional[bool] = None,
fingerprint: Optional[bytes] = None,
ssl_context: Optional[SSLContext] = None,
proxy_headers: Optional[LooseHeaders] = None,
compress: int = 0,
max_msg_size: int = 4 * 1024 * 1024,
) -> "_WSRequestContextManager":
"""Initiate websocket connection."""
return _WSRequestContextManager(
self._ws_connect(
url,
method=method,
protocols=protocols,
timeout=timeout,
receive_timeout=receive_timeout,
autoclose=autoclose,
autoping=autoping,
heartbeat=heartbeat,
auth=auth,
origin=origin,
params=params,
headers=headers,
proxy=proxy,
proxy_auth=proxy_auth,
ssl=ssl,
verify_ssl=verify_ssl,
fingerprint=fingerprint,
ssl_context=ssl_context,
proxy_headers=proxy_headers,
compress=compress,
max_msg_size=max_msg_size,
)
)
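    # Illustrative client-side WebSocket loop (a sketch; the URL is
    # hypothetical and aiohttp.WSMsgType is imported by the caller):
    #
    #   async with session.ws_connect("ws://example.com/ws") as ws:
    #       await ws.send_str("hello")
    #       async for msg in ws:
    #           if msg.type == aiohttp.WSMsgType.TEXT:
    #               print(msg.data)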
async def _ws_connect(
self,
url: StrOrURL,
*,
method: str = hdrs.METH_GET,
protocols: Iterable[str] = (),
timeout: float = 10.0,
receive_timeout: Optional[float] = None,
autoclose: bool = True,
autoping: bool = True,
heartbeat: Optional[float] = None,
auth: Optional[BasicAuth] = None,
origin: Optional[str] = None,
params: Optional[Mapping[str, str]] = None,
headers: Optional[LooseHeaders] = None,
proxy: Optional[StrOrURL] = None,
proxy_auth: Optional[BasicAuth] = None,
ssl: Union[SSLContext, bool, None, Fingerprint] = None,
verify_ssl: Optional[bool] = None,
fingerprint: Optional[bytes] = None,
ssl_context: Optional[SSLContext] = None,
proxy_headers: Optional[LooseHeaders] = None,
compress: int = 0,
max_msg_size: int = 4 * 1024 * 1024,
) -> ClientWebSocketResponse:
if headers is None:
real_headers: CIMultiDict[str] = CIMultiDict()
else:
real_headers = CIMultiDict(headers)
default_headers = {
hdrs.UPGRADE: "websocket",
hdrs.CONNECTION: "upgrade",
hdrs.SEC_WEBSOCKET_VERSION: "13",
}
for key, value in default_headers.items():
real_headers.setdefault(key, value)
sec_key = base64.b64encode(os.urandom(16))
real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode()
if protocols:
real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ",".join(protocols)
if origin is not None:
real_headers[hdrs.ORIGIN] = origin
if compress:
extstr = ws_ext_gen(compress=compress)
real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr
ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
# send request
resp = await self.request(
method,
url,
params=params,
headers=real_headers,
read_until_eof=False,
auth=auth,
proxy=proxy,
proxy_auth=proxy_auth,
ssl=ssl,
proxy_headers=proxy_headers,
)
try:
# check handshake
if resp.status != 101:
raise WSServerHandshakeError(
resp.request_info,
resp.history,
message="Invalid response status",
status=resp.status,
headers=resp.headers,
)
if resp.headers.get(hdrs.UPGRADE, "").lower() != "websocket":
raise WSServerHandshakeError(
resp.request_info,
resp.history,
message="Invalid upgrade header",
status=resp.status,
headers=resp.headers,
)
if resp.headers.get(hdrs.CONNECTION, "").lower() != "upgrade":
raise WSServerHandshakeError(
resp.request_info,
resp.history,
message="Invalid connection header",
status=resp.status,
headers=resp.headers,
)
# key calculation
r_key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, "")
match = base64.b64encode(hashlib.sha1(sec_key + WS_KEY).digest()).decode()
if r_key != match:
raise WSServerHandshakeError(
resp.request_info,
resp.history,
message="Invalid challenge response",
status=resp.status,
headers=resp.headers,
)
# websocket protocol
protocol = None
if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
resp_protocols = [
proto.strip()
for proto in resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
]
for proto in resp_protocols:
if proto in protocols:
protocol = proto
break
# websocket compress
notakeover = False
if compress:
compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
if compress_hdrs:
try:
compress, notakeover = ws_ext_parse(compress_hdrs)
except WSHandshakeError as exc:
raise WSServerHandshakeError(
resp.request_info,
resp.history,
message=exc.args[0],
status=resp.status,
headers=resp.headers,
) from exc
else:
compress = 0
notakeover = False
conn = resp.connection
assert conn is not None
conn_proto = conn.protocol
assert conn_proto is not None
transport = conn.transport
assert transport is not None
reader: FlowControlDataQueue[WSMessage] = FlowControlDataQueue(
conn_proto, 2**16, loop=self._loop
)
conn_proto.set_parser(WebSocketReader(reader, max_msg_size), reader)
writer = WebSocketWriter(
conn_proto,
transport,
use_mask=True,
compress=compress,
notakeover=notakeover,
)
except BaseException:
resp.close()
raise
else:
return self._ws_response_class(
reader,
writer,
protocol,
resp,
timeout,
autoclose,
autoping,
self._loop,
receive_timeout=receive_timeout,
heartbeat=heartbeat,
compress=compress,
client_notakeover=notakeover,
)
def _prepare_headers(self, headers: Optional[LooseHeaders]) -> "CIMultiDict[str]":
"""Add default headers and transform it to CIMultiDict"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers)
added_names: Set[str] = set()
for key, value in headers.items():
if key in added_names:
result.add(key, value)
else:
result[key] = value
added_names.add(key)
return result
def get(
self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP GET request."""
return _RequestContextManager(
self._request(hdrs.METH_GET, url, allow_redirects=allow_redirects, **kwargs)
)
def options(
self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP OPTIONS request."""
return _RequestContextManager(
self._request(
hdrs.METH_OPTIONS, url, allow_redirects=allow_redirects, **kwargs
)
)
def head(
self, url: StrOrURL, *, allow_redirects: bool = False, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(
hdrs.METH_HEAD, url, allow_redirects=allow_redirects, **kwargs
)
)
def post(
self, url: StrOrURL, *, data: Any = None, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP POST request."""
return _RequestContextManager(
self._request(hdrs.METH_POST, url, data=data, **kwargs)
)
def put(
self, url: StrOrURL, *, data: Any = None, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP PUT request."""
return _RequestContextManager(
self._request(hdrs.METH_PUT, url, data=data, **kwargs)
)
def patch(
self, url: StrOrURL, *, data: Any = None, **kwargs: Any
) -> "_RequestContextManager":
"""Perform HTTP PATCH request."""
return _RequestContextManager(
self._request(hdrs.METH_PATCH, url, data=data, **kwargs)
)
def delete(self, url: StrOrURL, **kwargs: Any) -> "_RequestContextManager":
"""Perform HTTP DELETE request."""
return _RequestContextManager(self._request(hdrs.METH_DELETE, url, **kwargs))
async def close(self) -> None:
"""Close underlying connector.
Release all acquired resources.
"""
if not self.closed:
if self._connector is not None and self._connector_owner:
await self._connector.close()
self._connector = None
@property
def closed(self) -> bool:
"""Is client session closed.
A readonly property.
"""
return self._connector is None or self._connector.closed
@property
def connector(self) -> Optional[BaseConnector]:
"""Connector instance used for the session."""
return self._connector
@property
def cookie_jar(self) -> AbstractCookieJar:
"""The session cookies."""
return self._cookie_jar
@property
def version(self) -> Tuple[int, int]:
"""The session HTTP protocol version."""
return self._version
@property
def requote_redirect_url(self) -> bool:
"""Do URL requoting on redirection handling."""
return self._requote_redirect_url
@requote_redirect_url.setter
def requote_redirect_url(self, val: bool) -> None:
"""Do URL requoting on redirection handling."""
warnings.warn(
"session.requote_redirect_url modification " "is deprecated #2778",
DeprecationWarning,
stacklevel=2,
)
self._requote_redirect_url = val
@property
def loop(self) -> asyncio.AbstractEventLoop:
"""Session's loop."""
warnings.warn(
"client.loop property is deprecated", DeprecationWarning, stacklevel=2
)
return self._loop
@property
def timeout(self) -> ClientTimeout:
"""Timeout for the session."""
return self._timeout
@property
def headers(self) -> "CIMultiDict[str]":
"""The default headers of the client session."""
return self._default_headers
@property
def skip_auto_headers(self) -> FrozenSet[istr]:
"""Headers for which autogeneration should be skipped"""
return self._skip_auto_headers
@property
def auth(self) -> Optional[BasicAuth]:
"""An object that represents HTTP Basic Authorization"""
return self._default_auth
@property
def json_serialize(self) -> JSONEncoder:
"""Json serializer callable"""
return self._json_serialize
@property
def connector_owner(self) -> bool:
"""Should connector be closed on session closing"""
return self._connector_owner
@property
def raise_for_status(
self,
) -> Union[bool, Callable[[ClientResponse], Awaitable[None]]]:
"""Should `ClientResponse.raise_for_status()` be called for each response."""
return self._raise_for_status
@property
def auto_decompress(self) -> bool:
"""Should the body response be automatically decompressed."""
return self._auto_decompress
@property
def trust_env(self) -> bool:
"""
Should proxies information from environment or netrc be trusted.
Information is from HTTP_PROXY / HTTPS_PROXY environment variables
or ~/.netrc file if present.
"""
return self._trust_env
@property
def trace_configs(self) -> List[TraceConfig]:
"""A list of TraceConfig instances used for client tracing"""
return self._trace_configs
def detach(self) -> None:
"""Detach connector from session without closing the former.
Session is switched to closed state anyway.
"""
self._connector = None
def __enter__(self) -> None:
raise TypeError("Use async with instead")
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
# __exit__ should exist in pair with __enter__ but never executed
pass # pragma: no cover
async def __aenter__(self) -> "ClientSession":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
await self.close()
class _BaseRequestContextManager(Coroutine[Any, Any, _RetType], Generic[_RetType]):
__slots__ = ("_coro", "_resp")
def __init__(self, coro: Coroutine["asyncio.Future[Any]", None, _RetType]) -> None:
self._coro = coro
def send(self, arg: None) -> "asyncio.Future[Any]":
return self._coro.send(arg)
def throw(self, arg: BaseException) -> None: # type: ignore[arg-type,override]
self._coro.throw(arg)
def close(self) -> None:
return self._coro.close()
def __await__(self) -> Generator[Any, None, _RetType]:
ret = self._coro.__await__()
return ret
def __iter__(self) -> Generator[Any, None, _RetType]:
return self.__await__()
async def __aenter__(self) -> _RetType:
self._resp = await self._coro
return self._resp
class _RequestContextManager(_BaseRequestContextManager[ClientResponse]):
__slots__ = ()
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
# We're basing behavior on the exception as it can be caused by
# user code unrelated to the status of the connection. If you
# would like to close a connection you must do that
# explicitly. Otherwise connection error handling should kick in
# and close/recycle the connection as required.
self._resp.release()
class _WSRequestContextManager(_BaseRequestContextManager[ClientWebSocketResponse]):
__slots__ = ()
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
await self._resp.close()
class _SessionRequestContextManager:
__slots__ = ("_coro", "_resp", "_session")
def __init__(
self,
coro: Coroutine["asyncio.Future[Any]", None, ClientResponse],
session: ClientSession,
) -> None:
self._coro = coro
self._resp: Optional[ClientResponse] = None
self._session = session
async def __aenter__(self) -> ClientResponse:
try:
self._resp = await self._coro
except BaseException:
await self._session.close()
raise
else:
return self._resp
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
assert self._resp is not None
self._resp.close()
await self._session.close()
def request(
method: str,
url: StrOrURL,
*,
params: Optional[Mapping[str, str]] = None,
data: Any = None,
json: Any = None,
headers: Optional[LooseHeaders] = None,
skip_auto_headers: Optional[Iterable[str]] = None,
auth: Optional[BasicAuth] = None,
allow_redirects: bool = True,
max_redirects: int = 10,
compress: Optional[str] = None,
chunked: Optional[bool] = None,
expect100: bool = False,
raise_for_status: Optional[bool] = None,
read_until_eof: bool = True,
proxy: Optional[StrOrURL] = None,
proxy_auth: Optional[BasicAuth] = None,
timeout: Union[ClientTimeout, object] = sentinel,
cookies: Optional[LooseCookies] = None,
version: HttpVersion = http.HttpVersion11,
connector: Optional[BaseConnector] = None,
read_bufsize: Optional[int] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> _SessionRequestContextManager:
"""Constructs and sends a request.
Returns response object.
method - HTTP method
url - request url
params - (optional) Dictionary or bytes to be sent in the query
string of the new request
data - (optional) Dictionary, bytes, or file-like object to
send in the body of the request
json - (optional) Any json compatible python object
headers - (optional) Dictionary of HTTP Headers to send with
the request
cookies - (optional) Dict object to send with the request
    auth - (optional) aiohttp.BasicAuth named tuple representing
           HTTP Basic Auth
allow_redirects - (optional) If set to False, do not follow
redirects
version - Request HTTP version.
compress - Set to True if request has to be compressed
with deflate encoding.
chunked - Set to chunk size for chunked transfer encoding.
expect100 - Expect 100-continue response from server.
connector - BaseConnector sub-class instance to support
connection pooling.
read_until_eof - Read response until eof if response
does not have Content-Length header.
loop - Optional event loop.
timeout - Optional ClientTimeout settings structure, 5min
total timeout by default.
Usage::
>>> import aiohttp
>>> resp = await aiohttp.request('GET', 'http://python.org/')
>>> resp
<ClientResponse(python.org/) [200]>
>>> data = await resp.read()
"""
connector_owner = False
if connector is None:
connector_owner = True
connector = TCPConnector(loop=loop, force_close=True)
session = ClientSession(
loop=loop,
cookies=cookies,
version=version,
timeout=timeout,
connector=connector,
connector_owner=connector_owner,
)
return _SessionRequestContextManager(
session._request(
method,
url,
params=params,
data=data,
json=json,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
compress=compress,
chunked=chunked,
expect100=expect100,
raise_for_status=raise_for_status,
read_until_eof=read_until_eof,
proxy=proxy,
proxy_auth=proxy_auth,
read_bufsize=read_bufsize,
),
session,
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_ws.py | import asyncio
import base64
import binascii
import hashlib
import json
from typing import Any, Iterable, Optional, Tuple, cast
import async_timeout
import attr
from multidict import CIMultiDict
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import call_later, set_result
from .http import (
WS_CLOSED_MESSAGE,
WS_CLOSING_MESSAGE,
WS_KEY,
WebSocketError,
WebSocketReader,
WebSocketWriter,
WSCloseCode,
WSMessage,
WSMsgType as WSMsgType,
ws_ext_gen,
ws_ext_parse,
)
from .log import ws_logger
from .streams import EofStream, FlowControlDataQueue
from .typedefs import Final, JSONDecoder, JSONEncoder
from .web_exceptions import HTTPBadRequest, HTTPException
from .web_request import BaseRequest
from .web_response import StreamResponse
__all__ = (
"WebSocketResponse",
"WebSocketReady",
"WSMsgType",
)
THRESHOLD_CONNLOST_ACCESS: Final[int] = 5
@attr.s(auto_attribs=True, frozen=True, slots=True)
class WebSocketReady:
ok: bool
protocol: Optional[str]
def __bool__(self) -> bool:
return self.ok
class WebSocketResponse(StreamResponse):
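    # Typical server-side handler sketch (illustrative, not API reference);
    # `request` is the incoming aiohttp.web request:
    #
    #   ws = WebSocketResponse()
    #   await ws.prepare(request)
    #   async for msg in ws:
    #       if msg.type == WSMsgType.TEXT:
    #           await ws.send_str("echo: " + msg.data)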
_length_check = False
def __init__(
self,
*,
timeout: float = 10.0,
receive_timeout: Optional[float] = None,
autoclose: bool = True,
autoping: bool = True,
heartbeat: Optional[float] = None,
protocols: Iterable[str] = (),
compress: bool = True,
max_msg_size: int = 4 * 1024 * 1024,
) -> None:
super().__init__(status=101)
self._protocols = protocols
self._ws_protocol: Optional[str] = None
self._writer: Optional[WebSocketWriter] = None
self._reader: Optional[FlowControlDataQueue[WSMessage]] = None
self._closed = False
self._closing = False
self._conn_lost = 0
self._close_code: Optional[int] = None
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._waiting: Optional[asyncio.Future[bool]] = None
self._exception: Optional[BaseException] = None
self._timeout = timeout
self._receive_timeout = receive_timeout
self._autoclose = autoclose
self._autoping = autoping
self._heartbeat = heartbeat
self._heartbeat_cb: Optional[asyncio.TimerHandle] = None
if heartbeat is not None:
self._pong_heartbeat = heartbeat / 2.0
self._pong_response_cb: Optional[asyncio.TimerHandle] = None
self._compress = compress
self._max_msg_size = max_msg_size
def _cancel_heartbeat(self) -> None:
if self._pong_response_cb is not None:
self._pong_response_cb.cancel()
self._pong_response_cb = None
if self._heartbeat_cb is not None:
self._heartbeat_cb.cancel()
self._heartbeat_cb = None
def _reset_heartbeat(self) -> None:
self._cancel_heartbeat()
if self._heartbeat is not None:
assert self._loop is not None
self._heartbeat_cb = call_later(
self._send_heartbeat, self._heartbeat, self._loop
)
def _send_heartbeat(self) -> None:
if self._heartbeat is not None and not self._closed:
assert self._loop is not None
            # A fire-and-forget task is not perfect, but it is acceptable
            # for sending a ping; the alternative would be a long-lived
            # heartbeat task on the class.
self._loop.create_task(self._writer.ping()) # type: ignore[union-attr]
if self._pong_response_cb is not None:
self._pong_response_cb.cancel()
self._pong_response_cb = call_later(
self._pong_not_received, self._pong_heartbeat, self._loop
)
def _pong_not_received(self) -> None:
if self._req is not None and self._req.transport is not None:
self._closed = True
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = asyncio.TimeoutError()
self._req.transport.close()
async def prepare(self, request: BaseRequest) -> AbstractStreamWriter:
        # run the pre-check first so its errors are not hidden by
        # do_handshake() exceptions
if self._payload_writer is not None:
return self._payload_writer
protocol, writer = self._pre_start(request)
payload_writer = await super().prepare(request)
assert payload_writer is not None
self._post_start(request, protocol, writer)
await payload_writer.drain()
return payload_writer
def _handshake(
self, request: BaseRequest
) -> Tuple["CIMultiDict[str]", str, bool, bool]:
headers = request.headers
if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip():
raise HTTPBadRequest(
text=(
"No WebSocket UPGRADE hdr: {}\n Can "
'"Upgrade" only to "WebSocket".'
).format(headers.get(hdrs.UPGRADE))
)
if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower():
raise HTTPBadRequest(
text="No CONNECTION upgrade hdr: {}".format(
headers.get(hdrs.CONNECTION)
)
)
# find common sub-protocol between client and server
protocol = None
if hdrs.SEC_WEBSOCKET_PROTOCOL in headers:
req_protocols = [
str(proto.strip())
for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
]
for proto in req_protocols:
if proto in self._protocols:
protocol = proto
break
else:
# No overlap found: Return no protocol as per spec
ws_logger.warning(
"Client protocols %r don’t overlap server-known ones %r",
req_protocols,
self._protocols,
)
# check supported version
version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "")
if version not in ("13", "8", "7"):
raise HTTPBadRequest(text=f"Unsupported version: {version}")
# check client handshake for validity
key = headers.get(hdrs.SEC_WEBSOCKET_KEY)
try:
if not key or len(base64.b64decode(key)) != 16:
raise HTTPBadRequest(text=f"Handshake error: {key!r}")
except binascii.Error:
raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None
accept_val = base64.b64encode(
hashlib.sha1(key.encode() + WS_KEY).digest()
).decode()
response_headers = CIMultiDict(
{
hdrs.UPGRADE: "websocket",
hdrs.CONNECTION: "upgrade",
hdrs.SEC_WEBSOCKET_ACCEPT: accept_val,
}
)
notakeover = False
compress = 0
if self._compress:
extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
            # On the server side ws_ext_parse() always returns without
            # raising; if anything went wrong, the compress extension is
            # simply dropped.
compress, notakeover = ws_ext_parse(extensions, isserver=True)
if compress:
enabledext = ws_ext_gen(
compress=compress, isserver=True, server_notakeover=notakeover
)
response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext
if protocol:
response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol
return (
response_headers,
protocol,
compress,
notakeover,
) # type: ignore[return-value]
def _pre_start(self, request: BaseRequest) -> Tuple[str, WebSocketWriter]:
self._loop = request._loop
headers, protocol, compress, notakeover = self._handshake(request)
self.set_status(101)
self.headers.update(headers)
self.force_close()
self._compress = compress
transport = request._protocol.transport
assert transport is not None
writer = WebSocketWriter(
request._protocol, transport, compress=compress, notakeover=notakeover
)
return protocol, writer
def _post_start(
self, request: BaseRequest, protocol: str, writer: WebSocketWriter
) -> None:
self._ws_protocol = protocol
self._writer = writer
self._reset_heartbeat()
loop = self._loop
assert loop is not None
self._reader = FlowControlDataQueue(request._protocol, 2**16, loop=loop)
request.protocol.set_parser(
WebSocketReader(self._reader, self._max_msg_size, compress=self._compress)
)
# disable HTTP keepalive for WebSocket
request.protocol.keep_alive(False)
def can_prepare(self, request: BaseRequest) -> WebSocketReady:
if self._writer is not None:
raise RuntimeError("Already started")
try:
_, protocol, _, _ = self._handshake(request)
except HTTPException:
return WebSocketReady(False, None)
else:
return WebSocketReady(True, protocol)
@property
def closed(self) -> bool:
return self._closed
@property
def close_code(self) -> Optional[int]:
return self._close_code
@property
def ws_protocol(self) -> Optional[str]:
return self._ws_protocol
@property
def compress(self) -> bool:
return self._compress
def exception(self) -> Optional[BaseException]:
return self._exception
async def ping(self, message: bytes = b"") -> None:
if self._writer is None:
raise RuntimeError("Call .prepare() first")
await self._writer.ping(message)
async def pong(self, message: bytes = b"") -> None:
# unsolicited pong
if self._writer is None:
raise RuntimeError("Call .prepare() first")
await self._writer.pong(message)
async def send_str(self, data: str, compress: Optional[bool] = None) -> None:
if self._writer is None:
raise RuntimeError("Call .prepare() first")
if not isinstance(data, str):
raise TypeError("data argument must be str (%r)" % type(data))
await self._writer.send(data, binary=False, compress=compress)
async def send_bytes(self, data: bytes, compress: Optional[bool] = None) -> None:
if self._writer is None:
raise RuntimeError("Call .prepare() first")
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError("data argument must be byte-ish (%r)" % type(data))
await self._writer.send(data, binary=True, compress=compress)
async def send_json(
self,
data: Any,
compress: Optional[bool] = None,
*,
dumps: JSONEncoder = json.dumps,
) -> None:
await self.send_str(dumps(data), compress=compress)
async def write_eof(self) -> None: # type: ignore[override]
if self._eof_sent:
return
if self._payload_writer is None:
raise RuntimeError("Response has not been started")
await self.close()
self._eof_sent = True
async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool:
if self._writer is None:
raise RuntimeError("Call .prepare() first")
self._cancel_heartbeat()
reader = self._reader
assert reader is not None
        # break the `receive()` cycle first, since `close()` may be
        # called from a different task
if self._waiting is not None and not self._closed:
reader.feed_data(WS_CLOSING_MESSAGE, 0)
await self._waiting
if not self._closed:
self._closed = True
try:
await self._writer.close(code, message)
writer = self._payload_writer
assert writer is not None
await writer.drain()
except (asyncio.CancelledError, asyncio.TimeoutError):
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
raise
except Exception as exc:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = exc
return True
if self._closing:
return True
reader = self._reader
assert reader is not None
try:
async with async_timeout.timeout(self._timeout):
msg = await reader.read()
except asyncio.CancelledError:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
raise
except Exception as exc:
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = exc
return True
if msg.type == WSMsgType.CLOSE:
self._close_code = msg.data
return True
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
self._exception = asyncio.TimeoutError()
return True
else:
return False
async def receive(self, timeout: Optional[float] = None) -> WSMessage:
if self._reader is None:
raise RuntimeError("Call .prepare() first")
loop = self._loop
assert loop is not None
while True:
if self._waiting is not None:
raise RuntimeError("Concurrent call to receive() is not allowed")
if self._closed:
self._conn_lost += 1
if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
raise RuntimeError("WebSocket connection is closed.")
return WS_CLOSED_MESSAGE
elif self._closing:
return WS_CLOSING_MESSAGE
try:
self._waiting = loop.create_future()
try:
async with async_timeout.timeout(timeout or self._receive_timeout):
msg = await self._reader.read()
self._reset_heartbeat()
finally:
waiter = self._waiting
set_result(waiter, True)
self._waiting = None
except (asyncio.CancelledError, asyncio.TimeoutError):
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
raise
except EofStream:
self._close_code = WSCloseCode.OK
await self.close()
return WSMessage(WSMsgType.CLOSED, None, None)
except WebSocketError as exc:
self._close_code = exc.code
await self.close(code=exc.code)
return WSMessage(WSMsgType.ERROR, exc, None)
except Exception as exc:
self._exception = exc
self._closing = True
self._close_code = WSCloseCode.ABNORMAL_CLOSURE
await self.close()
return WSMessage(WSMsgType.ERROR, exc, None)
if msg.type == WSMsgType.CLOSE:
self._closing = True
self._close_code = msg.data
if not self._closed and self._autoclose:
await self.close()
elif msg.type == WSMsgType.CLOSING:
self._closing = True
elif msg.type == WSMsgType.PING and self._autoping:
await self.pong(msg.data)
continue
elif msg.type == WSMsgType.PONG and self._autoping:
continue
return msg
async def receive_str(self, *, timeout: Optional[float] = None) -> str:
msg = await self.receive(timeout)
if msg.type != WSMsgType.TEXT:
raise TypeError(
"Received message {}:{!r} is not WSMsgType.TEXT".format(
msg.type, msg.data
)
)
return cast(str, msg.data)
async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
msg = await self.receive(timeout)
if msg.type != WSMsgType.BINARY:
raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
return cast(bytes, msg.data)
async def receive_json(
self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None
) -> Any:
data = await self.receive_str(timeout=timeout)
return loads(data)
async def write(self, data: bytes) -> None:
raise RuntimeError("Cannot call .write() for websocket")
def __aiter__(self) -> "WebSocketResponse":
return self
async def __anext__(self) -> WSMessage:
msg = await self.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
raise StopAsyncIteration
return msg
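# Usage sketch (illustrative, from application code rather than this module):
# the async-iterator protocol above lets a handler drain messages until the
# peer closes, e.g.
#
#   async def ws_handler(request):  # hypothetical handler
#       ws = web.WebSocketResponse()
#       await ws.prepare(request)
#       async for msg in ws:  # stops on CLOSE/CLOSING/CLOSED
#           if msg.type == WSMsgType.TEXT:
#               await ws.send_str("echo: " + msg.data)
#       return ws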
def _cancel(self, exc: BaseException) -> None:
if self._reader is not None:
self._reader.set_exception(exc)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_log.py | import datetime
import functools
import logging
import os
import re
from collections import namedtuple
from typing import Any, Callable, Dict, Iterable, List, Tuple # noqa
from .abc import AbstractAccessLogger
from .web_request import BaseRequest
from .web_response import StreamResponse
KeyMethod = namedtuple("KeyMethod", "key method")
class AccessLogger(AbstractAccessLogger):
"""Helper object to log access.
Usage:
log = logging.getLogger("spam")
log_format = "%a %{User-Agent}i"
access_logger = AccessLogger(log, log_format)
access_logger.log(request, response, time)
Format:
%% The percent sign
%a Remote IP-address (IP-address of proxy if using reverse proxy)
%t Time when the request was started to process
%P The process ID of the child that serviced the request
%r First line of request
%s Response status code
%b Size of response in bytes, including HTTP headers
%T Time taken to serve the request, in seconds
%Tf Time taken to serve the request, in seconds with floating fraction
in .06f format
%D Time taken to serve the request, in microseconds
%{FOO}i request.headers['FOO']
%{FOO}o response.headers['FOO']
%{FOO}e os.environ['FOO']
"""
LOG_FORMAT_MAP = {
"a": "remote_address",
"t": "request_start_time",
"P": "process_id",
"r": "first_request_line",
"s": "response_status",
"b": "response_size",
"T": "request_time",
"Tf": "request_time_frac",
"D": "request_time_micro",
"i": "request_header",
"o": "response_header",
}
LOG_FORMAT = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"'
FORMAT_RE = re.compile(r"%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)")
CLEANUP_RE = re.compile(r"(%[^s])")
_FORMAT_CACHE: Dict[str, Tuple[str, List[KeyMethod]]] = {}
def __init__(self, logger: logging.Logger, log_format: str = LOG_FORMAT) -> None:
"""Initialise the logger.
logger is a logger object to be used for logging.
log_format is a string with an Apache-compatible log format description.
"""
super().__init__(logger, log_format=log_format)
_compiled_format = AccessLogger._FORMAT_CACHE.get(log_format)
if not _compiled_format:
_compiled_format = self.compile_format(log_format)
AccessLogger._FORMAT_CACHE[log_format] = _compiled_format
self._log_format, self._methods = _compiled_format
def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]:
"""Translate log_format into form usable by modulo formatting
All known atoms will be replaced with %s
Also methods for formatting of those atoms will be added to
_methods in appropriate order
For example we have log_format = "%a %t"
This format will be translated to "%s %s"
Also contents of _methods will be
[self._format_a, self._format_t]
These method will be called and results will be passed
to translated string format.
Each _format_* method receive 'args' which is list of arguments
given to self.log
Exceptions are _format_e, _format_i and _format_o methods which
also receive key name (by functools.partial)
"""
# list of (key, method) tuples, we don't use an OrderedDict as users
# can repeat the same key more than once
methods = list()
for atom in self.FORMAT_RE.findall(log_format):
if atom[1] == "":
format_key1 = self.LOG_FORMAT_MAP[atom[0]]
m = getattr(AccessLogger, "_format_%s" % atom[0])
key_method = KeyMethod(format_key1, m)
else:
format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1])
m = getattr(AccessLogger, "_format_%s" % atom[2])
key_method = KeyMethod(format_key2, functools.partial(m, atom[1]))
methods.append(key_method)
log_format = self.FORMAT_RE.sub(r"%s", log_format)
log_format = self.CLEANUP_RE.sub(r"%\1", log_format)
return log_format, methods
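# More precisely than the docstring's summary: compile_format("%a %t") returns
# ("%s %s", [KeyMethod("remote_address", AccessLogger._format_a),
# KeyMethod("request_start_time", AccessLogger._format_t)]); literal text
# passes through, with stray percent signs escaped by CLEANUP_RE.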
@staticmethod
def _format_i(
key: str, request: BaseRequest, response: StreamResponse, time: float
) -> str:
if request is None:
return "(no headers)"
# suboptimal, make istr(key) once
return request.headers.get(key, "-")
@staticmethod
def _format_o(
key: str, request: BaseRequest, response: StreamResponse, time: float
) -> str:
# suboptimal, make istr(key) once
return response.headers.get(key, "-")
@staticmethod
def _format_a(request: BaseRequest, response: StreamResponse, time: float) -> str:
if request is None:
return "-"
ip = request.remote
return ip if ip is not None else "-"
@staticmethod
def _format_t(request: BaseRequest, response: StreamResponse, time: float) -> str:
now = datetime.datetime.utcnow()
start_time = now - datetime.timedelta(seconds=time)
return start_time.strftime("[%d/%b/%Y:%H:%M:%S +0000]")
@staticmethod
def _format_P(request: BaseRequest, response: StreamResponse, time: float) -> str:
return "<%s>" % os.getpid()
@staticmethod
def _format_r(request: BaseRequest, response: StreamResponse, time: float) -> str:
if request is None:
return "-"
return "{} {} HTTP/{}.{}".format(
request.method,
request.path_qs,
request.version.major,
request.version.minor,
)
@staticmethod
def _format_s(request: BaseRequest, response: StreamResponse, time: float) -> int:
return response.status
@staticmethod
def _format_b(request: BaseRequest, response: StreamResponse, time: float) -> int:
return response.body_length
@staticmethod
def _format_T(request: BaseRequest, response: StreamResponse, time: float) -> str:
return str(round(time))
@staticmethod
def _format_Tf(request: BaseRequest, response: StreamResponse, time: float) -> str:
return "%06f" % time
@staticmethod
def _format_D(request: BaseRequest, response: StreamResponse, time: float) -> str:
return str(round(time * 1000000))
def _format_line(
self, request: BaseRequest, response: StreamResponse, time: float
) -> Iterable[Tuple[str, Callable[[BaseRequest, StreamResponse, float], str]]]:
return [(key, method(request, response, time)) for key, method in self._methods]
def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None:
try:
fmt_info = self._format_line(request, response, time)
values = list()
extra = dict()
for key, value in fmt_info:
values.append(value)
if key.__class__ is str:
extra[key] = value
else:
k1, k2 = key # type: ignore[misc]
dct = extra.get(k1, {}) # type: ignore[var-annotated,has-type]
dct[k2] = value # type: ignore[index,has-type]
extra[k1] = dct # type: ignore[has-type,assignment]
self.logger.info(self._log_format % tuple(values), extra=extra)
except Exception:
self.logger.exception("Error in logging")
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/payload_streamer.py | """
Payload implementation for coroutines as a data provider.
As a simple case, you can upload data from file::
@aiohttp.streamer
async def file_sender(writer, file_name=None):
with open(file_name, 'rb') as f:
chunk = f.read(2**16)
while chunk:
await writer.write(chunk)
chunk = f.read(2**16)
Then you can use `file_sender` like this:
async with session.post('http://httpbin.org/post',
data=file_sender(file_name='huge_file')) as resp:
print(await resp.text())
.. note:: Coroutine must accept `writer` as its first argument
"""
import types
import warnings
from typing import Any, Awaitable, Callable, Dict, Tuple
from .abc import AbstractStreamWriter
from .payload import Payload, payload_type
__all__ = ("streamer",)
class _stream_wrapper:
def __init__(
self,
coro: Callable[..., Awaitable[None]],
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> None:
self.coro = types.coroutine(coro)
self.args = args
self.kwargs = kwargs
async def __call__(self, writer: AbstractStreamWriter) -> None:
await self.coro(writer, *self.args, **self.kwargs) # type: ignore[operator]
class streamer:
def __init__(self, coro: Callable[..., Awaitable[None]]) -> None:
warnings.warn(
"@streamer is deprecated, use async generators instead",
DeprecationWarning,
stacklevel=2,
)
self.coro = coro
def __call__(self, *args: Any, **kwargs: Any) -> _stream_wrapper:
return _stream_wrapper(self.coro, args, kwargs)
@payload_type(_stream_wrapper)
class StreamWrapperPayload(Payload):
async def write(self, writer: AbstractStreamWriter) -> None:
await self._value(writer)
@payload_type(streamer)
class StreamPayload(StreamWrapperPayload):
def __init__(self, value: Any, *args: Any, **kwargs: Any) -> None:
super().__init__(value(), *args, **kwargs)
async def write(self, writer: AbstractStreamWriter) -> None:
await self._value(writer)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/payload.py | import asyncio
import enum
import io
import json
import mimetypes
import os
import warnings
from abc import ABC, abstractmethod
from itertools import chain
from typing import (
IO,
TYPE_CHECKING,
Any,
ByteString,
Dict,
Iterable,
Optional,
TextIO,
Tuple,
Type,
Union,
)
from multidict import CIMultiDict
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import (
PY_36,
content_disposition_header,
guess_filename,
parse_mimetype,
sentinel,
)
from .streams import StreamReader
from .typedefs import Final, JSONEncoder, _CIMultiDict
__all__ = (
"PAYLOAD_REGISTRY",
"get_payload",
"payload_type",
"Payload",
"BytesPayload",
"StringPayload",
"IOBasePayload",
"BytesIOPayload",
"BufferedReaderPayload",
"TextIOPayload",
"StringIOPayload",
"JsonPayload",
"AsyncIterablePayload",
)
TOO_LARGE_BYTES_BODY: Final[int] = 2**20 # 1 MB
if TYPE_CHECKING: # pragma: no cover
from typing import List
class LookupError(Exception):
pass
class Order(str, enum.Enum):
normal = "normal"
try_first = "try_first"
try_last = "try_last"
def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload":
return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(
factory: Type["Payload"], type: Any, *, order: Order = Order.normal
) -> None:
PAYLOAD_REGISTRY.register(factory, type, order=order)
class payload_type:
def __init__(self, type: Any, *, order: Order = Order.normal) -> None:
self.type = type
self.order = order
def __call__(self, factory: Type["Payload"]) -> Type["Payload"]:
register_payload(factory, self.type, order=self.order)
return factory
PayloadType = Type["Payload"]
_PayloadRegistryItem = Tuple[PayloadType, Any]
class PayloadRegistry:
"""Payload registry.
note: we need zope.interface for more efficient adapter search
"""
def __init__(self) -> None:
self._first: List[_PayloadRegistryItem] = []
self._normal: List[_PayloadRegistryItem] = []
self._last: List[_PayloadRegistryItem] = []
def get(
self,
data: Any,
*args: Any,
_CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain,
**kwargs: Any,
) -> "Payload":
if isinstance(data, Payload):
return data
for factory, type in _CHAIN(self._first, self._normal, self._last):
if isinstance(data, type):
return factory(data, *args, **kwargs)
raise LookupError()
def register(
self, factory: PayloadType, type: Any, *, order: Order = Order.normal
) -> None:
if order is Order.try_first:
self._first.append((factory, type))
elif order is Order.normal:
self._normal.append((factory, type))
elif order is Order.try_last:
self._last.append((factory, type))
else:
raise ValueError(f"Unsupported order {order!r}")
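# Registration sketch: a custom payload factory for a hypothetical MyType can
# hook into the dispatch order explicitly, mirroring the module-level
# registrations at the bottom of this file:
#
#   class MyTypePayload(Payload):
#       async def write(self, writer: AbstractStreamWriter) -> None:
#           await writer.write(bytes(self._value))  # assumes MyType supports bytes()
#
#   PAYLOAD_REGISTRY.register(MyTypePayload, MyType, order=Order.try_first)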
class Payload(ABC):
_default_content_type: str = "application/octet-stream"
_size: Optional[int] = None
def __init__(
self,
value: Any,
headers: Optional[
Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]]
] = None,
content_type: Optional[str] = sentinel,
filename: Optional[str] = None,
encoding: Optional[str] = None,
**kwargs: Any,
) -> None:
self._encoding = encoding
self._filename = filename
self._headers: _CIMultiDict = CIMultiDict()
self._value = value
if content_type is not sentinel and content_type is not None:
self._headers[hdrs.CONTENT_TYPE] = content_type
elif self._filename is not None:
content_type = mimetypes.guess_type(self._filename)[0]
if content_type is None:
content_type = self._default_content_type
self._headers[hdrs.CONTENT_TYPE] = content_type
else:
self._headers[hdrs.CONTENT_TYPE] = self._default_content_type
self._headers.update(headers or {})
@property
def size(self) -> Optional[int]:
"""Size of the payload."""
return self._size
@property
def filename(self) -> Optional[str]:
"""Filename of the payload."""
return self._filename
@property
def headers(self) -> _CIMultiDict:
"""Custom item headers"""
return self._headers
@property
def _binary_headers(self) -> bytes:
return (
"".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode(
"utf-8"
)
+ b"\r\n"
)
@property
def encoding(self) -> Optional[str]:
"""Payload encoding"""
return self._encoding
@property
def content_type(self) -> str:
"""Content type"""
return self._headers[hdrs.CONTENT_TYPE]
def set_content_disposition(
self,
disptype: str,
quote_fields: bool = True,
_charset: str = "utf-8",
**params: Any,
) -> None:
"""Sets ``Content-Disposition`` header."""
self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
disptype, quote_fields=quote_fields, _charset=_charset, **params
)
@abstractmethod
async def write(self, writer: AbstractStreamWriter) -> None:
"""Write payload.
writer is an AbstractStreamWriter instance.
"""
class BytesPayload(Payload):
def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None:
if not isinstance(value, (bytes, bytearray, memoryview)):
raise TypeError(f"value argument must be byte-ish, not {type(value)!r}")
if "content_type" not in kwargs:
kwargs["content_type"] = "application/octet-stream"
super().__init__(value, *args, **kwargs)
if isinstance(value, memoryview):
self._size = value.nbytes
else:
self._size = len(value)
if self._size > TOO_LARGE_BYTES_BODY:
if PY_36:
kwargs = {"source": self}
else:
kwargs = {}
warnings.warn(
"Sending a large body directly with raw bytes might"
" lock the event loop. You should probably pass an "
"io.BytesIO object instead",
ResourceWarning,
**kwargs,
)
async def write(self, writer: AbstractStreamWriter) -> None:
await writer.write(self._value)
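# As the warning above suggests, a large in-memory body is better handed over
# as a file-like object so it is streamed in chunks instead, e.g. (sketch):
#
#   data = io.BytesIO(big_blob)  # dispatches to BytesIOPayload via PAYLOAD_REGISTRY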
class StringPayload(BytesPayload):
def __init__(
self,
value: str,
*args: Any,
encoding: Optional[str] = None,
content_type: Optional[str] = None,
**kwargs: Any,
) -> None:
if encoding is None:
if content_type is None:
real_encoding = "utf-8"
content_type = "text/plain; charset=utf-8"
else:
mimetype = parse_mimetype(content_type)
real_encoding = mimetype.parameters.get("charset", "utf-8")
else:
if content_type is None:
content_type = "text/plain; charset=%s" % encoding
real_encoding = encoding
super().__init__(
value.encode(real_encoding),
encoding=real_encoding,
content_type=content_type,
*args,
**kwargs,
)
class StringIOPayload(StringPayload):
def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None:
super().__init__(value.read(), *args, **kwargs)
class IOBasePayload(Payload):
_value: IO[Any]
def __init__(
self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any
) -> None:
if "filename" not in kwargs:
kwargs["filename"] = guess_filename(value)
super().__init__(value, *args, **kwargs)
if self._filename is not None and disposition is not None:
if hdrs.CONTENT_DISPOSITION not in self.headers:
self.set_content_disposition(disposition, filename=self._filename)
async def write(self, writer: AbstractStreamWriter) -> None:
loop = asyncio.get_event_loop()
try:
chunk = await loop.run_in_executor(None, self._value.read, 2**16)
while chunk:
await writer.write(chunk)
chunk = await loop.run_in_executor(None, self._value.read, 2**16)
finally:
await loop.run_in_executor(None, self._value.close)
class TextIOPayload(IOBasePayload):
_value: TextIO
def __init__(
self,
value: TextIO,
*args: Any,
encoding: Optional[str] = None,
content_type: Optional[str] = None,
**kwargs: Any,
) -> None:
if encoding is None:
if content_type is None:
encoding = "utf-8"
content_type = "text/plain; charset=utf-8"
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get("charset", "utf-8")
else:
if content_type is None:
content_type = "text/plain; charset=%s" % encoding
super().__init__(
value,
content_type=content_type,
encoding=encoding,
*args,
**kwargs,
)
@property
def size(self) -> Optional[int]:
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
return None
async def write(self, writer: AbstractStreamWriter) -> None:
loop = asyncio.get_event_loop()
try:
chunk = await loop.run_in_executor(None, self._value.read, 2**16)
while chunk:
data = (
chunk.encode(encoding=self._encoding)
if self._encoding
else chunk.encode()
)
await writer.write(data)
chunk = await loop.run_in_executor(None, self._value.read, 2**16)
finally:
await loop.run_in_executor(None, self._value.close)
class BytesIOPayload(IOBasePayload):
@property
def size(self) -> int:
position = self._value.tell()
end = self._value.seek(0, os.SEEK_END)
self._value.seek(position)
return end - position
class BufferedReaderPayload(IOBasePayload):
@property
def size(self) -> Optional[int]:
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
# data.fileno() is not supported, e.g.
# io.BufferedReader(io.BytesIO(b'data'))
return None
class JsonPayload(BytesPayload):
def __init__(
self,
value: Any,
encoding: str = "utf-8",
content_type: str = "application/json",
dumps: JSONEncoder = json.dumps,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(
dumps(value).encode(encoding),
content_type=content_type,
encoding=encoding,
*args,
**kwargs,
)
if TYPE_CHECKING: # pragma: no cover
from typing import AsyncIterable, AsyncIterator
_AsyncIterator = AsyncIterator[bytes]
_AsyncIterable = AsyncIterable[bytes]
else:
from collections.abc import AsyncIterable, AsyncIterator
_AsyncIterator = AsyncIterator
_AsyncIterable = AsyncIterable
class AsyncIterablePayload(Payload):
_iter: Optional[_AsyncIterator] = None
def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None:
if not isinstance(value, AsyncIterable):
raise TypeError(
"value argument must support "
"collections.abc.AsyncIterablebe interface, "
"got {!r}".format(type(value))
)
if "content_type" not in kwargs:
kwargs["content_type"] = "application/octet-stream"
super().__init__(value, *args, **kwargs)
self._iter = value.__aiter__()
async def write(self, writer: AbstractStreamWriter) -> None:
if self._iter:
try:
# iter is not None check prevents rare cases
# when the same iterable is used twice
while True:
chunk = await self._iter.__anext__()
await writer.write(chunk)
except StopAsyncIteration:
self._iter = None
class StreamReaderPayload(AsyncIterablePayload):
def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
super().__init__(value.iter_any(), *args, **kwargs)
PAYLOAD_REGISTRY = PayloadRegistry()
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader)
# try_last gives a chance for more specialized async iterables like
# aiohttp.multipart.BodyPartReaderPayload to override the default
PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http_parser.py | import abc
import asyncio
import collections
import re
import string
import zlib
from contextlib import suppress
from enum import IntEnum
from typing import (
Any,
Generic,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from multidict import CIMultiDict, CIMultiDictProxy, istr
from yarl import URL
from . import hdrs
from .base_protocol import BaseProtocol
from .helpers import NO_EXTENSIONS, BaseTimerContext
from .http_exceptions import (
BadHttpMessage,
BadStatusLine,
ContentEncodingError,
ContentLengthError,
InvalidHeader,
LineTooLong,
TransferEncodingError,
)
from .http_writer import HttpVersion, HttpVersion10
from .log import internal_logger
from .streams import EMPTY_PAYLOAD, StreamReader
from .typedefs import Final, RawHeaders
try:
import brotli
HAS_BROTLI = True
except ImportError: # pragma: no cover
HAS_BROTLI = False
__all__ = (
"HeadersParser",
"HttpParser",
"HttpRequestParser",
"HttpResponseParser",
"RawRequestMessage",
"RawResponseMessage",
)
ASCIISET: Final[Set[str]] = set(string.printable)
# See https://tools.ietf.org/html/rfc7230#section-3.1.1
# and https://tools.ietf.org/html/rfc7230#appendix-B
#
# method = token
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
# "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
# token = 1*tchar
METHRE: Final[Pattern[str]] = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]+")
VERSRE: Final[Pattern[str]] = re.compile(r"HTTP/(\d+)\.(\d+)")
HDRRE: Final[Pattern[bytes]] = re.compile(rb"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
class RawRequestMessage(NamedTuple):
method: str
path: str
version: HttpVersion
headers: "CIMultiDictProxy[str]"
raw_headers: RawHeaders
should_close: bool
compression: Optional[str]
upgrade: bool
chunked: bool
url: URL
RawResponseMessage = collections.namedtuple(
"RawResponseMessage",
[
"version",
"code",
"reason",
"headers",
"raw_headers",
"should_close",
"compression",
"upgrade",
"chunked",
],
)
_MsgT = TypeVar("_MsgT", RawRequestMessage, RawResponseMessage)
class ParseState(IntEnum):
PARSE_NONE = 0
PARSE_LENGTH = 1
PARSE_CHUNKED = 2
PARSE_UNTIL_EOF = 3
class ChunkState(IntEnum):
PARSE_CHUNKED_SIZE = 0
PARSE_CHUNKED_CHUNK = 1
PARSE_CHUNKED_CHUNK_EOF = 2
PARSE_MAYBE_TRAILERS = 3
PARSE_TRAILERS = 4
class HeadersParser:
def __init__(
self,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
) -> None:
self.max_line_size = max_line_size
self.max_headers = max_headers
self.max_field_size = max_field_size
def parse_headers(
self, lines: List[bytes]
) -> Tuple["CIMultiDictProxy[str]", RawHeaders]:
headers: CIMultiDict[str] = CIMultiDict()
raw_headers = []
lines_idx = 1
line = lines[1]
line_count = len(lines)
while line:
# Parse initial header name : value pair.
try:
bname, bvalue = line.split(b":", 1)
except ValueError:
raise InvalidHeader(line) from None
bname = bname.strip(b" \t")
bvalue = bvalue.lstrip()
if HDRRE.search(bname):
raise InvalidHeader(bname)
if len(bname) > self.max_field_size:
raise LineTooLong(
"request header name {}".format(
bname.decode("utf8", "xmlcharrefreplace")
),
str(self.max_field_size),
str(len(bname)),
)
header_length = len(bvalue)
# next line
lines_idx += 1
line = lines[lines_idx]
# consume continuation lines
continuation = line and line[0] in (32, 9) # (' ', '\t')
if continuation:
bvalue_lst = [bvalue]
while continuation:
header_length += len(line)
if header_length > self.max_field_size:
raise LineTooLong(
"request header field {}".format(
bname.decode("utf8", "xmlcharrefreplace")
),
str(self.max_field_size),
str(header_length),
)
bvalue_lst.append(line)
# next line
lines_idx += 1
if lines_idx < line_count:
line = lines[lines_idx]
if line:
continuation = line[0] in (32, 9) # (' ', '\t')
else:
line = b""
break
bvalue = b"".join(bvalue_lst)
else:
if header_length > self.max_field_size:
raise LineTooLong(
"request header field {}".format(
bname.decode("utf8", "xmlcharrefreplace")
),
str(self.max_field_size),
str(header_length),
)
bvalue = bvalue.strip()
name = bname.decode("utf-8", "surrogateescape")
value = bvalue.decode("utf-8", "surrogateescape")
headers.add(name, value)
raw_headers.append((bname, bvalue))
return (CIMultiDictProxy(headers), tuple(raw_headers))
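# Parsing sketch: parse_headers expects the message split into lines, with the
# start-line at index 0 (it is skipped) and a terminating empty line, e.g.
#
#   headers, raw = HeadersParser().parse_headers(
#       [b"GET / HTTP/1.1", b"Host: example.com", b"X-A: 1", b""]
#   )
#   # headers["Host"] == "example.com"
#   # raw == ((b"Host", b"example.com"), (b"X-A", b"1"))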
class HttpParser(abc.ABC, Generic[_MsgT]):
def __init__(
self,
protocol: Optional[BaseProtocol] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
limit: int = 2**16,
max_line_size: int = 8190,
max_headers: int = 32768,
max_field_size: int = 8190,
timer: Optional[BaseTimerContext] = None,
code: Optional[int] = None,
method: Optional[str] = None,
readall: bool = False,
payload_exception: Optional[Type[BaseException]] = None,
response_with_body: bool = True,
read_until_eof: bool = False,
auto_decompress: bool = True,
) -> None:
self.protocol = protocol
self.loop = loop
self.max_line_size = max_line_size
self.max_headers = max_headers
self.max_field_size = max_field_size
self.timer = timer
self.code = code
self.method = method
self.readall = readall
self.payload_exception = payload_exception
self.response_with_body = response_with_body
self.read_until_eof = read_until_eof
self._lines: List[bytes] = []
self._tail = b""
self._upgraded = False
self._payload = None
self._payload_parser: Optional[HttpPayloadParser] = None
self._auto_decompress = auto_decompress
self._limit = limit
self._headers_parser = HeadersParser(max_line_size, max_headers, max_field_size)
@abc.abstractmethod
def parse_message(self, lines: List[bytes]) -> _MsgT:
pass
def feed_eof(self) -> Optional[_MsgT]:
if self._payload_parser is not None:
self._payload_parser.feed_eof()
self._payload_parser = None
else:
# try to extract partial message
if self._tail:
self._lines.append(self._tail)
if self._lines:
if self._lines[-1] != "\r\n":
self._lines.append(b"")
with suppress(Exception):
return self.parse_message(self._lines)
return None
def feed_data(
self,
data: bytes,
SEP: bytes = b"\r\n",
EMPTY: bytes = b"",
CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH,
METH_CONNECT: str = hdrs.METH_CONNECT,
SEC_WEBSOCKET_KEY1: istr = hdrs.SEC_WEBSOCKET_KEY1,
) -> Tuple[List[Tuple[_MsgT, StreamReader]], bool, bytes]:
messages = []
if self._tail:
data, self._tail = self._tail + data, b""
data_len = len(data)
start_pos = 0
loop = self.loop
while start_pos < data_len:
# read HTTP message (request/response line + headers), \r\n\r\n
# and split by lines
if self._payload_parser is None and not self._upgraded:
pos = data.find(SEP, start_pos)
# consume \r\n
if pos == start_pos and not self._lines:
start_pos = pos + 2
continue
if pos >= start_pos:
# line found
self._lines.append(data[start_pos:pos])
start_pos = pos + 2
# \r\n\r\n found
if self._lines[-1] == EMPTY:
try:
msg: _MsgT = self.parse_message(self._lines)
finally:
self._lines.clear()
def get_content_length() -> Optional[int]:
# payload length
length_hdr = msg.headers.get(CONTENT_LENGTH)
if length_hdr is None:
return None
try:
length = int(length_hdr)
except ValueError:
raise InvalidHeader(CONTENT_LENGTH)
if length < 0:
raise InvalidHeader(CONTENT_LENGTH)
return length
length = get_content_length()
# do not support old websocket spec
if SEC_WEBSOCKET_KEY1 in msg.headers:
raise InvalidHeader(SEC_WEBSOCKET_KEY1)
self._upgraded = msg.upgrade
method = getattr(msg, "method", self.method)
assert self.protocol is not None
# calculate payload
if (
(length is not None and length > 0)
or msg.chunked
and not msg.upgrade
):
payload = StreamReader(
self.protocol,
timer=self.timer,
loop=loop,
limit=self._limit,
)
payload_parser = HttpPayloadParser(
payload,
length=length,
chunked=msg.chunked,
method=method,
compression=msg.compression,
code=self.code,
readall=self.readall,
response_with_body=self.response_with_body,
auto_decompress=self._auto_decompress,
)
if not payload_parser.done:
self._payload_parser = payload_parser
elif method == METH_CONNECT:
assert isinstance(msg, RawRequestMessage)
payload = StreamReader(
self.protocol,
timer=self.timer,
loop=loop,
limit=self._limit,
)
self._upgraded = True
self._payload_parser = HttpPayloadParser(
payload,
method=msg.method,
compression=msg.compression,
readall=True,
auto_decompress=self._auto_decompress,
)
else:
if (
getattr(msg, "code", 100) >= 199
and length is None
and self.read_until_eof
):
payload = StreamReader(
self.protocol,
timer=self.timer,
loop=loop,
limit=self._limit,
)
payload_parser = HttpPayloadParser(
payload,
length=length,
chunked=msg.chunked,
method=method,
compression=msg.compression,
code=self.code,
readall=True,
response_with_body=self.response_with_body,
auto_decompress=self._auto_decompress,
)
if not payload_parser.done:
self._payload_parser = payload_parser
else:
payload = EMPTY_PAYLOAD
messages.append((msg, payload))
else:
self._tail = data[start_pos:]
data = EMPTY
break
# no parser, just store
elif self._payload_parser is None and self._upgraded:
assert not self._lines
break
# feed payload
elif data and start_pos < data_len:
assert not self._lines
assert self._payload_parser is not None
try:
eof, data = self._payload_parser.feed_data(data[start_pos:])
except BaseException as exc:
if self.payload_exception is not None:
self._payload_parser.payload.set_exception(
self.payload_exception(str(exc))
)
else:
self._payload_parser.payload.set_exception(exc)
eof = True
data = b""
if eof:
start_pos = 0
data_len = len(data)
self._payload_parser = None
continue
else:
break
if data and start_pos < data_len:
data = data[start_pos:]
else:
data = EMPTY
return messages, self._upgraded, data
def parse_headers(
self, lines: List[bytes]
) -> Tuple[
"CIMultiDictProxy[str]", RawHeaders, Optional[bool], Optional[str], bool, bool
]:
"""Parses RFC 5322 headers from a stream.
Line continuations are supported. Returns the parsed headers as a
case-insensitive multidict together with the raw header pairs, plus
the connection-close, content-encoding, upgrade and chunked flags.
"""
headers, raw_headers = self._headers_parser.parse_headers(lines)
close_conn = None
encoding = None
upgrade = False
chunked = False
# keep-alive
conn = headers.get(hdrs.CONNECTION)
if conn:
v = conn.lower()
if v == "close":
close_conn = True
elif v == "keep-alive":
close_conn = False
elif v == "upgrade":
upgrade = True
# encoding
enc = headers.get(hdrs.CONTENT_ENCODING)
if enc:
enc = enc.lower()
if enc in ("gzip", "deflate", "br"):
encoding = enc
# chunking
te = headers.get(hdrs.TRANSFER_ENCODING)
if te is not None:
if "chunked" == te.lower():
chunked = True
else:
raise BadHttpMessage("Request has invalid `Transfer-Encoding`")
if hdrs.CONTENT_LENGTH in headers:
raise BadHttpMessage(
"Content-Length can't be present with Transfer-Encoding",
)
return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
def set_upgraded(self, val: bool) -> None:
"""Set connection upgraded (to websocket) mode.
:param bool val: new state.
"""
self._upgraded = val
class HttpRequestParser(HttpParser[RawRequestMessage]):
"""Read request status line.
Exception .http_exceptions.BadStatusLine
could be raised in case of any errors in status line.
Returns RawRequestMessage.
"""
def parse_message(self, lines: List[bytes]) -> RawRequestMessage:
# request line
line = lines[0].decode("utf-8", "surrogateescape")
try:
method, path, version = line.split(None, 2)
except ValueError:
raise BadStatusLine(line) from None
if len(path) > self.max_line_size:
raise LineTooLong(
"Status line is too long", str(self.max_line_size), str(len(path))
)
# method
if not METHRE.match(method):
raise BadStatusLine(method)
# version
try:
if version.startswith("HTTP/"):
n1, n2 = version[5:].split(".", 1)
version_o = HttpVersion(int(n1), int(n2))
else:
raise BadStatusLine(version)
except Exception:
raise BadStatusLine(version)
if method == "CONNECT":
# authority-form,
# https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.3
url = URL.build(authority=path, encoded=True)
elif path.startswith("/"):
# origin-form,
# https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.1
path_part, _hash_separator, url_fragment = path.partition("#")
path_part, _question_mark_separator, qs_part = path_part.partition("?")
# NOTE: `yarl.URL.build()` is used to mimic what the Cython-based
# NOTE: parser does, otherwise it results into the same
# NOTE: HTTP Request-Line input producing different
# NOTE: `yarl.URL()` objects
url = URL.build(
path=path_part,
query_string=qs_part,
fragment=url_fragment,
encoded=True,
)
else:
# absolute-form for proxy maybe,
# https://datatracker.ietf.org/doc/html/rfc7230#section-5.3.2
url = URL(path, encoded=True)
# read headers
(
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
) = self.parse_headers(lines)
if close is None: # then the headers weren't set in the request
if version_o <= HttpVersion10: # HTTP 1.0 closes the connection by default
close = True
else: # HTTP 1.1 keeps the connection alive unless asked to close
close = False
return RawRequestMessage(
method,
path,
version_o,
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
url,
)
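# Request-target forms handled above (RFC 7230 section 5.3), with illustrative
# request lines and the resulting URL construction:
#   CONNECT example.com:443 HTTP/1.1   -> URL.build(authority=path, encoded=True)
#   GET /path?q=1#frag HTTP/1.1        -> URL.build(path=..., query_string=..., fragment=...)
#   GET http://proxy.example/ HTTP/1.1 -> URL(path, encoded=True)  (absolute-form)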
class HttpResponseParser(HttpParser[RawResponseMessage]):
"""Read response status line and headers.
BadStatusLine could be raised in case of any errors in status line.
Returns RawResponseMessage.
"""
def parse_message(self, lines: List[bytes]) -> RawResponseMessage:
line = lines[0].decode("utf-8", "surrogateescape")
try:
version, status = line.split(None, 1)
except ValueError:
raise BadStatusLine(line) from None
try:
status, reason = status.split(None, 1)
except ValueError:
reason = ""
if len(reason) > self.max_line_size:
raise LineTooLong(
"Status line is too long", str(self.max_line_size), str(len(reason))
)
# version
match = VERSRE.match(version)
if match is None:
raise BadStatusLine(line)
version_o = HttpVersion(int(match.group(1)), int(match.group(2)))
# The status code is a three-digit number
try:
status_i = int(status)
except ValueError:
raise BadStatusLine(line) from None
if status_i > 999:
raise BadStatusLine(line)
# read headers
(
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
) = self.parse_headers(lines)
if close is None:
close = version_o <= HttpVersion10
return RawResponseMessage(
version_o,
status_i,
reason.strip(),
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
)
class HttpPayloadParser:
def __init__(
self,
payload: StreamReader,
length: Optional[int] = None,
chunked: bool = False,
compression: Optional[str] = None,
code: Optional[int] = None,
method: Optional[str] = None,
readall: bool = False,
response_with_body: bool = True,
auto_decompress: bool = True,
) -> None:
self._length = 0
self._type = ParseState.PARSE_NONE
self._chunk = ChunkState.PARSE_CHUNKED_SIZE
self._chunk_size = 0
self._chunk_tail = b""
self._auto_decompress = auto_decompress
self.done = False
# payload decompression wrapper
if response_with_body and compression and self._auto_decompress:
real_payload: Union[StreamReader, DeflateBuffer] = DeflateBuffer(
payload, compression
)
else:
real_payload = payload
# payload parser
if not response_with_body:
# don't parse payload if it's not expected to be received
self._type = ParseState.PARSE_NONE
real_payload.feed_eof()
self.done = True
elif chunked:
self._type = ParseState.PARSE_CHUNKED
elif length is not None:
self._type = ParseState.PARSE_LENGTH
self._length = length
if self._length == 0:
real_payload.feed_eof()
self.done = True
else:
if readall and code != 204:
self._type = ParseState.PARSE_UNTIL_EOF
elif method in ("PUT", "POST"):
internal_logger.warning( # pragma: no cover
"Content-Length or Transfer-Encoding header is required"
)
self._type = ParseState.PARSE_NONE
real_payload.feed_eof()
self.done = True
self.payload = real_payload
def feed_eof(self) -> None:
if self._type == ParseState.PARSE_UNTIL_EOF:
self.payload.feed_eof()
elif self._type == ParseState.PARSE_LENGTH:
raise ContentLengthError(
"Not enough data for satisfy content length header."
)
elif self._type == ParseState.PARSE_CHUNKED:
raise TransferEncodingError(
"Not enough data for satisfy transfer length header."
)
def feed_data(
self, chunk: bytes, SEP: bytes = b"\r\n", CHUNK_EXT: bytes = b";"
) -> Tuple[bool, bytes]:
# Read specified amount of bytes
if self._type == ParseState.PARSE_LENGTH:
required = self._length
chunk_len = len(chunk)
if required >= chunk_len:
self._length = required - chunk_len
self.payload.feed_data(chunk, chunk_len)
if self._length == 0:
self.payload.feed_eof()
return True, b""
else:
self._length = 0
self.payload.feed_data(chunk[:required], required)
self.payload.feed_eof()
return True, chunk[required:]
# Chunked transfer encoding parser
elif self._type == ParseState.PARSE_CHUNKED:
if self._chunk_tail:
chunk = self._chunk_tail + chunk
self._chunk_tail = b""
while chunk:
# read next chunk size
if self._chunk == ChunkState.PARSE_CHUNKED_SIZE:
pos = chunk.find(SEP)
if pos >= 0:
i = chunk.find(CHUNK_EXT, 0, pos)
if i >= 0:
size_b = chunk[:i] # strip chunk-extensions
else:
size_b = chunk[:pos]
try:
size = int(bytes(size_b), 16)
except ValueError:
exc = TransferEncodingError(
chunk[:pos].decode("ascii", "surrogateescape")
)
self.payload.set_exception(exc)
raise exc from None
chunk = chunk[pos + 2 :]
if size == 0: # eof marker
self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
else:
self._chunk = ChunkState.PARSE_CHUNKED_CHUNK
self._chunk_size = size
self.payload.begin_http_chunk_receiving()
else:
self._chunk_tail = chunk
return False, b""
# read chunk and feed buffer
if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK:
required = self._chunk_size
chunk_len = len(chunk)
if required > chunk_len:
self._chunk_size = required - chunk_len
self.payload.feed_data(chunk, chunk_len)
return False, b""
else:
self._chunk_size = 0
self.payload.feed_data(chunk[:required], required)
chunk = chunk[required:]
self._chunk = ChunkState.PARSE_CHUNKED_CHUNK_EOF
self.payload.end_http_chunk_receiving()
# toss the CRLF at the end of the chunk
if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK_EOF:
if chunk[:2] == SEP:
chunk = chunk[2:]
self._chunk = ChunkState.PARSE_CHUNKED_SIZE
else:
self._chunk_tail = chunk
return False, b""
# if the stream does not contain trailers, after 0\r\n
# we should get another \r\n; otherwise
# trailers need to be skipped until \r\n\r\n
if self._chunk == ChunkState.PARSE_MAYBE_TRAILERS:
head = chunk[:2]
if head == SEP:
# end of stream
self.payload.feed_eof()
return True, chunk[2:]
# Both CR and LF, or only LF may not be received yet. It is
# expected that CRLF or LF will be shown at the very first
# byte next time, otherwise trailers should come. The last
# CRLF which marks the end of response might not be
# contained in the same TCP segment which delivered the
# size indicator.
if not head:
return False, b""
if head == SEP[:1]:
self._chunk_tail = head
return False, b""
self._chunk = ChunkState.PARSE_TRAILERS
# read and discard trailer up to the CRLF terminator
if self._chunk == ChunkState.PARSE_TRAILERS:
pos = chunk.find(SEP)
if pos >= 0:
chunk = chunk[pos + 2 :]
self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
else:
self._chunk_tail = chunk
return False, b""
# Read all bytes until eof
elif self._type == ParseState.PARSE_UNTIL_EOF:
self.payload.feed_data(chunk, len(chunk))
return False, b""
class DeflateBuffer:
"""DeflateStream decompress stream and feed data into specified stream."""
decompressor: Any
def __init__(self, out: StreamReader, encoding: Optional[str]) -> None:
self.out = out
self.size = 0
self.encoding = encoding
self._started_decoding = False
if encoding == "br":
if not HAS_BROTLI: # pragma: no cover
raise ContentEncodingError(
"Can not decode content-encoding: brotli (br). "
"Please install `Brotli`"
)
class BrotliDecoder:
# Supports both 'brotlipy' and 'Brotli' packages
# since they share an import name. The top branches
# are for 'brotlipy' and bottom branches for 'Brotli'
def __init__(self) -> None:
self._obj = brotli.Decompressor()
def decompress(self, data: bytes) -> bytes:
if hasattr(self._obj, "decompress"):
return cast(bytes, self._obj.decompress(data))
return cast(bytes, self._obj.process(data))
def flush(self) -> bytes:
if hasattr(self._obj, "flush"):
return cast(bytes, self._obj.flush())
return b""
self.decompressor = BrotliDecoder()
else:
zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
self.decompressor = zlib.decompressobj(wbits=zlib_mode)
def set_exception(self, exc: BaseException) -> None:
self.out.set_exception(exc)
def feed_data(self, chunk: bytes, size: int) -> None:
if not size:
return
self.size += size
# RFC1950
# bits 0..3 = CM = 0b1000 = 8 = "deflate"
# bits 4..7 = CINFO = 1..7 = window size.
if (
not self._started_decoding
and self.encoding == "deflate"
and chunk[0] & 0xF != 8
):
# Change the decoder to decompress incorrectly compressed data
# Actually we should issue a warning about non-RFC-compliant data.
self.decompressor = zlib.decompressobj(wbits=-zlib.MAX_WBITS)
try:
chunk = self.decompressor.decompress(chunk)
except Exception:
raise ContentEncodingError(
"Can not decode content-encoding: %s" % self.encoding
)
self._started_decoding = True
if chunk:
self.out.feed_data(chunk, len(chunk))
def feed_eof(self) -> None:
chunk = self.decompressor.flush()
if chunk or self.size > 0:
self.out.feed_data(chunk, len(chunk))
if self.encoding == "deflate" and not self.decompressor.eof:
raise ContentEncodingError("deflate")
self.out.feed_eof()
def begin_http_chunk_receiving(self) -> None:
self.out.begin_http_chunk_receiving()
def end_http_chunk_receiving(self) -> None:
self.out.end_http_chunk_receiving()
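# Decoding sketch: DeflateBuffer wraps the output stream, so (illustratively)
#   buf = DeflateBuffer(stream, "gzip")
#   buf.feed_data(compressed, len(compressed))
#   buf.feed_eof()
# pushes the decompressed bytes into `stream`; the wbits switch above handles
# servers that send raw deflate data without the RFC 1950 zlib header.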
HttpRequestParserPy = HttpRequestParser
HttpResponseParserPy = HttpResponseParser
RawRequestMessagePy = RawRequestMessage
RawResponseMessagePy = RawResponseMessage
try:
if not NO_EXTENSIONS:
from ._http_parser import ( # type: ignore[import,no-redef]
HttpRequestParser,
HttpResponseParser,
RawRequestMessage,
RawResponseMessage,
)
HttpRequestParserC = HttpRequestParser
HttpResponseParserC = HttpResponseParser
RawRequestMessageC = RawRequestMessage
RawResponseMessageC = RawResponseMessage
except ImportError: # pragma: no cover
pass
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/worker.py | """Async gunicorn worker for aiohttp.web"""
import asyncio
import os
import re
import signal
import sys
from types import FrameType
from typing import Any, Awaitable, Callable, Optional, Union # noqa
from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat
from gunicorn.workers import base
from aiohttp import web
from .helpers import set_result
from .web_app import Application
from .web_log import AccessLogger
try:
import ssl
SSLContext = ssl.SSLContext
except ImportError: # pragma: no cover
ssl = None # type: ignore[assignment]
SSLContext = object # type: ignore[misc,assignment]
__all__ = ("GunicornWebWorker", "GunicornUVLoopWebWorker", "GunicornTokioWebWorker")
class GunicornWebWorker(base.Worker): # type: ignore[misc,no-any-unimported]
DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT
DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default
def __init__(self, *args: Any, **kw: Any) -> None: # pragma: no cover
super().__init__(*args, **kw)
self._task: Optional[asyncio.Task[None]] = None
self.exit_code = 0
self._notify_waiter: Optional[asyncio.Future[bool]] = None
def init_process(self) -> None:
# create new event_loop after fork
asyncio.get_event_loop().close()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
super().init_process()
def run(self) -> None:
self._task = self.loop.create_task(self._run())
try: # ignore all finalization problems
self.loop.run_until_complete(self._task)
except Exception:
self.log.exception("Exception in gunicorn worker")
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
sys.exit(self.exit_code)
async def _run(self) -> None:
runner = None
if isinstance(self.wsgi, Application):
app = self.wsgi
elif asyncio.iscoroutinefunction(self.wsgi):
wsgi = await self.wsgi()
if isinstance(wsgi, web.AppRunner):
runner = wsgi
app = runner.app
else:
app = wsgi
else:
raise RuntimeError(
"wsgi app should be either Application or "
"async function returning Application, got {}".format(self.wsgi)
)
if runner is None:
access_log = self.log.access_log if self.cfg.accesslog else None
runner = web.AppRunner(
app,
logger=self.log,
keepalive_timeout=self.cfg.keepalive,
access_log=access_log,
access_log_format=self._get_valid_log_format(
self.cfg.access_log_format
),
)
await runner.setup()
ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
assert runner is not None
server = runner.server
assert server is not None
for sock in self.sockets:
site = web.SockSite(
runner,
sock,
ssl_context=ctx,
shutdown_timeout=self.cfg.graceful_timeout / 100 * 95,
)
await site.start()
# If our parent changed then we shut down.
pid = os.getpid()
try:
while self.alive: # type: ignore[has-type]
self.notify()
cnt = server.requests_count
if self.cfg.max_requests and cnt > self.cfg.max_requests:
self.alive = False
self.log.info("Max requests, shutting down: %s", self)
elif pid == os.getpid() and self.ppid != os.getppid():
self.alive = False
self.log.info("Parent changed, shutting down: %s", self)
else:
await self._wait_next_notify()
except BaseException:
pass
await runner.cleanup()
def _wait_next_notify(self) -> "asyncio.Future[bool]":
self._notify_waiter_done()
loop = self.loop
assert loop is not None
self._notify_waiter = waiter = loop.create_future()
self.loop.call_later(1.0, self._notify_waiter_done, waiter)
return waiter
def _notify_waiter_done(
self, waiter: Optional["asyncio.Future[bool]"] = None
) -> None:
if waiter is None:
waiter = self._notify_waiter
if waiter is not None:
set_result(waiter, True)
if waiter is self._notify_waiter:
self._notify_waiter = None
def init_signals(self) -> None:
# Set up signals through the event loop API.
self.loop.add_signal_handler(
signal.SIGQUIT, self.handle_quit, signal.SIGQUIT, None
)
self.loop.add_signal_handler(
signal.SIGTERM, self.handle_exit, signal.SIGTERM, None
)
self.loop.add_signal_handler(
signal.SIGINT, self.handle_quit, signal.SIGINT, None
)
self.loop.add_signal_handler(
signal.SIGWINCH, self.handle_winch, signal.SIGWINCH, None
)
self.loop.add_signal_handler(
signal.SIGUSR1, self.handle_usr1, signal.SIGUSR1, None
)
self.loop.add_signal_handler(
signal.SIGABRT, self.handle_abort, signal.SIGABRT, None
)
# Don't let SIGTERM and SIGUSR1 disturb active requests
# by interrupting system calls
signal.siginterrupt(signal.SIGTERM, False)
signal.siginterrupt(signal.SIGUSR1, False)
# Reset signals so Gunicorn doesn't swallow subprocess return codes
# See: https://github.com/aio-libs/aiohttp/issues/6130
if sys.version_info < (3, 8):
# Starting from Python 3.8,
# the default child watcher is ThreadedChildWatcher.
# The watcher doesn't depend on SIGCHLD signal,
# there is no need to reset it.
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def handle_quit(self, sig: int, frame: FrameType) -> None:
self.alive = False
# worker_int callback
self.cfg.worker_int(self)
# wakeup closing process
self._notify_waiter_done()
def handle_abort(self, sig: int, frame: FrameType) -> None:
self.alive = False
self.exit_code = 1
self.cfg.worker_abort(self)
sys.exit(1)
@staticmethod
def _create_ssl_context(cfg: Any) -> "SSLContext":
"""Creates SSLContext instance for usage in asyncio.create_server.
See ssl.SSLSocket.__init__ for more details.
"""
if ssl is None: # pragma: no cover
raise RuntimeError("SSL is not supported.")
ctx = ssl.SSLContext(cfg.ssl_version)
ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
ctx.verify_mode = cfg.cert_reqs
if cfg.ca_certs:
ctx.load_verify_locations(cfg.ca_certs)
if cfg.ciphers:
ctx.set_ciphers(cfg.ciphers)
return ctx
def _get_valid_log_format(self, source_format: str) -> str:
if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT:
return self.DEFAULT_AIOHTTP_LOG_FORMAT
elif re.search(r"%\([^\)]+\)", source_format):
raise ValueError(
"Gunicorn's style options in form of `%(name)s` are not "
"supported for the log formatting. Please use aiohttp's "
"format specification to configure access log formatting: "
"http://docs.aiohttp.org/en/stable/logging.html"
"#format-specification"
)
else:
return source_format
class GunicornUVLoopWebWorker(GunicornWebWorker):
def init_process(self) -> None:
import uvloop
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup uvloop policy, so that every
# asyncio.get_event_loop() will create an instance
# of uvloop event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
super().init_process()
class GunicornTokioWebWorker(GunicornWebWorker):
def init_process(self) -> None: # pragma: no cover
import tokio
# Close any existing event loop before setting a
# new policy.
asyncio.get_event_loop().close()
# Setup tokio policy, so that every
# asyncio.get_event_loop() will create an instance
# of tokio event loop.
asyncio.set_event_loop_policy(tokio.EventLoopPolicy())
super().init_process()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_request.py | import asyncio
import datetime
import io
import re
import socket
import string
import tempfile
import types
import warnings
from http.cookies import SimpleCookie
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterator,
Mapping,
MutableMapping,
Optional,
Pattern,
Tuple,
Union,
cast,
)
from urllib.parse import parse_qsl
import attr
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import (
DEBUG,
ETAG_ANY,
LIST_QUOTED_ETAG_RE,
ChainMapProxy,
ETag,
HeadersMixin,
parse_http_date,
reify,
sentinel,
)
from .http_parser import RawRequestMessage
from .http_writer import HttpVersion
from .multipart import BodyPartReader, MultipartReader
from .streams import EmptyStreamReader, StreamReader
from .typedefs import (
DEFAULT_JSON_DECODER,
Final,
JSONDecoder,
LooseHeaders,
RawHeaders,
StrOrURL,
)
from .web_exceptions import HTTPRequestEntityTooLarge
from .web_response import StreamResponse
__all__ = ("BaseRequest", "FileField", "Request")
if TYPE_CHECKING: # pragma: no cover
from .web_app import Application
from .web_protocol import RequestHandler
from .web_urldispatcher import UrlMappingMatchInfo
@attr.s(auto_attribs=True, frozen=True, slots=True)
class FileField:
name: str
filename: str
file: io.BufferedReader
content_type: str
headers: "CIMultiDictProxy[str]"
_TCHAR: Final[str] = string.digits + string.ascii_letters + r"!#$%&'*+.^_`|~-"
# '-' at the end to prevent interpretation as range in a char class
_TOKEN: Final[str] = rf"[{_TCHAR}]+"
_QDTEXT: Final[str] = r"[{}]".format(
r"".join(chr(c) for c in (0x09, 0x20, 0x21) + tuple(range(0x23, 0x7F)))
)
# qdtext includes 0x5C to escape 0x5D ('\]')
# qdtext excludes obs-text (because obsoleted, and encoding not specified)
_QUOTED_PAIR: Final[str] = r"\\[\t !-~]"
_QUOTED_STRING: Final[str] = r'"(?:{quoted_pair}|{qdtext})*"'.format(
qdtext=_QDTEXT, quoted_pair=_QUOTED_PAIR
)
_FORWARDED_PAIR: Final[
str
] = r"({token})=({token}|{quoted_string})(:\d{{1,4}})?".format(
token=_TOKEN, quoted_string=_QUOTED_STRING
)
_QUOTED_PAIR_REPLACE_RE: Final[Pattern[str]] = re.compile(r"\\([\t !-~])")
# same pattern as _QUOTED_PAIR but contains a capture group
_FORWARDED_PAIR_RE: Final[Pattern[str]] = re.compile(_FORWARDED_PAIR)
############################################################
# HTTP Request
############################################################
class BaseRequest(MutableMapping[str, Any], HeadersMixin):
POST_METHODS = {
hdrs.METH_PATCH,
hdrs.METH_POST,
hdrs.METH_PUT,
hdrs.METH_TRACE,
hdrs.METH_DELETE,
}
ATTRS = HeadersMixin.ATTRS | frozenset(
[
"_message",
"_protocol",
"_payload_writer",
"_payload",
"_headers",
"_method",
"_version",
"_rel_url",
"_post",
"_read_bytes",
"_state",
"_cache",
"_task",
"_client_max_size",
"_loop",
"_transport_sslcontext",
"_transport_peername",
]
)
def __init__(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: "RequestHandler",
payload_writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
loop: asyncio.AbstractEventLoop,
*,
client_max_size: int = 1024**2,
state: Optional[Dict[str, Any]] = None,
scheme: Optional[str] = None,
host: Optional[str] = None,
remote: Optional[str] = None,
) -> None:
if state is None:
state = {}
self._message = message
self._protocol = protocol
self._payload_writer = payload_writer
self._payload = payload
self._headers = message.headers
self._method = message.method
self._version = message.version
self._cache: Dict[str, Any] = {}
url = message.url
if url.is_absolute():
# absolute URL is given,
# override auto-calculating url, host, and scheme
# all other properties should be good
self._cache["url"] = url
self._cache["host"] = url.host
self._cache["scheme"] = url.scheme
self._rel_url = url.relative()
else:
self._rel_url = message.url
self._post: Optional[MultiDictProxy[Union[str, bytes, FileField]]] = None
self._read_bytes: Optional[bytes] = None
self._state = state
self._task = task
self._client_max_size = client_max_size
self._loop = loop
transport = self._protocol.transport
assert transport is not None
self._transport_sslcontext = transport.get_extra_info("sslcontext")
self._transport_peername = transport.get_extra_info("peername")
if scheme is not None:
self._cache["scheme"] = scheme
if host is not None:
self._cache["host"] = host
if remote is not None:
self._cache["remote"] = remote
def clone(
self,
*,
method: str = sentinel,
rel_url: StrOrURL = sentinel,
headers: LooseHeaders = sentinel,
scheme: str = sentinel,
host: str = sentinel,
remote: str = sentinel,
) -> "BaseRequest":
"""Clone itself with replacement some attributes.
Creates and returns a new instance of Request object. If no parameters
are given, an exact copy is returned. If a parameter is not passed, it
will reuse the one from the current request object.
"""
if self._read_bytes:
raise RuntimeError("Cannot clone request " "after reading its content")
dct: Dict[str, Any] = {}
if method is not sentinel:
dct["method"] = method
if rel_url is not sentinel:
new_url = URL(rel_url)
dct["url"] = new_url
dct["path"] = str(new_url)
if headers is not sentinel:
# a copy semantic
dct["headers"] = CIMultiDictProxy(CIMultiDict(headers))
dct["raw_headers"] = tuple(
(k.encode("utf-8"), v.encode("utf-8")) for k, v in headers.items()
)
message = self._message._replace(**dct)
kwargs = {}
if scheme is not sentinel:
kwargs["scheme"] = scheme
if host is not sentinel:
kwargs["host"] = host
if remote is not sentinel:
kwargs["remote"] = remote
return self.__class__(
message,
self._payload,
self._protocol,
self._payload_writer,
self._task,
self._loop,
client_max_size=self._client_max_size,
state=self._state.copy(),
**kwargs,
)
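# Clone sketch (illustrative): middlewares commonly rewrite a request before
# dispatch, e.g.
#   request = request.clone(rel_url=request.rel_url.with_path("/v2" + request.path))
# which rebuilds the underlying RawRequestMessage while reusing the payload
# and copying the state dict.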
@property
def task(self) -> "asyncio.Task[None]":
return self._task
@property
def protocol(self) -> "RequestHandler":
return self._protocol
@property
def transport(self) -> Optional[asyncio.Transport]:
if self._protocol is None:
return None
return self._protocol.transport
@property
def writer(self) -> AbstractStreamWriter:
return self._payload_writer
@reify
def message(self) -> RawRequestMessage:
warnings.warn("Request.message is deprecated", DeprecationWarning, stacklevel=3)
return self._message
@reify
def rel_url(self) -> URL:
return self._rel_url
@reify
def loop(self) -> asyncio.AbstractEventLoop:
warnings.warn(
"request.loop property is deprecated", DeprecationWarning, stacklevel=2
)
return self._loop
# MutableMapping API
def __getitem__(self, key: str) -> Any:
return self._state[key]
def __setitem__(self, key: str, value: Any) -> None:
self._state[key] = value
def __delitem__(self, key: str) -> None:
del self._state[key]
def __len__(self) -> int:
return len(self._state)
def __iter__(self) -> Iterator[str]:
return iter(self._state)
########
@reify
def secure(self) -> bool:
"""A bool indicating if the request is handled with SSL."""
return self.scheme == "https"
@reify
def forwarded(self) -> Tuple[Mapping[str, str], ...]:
"""A tuple containing all parsed Forwarded header(s).
        Makes an effort to parse Forwarded headers as specified by RFC 7239:
        - It adds one (immutable) dictionary per Forwarded 'field-value', i.e.
          per proxy. The first element corresponds to the data in the Forwarded
          field-value added by the first proxy encountered by the client. Each
          subsequent item corresponds to those added by later proxies.
- It checks that every value has valid syntax in general as specified
in section 4: either a 'token' or a 'quoted-string'.
- It un-escapes found escape sequences.
- It does NOT validate 'by' and 'for' contents as specified in section
6.
- It does NOT validate 'host' contents (Host ABNF).
- It does NOT validate 'proto' contents for valid URI scheme names.
Returns a tuple containing one or more immutable dicts
"""
elems = []
for field_value in self._message.headers.getall(hdrs.FORWARDED, ()):
length = len(field_value)
pos = 0
need_separator = False
elem: Dict[str, str] = {}
elems.append(types.MappingProxyType(elem))
while 0 <= pos < length:
match = _FORWARDED_PAIR_RE.match(field_value, pos)
if match is not None: # got a valid forwarded-pair
if need_separator:
# bad syntax here, skip to next comma
pos = field_value.find(",", pos)
else:
name, value, port = match.groups()
if value[0] == '"':
# quoted string: remove quotes and unescape
value = _QUOTED_PAIR_REPLACE_RE.sub(r"\1", value[1:-1])
if port:
value += port
elem[name.lower()] = value
pos += len(match.group(0))
need_separator = True
elif field_value[pos] == ",": # next forwarded-element
need_separator = False
elem = {}
elems.append(types.MappingProxyType(elem))
pos += 1
elif field_value[pos] == ";": # next forwarded-pair
need_separator = False
pos += 1
elif field_value[pos] in " \t":
# Allow whitespace even between forwarded-pairs, though
# RFC 7239 doesn't. This simplifies code and is in line
# with Postel's law.
pos += 1
else:
# bad syntax here, skip to next comma
pos = field_value.find(",", pos)
return tuple(elems)
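    # Usage sketch (editorial illustration, not part of aiohttp): reading the
    # parsed Forwarded elements in a handler; the header content below is
    # hypothetical.
    #
    #     # Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
    #     first = request.forwarded[0]
    #     first.get("proto")  # -> 'https'
    #     first.get("for")    # -> '192.0.2.60'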
@reify
def scheme(self) -> str:
"""A string representing the scheme of the request.
Hostname is resolved in this order:
- overridden value by .clone(scheme=new_scheme) call.
- type of connection to peer: HTTPS if socket is SSL, HTTP otherwise.
'http' or 'https'.
"""
if self._transport_sslcontext:
return "https"
else:
return "http"
@reify
def method(self) -> str:
"""Read only property for getting HTTP method.
        The value is an upper-cased str like 'GET', 'POST', 'PUT', etc.
"""
return self._method
@reify
def version(self) -> HttpVersion:
"""Read only property for getting HTTP version of request.
Returns aiohttp.protocol.HttpVersion instance.
"""
return self._version
@reify
def host(self) -> str:
"""Hostname of the request.
Hostname is resolved in this order:
- overridden value by .clone(host=new_host) call.
- HOST HTTP header
- socket.getfqdn() value
"""
host = self._message.headers.get(hdrs.HOST)
if host is not None:
return host
return socket.getfqdn()
@reify
def remote(self) -> Optional[str]:
"""Remote IP of client initiated HTTP request.
The IP is resolved in this order:
- overridden value by .clone(remote=new_remote) call.
- peername of opened socket
"""
if self._transport_peername is None:
return None
if isinstance(self._transport_peername, (list, tuple)):
return str(self._transport_peername[0])
return str(self._transport_peername)
@reify
def url(self) -> URL:
url = URL.build(scheme=self.scheme, host=self.host)
return url.join(self._rel_url)
@reify
def path(self) -> str:
"""The URL including *PATH INFO* without the host or scheme.
E.g., ``/app/blog``
"""
return self._rel_url.path
@reify
def path_qs(self) -> str:
"""The URL including PATH_INFO and the query string.
        E.g., ``/app/blog?id=10``
"""
return str(self._rel_url)
@reify
def raw_path(self) -> str:
"""The URL including raw *PATH INFO* without the host or scheme.
        Warning: the path is unquoted and may contain invalid URL characters.
E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
"""
return self._message.path
@reify
def query(self) -> "MultiDictProxy[str]":
"""A multidict with all the variables in the query string."""
return MultiDictProxy(self._rel_url.query)
@reify
def query_string(self) -> str:
"""The query string in the URL.
E.g., id=10
"""
return self._rel_url.query_string
@reify
def headers(self) -> "CIMultiDictProxy[str]":
"""A case-insensitive multidict proxy with all headers."""
return self._headers
@reify
def raw_headers(self) -> RawHeaders:
"""A sequence of pairs for all headers."""
return self._message.raw_headers
@reify
def if_modified_since(self) -> Optional[datetime.datetime]:
"""The value of If-Modified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE))
@reify
def if_unmodified_since(self) -> Optional[datetime.datetime]:
"""The value of If-Unmodified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE))
@staticmethod
def _etag_values(etag_header: str) -> Iterator[ETag]:
"""Extract `ETag` objects from raw header."""
if etag_header == ETAG_ANY:
yield ETag(
is_weak=False,
value=ETAG_ANY,
)
else:
for match in LIST_QUOTED_ETAG_RE.finditer(etag_header):
is_weak, value, garbage = match.group(2, 3, 4)
                # Any symbol captured by the 4th group means that
                # the following sequence is invalid.
if garbage:
break
yield ETag(
is_weak=bool(is_weak),
value=value,
)
@classmethod
def _if_match_or_none_impl(
cls, header_value: Optional[str]
) -> Optional[Tuple[ETag, ...]]:
if not header_value:
return None
return tuple(cls._etag_values(header_value))
@reify
def if_match(self) -> Optional[Tuple[ETag, ...]]:
"""The value of If-Match HTTP header, or None.
This header is represented as a `tuple` of `ETag` objects.
"""
return self._if_match_or_none_impl(self.headers.get(hdrs.IF_MATCH))
@reify
def if_none_match(self) -> Optional[Tuple[ETag, ...]]:
"""The value of If-None-Match HTTP header, or None.
This header is represented as a `tuple` of `ETag` objects.
"""
return self._if_match_or_none_impl(self.headers.get(hdrs.IF_NONE_MATCH))
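    # Usage sketch (editorial illustration, not part of aiohttp): conditional
    # GET handling with the ETag helpers above; the tag value is hypothetical.
    #
    #     current = ETag(is_weak=False, value="abc123")
    #     etags = request.if_none_match
    #     if etags and any(e.value in (ETAG_ANY, current.value) for e in etags):
    #         raise web.HTTPNotModified()  # assumes `from aiohttp import web`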
@reify
def if_range(self) -> Optional[datetime.datetime]:
"""The value of If-Range HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self.headers.get(hdrs.IF_RANGE))
@reify
def keep_alive(self) -> bool:
"""Is keepalive enabled by client?"""
return not self._message.should_close
@reify
def cookies(self) -> Mapping[str, str]:
"""Return request cookies.
A read-only dictionary-like object.
"""
raw = self.headers.get(hdrs.COOKIE, "")
parsed: SimpleCookie[str] = SimpleCookie(raw)
return MappingProxyType({key: val.value for key, val in parsed.items()})
@reify
def http_range(self) -> slice:
"""The content of Range HTTP header.
Return a slice instance.
"""
rng = self._headers.get(hdrs.RANGE)
start, end = None, None
if rng is not None:
try:
pattern = r"^bytes=(\d*)-(\d*)$"
start, end = re.findall(pattern, rng)[0]
except IndexError: # pattern was not found in header
raise ValueError("range not in acceptable format")
end = int(end) if end else None
start = int(start) if start else None
if start is None and end is not None:
# end with no start is to return tail of content
start = -end
end = None
if start is not None and end is not None:
# end is inclusive in range header, exclusive for slice
end += 1
if start >= end:
raise ValueError("start cannot be after end")
if start is end is None: # No valid range supplied
raise ValueError("No start or end of range specified")
return slice(start, end, 1)
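    # Usage sketch (editorial illustration, not part of aiohttp): applying the
    # computed slice to an in-memory payload; `body` is hypothetical.
    #
    #     # Range: bytes=0-99  ->  slice(0, 100, 1)
    #     rng = request.http_range
    #     chunk = body[rng]  # a slice works directly on any sequence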
@reify
def content(self) -> StreamReader:
"""Return raw payload stream."""
return self._payload
@property
def has_body(self) -> bool:
"""Return True if request's HTTP BODY can be read, False otherwise."""
warnings.warn(
"Deprecated, use .can_read_body #2005", DeprecationWarning, stacklevel=2
)
return not self._payload.at_eof()
@property
def can_read_body(self) -> bool:
"""Return True if request's HTTP BODY can be read, False otherwise."""
return not self._payload.at_eof()
@reify
def body_exists(self) -> bool:
"""Return True if request has HTTP BODY, False otherwise."""
return type(self._payload) is not EmptyStreamReader
async def release(self) -> None:
"""Release request.
Eat unread part of HTTP BODY if present.
"""
while not self._payload.at_eof():
await self._payload.readany()
async def read(self) -> bytes:
"""Read request body if present.
Returns bytes object with full request content.
"""
if self._read_bytes is None:
body = bytearray()
while True:
chunk = await self._payload.readany()
body.extend(chunk)
if self._client_max_size:
body_size = len(body)
if body_size >= self._client_max_size:
raise HTTPRequestEntityTooLarge(
max_size=self._client_max_size, actual_size=body_size
)
if not chunk:
break
self._read_bytes = bytes(body)
return self._read_bytes
async def text(self) -> str:
"""Return BODY as text using encoding from .charset."""
bytes_body = await self.read()
encoding = self.charset or "utf-8"
return bytes_body.decode(encoding)
async def json(self, *, loads: JSONDecoder = DEFAULT_JSON_DECODER) -> Any:
"""Return BODY as JSON."""
body = await self.text()
return loads(body)
async def multipart(self) -> MultipartReader:
"""Return async iterator to process BODY as multipart."""
return MultipartReader(self._headers, self._payload)
async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]":
"""Return POST parameters."""
if self._post is not None:
return self._post
if self._method not in self.POST_METHODS:
self._post = MultiDictProxy(MultiDict())
return self._post
content_type = self.content_type
if content_type not in (
"",
"application/x-www-form-urlencoded",
"multipart/form-data",
):
self._post = MultiDictProxy(MultiDict())
return self._post
out: MultiDict[Union[str, bytes, FileField]] = MultiDict()
if content_type == "multipart/form-data":
multipart = await self.multipart()
max_size = self._client_max_size
field = await multipart.next()
while field is not None:
size = 0
field_ct = field.headers.get(hdrs.CONTENT_TYPE)
if isinstance(field, BodyPartReader):
assert field.name is not None
# Note that according to RFC 7578, the Content-Type header
# is optional, even for files, so we can't assume it's
# present.
# https://tools.ietf.org/html/rfc7578#section-4.4
if field.filename:
# store file in temp file
tmp = tempfile.TemporaryFile()
chunk = await field.read_chunk(size=2**16)
while chunk:
chunk = field.decode(chunk)
tmp.write(chunk)
size += len(chunk)
if 0 < max_size < size:
tmp.close()
raise HTTPRequestEntityTooLarge(
max_size=max_size, actual_size=size
)
chunk = await field.read_chunk(size=2**16)
tmp.seek(0)
if field_ct is None:
field_ct = "application/octet-stream"
ff = FileField(
field.name,
field.filename,
cast(io.BufferedReader, tmp),
field_ct,
field.headers,
)
out.add(field.name, ff)
else:
# deal with ordinary data
value = await field.read(decode=True)
if field_ct is None or field_ct.startswith("text/"):
charset = field.get_charset(default="utf-8")
out.add(field.name, value.decode(charset))
else:
out.add(field.name, value)
size += len(value)
if 0 < max_size < size:
raise HTTPRequestEntityTooLarge(
max_size=max_size, actual_size=size
)
else:
raise ValueError(
"To decode nested multipart you need " "to use custom reader",
)
field = await multipart.next()
else:
data = await self.read()
if data:
charset = self.charset or "utf-8"
out.extend(
parse_qsl(
data.rstrip().decode(charset),
keep_blank_values=True,
encoding=charset,
)
)
self._post = MultiDictProxy(out)
return self._post
def get_extra_info(self, name: str, default: Any = None) -> Any:
"""Extra info from protocol transport"""
protocol = self._protocol
if protocol is None:
return default
transport = protocol.transport
if transport is None:
return default
return transport.get_extra_info(name, default)
def __repr__(self) -> str:
ascii_encodable_path = self.path.encode("ascii", "backslashreplace").decode(
"ascii"
)
return "<{} {} {} >".format(
self.__class__.__name__, self._method, ascii_encodable_path
)
def __eq__(self, other: object) -> bool:
return id(self) == id(other)
def __bool__(self) -> bool:
return True
async def _prepare_hook(self, response: StreamResponse) -> None:
return
def _cancel(self, exc: BaseException) -> None:
self._payload.set_exception(exc)
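# Usage sketch (editorial illustration, not part of aiohttp): a handler
# exercising the BaseRequest body API above; the route and field names are
# hypothetical.
#
#     from aiohttp import web
#
#     async def submit(request: web.Request) -> web.Response:
#         if request.content_type == "application/json":
#             data = await request.json()  # decoded with DEFAULT_JSON_DECODER
#         else:
#             data = await request.post()  # form fields and uploaded files
#         return web.json_response({"fields": list(data)})
#
# Note that clone() must be called before the body is read; afterwards it
# raises RuntimeError, as enforced in BaseRequest.clone() above.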
class Request(BaseRequest):
ATTRS = BaseRequest.ATTRS | frozenset(["_match_info"])
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# matchdict, route_name, handler
# or information about traversal lookup
# initialized after route resolving
self._match_info: Optional[UrlMappingMatchInfo] = None
if DEBUG:
def __setattr__(self, name: str, val: Any) -> None:
if name not in self.ATTRS:
warnings.warn(
"Setting custom {}.{} attribute "
"is discouraged".format(self.__class__.__name__, name),
DeprecationWarning,
stacklevel=2,
)
super().__setattr__(name, val)
def clone(
self,
*,
method: str = sentinel,
rel_url: StrOrURL = sentinel,
headers: LooseHeaders = sentinel,
scheme: str = sentinel,
host: str = sentinel,
remote: str = sentinel,
) -> "Request":
ret = super().clone(
method=method,
rel_url=rel_url,
headers=headers,
scheme=scheme,
host=host,
remote=remote,
)
new_ret = cast(Request, ret)
new_ret._match_info = self._match_info
return new_ret
@reify
def match_info(self) -> "UrlMappingMatchInfo":
"""Result of route resolving."""
match_info = self._match_info
assert match_info is not None
return match_info
@property
def app(self) -> "Application":
"""Application instance."""
match_info = self._match_info
assert match_info is not None
return match_info.current_app
@property
def config_dict(self) -> ChainMapProxy:
match_info = self._match_info
assert match_info is not None
lst = match_info.apps
app = self.app
idx = lst.index(app)
sublist = list(reversed(lst[: idx + 1]))
return ChainMapProxy(sublist)
async def _prepare_hook(self, response: StreamResponse) -> None:
match_info = self._match_info
if match_info is None:
return
for app in match_info._apps:
await app.on_response_prepare.send(self, response)
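# Usage sketch (editorial illustration, not part of aiohttp): match_info and
# app as seen from a handler; the '/greet/{name}' route is hypothetical.
#
#     from aiohttp import web
#
#     async def greet(request: web.Request) -> web.Response:
#         name = request.match_info["name"]  # filled in by the router
#         request.app["hits"] = request.app.get("hits", 0) + 1
#         return web.Response(text="Hello, " + name)
#
#     app = web.Application()
#     app.router.add_get("/greet/{name}", greet)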
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_urldispatcher.py | import abc
import asyncio
import base64
import hashlib
import inspect
import keyword
import os
import re
import warnings
from contextlib import contextmanager
from functools import wraps
from pathlib import Path
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Container,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
Optional,
Pattern,
Set,
Sized,
Tuple,
Type,
Union,
cast,
)
from yarl import URL, __version__ as yarl_version # type: ignore[attr-defined]
from . import hdrs
from .abc import AbstractMatchInfo, AbstractRouter, AbstractView
from .helpers import DEBUG
from .http import HttpVersion11
from .typedefs import Final, Handler, PathLike, TypedDict
from .web_exceptions import (
HTTPException,
HTTPExpectationFailed,
HTTPForbidden,
HTTPMethodNotAllowed,
HTTPNotFound,
)
from .web_fileresponse import FileResponse
from .web_request import Request
from .web_response import Response, StreamResponse
from .web_routedef import AbstractRouteDef
__all__ = (
"UrlDispatcher",
"UrlMappingMatchInfo",
"AbstractResource",
"Resource",
"PlainResource",
"DynamicResource",
"AbstractRoute",
"ResourceRoute",
"StaticResource",
"View",
)
if TYPE_CHECKING: # pragma: no cover
from .web_app import Application
BaseDict = Dict[str, str]
else:
BaseDict = dict
YARL_VERSION: Final[Tuple[int, ...]] = tuple(map(int, yarl_version.split(".")[:2]))
HTTP_METHOD_RE: Final[Pattern[str]] = re.compile(
r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$"
)
ROUTE_RE: Final[Pattern[str]] = re.compile(
r"(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})"
)
PATH_SEP: Final[str] = re.escape("/")
_ExpectHandler = Callable[[Request], Awaitable[None]]
_Resolve = Tuple[Optional["UrlMappingMatchInfo"], Set[str]]
class _InfoDict(TypedDict, total=False):
path: str
formatter: str
pattern: Pattern[str]
directory: Path
prefix: str
routes: Mapping[str, "AbstractRoute"]
app: "Application"
domain: str
rule: "AbstractRuleMatching"
http_exception: HTTPException
class AbstractResource(Sized, Iterable["AbstractRoute"]):
def __init__(self, *, name: Optional[str] = None) -> None:
self._name = name
@property
def name(self) -> Optional[str]:
return self._name
@property
@abc.abstractmethod
def canonical(self) -> str:
"""Exposes the resource's canonical path.
For example '/foo/bar/{name}'
"""
@abc.abstractmethod # pragma: no branch
def url_for(self, **kwargs: str) -> URL:
"""Construct url for resource with additional params."""
@abc.abstractmethod # pragma: no branch
async def resolve(self, request: Request) -> _Resolve:
"""Resolve resource.
Return (UrlMappingMatchInfo, allowed_methods) pair.
"""
@abc.abstractmethod
def add_prefix(self, prefix: str) -> None:
"""Add a prefix to processed URLs.
        Required for subapplication support.
"""
@abc.abstractmethod
def get_info(self) -> _InfoDict:
"""Return a dict with additional info useful for introspection"""
def freeze(self) -> None:
pass
@abc.abstractmethod
def raw_match(self, path: str) -> bool:
"""Perform a raw match against path"""
class AbstractRoute(abc.ABC):
def __init__(
self,
method: str,
handler: Union[Handler, Type[AbstractView]],
*,
expect_handler: Optional[_ExpectHandler] = None,
resource: Optional[AbstractResource] = None,
) -> None:
if expect_handler is None:
expect_handler = _default_expect_handler
assert asyncio.iscoroutinefunction(
expect_handler
), f"Coroutine is expected, got {expect_handler!r}"
method = method.upper()
if not HTTP_METHOD_RE.match(method):
raise ValueError(f"{method} is not allowed HTTP method")
assert callable(handler), handler
if asyncio.iscoroutinefunction(handler):
pass
elif inspect.isgeneratorfunction(handler):
warnings.warn(
"Bare generators are deprecated, " "use @coroutine wrapper",
DeprecationWarning,
)
elif isinstance(handler, type) and issubclass(handler, AbstractView):
pass
else:
warnings.warn(
"Bare functions are deprecated, " "use async ones", DeprecationWarning
)
@wraps(handler)
async def handler_wrapper(request: Request) -> StreamResponse:
result = old_handler(request)
if asyncio.iscoroutine(result):
return await result
return result # type: ignore[return-value]
old_handler = handler
handler = handler_wrapper
self._method = method
self._handler = handler
self._expect_handler = expect_handler
self._resource = resource
@property
def method(self) -> str:
return self._method
@property
def handler(self) -> Handler:
return self._handler
@property
@abc.abstractmethod
def name(self) -> Optional[str]:
"""Optional route's name, always equals to resource's name."""
@property
def resource(self) -> Optional[AbstractResource]:
return self._resource
@abc.abstractmethod
def get_info(self) -> _InfoDict:
"""Return a dict with additional info useful for introspection"""
@abc.abstractmethod # pragma: no branch
def url_for(self, *args: str, **kwargs: str) -> URL:
"""Construct url for route with additional params."""
async def handle_expect_header(self, request: Request) -> None:
await self._expect_handler(request)
class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo):
def __init__(self, match_dict: Dict[str, str], route: AbstractRoute):
super().__init__(match_dict)
self._route = route
self._apps: List[Application] = []
self._current_app: Optional[Application] = None
self._frozen = False
@property
def handler(self) -> Handler:
return self._route.handler
@property
def route(self) -> AbstractRoute:
return self._route
@property
def expect_handler(self) -> _ExpectHandler:
return self._route.handle_expect_header
@property
def http_exception(self) -> Optional[HTTPException]:
return None
def get_info(self) -> _InfoDict: # type: ignore[override]
return self._route.get_info()
@property
def apps(self) -> Tuple["Application", ...]:
return tuple(self._apps)
def add_app(self, app: "Application") -> None:
if self._frozen:
raise RuntimeError("Cannot change apps stack after .freeze() call")
if self._current_app is None:
self._current_app = app
self._apps.insert(0, app)
@property
def current_app(self) -> "Application":
app = self._current_app
assert app is not None
return app
@contextmanager
def set_current_app(self, app: "Application") -> Generator[None, None, None]:
if DEBUG: # pragma: no cover
if app not in self._apps:
raise RuntimeError(
"Expected one of the following apps {!r}, got {!r}".format(
self._apps, app
)
)
prev = self._current_app
self._current_app = app
try:
yield
finally:
self._current_app = prev
def freeze(self) -> None:
self._frozen = True
def __repr__(self) -> str:
return f"<MatchInfo {super().__repr__()}: {self._route}>"
class MatchInfoError(UrlMappingMatchInfo):
def __init__(self, http_exception: HTTPException) -> None:
self._exception = http_exception
super().__init__({}, SystemRoute(self._exception))
@property
def http_exception(self) -> HTTPException:
return self._exception
def __repr__(self) -> str:
return "<MatchInfoError {}: {}>".format(
self._exception.status, self._exception.reason
)
async def _default_expect_handler(request: Request) -> None:
"""Default handler for Expect header.
Just send "100 Continue" to client.
raise HTTPExpectationFailed if value of header is not "100-continue"
"""
expect = request.headers.get(hdrs.EXPECT, "")
if request.version == HttpVersion11:
if expect.lower() == "100-continue":
await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
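# Usage sketch (editorial illustration, not part of aiohttp): a custom Expect
# handler mirroring the default above can be passed through the
# expect_handler parameter of add_route(); the name is hypothetical.
#
#     async def lenient_expect_handler(request: Request) -> None:
#         expect = request.headers.get(hdrs.EXPECT, "").lower()
#         if expect == "100-continue":
#             await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
#         # unlike the default, unknown Expect values are silently ignored
#
#     # app.router.add_route("POST", "/upload", upload_handler,
#     #                      expect_handler=lenient_expect_handler)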
class Resource(AbstractResource):
def __init__(self, *, name: Optional[str] = None) -> None:
super().__init__(name=name)
self._routes: List[ResourceRoute] = []
def add_route(
self,
method: str,
handler: Union[Type[AbstractView], Handler],
*,
expect_handler: Optional[_ExpectHandler] = None,
) -> "ResourceRoute":
for route_obj in self._routes:
if route_obj.method == method or route_obj.method == hdrs.METH_ANY:
raise RuntimeError(
"Added route will never be executed, "
"method {route.method} is already "
"registered".format(route=route_obj)
)
route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler)
self.register_route(route_obj)
return route_obj
def register_route(self, route: "ResourceRoute") -> None:
assert isinstance(
route, ResourceRoute
), f"Instance of Route class is required, got {route!r}"
self._routes.append(route)
async def resolve(self, request: Request) -> _Resolve:
allowed_methods: Set[str] = set()
match_dict = self._match(request.rel_url.raw_path)
if match_dict is None:
return None, allowed_methods
for route_obj in self._routes:
route_method = route_obj.method
allowed_methods.add(route_method)
if route_method == request.method or route_method == hdrs.METH_ANY:
return (UrlMappingMatchInfo(match_dict, route_obj), allowed_methods)
else:
return None, allowed_methods
@abc.abstractmethod
def _match(self, path: str) -> Optional[Dict[str, str]]:
pass # pragma: no cover
def __len__(self) -> int:
return len(self._routes)
def __iter__(self) -> Iterator[AbstractRoute]:
return iter(self._routes)
# TODO: implement all abstract methods
class PlainResource(Resource):
def __init__(self, path: str, *, name: Optional[str] = None) -> None:
super().__init__(name=name)
assert not path or path.startswith("/")
self._path = path
@property
def canonical(self) -> str:
return self._path
def freeze(self) -> None:
if not self._path:
self._path = "/"
def add_prefix(self, prefix: str) -> None:
assert prefix.startswith("/")
assert not prefix.endswith("/")
assert len(prefix) > 1
self._path = prefix + self._path
def _match(self, path: str) -> Optional[Dict[str, str]]:
# string comparison is about 10 times faster than regexp matching
if self._path == path:
return {}
else:
return None
def raw_match(self, path: str) -> bool:
return self._path == path
def get_info(self) -> _InfoDict:
return {"path": self._path}
def url_for(self) -> URL: # type: ignore[override]
return URL.build(path=self._path, encoded=True)
def __repr__(self) -> str:
name = "'" + self.name + "' " if self.name is not None else ""
return f"<PlainResource {name} {self._path}>"
class DynamicResource(Resource):
DYN = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*)\}")
DYN_WITH_RE = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}")
GOOD = r"[^{}/]+"
def __init__(self, path: str, *, name: Optional[str] = None) -> None:
super().__init__(name=name)
pattern = ""
formatter = ""
for part in ROUTE_RE.split(path):
match = self.DYN.fullmatch(part)
if match:
pattern += "(?P<{}>{})".format(match.group("var"), self.GOOD)
formatter += "{" + match.group("var") + "}"
continue
match = self.DYN_WITH_RE.fullmatch(part)
if match:
pattern += "(?P<{var}>{re})".format(**match.groupdict())
formatter += "{" + match.group("var") + "}"
continue
if "{" in part or "}" in part:
raise ValueError(f"Invalid path '{path}'['{part}']")
part = _requote_path(part)
formatter += part
pattern += re.escape(part)
try:
compiled = re.compile(pattern)
except re.error as exc:
raise ValueError(f"Bad pattern '{pattern}': {exc}") from None
assert compiled.pattern.startswith(PATH_SEP)
assert formatter.startswith("/")
self._pattern = compiled
self._formatter = formatter
@property
def canonical(self) -> str:
return self._formatter
def add_prefix(self, prefix: str) -> None:
assert prefix.startswith("/")
assert not prefix.endswith("/")
assert len(prefix) > 1
self._pattern = re.compile(re.escape(prefix) + self._pattern.pattern)
self._formatter = prefix + self._formatter
def _match(self, path: str) -> Optional[Dict[str, str]]:
match = self._pattern.fullmatch(path)
if match is None:
return None
else:
return {
key: _unquote_path(value) for key, value in match.groupdict().items()
}
def raw_match(self, path: str) -> bool:
return self._formatter == path
def get_info(self) -> _InfoDict:
return {"formatter": self._formatter, "pattern": self._pattern}
def url_for(self, **parts: str) -> URL:
url = self._formatter.format_map({k: _quote_path(v) for k, v in parts.items()})
return URL.build(path=url, encoded=True)
def __repr__(self) -> str:
name = "'" + self.name + "' " if self.name is not None else ""
return "<DynamicResource {name} {formatter}>".format(
name=name, formatter=self._formatter
)
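# Usage sketch (editorial illustration, not part of aiohttp): how a dynamic
# path compiles and how url_for() inverts it; the path and parts are
# hypothetical.
#
#     resource = DynamicResource(r"/users/{id:\d+}/posts/{slug}")
#     resource.canonical                      # -> '/users/{id}/posts/{slug}'
#     resource.url_for(id="7", slug="intro")  # -> URL('/users/7/posts/intro')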
class PrefixResource(AbstractResource):
def __init__(self, prefix: str, *, name: Optional[str] = None) -> None:
assert not prefix or prefix.startswith("/"), prefix
assert prefix in ("", "/") or not prefix.endswith("/"), prefix
super().__init__(name=name)
self._prefix = _requote_path(prefix)
self._prefix2 = self._prefix + "/"
@property
def canonical(self) -> str:
return self._prefix
def add_prefix(self, prefix: str) -> None:
assert prefix.startswith("/")
assert not prefix.endswith("/")
assert len(prefix) > 1
self._prefix = prefix + self._prefix
self._prefix2 = self._prefix + "/"
def raw_match(self, prefix: str) -> bool:
return False
# TODO: impl missing abstract methods
class StaticResource(PrefixResource):
VERSION_KEY = "v"
def __init__(
self,
prefix: str,
directory: PathLike,
*,
name: Optional[str] = None,
expect_handler: Optional[_ExpectHandler] = None,
chunk_size: int = 256 * 1024,
show_index: bool = False,
follow_symlinks: bool = False,
append_version: bool = False,
) -> None:
super().__init__(prefix, name=name)
try:
directory = Path(directory)
if str(directory).startswith("~"):
directory = Path(os.path.expanduser(str(directory)))
directory = directory.resolve()
if not directory.is_dir():
raise ValueError("Not a directory")
except (FileNotFoundError, ValueError) as error:
raise ValueError(f"No directory exists at '{directory}'") from error
self._directory = directory
self._show_index = show_index
self._chunk_size = chunk_size
self._follow_symlinks = follow_symlinks
self._expect_handler = expect_handler
self._append_version = append_version
self._routes = {
"GET": ResourceRoute(
"GET", self._handle, self, expect_handler=expect_handler
),
"HEAD": ResourceRoute(
"HEAD", self._handle, self, expect_handler=expect_handler
),
}
def url_for( # type: ignore[override]
self,
*,
filename: Union[str, Path],
append_version: Optional[bool] = None,
) -> URL:
if append_version is None:
append_version = self._append_version
if isinstance(filename, Path):
filename = str(filename)
filename = filename.lstrip("/")
url = URL.build(path=self._prefix, encoded=True)
# filename is not encoded
if YARL_VERSION < (1, 6):
url = url / filename.replace("%", "%25")
else:
url = url / filename
if append_version:
try:
filepath = self._directory.joinpath(filename).resolve()
if not self._follow_symlinks:
filepath.relative_to(self._directory)
except (ValueError, FileNotFoundError):
                # ValueError for the case when the path points to a symlink
                # while follow_symlinks is False
return url # relatively safe
if filepath.is_file():
# TODO cache file content
# with file watcher for cache invalidation
with filepath.open("rb") as f:
file_bytes = f.read()
h = self._get_file_hash(file_bytes)
url = url.with_query({self.VERSION_KEY: h})
return url
return url
@staticmethod
def _get_file_hash(byte_array: bytes) -> str:
        m = hashlib.sha256()  # TODO: the hash algorithm could be a configurable param
m.update(byte_array)
b64 = base64.urlsafe_b64encode(m.digest())
return b64.decode("ascii")
def get_info(self) -> _InfoDict:
return {
"directory": self._directory,
"prefix": self._prefix,
"routes": self._routes,
}
def set_options_route(self, handler: Handler) -> None:
if "OPTIONS" in self._routes:
raise RuntimeError("OPTIONS route was set already")
self._routes["OPTIONS"] = ResourceRoute(
"OPTIONS", handler, self, expect_handler=self._expect_handler
)
async def resolve(self, request: Request) -> _Resolve:
path = request.rel_url.raw_path
method = request.method
allowed_methods = set(self._routes)
if not path.startswith(self._prefix2) and path != self._prefix:
return None, set()
if method not in allowed_methods:
return None, allowed_methods
match_dict = {"filename": _unquote_path(path[len(self._prefix) + 1 :])}
return (UrlMappingMatchInfo(match_dict, self._routes[method]), allowed_methods)
def __len__(self) -> int:
return len(self._routes)
def __iter__(self) -> Iterator[AbstractRoute]:
return iter(self._routes.values())
async def _handle(self, request: Request) -> StreamResponse:
rel_url = request.match_info["filename"]
try:
filename = Path(rel_url)
if filename.anchor:
# rel_url is an absolute name like
# /static/\\machine_name\c$ or /static/D:\path
# where the static dir is totally different
raise HTTPForbidden()
filepath = self._directory.joinpath(filename).resolve()
if not self._follow_symlinks:
filepath.relative_to(self._directory)
except (ValueError, FileNotFoundError) as error:
# relatively safe
raise HTTPNotFound() from error
except HTTPForbidden:
raise
except Exception as error:
# perm error or other kind!
request.app.logger.exception(error)
raise HTTPNotFound() from error
# on opening a dir, load its contents if allowed
if filepath.is_dir():
if self._show_index:
try:
return Response(
text=self._directory_as_html(filepath), content_type="text/html"
)
except PermissionError:
raise HTTPForbidden()
else:
raise HTTPForbidden()
elif filepath.is_file():
return FileResponse(filepath, chunk_size=self._chunk_size)
else:
raise HTTPNotFound
def _directory_as_html(self, filepath: Path) -> str:
# returns directory's index as html
# sanity check
assert filepath.is_dir()
relative_path_to_dir = filepath.relative_to(self._directory).as_posix()
index_of = f"Index of /{relative_path_to_dir}"
h1 = f"<h1>{index_of}</h1>"
index_list = []
dir_index = filepath.iterdir()
for _file in sorted(dir_index):
# show file url as relative to static path
rel_path = _file.relative_to(self._directory).as_posix()
file_url = self._prefix + "/" + rel_path
# if file is a directory, add '/' to the end of the name
if _file.is_dir():
file_name = f"{_file.name}/"
else:
file_name = _file.name
index_list.append(
'<li><a href="{url}">{name}</a></li>'.format(
url=file_url, name=file_name
)
)
ul = "<ul>\n{}\n</ul>".format("\n".join(index_list))
body = f"<body>\n{h1}\n{ul}\n</body>"
head_str = f"<head>\n<title>{index_of}</title>\n</head>"
html = f"<html>\n{head_str}\n{body}\n</html>"
return html
def __repr__(self) -> str:
name = "'" + self.name + "'" if self.name is not None else ""
return "<StaticResource {name} {path} -> {directory!r}>".format(
name=name, path=self._prefix, directory=self._directory
)
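# Usage sketch (editorial illustration, not part of aiohttp): cache-busting
# static URLs via append_version, which hashes the file as shown in
# _get_file_hash() above; the prefix and file name are hypothetical.
#
#     static = app.router.add_static("/static", "path/to/static",
#                                    append_version=True)
#     static.url_for(filename="style.css")
#     # -> URL('/static/style.css?v=<urlsafe-base64 SHA-256 of the file>')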
class PrefixedSubAppResource(PrefixResource):
def __init__(self, prefix: str, app: "Application") -> None:
super().__init__(prefix)
self._app = app
for resource in app.router.resources():
resource.add_prefix(prefix)
def add_prefix(self, prefix: str) -> None:
super().add_prefix(prefix)
for resource in self._app.router.resources():
resource.add_prefix(prefix)
def url_for(self, *args: str, **kwargs: str) -> URL:
raise RuntimeError(".url_for() is not supported " "by sub-application root")
def get_info(self) -> _InfoDict:
return {"app": self._app, "prefix": self._prefix}
async def resolve(self, request: Request) -> _Resolve:
if (
not request.url.raw_path.startswith(self._prefix2)
and request.url.raw_path != self._prefix
):
return None, set()
match_info = await self._app.router.resolve(request)
match_info.add_app(self._app)
if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
methods = match_info.http_exception.allowed_methods
else:
methods = set()
return match_info, methods
def __len__(self) -> int:
return len(self._app.router.routes())
def __iter__(self) -> Iterator[AbstractRoute]:
return iter(self._app.router.routes())
def __repr__(self) -> str:
return "<PrefixedSubAppResource {prefix} -> {app!r}>".format(
prefix=self._prefix, app=self._app
)
class AbstractRuleMatching(abc.ABC):
@abc.abstractmethod # pragma: no branch
async def match(self, request: Request) -> bool:
"""Return bool if the request satisfies the criteria"""
@abc.abstractmethod # pragma: no branch
def get_info(self) -> _InfoDict:
"""Return a dict with additional info useful for introspection"""
@property
@abc.abstractmethod # pragma: no branch
def canonical(self) -> str:
"""Return a str"""
class Domain(AbstractRuleMatching):
re_part = re.compile(r"(?!-)[a-z\d-]{1,63}(?<!-)")
def __init__(self, domain: str) -> None:
super().__init__()
self._domain = self.validation(domain)
@property
def canonical(self) -> str:
return self._domain
def validation(self, domain: str) -> str:
if not isinstance(domain, str):
raise TypeError("Domain must be str")
domain = domain.rstrip(".").lower()
if not domain:
raise ValueError("Domain cannot be empty")
elif "://" in domain:
raise ValueError("Scheme not supported")
url = URL("http://" + domain)
assert url.raw_host is not None
if not all(self.re_part.fullmatch(x) for x in url.raw_host.split(".")):
raise ValueError("Domain not valid")
if url.port == 80:
return url.raw_host
return f"{url.raw_host}:{url.port}"
async def match(self, request: Request) -> bool:
host = request.headers.get(hdrs.HOST)
if not host:
return False
return self.match_domain(host)
def match_domain(self, host: str) -> bool:
return host.lower() == self._domain
def get_info(self) -> _InfoDict:
return {"domain": self._domain}
class MaskDomain(Domain):
re_part = re.compile(r"(?!-)[a-z\d\*-]{1,63}(?<!-)")
def __init__(self, domain: str) -> None:
super().__init__(domain)
mask = self._domain.replace(".", r"\.").replace("*", ".*")
self._mask = re.compile(mask)
@property
def canonical(self) -> str:
return self._mask.pattern
def match_domain(self, host: str) -> bool:
return self._mask.fullmatch(host) is not None
class MatchedSubAppResource(PrefixedSubAppResource):
def __init__(self, rule: AbstractRuleMatching, app: "Application") -> None:
AbstractResource.__init__(self)
self._prefix = ""
self._app = app
self._rule = rule
@property
def canonical(self) -> str:
return self._rule.canonical
def get_info(self) -> _InfoDict:
return {"app": self._app, "rule": self._rule}
async def resolve(self, request: Request) -> _Resolve:
if not await self._rule.match(request):
return None, set()
match_info = await self._app.router.resolve(request)
match_info.add_app(self._app)
if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
methods = match_info.http_exception.allowed_methods
else:
methods = set()
return match_info, methods
def __repr__(self) -> str:
return "<MatchedSubAppResource -> {app!r}>" "".format(app=self._app)
class ResourceRoute(AbstractRoute):
"""A route with resource"""
def __init__(
self,
method: str,
handler: Union[Handler, Type[AbstractView]],
resource: AbstractResource,
*,
expect_handler: Optional[_ExpectHandler] = None,
) -> None:
super().__init__(
method, handler, expect_handler=expect_handler, resource=resource
)
def __repr__(self) -> str:
return "<ResourceRoute [{method}] {resource} -> {handler!r}".format(
method=self.method, resource=self._resource, handler=self.handler
)
@property
def name(self) -> Optional[str]:
if self._resource is None:
return None
return self._resource.name
def url_for(self, *args: str, **kwargs: str) -> URL:
"""Construct url for route with additional params."""
assert self._resource is not None
return self._resource.url_for(*args, **kwargs)
def get_info(self) -> _InfoDict:
assert self._resource is not None
return self._resource.get_info()
class SystemRoute(AbstractRoute):
def __init__(self, http_exception: HTTPException) -> None:
super().__init__(hdrs.METH_ANY, self._handle)
self._http_exception = http_exception
def url_for(self, *args: str, **kwargs: str) -> URL:
raise RuntimeError(".url_for() is not allowed for SystemRoute")
@property
def name(self) -> Optional[str]:
return None
def get_info(self) -> _InfoDict:
return {"http_exception": self._http_exception}
async def _handle(self, request: Request) -> StreamResponse:
raise self._http_exception
@property
def status(self) -> int:
return self._http_exception.status
@property
def reason(self) -> str:
return self._http_exception.reason
def __repr__(self) -> str:
return "<SystemRoute {self.status}: {self.reason}>".format(self=self)
class View(AbstractView):
async def _iter(self) -> StreamResponse:
if self.request.method not in hdrs.METH_ALL:
self._raise_allowed_methods()
        method: Optional[Callable[[], Awaitable[StreamResponse]]] = getattr(
self, self.request.method.lower(), None
)
if method is None:
self._raise_allowed_methods()
resp = await method()
return resp
def __await__(self) -> Generator[Any, None, StreamResponse]:
return self._iter().__await__()
def _raise_allowed_methods(self) -> None:
allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
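# Usage sketch (editorial illustration, not part of aiohttp): a class-based
# view dispatched through View._iter() above; the names are hypothetical.
#
#     from aiohttp import web
#
#     class ArticleView(web.View):
#         async def get(self) -> web.Response:
#             return web.Response(text="read")
#
#         async def post(self) -> web.Response:
#             return web.Response(text="created")
#
#     app.router.add_view("/articles", ArticleView)
#     # Any other method (e.g. DELETE) triggers _raise_allowed_methods()
#     # and yields "405 Method Not Allowed".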
class ResourcesView(Sized, Iterable[AbstractResource], Container[AbstractResource]):
def __init__(self, resources: List[AbstractResource]) -> None:
self._resources = resources
def __len__(self) -> int:
return len(self._resources)
def __iter__(self) -> Iterator[AbstractResource]:
yield from self._resources
def __contains__(self, resource: object) -> bool:
return resource in self._resources
class RoutesView(Sized, Iterable[AbstractRoute], Container[AbstractRoute]):
def __init__(self, resources: List[AbstractResource]):
self._routes: List[AbstractRoute] = []
for resource in resources:
for route in resource:
self._routes.append(route)
def __len__(self) -> int:
return len(self._routes)
def __iter__(self) -> Iterator[AbstractRoute]:
yield from self._routes
def __contains__(self, route: object) -> bool:
return route in self._routes
class UrlDispatcher(AbstractRouter, Mapping[str, AbstractResource]):
NAME_SPLIT_RE = re.compile(r"[.:-]")
def __init__(self) -> None:
super().__init__()
self._resources: List[AbstractResource] = []
self._named_resources: Dict[str, AbstractResource] = {}
async def resolve(self, request: Request) -> UrlMappingMatchInfo:
method = request.method
allowed_methods: Set[str] = set()
for resource in self._resources:
match_dict, allowed = await resource.resolve(request)
if match_dict is not None:
return match_dict
else:
allowed_methods |= allowed
if allowed_methods:
return MatchInfoError(HTTPMethodNotAllowed(method, allowed_methods))
else:
return MatchInfoError(HTTPNotFound())
def __iter__(self) -> Iterator[str]:
return iter(self._named_resources)
def __len__(self) -> int:
return len(self._named_resources)
def __contains__(self, resource: object) -> bool:
return resource in self._named_resources
def __getitem__(self, name: str) -> AbstractResource:
return self._named_resources[name]
def resources(self) -> ResourcesView:
return ResourcesView(self._resources)
def routes(self) -> RoutesView:
return RoutesView(self._resources)
def named_resources(self) -> Mapping[str, AbstractResource]:
return MappingProxyType(self._named_resources)
def register_resource(self, resource: AbstractResource) -> None:
assert isinstance(
resource, AbstractResource
), f"Instance of AbstractResource class is required, got {resource!r}"
if self.frozen:
raise RuntimeError("Cannot register a resource into frozen router.")
name = resource.name
if name is not None:
parts = self.NAME_SPLIT_RE.split(name)
for part in parts:
if keyword.iskeyword(part):
raise ValueError(
f"Incorrect route name {name!r}, "
"python keywords cannot be used "
"for route name"
)
if not part.isidentifier():
raise ValueError(
"Incorrect route name {!r}, "
"the name should be a sequence of "
"python identifiers separated "
"by dash, dot or column".format(name)
)
if name in self._named_resources:
raise ValueError(
"Duplicate {!r}, "
"already handled by {!r}".format(name, self._named_resources[name])
)
self._named_resources[name] = resource
self._resources.append(resource)
def add_resource(self, path: str, *, name: Optional[str] = None) -> Resource:
if path and not path.startswith("/"):
raise ValueError("path should be started with / or be empty")
# Reuse last added resource if path and name are the same
if self._resources:
resource = self._resources[-1]
if resource.name == name and resource.raw_match(path):
return cast(Resource, resource)
if not ("{" in path or "}" in path or ROUTE_RE.search(path)):
resource = PlainResource(_requote_path(path), name=name)
self.register_resource(resource)
return resource
resource = DynamicResource(path, name=name)
self.register_resource(resource)
return resource
def add_route(
self,
method: str,
path: str,
handler: Union[Handler, Type[AbstractView]],
*,
name: Optional[str] = None,
expect_handler: Optional[_ExpectHandler] = None,
) -> AbstractRoute:
resource = self.add_resource(path, name=name)
return resource.add_route(method, handler, expect_handler=expect_handler)
def add_static(
self,
prefix: str,
path: PathLike,
*,
name: Optional[str] = None,
expect_handler: Optional[_ExpectHandler] = None,
chunk_size: int = 256 * 1024,
show_index: bool = False,
follow_symlinks: bool = False,
append_version: bool = False,
) -> AbstractResource:
"""Add static files view.
prefix - url prefix
path - folder with files
"""
assert prefix.startswith("/")
if prefix.endswith("/"):
prefix = prefix[:-1]
resource = StaticResource(
prefix,
path,
name=name,
expect_handler=expect_handler,
chunk_size=chunk_size,
show_index=show_index,
follow_symlinks=follow_symlinks,
append_version=append_version,
)
self.register_resource(resource)
return resource
def add_head(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
"""Shortcut for add_route with method HEAD."""
return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs)
def add_options(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
"""Shortcut for add_route with method OPTIONS."""
return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
def add_get(
self,
path: str,
handler: Handler,
*,
name: Optional[str] = None,
allow_head: bool = True,
**kwargs: Any,
) -> AbstractRoute:
"""Shortcut for add_route with method GET.
If allow_head is true, another
route is added allowing head requests to the same endpoint.
"""
resource = self.add_resource(path, name=name)
if allow_head:
resource.add_route(hdrs.METH_HEAD, handler, **kwargs)
return resource.add_route(hdrs.METH_GET, handler, **kwargs)
def add_post(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
"""Shortcut for add_route with method POST."""
return self.add_route(hdrs.METH_POST, path, handler, **kwargs)
def add_put(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
"""Shortcut for add_route with method PUT."""
return self.add_route(hdrs.METH_PUT, path, handler, **kwargs)
def add_patch(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
"""Shortcut for add_route with method PATCH."""
return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs)
def add_delete(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
"""Shortcut for add_route with method DELETE."""
return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs)
def add_view(
self, path: str, handler: Type[AbstractView], **kwargs: Any
) -> AbstractRoute:
"""Shortcut for add_route with ANY methods for a class-based view."""
return self.add_route(hdrs.METH_ANY, path, handler, **kwargs)
def freeze(self) -> None:
super().freeze()
for resource in self._resources:
resource.freeze()
def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]:
"""Append routes to route table.
Parameter should be a sequence of RouteDef objects.
Returns a list of registered AbstractRoute instances.
"""
registered_routes = []
for route_def in routes:
registered_routes.extend(route_def.register(self))
return registered_routes
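# Usage sketch (editorial illustration, not part of aiohttp): registering
# routes through the dispatcher shortcuts and via a route table; the paths
# and handlers are hypothetical.
#
#     from aiohttp import web
#
#     routes = web.RouteTableDef()
#
#     @routes.get("/ping")
#     async def ping(request: web.Request) -> web.Response:
#         return web.Response(text="pong")
#
#     app = web.Application()
#     app.router.add_post("/items", create_item)  # assumes such a handler
#     app.router.add_routes(routes)  # RouteDef objects register themselves here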
def _quote_path(value: str) -> str:
if YARL_VERSION < (1, 6):
value = value.replace("%", "%25")
return URL.build(path=value, encoded=False).raw_path
def _unquote_path(value: str) -> str:
return URL.build(path=value, encoded=True).path
def _requote_path(value: str) -> str:
# Quote non-ascii characters and other characters which must be quoted,
# but preserve existing %-sequences.
result = _quote_path(value)
if "%" in value:
result = result.replace("%25", "%")
return result
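# Usage sketch (editorial illustration, not part of aiohttp): domain-based
# routing builds on MatchedSubAppResource above. Application.add_domain wraps
# the sub-app in a Domain (or a MaskDomain when '*' is present); the domains
# are hypothetical.
#
#     admin = web.Application()
#     # ... add routes to admin ...
#     app.add_domain("admin.example.com", admin)  # exact Domain match
#     app.add_domain("*.example.com", admin)      # MaskDomain wildcard match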
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/six-1.16.0.dist-info/top_level.txt | six
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/colorama-0.4.6.dist-info/licenses/LICENSE.txt | Copyright (c) 2010 Jonathan Hartley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holders, nor those of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# When updating this version number, please update the
# ``docs/source/global.rst.inc`` file as well.
VERSION_MAJOR = 0
VERSION_MINOR = 10
VERSION_BUILD = 4
VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
VERSION_STRING = "%d.%d.%d" % VERSION_INFO
__version__ = VERSION_INFO
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/events.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.events
:synopsis: File system events and event handlers.
:author: [email protected] (Yesudeep Mangalapilly)
Event Classes
-------------
.. autoclass:: FileSystemEvent
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: FileSystemMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileMovedEvent
:members:
:show-inheritance:
.. autoclass:: DirMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileModifiedEvent
:members:
:show-inheritance:
.. autoclass:: DirModifiedEvent
:members:
:show-inheritance:
.. autoclass:: FileCreatedEvent
:members:
:show-inheritance:
.. autoclass:: DirCreatedEvent
:members:
:show-inheritance:
.. autoclass:: FileDeletedEvent
:members:
:show-inheritance:
.. autoclass:: DirDeletedEvent
:members:
:show-inheritance:
Event Handler Classes
---------------------
.. autoclass:: FileSystemEventHandler
:members:
:show-inheritance:
.. autoclass:: PatternMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: RegexMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: LoggingEventHandler
:members:
:show-inheritance:
"""
import os.path
import logging
import re
from pathtools.patterns import match_any_paths
from watchdog.utils import has_attribute
from watchdog.utils import unicode_paths
EVENT_TYPE_MOVED = 'moved'
EVENT_TYPE_DELETED = 'deleted'
EVENT_TYPE_CREATED = 'created'
EVENT_TYPE_MODIFIED = 'modified'
class FileSystemEvent(object):
"""
Immutable type that represents a file system event that is triggered
when a change occurs on the monitored file system.
All FileSystemEvent objects are required to be immutable and hence
can be used as keys in dictionaries or be added to sets.
"""
event_type = None
"""The type of the event as a string."""
is_directory = False
"""True if event was emitted for a directory; False otherwise."""
is_synthetic = False
"""
True if event was synthesized; False otherwise.
These are events that weren't actually broadcast by the OS, but
are presumed to have happened based on other, actual events.
"""
def __init__(self, src_path):
self._src_path = src_path
@property
def src_path(self):
"""Source path of the file system object that triggered this event."""
return self._src_path
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("<%(class_name)s: event_type=%(event_type)s, "
"src_path=%(src_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(
class_name=self.__class__.__name__,
event_type=self.event_type,
src_path=self.src_path,
is_directory=self.is_directory))
# Used for comparison of events.
@property
def key(self):
return (self.event_type, self.src_path, self.is_directory)
def __eq__(self, event):
return self.key == event.key
def __ne__(self, event):
return self.key != event.key
def __hash__(self):
return hash(self.key)
class FileSystemMovedEvent(FileSystemEvent):
"""
File system event representing any kind of file system movement.
"""
event_type = EVENT_TYPE_MOVED
def __init__(self, src_path, dest_path):
super(FileSystemMovedEvent, self).__init__(src_path)
self._dest_path = dest_path
@property
def dest_path(self):
"""The destination path of the move event."""
return self._dest_path
# Used for hashing this as an immutable object.
@property
def key(self):
return (self.event_type, self.src_path, self.dest_path, self.is_directory)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path,
is_directory=self.is_directory))
# File events.
class FileDeletedEvent(FileSystemEvent):
"""File system event representing file deletion on the file system."""
event_type = EVENT_TYPE_DELETED
def __init__(self, src_path):
super(FileDeletedEvent, self).__init__(src_path)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)r>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class FileModifiedEvent(FileSystemEvent):
"""File system event representing file modification on the file system."""
event_type = EVENT_TYPE_MODIFIED
def __init__(self, src_path):
super(FileModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class FileCreatedEvent(FileSystemEvent):
"""File system event representing file creation on the file system."""
event_type = EVENT_TYPE_CREATED
def __init__(self, src_path):
super(FileCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class FileMovedEvent(FileSystemMovedEvent):
"""File system event representing file movement on the file system."""
def __init__(self, src_path, dest_path):
super(FileMovedEvent, self).__init__(src_path, dest_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path))
# Directory events.
class DirDeletedEvent(FileSystemEvent):
"""File system event representing directory deletion on the file system."""
event_type = EVENT_TYPE_DELETED
is_directory = True
def __init__(self, src_path):
super(DirDeletedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirModifiedEvent(FileSystemEvent):
"""
File system event representing directory modification on the file system.
"""
event_type = EVENT_TYPE_MODIFIED
is_directory = True
def __init__(self, src_path):
super(DirModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirCreatedEvent(FileSystemEvent):
"""File system event representing directory creation on the file system."""
event_type = EVENT_TYPE_CREATED
is_directory = True
def __init__(self, src_path):
super(DirCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirMovedEvent(FileSystemMovedEvent):
"""File system event representing directory movement on the file system."""
is_directory = True
def __init__(self, src_path, dest_path):
super(DirMovedEvent, self).__init__(src_path, dest_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path))
class FileSystemEventHandler(object):
"""
Base file system event handler that you can override methods from.
"""
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
self.on_any_event(event)
{
EVENT_TYPE_CREATED: self.on_created,
EVENT_TYPE_DELETED: self.on_deleted,
EVENT_TYPE_MODIFIED: self.on_modified,
EVENT_TYPE_MOVED: self.on_moved,
}[event.event_type](event)
def on_any_event(self, event):
"""Catch-all event handler.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
def on_moved(self, event):
"""Called when a file or a directory is moved or renamed.
:param event:
Event representing file/directory movement.
:type event:
:class:`DirMovedEvent` or :class:`FileMovedEvent`
"""
def on_created(self, event):
"""Called when a file or directory is created.
:param event:
Event representing file/directory creation.
:type event:
:class:`DirCreatedEvent` or :class:`FileCreatedEvent`
"""
def on_deleted(self, event):
"""Called when a file or directory is deleted.
:param event:
Event representing file/directory deletion.
:type event:
:class:`DirDeletedEvent` or :class:`FileDeletedEvent`
"""
def on_modified(self, event):
"""Called when a file or directory is modified.
:param event:
Event representing file/directory modification.
:type event:
:class:`DirModifiedEvent` or :class:`FileModifiedEvent`
"""
class PatternMatchingEventHandler(FileSystemEventHandler):
"""
Matches given patterns with file paths associated with occurring events.
"""
def __init__(self, patterns=None, ignore_patterns=None,
ignore_directories=False, case_sensitive=False):
super(PatternMatchingEventHandler, self).__init__()
self._patterns = patterns
self._ignore_patterns = ignore_patterns
self._ignore_directories = ignore_directories
self._case_sensitive = case_sensitive
@property
def patterns(self):
"""
(Read-only)
Patterns to allow matching event paths.
"""
return self._patterns
@property
def ignore_patterns(self):
"""
(Read-only)
Patterns to ignore matching event paths.
"""
return self._ignore_patterns
@property
def ignore_directories(self):
"""
(Read-only)
``True`` if directories should be ignored; ``False`` otherwise.
"""
return self._ignore_directories
@property
def case_sensitive(self):
"""
(Read-only)
``True`` if path names should be matched sensitive to case; ``False``
otherwise.
"""
return self._case_sensitive
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
if self.ignore_directories and event.is_directory:
return
paths = []
if has_attribute(event, 'dest_path'):
paths.append(unicode_paths.decode(event.dest_path))
if event.src_path:
paths.append(unicode_paths.decode(event.src_path))
if match_any_paths(paths,
included_patterns=self.patterns,
excluded_patterns=self.ignore_patterns,
case_sensitive=self.case_sensitive):
super(PatternMatchingEventHandler, self).dispatch(event)
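# Illustrative sketch (not part of the original module): pattern filtering in
# ``dispatch``. Patterns are shell-style globs checked via ``match_any_paths``.
#
#     handler = PatternMatchingEventHandler(patterns=['*.py'],
#                                           ignore_patterns=['*~'],
#                                           ignore_directories=True)
#     handler.dispatch(FileModifiedEvent('/src/app.py'))     # forwarded
#     handler.dispatch(FileModifiedEvent('/src/notes.txt'))  # silently dropped
#     handler.dispatch(DirModifiedEvent('/src'))             # dropped: directory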
class RegexMatchingEventHandler(FileSystemEventHandler):
"""
Matches given regexes with file paths associated with occurring events.
"""
def __init__(self, regexes=None, ignore_regexes=None,
ignore_directories=False, case_sensitive=False):
super(RegexMatchingEventHandler, self).__init__()
if regexes is None:
regexes = [r".*"]
if ignore_regexes is None:
ignore_regexes = []
if case_sensitive:
self._regexes = [re.compile(r) for r in regexes]
self._ignore_regexes = [re.compile(r) for r in ignore_regexes]
else:
self._regexes = [re.compile(r, re.I) for r in regexes]
self._ignore_regexes = [re.compile(r, re.I) for r in ignore_regexes]
self._ignore_directories = ignore_directories
self._case_sensitive = case_sensitive
@property
def regexes(self):
"""
(Read-only)
Regexes to allow matching event paths.
"""
return self._regexes
@property
def ignore_regexes(self):
"""
(Read-only)
Regexes to ignore matching event paths.
"""
return self._ignore_regexes
@property
def ignore_directories(self):
"""
(Read-only)
``True`` if directories should be ignored; ``False`` otherwise.
"""
return self._ignore_directories
@property
def case_sensitive(self):
"""
(Read-only)
``True`` if path names should be matched sensitive to case; ``False``
otherwise.
"""
return self._case_sensitive
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
if self.ignore_directories and event.is_directory:
return
paths = []
if has_attribute(event, 'dest_path'):
paths.append(unicode_paths.decode(event.dest_path))
if event.src_path:
paths.append(unicode_paths.decode(event.src_path))
if any(r.match(p) for r in self.ignore_regexes for p in paths):
return
if any(r.match(p) for r in self.regexes for p in paths):
super(RegexMatchingEventHandler, self).dispatch(event)
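# Illustrative sketch (not part of the original module): regex filtering in
# ``dispatch``. With ``case_sensitive=False`` (the default) the regexes are
# compiled with ``re.I``.
#
#     handler = RegexMatchingEventHandler(regexes=[r'.*\.py$'],
#                                         ignore_regexes=[r'.*/build/.*'])
#     handler.dispatch(FileCreatedEvent('/src/Main.PY'))       # forwarded
#     handler.dispatch(FileCreatedEvent('/src/build/gen.py'))  # ignored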
class LoggingEventHandler(FileSystemEventHandler):
"""Logs all the events captured."""
def __init__(self, logger=None):
super(LoggingEventHandler, self).__init__()
self.logger = logger or logging.root
def on_moved(self, event):
super(LoggingEventHandler, self).on_moved(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Moved %s: from %s to %s", what, event.src_path,
event.dest_path)
def on_created(self, event):
super(LoggingEventHandler, self).on_created(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Created %s: %s", what, event.src_path)
def on_deleted(self, event):
super(LoggingEventHandler, self).on_deleted(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Deleted %s: %s", what, event.src_path)
def on_modified(self, event):
super(LoggingEventHandler, self).on_modified(event)
what = 'directory' if event.is_directory else 'file'
self.logger.info("Modified %s: %s", what, event.src_path)
class LoggingFileSystemEventHandler(LoggingEventHandler):
"""
For backwards-compatibility. Please use :class:`LoggingEventHandler`
instead.
"""
def generate_sub_moved_events(src_dir_path, dest_dir_path):
"""Generates an event list of :class:`DirMovedEvent` and
:class:`FileMovedEvent` objects for all the files and directories within
the given moved directory that were moved along with the directory.
:param src_dir_path:
The source path of the moved directory.
:param dest_dir_path:
The destination path of the moved directory.
:returns:
An iterable of file system events of type :class:`DirMovedEvent` and
:class:`FileMovedEvent`.
"""
for root, directories, filenames in os.walk(dest_dir_path):
for directory in directories:
full_path = os.path.join(root, directory)
renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
event = DirMovedEvent(renamed_path, full_path)
event.is_synthetic = True
yield event
for filename in filenames:
full_path = os.path.join(root, filename)
renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
event = FileMovedEvent(renamed_path, full_path)
event.is_synthetic = True
yield event
def generate_sub_created_events(src_dir_path):
"""Generates an event list of :class:`DirCreatedEvent` and
:class:`FileCreatedEvent` objects for all the files and directories within
the given moved directory that were moved along with the directory.
:param src_dir_path:
The source path of the created directory.
:returns:
An iterable of file system events of type :class:`DirCreatedEvent` and
:class:`FileCreatedEvent`.
"""
for root, directories, filenames in os.walk(src_dir_path):
for directory in directories:
event = DirCreatedEvent(os.path.join(root, directory))
event.is_synthetic = True
yield event
for filename in filenames:
event = FileCreatedEvent(os.path.join(root, filename))
event.is_synthetic = True
yield event
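# Illustrative sketch (not part of the original module): producing synthetic
# events after a directory tree has already been moved on disk. The paths
# below are hypothetical.
#
#     for event in generate_sub_moved_events('/watched/old', '/watched/new'):
#         # each event is a DirMovedEvent or FileMovedEvent with
#         # event.is_synthetic == True
#         print(event.src_path, '->', event.dest_path)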
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/watchmedo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.watchmedo
:author: [email protected] (Yesudeep Mangalapilly)
:synopsis: ``watchmedo`` shell script utility.
"""
import os.path
import sys
import yaml
import time
import logging
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from argh import arg, aliases, ArghParser, expects_obj
from watchdog.version import VERSION_STRING
from watchdog.utils import WatchdogShutdown, load_class
logging.basicConfig(level=logging.INFO)
CONFIG_KEY_TRICKS = 'tricks'
CONFIG_KEY_PYTHON_PATH = 'python-path'
def path_split(pathname_spec, separator=os.pathsep):
"""
Splits a pathname specification separated by an OS-dependent separator.
:param pathname_spec:
The pathname specification.
:param separator:
(OS Dependent) `:` on Unix and `;` on Windows or user-specified.
"""
return list(pathname_spec.split(separator))
def add_to_sys_path(pathnames, index=0):
"""
Adds specified paths at specified index into the sys.path list.
:param pathnames:
A list of paths to add to the sys.path.
:param index:
(Default 0) The index in the sys.path list where the paths will be
added.
"""
for pathname in pathnames[::-1]:
sys.path.insert(index, pathname)
def load_config(tricks_file_pathname):
"""
Loads the YAML configuration from the specified file.
:param tricks_file_pathname:
The path to the tricks configuration file.
:returns:
A dictionary of configuration information.
"""
with open(tricks_file_pathname, 'rb') as f:
return yaml.safe_load(f.read())
def parse_patterns(patterns_spec, ignore_patterns_spec, separator=';'):
"""
Parses pattern argument specs and returns a two-tuple of
(patterns, ignore_patterns).
"""
patterns = patterns_spec.split(separator)
ignore_patterns = ignore_patterns_spec.split(separator)
if ignore_patterns == ['']:
ignore_patterns = []
return (patterns, ignore_patterns)
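# Illustrative check (not part of the original script): an empty ignore spec
# yields an empty ignore list.
#
#     parse_patterns('*.py;*.txt', '')
#     # -> (['*.py', '*.txt'], [])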
def observe_with(observer, event_handler, pathnames, recursive):
"""
Single observer thread with a scheduled path and event handler.
:param observer:
The observer thread.
:param event_handler:
Event handler which will be called in response to file system events.
:param pathnames:
A list of pathnames to monitor.
:param recursive:
``True`` if recursive; ``False`` otherwise.
"""
for pathname in set(pathnames):
observer.schedule(event_handler, pathname, recursive)
observer.start()
try:
while True:
time.sleep(1)
except WatchdogShutdown:
observer.stop()
observer.join()
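# Illustrative sketch (not part of the original script): using observe_with
# outside the CLI. This blocks until a WatchdogShutdown is raised (for
# example, from a signal handler).
#
#     from watchdog.observers import Observer
#     from watchdog.events import LoggingEventHandler
#     observe_with(Observer(timeout=1.0), LoggingEventHandler(), ['.'], True)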
def schedule_tricks(observer, tricks, pathname, recursive):
"""
Schedules tricks with the specified observer and for the given watch
path.
:param observer:
The observer thread into which to schedule the trick and watch.
:param tricks:
A list of tricks.
:param pathname:
A path name which should be watched.
:param recursive:
``True`` if recursive; ``False`` otherwise.
"""
for trick in tricks:
for name, value in list(trick.items()):
TrickClass = load_class(name)
handler = TrickClass(**value)
trick_pathname = getattr(handler, 'source_directory', None) or pathname
observer.schedule(handler, trick_pathname, recursive)
@aliases('tricks')
@arg('files',
nargs='*',
help='perform tricks from given file')
@arg('--python-path',
default='.',
help='paths separated by %s to add to the python path' % os.pathsep)
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout (in seconds)')
@arg('--recursive',
default=True,
help='recursively monitor paths')
@expects_obj
def tricks_from(args):
"""
Subcommand to execute tricks from a tricks configuration file.
:param args:
Command line argument options.
"""
from watchdog.observers import Observer
add_to_sys_path(path_split(args.python_path))
observers = []
for tricks_file in args.files:
observer = Observer(timeout=args.timeout)
if not os.path.exists(tricks_file):
raise IOError("cannot find tricks file: %s" % tricks_file)
config = load_config(tricks_file)
try:
tricks = config[CONFIG_KEY_TRICKS]
except KeyError:
raise KeyError("No `%s' key specified in %s." % (
CONFIG_KEY_TRICKS, tricks_file))
if CONFIG_KEY_PYTHON_PATH in config:
add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH])
dir_path = os.path.dirname(tricks_file)
if not dir_path:
dir_path = os.path.relpath(os.getcwd())
schedule_tricks(observer, tricks, dir_path, args.recursive)
observer.start()
observers.append(observer)
try:
while True:
time.sleep(1)
except WatchdogShutdown:
for o in observers:
o.unschedule_all()
o.stop()
for o in observers:
o.join()
@aliases('generate-tricks-yaml')
@arg('trick_paths',
nargs='*',
help='Dotted paths for all the tricks you want to generate')
@arg('--python-path',
default='.',
help='paths separated by %s to add to the python path' % os.pathsep)
@arg('--append-to-file',
default=None,
help='appends the generated tricks YAML to a file; \
if not specified, prints to standard output')
@arg('-a',
'--append-only',
dest='append_only',
default=False,
help='if --append-to-file is not specified, produces output for \
appending instead of a complete tricks yaml file.')
@expects_obj
def tricks_generate_yaml(args):
"""
Subcommand to generate YAML configuration for tricks named on the command
line.
:param args:
Command line argument options.
"""
python_paths = path_split(args.python_path)
add_to_sys_path(python_paths)
output = StringIO()
for trick_path in args.trick_paths:
TrickClass = load_class(trick_path)
output.write(TrickClass.generate_yaml())
content = output.getvalue()
output.close()
header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths})
header += "%s:\n" % CONFIG_KEY_TRICKS
if args.append_to_file is None:
# Output to standard output.
if not args.append_only:
content = header + content
sys.stdout.write(content)
else:
if not os.path.exists(args.append_to_file):
content = header + content
with open(args.append_to_file, 'a') as output:  # text mode: content is a str
output.write(content)
@arg('directories',
nargs='*',
default='.',
help='directories to watch.')
@arg('-p',
'--pattern',
'--patterns',
dest='patterns',
default='*',
help='matches event paths with these patterns (separated by ;).')
@arg('-i',
'--ignore-pattern',
'--ignore-patterns',
dest='ignore_patterns',
default='',
help='ignores event paths with these patterns (separated by ;).')
@arg('-D',
'--ignore-directories',
dest='ignore_directories',
default=False,
help='ignores events for directories')
@arg('-R',
'--recursive',
dest='recursive',
default=False,
help='monitors the directories recursively')
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('--trace',
default=False,
help='dumps complete dispatching trace')
@arg('--debug-force-polling',
default=False,
help='[debug] forces polling')
@arg('--debug-force-kqueue',
default=False,
help='[debug] forces BSD kqueue(2)')
@arg('--debug-force-winapi',
default=False,
help='[debug] forces Windows API')
@arg('--debug-force-winapi-async',
default=False,
help='[debug] forces Windows API + I/O completion')
@arg('--debug-force-fsevents',
default=False,
help='[debug] forces Mac OS X FSEvents')
@arg('--debug-force-inotify',
default=False,
help='[debug] forces Linux inotify(7)')
@expects_obj
def log(args):
"""
Subcommand to log file system events to the console.
:param args:
Command line argument options.
"""
from watchdog.utils import echo
from watchdog.tricks import LoggerTrick
if args.trace:
echo.echo_class(LoggerTrick)
patterns, ignore_patterns =\
parse_patterns(args.patterns, args.ignore_patterns)
handler = LoggerTrick(patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=args.ignore_directories)
if args.debug_force_polling:
from watchdog.observers.polling import PollingObserver as Observer
elif args.debug_force_kqueue:
from watchdog.observers.kqueue import KqueueObserver as Observer
elif args.debug_force_winapi_async:
from watchdog.observers.read_directory_changes_async import\
WindowsApiAsyncObserver as Observer
elif args.debug_force_winapi:
from watchdog.observers.read_directory_changes import\
WindowsApiObserver as Observer
elif args.debug_force_inotify:
from watchdog.observers.inotify import InotifyObserver as Observer
elif args.debug_force_fsevents:
from watchdog.observers.fsevents import FSEventsObserver as Observer
else:
# Automatically picks the most appropriate observer for the platform
# on which it is running.
from watchdog.observers import Observer
observer = Observer(timeout=args.timeout)
observe_with(observer, handler, args.directories, args.recursive)
@arg('directories',
nargs='*',
default='.',
help='directories to watch')
@arg('-c',
'--command',
dest='command',
default=None,
help='''shell command executed in response to matching events.
These interpolation variables are available to your command string::
${watch_src_path} - event source path;
${watch_dest_path} - event destination path (for moved events);
${watch_event_type} - event type;
${watch_object} - ``file`` or ``directory``
Note::
Please ensure you do not use double quotes (") to quote
your command string. That will force your shell to
interpolate before the command is processed by this
subcommand.
Example option usage::
--command='echo "${watch_src_path}"'
''')
@arg('-p',
'--pattern',
'--patterns',
dest='patterns',
default='*',
help='matches event paths with these patterns (separated by ;).')
@arg('-i',
'--ignore-pattern',
'--ignore-patterns',
dest='ignore_patterns',
default='',
help='ignores event paths with these patterns (separated by ;).')
@arg('-D',
'--ignore-directories',
dest='ignore_directories',
default=False,
help='ignores events for directories')
@arg('-R',
'--recursive',
dest='recursive',
default=False,
help='monitors the directories recursively')
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('-w', '--wait',
dest='wait_for_process',
action='store_true',
default=False,
help="wait for process to finish to avoid multiple simultaneous instances")
@arg('-W', '--drop',
dest='drop_during_process',
action='store_true',
default=False,
help="Ignore events that occur while command is still being executed "
"to avoid multiple simultaneous instances")
@arg('--debug-force-polling',
default=False,
help='[debug] forces polling')
@expects_obj
def shell_command(args):
"""
Subcommand to execute shell commands in response to file system events.
:param args:
Command line argument options.
"""
from watchdog.tricks import ShellCommandTrick
if not args.command:
args.command = None
if args.debug_force_polling:
from watchdog.observers.polling import PollingObserver as Observer
else:
from watchdog.observers import Observer
patterns, ignore_patterns = parse_patterns(args.patterns,
args.ignore_patterns)
handler = ShellCommandTrick(shell_command=args.command,
patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=args.ignore_directories,
wait_for_process=args.wait_for_process,
drop_during_process=args.drop_during_process)
observer = Observer(timeout=args.timeout)
observe_with(observer, handler, args.directories, args.recursive)
@arg('command',
help='''Long-running command to run in a subprocess.
''')
@arg('command_args',
metavar='arg',
nargs='*',
help='''Command arguments.
Note: Use -- before the command arguments, otherwise watchmedo will
try to interpret them.
''')
@arg('-d',
'--directory',
dest='directories',
metavar='directory',
action='append',
help='Directory to watch. Use another -d or --directory option '
'for each directory.')
@arg('-p',
'--pattern',
'--patterns',
dest='patterns',
default='*',
help='matches event paths with these patterns (separated by ;).')
@arg('-i',
'--ignore-pattern',
'--ignore-patterns',
dest='ignore_patterns',
default='',
help='ignores event paths with these patterns (separated by ;).')
@arg('-D',
'--ignore-directories',
dest='ignore_directories',
default=False,
help='ignores events for directories')
@arg('-R',
'--recursive',
dest='recursive',
default=False,
help='monitors the directories recursively')
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('--signal',
dest='signal',
default='SIGINT',
help='stop the subprocess with this signal (default SIGINT)')
@arg('--debug-force-polling',
default=False,
help='[debug] forces polling')
@arg('--kill-after',
dest='kill_after',
default=10.0,
help='when stopping, kill the subprocess after the specified timeout '
'(default 10)')
@expects_obj
def auto_restart(args):
"""
Subcommand to start a long-running subprocess and restart it
on matched events.
:param args:
Command line argument options.
"""
if args.debug_force_polling:
from watchdog.observers.polling import PollingObserver as Observer
else:
from watchdog.observers import Observer
from watchdog.tricks import AutoRestartTrick
import signal
if not args.directories:
args.directories = ['.']
# Allow either signal name or number.
if args.signal.startswith("SIG"):
stop_signal = getattr(signal, args.signal)
else:
stop_signal = int(args.signal)
# Handle termination signals by raising a semantic exception which will
# allow us to gracefully unwind and stop the observer
termination_signals = {signal.SIGTERM, signal.SIGINT}
def handler_termination_signal(_signum, _frame):
# Neuter all signals so that we don't attempt a double shutdown
for signum in termination_signals:
signal.signal(signum, signal.SIG_IGN)
raise WatchdogShutdown
for signum in termination_signals:
signal.signal(signum, handler_termination_signal)
patterns, ignore_patterns = parse_patterns(args.patterns,
args.ignore_patterns)
command = [args.command]
command.extend(args.command_args)
handler = AutoRestartTrick(command=command,
patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=args.ignore_directories,
stop_signal=stop_signal,
kill_after=args.kill_after)
handler.start()
observer = Observer(timeout=args.timeout)
try:
observe_with(observer, handler, args.directories, args.recursive)
except WatchdogShutdown:
pass
finally:
handler.stop()
epilog = """Copyright 2011 Yesudeep Mangalapilly <[email protected]>.
Copyright 2012 Google, Inc.
Licensed under the terms of the Apache license, version 2.0. Please see
LICENSE in the source code for more information."""
parser = ArghParser(epilog=epilog)
parser.add_commands([tricks_from,
tricks_generate_yaml,
log,
shell_command,
auto_restart])
parser.add_argument('--version',
action='version',
version='%(prog)s ' + VERSION_STRING)
def main():
"""Entry-point function."""
parser.dispatch()
if __name__ == '__main__':
main()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/tricks/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.tricks
:synopsis: Utility event handlers.
:author: [email protected] (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: Trick
:members:
:show-inheritance:
.. autoclass:: LoggerTrick
:members:
:show-inheritance:
.. autoclass:: ShellCommandTrick
:members:
:show-inheritance:
.. autoclass:: AutoRestartTrick
:members:
:show-inheritance:
"""
import os
import signal
import subprocess
import time
from watchdog.utils import echo, has_attribute
from watchdog.events import PatternMatchingEventHandler
class Trick(PatternMatchingEventHandler):
"""Your tricks should subclass this class."""
@classmethod
def generate_yaml(cls):
context = dict(module_name=cls.__module__,
klass_name=cls.__name__)
template_yaml = """- %(module_name)s.%(klass_name)s:
args:
- argument1
- argument2
kwargs:
patterns:
- "*.py"
- "*.js"
ignore_patterns:
- "version.py"
ignore_directories: false
"""
return template_yaml % context
class LoggerTrick(Trick):
"""A simple trick that does only logs events."""
def on_any_event(self, event):
pass
@echo.echo
def on_modified(self, event):
pass
@echo.echo
def on_deleted(self, event):
pass
@echo.echo
def on_created(self, event):
pass
@echo.echo
def on_moved(self, event):
pass
class ShellCommandTrick(Trick):
"""Executes shell commands in response to matched events."""
def __init__(self, shell_command=None, patterns=None, ignore_patterns=None,
ignore_directories=False, wait_for_process=False,
drop_during_process=False):
super(ShellCommandTrick, self).__init__(patterns, ignore_patterns,
ignore_directories)
self.shell_command = shell_command
self.wait_for_process = wait_for_process
self.drop_during_process = drop_during_process
self.process = None
def on_any_event(self, event):
from string import Template
if self.drop_during_process and self.process and self.process.poll() is None:
return
if event.is_directory:
object_type = 'directory'
else:
object_type = 'file'
context = {
'watch_src_path': event.src_path,
'watch_dest_path': '',
'watch_event_type': event.event_type,
'watch_object': object_type,
}
if self.shell_command is None:
if has_attribute(event, 'dest_path'):
context.update({'watch_dest_path': event.dest_path})
command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
else:
command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
else:
if has_attribute(event, 'dest_path'):
context.update({'watch_dest_path': event.dest_path})
command = self.shell_command
command = Template(command).safe_substitute(**context)
self.process = subprocess.Popen(command, shell=True)
if self.wait_for_process:
self.process.wait()
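# Illustrative check (not part of the original module): how the command
# interpolation above behaves with string.Template.safe_substitute.
#
#     from string import Template
#     Template('echo "${watch_event_type} ${watch_src_path}"').safe_substitute(
#         watch_event_type='modified', watch_src_path='/tmp/x')
#     # -> 'echo "modified /tmp/x"'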
class AutoRestartTrick(Trick):
"""Starts a long-running subprocess and restarts it on matched events.
The command parameter is a list of command arguments, such as
`['bin/myserver', '-c', 'etc/myconfig.ini']`.
Call `start()` after creating the Trick. Call `stop()` when stopping
the process.
"""
def __init__(self, command, patterns=None, ignore_patterns=None,
ignore_directories=False, stop_signal=signal.SIGINT,
kill_after=10):
super(AutoRestartTrick, self).__init__(
patterns, ignore_patterns, ignore_directories)
self.command = command
self.stop_signal = stop_signal
self.kill_after = kill_after
self.process = None
def start(self):
self.process = subprocess.Popen(self.command, preexec_fn=os.setsid)
def stop(self):
if self.process is None:
return
try:
os.killpg(os.getpgid(self.process.pid), self.stop_signal)
except OSError:
# Process is already gone
pass
else:
kill_time = time.time() + self.kill_after
while time.time() < kill_time:
if self.process.poll() is not None:
break
time.sleep(0.25)
else:
try:
os.killpg(os.getpgid(self.process.pid), 9)
except OSError:
# Process is already gone
pass
self.process = None
@echo.echo
def on_any_event(self, event):
self.stop()
self.start()
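# Illustrative sketch (not part of the original module): a self-restarting
# server process. The command and paths are hypothetical.
#
#     trick = AutoRestartTrick(command=['bin/myserver', '-c', 'etc/myconfig.ini'],
#                              patterns=['*.py'])
#     trick.start()
#     # ... schedule ``trick`` on an observer; every matched event then
#     # calls stop() followed by start() via on_any_event ...
#     trick.stop()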
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/win32stat.py | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils.win32stat
:synopsis: Implementation of stat with st_ino and st_dev support.
Functions
---------
.. autofunction:: stat
"""
import ctypes
import ctypes.wintypes
import stat as stdstat
from collections import namedtuple
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
OPEN_EXISTING = 3
FILE_READ_ATTRIBUTES = 0x80
FILE_ATTRIBUTE_NORMAL = 0x80
FILE_ATTRIBUTE_READONLY = 0x1
FILE_ATTRIBUTE_DIRECTORY = 0x10
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
class FILETIME(ctypes.Structure):
_fields_ = [("dwLowDateTime", ctypes.wintypes.DWORD),
("dwHighDateTime", ctypes.wintypes.DWORD)]
class BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
_fields_ = [('dwFileAttributes', ctypes.wintypes.DWORD),
('ftCreationTime', FILETIME),
('ftLastAccessTime', FILETIME),
('ftLastWriteTime', FILETIME),
('dwVolumeSerialNumber', ctypes.wintypes.DWORD),
('nFileSizeHigh', ctypes.wintypes.DWORD),
('nFileSizeLow', ctypes.wintypes.DWORD),
('nNumberOfLinks', ctypes.wintypes.DWORD),
('nFileIndexHigh', ctypes.wintypes.DWORD),
('nFileIndexLow', ctypes.wintypes.DWORD)]
kernel32 = ctypes.WinDLL("kernel32")
CreateFile = kernel32.CreateFileW
CreateFile.restype = ctypes.wintypes.HANDLE
CreateFile.argtypes = (
ctypes.c_wchar_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.c_void_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.HANDLE,
)
GetFileInformationByHandle = kernel32.GetFileInformationByHandle
GetFileInformationByHandle.restype = ctypes.wintypes.BOOL
GetFileInformationByHandle.argtypes = (
ctypes.wintypes.HANDLE,
ctypes.wintypes.POINTER(BY_HANDLE_FILE_INFORMATION),
)
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (ctypes.wintypes.HANDLE,)
StatResult = namedtuple('StatResult', 'st_dev st_ino st_mode st_mtime st_size')
def _to_mode(attr):
m = 0
if (attr & FILE_ATTRIBUTE_DIRECTORY):
m |= stdstat.S_IFDIR | 0o111
else:
m |= stdstat.S_IFREG
if (attr & FILE_ATTRIBUTE_READONLY):
m |= 0o444
else:
m |= 0o666
return m
def _to_unix_time(ft):
t = (ft.dwHighDateTime) << 32 | ft.dwLowDateTime
return (t / 10000000) - 11644473600
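# Illustrative check (not part of the original module): FILETIME counts
# 100-nanosecond intervals since 1601-01-01, so dividing by 10**7 yields
# seconds and subtracting 11644473600 shifts the epoch to 1970-01-01. The
# Unix epoch itself corresponds to 11644473600 * 10**7 ticks, which splits
# into dwHighDateTime=27111902, dwLowDateTime=3577643008:
#
#     _to_unix_time(FILETIME(3577643008, 27111902))
#     # -> 0.0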
def stat(path):
hfile = CreateFile(path,
FILE_READ_ATTRIBUTES,
0,
None,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL
| FILE_FLAG_BACKUP_SEMANTICS
| FILE_FLAG_OPEN_REPARSE_POINT,
None)
if hfile == INVALID_HANDLE_VALUE:
raise ctypes.WinError()
info = BY_HANDLE_FILE_INFORMATION()
r = GetFileInformationByHandle(hfile, info)
CloseHandle(hfile)
if not r:
raise ctypes.WinError()
return StatResult(st_dev=info.dwVolumeSerialNumber,
st_ino=(info.nFileIndexHigh << 32) + info.nFileIndexLow,
st_mode=_to_mode(info.dwFileAttributes),
st_mtime=_to_unix_time(info.ftLastWriteTime),
st_size=(info.nFileSizeHigh << 32) + info.nFileSizeLow
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/delayed_queue.py | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
from collections import deque
class DelayedQueue(object):
def __init__(self, delay):
self.delay_sec = delay
self._lock = threading.Lock()
self._not_empty = threading.Condition(self._lock)
self._queue = deque()
self._closed = False
def put(self, element, delay=False):
"""Add element to queue."""
self._lock.acquire()
self._queue.append((element, time.time(), delay))
self._not_empty.notify()
self._lock.release()
def close(self):
"""Close queue, indicating no more items will be added."""
self._closed = True
# Interrupt the blocking _not_empty.wait() call in get
self._not_empty.acquire()
self._not_empty.notify()
self._not_empty.release()
def get(self):
"""Remove and return an element from the queue, or this queue has been
closed raise the Closed exception.
"""
while True:
# wait for element to be added to queue
self._not_empty.acquire()
while len(self._queue) == 0 and not self._closed:
self._not_empty.wait()
if self._closed:
self._not_empty.release()
return None
head, insert_time, delay = self._queue[0]
self._not_empty.release()
# wait for delay if required
if delay:
time_left = insert_time + self.delay_sec - time.time()
while time_left > 0:
time.sleep(time_left)
time_left = insert_time + self.delay_sec - time.time()
# return element if it's still in the queue
with self._lock:
if len(self._queue) > 0 and self._queue[0][0] is head:
self._queue.popleft()
return head
def remove(self, predicate):
"""Remove and return the first items for which predicate is True,
ignoring delay."""
with self._lock:
for i, (elem, t, delay) in enumerate(self._queue):
if predicate(elem):
del self._queue[i]
return elem
return None
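# Illustrative sketch (not part of the original module): elements put with
# delay=True are held back roughly ``delay_sec`` seconds before get() will
# return them; ordering stays FIFO.
#
#     q = DelayedQueue(delay=0.5)
#     q.put('event-a', delay=True)
#     q.put('event-b')
#     q.get()  # blocks about 0.5 s, then returns 'event-a'
#     q.get()  # returns 'event-b' immediately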
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/bricks.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility collections or "bricks".
:module: watchdog.utils.bricks
:author: [email protected] (Yesudeep Mangalapilly)
:author: [email protected] (Lukáš Lalinský)
:author: [email protected] (Raymond Hettinger)
Classes
=======
.. autoclass:: OrderedSetQueue
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: OrderedSet
"""
from .compat import queue
class SkipRepeatsQueue(queue.Queue, object):
"""Thread-safe implementation of an special queue where a
put of the last-item put'd will be dropped.
The implementation leverages locking already implemented in the base class
redefining only the primitives.
Queued items must be immutable and hashable so that they can be used
as dictionary keys. You must implement **only read-only properties** and
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
:meth:`Item.__ne__()` methods for items to be hashable.
An example implementation follows::
class Item(object):
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def _key(self):
return (self._a, self._b)
def __eq__(self, item):
return self._key() == item._key()
def __ne__(self, item):
return self._key() != item._key()
def __hash__(self):
return hash(self._key())
Based on the OrderedSetQueue below.
"""
def _init(self, maxsize):
super(SkipRepeatsQueue, self)._init(maxsize)
self._last_item = None
def _put(self, item):
if item != self._last_item:
super(SkipRepeatsQueue, self)._put(item)
self._last_item = item
else:
# `put` increments `unfinished_tasks` even if we did not put
# anything into the queue here
self.unfinished_tasks -= 1
def _get(self):
item = super(SkipRepeatsQueue, self)._get()
if item is self._last_item:
self._last_item = None
return item
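# Illustrative check (not part of the original module): consecutive
# duplicates are dropped, non-adjacent repeats are kept.
#
#     q = SkipRepeatsQueue()
#     for item in ('a', 'a', 'b', 'a'):
#         q.put(item)
#     [q.get() for _ in range(q.qsize())]
#     # -> ['a', 'b', 'a']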
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils
:synopsis: Utility classes and functions.
:author: [email protected] (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: BaseThread
:members:
:show-inheritance:
:inherited-members:
"""
import os
import sys
import threading
from watchdog.utils import platform
from watchdog.utils.compat import Event
if sys.version_info[0] == 2 and platform.is_windows():
# st_ino is not implemented in os.stat on this platform
import win32stat
stat = win32stat.stat
else:
stat = os.stat
def has_attribute(ob, attribute):
"""
:func:`hasattr` swallows exceptions. :func:`has_attribute` tests a Python object for the
presence of an attribute.
:param ob:
object to inspect
:param attribute:
``str`` for the name of the attribute.
"""
return getattr(ob, attribute, None) is not None
class UnsupportedLibc(Exception):
pass
class WatchdogShutdown(Exception):
"""
Semantic exception used to signal an external shutdown event.
"""
pass
class BaseThread(threading.Thread):
""" Convenience class for creating stoppable threads. """
def __init__(self):
threading.Thread.__init__(self)
if has_attribute(self, 'daemon'):
self.daemon = True
else:
self.setDaemon(True)
self._stopped_event = Event()
if not has_attribute(self._stopped_event, 'is_set'):
self._stopped_event.is_set = self._stopped_event.isSet
@property
def stopped_event(self):
return self._stopped_event
def should_keep_running(self):
"""Determines whether the thread should continue running."""
return not self._stopped_event.is_set()
def on_thread_stop(self):
"""Override this method instead of :meth:`stop()`.
:meth:`stop()` calls this method.
This method is called immediately after the thread is signaled to stop.
"""
pass
def stop(self):
"""Signals the thread to stop."""
self._stopped_event.set()
self.on_thread_stop()
def on_thread_start(self):
"""Override this method instead of :meth:`start()`. :meth:`start()`
calls this method.
This method is called right before this thread is started and this
object’s run() method is invoked.
"""
pass
def start(self):
self.on_thread_start()
threading.Thread.start(self)
def load_module(module_name):
"""Imports a module given its name and returns a handle to it."""
try:
__import__(module_name)
except ImportError:
raise ImportError('No module named %s' % module_name)
return sys.modules[module_name]
def load_class(dotted_path):
"""Loads and returns a class definition provided a dotted path
specification the last part of the dotted path is the class name
and there is at least one module name preceding the class name.
Notes:
You will need to ensure that the module you are trying to load
exists in the Python path.
Examples:
- module.name.ClassName # Provided module.name is in the Python path.
- module.ClassName # Provided module is in the Python path.
What won't work:
- ClassName
- modle.name.ClassName # Typo in module name.
- module.name.ClasNam # Typo in classname.
"""
dotted_path_split = dotted_path.split('.')
if len(dotted_path_split) > 1:
klass_name = dotted_path_split[-1]
module_name = '.'.join(dotted_path_split[:-1])
module = load_module(module_name)
if has_attribute(module, klass_name):
klass = getattr(module, klass_name)
return klass
# Finally create and return an instance of the class
# return klass(*args, **kwargs)
else:
raise AttributeError('Module %s does not have class attribute %s' % (
module_name, klass_name))
else:
raise ValueError(
'Dotted module path %s must contain a module name and a classname' % dotted_path)
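# Illustrative sketch (not part of the original module): loading a trick
# class by its dotted path, as watchmedo does.
#
#     LoggerTrick = load_class('watchdog.tricks.LoggerTrick')
#     handler = LoggerTrick(patterns=['*.py'])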
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/unicode_paths.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Will Bond <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from watchdog.utils import platform
try:
# Python 2
str_cls = unicode
bytes_cls = str
except NameError:
# Python 3
str_cls = str
bytes_cls = bytes
# This is used by Linux when the locale seems to be improperly set. UTF-8 tends
# to be the encoding used by all distros, so this is a good fallback.
fs_fallback_encoding = 'utf-8'
fs_encoding = sys.getfilesystemencoding() or fs_fallback_encoding
def encode(path):
if isinstance(path, str_cls):
try:
path = path.encode(fs_encoding, 'strict')
except UnicodeEncodeError:
if not platform.is_linux():
raise
path = path.encode(fs_fallback_encoding, 'strict')
return path
def decode(path):
if isinstance(path, bytes_cls):
try:
path = path.decode(fs_encoding, 'strict')
except UnicodeDecodeError:
if not platform.is_linux():
raise
path = path.decode(fs_fallback_encoding, 'strict')
return path
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/echo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# echo.py: Tracing function calls using Python decorators.
#
# Written by Thomas Guest <[email protected]>
# Please see http://wordaligned.org/articles/echo
#
# Place into the public domain.
""" Echo calls made to functions and methods in a module.
"Echoing" a function call means printing out the name of the function
and the values of its arguments before making the call (which is more
commonly referred to as "tracing", but Python already has a trace module).
Example: to echo calls made to functions in "my_module" do:
import echo
import my_module
echo.echo_module(my_module)
Example: to echo calls made to functions in "my_module.my_class" do:
echo.echo_class(my_module.my_class)
Alternatively, echo.echo can be used to decorate functions. Calls to the
decorated function will be echoed.
Example:
@echo.echo
def my_function(args):
pass
"""
import inspect
import sys
def name(item):
" Return an item's name. "
return item.__name__
def is_classmethod(instancemethod, klass):
" Determine if an instancemethod is a classmethod. "
return inspect.ismethod(instancemethod) and instancemethod.__self__ is klass
def is_static_method(method, klass):
"""Returns True if method is an instance method of klass."""
for c in klass.mro():
if name(method) in c.__dict__:
return isinstance(c.__dict__[name(method)], staticmethod)
else:
return False
def is_class_private_name(name):
" Determine if a name is a class private name. "
# Exclude system defined names such as __init__, __add__ etc
return name.startswith("__") and not name.endswith("__")
def method_name(method):
""" Return a method's name.
This function returns the name the method is accessed by from
outside the class (i.e. it prefixes "private" methods appropriately).
"""
mname = name(method)
if is_class_private_name(mname):
mname = "_%s%s" % (name(method.__self__.__class__), mname)
return mname
def format_arg_value(arg_val):
""" Return a string representing a (name, value) pair.
>>> format_arg_value(('x', (1, 2, 3)))
'x=(1, 2, 3)'
"""
arg, val = arg_val
return "%s=%r" % (arg, val)
def echo(fn, write=sys.stdout.write):
""" Echo calls to a function.
Returns a decorated version of the input function which "echoes" calls
made to it by writing out the function's name and the arguments it was
called with.
"""
import functools
# Unpack function's arg count, arg names, arg defaults
code = fn.__code__
argcount = code.co_argcount
argnames = code.co_varnames[:argcount]
fn_defaults = fn.__defaults__ or list()
argdefs = dict(list(zip(argnames[-len(fn_defaults):], fn_defaults)))
@functools.wraps(fn)
def wrapped(*v, **k):
# Collect function arguments by chaining together positional,
# defaulted, extra positional and keyword arguments.
positional = list(map(format_arg_value, list(zip(argnames, v))))
defaulted = [format_arg_value((a, argdefs[a]))
for a in argnames[len(v):] if a not in k]
nameless = list(map(repr, v[argcount:]))
keyword = list(map(format_arg_value, list(k.items())))
args = positional + defaulted + nameless + keyword
write("%s(%s)\n" % (name(fn), ", ".join(args)))
return fn(*v, **k)
return wrapped
def echo_instancemethod(klass, method, write=sys.stdout.write):
""" Change an instancemethod so that calls to it are echoed.
Replacing a classmethod is a little more tricky.
See: http://www.python.org/doc/current/ref/types.html
"""
mname = method_name(method)
never_echo = "__str__", "__repr__", # Avoid recursion printing method calls
if mname in never_echo:
pass
elif is_classmethod(method, klass):
setattr(klass, mname, classmethod(echo(method.__func__, write)))
else:
setattr(klass, mname, echo(method, write))
def echo_class(klass, write=sys.stdout.write):
""" Echo calls to class methods and static functions
"""
for _, method in inspect.getmembers(klass, inspect.ismethod):
# In python 3 only class methods are returned here, but in python2 instance methods are too.
echo_instancemethod(klass, method, write)
for _, fn in inspect.getmembers(klass, inspect.isfunction):
if is_static_method(fn, klass):
setattr(klass, name(fn), staticmethod(echo(fn, write)))
else:
# It's not a class or a static method, so it must be an instance method.
# This should only be called in python 3, because in python 3 instance methods are considered functions.
echo_instancemethod(klass, fn, write)
def echo_module(mod, write=sys.stdout.write):
""" Echo calls to functions and methods in a module.
"""
for fname, fn in inspect.getmembers(mod, inspect.isfunction):
setattr(mod, fname, echo(fn, write))
for _, klass in inspect.getmembers(mod, inspect.isclass):
echo_class(klass, write)
if __name__ == "__main__":
import doctest
optionflags = doctest.ELLIPSIS
doctest.testfile('echoexample.txt', optionflags=optionflags)
doctest.testmod(optionflags=optionflags)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/dirsnapshot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils.dirsnapshot
:synopsis: Directory snapshots and comparison.
:author: [email protected] (Yesudeep Mangalapilly)
.. ADMONITION:: Where are the moved events? They "disappeared"
This implementation does not take partition boundaries
into consideration. It will only work when the directory
tree is entirely on the same file system. More specifically,
any part of the code that depends on inode numbers can
break if partition boundaries are crossed. In these cases,
the snapshot diff will represent file/directory movement as
created and deleted events.
Classes
-------
.. autoclass:: DirectorySnapshot
:members:
:show-inheritance:
.. autoclass:: DirectorySnapshotDiff
:members:
:show-inheritance:
.. autoclass:: EmptyDirectorySnapshot
:members:
:show-inheritance:
"""
import errno
import os
from stat import S_ISDIR
from watchdog.utils import stat as default_stat
try:
from os import scandir
except ImportError:
from os import listdir as scandir
class DirectorySnapshotDiff(object):
"""
Compares two directory snapshots and creates an object that represents
the difference between the two snapshots.
:param ref:
The reference directory snapshot.
:type ref:
:class:`DirectorySnapshot`
:param snapshot:
The directory snapshot which will be compared
with the reference snapshot.
:type snapshot:
:class:`DirectorySnapshot`
:param ignore_device:
A boolean indicating whether to ignore the device id or not.
By default, a file may be uniquely identified by a combination of its first
inode and its device id. The problem is that the device id may (or may not)
change between system boots. This problem would cause the DirectorySnapshotDiff
to think a file has been deleted and created again but it would be the
exact same file.
Set to True only if you are sure you will always use the same device.
:type ignore_device:
:class:`bool`
"""
def __init__(self, ref, snapshot, ignore_device=False):
created = snapshot.paths - ref.paths
deleted = ref.paths - snapshot.paths
if ignore_device:
def get_inode(directory, full_path):
return directory.inode(full_path)[0]
else:
def get_inode(directory, full_path):
return directory.inode(full_path)
# check that all unchanged paths have the same inode
for path in ref.paths & snapshot.paths:
if get_inode(ref, path) != get_inode(snapshot, path):
created.add(path)
deleted.add(path)
# find moved paths
moved = set()
for path in set(deleted):
inode = ref.inode(path)
new_path = snapshot.path(inode)
if new_path:
# file is not deleted but moved
deleted.remove(path)
moved.add((path, new_path))
for path in set(created):
inode = snapshot.inode(path)
old_path = ref.path(inode)
if old_path:
created.remove(path)
moved.add((old_path, path))
# find modified paths
# first check paths that have not moved
modified = set()
for path in ref.paths & snapshot.paths:
if get_inode(ref, path) == get_inode(snapshot, path):
if ref.mtime(path) != snapshot.mtime(path) or ref.size(path) != snapshot.size(path):
modified.add(path)
for (old_path, new_path) in moved:
if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size(old_path) != snapshot.size(new_path):
modified.add(old_path)
self._dirs_created = [path for path in created if snapshot.isdir(path)]
self._dirs_deleted = [path for path in deleted if ref.isdir(path)]
self._dirs_modified = [path for path in modified if ref.isdir(path)]
self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)]
self._files_created = list(created - set(self._dirs_created))
self._files_deleted = list(deleted - set(self._dirs_deleted))
self._files_modified = list(modified - set(self._dirs_modified))
self._files_moved = list(moved - set(self._dirs_moved))
def __str__(self):
return self.__repr__()
def __repr__(self):
fmt = (
'<{0} files(created={1}, deleted={2}, modified={3}, moved={4}),'
' folders(created={5}, deleted={6}, modified={7}, moved={8})>'
)
return fmt.format(
type(self).__name__,
len(self._files_created),
len(self._files_deleted),
len(self._files_modified),
len(self._files_moved),
len(self._dirs_created),
len(self._dirs_deleted),
len(self._dirs_modified),
len(self._dirs_moved)
)
@property
def files_created(self):
"""List of files that were created."""
return self._files_created
@property
def files_deleted(self):
"""List of files that were deleted."""
return self._files_deleted
@property
def files_modified(self):
"""List of files that were modified."""
return self._files_modified
@property
def files_moved(self):
"""
List of files that were moved.
Each event is a two-tuple: the first item is the original path,
which has been renamed to the second item in the tuple.
"""
return self._files_moved
@property
def dirs_modified(self):
"""
List of directories that were modified.
"""
return self._dirs_modified
@property
def dirs_moved(self):
"""
List of directories that were moved.
Each event is a two-tuple: the first item is the original path,
which has been renamed to the second item in the tuple.
"""
return self._dirs_moved
@property
def dirs_deleted(self):
"""
List of directories that were deleted.
"""
return self._dirs_deleted
@property
def dirs_created(self):
"""
List of directories that were created.
"""
return self._dirs_created
class DirectorySnapshot(object):
"""
A snapshot of stat information of files in a directory.
:param path:
The directory path for which a snapshot should be taken.
:type path:
``str``
:param recursive:
``True`` if the entire directory tree should be included in the
snapshot; ``False`` otherwise.
:type recursive:
``bool``
:param stat:
Use custom stat function that returns a stat structure for path.
Currently only st_dev, st_ino, st_mode, st_mtime and st_size are needed.
A function taking a ``path`` as argument which will be called
for every entry in the directory tree.
:param listdir:
Use custom listdir function. For details see ``os.scandir`` if available, else ``os.listdir``.
"""
def __init__(self, path, recursive=True,
stat=default_stat,
listdir=scandir):
self.recursive = recursive
self.stat = stat
self.listdir = listdir
self._stat_info = {}
self._inode_to_path = {}
st = self.stat(path)
self._stat_info[path] = st
self._inode_to_path[(st.st_ino, st.st_dev)] = path
for p, st in self.walk(path):
i = (st.st_ino, st.st_dev)
self._inode_to_path[i] = p
self._stat_info[p] = st
def walk(self, root):
try:
paths = [os.path.join(root, entry if isinstance(entry, str) else entry.name)
for entry in self.listdir(root)]
except OSError as e:
# Directory may have been deleted between finding it in the directory
# list of its parent and trying to delete its contents. If this
# happens we treat it as empty. Likewise if the directory was replaced
# with a file of the same name (less likely, but possible).
if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
return
else:
raise
entries = []
for p in paths:
try:
entry = (p, self.stat(p))
entries.append(entry)
yield entry
except OSError:
continue
if self.recursive:
for path, st in entries:
try:
if S_ISDIR(st.st_mode):
for entry in self.walk(path):
yield entry
except (IOError, OSError) as e:
# IOError for Python 2
# OSError for Python 3
# (should be only PermissionError when dropping Python 2 support)
if e.errno != errno.EACCES:
raise
@property
def paths(self):
"""
Set of file/directory paths in the snapshot.
"""
return set(self._stat_info.keys())
def path(self, id):
"""
Returns path for id. None if id is unknown to this snapshot.
"""
return self._inode_to_path.get(id)
def inode(self, path):
""" Returns an id for path. """
st = self._stat_info[path]
return (st.st_ino, st.st_dev)
def isdir(self, path):
return S_ISDIR(self._stat_info[path].st_mode)
def mtime(self, path):
return self._stat_info[path].st_mtime
def size(self, path):
return self._stat_info[path].st_size
def stat_info(self, path):
"""
Returns a stat information object for the specified path from
the snapshot.
Attached information is subject to change. Do not use unless
you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`,
:func:`isdir` instead.
:param path:
The path for which stat information should be obtained
from a snapshot.
"""
return self._stat_info[path]
def __sub__(self, previous_dirsnap):
"""Allow subtracting a DirectorySnapshot object instance from
another.
:returns:
A :class:`DirectorySnapshotDiff` object.
"""
return DirectorySnapshotDiff(previous_dirsnap, self)
def __str__(self):
return self.__repr__()
def __repr__(self):
return str(self._stat_info)
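# Illustrative sketch (not part of the original module): diffing two
# snapshots via __sub__. The watched path is hypothetical.
#
#     before = DirectorySnapshot('/tmp/watched')
#     # ... file system activity happens here ...
#     after = DirectorySnapshot('/tmp/watched')
#     diff = after - before          # a DirectorySnapshotDiff
#     diff.files_created, diff.files_modified, diff.files_moved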
class EmptyDirectorySnapshot(object):
"""Class to implement an empty snapshot. This is used together with
DirectorySnapshot and DirectorySnapshotDiff in order to get all the files/folders
in the directory as created.
"""
@staticmethod
def path(_):
"""Mock up method to return the path of the received inode. As the snapshot
is intended to be empty, it always returns None.
:returns:
None.
"""
return None
@property
def paths(self):
"""Mock up method to return a set of file/directory paths in the snapshot. As
the snapshot is intended to be empty, it always returns an empty set.
:returns:
An empty set.
"""
return set()
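# --- Editor's note: a minimal usage sketch (Python 3), not part of the
# upstream module. It demonstrates the snapshot-diff workflow implemented
# above: take two snapshots of the same tree and subtract the older one from
# the newer one to obtain a DirectorySnapshotDiff.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as demo_root:
        before = DirectorySnapshot(demo_root)          # recursive by default
        open(os.path.join(demo_root, 'new.txt'), 'w').close()
        after = DirectorySnapshot(demo_root)
        diff = after - before                          # DirectorySnapshotDiff(before, after)
        print('created:', diff.files_created)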
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/compat.py | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['queue', 'Event']
try:
import queue
except ImportError:
import Queue as queue
from threading import Event
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/utils/platform.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
PLATFORM_WINDOWS = 'windows'
PLATFORM_LINUX = 'linux'
PLATFORM_BSD = 'bsd'
PLATFORM_DARWIN = 'darwin'
PLATFORM_UNKNOWN = 'unknown'
def get_platform_name():
if sys.platform.startswith("win"):
return PLATFORM_WINDOWS
elif sys.platform.startswith('darwin'):
return PLATFORM_DARWIN
elif sys.platform.startswith('linux'):
return PLATFORM_LINUX
elif sys.platform.startswith(('dragonfly', 'freebsd', 'netbsd', 'openbsd', 'bsd')):
return PLATFORM_BSD
else:
return PLATFORM_UNKNOWN
__platform__ = get_platform_name()
def is_linux():
return __platform__ == PLATFORM_LINUX
def is_bsd():
return __platform__ == PLATFORM_BSD
def is_darwin():
return __platform__ == PLATFORM_DARWIN
def is_windows():
return __platform__ == PLATFORM_WINDOWS
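# --- Editor's note: a small hedged sketch, not part of the upstream module,
# showing how these helpers are typically used to pick an observer backend.
# The backend names below are illustrative only.
if __name__ == '__main__':
    if is_linux():
        suggested = 'inotify'
    elif is_darwin():
        suggested = 'fsevents'
    elif is_windows():
        suggested = 'winapi'
    elif is_bsd():
        suggested = 'kqueue'
    else:
        suggested = 'polling'   # portable fallback
    print('platform: %s, suggested backend: %s' % (__platform__, suggested))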
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/fsevents.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.fsevents
:synopsis: FSEvents based emitter implementation.
:author: [email protected] (Yesudeep Mangalapilly)
:platforms: Mac OS X
"""
from __future__ import with_statement
import os
import sys
import threading
import unicodedata
import _watchdog_fsevents as _fsevents
from watchdog.events import (
FileDeletedEvent,
FileModifiedEvent,
FileCreatedEvent,
FileMovedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirCreatedEvent,
DirMovedEvent
)
from watchdog.observers.api import (
BaseObserver,
EventEmitter,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
class FSEventsEmitter(EventEmitter):
"""
Mac OS X FSEvents Emitter class.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
def on_thread_stop(self):
if self.watch:
_fsevents.remove_watch(self.watch)
_fsevents.stop(self)
self._watch = None
def queue_events(self, timeout):
with self._lock:
events = self.native_events
i = 0
while i < len(events):
event = events[i]
# For some reason the create and remove flags are sometimes also
# set for rename and modify type events, so let those take
# precedence.
if event.is_renamed:
                    # Internal moves appear to always be consecutive in the same
                    # buffer and have IDs that differ by exactly one (while others
                    # don't), making it possible to pair up the two events coming
                    # from a single move operation. (None of this is documented!)
# Otherwise, guess whether file was moved in or out.
# TODO: handle id wrapping
if (i + 1 < len(events) and events[i + 1].is_renamed
and events[i + 1].event_id == event.event_id + 1):
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(event.path, events[i + 1].path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path)))
i += 1
elif os.path.exists(event.path):
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
else:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
# TODO: generate events for tree
                elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(event.path))
elif event.is_created:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
elif event.is_removed:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
i += 1
def run(self):
try:
def callback(pathnames, flags, ids, emitter=self):
with emitter._lock:
emitter.native_events = [
_fsevents.NativeEvent(event_path, event_flags, event_id)
for event_path, event_flags, event_id in zip(pathnames, flags, ids)
]
emitter.queue_events(emitter.timeout)
# for pathname, flag in zip(pathnames, flags):
# if emitter.watch.is_recursive: # and pathname != emitter.watch.path:
# new_sub_snapshot = DirectorySnapshot(pathname, True)
# old_sub_snapshot = self.snapshot.copy(pathname)
# diff = new_sub_snapshot - old_sub_snapshot
# self.snapshot += new_subsnapshot
# else:
# new_snapshot = DirectorySnapshot(emitter.watch.path, False)
# diff = new_snapshot - emitter.snapshot
# emitter.snapshot = new_snapshot
# INFO: FSEvents reports directory notifications recursively
# by default, so we do not need to add subdirectory paths.
# pathnames = set([self.watch.path])
# if self.watch.is_recursive:
# for root, directory_names, _ in os.walk(self.watch.path):
# for directory_name in directory_names:
# full_path = absolute_path(
# os.path.join(root, directory_name))
# pathnames.add(full_path)
self.pathnames = [self.watch.path]
_fsevents.add_watch(self,
self.watch,
callback,
self.pathnames)
_fsevents.read_events(self)
except Exception:
pass
class FSEventsObserver(BaseObserver):
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=FSEventsEmitter,
timeout=timeout)
def schedule(self, event_handler, path, recursive=False):
# Python 2/3 compat
try:
str_class = unicode
except NameError:
str_class = str
# Fix for issue #26: Trace/BPT error when given a unicode path
# string. https://github.com/gorakhargosh/watchdog/issues#issue/26
if isinstance(path, str_class):
# path = unicode(path, 'utf-8')
path = unicodedata.normalize('NFC', path)
# We only encode the path in Python 2 for backwards compatibility.
# On Python 3 we want the path to stay as unicode if possible for
# the sake of path matching not having to be rewritten to use the
        # bytes API instead of strings. The _watchdog_fsevents.so code for
# Python 3 can handle both str and bytes paths, which is why we
# do not HAVE to encode it with Python 3. The Python 2 code in
# _watchdog_fsevents.so was not changed for the sake of backwards
# compatibility.
if sys.version_info < (3,):
path = path.encode('utf-8')
return BaseObserver.schedule(self, event_handler, path, recursive)
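# --- Editor's note: a hedged usage sketch, not part of the upstream module
# (macOS only; requires the _watchdog_fsevents extension). It wires a
# LoggingEventHandler from watchdog.events to the observer defined above;
# the watched path and the 5-second run time are illustrative assumptions.
if __name__ == '__main__':
    import time
    from watchdog.events import LoggingEventHandler
    observer = FSEventsObserver()
    observer.schedule(LoggingEventHandler(), '/tmp', recursive=True)
    observer.start()
    try:
        time.sleep(5)           # let events flow for a few seconds
    finally:
        observer.stop()
        observer.join()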
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/winapi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# winapi.py: Windows API-Python interface (removes dependency on pywin32)
#
# Copyright (C) 2007 Thomas Heller <[email protected]>
# Copyright (C) 2010 Will McGugan <[email protected]>
# Copyright (C) 2010 Ryan Kelly <[email protected]>
# Copyright (C) 2010 Yesudeep Mangalapilly <[email protected]>
# Copyright (C) 2014 Thomas Amland
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and / or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Portions of this code were taken from pyfilesystem, which uses the above
# new BSD license.
import ctypes.wintypes
from functools import reduce
LPVOID = ctypes.wintypes.LPVOID
# Invalid handle value.
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
# File notification constants.
FILE_NOTIFY_CHANGE_FILE_NAME = 0x01
FILE_NOTIFY_CHANGE_DIR_NAME = 0x02
FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04
FILE_NOTIFY_CHANGE_SIZE = 0x08
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
FILE_NOTIFY_CHANGE_CREATION = 0x040
FILE_NOTIFY_CHANGE_SECURITY = 0x0100
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_LIST_DIRECTORY = 1
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_SHARE_DELETE = 0x04
OPEN_EXISTING = 3
VOLUME_NAME_NT = 0x02
# File action constants.
FILE_ACTION_CREATED = 1
FILE_ACTION_DELETED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_ACTION_DELETED_SELF = 0xFFFE
FILE_ACTION_OVERFLOW = 0xFFFF
# Aliases
FILE_ACTION_ADDED = FILE_ACTION_CREATED
FILE_ACTION_REMOVED = FILE_ACTION_DELETED
FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF
THREAD_TERMINATE = 0x0001
# IO waiting constants.
WAIT_ABANDONED = 0x00000080
WAIT_IO_COMPLETION = 0x000000C0
WAIT_OBJECT_0 = 0x00000000
WAIT_TIMEOUT = 0x00000102
# Error codes
ERROR_OPERATION_ABORTED = 995
class OVERLAPPED(ctypes.Structure):
_fields_ = [('Internal', LPVOID),
('InternalHigh', LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('Pointer', LPVOID),
('hEvent', ctypes.wintypes.HANDLE),
]
def _errcheck_bool(value, func, args):
if not value:
raise ctypes.WinError()
return args
def _errcheck_handle(value, func, args):
if not value:
raise ctypes.WinError()
if value == INVALID_HANDLE_VALUE:
raise ctypes.WinError()
return args
def _errcheck_dword(value, func, args):
if value == 0xFFFFFFFF:
raise ctypes.WinError()
return args
kernel32 = ctypes.WinDLL("kernel32")
ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL
ReadDirectoryChangesW.errcheck = _errcheck_bool
ReadDirectoryChangesW.argtypes = (
ctypes.wintypes.HANDLE, # hDirectory
LPVOID, # lpBuffer
ctypes.wintypes.DWORD, # nBufferLength
ctypes.wintypes.BOOL, # bWatchSubtree
ctypes.wintypes.DWORD, # dwNotifyFilter
ctypes.POINTER(ctypes.wintypes.DWORD), # lpBytesReturned
ctypes.POINTER(OVERLAPPED), # lpOverlapped
LPVOID # FileIOCompletionRoutine # lpCompletionRoutine
)
CreateFileW = kernel32.CreateFileW
CreateFileW.restype = ctypes.wintypes.HANDLE
CreateFileW.errcheck = _errcheck_handle
CreateFileW.argtypes = (
ctypes.wintypes.LPCWSTR, # lpFileName
ctypes.wintypes.DWORD, # dwDesiredAccess
ctypes.wintypes.DWORD, # dwShareMode
LPVOID, # lpSecurityAttributes
ctypes.wintypes.DWORD, # dwCreationDisposition
ctypes.wintypes.DWORD, # dwFlagsAndAttributes
ctypes.wintypes.HANDLE # hTemplateFile
)
CloseHandle = kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (
ctypes.wintypes.HANDLE, # hObject
)
CancelIoEx = kernel32.CancelIoEx
CancelIoEx.restype = ctypes.wintypes.BOOL
CancelIoEx.errcheck = _errcheck_bool
CancelIoEx.argtypes = (
ctypes.wintypes.HANDLE, # hObject
ctypes.POINTER(OVERLAPPED) # lpOverlapped
)
CreateEvent = kernel32.CreateEventW
CreateEvent.restype = ctypes.wintypes.HANDLE
CreateEvent.errcheck = _errcheck_handle
CreateEvent.argtypes = (
LPVOID, # lpEventAttributes
ctypes.wintypes.BOOL, # bManualReset
ctypes.wintypes.BOOL, # bInitialState
ctypes.wintypes.LPCWSTR, # lpName
)
SetEvent = kernel32.SetEvent
SetEvent.restype = ctypes.wintypes.BOOL
SetEvent.errcheck = _errcheck_bool
SetEvent.argtypes = (
ctypes.wintypes.HANDLE, # hEvent
)
WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx
WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD
WaitForSingleObjectEx.errcheck = _errcheck_dword
WaitForSingleObjectEx.argtypes = (
ctypes.wintypes.HANDLE, # hObject
ctypes.wintypes.DWORD, # dwMilliseconds
ctypes.wintypes.BOOL, # bAlertable
)
CreateIoCompletionPort = kernel32.CreateIoCompletionPort
CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE
CreateIoCompletionPort.errcheck = _errcheck_handle
CreateIoCompletionPort.argtypes = (
ctypes.wintypes.HANDLE, # FileHandle
ctypes.wintypes.HANDLE, # ExistingCompletionPort
LPVOID, # CompletionKey
ctypes.wintypes.DWORD, # NumberOfConcurrentThreads
)
GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus
GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL
GetQueuedCompletionStatus.errcheck = _errcheck_bool
GetQueuedCompletionStatus.argtypes = (
ctypes.wintypes.HANDLE, # CompletionPort
LPVOID, # lpNumberOfBytesTransferred
LPVOID, # lpCompletionKey
ctypes.POINTER(OVERLAPPED), # lpOverlapped
ctypes.wintypes.DWORD, # dwMilliseconds
)
PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus
PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL
PostQueuedCompletionStatus.errcheck = _errcheck_bool
PostQueuedCompletionStatus.argtypes = (
ctypes.wintypes.HANDLE, # CompletionPort
ctypes.wintypes.DWORD, # lpNumberOfBytesTransferred
ctypes.wintypes.DWORD, # lpCompletionKey
ctypes.POINTER(OVERLAPPED), # lpOverlapped
)
GetFinalPathNameByHandleW = kernel32.GetFinalPathNameByHandleW
GetFinalPathNameByHandleW.restype = ctypes.wintypes.DWORD
GetFinalPathNameByHandleW.errcheck = _errcheck_dword
GetFinalPathNameByHandleW.argtypes = (
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.LPWSTR, # lpszFilePath
ctypes.wintypes.DWORD, # cchFilePath
ctypes.wintypes.DWORD, # DWORD
)
class FILE_NOTIFY_INFORMATION(ctypes.Structure):
_fields_ = [("NextEntryOffset", ctypes.wintypes.DWORD),
("Action", ctypes.wintypes.DWORD),
("FileNameLength", ctypes.wintypes.DWORD),
# ("FileName", (ctypes.wintypes.WCHAR * 1))]
("FileName", (ctypes.c_char * 1))]
LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION)
# We don't need to recalculate these flags every time a call is made to
# the win32 API functions.
WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS
WATCHDOG_FILE_SHARE_FLAGS = reduce(
lambda x, y: x | y, [
FILE_SHARE_READ,
FILE_SHARE_WRITE,
FILE_SHARE_DELETE,
])
WATCHDOG_FILE_NOTIFY_FLAGS = reduce(
lambda x, y: x | y, [
FILE_NOTIFY_CHANGE_FILE_NAME,
FILE_NOTIFY_CHANGE_DIR_NAME,
FILE_NOTIFY_CHANGE_ATTRIBUTES,
FILE_NOTIFY_CHANGE_SIZE,
FILE_NOTIFY_CHANGE_LAST_WRITE,
FILE_NOTIFY_CHANGE_SECURITY,
FILE_NOTIFY_CHANGE_LAST_ACCESS,
FILE_NOTIFY_CHANGE_CREATION,
])
BUFFER_SIZE = 2048
def _parse_event_buffer(readBuffer, nBytes):
results = []
while nBytes > 0:
fni = ctypes.cast(readBuffer, LPFNI)[0]
ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset
# filename = ctypes.wstring_at(ptr, fni.FileNameLength)
filename = ctypes.string_at(ptr, fni.FileNameLength)
results.append((fni.Action, filename.decode('utf-16')))
numToSkip = fni.NextEntryOffset
if numToSkip <= 0:
break
readBuffer = readBuffer[numToSkip:]
nBytes -= numToSkip # numToSkip is long. nBytes should be long too.
return results
def _is_observed_path_deleted(handle, path):
    # Compare the observed path with the actual path returned by
    # GetFinalPathNameByHandleW. If the directory was moved to the trash
    # bin or deleted, the actual path will no longer equal the observed path.
buff = ctypes.create_unicode_buffer(BUFFER_SIZE)
GetFinalPathNameByHandleW(handle, buff, BUFFER_SIZE, VOLUME_NAME_NT)
return buff.value != path
def _generate_observed_path_deleted_event():
    # Create a synthetic event to signal that the observed directory was deleted.
path = ctypes.create_unicode_buffer('.')
event = FILE_NOTIFY_INFORMATION(0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8"))
event_size = ctypes.sizeof(event)
buff = ctypes.create_string_buffer(BUFFER_SIZE)
ctypes.memmove(buff, ctypes.addressof(event), event_size)
return buff, event_size
def get_directory_handle(path):
"""Returns a Windows handle to the specified directory path."""
return CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS,
None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None)
def close_directory_handle(handle):
try:
CancelIoEx(handle, None) # force ReadDirectoryChangesW to return
CloseHandle(handle) # close directory handle
except WindowsError:
try:
CloseHandle(handle) # close directory handle
except Exception:
return
def read_directory_changes(handle, path, recursive):
"""Read changes to the directory using the specified directory handle.
http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html
"""
event_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
nbytes = ctypes.wintypes.DWORD()
try:
ReadDirectoryChangesW(handle, ctypes.byref(event_buffer),
len(event_buffer), recursive,
WATCHDOG_FILE_NOTIFY_FLAGS,
ctypes.byref(nbytes), None, None)
except WindowsError as e:
if e.winerror == ERROR_OPERATION_ABORTED:
return [], 0
# Handle the case when the root path is deleted
if _is_observed_path_deleted(handle, path):
return _generate_observed_path_deleted_event()
raise e
# Python 2/3 compat
try:
int_class = long
except NameError:
int_class = int
return event_buffer.raw, int_class(nbytes.value)
class WinAPINativeEvent(object):
def __init__(self, action, src_path):
self.action = action
self.src_path = src_path
@property
def is_added(self):
return self.action == FILE_ACTION_CREATED
@property
def is_removed(self):
return self.action == FILE_ACTION_REMOVED
@property
def is_modified(self):
return self.action == FILE_ACTION_MODIFIED
@property
def is_renamed_old(self):
return self.action == FILE_ACTION_RENAMED_OLD_NAME
@property
def is_renamed_new(self):
return self.action == FILE_ACTION_RENAMED_NEW_NAME
@property
def is_removed_self(self):
return self.action == FILE_ACTION_REMOVED_SELF
def __repr__(self):
return ("<%s: action=%d, src_path=%r>" % (
type(self).__name__, self.action, self.src_path))
def read_events(handle, path, recursive):
buf, nbytes = read_directory_changes(handle, path, recursive)
events = _parse_event_buffer(buf, nbytes)
return [WinAPINativeEvent(action, src_path) for action, src_path in events]
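# --- Editor's note: a hedged sketch, not part of the upstream module
# (Windows only). It exercises the low-level API above: open a directory
# handle, block until something changes, and print the parsed native events.
# The watched path is an illustrative assumption.
if __name__ == '__main__':
    watched = u'C:\\temp'
    handle = get_directory_handle(watched)
    try:
        # read_events blocks inside ReadDirectoryChangesW until a change
        # occurs or the I/O is cancelled by close_directory_handle.
        for event in read_events(handle, watched, recursive=True):
            print(event)
    finally:
        close_directory_handle(handle)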
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/polling.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: [email protected] (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
from __future__ import with_statement
import threading
from functools import partial
from watchdog.utils import stat as default_stat
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent
)
try:
from os import scandir
except ImportError:
from os import listdir as scandir
class PollingEmitter(EventEmitter):
"""
Platform-independent emitter that polls a directory to detect file
system changes.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=default_stat, listdir=scandir):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._snapshot = None
self._lock = threading.Lock()
self._take_snapshot = lambda: DirectorySnapshot(
self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
def on_thread_start(self):
self._snapshot = self._take_snapshot()
def queue_events(self, timeout):
# We don't want to hit the disk continuously.
# timeout behaves like an interval for polling emitters.
if self.stopped_event.wait(timeout):
return
with self._lock:
if not self.should_keep_running():
return
# Get event diff between fresh snapshot and previous snapshot.
# Update snapshot.
try:
new_snapshot = self._take_snapshot()
except OSError:
self.queue_event(DirDeletedEvent(self.watch.path))
self.stop()
return
events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
self._snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
"""
Platform-independent observer that polls a directory to detect file
system changes.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
"""
File system independent observer that polls a directory to detect changes.
"""
def __init__(self, stat, listdir, polling_interval=1):
"""
:param stat: stat function. See ``os.stat`` for details.
:param listdir: listdir function. See ``os.listdir`` for details.
:type polling_interval: float
:param polling_interval: interval in seconds between polling the file system.
"""
emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
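# --- Editor's note: a hedged usage sketch, not part of the upstream module.
# PollingObserverVFS accepts any stat/listdir pair; passing the plain os
# functions, as here, makes it behave like PollingObserver with a 1-second
# interval. The watched path and run time are illustrative assumptions.
if __name__ == '__main__':
    import os
    import time
    from watchdog.events import LoggingEventHandler
    observer = PollingObserverVFS(stat=os.stat, listdir=os.listdir,
                                  polling_interval=1)
    observer.schedule(LoggingEventHandler(), '.', recursive=True)
    observer.start()
    try:
        time.sleep(5)
    finally:
        observer.stop()
        observer.join()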
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/inotify.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <[email protected]>
:author: Luke McCarthy <[email protected]>
:author: [email protected] (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <[email protected]>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
    Therefore, you must ensure the system is running at least these
    versions of the appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
"""
inotify(7)-based event emitter.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._inotify = None
def on_thread_start(self):
path = unicode_paths.encode(self.watch.path)
self._inotify = InotifyBuffer(path, self.watch.is_recursive)
def on_thread_stop(self):
if self._inotify:
self._inotify.close()
def queue_events(self, timeout, full_events=False):
# If "full_events" is true, then the method will report unmatched move events as separate events
# This behavior is by default only called by a InotifyFullEmitter
with self._lock:
event = self._inotify.read_event()
if event is None:
return
if isinstance(event, tuple):
move_from, move_to = event
src_path = self._decode_path(move_from.src_path)
dest_path = self._decode_path(move_to.src_path)
cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
self.queue_event(cls(src_path, dest_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
if move_from.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_event)
return
src_path = self._decode_path(event.src_path)
if event.is_moved_to:
if full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(None, src_path))
else:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
if event.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_created_events(src_path):
self.queue_event(sub_event)
elif event.is_attrib:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_modify:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_delete or (event.is_moved_from and not full_events):
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_moved_from and full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(src_path, None))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_create:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
def _decode_path(self, path):
""" Decode path only if unicode string was passed to this emitter. """
if isinstance(self.watch.path, bytes):
return path
return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
"""
    inotify(7)-based event emitter. By default this class produces move
    events even if they are not matched. Such move events will have a
    ``None`` value for the unmatched part.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
InotifyEmitter.__init__(self, event_queue, watch, timeout)
def queue_events(self, timeout, events=True):
InotifyEmitter.queue_events(self, timeout, full_events=events)
class InotifyObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
if (generate_full_events):
BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
else:
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
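# --- Editor's note: a hedged usage sketch, not part of the upstream module
# (Linux only). With generate_full_events=True the observer uses
# InotifyFullEmitter, so unmatched halves of a move arrive as Moved events
# with a ``None`` src or dest path. The watched path is an assumption.
if __name__ == '__main__':
    import time
    from watchdog.events import LoggingEventHandler
    observer = InotifyObserver(generate_full_events=False)
    observer.schedule(LoggingEventHandler(), '/tmp', recursive=True)
    observer.start()
    try:
        time.sleep(5)
    finally:
        observer.stop()
        observer.join()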
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/fsevents2.py | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.fsevents2
:synopsis: FSEvents based emitter implementation.
:platforms: Mac OS X
"""
import os
import logging
import unicodedata
from threading import Thread
from watchdog.utils.compat import queue
from watchdog.events import (
FileDeletedEvent,
FileModifiedEvent,
FileCreatedEvent,
FileMovedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirCreatedEvent,
DirMovedEvent
)
from watchdog.observers.api import (
BaseObserver,
EventEmitter,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT,
)
# pyobjc
import AppKit
from FSEvents import (
FSEventStreamCreate,
CFRunLoopGetCurrent,
FSEventStreamScheduleWithRunLoop,
FSEventStreamStart,
CFRunLoopRun,
CFRunLoopStop,
FSEventStreamStop,
FSEventStreamInvalidate,
FSEventStreamRelease,
)
from FSEvents import (
kCFAllocatorDefault,
kCFRunLoopDefaultMode,
kFSEventStreamEventIdSinceNow,
kFSEventStreamCreateFlagNoDefer,
kFSEventStreamCreateFlagFileEvents,
kFSEventStreamEventFlagItemCreated,
kFSEventStreamEventFlagItemRemoved,
kFSEventStreamEventFlagItemInodeMetaMod,
kFSEventStreamEventFlagItemRenamed,
kFSEventStreamEventFlagItemModified,
kFSEventStreamEventFlagItemFinderInfoMod,
kFSEventStreamEventFlagItemChangeOwner,
kFSEventStreamEventFlagItemXattrMod,
kFSEventStreamEventFlagItemIsDir,
kFSEventStreamEventFlagItemIsSymlink,
)
logger = logging.getLogger(__name__)
class FSEventsQueue(Thread):
""" Low level FSEvents client. """
def __init__(self, path):
Thread.__init__(self)
self._queue = queue.Queue()
self._run_loop = None
if isinstance(path, bytes):
path = path.decode('utf-8')
self._path = unicodedata.normalize('NFC', path)
context = None
latency = 1.0
self._stream_ref = FSEventStreamCreate(
kCFAllocatorDefault, self._callback, context, [self._path],
kFSEventStreamEventIdSinceNow, latency,
kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents)
if self._stream_ref is None:
raise IOError("FSEvents. Could not create stream.")
def run(self):
pool = AppKit.NSAutoreleasePool.alloc().init()
self._run_loop = CFRunLoopGetCurrent()
FSEventStreamScheduleWithRunLoop(
self._stream_ref, self._run_loop, kCFRunLoopDefaultMode)
if not FSEventStreamStart(self._stream_ref):
FSEventStreamInvalidate(self._stream_ref)
FSEventStreamRelease(self._stream_ref)
raise IOError("FSEvents. Could not start stream.")
CFRunLoopRun()
FSEventStreamStop(self._stream_ref)
FSEventStreamInvalidate(self._stream_ref)
FSEventStreamRelease(self._stream_ref)
del pool
# Make sure waiting thread is notified
self._queue.put(None)
def stop(self):
if self._run_loop is not None:
CFRunLoopStop(self._run_loop)
def _callback(self, streamRef, clientCallBackInfo, numEvents, eventPaths, eventFlags, eventIDs):
events = [NativeEvent(path, flags, _id) for path, flags, _id in
zip(eventPaths, eventFlags, eventIDs)]
logger.debug("FSEvents callback. Got %d events:" % numEvents)
for e in events:
logger.debug(e)
self._queue.put(events)
def read_events(self):
"""
        Returns a list of one or more events, or None if there are no more
        events to be read.
"""
if not self.is_alive():
return None
return self._queue.get()
class NativeEvent(object):
def __init__(self, path, flags, event_id):
self.path = path
self.flags = flags
self.event_id = event_id
self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated)
self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved)
self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed)
self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified)
self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner)
self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod)
self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod)
self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod)
self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink)
self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir)
@property
def _event_type(self):
if self.is_created:
return "Created"
if self.is_removed:
return "Removed"
if self.is_renamed:
return "Renamed"
if self.is_modified:
return "Modified"
if self.is_inode_meta_mod:
return "InodeMetaMod"
if self.is_xattr_mod:
return "XattrMod"
return "Unknown"
def __repr__(self):
s = "<%s: path=%s, type=%s, is_dir=%s, flags=%s, id=%s>"
return s % (type(self).__name__, repr(self.path), self._event_type,
self.is_directory, hex(self.flags), self.event_id)
class FSEventsEmitter(EventEmitter):
"""
FSEvents based event emitter. Handles conversion of native events.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._fsevents = FSEventsQueue(watch.path)
self._fsevents.start()
def on_thread_stop(self):
self._fsevents.stop()
def queue_events(self, timeout):
events = self._fsevents.read_events()
if events is None:
return
i = 0
while i < len(events):
event = events[i]
# For some reason the create and remove flags are sometimes also
# set for rename and modify type events, so let those take
# precedence.
if event.is_renamed:
                # Internal moves appear to always be consecutive in the same
                # buffer and have IDs that differ by exactly one (while others
                # don't), making it possible to pair up the two events coming
                # from a single move operation. (None of this is documented!)
# Otherwise, guess whether file was moved in or out.
# TODO: handle id wrapping
if (i + 1 < len(events) and events[i + 1].is_renamed
and events[i + 1].event_id == event.event_id + 1):
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(event.path, events[i + 1].path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path)))
i += 1
elif os.path.exists(event.path):
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
else:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
# TODO: generate events for tree
            elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(event.path))
elif event.is_created:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
elif event.is_removed:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
i += 1
class FSEventsObserver2(BaseObserver):
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout)
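# --- Editor's note: a hedged sketch, not part of the upstream module
# (macOS only; requires pyobjc). The low-level FSEventsQueue can be driven
# directly to read batches of NativeEvent objects without an observer.
if __name__ == '__main__':
    q = FSEventsQueue(b'/tmp')   # the path is an illustrative assumption
    q.start()
    try:
        batch = q.read_events()  # blocks until FSEvents delivers a batch
        for native_event in batch or []:
            print(native_event)
    finally:
        q.stop()
        q.join()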
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/kqueue.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.kqueue
:synopsis: ``kqueue(2)`` based emitter implementation.
:author: [email protected] (Yesudeep Mangalapilly)
:platforms: Mac OS X and BSD with kqueue(2).
.. WARNING:: kqueue is a very heavyweight way to monitor file systems.
Each kqueue-detected directory modification triggers
a full directory scan. Traversing the entire directory tree
and opening file descriptors for all files will create
             performance problems. Perhaps we need to find a way to
             re-scan only those directories which report changes, and
             diff two sub-DirectorySnapshots.
.. ADMONITION:: About OS X performance guidelines
Quote from the `Mac OS X File System Performance Guidelines`_:
"When you only want to track changes on a file or directory, be sure to
open it using the ``O_EVTONLY`` flag. This flag prevents the file or
directory from being marked as open or in use. This is important
if you are tracking files on a removable volume and the user tries to
unmount the volume. With this flag in place, the system knows it can
dismiss the volume. If you had opened the files or directories without
this flag, the volume would be marked as busy and would not be
unmounted."
``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files.
More information here: http://www.mlsite.net/blog/?p=2312
Classes
-------
.. autoclass:: KqueueEmitter
:members:
:show-inheritance:
Collections and Utility Classes
-------------------------------
.. autoclass:: KeventDescriptor
:members:
:show-inheritance:
.. autoclass:: KeventDescriptorSet
:members:
:show-inheritance:
.. _Mac OS X File System Performance Guidelines:
http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD
"""
from __future__ import with_statement
from watchdog.utils import platform
import threading
import errno
from stat import S_ISDIR
import os
import os.path
import select
from pathtools.path import absolute_path
from watchdog.observers.api import (
BaseObserver,
EventEmitter,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.utils import stat as default_stat
from watchdog.utils.dirsnapshot import DirectorySnapshot
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent,
EVENT_TYPE_MOVED,
EVENT_TYPE_DELETED,
EVENT_TYPE_CREATED,
generate_sub_moved_events,
)
# Maximum number of events to process.
MAX_EVENTS = 4096
# O_EVTONLY value from the header files for OS X only.
O_EVTONLY = 0x8000
# Pre-calculated values for the kevent filter, flags, and fflags attributes.
if platform.is_darwin():
WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY
else:
WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK
WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE
WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR
WATCHDOG_KQ_FFLAGS = (
select.KQ_NOTE_DELETE
| select.KQ_NOTE_WRITE
| select.KQ_NOTE_EXTEND
| select.KQ_NOTE_ATTRIB
| select.KQ_NOTE_LINK
| select.KQ_NOTE_RENAME
| select.KQ_NOTE_REVOKE
)
# Flag tests.
def is_deleted(kev):
"""Determines whether the given kevent represents deletion."""
return kev.fflags & select.KQ_NOTE_DELETE
def is_modified(kev):
"""Determines whether the given kevent represents modification."""
fflags = kev.fflags
return (fflags & select.KQ_NOTE_EXTEND) or (fflags & select.KQ_NOTE_WRITE)
def is_attrib_modified(kev):
"""Determines whether the given kevent represents attribute modification."""
return kev.fflags & select.KQ_NOTE_ATTRIB
def is_renamed(kev):
"""Determines whether the given kevent represents movement."""
return kev.fflags & select.KQ_NOTE_RENAME
class KeventDescriptorSet(object):
"""
Thread-safe kevent descriptor collection.
"""
def __init__(self):
# Set of KeventDescriptor
self._descriptors = set()
# Descriptor for a given path.
self._descriptor_for_path = dict()
# Descriptor for a given fd.
self._descriptor_for_fd = dict()
# List of kevent objects.
self._kevents = list()
self._lock = threading.Lock()
@property
def kevents(self):
"""
List of kevents monitored.
"""
with self._lock:
return self._kevents
@property
def paths(self):
"""
List of paths for which kevents have been created.
"""
with self._lock:
return list(self._descriptor_for_path.keys())
def get_for_fd(self, fd):
"""
Given a file descriptor, returns the kevent descriptor object
for it.
:param fd:
OS file descriptor.
:type fd:
``int``
:returns:
A :class:`KeventDescriptor` object.
"""
with self._lock:
return self._descriptor_for_fd[fd]
def get(self, path):
"""
Obtains a :class:`KeventDescriptor` object for the specified path.
:param path:
Path for which the descriptor will be obtained.
"""
with self._lock:
path = absolute_path(path)
return self._get(path)
def __contains__(self, path):
"""
        Determines whether a :class:`KeventDescriptor` has been registered
for the specified path.
:param path:
Path for which the descriptor will be obtained.
"""
with self._lock:
path = absolute_path(path)
return self._has_path(path)
def add(self, path, is_directory):
"""
Adds a :class:`KeventDescriptor` to the collection for the given
path.
:param path:
The path for which a :class:`KeventDescriptor` object will be
added.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
with self._lock:
path = absolute_path(path)
if not self._has_path(path):
self._add_descriptor(KeventDescriptor(path, is_directory))
def remove(self, path):
"""
Removes the :class:`KeventDescriptor` object for the given path
if it already exists.
:param path:
Path for which the :class:`KeventDescriptor` object will be
removed.
"""
with self._lock:
path = absolute_path(path)
if self._has_path(path):
self._remove_descriptor(self._get(path))
def clear(self):
"""
Clears the collection and closes all open descriptors.
"""
with self._lock:
for descriptor in self._descriptors:
descriptor.close()
self._descriptors.clear()
self._descriptor_for_fd.clear()
self._descriptor_for_path.clear()
self._kevents = []
# Thread-unsafe methods. Locking is provided at a higher level.
def _get(self, path):
"""Returns a kevent descriptor for a given path."""
return self._descriptor_for_path[path]
def _has_path(self, path):
"""Determines whether a :class:`KeventDescriptor` for the specified
path exists already in the collection."""
return path in self._descriptor_for_path
def _add_descriptor(self, descriptor):
"""
Adds a descriptor to the collection.
:param descriptor:
An instance of :class:`KeventDescriptor` to be added.
"""
self._descriptors.add(descriptor)
self._kevents.append(descriptor.kevent)
self._descriptor_for_path[descriptor.path] = descriptor
self._descriptor_for_fd[descriptor.fd] = descriptor
def _remove_descriptor(self, descriptor):
"""
Removes a descriptor from the collection.
:param descriptor:
An instance of :class:`KeventDescriptor` to be removed.
"""
self._descriptors.remove(descriptor)
del self._descriptor_for_fd[descriptor.fd]
del self._descriptor_for_path[descriptor.path]
self._kevents.remove(descriptor.kevent)
descriptor.close()
class KeventDescriptor(object):
"""
A kevent descriptor convenience data structure to keep together:
* kevent
* directory status
* path
* file descriptor
:param path:
Path string for which a kevent descriptor will be created.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
def __init__(self, path, is_directory):
self._path = absolute_path(path)
self._is_directory = is_directory
self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)
self._kev = select.kevent(self._fd,
filter=WATCHDOG_KQ_FILTER,
flags=WATCHDOG_KQ_EV_FLAGS,
fflags=WATCHDOG_KQ_FFLAGS)
@property
def fd(self):
"""OS file descriptor for the kevent descriptor."""
return self._fd
@property
def path(self):
"""The path associated with the kevent descriptor."""
return self._path
@property
def kevent(self):
"""The kevent object associated with the kevent descriptor."""
return self._kev
@property
def is_directory(self):
"""Determines whether the kevent descriptor refers to a directory.
:returns:
``True`` or ``False``
"""
return self._is_directory
def close(self):
"""
Closes the file descriptor associated with a kevent descriptor.
"""
try:
os.close(self.fd)
except OSError:
pass
@property
def key(self):
return (self.path, self.is_directory)
def __eq__(self, descriptor):
return self.key == descriptor.key
def __ne__(self, descriptor):
return self.key != descriptor.key
def __hash__(self):
return hash(self.key)
def __repr__(self):
return "<%s: path=%s, is_directory=%s>"\
% (type(self).__name__, self.path, self.is_directory)
class KqueueEmitter(EventEmitter):
"""
kqueue(2)-based event emitter.
.. ADMONITION:: About ``kqueue(2)`` behavior and this implementation
``kqueue(2)`` monitors file system events only for
        open descriptors, which means this emitter does a lot of
book-keeping behind the scenes to keep track of open
descriptors for every entry in the monitored directory tree.
This also means the number of maximum open file descriptors
on your system must be increased **manually**.
Usually, issuing a call to ``ulimit`` should suffice::
ulimit -n 1024
Ensure that you pick a number that is larger than the
number of files you expect to be monitored.
``kqueue(2)`` does not provide enough information about the
following things:
* The destination path of a file or directory that is renamed.
* Creation of a file or directory within a directory; in this
case, ``kqueue(2)`` only indicates a modified event on the
parent directory.
Therefore, this emitter takes a snapshot of the directory
tree when ``kqueue(2)`` detects a change on the file system
to be able to determine the above information.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
:param stat: stat function. See ``os.stat`` for details.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=default_stat):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._kq = select.kqueue()
self._lock = threading.RLock()
# A collection of KeventDescriptor.
self._descriptors = KeventDescriptorSet()
def custom_stat(path, self=self):
stat_info = stat(path)
self._register_kevent(path, S_ISDIR(stat_info.st_mode))
return stat_info
self._snapshot = DirectorySnapshot(watch.path,
recursive=watch.is_recursive,
stat=custom_stat)
def _register_kevent(self, path, is_directory):
"""
Registers a kevent descriptor for the given path.
:param path:
Path for which a kevent descriptor will be created.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
try:
self._descriptors.add(path, is_directory)
except OSError as e:
if e.errno == errno.ENOENT:
# Probably dealing with a temporary file that was created
# and then quickly deleted before we could open
# a descriptor for it. Therefore, simply queue a sequence
# of created and deleted events for the path.
# path = absolute_path(path)
# if is_directory:
# self.queue_event(DirCreatedEvent(path))
# self.queue_event(DirDeletedEvent(path))
# else:
# self.queue_event(FileCreatedEvent(path))
# self.queue_event(FileDeletedEvent(path))
# TODO: We could simply ignore these files.
# Locked files cause the python process to die with
# a bus error when we handle temporary files.
# eg. .git/index.lock when running tig operations.
# I don't fully understand this at the moment.
pass
elif e.errno == errno.EOPNOTSUPP:
# Probably dealing with the socket or special file
# mounted through a file system that does not support
# access to it (e.g. NFS). On BSD systems look at
# EOPNOTSUPP in man 2 open.
pass
else:
# All other errors are propagated.
raise
def _unregister_kevent(self, path):
"""
Convenience function to close the kevent descriptor for a
specified kqueue-monitored path.
:param path:
Path for which the kevent descriptor will be closed.
"""
self._descriptors.remove(path)
def queue_event(self, event):
"""
Handles queueing a single event object.
:param event:
An instance of :class:`watchdog.events.FileSystemEvent`
or a subclass.
"""
# Handles all the book keeping for queued events.
# We do not need to fire moved/deleted events for all subitems in
# a directory tree here, because this function is called by kqueue
# for all those events anyway.
EventEmitter.queue_event(self, event)
if event.event_type == EVENT_TYPE_CREATED:
self._register_kevent(event.src_path, event.is_directory)
elif event.event_type == EVENT_TYPE_MOVED:
self._unregister_kevent(event.src_path)
self._register_kevent(event.dest_path, event.is_directory)
elif event.event_type == EVENT_TYPE_DELETED:
self._unregister_kevent(event.src_path)
def _gen_kqueue_events(self,
kev,
ref_snapshot,
new_snapshot):
"""
Generate events from the kevent list returned from the call to
:meth:`select.kqueue.control`.
.. NOTE:: kqueue only tells us about deletions, file modifications,
                  and attribute modifications. The other events, namely,
file creation, directory modification, file rename,
directory rename, directory creation, etc. are
determined by comparing directory snapshots.
"""
descriptor = self._descriptors.get_for_fd(kev.ident)
src_path = descriptor.path
if is_renamed(kev):
            # Kqueue does not tell us the destination name of a rename,
            # so we have to determine it using a snapshot
            # of the directory.
for event in self._gen_renamed_events(src_path,
descriptor.is_directory,
ref_snapshot,
new_snapshot):
yield event
elif is_attrib_modified(kev):
if descriptor.is_directory:
yield DirModifiedEvent(src_path)
else:
yield FileModifiedEvent(src_path)
elif is_modified(kev):
if descriptor.is_directory:
if self.watch.is_recursive or self.watch.path == src_path:
# When a directory is modified, it may be due to
# sub-file/directory renames or new file/directory
# creation. We determine all this by comparing
# snapshots later.
yield DirModifiedEvent(src_path)
else:
yield FileModifiedEvent(src_path)
elif is_deleted(kev):
if descriptor.is_directory:
yield DirDeletedEvent(src_path)
else:
yield FileDeletedEvent(src_path)
def _parent_dir_modified(self, src_path):
"""
Helper to generate a DirModifiedEvent on the parent of src_path.
"""
return DirModifiedEvent(os.path.dirname(src_path))
def _gen_renamed_events(self,
src_path,
is_directory,
ref_snapshot,
new_snapshot):
"""
Compares information from two directory snapshots (one taken before
the rename operation and another taken right after) to determine the
destination path of the file system object renamed, and yields
the appropriate events to be queued.
"""
try:
f_inode = ref_snapshot.inode(src_path)
except KeyError:
# Probably caught a temporary file/directory that was renamed
# and deleted. Fires a sequence of created and deleted events
# for the path.
if is_directory:
yield DirCreatedEvent(src_path)
yield DirDeletedEvent(src_path)
else:
yield FileCreatedEvent(src_path)
yield FileDeletedEvent(src_path)
# We don't process any further and bail out assuming
# the event represents deletion/creation instead of movement.
return
dest_path = new_snapshot.path(f_inode)
if dest_path is not None:
dest_path = absolute_path(dest_path)
if is_directory:
event = DirMovedEvent(src_path, dest_path)
yield event
else:
yield FileMovedEvent(src_path, dest_path)
yield self._parent_dir_modified(src_path)
yield self._parent_dir_modified(dest_path)
if is_directory:
# TODO: Do we need to fire moved events for the items
                # inside the directory tree? Does kqueue do this
# all by itself? Check this and then enable this code
# only if it doesn't already.
# A: It doesn't. So I've enabled this block.
if self.watch.is_recursive:
for sub_event in generate_sub_moved_events(src_path, dest_path):
yield sub_event
else:
# If the new snapshot does not have an inode for the
# old path, we haven't found the new name. Therefore,
            # we mark it as deleted and unregister the path.
if is_directory:
yield DirDeletedEvent(src_path)
else:
yield FileDeletedEvent(src_path)
yield self._parent_dir_modified(src_path)
def _read_events(self, timeout=None):
"""
Reads events from a call to the blocking
:meth:`select.kqueue.control()` method.
:param timeout:
Blocking timeout for reading events.
:type timeout:
``float`` (seconds)
"""
return self._kq.control(self._descriptors.kevents,
MAX_EVENTS,
timeout)
def queue_events(self, timeout):
"""
Queues events by reading them from a call to the blocking
:meth:`select.kqueue.control()` method.
:param timeout:
Blocking timeout for reading events.
:type timeout:
``float`` (seconds)
"""
with self._lock:
try:
event_list = self._read_events(timeout)
# TODO: investigate why order appears to be reversed
event_list.reverse()
# Take a fresh snapshot of the directory and update the
# saved snapshot.
new_snapshot = DirectorySnapshot(self.watch.path,
self.watch.is_recursive)
ref_snapshot = self._snapshot
self._snapshot = new_snapshot
diff_events = new_snapshot - ref_snapshot
# Process events
for directory_created in diff_events.dirs_created:
self.queue_event(DirCreatedEvent(directory_created))
for file_created in diff_events.files_created:
self.queue_event(FileCreatedEvent(file_created))
for file_modified in diff_events.files_modified:
self.queue_event(FileModifiedEvent(file_modified))
for kev in event_list:
for event in self._gen_kqueue_events(kev,
ref_snapshot,
new_snapshot):
self.queue_event(event)
except OSError as e:
if e.errno != errno.EBADF:
raise
def on_thread_stop(self):
# Clean up.
with self._lock:
self._descriptors.clear()
self._kq.close()
class KqueueObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=KqueueEmitter, timeout=timeout)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers
:synopsis: Observer that picks a native implementation if available.
:author: [email protected] (Yesudeep Mangalapilly)
Classes
=======
.. autoclass:: Observer
:members:
:show-inheritance:
:inherited-members:
Observer thread that schedules watching directories and dispatches
calls to event handlers.
You can also import platform-specific classes directly and use them instead
of :class:`Observer`. Here is a list of the implemented observer classes:
============== ================================ ==============================
Class Platforms Note
============== ================================ ==============================
|Inotify| Linux 2.6.13+ ``inotify(7)`` based observer
|FSEvents| Mac OS X FSEvents based observer
|Kqueue| Mac OS X and BSD with kqueue(2) ``kqueue(2)`` based observer
|WinApi| MS Windows Windows API-based observer
|Polling| Any fallback implementation
============== ================================ ==============================
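A minimal usage sketch (``LoggingEventHandler`` is the stock logging handler
from :mod:`watchdog.events`; the path is illustrative)::

    from watchdog.events import LoggingEventHandler

    observer = Observer()
    observer.schedule(LoggingEventHandler(), path='.', recursive=True)
    observer.start()
    # ... do work, then shut down:
    observer.stop()
    observer.join()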
.. |Inotify| replace:: :class:`.inotify.InotifyObserver`
.. |FSEvents| replace:: :class:`.fsevents.FSEventsObserver`
.. |Kqueue| replace:: :class:`.kqueue.KqueueObserver`
.. |WinApi| replace:: :class:`.read_directory_changes.WindowsApiObserver`
.. |WinApiAsync| replace:: :class:`.read_directory_changes_async.WindowsApiAsyncObserver`
.. |Polling| replace:: :class:`.polling.PollingObserver`
"""
import warnings
from watchdog.utils import platform
from watchdog.utils import UnsupportedLibc
if platform.is_linux():
try:
from .inotify import InotifyObserver as Observer
except UnsupportedLibc:
from .polling import PollingObserver as Observer
elif platform.is_darwin():
try:
from .fsevents import FSEventsObserver as Observer
except Exception:
try:
from .kqueue import KqueueObserver as Observer
warnings.warn("Failed to import fsevents. Fall back to kqueue")
except Exception:
from .polling import PollingObserver as Observer
warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.")
elif platform.is_bsd():
from .kqueue import KqueueObserver as Observer
elif platform.is_windows():
# TODO: find a reliable way of checking Windows version and import
# polling explicitly for Windows XP
try:
from .read_directory_changes import WindowsApiObserver as Observer
except Exception:
from .polling import PollingObserver as Observer
warnings.warn("Failed to import read_directory_changes. Fall back to polling.")
else:
from .polling import PollingObserver as Observer
__all__ = ["Observer"]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/read_directory_changes.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
# Copyright 2014 Thomas Amland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import os.path
import time
from watchdog.events import (
DirCreatedEvent,
DirMovedEvent,
DirModifiedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileMovedEvent,
FileModifiedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.observers.winapi import (
read_events,
get_directory_handle,
close_directory_handle,
)
# HACK:
WATCHDOG_TRAVERSE_MOVED_DIR_DELAY = 1 # seconds
class WindowsApiEmitter(EventEmitter):
"""
Windows API-based emitter that uses ReadDirectoryChangesW
to detect file system changes for a watch.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._handle = None
def on_thread_start(self):
self._handle = get_directory_handle(self.watch.path)
def on_thread_stop(self):
if self._handle:
close_directory_handle(self._handle)
def _read_events(self):
return read_events(self._handle, self.watch.path, self.watch.is_recursive)
def queue_events(self, timeout):
winapi_events = self._read_events()
with self._lock:
last_renamed_src_path = ""
for winapi_event in winapi_events:
src_path = os.path.join(self.watch.path, winapi_event.src_path)
if winapi_event.is_renamed_old:
last_renamed_src_path = src_path
elif winapi_event.is_renamed_new:
dest_path = src_path
src_path = last_renamed_src_path
if os.path.isdir(dest_path):
event = DirMovedEvent(src_path, dest_path)
if self.watch.is_recursive:
                            # HACK: We introduce a forced delay before
                            # traversing the moved directory. Only moves
                            # that finish within this delay will be
                            # observed when the tree is read.
time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY)
# The following block of code may not
# obtain moved events for the entire tree if
# the I/O is not completed within the above
# delay time. So, it's not guaranteed to work.
# TODO: Come up with a better solution, possibly
# a way to wait for I/O to complete before
# queuing events.
for sub_moved_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_moved_event)
self.queue_event(event)
else:
self.queue_event(FileMovedEvent(src_path, dest_path))
elif winapi_event.is_modified:
cls = DirModifiedEvent if os.path.isdir(src_path) else FileModifiedEvent
self.queue_event(cls(src_path))
elif winapi_event.is_added:
isdir = os.path.isdir(src_path)
cls = DirCreatedEvent if isdir else FileCreatedEvent
self.queue_event(cls(src_path))
if isdir and self.watch.is_recursive:
# If a directory is moved from outside the watched folder to inside it
# we only get a created directory event out of it, not any events for its children
# so use the same hack as for file moves to get the child events
time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY)
sub_events = generate_sub_created_events(src_path)
for sub_created_event in sub_events:
self.queue_event(sub_created_event)
elif winapi_event.is_removed:
self.queue_event(FileDeletedEvent(src_path))
elif winapi_event.is_removed_self:
self.stop()
class WindowsApiObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=WindowsApiEmitter,
timeout=timeout)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/api.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import threading
from watchdog.utils import BaseThread
from watchdog.utils.compat import queue
from watchdog.utils.bricks import SkipRepeatsQueue
try:
from pathlib import Path as _PATH_CLASSES
except ImportError:
_PATH_CLASSES = ()
DEFAULT_EMITTER_TIMEOUT = 1 # in seconds.
DEFAULT_OBSERVER_TIMEOUT = 1 # in seconds.
# Collection classes
class EventQueue(SkipRepeatsQueue):
"""Thread-safe event queue based on a special queue that skips adding
the same event (:class:`FileSystemEvent`) multiple times consecutively.
    This avoids dispatching multiple event handling calls when identical
    events are produced faster than an observer can consume them.
"""
class ObservedWatch(object):
"""An scheduled watch.
:param path:
Path string.
:param recursive:
``True`` if watch is recursive; ``False`` otherwise.
"""
def __init__(self, path, recursive):
if isinstance(path, _PATH_CLASSES):
self._path = str(path)
else:
self._path = path
self._is_recursive = recursive
@property
def path(self):
"""The path that this watch monitors."""
return self._path
@property
def is_recursive(self):
"""Determines whether subdirectories are watched for the path."""
return self._is_recursive
@property
def key(self):
return self.path, self.is_recursive
def __eq__(self, watch):
return self.key == watch.key
def __ne__(self, watch):
return self.key != watch.key
def __hash__(self):
return hash(self.key)
def __repr__(self):
return "<%s: path=%s, is_recursive=%s>" % (
type(self).__name__, self.path, self.is_recursive)
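# For example (illustrative):
#
#     >>> ObservedWatch('/tmp', recursive=True)
#     <ObservedWatch: path=/tmp, is_recursive=True>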
# Observer classes
class EventEmitter(BaseThread):
"""
Producer thread base class subclassed by event emitters
that generate events and populate a queue with them.
:param event_queue:
The event queue to populate with generated events.
:type event_queue:
:class:`watchdog.events.EventQueue`
:param watch:
The watch to observe and produce events for.
:type watch:
:class:`ObservedWatch`
:param timeout:
Timeout (in seconds) between successive attempts at reading events.
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
BaseThread.__init__(self)
self._event_queue = event_queue
self._watch = watch
self._timeout = timeout
@property
def timeout(self):
"""
Blocking timeout for reading events.
"""
return self._timeout
@property
def watch(self):
"""
The watch associated with this emitter.
"""
return self._watch
def queue_event(self, event):
"""
Queues a single event.
:param event:
Event to be queued.
:type event:
An instance of :class:`watchdog.events.FileSystemEvent`
or a subclass.
"""
self._event_queue.put((event, self.watch))
def queue_events(self, timeout):
"""Override this method to populate the event queue with events
per interval period.
:param timeout:
Timeout (in seconds) between successive attempts at
reading events.
:type timeout:
``float``
"""
def run(self):
while self.should_keep_running():
self.queue_events(self.timeout)
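# A minimal emitter sketch (illustrative only; not part of watchdog, and it
# assumes ``import time``): subclasses implement ``queue_events`` and push
# whatever they detect via ``queue_event``.
#
#     class NoopEmitter(EventEmitter):
#         def queue_events(self, timeout):
#             # A real emitter would poll or block on an OS facility here
#             # for up to ``timeout`` seconds and queue_event() each change;
#             # this sketch detects nothing.
#             time.sleep(timeout)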
class EventDispatcher(BaseThread):
"""
Consumer thread base class subclassed by event observer threads
that dispatch events from an event queue to appropriate event handlers.
:param timeout:
Event queue blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseThread.__init__(self)
self._event_queue = EventQueue()
self._timeout = timeout
@property
def timeout(self):
"""Event queue block timeout."""
return self._timeout
@property
def event_queue(self):
"""The event queue which is populated with file system events
by emitters and from which events are dispatched by a dispatcher
thread."""
return self._event_queue
def dispatch_events(self, event_queue, timeout):
"""Override this method to consume events from an event queue, blocking
on the queue for the specified timeout before raising :class:`queue.Empty`.
:param event_queue:
Event queue to populate with one set of events.
:type event_queue:
:class:`EventQueue`
:param timeout:
Interval period (in seconds) to wait before timing out on the
event queue.
:type timeout:
``float``
:raises:
:class:`queue.Empty`
"""
def run(self):
while self.should_keep_running():
try:
self.dispatch_events(self.event_queue, self.timeout)
except queue.Empty:
continue
class BaseObserver(EventDispatcher):
"""Base observer."""
def __init__(self, emitter_class, timeout=DEFAULT_OBSERVER_TIMEOUT):
EventDispatcher.__init__(self, timeout)
self._emitter_class = emitter_class
self._lock = threading.RLock()
self._watches = set()
self._handlers = dict()
self._emitters = set()
self._emitter_for_watch = dict()
def _add_emitter(self, emitter):
self._emitter_for_watch[emitter.watch] = emitter
self._emitters.add(emitter)
def _remove_emitter(self, emitter):
del self._emitter_for_watch[emitter.watch]
self._emitters.remove(emitter)
emitter.stop()
try:
emitter.join()
except RuntimeError:
pass
def _clear_emitters(self):
for emitter in self._emitters:
emitter.stop()
for emitter in self._emitters:
try:
emitter.join()
except RuntimeError:
pass
self._emitters.clear()
self._emitter_for_watch.clear()
def _add_handler_for_watch(self, event_handler, watch):
if watch not in self._handlers:
self._handlers[watch] = set()
self._handlers[watch].add(event_handler)
def _remove_handlers_for_watch(self, watch):
del self._handlers[watch]
@property
def emitters(self):
"""Returns event emitter created by this observer."""
return self._emitters
def start(self):
for emitter in self._emitters.copy():
try:
emitter.start()
except Exception:
self._remove_emitter(emitter)
raise
super(BaseObserver, self).start()
def schedule(self, event_handler, path, recursive=False):
"""
Schedules watching a path and calls appropriate methods specified
in the given event handler in response to file system events.
:param event_handler:
An event handler instance that has appropriate event handling
methods which will be called by the observer in response to
file system events.
:type event_handler:
:class:`watchdog.events.FileSystemEventHandler` or a subclass
:param path:
Directory path that will be monitored.
:type path:
``str``
:param recursive:
``True`` if events will be emitted for sub-directories
traversed recursively; ``False`` otherwise.
:type recursive:
``bool``
:return:
An :class:`ObservedWatch` object instance representing
a watch.
"""
with self._lock:
watch = ObservedWatch(path, recursive)
self._add_handler_for_watch(event_handler, watch)
# If we don't have an emitter for this watch already, create it.
if self._emitter_for_watch.get(watch) is None:
emitter = self._emitter_class(event_queue=self.event_queue,
watch=watch,
timeout=self.timeout)
self._add_emitter(emitter)
if self.is_alive():
emitter.start()
self._watches.add(watch)
return watch
def add_handler_for_watch(self, event_handler, watch):
"""Adds a handler for the given watch.
:param event_handler:
An event handler instance that has appropriate event handling
methods which will be called by the observer in response to
file system events.
:type event_handler:
:class:`watchdog.events.FileSystemEventHandler` or a subclass
:param watch:
The watch to add a handler for.
:type watch:
An instance of :class:`ObservedWatch` or a subclass of
:class:`ObservedWatch`
"""
with self._lock:
self._add_handler_for_watch(event_handler, watch)
def remove_handler_for_watch(self, event_handler, watch):
"""Removes a handler for the given watch.
:param event_handler:
An event handler instance that has appropriate event handling
methods which will be called by the observer in response to
file system events.
:type event_handler:
:class:`watchdog.events.FileSystemEventHandler` or a subclass
:param watch:
The watch to remove a handler for.
:type watch:
An instance of :class:`ObservedWatch` or a subclass of
:class:`ObservedWatch`
"""
with self._lock:
self._handlers[watch].remove(event_handler)
def unschedule(self, watch):
"""Unschedules a watch.
:param watch:
The watch to unschedule.
:type watch:
An instance of :class:`ObservedWatch` or a subclass of
:class:`ObservedWatch`
"""
with self._lock:
emitter = self._emitter_for_watch[watch]
del self._handlers[watch]
self._remove_emitter(emitter)
self._watches.remove(watch)
def unschedule_all(self):
"""Unschedules all watches and detaches all associated event
handlers."""
with self._lock:
self._handlers.clear()
self._clear_emitters()
self._watches.clear()
def on_thread_stop(self):
self.unschedule_all()
def dispatch_events(self, event_queue, timeout):
event, watch = event_queue.get(block=True, timeout=timeout)
with self._lock:
            # To allow unschedule/stop and safe removal of event handlers
            # from within event handlers themselves, check that the handler
            # is still registered before each dispatch.
for handler in list(self._handlers.get(watch, [])):
if handler in self._handlers.get(watch, []):
handler.dispatch(event)
event_queue.task_done()
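# Usage sketch for this API (illustrative; ``PlatformObserver`` stands in
# for any concrete BaseObserver subclass and ``MyHandler`` for a
# FileSystemEventHandler subclass):
#
#     observer = PlatformObserver()
#     watch = observer.schedule(MyHandler(), '/tmp/watched', recursive=True)
#     observer.start()
#     ...
#     observer.unschedule(watch)   # or observer.unschedule_all()
#     observer.stop()
#     observer.join()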
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/inotify_c.py | # -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import errno
import struct
import threading
import ctypes
import ctypes.util
from functools import reduce
from ctypes import c_int, c_char_p, c_uint32
from watchdog.utils import has_attribute
from watchdog.utils import UnsupportedLibc
from watchdog.utils.unicode_paths import decode
def _load_libc():
libc_path = None
try:
libc_path = ctypes.util.find_library('c')
except (OSError, IOError, RuntimeError):
        # Note: find_library can raise undocumented errors on some
        # platforms, e.g. on Android an IOError ("No usable temporary
        # directory found") may be raised.
pass
if libc_path is not None:
return ctypes.CDLL(libc_path)
# Fallbacks
try:
return ctypes.CDLL('libc.so')
except (OSError, IOError):
pass
try:
return ctypes.CDLL('libc.so.6')
except (OSError, IOError):
pass
# uClibc
try:
return ctypes.CDLL('libc.so.0')
except (OSError, IOError) as err:
raise err
libc = _load_libc()
if not has_attribute(libc, 'inotify_init') or \
not has_attribute(libc, 'inotify_add_watch') or \
not has_attribute(libc, 'inotify_rm_watch'):
raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name)
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
("inotify_add_watch", libc))
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
("inotify_rm_watch", libc))
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(
("inotify_init", libc))
class InotifyConstants(object):
# User-space events
IN_ACCESS = 0x00000001 # File was accessed.
IN_MODIFY = 0x00000002 # File was modified.
IN_ATTRIB = 0x00000004 # Meta-data changed.
IN_CLOSE_WRITE = 0x00000008 # Writable file was closed.
IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed.
IN_OPEN = 0x00000020 # File was opened.
IN_MOVED_FROM = 0x00000040 # File was moved from X.
IN_MOVED_TO = 0x00000080 # File was moved to Y.
IN_CREATE = 0x00000100 # Subfile was created.
IN_DELETE = 0x00000200 # Subfile was deleted.
IN_DELETE_SELF = 0x00000400 # Self was deleted.
IN_MOVE_SELF = 0x00000800 # Self was moved.
# Helper user-space events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # Close.
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves.
# Events sent by the kernel to a watch.
IN_UNMOUNT = 0x00002000 # Backing file system was unmounted.
IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed.
IN_IGNORED = 0x00008000 # File was ignored.
# Special flags.
IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory.
IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link.
IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects
IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory.
IN_ONESHOT = 0x80000000 # Only send event once.
# All user-space events.
IN_ALL_EVENTS = reduce(
lambda x, y: x | y, [
IN_ACCESS,
IN_MODIFY,
IN_ATTRIB,
IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE,
IN_OPEN,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_DELETE,
IN_CREATE,
IN_DELETE_SELF,
IN_MOVE_SELF,
])
# Flags for ``inotify_init1``
IN_CLOEXEC = 0x02000000
IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = reduce(
lambda x, y: x | y, [
InotifyConstants.IN_MODIFY,
InotifyConstants.IN_ATTRIB,
InotifyConstants.IN_MOVED_FROM,
InotifyConstants.IN_MOVED_TO,
InotifyConstants.IN_CREATE,
InotifyConstants.IN_DELETE,
InotifyConstants.IN_DELETE_SELF,
InotifyConstants.IN_DONT_FOLLOW,
])
class inotify_event_struct(ctypes.Structure):
"""
Structure representation of the inotify_event structure
(used in buffer size calculations)::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
"""
_fields_ = [('wd', c_int),
('mask', c_uint32),
('cookie', c_uint32),
('len', c_uint32),
('name', c_char_p)]
EVENT_SIZE = ctypes.sizeof(inotify_event_struct)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
class Inotify(object):
"""
Linux inotify(7) API wrapper class.
:param path:
The directory path for which we want an inotify object.
:type path:
:class:`bytes`
:param recursive:
``True`` if subdirectories should be monitored; ``False`` otherwise.
"""
def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS):
# The file descriptor associated with the inotify instance.
inotify_fd = inotify_init()
if inotify_fd == -1:
Inotify._raise_error()
self._inotify_fd = inotify_fd
self._lock = threading.Lock()
# Stores the watch descriptor for a given path.
self._wd_for_path = dict()
self._path_for_wd = dict()
self._path = path
self._event_mask = event_mask
self._is_recursive = recursive
if os.path.isdir(path):
self._add_dir_watch(path, recursive, event_mask)
else:
self._add_watch(path, event_mask)
self._moved_from_events = dict()
@property
def event_mask(self):
"""The event mask for this inotify instance."""
return self._event_mask
@property
def path(self):
"""The path associated with the inotify instance."""
return self._path
@property
def is_recursive(self):
"""Whether we are watching directories recursively."""
return self._is_recursive
@property
def fd(self):
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def clear_move_records(self):
"""Clear cached records of MOVED_FROM events"""
self._moved_from_events = dict()
def source_for_move(self, destination_event):
"""
The source path corresponding to the given MOVED_TO event.
If the source path is outside the monitored directories, None
is returned instead.
"""
if destination_event.cookie in self._moved_from_events:
return self._moved_from_events[destination_event.cookie].src_path
else:
return None
def remember_move_from_event(self, event):
"""
Save this event as the source event for future MOVED_TO events to
reference.
"""
self._moved_from_events[event.cookie] = event
def add_watch(self, path):
"""
Adds a watch for the given path.
:param path:
Path to begin monitoring.
"""
with self._lock:
self._add_watch(path, self._event_mask)
def remove_watch(self, path):
"""
Removes a watch for the given path.
:param path:
Path string for which the watch will be removed.
"""
with self._lock:
wd = self._wd_for_path.pop(path)
del self._path_for_wd[wd]
if inotify_rm_watch(self._inotify_fd, wd) == -1:
Inotify._raise_error()
def close(self):
"""
Closes the inotify instance and removes all associated watches.
"""
with self._lock:
if self._path in self._wd_for_path:
wd = self._wd_for_path[self._path]
inotify_rm_watch(self._inotify_fd, wd)
os.close(self._inotify_fd)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
"""
Reads events from inotify and yields them.
"""
# HACK: We need to traverse the directory path
# recursively and simulate events for newly
# created subdirectories/files. This will handle
# mkdir -p foobar/blah/bar; touch foobar/afile
def _recursive_simulate(src_path):
events = []
for root, dirnames, filenames in os.walk(src_path):
for dirname in dirnames:
try:
full_path = os.path.join(root, dirname)
wd_dir = self._add_watch(full_path, self._event_mask)
e = InotifyEvent(
wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
events.append(e)
except OSError:
pass
for filename in filenames:
full_path = os.path.join(root, filename)
wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
e = InotifyEvent(
wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
events.append(e)
return events
event_buffer = None
while True:
try:
event_buffer = os.read(self._inotify_fd, event_buffer_size)
except OSError as e:
if e.errno == errno.EINTR:
continue
break
with self._lock:
event_list = []
for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
if wd == -1:
continue
wd_path = self._path_for_wd[wd]
src_path = os.path.join(wd_path, name) if name else wd_path # avoid trailing slash
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_moved_from:
self.remember_move_from_event(inotify_event)
elif inotify_event.is_moved_to:
move_src_path = self.source_for_move(inotify_event)
if move_src_path in self._wd_for_path:
moved_wd = self._wd_for_path[move_src_path]
del self._wd_for_path[move_src_path]
self._wd_for_path[inotify_event.src_path] = moved_wd
self._path_for_wd[moved_wd] = inotify_event.src_path
if self.is_recursive:
for _path, _wd in self._wd_for_path.copy().items():
if _path.startswith(move_src_path + os.path.sep.encode()):
moved_wd = self._wd_for_path.pop(_path)
_move_to_path = _path.replace(move_src_path, inotify_event.src_path)
self._wd_for_path[_move_to_path] = moved_wd
self._path_for_wd[moved_wd] = _move_to_path
src_path = os.path.join(wd_path, name)
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_ignored:
# Clean up book-keeping for deleted watches.
path = self._path_for_wd.pop(wd)
if self._wd_for_path[path] == wd:
del self._wd_for_path[path]
continue
event_list.append(inotify_event)
if (self.is_recursive and inotify_event.is_directory
and inotify_event.is_create):
# TODO: When a directory from another part of the
# filesystem is moved into a watched directory, this
# will not generate events for the directory tree.
# We need to coalesce IN_MOVED_TO events and those
# IN_MOVED_TO events which don't pair up with
# IN_MOVED_FROM events should be marked IN_CREATE
# instead relative to this directory.
try:
self._add_watch(src_path, self._event_mask)
except OSError:
continue
event_list.extend(_recursive_simulate(src_path))
return event_list
# Non-synchronized methods.
def _add_dir_watch(self, path, recursive, mask):
"""
Adds a watch (optionally recursively) for the given directory path
to monitor events specified by the mask.
:param path:
Path to monitor
:param recursive:
``True`` to monitor recursively.
:param mask:
Event bit mask.
"""
if not os.path.isdir(path):
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = os.path.join(root, dirname)
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
def _add_watch(self, path, mask):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:param mask:
Event bit mask.
"""
wd = inotify_add_watch(self._inotify_fd, path, mask)
if wd == -1:
Inotify._raise_error()
self._wd_for_path[path] = wd
self._path_for_wd[wd] = path
return wd
@staticmethod
def _raise_error():
"""
Raises errors for inotify failures.
"""
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError(errno.ENOSPC, "inotify watch limit reached")
elif err == errno.EMFILE:
raise OSError(errno.EMFILE, "inotify instance limit reached")
elif err == errno.EACCES:
# Prevent raising an exception when a file with no permissions
# changes
pass
else:
raise OSError(err, os.strerror(err))
@staticmethod
def _parse_event_buffer(event_buffer):
"""
Parses an event buffer of ``inotify_event`` structs returned by
inotify::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
i = 0
while i + 16 <= len(event_buffer):
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
i += 16 + length
yield wd, mask, cookie, name
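# Worked example (illustrative): a buffer holding one IN_CREATE event for a
# file named b'foo' is a 16-byte 'iIII' header followed by the NUL-padded
# name, and parses back into its four fields:
#
#     buf = struct.pack('iIII', 1, 0x100, 0, 4) + b'foo\0'
#     list(Inotify._parse_event_buffer(buf))
#     # -> [(1, 256, 0, b'foo')]   # wd, mask (IN_CREATE), cookie, name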
class InotifyEvent(object):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param name:
Base name of the event source path.
:param src_path:
Full event source path.
"""
def __init__(self, wd, mask, cookie, name, src_path):
self._wd = wd
self._mask = mask
self._cookie = cookie
self._name = name
self._src_path = src_path
@property
def src_path(self):
return self._src_path
@property
def wd(self):
return self._wd
@property
def mask(self):
return self._mask
@property
def cookie(self):
return self._cookie
@property
def name(self):
return self._name
@property
def is_modify(self):
return self._mask & InotifyConstants.IN_MODIFY > 0
@property
def is_close_write(self):
return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0
@property
def is_close_nowrite(self):
return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0
@property
def is_access(self):
return self._mask & InotifyConstants.IN_ACCESS > 0
@property
def is_delete(self):
return self._mask & InotifyConstants.IN_DELETE > 0
@property
def is_delete_self(self):
return self._mask & InotifyConstants.IN_DELETE_SELF > 0
@property
def is_create(self):
return self._mask & InotifyConstants.IN_CREATE > 0
@property
def is_moved_from(self):
return self._mask & InotifyConstants.IN_MOVED_FROM > 0
@property
def is_moved_to(self):
return self._mask & InotifyConstants.IN_MOVED_TO > 0
@property
def is_move(self):
return self._mask & InotifyConstants.IN_MOVE > 0
@property
def is_move_self(self):
return self._mask & InotifyConstants.IN_MOVE_SELF > 0
@property
def is_attrib(self):
return self._mask & InotifyConstants.IN_ATTRIB > 0
@property
def is_ignored(self):
return self._mask & InotifyConstants.IN_IGNORED > 0
@property
def is_directory(self):
# It looks like the kernel does not provide this information for
# IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
# See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
return (self.is_delete_self or self.is_move_self
or self._mask & InotifyConstants.IN_ISDIR > 0)
@property
def key(self):
return self._src_path, self._wd, self._mask, self._cookie, self._name
def __eq__(self, inotify_event):
return self.key == inotify_event.key
def __ne__(self, inotify_event):
        return self.key != inotify_event.key
def __hash__(self):
return hash(self.key)
@staticmethod
def _get_mask_string(mask):
masks = []
for c in dir(InotifyConstants):
if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']:
c_val = getattr(InotifyConstants, c)
if mask & c_val:
masks.append(c)
mask_string = '|'.join(masks)
return mask_string
def __repr__(self):
mask_string = self._get_mask_string(self.mask)
s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>'
return s % (type(self).__name__, self.src_path, self.wd, mask_string,
self.cookie, decode(self.name))
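# For example (illustrative), a mask combining IN_CREATE and IN_ISDIR
# renders as:
#
#     InotifyEvent._get_mask_string(
#         InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR)
#     # -> 'IN_CREATE|IN_ISDIR'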
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog/observers/inotify_buffer.py | # -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from watchdog.utils import BaseThread
from watchdog.utils.delayed_queue import DelayedQueue
from watchdog.observers.inotify_c import Inotify
logger = logging.getLogger(__name__)
class InotifyBuffer(BaseThread):
"""A wrapper for `Inotify` that holds events for `delay` seconds. During
this time, IN_MOVED_FROM and IN_MOVED_TO events are paired.
"""
delay = 0.5
def __init__(self, path, recursive=False):
BaseThread.__init__(self)
self._queue = DelayedQueue(self.delay)
self._inotify = Inotify(path, recursive)
self.start()
def read_event(self):
"""Returns a single event or a tuple of from/to events in case of a
paired move event. If this buffer has been closed, immediately return
None.
"""
return self._queue.get()
def on_thread_stop(self):
self._inotify.close()
self._queue.close()
def close(self):
self.stop()
self.join()
def _group_events(self, event_list):
"""Group any matching move events"""
grouped = []
for inotify_event in event_list:
logger.debug("in-event %s", inotify_event)
def matching_from_event(event):
return (not isinstance(event, tuple) and event.is_moved_from
and event.cookie == inotify_event.cookie)
if inotify_event.is_moved_to:
# Check if move_from is already in the buffer
for index, event in enumerate(grouped):
if matching_from_event(event):
grouped[index] = (event, inotify_event)
break
else:
# Check if move_from is in delayqueue already
from_event = self._queue.remove(matching_from_event)
if from_event is not None:
grouped.append((from_event, inotify_event))
else:
logger.debug("could not find matching move_from event")
grouped.append(inotify_event)
else:
grouped.append(inotify_event)
return grouped
def run(self):
"""Read event from `inotify` and add them to `queue`. When reading a
IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event
and add them back to the queue as a tuple.
"""
deleted_self = False
while self.should_keep_running() and not deleted_self:
inotify_events = self._inotify.read_events()
grouped_events = self._group_events(inotify_events)
for inotify_event in grouped_events:
# Only add delay for unmatched move_from events
delay = not isinstance(inotify_event, tuple) and inotify_event.is_moved_from
self._queue.put(inotify_event, delay)
if not isinstance(inotify_event, tuple) and inotify_event.is_delete_self and \
inotify_event.src_path == self._inotify.path:
# Deleted the watched directory, stop watching for events
deleted_self = True
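# Usage sketch (illustrative; the path is a placeholder):
#
#     buf = InotifyBuffer(b'/tmp/watched', recursive=True)
#     event = buf.read_event()   # an InotifyEvent, a (from, to) tuple for a
#                                # paired move, or None once the buffer closes
#     if isinstance(event, tuple):
#         move_from, move_to = event
#     buf.close()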
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/__init__.py | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
This module is deprecated. Users are directed to :mod:`importlib.resources`,
:mod:`importlib.metadata` and :pypi:`packaging` instead.
"""
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import inspect
import ntpath
import posixpath
import importlib
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pkg_resources.extern.jaraco.text import (
yield_lines,
drop_comment,
join_continuation,
)
from pkg_resources.extern import platformdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
__import__('pkg_resources.extern.packaging.utils')
if sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
warnings.warn(
"pkg_resources is deprecated as an API. "
"See https://setuptools.pypa.io/en/latest/pkg_resources.html",
DeprecationWarning,
stacklevel=2
)
_PEP440_FALLBACK = re.compile(r"^v?(?P<safe>(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*)", re.I)
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
parse_version = packaging.version.Version
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of macOS that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of macOS that we are *running*. To allow usage of packages that
explicitly require a newer version of macOS, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
except ValueError:
# not macOS
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require',
'run_script',
'get_provider',
'get_distribution',
'load_entry_point',
'get_entry_map',
'get_entry_info',
'iter_entry_points',
'resource_string',
'resource_stream',
'resource_filename',
'resource_listdir',
'resource_exists',
'resource_isdir',
# Environmental control
'declare_namespace',
'working_set',
'add_activation_listener',
'find_distributions',
'set_extraction_path',
'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment',
'WorkingSet',
'ResourceManager',
'Distribution',
'Requirement',
'EntryPoint',
# Exceptions
'ResolutionError',
'VersionConflict',
'DistributionNotFound',
'UnknownExtra',
'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements',
'parse_version',
'safe_name',
'safe_version',
'get_platform',
'compatible_platforms',
'yield_lines',
'split_sections',
'safe_extra',
'to_filename',
'invalid_marker',
'evaluate_marker',
# filesystem utilities
'ensure_directory',
'normalize_path',
# Distribution "precedence" constants
'EGG_DIST',
'BINARY_DIST',
'SOURCE_DIST',
'CHECKOUT_DIST',
'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider',
'IResourceProvider',
'FileMetadata',
'PathMetadata',
'EggMetadata',
'EmptyProvider',
'empty_provider',
'NullProvider',
'EggProvider',
'DefaultProvider',
'ZipProvider',
'register_finder',
'register_namespace_handler',
'register_loader_type',
'fixup_namespace_packages',
'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main',
'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = (
"The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}"
)
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
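# For example (illustrative): both module names and requirements are
# accepted:
#
#     get_provider('pkg_resources')                   # by module name
#     get_provider(Requirement.parse('setuptools'))   # by requirement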
def _macos_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macos_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and macOS.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macos_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]),
int(version[1]),
_macos_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# macOS special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macOS designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if (
dversion == 7
and macosversion >= "10.3"
or dversion == 8
and macosversion >= "10.4"
):
return True
# egg isn't macOS or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
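# Examples (following the rules above; version strings are illustrative):
#
#     compatible_platforms('macosx-10.3-ppc', 'macosx-10.4-ppc')   # True
#     compatible_platforms('macosx-10.5-ppc', 'macosx-10.4-ppc')   # False
#     compatible_platforms(None, 'macosx-10.4-ppc')                # True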
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, str):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.normalized_to_canonical_keys = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is None:
canonical_key = self.normalized_to_canonical_keys.get(req.key)
if canonical_key is not None:
req.key = canonical_key
dist = self.by_key.get(canonical_key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
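    # For example (illustrative): list every console script visible to
    # the global working set:
    #
    #     for ep in working_set.iter_entry_points('console_scripts'):
    #         print(ep.name)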
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
normalized_name = packaging.utils.canonicalize_name(dist.key)
self.normalized_to_canonical_keys[normalized_name] = dist.key
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(
self,
requirements,
env=None,
installer=None,
replace_conflicting=False,
extras=None,
):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception
if any requirements are found on the path that have the correct name
but the wrong version. Otherwise, if an `installer` is supplied it
will be invoked to obtain the correct version of the requirement and
activate it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = self._resolve_dist(
req, best, replace_conflicting, env, installer, required_by, to_activate
)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
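# Illustrative sketch: computing, without activating, the closure of
# distributions needed to satisfy a requirement set. ``parse_requirements``
# is defined elsewhere in this module.
#
#     reqs = parse_requirements(["packaging>=20"])
#     for dist in working_set.resolve(reqs):
#         print(dist.project_name, dist.version)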
def _resolve_dist(
self, req, best, replace_conflicting, env, installer, required_by, to_activate
):
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer, replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
return dist
def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
list(map(working_set.add, distributions))
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
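# Illustrative sketch: ``require`` resolves *and* activates, so the
# project is importable afterwards (assuming it is actually installed).
#
#     working_set.require("packaging>=20")
#     import packaging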
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
If `existing=True` (default),
call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
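# Illustrative sketch: a callback passed to ``subscribe`` fires once per
# already-active distribution, then again for every later ``add()``.
#
#     activated = []
#     working_set.subscribe(activated.append)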
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:],
self.entry_keys.copy(),
self.by_key.copy(),
self.normalized_to_canonical_keys.copy(),
self.callbacks[:],
)
def __setstate__(self, e_k_b_n_c):
entries, keys, by_key, normalized_to_canonical_keys, callbacks = e_k_b_n_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.normalized_to_canonical_keys = normalized_to_canonical_keys.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR
):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
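# Illustrative sketch: snapshotting a plugin directory and picking the
# newest compatible release of a project (the path and project name below
# are hypothetical).
#
#     env = Environment(['/path/to/plugins'])
#     newest = next(iter(env['my-plugin']), None)  # newest first, or None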
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(resource_name)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(resource_name)
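# Illustrative sketch: reading a packaged data file through the manager
# API above (package and resource names below are hypothetical).
#
#     mgr = ResourceManager()
#     if mgr.resource_exists('my_pkg', 'data/template.txt'):
#         data = mgr.resource_string('my_pkg', 'data/template.txt')  # bytes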
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent(
"""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s)
to the Python egg cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory?
You can change the cache directory by setting the PYTHON_EGG_CACHE
environment variable to point to an accessible directory.
"""
).lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"Extraction path is writable by group/others "
"and vulnerable to attack when "
"used with get_resource_filename ({path}). "
"Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)."
).format(**locals())
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError("Can't change extraction path, files already extracted")
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return os.environ.get('PYTHON_EGG_CACHE') or platformdirs.user_cache_dir(
appname='Python-Eggs'
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
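# Illustrative examples of the normalizations above:
#
#     safe_name('hello_world')    -> 'hello-world'
#     safe_version('1.0 beta')    -> '1.0.beta'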
def _forgiving_version(version):
"""Fallback when ``safe_version`` is not safe enough
>>> parse_version(_forgiving_version('0.23ubuntu1'))
<Version('0.23.dev0+sanitized.ubuntu1')>
>>> parse_version(_forgiving_version('0.23-'))
<Version('0.23.dev0+sanitized')>
>>> parse_version(_forgiving_version('0.-_'))
<Version('0.dev0+sanitized')>
>>> parse_version(_forgiving_version('42.+?1'))
<Version('42.dev0+sanitized.1')>
>>> parse_version(_forgiving_version('hello world'))
<Version('0.dev0+sanitized.hello.world')>
"""
version = version.replace(' ', '.')
match = _PEP440_FALLBACK.search(version)
if match:
safe = match["safe"]
rest = version[len(safe):]
else:
safe = "0"
rest = version
local = f"sanitized.{_safe_segment(rest)}".strip(".")
return f"{safe}.dev0+{local}"
def _safe_segment(segment):
"""Convert an arbitrary string into a safe segment"""
segment = re.sub('[^A-Za-z0-9.]+', '-', segment)
segment = re.sub('-[^A-Za-z0-9]+', '-', segment)
return re.sub(r'\.[^A-Za-z0-9]+', '.', segment).strip(".-")
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation delegates to the 'packaging' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e) from e
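# Illustrative sketch: evaluating a PEP 508 marker against the running
# interpreter.
#
#     evaluate_marker('python_version >= "3.8"')  # True or False
#     invalid_marker('python_version >>> "3"')    # returns a SyntaxError instance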
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
try:
return value.decode('utf-8')
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}".format(
**locals()
),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
with open(script_filename) as fid:
source = fid.read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text),
0,
script_text.split('\n'),
script_filename,
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.pypa.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep)
or posixpath.isabs(path)
or ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
issue_warning(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
)
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
def _parents(path):
"""
yield all parents of path including path
"""
last = None
while path != last:
yield path
last = path
path, _ = os.path.split(path)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
super().__init__(module)
self._setup_prefix()
def _setup_prefix(self):
# Assume that metadata may be nested inside a "basket"
# of multiple eggs and use module_path instead of .archive.
eggs = filter(_is_egg_path, _parents(self.module_path))
egg = next(eggs, None)
egg and self._set_egg(egg)
def _set_egg(self, path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = (
'SourceFileLoader',
'SourcelessFileLoader',
)
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
super().__init__(module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre) :]
raise AssertionError("%s is not a subpath of %s" % (fspath, self.zip_pre))
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1 :].split(os.sep)
raise AssertionError("%s is not a subpath of %s" % (fspath, self.egg_root))
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
# FIXME: 'ZipProvider._extract_resource' is too complex (12)
def _extract_resource(self, manager, zip_path): # noqa: C901
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(manager, os.path.join(zip_path, name))
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError(
'"os.rename" and "os.unlink" are not supported ' 'on this platform'
)
try:
real_path = manager.get_cache_path(self.egg_name, self._parts(zip_path))
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
replacement_char = '�'
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
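# Illustrative sketch: scanning a single sys.path entry for installed
# distributions (the directory below is hypothetical).
#
#     for dist in find_distributions('/usr/lib/python3/dist-packages'):
#         print(dist)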
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith(('.dist-info', '.egg-info')):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item,
metadata=PathMetadata(path_item, os.path.join(path_item, 'EGG-INFO')),
)
return
entries = (os.path.join(path_item, child) for child in safe_listdir(path_item))
# scan for .egg and .egg-info in directory
for entry in sorted(entries):
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""Return a dist_factory for the given entry."""
lower = entry.lower()
is_egg_info = lower.endswith('.egg-info')
is_dist_info = lower.endswith('.dist-info') and os.path.isdir(
os.path.join(path_item, entry)
)
is_meta = is_egg_info or is_dist_info
return (
distributions_from_metadata
if is_meta
else find_distributions
if not only and _is_egg_path(entry)
else resolve_egg_link
if not only and lower.endswith('.egg-link')
else NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
# Ignore the directory if it does not exist, is not a directory, or
# permission is denied
if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root,
entry,
metadata,
precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref) for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
if hasattr(pkgutil, 'ImpImporter'):
register_finder(pkgutil.ImpImporter, find_on_path)
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# use find_spec (PEP 451) and fall back to find_module (PEP 302)
try:
spec = importer.find_spec(packageName)
except AttributeError:
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
else:
loader = spec.loader if spec else None
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
importlib.import_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
msg = (
f"Deprecated call to `pkg_resources.declare_namespace({packageName!r})`.\n"
"Implementing implicit namespace packages (as specified in PEP 420) "
"is preferred to `pkg_resources.declare_namespace`. "
"See https://setuptools.pypa.io/en/latest/references/"
"keywords.html#keyword-namespace-packages"
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError as e:
raise TypeError("Not a package:", parent) from e
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
if hasattr(pkgutil, 'ImpImporter'):
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
symlink components. Using os.path.abspath() works around this
limitation. A fix in os.getcwd() would probably be better, in Cygwin
even more so, except that this seems to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return _is_zip_egg(path) or _is_unpacked_egg(path)
def _is_zip_egg(path):
return (
path.lower().endswith('.egg')
and os.path.isfile(path)
and zipfile.is_zipfile(path)
)
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return path.lower().endswith('.egg') and os.path.isfile(
os.path.join(path, 'EGG-INFO', 'PKG-INFO')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc)) from exc
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
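# Illustrative sketch: parsing entry-point syntax directly (all names
# below are hypothetical).
#
#     ep = EntryPoint.parse('main = my_pkg.cli:run [extra1]')
#     ep.module_name  # 'my_pkg.cli'
#     ep.attrs        # ('run',)
#     ep.extras       # ('extra1',)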
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self,
location=None,
metadata=None,
project_name=None,
version=None,
py_version=PY_MAJOR,
platform=None,
precedence=EGG_DIST,
):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location,
metadata,
project_name=project_name,
version=version,
py_version=py_version,
platform=platform,
**kw,
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self._forgiving_parsed_version,
self.precedence,
self.key,
self.location,
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
try:
self._parsed_version = parse_version(self.version)
except packaging.version.InvalidVersion as ex:
info = f"(package: {self.project_name})"
if hasattr(ex, "add_note"):
ex.add_note(info) # PEP 678
raise
raise packaging.version.InvalidVersion(f"{str(ex)} {info}") from None
return self._parsed_version
@property
def _forgiving_parsed_version(self):
try:
return self.parsed_version
except packaging.version.InvalidVersion as ex:
self._parsed_version = parse_version(_forgiving_version(self.version))
notes = "\n".join(getattr(ex, "__notes__", [])) # PEP 678
msg = f"""!!\n\n
*************************************************************************
{str(ex)}\n{notes}
This is a long overdue deprecation.
For the time being, `pkg_resources` will use `{self._parsed_version}`
as a replacement to avoid breaking existing environments,
but no future compatibility is guaranteed.
If you maintain package {self.project_name} you should implement
the relevant changes to bring the project into conformance with PEP 440 immediately.
*************************************************************************
\n\n!!
"""
warnings.warn(msg, DeprecationWarning)
return self._parsed_version
@property
def version(self):
try:
return self._version
except AttributeError as e:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = ("Missing 'Version:' header and/or {} file at path: {}").format(
self.PKG_INFO, path
)
raise ValueError(msg, self) from e
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker) or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError as e:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
) from e
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name),
to_filename(self.version),
self.py_version or PY_MAJOR,
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(attr for attr in self._provider.__dir__() if not attr.startswith('_'))
)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata, **kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
# FIXME: 'Distribution.insert_on' is too complex (13)
def insert_on(self, path, loc=None, replace=False): # noqa: C901
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
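# Illustrative behavior (a sketch, not part of the vendored source; the paths
# and distribution below are hypothetical): an egg whose parent directory is
# already on the path is inserted just ahead of that directory.
#
#   path = ['/site-packages', '/elsewhere']
#   dist = Distribution(location='/site-packages/Example-1.0-py3.9.egg',
#                       project_name='Example', precedence=EGG_DIST)
#   dist.insert_on(path)
#   # path == ['/site-packages/Example-1.0-py3.9.egg',
#   #          '/site-packages', '/elsewhere']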
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (
modname not in sys.modules
or modname in nsp
or modname in _namespace_packages
):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (
normalize_path(fn).startswith(loc) or fn.startswith(self.location)
):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
except SystemError:
# TODO: remove this except clause when python/cpython#103632 is fixed.
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
Packages installed by distutils (e.g. numpy or scipy) use an old
safe_version, so their version numbers can get mangled when
converted to filenames (e.g., 1.11.0.dev0+2329eae becomes
1.11.0.dev0_2329eae). Such distributions will not be parsed
properly downstream by Distribution and safe_version, so take
an extra step and try to get the version number from the
metadata file itself instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = types.MappingProxyType(dict.fromkeys(reqs_for_extra(None)))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = [r for r in reqs_for_extra(extra) if r not in common]
return dm
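# Illustrative mapping (a sketch, not part of the vendored source): given
# METADATA containing
#     Requires-Dist: requests
#     Requires-Dist: pytest; extra == "tests"
#     Provides-Extra: tests
# the computed map is {None: [requests], 'tests': [pytest]} -- each extra
# keeps only the requirements not already present in the null extra.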
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
def parse_requirements(strs):
"""
Yield ``Requirement`` objects for each specification in `strs`.
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
return map(Requirement, join_continuation(map(drop_comment, yield_lines(strs))))
class RequirementParseError(packaging.requirements.InvalidRequirement):
"Compatibility wrapper for InvalidRequirement"
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
super(Requirement, self).__init__(requirement_string)
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.url,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return isinstance(other, Requirement) and self.hashCmp == other.hashCmp
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
(req,) = parse_requirements(s)
return req
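# Illustrative usage (a sketch, not part of the vendored source):
#
#   req = Requirement.parse('example-pkg[tests]>=1.0,<2')
#   req.key                 # 'example-pkg'
#   sorted(req.specs)       # [('<', '2'), ('>=', '1.0')]
#   req.extras              # ('tests',)
#   '1.5' in req            # True -- specifier containment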
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
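# Illustrative usage (a sketch, not part of the vendored source): a
# requires.txt-style input splits into the null section followed by extras.
#
#   list(split_sections(['requests', '[tests]', 'pytest', 'mock']))
#   # -> [(None, ['requests']), ('tests', ['pytest', 'mock'])]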
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(dist.activate(replace=False) for dist in working_set)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/__init__.py | |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/zipp.py | import io
import posixpath
import zipfile
import itertools
import contextlib
import sys
import pathlib
if sys.version_info < (3, 7):
from collections import OrderedDict
else:
OrderedDict = dict
__all__ = ['Path']
def _parents(path):
"""
Given a path with elements separated by
posixpath.sep, generate all parents of that path.
>>> list(_parents('b/d'))
['b']
>>> list(_parents('/b/d/'))
['/b']
>>> list(_parents('b/d/f/'))
['b/d', 'b']
>>> list(_parents('b'))
[]
>>> list(_parents(''))
[]
"""
return itertools.islice(_ancestry(path), 1, None)
def _ancestry(path):
"""
Given a path with elements separated by
posixpath.sep, generate all elements of that path
>>> list(_ancestry('b/d'))
['b/d', 'b']
>>> list(_ancestry('/b/d/'))
['/b/d', '/b']
>>> list(_ancestry('b/d/f/'))
['b/d/f', 'b/d', 'b']
>>> list(_ancestry('b'))
['b']
>>> list(_ancestry(''))
[]
"""
path = path.rstrip(posixpath.sep)
while path and path != posixpath.sep:
yield path
path, tail = posixpath.split(path)
_dedupe = OrderedDict.fromkeys
"""Deduplicate an iterable in original order"""
def _difference(minuend, subtrahend):
"""
Return items in minuend not in subtrahend, retaining order
with O(1) lookup.
"""
return itertools.filterfalse(set(subtrahend).__contains__, minuend)
class CompleteDirs(zipfile.ZipFile):
"""
A ZipFile subclass that ensures that implied directories
are always included in the namelist.
"""
@staticmethod
def _implied_dirs(names):
parents = itertools.chain.from_iterable(map(_parents, names))
as_dirs = (p + posixpath.sep for p in parents)
return _dedupe(_difference(as_dirs, names))
def namelist(self):
names = super(CompleteDirs, self).namelist()
return names + list(self._implied_dirs(names))
def _name_set(self):
return set(self.namelist())
def resolve_dir(self, name):
"""
If the name represents a directory, return that name
as a directory (with the trailing slash).
"""
names = self._name_set()
dirname = name + '/'
dir_match = name not in names and dirname in names
return dirname if dir_match else name
@classmethod
def make(cls, source):
"""
Given a source (filename or zipfile), return an
appropriate CompleteDirs subclass.
"""
if isinstance(source, CompleteDirs):
return source
if not isinstance(source, zipfile.ZipFile):
return cls(_pathlib_compat(source))
# Only allow for FastLookup when supplied zipfile is read-only
if 'r' not in source.mode:
cls = CompleteDirs
source.__class__ = cls
return source
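# Illustrative behavior (a sketch, not part of the vendored source): implied
# directories show up in namelist() even though the archive stores only files.
#
#   buf = io.BytesIO()
#   with zipfile.ZipFile(buf, 'w') as zf:
#       zf.writestr('b/c.txt', 'content')
#   CompleteDirs.make(zipfile.ZipFile(buf)).namelist()
#   # -> ['b/c.txt', 'b/']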
class FastLookup(CompleteDirs):
"""
ZipFile subclass to ensure implicit
dirs exist and are resolved rapidly.
"""
def namelist(self):
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super(FastLookup, self).namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super(FastLookup, self)._name_set()
return self.__lookup
def _pathlib_compat(path):
"""
For path-like objects, convert to a filename for compatibility
on Python 3.6.1 and earlier.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)
class Path:
"""
A pathlib-compatible interface for zip files.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = zipfile.ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'mem/abcde.zip'
Path accepts the zipfile object itself or a filename
>>> root = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = root.iterdir()
>>> a
Path('mem/abcde.zip', 'a.txt')
>>> b
Path('mem/abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('mem/abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text()
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> import os
>>> str(c).replace(os.sep, posixpath.sep)
'mem/abcde.zip/b/c.txt'
At the root, ``name``, ``filename``, and ``parent``
resolve to the zipfile. Note these attributes are not
valid and will raise a ``ValueError`` if the zipfile
has no filename.
>>> root.name
'abcde.zip'
>>> str(root.filename).replace(os.sep, posixpath.sep)
'mem/abcde.zip'
>>> str(root.parent)
'mem'
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
"""
Construct a Path from a ZipFile or filename.
Note: When the source is an existing ZipFile object,
its type (__class__) will be mutated to a
specialized type. If the caller wishes to retain the
original type, the caller should either create a
separate ZipFile object or pass a filename.
"""
self.root = FastLookup.make(root)
self.at = at
def open(self, mode='r', *args, pwd=None, **kwargs):
"""
Open this entry as text or binary following the semantics
of ``pathlib.Path.open()`` by passing arguments through
to io.TextIOWrapper().
"""
if self.is_dir():
raise IsADirectoryError(self)
zip_mode = mode[0]
if not self.exists() and zip_mode == 'r':
raise FileNotFoundError(self)
stream = self.root.open(self.at, zip_mode, pwd=pwd)
if 'b' in mode:
if args or kwargs:
raise ValueError("encoding args invalid for binary operation")
return stream
return io.TextIOWrapper(stream, *args, **kwargs)
@property
def name(self):
return pathlib.Path(self.at).name or self.filename.name
@property
def suffix(self):
return pathlib.Path(self.at).suffix or self.filename.suffix
@property
def suffixes(self):
return pathlib.Path(self.at).suffixes or self.filename.suffixes
@property
def stem(self):
return pathlib.Path(self.at).stem or self.filename.stem
@property
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
def read_text(self, *args, **kwargs):
with self.open('r', *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
with self.open('rb') as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return self.__class__(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return self.exists() and not self.is_dir()
def exists(self):
return self.at in self.root._name_set()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self.root.namelist())
return filter(self._is_child, subs)
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, *other):
next = posixpath.join(self.at, *map(_pathlib_compat, other))
return self._next(self.root.resolve_dir(next))
__truediv__ = joinpath
@property
def parent(self):
if not self.at:
return self.filename.parent
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/typing_extensions.py | import abc
import collections
import collections.abc
import functools
import operator
import sys
import types as _types
import typing
__all__ = [
# Super-special typing primitives.
'Any',
'ClassVar',
'Concatenate',
'Final',
'LiteralString',
'ParamSpec',
'ParamSpecArgs',
'ParamSpecKwargs',
'Self',
'Type',
'TypeVar',
'TypeVarTuple',
'Unpack',
# ABCs (from collections.abc).
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'AsyncGenerator',
'AsyncContextManager',
'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'NamedTuple',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsIndex',
# One-off things.
'Annotated',
'assert_never',
'assert_type',
'clear_overloads',
'dataclass_transform',
'get_overloads',
'final',
'get_args',
'get_origin',
'get_type_hints',
'IntVar',
'is_typeddict',
'Literal',
'NewType',
'overload',
'override',
'Protocol',
'reveal_type',
'runtime',
'runtime_checkable',
'Text',
'TypeAlias',
'TypeGuard',
'TYPE_CHECKING',
'Never',
'NoReturn',
'Required',
'NotRequired',
]
# for backward compatibility
PEP_560 = True
GenericMeta = type
# The functions below are modified copies of typing internal helpers.
# They are needed by _ProtocolMeta and they provide support for PEP 646.
_marker = object()
def _check_generic(cls, parameters, elen=_marker):
"""Check correct count for parameters of a generic cls (internal helper).
This gives a nice error message in case of count mismatch.
"""
if not elen:
raise TypeError(f"{cls} is not a generic class")
if elen is _marker:
if not hasattr(cls, "__parameters__") or not cls.__parameters__:
raise TypeError(f"{cls} is not a generic class")
elen = len(cls.__parameters__)
alen = len(parameters)
if alen != elen:
if hasattr(cls, "__parameters__"):
parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
return
raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
f" actual {alen}, expected {elen}")
if sys.version_info >= (3, 10):
def _should_collect_from_parameters(t):
return isinstance(
t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
)
elif sys.version_info >= (3, 9):
def _should_collect_from_parameters(t):
return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
else:
def _should_collect_from_parameters(t):
return isinstance(t, typing._GenericAlias) and not t._special
def _collect_type_vars(types, typevar_types=None):
"""Collect all type variable contained in types in order of
first appearance (lexicographic order). For example::
_collect_type_vars((T, List[S, T])) == (T, S)
"""
if typevar_types is None:
typevar_types = typing.TypeVar
tvars = []
for t in types:
if (
isinstance(t, typevar_types) and
t not in tvars and
not _is_unpack(t)
):
tvars.append(t)
if _should_collect_from_parameters(t):
tvars.extend([t for t in t.__parameters__ if t not in tvars])
return tuple(tvars)
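# Illustrative behavior (a sketch, not part of the vendored source): type
# variables are collected in order of first appearance, without duplicates.
#
#   S, U = typing.TypeVar('S'), typing.TypeVar('U')
#   _collect_type_vars((U, typing.List[S], U))  # -> (U, S)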
NoReturn = typing.NoReturn
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T') # Any type.
KT = typing.TypeVar('KT') # Key type.
VT = typing.TypeVar('VT') # Value type.
T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
if sys.version_info >= (3, 11):
from typing import Any
else:
class _AnyMeta(type):
def __instancecheck__(self, obj):
if self is Any:
raise TypeError("typing_extensions.Any cannot be used with isinstance()")
return super().__instancecheck__(obj)
def __repr__(self):
if self is Any:
return "typing_extensions.Any"
return super().__repr__()
class Any(metaclass=_AnyMeta):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
checks.
"""
def __new__(cls, *args, **kwargs):
if cls is Any:
raise TypeError("Any cannot be instantiated")
return super().__new__(cls, *args, **kwargs)
ClassVar = typing.ClassVar
# On older versions of typing there is an internal class named "Final".
# 3.8+
if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
Final = typing.Final
# 3.7
else:
class _FinalForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Final = _FinalForm('Final',
doc="""A special typing construct to indicate that a name
cannot be re-assigned or overridden in a subclass.
For example:
MAX_SIZE: Final = 9000
MAX_SIZE += 1 # Error reported by type checker
class Connection:
TIMEOUT: Final[int] = 10
class FastConnector(Connection):
TIMEOUT = 1 # Error reported by type checker
There is no runtime checking of these properties.""")
if sys.version_info >= (3, 11):
final = typing.final
else:
# @final exists in 3.8+, but we backport it for all versions
# before 3.11 to keep support for the __final__ attribute.
# See https://bugs.python.org/issue46342
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
def IntVar(name):
return typing.TypeVar(name)
# 3.8+:
if hasattr(typing, 'Literal'):
Literal = typing.Literal
# 3.7:
else:
class _LiteralForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return typing._GenericAlias(self, parameters)
Literal = _LiteralForm('Literal',
doc="""A type that can be used to indicate to type checkers
that the corresponding value has a value literally equivalent
to the provided parameter. For example:
var: Literal[4] = 4
The type checker understands that 'var' is literally equal to
the value 4 and no other value.
Literal[...] cannot be subclassed. There is no runtime
checking verifying that the parameter is actually a value
instead of a type.""")
_overload_dummy = typing._overload_dummy # noqa
if hasattr(typing, "get_overloads"): # 3.11+
overload = typing.overload
get_overloads = typing.get_overloads
clear_overloads = typing.clear_overloads
else:
# {module: {qualname: {firstlineno: func}}}
_overload_registry = collections.defaultdict(
functools.partial(collections.defaultdict, dict)
)
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
The overloads for a function can be retrieved at runtime using the
get_overloads() function.
"""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
try:
_overload_registry[f.__module__][f.__qualname__][
f.__code__.co_firstlineno
] = func
except AttributeError:
# Not a normal function; ignore.
pass
return _overload_dummy
def get_overloads(func):
"""Return all defined overloads for *func* as a sequence."""
# classmethod and staticmethod
f = getattr(func, "__func__", func)
if f.__module__ not in _overload_registry:
return []
mod_dict = _overload_registry[f.__module__]
if f.__qualname__ not in mod_dict:
return []
return list(mod_dict[f.__qualname__].values())
def clear_overloads():
"""Clear all overloads in the registry."""
_overload_registry.clear()
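# Illustrative usage (a sketch, not part of the vendored source): the backport
# records every @overload stub so get_overloads() can recover them later.
#
#   @overload
#   def parse(value: int) -> str: ...
#   @overload
#   def parse(value: str) -> str: ...
#   def parse(value): return str(value)   # the actual implementation
#   len(get_overloads(parse))             # -> 2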
# This is not a real generic class. Don't use outside annotations.
Type = typing.Type
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
Deque = typing.Deque
ContextManager = typing.ContextManager
AsyncContextManager = typing.AsyncContextManager
DefaultDict = typing.DefaultDict
# 3.7.2+
if hasattr(typing, 'OrderedDict'):
OrderedDict = typing.OrderedDict
# 3.7.0-3.7.2
else:
OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
Counter = typing.Counter
ChainMap = typing.ChainMap
AsyncGenerator = typing.AsyncGenerator
NewType = typing.NewType
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
_PROTO_WHITELIST = ['Callable', 'Awaitable',
'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
'ContextManager', 'AsyncContextManager']
def _get_protocol_attrs(cls):
attrs = set()
for base in cls.__mro__[:-1]: # without object
if base.__name__ in ('Protocol', 'Generic'):
continue
annotations = getattr(base, '__annotations__', {})
for attr in list(base.__dict__.keys()) + list(annotations.keys()):
if (not attr.startswith('_abc_') and attr not in (
'__abstractmethods__', '__annotations__', '__weakref__',
'_is_protocol', '_is_runtime_protocol', '__dict__',
'__args__', '__slots__',
'__next_in_mro__', '__parameters__', '__origin__',
'__orig_bases__', '__extra__', '__tree_hash__',
'__doc__', '__subclasshook__', '__init__', '__new__',
'__module__', '_MutableMapping__marker', '_gorg')):
attrs.add(attr)
return attrs
def _is_callable_members_only(cls):
return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
def _maybe_adjust_parameters(cls):
"""Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
The contents of this function are very similar
to logic found in typing.Generic.__init_subclass__
on the CPython main branch.
"""
tvars = []
if '__orig_bases__' in cls.__dict__:
tvars = typing._collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...] and/or Protocol[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, typing._GenericAlias) and
base.__origin__ in (typing.Generic, Protocol)):
# for error messages
the_base = base.__origin__.__name__
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...]"
" and/or Protocol[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in {the_base}[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
# 3.8+
if hasattr(typing, 'Protocol'):
Protocol = typing.Protocol
# 3.7
else:
def _no_init(self, *args, **kwargs):
if type(self)._is_protocol:
raise TypeError('Protocols cannot be instantiated')
class _ProtocolMeta(abc.ABCMeta): # noqa: B024
# This metaclass is a bit unfortunate and exists only because of the lack
# of __instancehook__.
def __instancecheck__(cls, instance):
# We need this method for situations where attributes are
# assigned in __init__.
if ((not getattr(cls, '_is_protocol', False) or
_is_callable_members_only(cls)) and
issubclass(instance.__class__, cls)):
return True
if cls._is_protocol:
if all(hasattr(instance, attr) and
(not callable(getattr(cls, attr, None)) or
getattr(instance, attr) is not None)
for attr in _get_protocol_attrs(cls)):
return True
return super().__instancecheck__(instance)
class Protocol(metaclass=_ProtocolMeta):
# There is quite a lot of overlapping code with typing.Generic.
# Unfortunately it is hard to avoid this while these live in two different
# modules. The duplicated code will be removed when Protocol is moved to typing.
"""Base class for protocol classes. Protocol classes are defined as::
class Proto(Protocol):
def meth(self) -> int:
...
Such classes are primarily used with static type checkers that recognize
structural subtyping (static duck-typing), for example::
class C:
def meth(self) -> int:
return 0
def func(x: Proto) -> int:
return x.meth()
func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with
@typing_extensions.runtime act as simple-minded runtime protocols that check
only the presence of given attributes, ignoring their type signatures.
Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
def meth(self) -> T:
...
"""
__slots__ = ()
_is_protocol = True
def __new__(cls, *args, **kwds):
if cls is Protocol:
raise TypeError("Type Protocol cannot be instantiated; "
"it can only be used as a base class")
return super().__new__(cls)
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple):
params = (params,)
if not params and cls is not typing.Tuple:
raise TypeError(
f"Parameter list to {cls.__qualname__}[...] cannot be empty")
msg = "Parameters to generic types must be types."
params = tuple(typing._type_check(p, msg) for p in params) # noqa
if cls is Protocol:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, typing.TypeVar) for p in params):
i = 0
while isinstance(params[i], typing.TypeVar):
i += 1
raise TypeError(
"Parameters to Protocol[...] must all be type variables."
f" Parameter {i + 1} is {params[i]}")
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Protocol[...] must all be unique")
else:
# Subscripting a regular Generic subclass.
_check_generic(cls, params, len(cls.__parameters__))
return typing._GenericAlias(cls, params)
def __init_subclass__(cls, *args, **kwargs):
if '__orig_bases__' in cls.__dict__:
error = typing.Generic in cls.__orig_bases__
else:
error = typing.Generic in cls.__bases__
if error:
raise TypeError("Cannot inherit from plain Generic")
_maybe_adjust_parameters(cls)
# Determine if this is a protocol or a concrete subclass.
if not cls.__dict__.get('_is_protocol', None):
cls._is_protocol = any(b is Protocol for b in cls.__bases__)
# Set (or override) the protocol subclass hook.
def _proto_hook(other):
if not cls.__dict__.get('_is_protocol', None):
return NotImplemented
if not getattr(cls, '_is_runtime_protocol', False):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Instance and class checks can only be used with"
" @runtime protocols")
if not _is_callable_members_only(cls):
if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
return NotImplemented
raise TypeError("Protocols with non-method members"
" don't support issubclass()")
if not isinstance(other, type):
# Same error as for issubclass(1, int)
raise TypeError('issubclass() arg 1 must be a class')
for attr in _get_protocol_attrs(cls):
for base in other.__mro__:
if attr in base.__dict__:
if base.__dict__[attr] is None:
return NotImplemented
break
annotations = getattr(base, '__annotations__', {})
if (isinstance(annotations, typing.Mapping) and
attr in annotations and
isinstance(other, _ProtocolMeta) and
other._is_protocol):
break
else:
return NotImplemented
return True
if '__subclasshook__' not in cls.__dict__:
cls.__subclasshook__ = _proto_hook
# We have nothing more to do for non-protocols.
if not cls._is_protocol:
return
# Check consistency of bases.
for base in cls.__bases__:
if not (base in (object, typing.Generic) or
base.__module__ == 'collections.abc' and
base.__name__ in _PROTO_WHITELIST or
isinstance(base, _ProtocolMeta) and base._is_protocol):
raise TypeError('Protocols can only inherit from other'
f' protocols, got {repr(base)}')
cls.__init__ = _no_init
# 3.8+
if hasattr(typing, 'runtime_checkable'):
runtime_checkable = typing.runtime_checkable
# 3.7
else:
def runtime_checkable(cls):
"""Mark a protocol class as a runtime protocol, so that it
can be used with isinstance() and issubclass(). Raise TypeError
if applied to a non-protocol class.
This allows a simple-minded structural check very similar to the
one-offs in collections.abc such as Hashable.
"""
if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
raise TypeError('@runtime_checkable can be only applied to protocol classes,'
f' got {cls!r}')
cls._is_runtime_protocol = True
return cls
# Exists for backwards compatibility.
runtime = runtime_checkable
# 3.8+
if hasattr(typing, 'SupportsIndex'):
SupportsIndex = typing.SupportsIndex
# 3.7
else:
@runtime_checkable
class SupportsIndex(Protocol):
__slots__ = ()
@abc.abstractmethod
def __index__(self) -> int:
pass
if hasattr(typing, "Required"):
# The standard library TypedDict in Python 3.8 does not store runtime information
# about which (if any) keys are optional. See https://bugs.python.org/issue38834
# The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
# keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
# The standard library TypedDict below Python 3.11 does not store runtime
# information about optional and required keys when using Required or NotRequired.
# Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
TypedDict = typing.TypedDict
_TypedDictMeta = typing._TypedDictMeta
is_typeddict = typing.is_typeddict
else:
def _check_fails(cls, other):
try:
if sys._getframe(1).f_globals['__name__'] not in ['abc',
'functools',
'typing']:
# Typed dicts are only for static structural subtyping.
raise TypeError('TypedDict does not support instance and class checks')
except (AttributeError, ValueError):
pass
return False
def _dict_new(*args, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:]  # allow the "cls" keyword to be passed
return dict(*args, **kwargs)
_dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
def _typeddict_new(*args, total=True, **kwargs):
if not args:
raise TypeError('TypedDict.__new__(): not enough arguments')
_, args = args[0], args[1:]  # allow the "cls" keyword to be passed
if args:
typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
elif '_typename' in kwargs:
typename = kwargs.pop('_typename')
import warnings
warnings.warn("Passing '_typename' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError("TypedDict.__new__() missing 1 required positional "
"argument: '_typename'")
if args:
try:
fields, = args  # allow the "_fields" keyword to be passed
except ValueError:
raise TypeError('TypedDict.__new__() takes from 2 to 3 '
f'positional arguments but {len(args) + 2} '
'were given')
elif '_fields' in kwargs and len(kwargs) == 1:
fields = kwargs.pop('_fields')
import warnings
warnings.warn("Passing '_fields' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
fields = None
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total)
_typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
' /, *, total=True, **kwargs)')
class _TypedDictMeta(type):
def __init__(cls, name, bases, ns, total=True):
super().__init__(name, bases, ns)
def __new__(cls, name, bases, ns, total=True):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
# Don't insert typing.Generic into __bases__ here,
# or Generic.__init_subclass__ will raise TypeError
# in the super().__new__() call.
# Instead, monkey-patch __bases__ onto the class after it's been created.
tp_dict = super().__new__(cls, name, (dict,), ns)
if any(issubclass(base, typing.Generic) for base in bases):
tp_dict.__bases__ = (typing.Generic, dict)
_maybe_adjust_parameters(tp_dict)
annotations = {}
own_annotations = ns.get('__annotations__', {})
msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
own_annotations = {
n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
}
required_keys = set()
optional_keys = set()
for base in bases:
annotations.update(base.__dict__.get('__annotations__', {}))
required_keys.update(base.__dict__.get('__required_keys__', ()))
optional_keys.update(base.__dict__.get('__optional_keys__', ()))
annotations.update(own_annotations)
for annotation_key, annotation_type in own_annotations.items():
annotation_origin = get_origin(annotation_type)
if annotation_origin is Annotated:
annotation_args = get_args(annotation_type)
if annotation_args:
annotation_type = annotation_args[0]
annotation_origin = get_origin(annotation_type)
if annotation_origin is Required:
required_keys.add(annotation_key)
elif annotation_origin is NotRequired:
optional_keys.add(annotation_key)
elif total:
required_keys.add(annotation_key)
else:
optional_keys.add(annotation_key)
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
if not hasattr(tp_dict, '__total__'):
tp_dict.__total__ = total
return tp_dict
__instancecheck__ = __subclasscheck__ = _check_fails
TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
TypedDict.__module__ = __name__
TypedDict.__doc__ = \
"""A simple typed name space. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, with each key
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if hasattr(typing, "_TypedDictMeta"):
_TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
else:
_TYPEDDICT_TYPES = (_TypedDictMeta,)
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
return isinstance(tp, tuple(_TYPEDDICT_TYPES))
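# Illustrative usage (a sketch, not part of the vendored source): Required and
# NotRequired annotations feed the __required_keys__/__optional_keys__ sets.
#
#   class Movie(TypedDict):
#       title: str
#       year: NotRequired[int]
#   Movie.__required_keys__   # frozenset({'title'})
#   Movie.__optional_keys__   # frozenset({'year'})
#   is_typeddict(Movie)       # True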
if hasattr(typing, "assert_type"):
assert_type = typing.assert_type
else:
def assert_type(__val, __typ):
"""Assert (to the type checker) that the value is of the given type.
When the type checker encounters a call to assert_type(), it
emits an error if the value is not of the specified type::
def greet(name: str) -> None:
assert_type(name, str) # ok
assert_type(name, int) # type checker error
At runtime this returns the first argument unchanged and otherwise
does nothing.
"""
return __val
if hasattr(typing, "Required"):
get_type_hints = typing.get_type_hints
else:
import functools
import types
# replaces _strip_annotations()
def _strip_extras(t):
"""Strips Annotated, Required and NotRequired from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
return _strip_extras(t.__args__[0])
if isinstance(t, typing._GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return t.copy_with(stripped_args)
if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return types.GenericAlias(t.__origin__, stripped_args)
if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
stripped_args = tuple(_strip_extras(a) for a in t.__args__)
if stripped_args == t.__args__:
return t
return functools.reduce(operator.or_, stripped_args)
return t
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
(unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if hasattr(typing, "Annotated"):
hint = typing.get_type_hints(
obj, globalns=globalns, localns=localns, include_extras=True
)
else:
hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
if include_extras:
return hint
return {k: _strip_extras(t) for k, t in hint.items()}
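# Illustrative behavior (a sketch, not part of the vendored source):
#
#   class Point(TypedDict, total=False):
#       x: Required[int]
#       y: int
#   get_type_hints(Point)                       # {'x': int, 'y': int}
#   get_type_hints(Point, include_extras=True)  # {'x': Required[int], 'y': int}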
# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
Annotated = typing.Annotated
# Not exported and not a public API, but needed for get_origin() and get_args()
# to work.
_AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
class _AnnotatedAlias(typing._GenericAlias, _root=True):
"""Runtime representation of an annotated type.
At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
with extra annotations. The alias behaves like a normal typing alias,
instantiating is the same as instantiating the underlying type, binding
it to types is also the same.
"""
def __init__(self, origin, metadata):
if isinstance(origin, _AnnotatedAlias):
metadata = origin.__metadata__ + metadata
origin = origin.__origin__
super().__init__(origin, origin)
self.__metadata__ = metadata
def copy_with(self, params):
assert len(params) == 1
new_type = params[0]
return _AnnotatedAlias(new_type, self.__metadata__)
def __repr__(self):
return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
f"{', '.join(repr(a) for a in self.__metadata__)}]")
def __reduce__(self):
return operator.getitem, (
Annotated, (self.__origin__,) + self.__metadata__
)
def __eq__(self, other):
if not isinstance(other, _AnnotatedAlias):
return NotImplemented
if self.__origin__ != other.__origin__:
return False
return self.__metadata__ == other.__metadata__
def __hash__(self):
return hash((self.__origin__, self.__metadata__))
class Annotated:
"""Add context specific metadata to a type.
Example: Annotated[int, runtime_check.Unsigned] indicates to the
hypothetical runtime_check module that this type is an unsigned int.
Every other consumer of this type can ignore this metadata and treat
this type as int.
The first argument to Annotated must be a valid type (and will be in
the __origin__ field), the remaining arguments are kept as a tuple in
the __metadata__ field.
Details:
- It's an error to call `Annotated` with less than two arguments.
- Nested Annotated are flattened::
Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
- Instantiating an annotated type is equivalent to instantiating the
underlying type::
Annotated[C, Ann1](5) == C(5)
- Annotated can be used as a generic type alias::
Optimized = Annotated[T, runtime.Optimize()]
Optimized[int] == Annotated[int, runtime.Optimize()]
OptimizedList = Annotated[List[T], runtime.Optimize()]
OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise TypeError("Type Annotated cannot be instantiated.")
@typing._tp_cache
def __class_getitem__(cls, params):
if not isinstance(params, tuple) or len(params) < 2:
raise TypeError("Annotated[...] should be used "
"with at least two arguments (a type and an "
"annotation).")
allowed_special_forms = (ClassVar, Final)
if get_origin(params[0]) in allowed_special_forms:
origin = params[0]
else:
msg = "Annotated[t, ...]: t must be a type."
origin = typing._type_check(params[0], msg)
metadata = tuple(params[1:])
return _AnnotatedAlias(origin, metadata)
def __init_subclass__(cls, *args, **kwargs):
raise TypeError(
f"Cannot subclass {cls.__module__}.Annotated"
)
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
get_origin = typing.get_origin
get_args = typing.get_args
# 3.7-3.9
else:
try:
# 3.9+
from typing import _BaseGenericAlias
except ImportError:
_BaseGenericAlias = typing._GenericAlias
try:
# 3.9+
from typing import GenericAlias as _typing_GenericAlias
except ImportError:
_typing_GenericAlias = typing._GenericAlias
def get_origin(tp):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
and Annotated. Return None for unsupported types. Examples::
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
get_origin(P.args) is P
"""
if isinstance(tp, _AnnotatedAlias):
return Annotated
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
ParamSpecArgs, ParamSpecKwargs)):
return tp.__origin__
if tp is typing.Generic:
return typing.Generic
return None
def get_args(tp):
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if isinstance(tp, _AnnotatedAlias):
return (tp.__origin__,) + tp.__metadata__
if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
if getattr(tp, "_special", False):
return ()
res = tp.__args__
if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return ()
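# Illustrative usage (a sketch, not part of the vendored source): unlike the
# 3.8/3.9 stdlib versions, these helpers understand Annotated.
#
#   get_origin(Annotated[int, 'meta'])   # -> Annotated
#   get_args(Annotated[int, 'meta'])     # -> (int, 'meta')
#   get_origin(typing.List[int])         # -> list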
# 3.10+
if hasattr(typing, 'TypeAlias'):
TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeAliasForm
def TypeAlias(self, parameters):
"""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example above.
"""
raise TypeError(f"{self} is not subscriptable")
# 3.7-3.8
else:
class _TypeAliasForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
TypeAlias = _TypeAliasForm('TypeAlias',
doc="""Special marker indicating that an assignment should
be recognized as a proper type alias definition by type
checkers.
For example::
Predicate: TypeAlias = Callable[..., bool]
It's invalid when used anywhere except as in the example
above.""")
class _DefaultMixin:
"""Mixin for TypeVarLike defaults."""
__slots__ = ()
def __init__(self, default):
if isinstance(default, (tuple, list)):
self.__default__ = tuple((typing._type_check(d, "Default must be a type")
for d in default))
elif default:
self.__default__ = typing._type_check(default, "Default must be a type")
else:
self.__default__ = None
# Add default and infer_variance parameters from PEP 696 and 695
class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
"""Type variable."""
__module__ = 'typing'
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=None, infer_variance=False):
super().__init__(name, *constraints, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
self.__infer_variance__ = infer_variance
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.7-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
def __eq__(self, other):
if not isinstance(other, ParamSpecArgs):
return NotImplemented
return self.__origin__ == other.__origin__
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
def __eq__(self, other):
if not isinstance(other, ParamSpecKwargs):
return NotImplemented
return self.__origin__ == other.__origin__
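# Illustrative introspection (a sketch, not part of the vendored source):
#
#   P = ParamSpec('P')
#   P.args.__origin__ is P     # True
#   P.kwargs.__origin__ is P   # True
#   repr(P.args)               # 'P.args'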
# 3.10+
if hasattr(typing, 'ParamSpec'):
# Add default Parameter - PEP 696
class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
"""Parameter specification variable."""
__module__ = 'typing'
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
default=None):
super().__init__(name, bound=bound, covariant=covariant,
contravariant=contravariant)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
# 3.7-3.9
else:
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class ParamSpec(list, _DefaultMixin):
"""Parameter specification variable.
Usage::
P = ParamSpec('P')
Parameter specification variables exist primarily for the benefit of static
type checkers. They are used to forward the parameter types of one
callable to another callable, a pattern commonly found in higher order
functions and decorators. They are only valid when used in ``Concatenate``,
or as the first argument to ``Callable``. In Python 3.10 and higher,
they are also supported in user-defined Generics at runtime.
See class Generic for more information on generic types. An
example for annotating a decorator::
T = TypeVar('T')
P = ParamSpec('P')
def add_logging(f: Callable[P, T]) -> Callable[P, T]:
'''A type-safe decorator to add logging to a function.'''
def inner(*args: P.args, **kwargs: P.kwargs) -> T:
logging.info(f'{f.__name__} was called')
return f(*args, **kwargs)
return inner
@add_logging
def add_two(x: float, y: float) -> float:
'''Add two numbers together.'''
return x + y
Parameter specification variables defined with covariant=True or
contravariant=True can be used to declare covariant or contravariant
generic types. These keyword arguments are valid, but their actual semantics
are yet to be decided. See PEP 612 for details.
Parameter specification variables can be introspected. e.g.:
P.__name__ == 'P'
P.__bound__ == None
P.__covariant__ == False
P.__contravariant__ == False
Note that only parameter specification variables defined in global scope can
be pickled.
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
@property
def args(self):
return ParamSpecArgs(self)
@property
def kwargs(self):
return ParamSpecKwargs(self)
def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
default=None):
super().__init__([self])
self.__name__ = name
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if bound:
self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
else:
self.__bound__ = None
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
# Hack to get typing._type_check to pass.
def __call__(self, *args, **kwargs):
pass
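# Illustrative note (sketch): the backported ParamSpec masquerades as a
# TypeVar via the ``__class__`` trick above, so the Generic machinery
# counts it among ``__parameters__``:
#     >>> P = ParamSpec('P')
#     >>> isinstance(P, typing.TypeVar)
#     True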
# 3.7-3.9
if not hasattr(typing, 'Concatenate'):
# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
class _ConcatenateGenericAlias(list):
# Trick Generic into looking into this for __parameters__.
__class__ = typing._GenericAlias
# Flag in 3.8.
_special = False
def __init__(self, origin, args):
super().__init__(args)
self.__origin__ = origin
self.__args__ = args
def __repr__(self):
_type_repr = typing._type_repr
return (f'{_type_repr(self.__origin__)}'
f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
def __hash__(self):
return hash((self.__origin__, self.__args__))
# Hack to get typing._type_check to pass in Generic.
def __call__(self, *args, **kwargs):
pass
@property
def __parameters__(self):
return tuple(
tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
)
# 3.7-3.9
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
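# Usage sketch (assumed example): earlier arguments are prepended
# positional types; the last must be a ParamSpec:
#     >>> P = ParamSpec('P')
#     >>> Concatenate[int, P].__args__   # roughly (<class 'int'>, ~P)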
# 3.10+
if hasattr(typing, 'Concatenate'):
Concatenate = typing.Concatenate
_ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_TypeAliasForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.7-8
else:
class _ConcatenateForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_TypeGuardForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.7-3.8
else:
class _TypeGuardForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# Vendored from cpython typing._SpecialForm
class _SpecialForm(typing._Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return f'typing_extensions.{self._name}'
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return typing.Union[self, other]
def __ror__(self, other):
return typing.Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@typing._tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"):
LiteralString = typing.LiteralString
else:
@_SpecialForm
def LiteralString(self, params):
"""Represents an arbitrary literal string.
Example::
from typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}") # not ok
See PEP 675 for details.
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"):
Self = typing.Self
else:
@_SpecialForm
def Self(self, params):
"""Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"):
Never = typing.Never
else:
@_SpecialForm
def Never(self, params):
"""The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg) # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg) # ok, arg is of type Never
"""
raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'):
Required = typing.Required
NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
@_ExtensionsSpecialForm
def Required(self, parameters):
"""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
@_ExtensionsSpecialForm
def NotRequired(self, parameters):
"""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
else:
class _RequiredForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return typing._GenericAlias(self, (item,))
Required = _RequiredForm(
'Required',
doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
NotRequired = _RequiredForm(
'NotRequired',
doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix', # typechecker error if key is omitted
year=1999,
)
""")
if hasattr(typing, "Unpack"): # 3.11+
Unpack = typing.Unpack
elif sys.version_info[:2] >= (3, 9):
class _UnpackSpecialForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
@_UnpackSpecialForm
def Unpack(self, parameters):
"""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
"""
item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
else:
class _UnpackAlias(typing._GenericAlias, _root=True):
__class__ = typing.TypeVar
class _UnpackForm(typing._SpecialForm, _root=True):
def __repr__(self):
return 'typing_extensions.' + self._name
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type.')
return _UnpackAlias(self, (item,))
Unpack = _UnpackForm(
'Unpack',
doc="""A special typing construct to unpack a variadic type. For example:
Shape = TypeVarTuple('Shape')
Batch = NewType('Batch', int)
def add_batch_axis(
x: Array[Unpack[Shape]]
) -> Array[Batch, Unpack[Shape]]: ...
""")
def _is_unpack(obj):
return isinstance(obj, _UnpackAlias)
if hasattr(typing, "TypeVarTuple"): # 3.11+
# Add default Parameter - PEP 696
class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
"""Type variable tuple."""
def __init__(self, name, *, default=None):
super().__init__(name)
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
else:
class TypeVarTuple(_DefaultMixin):
"""Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x) # Inferred type is Array[Height, Width]
z = x + x # ... is Array[Height, Width]
x.get_shape() # ... is tuple[Height, Width]
"""
# Trick Generic __parameters__.
__class__ = typing.TypeVar
def __iter__(self):
yield self.__unpacked__
def __init__(self, name, *, default=None):
self.__name__ = name
_DefaultMixin.__init__(self, default)
# for pickling:
try:
def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
def_mod = None
if def_mod != 'typing_extensions':
self.__module__ = def_mod
self.__unpacked__ = Unpack[self]
def __repr__(self):
return self.__name__
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
return self is other
def __reduce__(self):
return self.__name__
def __init_subclass__(self, *args, **kwds):
if '_root' not in kwds:
raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"):
reveal_type = typing.reveal_type
else:
def reveal_type(__obj: T) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
return __obj
if hasattr(typing, "assert_never"):
assert_never = typing.assert_never
else:
def assert_never(__arg: Never) -> Never:
"""Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
"""
raise AssertionError("Expected code to be unreachable")
if hasattr(typing, 'dataclass_transform'):
dataclass_transform = typing.dataclass_transform
else:
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
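# Runtime-effect sketch for this fallback (assumed example): the decorator
# only records its arguments on the decorated object:
#     >>> @dataclass_transform(kw_only_default=True)
#     ... def create_model(cls): return cls
#     >>> create_model.__dataclass_transform__['kw_only_default']
#     True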
if hasattr(typing, "override"):
override = typing.override
else:
_F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
def override(__arg: _F) -> _F:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None: ...
pass
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
See PEP 698 for details.
"""
return __arg
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
# Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
# counting generic parameters, so that when we subscript a generic,
# the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
typing._collect_type_vars = _collect_type_vars
typing._check_generic = _check_generic
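# Illustrative consequence of the patching above (sketch): Ts is accepted
# by Generic like a TypeVar, yet subscripting does not substitute it 1:1:
#     Ts = TypeVarTuple('Ts')
#     class Array(Generic[Unpack[Ts]]): ...
#     Array[int, str]   # both int and str bind to the unpacked Ts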
# Backport typing.NamedTuple as it exists in Python 3.11.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
if sys.version_info >= (3, 11):
NamedTuple = typing.NamedTuple
else:
def _caller():
try:
return sys._getframe(2).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError): # For platforms without _getframe()
return None
def _make_nmtuple(name, types, module, defaults=()):
fields = [n for n, t in types]
annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
for n, t in types}
nm_tpl = collections.namedtuple(name, fields,
defaults=defaults, module=module)
nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
# The `_field_types` attribute was removed in 3.9;
# in earlier versions, it is the same as the `__annotations__` attribute
if sys.version_info < (3, 9):
nm_tpl._field_types = annotations
return nm_tpl
_prohibited_namedtuple_fields = typing._prohibited
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
class _NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
assert _NamedTuple in bases
for base in bases:
if base is not _NamedTuple and base is not typing.Generic:
raise TypeError(
'can only inherit from a NamedTuple type and Generic')
bases = tuple(tuple if base is _NamedTuple else base for base in bases)
types = ns.get('__annotations__', {})
default_names = []
for field_name in types:
if field_name in ns:
default_names.append(field_name)
elif default_names:
raise TypeError(f"Non-default namedtuple field {field_name} "
f"cannot follow default field"
f"{'s' if len(default_names) > 1 else ''} "
f"{', '.join(default_names)}")
nm_tpl = _make_nmtuple(
typename, types.items(),
defaults=[ns[n] for n in default_names],
module=ns['__module__']
)
nm_tpl.__bases__ = bases
if typing.Generic in bases:
class_getitem = typing.Generic.__class_getitem__.__func__
nm_tpl.__class_getitem__ = classmethod(class_getitem)
# update from user namespace without overriding special namedtuple attributes
for key in ns:
if key in _prohibited_namedtuple_fields:
raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
setattr(nm_tpl, key, ns[key])
if typing.Generic in bases:
nm_tpl.__init_subclass__()
return nm_tpl
def NamedTuple(__typename, __fields=None, **kwargs):
if __fields is None:
__fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(__typename, __fields, module=_caller())
NamedTuple.__doc__ = typing.NamedTuple.__doc__
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
# On 3.8+, alter the signature so that it matches typing.NamedTuple.
# The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
# so just leave the signature as it is on 3.7.
if sys.version_info >= (3, 8):
NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
def _namedtuple_mro_entries(bases):
assert NamedTuple in bases
return (_NamedTuple,)
NamedTuple.__mro_entries__ = _namedtuple_mro_entries
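# Usage sketch for the backport (assumed example): the functional form
# routes through _make_nmtuple and behaves like typing.NamedTuple:
#     >>> Point = NamedTuple('Point', [('x', int), ('y', int)])
#     >>> Point(1, 2)
#     Point(x=1, y=2)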
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/more_itertools/recipes.py | """Imported from the recipes section of the itertools documentation.
All functions taken from the recipes section of the itertools library docs
[1]_.
Some backward-compatible usability improvements have been made.
.. [1] http://docs.python.org/library/itertools.html#recipes
"""
import math
import operator
import warnings
from collections import deque
from collections.abc import Sized
from functools import reduce
from itertools import (
chain,
combinations,
compress,
count,
cycle,
groupby,
islice,
product,
repeat,
starmap,
tee,
zip_longest,
)
from random import randrange, sample, choice
from sys import hexversion
__all__ = [
'all_equal',
'batched',
'before_and_after',
'consume',
'convolve',
'dotproduct',
'first_true',
'factor',
'flatten',
'grouper',
'iter_except',
'iter_index',
'matmul',
'ncycles',
'nth',
'nth_combination',
'padnone',
'pad_none',
'pairwise',
'partition',
'polynomial_from_roots',
'powerset',
'prepend',
'quantify',
'random_combination_with_replacement',
'random_combination',
'random_permutation',
'random_product',
'repeatfunc',
'roundrobin',
'sieve',
'sliding_window',
'subslices',
'tabulate',
'tail',
'take',
'transpose',
'triplewise',
'unique_everseen',
'unique_justseen',
]
_marker = object()
def take(n, iterable):
"""Return first *n* items of the iterable as a list.
>>> take(3, range(10))
[0, 1, 2]
If there are fewer than *n* items in the iterable, all of them are
returned.
>>> take(10, range(3))
[0, 1, 2]
"""
return list(islice(iterable, n))
def tabulate(function, start=0):
"""Return an iterator over the results of ``func(start)``,
``func(start + 1)``, ``func(start + 2)``...
*func* should be a function that accepts one integer argument.
If *start* is not specified it defaults to 0. It will be incremented each
time the iterator is advanced.
>>> square = lambda x: x ** 2
>>> iterator = tabulate(square, -3)
>>> take(4, iterator)
[9, 4, 1, 0]
"""
return map(function, count(start))
def tail(n, iterable):
"""Return an iterator over the last *n* items of *iterable*.
>>> t = tail(3, 'ABCDEFG')
>>> list(t)
['E', 'F', 'G']
"""
# If the given iterable has a length, then we can use islice to get its
# final elements. Note that if the iterable is not actually Iterable,
# either islice or deque will throw a TypeError. This is why we don't
# check if it is Iterable.
if isinstance(iterable, Sized):
yield from islice(iterable, max(0, len(iterable) - n), None)
else:
yield from iter(deque(iterable, maxlen=n))
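# Equivalence sketch: both branches yield the same items; a Sized input is
# sliced directly, anything else buffers at most n items in a deque:
#     >>> list(tail(2, iter('ABCDE'))) == list(tail(2, 'ABCDE'))
#     True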
def consume(iterator, n=None):
"""Advance *iterable* by *n* steps. If *n* is ``None``, consume it
entirely.
Efficiently exhausts an iterator without returning values. Defaults to
consuming the whole iterator, but an optional second argument may be
provided to limit consumption.
>>> i = (x for x in range(10))
>>> next(i)
0
>>> consume(i, 3)
>>> next(i)
4
>>> consume(i)
>>> next(i)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
If the iterator has fewer items remaining than the provided limit, the
whole iterator will be consumed.
>>> i = (x for x in range(3))
>>> consume(i, 5)
>>> next(i)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
def nth(iterable, n, default=None):
"""Returns the nth item or a default value.
>>> l = range(10)
>>> nth(l, 3)
3
>>> nth(l, 20, "zebra")
'zebra'
"""
return next(islice(iterable, n, None), default)
def all_equal(iterable):
"""
Returns ``True`` if all the elements are equal to each other.
>>> all_equal('aaaa')
True
>>> all_equal('aaab')
False
"""
g = groupby(iterable)
return next(g, True) and not next(g, False)
def quantify(iterable, pred=bool):
"""Return the how many times the predicate is true.
>>> quantify([True, False, True])
2
"""
return sum(map(pred, iterable))
def pad_none(iterable):
"""Returns the sequence of elements and then returns ``None`` indefinitely.
>>> take(5, pad_none(range(3)))
[0, 1, 2, None, None]
Useful for emulating the behavior of the built-in :func:`map` function.
See also :func:`padded`.
"""
return chain(iterable, repeat(None))
padnone = pad_none
def ncycles(iterable, n):
"""Returns the sequence elements *n* times
>>> list(ncycles(["a", "b"], 3))
['a', 'b', 'a', 'b', 'a', 'b']
"""
return chain.from_iterable(repeat(tuple(iterable), n))
def dotproduct(vec1, vec2):
"""Returns the dot product of the two iterables.
>>> dotproduct([10, 10], [20, 20])
400
"""
return sum(map(operator.mul, vec1, vec2))
def flatten(listOfLists):
"""Return an iterator flattening one level of nesting in a list of lists.
>>> list(flatten([[0, 1], [2, 3]]))
[0, 1, 2, 3]
See also :func:`collapse`, which can flatten multiple levels of nesting.
"""
return chain.from_iterable(listOfLists)
def repeatfunc(func, times=None, *args):
"""Call *func* with *args* repeatedly, returning an iterable over the
results.
If *times* is specified, the iterable will terminate after that many
repetitions:
>>> from operator import add
>>> times = 4
>>> args = 3, 5
>>> list(repeatfunc(add, times, *args))
[8, 8, 8, 8]
If *times* is ``None`` the iterable will not terminate:
>>> from random import randrange
>>> times = None
>>> args = 1, 11
>>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
[2, 4, 8, 1, 8, 4]
"""
if times is None:
return starmap(func, repeat(args))
return starmap(func, repeat(args, times))
def _pairwise(iterable):
"""Returns an iterator of paired items, overlapping, from the original
>>> take(4, pairwise(count()))
[(0, 1), (1, 2), (2, 3), (3, 4)]
On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
"""
a, b = tee(iterable)
next(b, None)
yield from zip(a, b)
try:
from itertools import pairwise as itertools_pairwise
except ImportError:
pairwise = _pairwise
else:
def pairwise(iterable):
yield from itertools_pairwise(iterable)
pairwise.__doc__ = _pairwise.__doc__
class UnequalIterablesError(ValueError):
def __init__(self, details=None):
msg = 'Iterables have different lengths'
if details is not None:
msg += (': index 0 has length {}; index {} has length {}').format(
*details
)
super().__init__(msg)
def _zip_equal_generator(iterables):
for combo in zip_longest(*iterables, fillvalue=_marker):
for val in combo:
if val is _marker:
raise UnequalIterablesError()
yield combo
def _zip_equal(*iterables):
# Check whether the iterables are all the same size.
try:
first_size = len(iterables[0])
for i, it in enumerate(iterables[1:], 1):
size = len(it)
if size != first_size:
break
else:
# If we didn't break out, we can use the built-in zip.
return zip(*iterables)
# If we did break out, there was a mismatch.
raise UnequalIterablesError(details=(first_size, i, size))
# If any one of the iterables didn't have a length, start reading
# them until one runs out.
except TypeError:
return _zip_equal_generator(iterables)
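# Behaviour sketch: sized iterables are length-checked eagerly and fall
# through to the built-in zip; unsized ones are checked lazily:
#     >>> list(_zip_equal('ab', 'cd'))
#     [('a', 'c'), ('b', 'd')]
#     >>> _zip_equal('ab', 'c')   # raises UnequalIterablesError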
def grouper(iterable, n, incomplete='fill', fillvalue=None):
"""Group elements from *iterable* into fixed-length groups of length *n*.
>>> list(grouper('ABCDEF', 3))
[('A', 'B', 'C'), ('D', 'E', 'F')]
The keyword arguments *incomplete* and *fillvalue* control what happens for
iterables whose length is not a multiple of *n*.
When *incomplete* is `'fill'`, the last group will contain instances of
*fillvalue*.
>>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
When *incomplete* is `'ignore'`, the last group will not be emitted.
>>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F')]
When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
>>> it = grouper('ABCDEFG', 3, incomplete='strict')
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnequalIterablesError
"""
args = [iter(iterable)] * n
if incomplete == 'fill':
return zip_longest(*args, fillvalue=fillvalue)
if incomplete == 'strict':
return _zip_equal(*args)
if incomplete == 'ignore':
return zip(*args)
else:
raise ValueError('Expected fill, strict, or ignore')
def roundrobin(*iterables):
"""Yields an item from each iterable, alternating between them.
>>> list(roundrobin('ABC', 'D', 'EF'))
['A', 'D', 'E', 'B', 'F', 'C']
This function produces the same output as :func:`interleave_longest`, but
may perform better for some inputs (in particular when the number of
iterables is small).
"""
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def partition(pred, iterable):
"""
Returns a 2-tuple of iterables derived from the input iterable.
The first yields the items that have ``pred(item) == False``.
The second yields the items that have ``pred(item) == True``.
>>> is_odd = lambda x: x % 2 != 0
>>> iterable = range(10)
>>> even_items, odd_items = partition(is_odd, iterable)
>>> list(even_items), list(odd_items)
([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
If *pred* is None, :func:`bool` is used.
>>> iterable = [0, 1, False, True, '', ' ']
>>> false_items, true_items = partition(None, iterable)
>>> list(false_items), list(true_items)
([0, False, ''], [1, True, ' '])
"""
if pred is None:
pred = bool
evaluations = ((pred(x), x) for x in iterable)
t1, t2 = tee(evaluations)
return (
(x for (cond, x) in t1 if not cond),
(x for (cond, x) in t2 if cond),
)
def powerset(iterable):
"""Yields all possible subsets of the iterable.
>>> list(powerset([1, 2, 3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
:func:`powerset` will operate on iterables that aren't :class:`set`
instances, so repeated elements in the input will produce repeated elements
in the output. Use :func:`unique_everseen` on the input to avoid generating
duplicates:
>>> seq = [1, 1, 0]
>>> list(powerset(seq))
[(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
>>> from more_itertools import unique_everseen
>>> list(powerset(unique_everseen(seq)))
[(), (1,), (0,), (1, 0)]
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
def unique_everseen(iterable, key=None):
"""
Yield unique elements, preserving order.
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
Sequences with a mix of hashable and unhashable items can be used.
The function will be slower (i.e., `O(n^2)`) for unhashable items.
Remember that ``list`` objects are unhashable - you can use the *key*
parameter to transform the list to a tuple (which is hashable) to
avoid a slowdown.
>>> iterable = ([1, 2], [2, 3], [1, 2])
>>> list(unique_everseen(iterable)) # Slow
[[1, 2], [2, 3]]
>>> list(unique_everseen(iterable, key=tuple)) # Faster
[[1, 2], [2, 3]]
Similarly, you may want to convert unhashable ``set`` objects with
``key=frozenset``. For ``dict`` objects,
``key=lambda x: frozenset(x.items())`` can be used.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
use_key = key is not None
for element in iterable:
k = key(element) if use_key else element
try:
if k not in seenset:
seenset_add(k)
yield element
except TypeError:
if k not in seenlist:
seenlist_add(k)
yield element
def unique_justseen(iterable, key=None):
"""Yields elements in order, ignoring serial duplicates
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
def iter_except(func, exception, first=None):
"""Yields results from a function repeatedly until an exception is raised.
Converts a call-until-exception interface to an iterator interface.
Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
to end the loop.
>>> l = [0, 1, 2]
>>> list(iter_except(l.pop, IndexError))
[2, 1, 0]
Multiple exceptions can be specified as a stopping condition:
>>> l = [1, 2, 3, '...', 4, 5, 6]
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
[7, 6, 5]
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
[4, 3, 2]
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
[]
"""
try:
if first is not None:
yield first()
while 1:
yield func()
except exception:
pass
def first_true(iterable, default=None, pred=None):
"""
Returns the first true value in the iterable.
If no true value is found, returns *default*.
If *pred* is not None, returns the first item for which
``pred(item) == True`` .
>>> first_true(range(10))
1
>>> first_true(range(10), pred=lambda x: x > 5)
6
>>> first_true(range(10), default='missing', pred=lambda x: x > 9)
'missing'
"""
return next(filter(pred, iterable), default)
def random_product(*args, repeat=1):
"""Draw an item at random from each of the input iterables.
>>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
('c', 3, 'Z')
If *repeat* is provided as a keyword argument, that many items will be
drawn from each iterable.
>>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
('a', 2, 'd', 3)
This is equivalent to taking a random selection from
``itertools.product(*args, repeat=repeat)``.
"""
pools = [tuple(pool) for pool in args] * repeat
return tuple(choice(pool) for pool in pools)
def random_permutation(iterable, r=None):
"""Return a random *r* length permutation of the elements in *iterable*.
If *r* is not specified or is ``None``, then *r* defaults to the length of
*iterable*.
>>> random_permutation(range(5)) # doctest:+SKIP
(3, 4, 0, 1, 2)
This is equivalent to taking a random selection from
``itertools.permutations(iterable, r)``.
"""
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(sample(pool, r))
def random_combination(iterable, r):
"""Return a random *r* length subsequence of the elements in *iterable*.
>>> random_combination(range(5), 3) # doctest:+SKIP
(2, 3, 4)
This is equivalent to taking a random selection from
``itertools.combinations(iterable, r)``.
"""
pool = tuple(iterable)
n = len(pool)
indices = sorted(sample(range(n), r))
return tuple(pool[i] for i in indices)
def random_combination_with_replacement(iterable, r):
"""Return a random *r* length subsequence of elements in *iterable*,
allowing individual elements to be repeated.
>>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
(0, 0, 1, 2, 2)
This is equivalent to taking a random selection from
``itertools.combinations_with_replacement(iterable, r)``.
"""
pool = tuple(iterable)
n = len(pool)
indices = sorted(randrange(n) for i in range(r))
return tuple(pool[i] for i in indices)
def nth_combination(iterable, r, index):
"""Equivalent to ``list(combinations(iterable, r))[index]``.
The subsequences of *iterable* that are of length *r* can be ordered
lexicographically. :func:`nth_combination` computes the subsequence at
sort position *index* directly, without computing the previous
subsequences.
>>> nth_combination(range(5), 3, 5)
(0, 3, 4)
``ValueError`` will be raised if *r* is negative or greater than the length
of *iterable*.
``IndexError`` will be raised if the given *index* is invalid.
"""
pool = tuple(iterable)
n = len(pool)
if (r < 0) or (r > n):
raise ValueError
c = 1
k = min(r, n - r)
for i in range(1, k + 1):
c = c * (n - k + i) // i
if index < 0:
index += c
if (index < 0) or (index >= c):
raise IndexError
result = []
while r:
c, n, r = c * r // n, n - 1, r - 1
while index >= c:
index -= c
c, n = c * (n - r) // n, n - 1
result.append(pool[-1 - n])
return tuple(result)
def prepend(value, iterator):
"""Yield *value*, followed by the elements in *iterator*.
>>> value = '0'
>>> iterator = ['1', '2', '3']
>>> list(prepend(value, iterator))
['0', '1', '2', '3']
To prepend multiple values, see :func:`itertools.chain`
or :func:`value_chain`.
"""
return chain([value], iterator)
def convolve(signal, kernel):
"""Convolve the iterable *signal* with the iterable *kernel*.
>>> signal = (1, 2, 3, 4, 5)
>>> kernel = [3, 2, 1]
>>> list(convolve(signal, kernel))
[3, 8, 14, 20, 26, 14, 5]
Note: the input arguments are not interchangeable, as the *kernel*
is immediately consumed and stored.
"""
kernel = tuple(kernel)[::-1]
n = len(kernel)
window = deque([0], maxlen=n) * n
for x in chain(signal, repeat(0, n - 1)):
window.append(x)
yield sum(map(operator.mul, kernel, window))
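# Streaming sketch: because *kernel* is materialized up front, *signal*
# may be an unbounded iterator:
#     >>> from itertools import count, islice
#     >>> list(islice(convolve(count(), [1, 1]), 4))
#     [0, 1, 3, 5]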
def before_and_after(predicate, it):
"""A variant of :func:`takewhile` that allows complete access to the
remainder of the iterator.
>>> it = iter('ABCdEfGhI')
>>> all_upper, remainder = before_and_after(str.isupper, it)
>>> ''.join(all_upper)
'ABC'
>>> ''.join(remainder) # takewhile() would lose the 'd'
'dEfGhI'
Note that the first iterator must be fully consumed before the second
iterator can generate valid results.
"""
it = iter(it)
transition = []
def true_iterator():
for elem in it:
if predicate(elem):
yield elem
else:
transition.append(elem)
return
# Note: this is different from itertools recipes to allow nesting
# before_and_after remainders into before_and_after again. See tests
# for an example.
remainder_iterator = chain(transition, it)
return true_iterator(), remainder_iterator
def triplewise(iterable):
"""Return overlapping triplets from *iterable*.
>>> list(triplewise('ABCDE'))
[('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
"""
for (a, _), (b, c) in pairwise(pairwise(iterable)):
yield a, b, c
def sliding_window(iterable, n):
"""Return a sliding window of width *n* over *iterable*.
>>> list(sliding_window(range(6), 4))
[(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
If *iterable* has fewer than *n* items, then nothing is yielded:
>>> list(sliding_window(range(3), 4))
[]
For a variant with more features, see :func:`windowed`.
"""
it = iter(iterable)
window = deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def subslices(iterable):
"""Return all contiguous non-empty subslices of *iterable*.
>>> list(subslices('ABC'))
[['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]
This is similar to :func:`substrings`, but emits items in a different
order.
"""
seq = list(iterable)
slices = starmap(slice, combinations(range(len(seq) + 1), 2))
return map(operator.getitem, repeat(seq), slices)
def polynomial_from_roots(roots):
"""Compute a polynomial's coefficients from its roots.
>>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
>>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
[1, -4, -17, 60]
"""
# Use math.prod for Python 3.8+,
prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1))
roots = list(map(operator.neg, roots))
return [
sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1)
]
def iter_index(iterable, value, start=0):
"""Yield the index of each place in *iterable* that *value* occurs,
beginning with index *start*.
See :func:`locate` for a more general means of finding the indexes
associated with particular values.
>>> list(iter_index('AABCADEAF', 'A'))
[0, 1, 4, 7]
"""
try:
seq_index = iterable.index
except AttributeError:
# Slow path for general iterables
it = islice(iterable, start, None)
for i, element in enumerate(it, start):
if element is value or element == value:
yield i
else:
# Fast path for sequences
i = start - 1
try:
while True:
i = seq_index(value, i + 1)
yield i
except ValueError:
pass
def sieve(n):
"""Yield the primes less than n.
>>> list(sieve(30))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
"""
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
data = bytearray((0, 1)) * (n // 2)
data[:3] = 0, 0, 0
limit = isqrt(n) + 1
for p in compress(range(limit), data):
data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
data[2] = 1
return iter_index(data, 1) if n > 2 else iter([])
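# How this works (sketch): ``data`` keeps one flag byte per integer below
# *n*, primed so only odd indices start at 1; for each prime p found,
# multiples from p*p upward are cleared in steps of 2*p, 2 is re-marked,
# and iter_index(data, 1) yields the surviving indices -- the primes.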
def batched(iterable, n):
"""Batch data into lists of length *n*. The last batch may be shorter.
>>> list(batched('ABCDEFG', 3))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
This recipe is from the ``itertools`` docs. This library also provides
:func:`chunked`, which has a different implementation.
"""
if hexversion >= 0x30C00A0: # Python 3.12.0a0
warnings.warn(
(
'batched will be removed in a future version of '
'more-itertools. Use the standard library '
'itertools.batched function instead'
),
DeprecationWarning,
)
it = iter(iterable)
while True:
batch = list(islice(it, n))
if not batch:
break
yield batch
def transpose(it):
"""Swap the rows and columns of the input.
>>> list(transpose([(1, 2, 3), (11, 22, 33)]))
[(1, 11), (2, 22), (3, 33)]
The caller should ensure that the dimensions of the input are compatible.
"""
# TODO: when 3.9 goes end-of-life, add strict=True to this.
return zip(*it)
def matmul(m1, m2):
"""Multiply two matrices.
>>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
[[49, 80], [41, 60]]
The caller should ensure that the dimensions of the input matrices are
compatible with each other.
"""
n = len(m2[0])
return batched(starmap(dotproduct, product(m1, transpose(m2))), n)
def factor(n):
"""Yield the prime factors of n.
>>> list(factor(360))
[2, 2, 2, 3, 3, 5]
"""
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
for prime in sieve(isqrt(n) + 1):
while True:
quotient, remainder = divmod(n, prime)
if remainder:
break
yield prime
n = quotient
if n == 1:
return
if n >= 2:
yield n
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/more_itertools/__init__.py | """More routines for operating on iterables, beyond itertools"""
from .more import * # noqa
from .recipes import * # noqa
__version__ = '9.1.0'
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/more_itertools/more.py | import warnings
from collections import Counter, defaultdict, deque, abc
from collections.abc import Sequence
from functools import partial, reduce, wraps
from heapq import heapify, heapreplace, heappop
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
from math import exp, factorial, floor, log
from queue import Empty, Queue
from random import random, randrange, uniform
from operator import itemgetter, mul, sub, gt, lt, ge, le
from sys import hexversion, maxsize
from time import monotonic
from .recipes import (
_marker,
_zip_equal,
UnequalIterablesError,
consume,
flatten,
pairwise,
powerset,
take,
unique_everseen,
all_equal,
)
__all__ = [
'AbortThread',
'SequenceView',
'UnequalIterablesError',
'adjacent',
'all_unique',
'always_iterable',
'always_reversible',
'bucket',
'callback_iter',
'chunked',
'chunked_even',
'circular_shifts',
'collapse',
'combination_index',
'consecutive_groups',
'constrained_batches',
'consumer',
'count_cycle',
'countable',
'difference',
'distinct_combinations',
'distinct_permutations',
'distribute',
'divide',
'duplicates_everseen',
'duplicates_justseen',
'exactly_n',
'filter_except',
'first',
'gray_product',
'groupby_transform',
'ichunked',
'iequals',
'ilen',
'interleave',
'interleave_evenly',
'interleave_longest',
'intersperse',
'is_sorted',
'islice_extended',
'iterate',
'last',
'locate',
'longest_common_prefix',
'lstrip',
'make_decorator',
'map_except',
'map_if',
'map_reduce',
'mark_ends',
'minmax',
'nth_or_last',
'nth_permutation',
'nth_product',
'numeric_range',
'one',
'only',
'padded',
'partitions',
'peekable',
'permutation_index',
'product_index',
'raise_',
'repeat_each',
'repeat_last',
'replace',
'rlocate',
'rstrip',
'run_length',
'sample',
'seekable',
'set_partitions',
'side_effect',
'sliced',
'sort_together',
'split_after',
'split_at',
'split_before',
'split_into',
'split_when',
'spy',
'stagger',
'strip',
'strictly_n',
'substrings',
'substrings_indexes',
'time_limited',
'unique_in_window',
'unique_to_each',
'unzip',
'value_chain',
'windowed',
'windowed_complete',
'with_iter',
'zip_broadcast',
'zip_equal',
'zip_offset',
]
def chunked(iterable, n, strict=False):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
By default, the last yielded list will have fewer than *n* elements
if the length of *iterable* is not divisible by *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
If the length of *iterable* is not divisible by *n* and *strict* is
``True``, then ``ValueError`` will be raised before the last
list is yielded.
"""
iterator = iter(partial(take, n, iter(iterable)), [])
if strict:
if n is None:
raise ValueError('n must not be None when using strict mode.')
def ret():
for chunk in iterator:
if len(chunk) != n:
raise ValueError('iterable is not divisible by n.')
yield chunk
return iter(ret())
else:
return iterator
def first(iterable, default=_marker):
"""Return the first item of *iterable*, or *default* if *iterable* is
empty.
>>> first([0, 1, 2, 3])
0
>>> first([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
:func:`first` is useful when you have a generator of expensive-to-retrieve
values and want any arbitrary one. It is marginally shorter than
``next(iter(iterable), default)``.
"""
try:
return next(iter(iterable))
except StopIteration as e:
if default is _marker:
raise ValueError(
'first() was called on an empty iterable, and no '
'default value was provided.'
) from e
return default
def last(iterable, default=_marker):
"""Return the last item of *iterable*, or *default* if *iterable* is
empty.
>>> last([0, 1, 2, 3])
3
>>> last([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
try:
if isinstance(iterable, Sequence):
return iterable[-1]
# Work around https://bugs.python.org/issue38525
elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
return next(reversed(iterable))
else:
return deque(iterable, maxlen=1)[-1]
except (IndexError, TypeError, StopIteration):
if default is _marker:
raise ValueError(
'last() was called on an empty iterable, and no default was '
'provided.'
)
return default
def nth_or_last(iterable, n, default=_marker):
"""Return the nth or the last item of *iterable*,
or *default* if *iterable* is empty.
>>> nth_or_last([0, 1, 2, 3], 2)
2
>>> nth_or_last([0, 1], 2)
1
>>> nth_or_last([], 0, 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
return last(islice(iterable, n + 1), default=default)
class peekable:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
>>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
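# Caching sketch: a negative index drains the source iterator into the
# cache, so lookups keep working but memory grows with the remainder:
#     >>> p = peekable(iter(range(5)))
#     >>> p[-1]
#     4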
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# This approach was selected because benchmarks showed it's likely the
# fastest of the known implementations at the time of writing.
# See GitHub tracker: #236, #230.
counter = count()
deque(zip(iterable, counter), maxlen=0)
return next(counter)
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too few items in iterable (expected 1)
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
first_value = next(it)
except StopIteration as e:
raise (
too_short or ValueError('too few items in iterable (expected 1)')
) from e
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def raise_(exception, *args):
raise exception(*args)
def strictly_n(iterable, n, too_short=None, too_long=None):
"""Validate that *iterable* has exactly *n* items and return them if
it does. If it has fewer than *n* items, call function *too_short*
with those items. If it has more than *n* items, call function
*too_long* with the first ``n + 1`` items.
>>> iterable = ['a', 'b', 'c', 'd']
>>> n = 4
>>> list(strictly_n(iterable, n))
['a', 'b', 'c', 'd']
By default, *too_short* and *too_long* are functions that raise
``ValueError``.
>>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too few items in iterable (got 2)
>>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (got at least 3)
You can instead supply functions that do something else.
*too_short* will be called with the number of items in *iterable*.
*too_long* will be called with `n + 1`.
>>> def too_short(item_count):
... raise RuntimeError
>>> it = strictly_n('abcd', 6, too_short=too_short)
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
>>> def too_long(item_count):
... print('The boss is going to hear about this')
>>> it = strictly_n('abcdef', 4, too_long=too_long)
>>> list(it)
The boss is going to hear about this
['a', 'b', 'c', 'd']
"""
if too_short is None:
too_short = lambda item_count: raise_(
ValueError,
'Too few items in iterable (got {})'.format(item_count),
)
if too_long is None:
too_long = lambda item_count: raise_(
ValueError,
'Too many items in iterable (got at least {})'.format(item_count),
)
it = iter(iterable)
for i in range(n):
try:
item = next(it)
except StopIteration:
too_short(i)
return
else:
yield item
try:
next(it)
except StopIteration:
pass
else:
too_long(n + 1)
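# Illustrative use of the hooks (``log_short`` is a hypothetical
# callback, not part of the library):
#
#     def log_short(item_count):
#         print('only got', item_count, 'items')
#
#     list(strictly_n(range(3), 5, too_short=log_short))
#     # prints 'only got 3 items' and evaluates to [0, 1, 2]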
def distinct_permutations(iterable, r=None):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
input items and each `x_i` is the count of the `i`-th distinct item in
the input sequence.
If *r* is given, only the *r*-length permutations are yielded.
>>> sorted(distinct_permutations([1, 0, 1], r=2))
[(0, 1), (1, 0), (1, 1)]
>>> sorted(distinct_permutations(range(3), r=2))
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
"""
# Algorithm: https://w.wiki/Qai
def _full(A):
while True:
# Yield the permutation we have
yield tuple(A)
# Find the largest index i such that A[i] < A[i + 1]
for i in range(size - 2, -1, -1):
if A[i] < A[i + 1]:
break
# If no such index exists, this permutation is the last one
else:
return
# Find the largest index j greater than i such that A[i] < A[j]
for j in range(size - 1, i, -1):
if A[i] < A[j]:
break
# Swap the value of A[i] with that of A[j], then reverse the
# sequence from A[i + 1] to form the new permutation
A[i], A[j] = A[j], A[i]
A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
# Algorithm: modified from the above
def _partial(A, r):
# Split A into the first r items and the last r items
head, tail = A[:r], A[r:]
right_head_indexes = range(r - 1, -1, -1)
left_tail_indexes = range(len(tail))
while True:
# Yield the permutation we have
yield tuple(head)
# Starting from the right, find the first index of the head with
# value smaller than the maximum value of the tail - call it i.
pivot = tail[-1]
for i in right_head_indexes:
if head[i] < pivot:
break
pivot = head[i]
else:
return
# Starting from the left, find the first value of the tail
# with a value greater than head[i] and swap.
for j in left_tail_indexes:
if tail[j] > head[i]:
head[i], tail[j] = tail[j], head[i]
break
# If we didn't find one, start from the right and find the first
# index of the head with a value greater than head[i] and swap.
else:
for j in right_head_indexes:
if head[j] > head[i]:
head[i], head[j] = head[j], head[i]
break
# Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
tail += head[: i - r : -1] # head[i + 1:][::-1]
i += 1
head[i:], tail[:] = tail[: r - i], tail[r - i :]
items = sorted(iterable)
size = len(items)
if r is None:
r = size
if 0 < r <= size:
return _full(items) if (r == size) else _partial(items, r)
return iter(() if r else ((),))
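# Worked count from the docstring formula: for input [1, 0, 1], n = 3
# with multiplicities (2, 1), so 3! / (2! * 1!) = 3 distinct
# permutations, versus the 3! = 6 that itertools.permutations would
# yield with duplicates included.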
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
# interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
# islice(..., 1, None) -> x_0, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others,
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values:
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
window = deque(maxlen=n)
i = n
for _ in map(window.append, seq):
i -= 1
if not i:
i = step
yield tuple(window)
size = len(window)
if size == 0:
return
elif size < n:
yield tuple(chain(window, repeat(fillvalue, n - size)))
elif 0 < i < min(step, n):
window += (fillvalue,) * i
yield tuple(window)
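# Tail-handling sketch: windowed([1, 2, 3, 4], 3, step=2) yields
# (1, 2, 3) in the main loop, leaving item 4 un-emitted with i == 1;
# since 0 < i < min(step, n), the final branch pads the deque with one
# fillvalue and yields (3, 4, None).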
def substrings(iterable):
"""Yield all of the substrings of *iterable*.
>>> [''.join(s) for s in substrings('more')]
['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
Note that non-string iterables can also be subdivided.
>>> list(substrings([0, 1, 2]))
[(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
"""
# The length-1 substrings
seq = []
for item in iter(iterable):
seq.append(item)
yield (item,)
seq = tuple(seq)
item_count = len(seq)
# And the rest
for n in range(2, item_count + 1):
for i in range(item_count - n + 1):
yield seq[i : i + n]
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
)
class bucket:
"""Wrap *iterable* and return an object that buckets it iterable into
child iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
>>> sorted(list(s)) # Get the keys
['a', 'b', 'c']
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
def __init__(self, iterable, key, validator=None):
self._it = iter(iterable)
self._key = key
self._cache = defaultdict(deque)
self._validator = validator or (lambda x: True)
def __contains__(self, value):
if not self._validator(value):
return False
try:
item = next(self[value])
except StopIteration:
return False
else:
self._cache[value].appendleft(item)
return True
def _get_values(self, value):
"""
Helper to yield items from the parent iterator that match *value*.
Items that don't match are stored in the local cache as they
are encountered.
"""
while True:
# If we've cached some items that match the target value, emit
# the first one and evict it from the cache.
if self._cache[value]:
yield self._cache[value].popleft()
# Otherwise we need to advance the parent iterator to search for
# a matching item, caching the rest.
else:
while True:
try:
item = next(self._it)
except StopIteration:
return
item_value = self._key(item)
if item_value == value:
yield item
break
elif self._validator(item_value):
self._cache[item_value].append(item)
def __iter__(self):
for item in self._it:
item_value = self._key(item)
if self._validator(item_value):
self._cache[item_value].append(item)
yield from self._cache.keys()
def __getitem__(self, value):
if not self._validator(value):
return iter(())
return self._get_values(value)
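# Caching sketch for the docstring example: pulling from s['b'] first
# advances the shared source, stashing any 'a'- and 'c'-keyed items in
# self._cache along the way, so a later s['a'] is served from that cache
# before the source is read any further.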
def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head.copy(), chain(head, it)
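# Design note: the list returned to the caller is ``head.copy()`` rather
# than ``head`` itself, so mutating it cannot corrupt the ``chain`` that
# replays those same buffered items.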
def interleave(*iterables):
"""Return a new iterable yielding from each iterable in turn,
until the shortest is exhausted.
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7]
For a version that doesn't terminate after the shortest iterable is
exhausted, see :func:`interleave_longest`.
"""
return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
return (x for x in i if x is not _marker)
def interleave_evenly(iterables, lengths=None):
"""
Interleave multiple iterables so that their elements are evenly distributed
throughout the output sequence.
>>> iterables = [1, 2, 3, 4, 5], ['a', 'b']
>>> list(interleave_evenly(iterables))
[1, 2, 'a', 3, 4, 'b', 5]
>>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]]
>>> list(interleave_evenly(iterables))
[1, 6, 4, 2, 7, 3, 8, 5]
This function requires iterables of known length. Iterables without
``__len__()`` can be used by manually specifying lengths with *lengths*:
>>> from itertools import combinations, repeat
>>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']]
>>> lengths = [4 * (4 - 1) // 2, 3]
>>> list(interleave_evenly(iterables, lengths=lengths))
[(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c']
Based on Bresenham's algorithm.
"""
if lengths is None:
try:
lengths = [len(it) for it in iterables]
except TypeError:
raise ValueError(
'Iterable lengths could not be determined automatically. '
'Specify them with the lengths keyword.'
)
elif len(iterables) != len(lengths):
raise ValueError('Mismatching number of iterables and lengths.')
dims = len(lengths)
# sort iterables by length, descending
lengths_permute = sorted(
range(dims), key=lambda i: lengths[i], reverse=True
)
lengths_desc = [lengths[i] for i in lengths_permute]
iters_desc = [iter(iterables[i]) for i in lengths_permute]
# the longest iterable is the primary one (Bresenham: the longest
# distance along an axis)
delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:]
iter_primary, iters_secondary = iters_desc[0], iters_desc[1:]
errors = [delta_primary // dims] * len(deltas_secondary)
to_yield = sum(lengths)
while to_yield:
yield next(iter_primary)
to_yield -= 1
# update errors for each secondary iterable
errors = [e - delta for e, delta in zip(errors, deltas_secondary)]
# those iterables for which the error is negative are yielded
# ("diagonal step" in Bresenham)
for i, e in enumerate(errors):
if e < 0:
yield next(iters_secondary[i])
to_yield -= 1
errors[i] += delta_primary
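# Error-accumulation trace for the docstring's first example, lengths
# (5, 2): errors starts at [5 // 2] = [2]; each primary yield subtracts
# the secondary length 2, and when the error dips below zero a secondary
# item is emitted and 5 is added back. That produces
# 1, 2, 'a', 3, 4, 'b', 5.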
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and
will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
if (
((levels is not None) and (level > levels))
or isinstance(node, (str, bytes))
or ((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
yield from walk(child, level + 1)
yield from walk(iterable, 0)
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
"""Invoke *func* on each item in *iterable* (or on each *chunk_size* group
of items) before yielding the item.
`func` must be a function that takes a single argument. Its return value
will be discarded.
*before* and *after* are optional functions that take no arguments. They
will be executed before iteration starts and after it ends, respectively.
`side_effect` can be used for logging, updating progress bars, or anything
that is not functionally "pure."
Emitting a status message:
>>> from more_itertools import consume
>>> func = lambda item: print('Received {}'.format(item))
>>> consume(side_effect(func, range(2)))
Received 0
Received 1
Operating on chunks of items:
>>> pair_sums = []
>>> func = lambda chunk: pair_sums.append(sum(chunk))
>>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
[0, 1, 2, 3, 4, 5]
>>> list(pair_sums)
[1, 5, 9]
Writing to a file-like object:
>>> from io import StringIO
>>> from more_itertools import consume
>>> f = StringIO()
>>> func = lambda x: print(x, file=f)
>>> before = lambda: print(u'HEADER', file=f)
>>> after = f.close
>>> it = [u'a', u'b', u'c']
>>> consume(side_effect(func, it, before=before, after=after))
>>> f.closed
True
"""
try:
if before is not None:
before()
if chunk_size is None:
for item in iterable:
func(item)
yield item
else:
for chunk in chunked(iterable, chunk_size):
func(chunk)
yield from chunk
finally:
if after is not None:
after()
def sliced(seq, n, strict=False):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
By default, the last yielded slice will have fewer than *n* elements
if the length of *seq* is not divisible by *n*:
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
If the length of *seq* is not divisible by *n* and *strict* is
``True``, then ``ValueError`` will be raised before the last
slice is yielded.
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
if strict:
def ret():
for _slice in iterator:
if len(_slice) != n:
raise ValueError("seq is not divisible by n.")
yield _slice
return iter(ret())
else:
return iterator
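# Termination note: count(0, n) generates slice offsets forever;
# takewhile(len, ...) stops at the first empty slice, which is how the
# generator terminates once *seq* is exhausted.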
def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
"""Yield lists of items from *iterable*, where each list is delimited by
an item where callable *pred* returns ``True``.
>>> list(split_at('abcdcba', lambda x: x == 'b'))
[['a'], ['c', 'd', 'c'], ['a']]
>>> list(split_at(range(10), lambda n: n % 2 == 1))
[[0], [2], [4], [6], [8], []]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
[[0], [2], [4, 5, 6, 7, 8, 9]]
By default, the delimiting items are not included in the output.
To include them, set *keep_separator* to ``True``.
>>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
[['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item):
yield buf
if keep_separator:
yield [item]
if maxsplit == 1:
yield list(it)
return
buf = []
maxsplit -= 1
else:
buf.append(item)
yield buf
def split_before(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends just before
an item for which callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
if pred(item) and buf:
yield buf
if maxsplit == 1:
yield [item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(item)
if buf:
yield buf
def split_after(iterable, pred, maxsplit=-1):
"""Yield lists of items from *iterable*, where each list ends with an
item where callable *pred* returns ``True``:
>>> list(split_after('one1two2', lambda s: s.isdigit()))
[['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
>>> list(split_after(range(10), lambda n: n % 3 == 0))
[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
[[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
"""
if maxsplit == 0:
yield list(iterable)
return
buf = []
it = iter(iterable)
for item in it:
buf.append(item)
if pred(item) and buf:
yield buf
if maxsplit == 1:
buf = list(it)
if buf:
yield buf
return
buf = []
maxsplit -= 1
if buf:
yield buf
def split_when(iterable, pred, maxsplit=-1):
"""Split *iterable* into pieces based on the output of *pred*.
*pred* should be a function that takes successive pairs of items and
returns ``True`` if the iterable should be split in between them.
For example, to find runs of increasing numbers, split the iterable when
element ``i`` is larger than element ``i + 1``:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
[[1, 2, 3, 3], [2, 5], [2, 4], [2]]
At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
then there is no limit on the number of splits:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
... lambda x, y: x > y, maxsplit=2))
[[1, 2, 3, 3], [2, 5], [2, 4, 2]]
"""
if maxsplit == 0:
yield list(iterable)
return
it = iter(iterable)
try:
cur_item = next(it)
except StopIteration:
return
buf = [cur_item]
for next_item in it:
if pred(cur_item, next_item):
yield buf
if maxsplit == 1:
yield [next_item] + list(it)
return
buf = []
maxsplit -= 1
buf.append(next_item)
cur_item = next_item
yield buf
def split_into(iterable, sizes):
"""Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
contain items up to the end of *iterable*, the same way that
:func:`itertools.islice` does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be a row from a
table in which multiple columns represent elements of the same feature
(e.g. a point represented by x, y, z), but the format is not the same
across all columns.
"""
# convert the iterable argument into an iterator so its contents can
# be consumed by islice in case it is a generator
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
"""Yield the elements from *iterable*, followed by *fillvalue*, such that
at least *n* items are emitted.
>>> list(padded([1, 2, 3], '?', 5))
[1, 2, 3, '?', '?']
If *next_multiple* is ``True``, *fillvalue* will be emitted until the
number of items emitted is a multiple of *n*::
>>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
[1, 2, 3, 4, None, None]
If *n* is ``None``, *fillvalue* will be emitted indefinitely.
"""
it = iter(iterable)
if n is None:
yield from chain(it, repeat(fillvalue))
elif n < 1:
raise ValueError('n must be at least 1')
else:
item_count = 0
for item in it:
yield item
item_count += 1
remaining = (n - item_count) % n if next_multiple else n - item_count
for _ in range(remaining):
yield fillvalue
def repeat_each(iterable, n=2):
"""Repeat each element in *iterable* *n* times.
>>> list(repeat_each('ABC', 3))
['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
"""
return chain.from_iterable(map(repeat, iterable, repeat(n)))
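# How the one-liner works: map(repeat, iterable, repeat(n)) lazily
# builds repeat(item, n) for each item, and chain.from_iterable flattens
# those runs, so no intermediate list of copies is materialized.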
def repeat_last(iterable, default=None):
"""After the *iterable* is exhausted, keep yielding its last element.
>>> list(islice(repeat_last(range(3)), 5))
[0, 1, 2, 2, 2]
If the iterable is empty, yield *default* forever::
>>> list(islice(repeat_last(range(0), 42), 5))
[42, 42, 42, 42, 42]
"""
item = _marker
for item in iterable:
yield item
final = default if item is _marker else item
yield from repeat(final)
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)]
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
"""Yield tuples whose elements are offset from *iterable*.
The amount by which the `i`-th item in each tuple is offset is given by
the `i`-th item in *offsets*.
>>> list(stagger([0, 1, 2, 3]))
[(None, 0, 1), (0, 1, 2), (1, 2, 3)]
>>> list(stagger(range(8), offsets=(0, 2, 4)))
[(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
By default, the sequence will end when the final element of a tuple is the
last item in the iterable. To continue until the first element of a tuple
is the last item in the iterable, set *longest* to ``True``::
>>> list(stagger([0, 1, 2, 3], longest=True))
[(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
children = tee(iterable, len(offsets))
return zip_offset(
*children, offsets=offsets, longest=longest, fillvalue=fillvalue
)
def zip_equal(*iterables):
"""``zip`` the input *iterables* together, but raise
``UnequalIterablesError`` if they aren't all the same length.
>>> it_1 = range(3)
>>> it_2 = iter('abc')
>>> list(zip_equal(it_1, it_2))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> it_1 = range(3)
>>> it_2 = iter('abcd')
>>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
more_itertools.more.UnequalIterablesError: Iterables have different
lengths
"""
if hexversion >= 0x30A00A6:
warnings.warn(
(
'zip_equal will be removed in a future version of '
'more-itertools. Use the builtin zip function with '
'strict=True instead.'
),
DeprecationWarning,
)
return _zip_equal(*iterables)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
"""``zip`` the input *iterables* together, but offset the `i`-th iterable
by the `i`-th item in *offsets*.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
This can be used as a lightweight alternative to SciPy or pandas to analyze
data sets in which some series have a lead or lag relationship.
By default, the sequence will end when the shortest iterable is exhausted.
To continue until the longest iterable is exhausted, set *longest* to
``True``.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
if len(iterables) != len(offsets):
raise ValueError("Number of iterables and offsets didn't match")
staggered = []
for it, n in zip(iterables, offsets):
if n < 0:
staggered.append(chain(repeat(fillvalue, -n), it))
elif n > 0:
staggered.append(islice(it, n, None))
else:
staggered.append(it)
if longest:
return zip_longest(*staggered, fillvalue=fillvalue)
return zip(*staggered)
def sort_together(iterables, key_list=(0,), key=None, reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
To sort by a function of the elements of the iterable, pass a *key*
function. Its arguments are the elements of the iterables corresponding to
the key list::
>>> names = ('a', 'b', 'c')
>>> lengths = (1, 2, 3)
>>> widths = (5, 2, 1)
>>> def area(length, width):
... return length * width
>>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
[('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
if key is None:
# if there is no key function, the key argument to sorted is an
# itemgetter
key_argument = itemgetter(*key_list)
else:
# if there is a key function, call it with the items at the offsets
# specified by the key function as arguments
key_list = list(key_list)
if len(key_list) == 1:
# if key_list contains a single item, pass the item at that offset
# as the only argument to the key function
key_offset = key_list[0]
key_argument = lambda zipped_items: key(zipped_items[key_offset])
else:
# if key_list contains multiple items, use itemgetter to return a
# tuple of items, which we pass as *args to the key function
get_key_items = itemgetter(*key_list)
key_argument = lambda zipped_items: key(
*get_key_items(zipped_items)
)
return list(
zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
)
def unzip(iterable):
"""The inverse of :func:`zip`, this function disaggregates the elements
of the zipped *iterable*.
The ``i``-th returned iterable contains the ``i``-th element from each
element of the zipped iterable. The first element of the zipped iterable
is used to determine how many iterables to return.
>>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> letters, numbers = unzip(iterable)
>>> list(letters)
['a', 'b', 'c', 'd']
>>> list(numbers)
[1, 2, 3, 4]
This is similar to using ``zip(*iterable)``, but it avoids reading
*iterable* into memory. Note, however, that this function uses
:func:`itertools.tee` and thus may require significant storage.
"""
head, iterable = spy(iter(iterable))
if not head:
# empty iterable, e.g. zip([], [], [])
return ()
# spy returns a one-length iterable as head
head = head[0]
iterables = tee(iterable, len(head))
def itemgetter(i):
def getter(obj):
try:
return obj[i]
except IndexError:
# basically if we have an iterable like
# iter([(1, 2, 3), (4, 5), (6,)])
# the second unzipped iterable would fail at the third tuple
# since it would try to access tup[1]
# same with the third unzipped iterable and the second tuple
# to support these "improperly zipped" iterables,
# we create a custom itemgetter
# which just stops the unzipped iterables
# at first length mismatch
raise StopIteration
return getter
return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
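# Note on the inner itemgetter: ``getter`` is a plain function, so the
# StopIteration it raises propagates out of map() and reads as ordinary
# exhaustion to consumers; PEP 479 only converts StopIteration raised
# inside generators into RuntimeError.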
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
If the length of the iterable is smaller than n, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
"""
if n < 1:
raise ValueError('n must be at least 1')
try:
iterable[:0]
except TypeError:
seq = tuple(iterable)
else:
seq = iterable
q, r = divmod(len(seq), n)
ret = []
stop = 0
for i in range(1, n + 1):
start = stop
stop += q + 1 if i <= r else q
ret.append(iter(seq[start:stop]))
return ret
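# Allocation sketch: dividing 7 items 3 ways gives divmod(7, 3) == (2, 1),
# so the first r = 1 part gets q + 1 = 3 items and the rest get q = 2
# each: [1, 2, 3], [4, 5], [6, 7], as in the docstring.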
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
def adjacent(predicate, iterable, distance=1):
"""Return an iterable over `(bool, item)` tuples where the `item` is
drawn from *iterable* and the `bool` indicates whether
that item satisfies the *predicate* or is adjacent to an item that does.
For example, to find whether items are adjacent to a ``3``::
>>> list(adjacent(lambda x: x == 3, range(6)))
[(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
Set *distance* to change what counts as adjacent. For example, to find
whether items are two places away from a ``3``:
>>> list(adjacent(lambda x: x == 3, range(6), distance=2))
[(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
This is useful for contextualizing the results of a search function.
For example, a code comparison tool might want to identify lines that
have changed, but also surrounding lines to give the viewer of the diff
context.
The predicate function will only be called once for each item in the
iterable.
See also :func:`groupby_transform`, which can be used with this function
to group ranges of items with the same `bool` value.
"""
# Allow distance=0 mainly for testing that it reproduces results with map()
if distance < 0:
raise ValueError('distance must be at least 0')
i1, i2 = tee(iterable)
padding = [False] * distance
selected = chain(padding, map(predicate, i1), padding)
adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
return zip(adjacent_to_selected, i2)
def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
"""An extension of :func:`itertools.groupby` that can apply transformations
to the grouped data.
* *keyfunc* is a function computing a key value for each item in *iterable*
* *valuefunc* is a function that transforms the individual items from
*iterable* after grouping
* *reducefunc* is a function that transforms each group of items
>>> iterable = 'aAAbBBcCC'
>>> keyfunc = lambda k: k.upper()
>>> valuefunc = lambda v: v.lower()
>>> reducefunc = lambda g: ''.join(g)
>>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
[('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
Each optional argument defaults to an identity function if not specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
ret = groupby(iterable, keyfunc)
if valuefunc:
ret = ((k, map(valuefunc, g)) for k, g in ret)
if reducefunc:
ret = ((k, reducefunc(g)) for k, g in ret)
return ret
class numeric_range(abc.Sequence, abc.Hashable):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = iter(numeric_range(start, stop, step))
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
_EMPTY_HASH = hash(range(0, 0))
def __init__(self, *args):
argc = len(args)
if argc == 1:
(self._stop,) = args
self._start = type(self._stop)(0)
self._step = type(self._stop - self._start)(1)
elif argc == 2:
self._start, self._stop = args
self._step = type(self._stop - self._start)(1)
elif argc == 3:
self._start, self._stop, self._step = args
elif argc == 0:
raise TypeError(
'numeric_range expected at least '
'1 argument, got {}'.format(argc)
)
else:
raise TypeError(
'numeric_range expected at most '
'3 arguments, got {}'.format(argc)
)
self._zero = type(self._step)(0)
if self._step == self._zero:
raise ValueError('numeric_range() arg 3 must not be zero')
self._growing = self._step > self._zero
self._init_len()
def __bool__(self):
if self._growing:
return self._start < self._stop
else:
return self._start > self._stop
def __contains__(self, elem):
if self._growing:
if self._start <= elem < self._stop:
return (elem - self._start) % self._step == self._zero
else:
if self._start >= elem > self._stop:
return (self._start - elem) % (-self._step) == self._zero
return False
def __eq__(self, other):
if isinstance(other, numeric_range):
empty_self = not bool(self)
empty_other = not bool(other)
if empty_self or empty_other:
return empty_self and empty_other # True if both empty
else:
return (
self._start == other._start
and self._step == other._step
and self._get_by_index(-1) == other._get_by_index(-1)
)
else:
return False
def __getitem__(self, key):
if isinstance(key, int):
return self._get_by_index(key)
elif isinstance(key, slice):
step = self._step if key.step is None else key.step * self._step
if key.start is None or key.start <= -self._len:
start = self._start
elif key.start >= self._len:
start = self._stop
else: # -self._len < key.start < self._len
start = self._get_by_index(key.start)
if key.stop is None or key.stop >= self._len:
stop = self._stop
elif key.stop <= -self._len:
stop = self._start
else: # -self._len < key.stop < self._len
stop = self._get_by_index(key.stop)
return numeric_range(start, stop, step)
else:
raise TypeError(
'numeric range indices must be '
'integers or slices, not {}'.format(type(key).__name__)
)
def __hash__(self):
if self:
return hash((self._start, self._get_by_index(-1), self._step))
else:
return self._EMPTY_HASH
def __iter__(self):
values = (self._start + (n * self._step) for n in count())
if self._growing:
return takewhile(partial(gt, self._stop), values)
else:
return takewhile(partial(lt, self._stop), values)
def __len__(self):
return self._len
def _init_len(self):
if self._growing:
start = self._start
stop = self._stop
step = self._step
else:
start = self._stop
stop = self._start
step = -self._step
distance = stop - start
if distance <= self._zero:
self._len = 0
else: # distance > 0 and step > 0: regular euclidean division
q, r = divmod(distance, step)
self._len = int(q) + int(r != self._zero)
def __reduce__(self):
return numeric_range, (self._start, self._stop, self._step)
def __repr__(self):
if self._step == 1:
return "numeric_range({}, {})".format(
repr(self._start), repr(self._stop)
)
else:
return "numeric_range({}, {}, {})".format(
repr(self._start), repr(self._stop), repr(self._step)
)
def __reversed__(self):
return iter(
numeric_range(
self._get_by_index(-1), self._start - self._step, -self._step
)
)
def count(self, value):
return int(value in self)
def index(self, value):
if self._growing:
if self._start <= value < self._stop:
q, r = divmod(value - self._start, self._step)
if r == self._zero:
return int(q)
else:
if self._start >= value > self._stop:
q, r = divmod(self._start - value, -self._step)
if r == self._zero:
return int(q)
raise ValueError("{} is not in numeric range".format(value))
def _get_by_index(self, i):
if i < 0:
i += self._len
if i < 0 or i >= self._len:
raise IndexError("numeric range object index out of range")
return self._start + i * self._step
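# Length sketch for _init_len: numeric_range(1, 5.5, 1.5) has
# distance = 4.5 and step = 1.5, so divmod yields q = 3.0, r = 0.0 and
# len(...) == 3 (the items 1.0, 2.5, 4.0); any nonzero remainder r adds
# one further item.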
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable)
def mark_ends(iterable):
"""Yield 3-tuples of the form ``(is_first, is_last, item)``.
>>> list(mark_ends('ABC'))
[(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
Use this when looping over an iterable to take special action on its first
and/or last items:
>>> iterable = ['Header', 100, 200, 'Footer']
>>> total = 0
>>> for is_first, is_last, item in mark_ends(iterable):
... if is_first:
... continue # Skip the header
... if is_last:
... continue # Skip the footer
... total += item
>>> print(total)
300
"""
it = iter(iterable)
try:
b = next(it)
except StopIteration:
return
try:
for i in count():
a = b
b = next(it)
yield i == 0, False, a
except StopIteration:
yield i == 0, True, a
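# Lookahead sketch: the loop keeps one item buffered in ``b`` and only
# yields its predecessor ``a``, so is_last can be decided the moment
# next(it) raises StopIteration, at which point the buffered item is
# flushed with is_last=True.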
def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable))
if window_size < 1:
raise ValueError('window size must be at least 1')
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it))
def longest_common_prefix(iterables):
"""Yield elements of the longest common prefix amongst given *iterables*.
>>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
'ab'
"""
return (c[0] for c in takewhile(all_equal, zip(*iterables)))
def lstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the beginning
for which *pred* returns ``True``.
For example, to remove a set of items from the start of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(lstrip(iterable, pred))
[1, 2, None, 3, False, None]
This function is analogous to :func:`str.lstrip`, and is essentially
a wrapper for :func:`itertools.dropwhile`.
"""
return dropwhile(pred, iterable)
def rstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the end
for which *pred* returns ``True``.
For example, to remove a set of items from the end of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(rstrip(iterable, pred))
[None, False, None, 1, 2, None, 3]
This function is analogous to :func:`str.rstrip`.
"""
cache = []
cache_append = cache.append
cache_clear = cache.clear
for x in iterable:
if pred(x):
cache_append(x)
else:
yield from cache
cache_clear()
yield x
def strip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the
beginning and end for which *pred* returns ``True``.
For example, to remove a set of items from both ends of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(strip(iterable, pred))
[1, 2, None, 3]
This function is analogous to :func:`str.strip`.
"""
return rstrip(lstrip(iterable, pred), pred)
class islice_extended:
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
You can also use slice notation directly:
>>> iterable = map(str, count())
>>> it = islice_extended(iterable)[10:20:2]
>>> list(it)
['10', '12', '14', '16', '18']
"""
def __init__(self, iterable, *args):
it = iter(iterable)
if args:
self._iterable = _islice_helper(it, slice(*args))
else:
self._iterable = it
def __iter__(self):
return self
def __next__(self):
return next(self._iterable)
def __getitem__(self, key):
if isinstance(key, slice):
return islice_extended(_islice_helper(self._iterable, key))
raise TypeError('islice_extended.__getitem__ argument must be a slice')
def _islice_helper(it, s):
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
if step > 0:
start = 0 if (start is None) else start
if start < 0:
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last n = -stop - 1 items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step]
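# Memory sketch for the negative-start case above: islice_extended(it,
# -4, -1) keeps a deque of at most 4 (index, item) pairs, so only the
# tail of the source is ever cached; enumerate(it, 1) is what lets
# cache[-1][0] recover the total length once the source is exhausted.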
def always_reversible(iterable):
"""An extension of :func:`reversed` that supports all iterables, not
just those which implement the ``Reversible`` or ``Sequence`` protocols.
>>> print(*always_reversible(x for x in range(3)))
2 1 0
If the iterable is already reversible, this function returns the
result of :func:`reversed()`. If the iterable is not reversible,
this function will cache the remaining items in the iterable and
yield them in reverse order, which may require significant storage.
"""
try:
return reversed(iterable)
except TypeError:
return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
Each group of consecutive items is an iterator that shares its source
with *iterable*. When an output group is advanced, the previous group is
no longer available unless its elements are copied (e.g., into a ``list``).
>>> iterable = [1, 2, 11, 12, 21, 22]
>>> saved_groups = []
>>> for group in consecutive_groups(iterable):
... saved_groups.append(list(group)) # Copy group elements
>>> saved_groups
[[1, 2], [11, 12], [21, 22]]
"""
for k, g in groupby(
enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
):
yield map(itemgetter(1), g)
def difference(iterable, func=sub, *, initial=None):
"""This function is the inverse of :func:`itertools.accumulate`. By default
it will compute the first difference of *iterable* using
:func:`operator.sub`:
>>> from itertools import accumulate
>>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
*func* defaults to :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120]
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
If the *initial* keyword is set, the first element will be skipped when
computing successive differences.
>>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
>>> list(difference(it, initial=10))
[1, 2, 3]
"""
a, b = tee(iterable)
try:
first = [next(b)]
except StopIteration:
return iter([])
if initial is not None:
first = []
return chain(first, map(func, b, a))
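# Round-trip sketch: with the default func=sub,
# difference(accumulate(xs)) reproduces xs for numeric input. When
# accumulate() was called with an initial value, the leading element of
# the stream is that initial value itself, which is why passing the same
# *initial* here drops ``first`` and returns only the original items.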
class SequenceView(Sequence):
"""Return a read-only view of the sequence object *target*.
:class:`SequenceView` objects are analogous to Python's built-in
"dictionary view" types. They provide a dynamic view of a sequence's items,
meaning that when the sequence updates, so does the view.
>>> seq = ['0', '1', '2']
>>> view = SequenceView(seq)
>>> view
SequenceView(['0', '1', '2'])
>>> seq.append('3')
>>> view
SequenceView(['0', '1', '2', '3'])
Sequence views support indexing, slicing, and length queries. They act
like the underlying sequence, except they don't allow assignment:
>>> view[1]
'1'
>>> view[1:-1]
['1', '2']
>>> len(view)
4
Sequence views are useful as an alternative to copying, as they don't
require (much) extra storage.
"""
def __init__(self, target):
if not isinstance(target, Sequence):
raise TypeError
self._target = target
def __getitem__(self, index):
return self._target[index]
def __len__(self):
return len(self._target)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._target))
class seekable:
"""Wrap an iterator to allow for seeking backward and forward. This
progressively caches the items in the source iterable so they can be
re-visited.
Call :meth:`seek` with an index to seek to that position in the source
iterable.
To "reset" an iterator, seek to ``0``:
>>> from itertools import count
>>> it = seekable((str(n) for n in count()))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> it.seek(0)
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> next(it)
'3'
You can also seek forward:
>>> it = seekable((str(n) for n in range(20)))
>>> it.seek(10)
>>> next(it)
'10'
>>> it.seek(20) # Seeking past the end of the source isn't a problem
>>> list(it)
[]
>>> it.seek(0) # Resetting works even after hitting the end
>>> next(it), next(it), next(it)
('0', '1', '2')
Call :meth:`peek` to look ahead one item without advancing the iterator:
>>> it = seekable('1234')
>>> it.peek()
'1'
>>> list(it)
['1', '2', '3', '4']
>>> it.peek(default='empty')
'empty'
Before the iterator is at its end, calling :func:`bool` on it will return
``True``. After it will return ``False``:
>>> it = seekable('5678')
>>> bool(it)
True
>>> list(it)
['5', '6', '7', '8']
>>> bool(it)
False
You may view the contents of the cache with the :meth:`elements` method.
That returns a :class:`SequenceView`, a view that updates automatically:
>>> it = seekable((str(n) for n in range(10)))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> elements = it.elements()
>>> elements
SequenceView(['0', '1', '2'])
>>> next(it)
'3'
>>> elements
SequenceView(['0', '1', '2', '3'])
By default, the cache grows as the source iterable progresses, so beware of
wrapping very large or infinite iterables. Supply *maxlen* to limit the
size of the cache (this of course limits how far back you can seek).
>>> from itertools import count
>>> it = seekable((str(n) for n in count()), maxlen=2)
>>> next(it), next(it), next(it), next(it)
('0', '1', '2', '3')
>>> list(it.elements())
['2', '3']
>>> it.seek(0)
>>> next(it), next(it), next(it), next(it)
('2', '3', '4', '5')
>>> next(it)
'6'
"""
def __init__(self, iterable, maxlen=None):
self._source = iter(iterable)
if maxlen is None:
self._cache = []
else:
self._cache = deque([], maxlen)
self._index = None
def __iter__(self):
return self
def __next__(self):
if self._index is not None:
try:
item = self._cache[self._index]
except IndexError:
self._index = None
else:
self._index += 1
return item
item = next(self._source)
self._cache.append(item)
return item
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
try:
peeked = next(self)
except StopIteration:
if default is _marker:
raise
return default
if self._index is None:
self._index = len(self._cache)
self._index -= 1
return peeked
def elements(self):
return SequenceView(self._cache)
def seek(self, index):
self._index = index
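        # If the target lies beyond the cache, advance through the source
        # (__next__ caches as it goes) until the position is reached or the
        # source is exhausted.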
remainder = index - len(self._cache)
if remainder > 0:
consume(self, remainder)
class run_length:
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
def rlocate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
[4, 2, 1]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> iterable = iter('abcb')
>>> pred = lambda x: x == 'b'
>>> list(rlocate(iterable, pred))
[3, 1]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(rlocate(iterable, pred=pred, window_size=3))
[9, 5, 1]
Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.
    See :func:`locate` for other example applications.
"""
if window_size is None:
try:
len_iter = len(iterable)
return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
except TypeError:
pass
return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0]
def partitions(iterable):
"""Yield all possible order-preserving partitions of *iterable*.
>>> iterable = 'abc'
>>> for part in partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['a', 'b', 'c']
This is unrelated to :func:`partition`.
"""
sequence = list(iterable)
n = len(sequence)
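    # Each subset of cut points drawn from positions 1..n-1 defines one
    # partition; zipping consecutive cut points gives the slice boundaries.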
for i in powerset(range(1, n)):
yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
def set_partitions(iterable, k=None):
"""
Yield the set partitions of *iterable* into *k* parts. Set partitions are
not order-preserving.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable, 2):
... print([''.join(p) for p in part])
['a', 'bc']
['ab', 'c']
['b', 'ac']
If *k* is not given, every set partition is generated.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['b', 'ac']
['a', 'b', 'c']
"""
L = list(iterable)
n = len(L)
if k is not None:
if k < 1:
raise ValueError(
"Can't partition in a negative or zero number of groups"
)
elif k > n:
return
def set_partitions_helper(L, k):
n = len(L)
if k == 1:
yield [L]
elif n == k:
yield [[s] for s in L]
else:
e, *M = L
for p in set_partitions_helper(M, k - 1):
yield [[e], *p]
for p in set_partitions_helper(M, k):
for i in range(len(p)):
yield p[:i] + [[e] + p[i]] + p[i + 1 :]
if k is None:
for k in range(1, n + 1):
yield from set_partitions_helper(L, k)
else:
yield from set_partitions_helper(L, k)
class time_limited:
"""
Yield items from *iterable* until *limit_seconds* have passed.
If the time limit expires before all items have been yielded, the
``timed_out`` parameter will be set to ``True``.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = time_limited(0.1, generator())
>>> list(iterable)
[1, 2]
>>> iterable.timed_out
True
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
"""
def __init__(self, limit_seconds, iterable):
        if limit_seconds < 0:
            raise ValueError('limit_seconds must be non-negative')
self.limit_seconds = limit_seconds
self._iterable = iter(iterable)
self._start_time = monotonic()
self.timed_out = False
def __iter__(self):
return self
def __next__(self):
item = next(self._iterable)
if monotonic() - self._start_time > self.limit_seconds:
self.timed_out = True
raise StopIteration
return item
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
    and perhaps more.
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
class _IChunk:
def __init__(self, iterable, n):
self._it = islice(iterable, n)
self._cache = deque()
def fill_cache(self):
self._cache.extend(self._it)
def __iter__(self):
return self
def __next__(self):
try:
return next(self._it)
except StopIteration:
if self._cache:
return self._cache.popleft()
else:
raise
def ichunked(iterable, n):
"""Break *iterable* into sub-iterables with *n* elements each.
:func:`ichunked` is like :func:`chunked`, but it yields iterables
instead of lists.
If the sub-iterables are read in order, the elements of *iterable*
won't be stored in memory.
If they are read out of order, :func:`itertools.tee` is used to cache
elements as necessary.
>>> from itertools import count
>>> all_chunks = ichunked(count(), 4)
>>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
>>> list(c_2) # c_1's elements have been cached; c_3's haven't been
[4, 5, 6, 7]
>>> list(c_1)
[0, 1, 2, 3]
>>> list(c_3)
[8, 9, 10, 11]
"""
source = peekable(iter(iterable))
ichunk_marker = object()
while True:
# Check to see whether we're at the end of the source iterable
item = source.peek(ichunk_marker)
if item is ichunk_marker:
return
chunk = _IChunk(source, n)
yield chunk
# Advance the source iterable and fill previous chunk's cache
chunk.fill_cache()
def iequals(*iterables):
"""Return ``True`` if all given *iterables* are equal to each other,
which means that they contain the same elements in the same order.
The function is useful for comparing iterables of different data types
or iterables that do not support equality checks.
>>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
True
>>> iequals("abc", "acb")
False
    Not to be confused with :func:`all_equal`, which checks whether all
    elements of an iterable are equal to each other.
"""
return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
def distinct_combinations(iterable, r):
"""Yield the distinct combinations of *r* items taken from *iterable*.
>>> list(distinct_combinations([0, 0, 1], 2))
[(0, 0), (0, 1)]
    Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
"""
if r < 0:
raise ValueError('r must be non-negative')
elif r == 0:
yield ()
return
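    # Depth-first search: one generator per combination slot. Each level
    # enumerates candidates to the right of the previous pick, with
    # unique_everseen skipping repeated values so no duplicate combination
    # is ever generated.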
pool = tuple(iterable)
generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
current_combo = [None] * r
level = 0
while generators:
try:
cur_idx, p = next(generators[-1])
except StopIteration:
generators.pop()
level -= 1
continue
current_combo[level] = p
if level + 1 == r:
yield tuple(current_combo)
else:
generators.append(
unique_everseen(
enumerate(pool[cur_idx + 1 :], cur_idx + 1),
key=itemgetter(1),
)
)
level += 1
def filter_except(validator, iterable, *exceptions):
"""Yield the items from *iterable* for which the *validator* function does
not raise one of the specified *exceptions*.
*validator* is called for each item in *iterable*.
It should be a function that accepts one argument and raises an exception
if that item is not valid.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(filter_except(int, iterable, ValueError, TypeError))
['1', '2', '4']
If an exception other than one given by *exceptions* is raised by
*validator*, it is raised like normal.
"""
for item in iterable:
try:
validator(item)
except exceptions:
pass
else:
yield item
def map_except(function, iterable, *exceptions):
"""Transform each item from *iterable* with *function* and yield the
result, unless *function* raises one of the specified *exceptions*.
*function* is called to transform each item in *iterable*.
It should accept one argument.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(map_except(int, iterable, ValueError, TypeError))
[1, 2, 4]
If an exception other than one given by *exceptions* is raised by
*function*, it is raised like normal.
"""
for item in iterable:
try:
yield function(item)
except exceptions:
pass
def map_if(iterable, pred, func, func_else=lambda x: x):
"""Evaluate each item from *iterable* using *pred*. If the result is
equivalent to ``True``, transform the item with *func* and yield it.
Otherwise, transform the item with *func_else* and yield it.
*pred*, *func*, and *func_else* should each be functions that accept
one argument. By default, *func_else* is the identity function.
>>> from math import sqrt
>>> iterable = list(range(-5, 5))
>>> iterable
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
>>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig'))
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig']
>>> list(map_if(iterable, lambda x: x >= 0,
... lambda x: f'{sqrt(x):.2f}', lambda x: None))
[None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00']
"""
for item in iterable:
yield func(item) if pred(item) else func_else(item)
def _sample_unweighted(iterable, k):
# Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
# "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
# Fill up the reservoir (collection of samples) with the first `k` samples
reservoir = take(k, iterable)
# Generate random number that's the largest in a sample of k U(0,1) numbers
# Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
W = exp(log(random()) / k)
# The number of elements to skip before changing the reservoir is a random
# number with a geometric distribution. Sample it using random() and logs.
next_index = k + floor(log(random()) / log(1 - W))
for index, element in enumerate(iterable, k):
if index == next_index:
reservoir[randrange(k)] = element
# The new W is the largest in a sample of k U(0, `old_W`) numbers
W *= exp(log(random()) / k)
next_index += floor(log(random()) / log(1 - W)) + 1
return reservoir
def _sample_weighted(iterable, k, weights):
# Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
# "Weighted random sampling with a reservoir".
# Log-transform for numerical stability for weights that are small/large
weight_keys = (log(random()) / weight for weight in weights)
# Fill up the reservoir (collection of samples) with the first `k`
# weight-keys and elements, then heapify the list.
reservoir = take(k, zip(weight_keys, iterable))
heapify(reservoir)
# The number of jumps before changing the reservoir is a random variable
# with an exponential distribution. Sample it using random() and logs.
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
for weight, element in zip(weights, iterable):
if weight >= weights_to_skip:
# The notation here is consistent with the paper, but we store
# the weight-keys in log-space for better numerical stability.
smallest_weight_key, _ = reservoir[0]
t_w = exp(weight * smallest_weight_key)
r_2 = uniform(t_w, 1) # generate U(t_w, 1)
weight_key = log(r_2) / weight
heapreplace(reservoir, (weight_key, element))
smallest_weight_key, _ = reservoir[0]
weights_to_skip = log(random()) / smallest_weight_key
else:
weights_to_skip -= weight
# Equivalent to [element for weight_key, element in sorted(reservoir)]
return [heappop(reservoir)[1] for _ in range(k)]
def sample(iterable, k, weights=None):
"""Return a *k*-length list of elements chosen (without replacement)
from the *iterable*. Like :func:`random.sample`, but works on iterables
of unknown length.
>>> iterable = range(100)
>>> sample(iterable, 5) # doctest: +SKIP
[81, 60, 96, 16, 4]
An iterable with *weights* may also be given:
>>> iterable = range(100)
>>> weights = (i * i + 1 for i in range(100))
    >>> sample(iterable, 5, weights=weights)  # doctest: +SKIP
[79, 67, 74, 66, 78]
The algorithm can also be used to generate weighted random permutations.
The relative weight of each item determines the probability that it
appears late in the permutation.
>>> data = "abcdefgh"
>>> weights = range(1, len(data) + 1)
>>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
"""
if k == 0:
return []
iterable = iter(iterable)
if weights is None:
return _sample_unweighted(iterable, k)
else:
weights = iter(weights)
return _sample_weighted(iterable, k, weights)
def is_sorted(iterable, key=None, reverse=False, strict=False):
"""Returns ``True`` if the items of iterable are in sorted order, and
``False`` otherwise. *key* and *reverse* have the same meaning that they do
in the built-in :func:`sorted` function.
>>> is_sorted(['1', '2', '3', '4', '5'], key=int)
True
>>> is_sorted([5, 4, 3, 1, 2], reverse=True)
False
If *strict*, tests for strict sorting, that is, returns ``False`` if equal
elements are found:
>>> is_sorted([1, 2, 2])
True
>>> is_sorted([1, 2, 2], strict=True)
False
The function returns ``False`` after encountering the first out-of-order
item. If there are no out-of-order items, the iterable is exhausted.
"""
compare = (le if reverse else ge) if strict else (lt if reverse else gt)
it = iterable if key is None else map(key, iterable)
return not any(starmap(compare, pairwise(it)))
class AbortThread(BaseException):
pass
class callback_iter:
"""Convert a function that uses callbacks to an iterator.
Let *func* be a function that takes a `callback` keyword argument.
For example:
>>> def func(callback=None):
... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
... if callback:
... callback(i, c)
... return 4
Use ``with callback_iter(func)`` to get an iterator over the parameters
that are delivered to the callback.
>>> with callback_iter(func) as it:
... for args, kwargs in it:
... print(args)
(1, 'a')
(2, 'b')
(3, 'c')
The function will be called in a background thread. The ``done`` property
indicates whether it has completed execution.
>>> it.done
True
If it completes successfully, its return value will be available
in the ``result`` property.
>>> it.result
4
Notes:
* If the function uses some keyword argument besides ``callback``, supply
*callback_kwd*.
* If it finished executing, but raised an exception, accessing the
``result`` property will raise the same exception.
* If it hasn't finished executing, accessing the ``result``
property from within the ``with`` block will raise ``RuntimeError``.
* If it hasn't finished executing, accessing the ``result`` property from
outside the ``with`` block will raise a
``more_itertools.AbortThread`` exception.
    * Provide *wait_seconds* to adjust how frequently it is polled for
output.
"""
def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
self._func = func
self._callback_kwd = callback_kwd
self._aborted = False
self._future = None
self._wait_seconds = wait_seconds
        # Lazily import concurrent.futures
        self._executor = __import__(
            'concurrent.futures'
        ).futures.ThreadPoolExecutor(max_workers=1)
self._iterator = self._reader()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._aborted = True
self._executor.shutdown()
def __iter__(self):
return self
def __next__(self):
return next(self._iterator)
@property
def done(self):
if self._future is None:
return False
return self._future.done()
@property
def result(self):
if not self.done:
raise RuntimeError('Function has not yet completed')
return self._future.result()
def _reader(self):
q = Queue()
def callback(*args, **kwargs):
if self._aborted:
raise AbortThread('canceled by user')
q.put((args, kwargs))
self._future = self._executor.submit(
self._func, **{self._callback_kwd: callback}
)
while True:
try:
item = q.get(timeout=self._wait_seconds)
except Empty:
pass
else:
q.task_done()
yield item
if self._future.done():
break
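        # The future has completed; drain any items still sitting in the
        # queue before yielding the leftovers.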
remaining = []
while True:
try:
item = q.get_nowait()
except Empty:
break
else:
q.task_done()
remaining.append(item)
q.join()
yield from remaining
def windowed_complete(iterable, n):
"""
Yield ``(beginning, middle, end)`` tuples, where:
* Each ``middle`` has *n* items from *iterable*
* Each ``beginning`` has the items before the ones in ``middle``
* Each ``end`` has the items after the ones in ``middle``
>>> iterable = range(7)
>>> n = 3
>>> for beginning, middle, end in windowed_complete(iterable, n):
... print(beginning, middle, end)
() (0, 1, 2) (3, 4, 5, 6)
(0,) (1, 2, 3) (4, 5, 6)
(0, 1) (2, 3, 4) (5, 6)
(0, 1, 2) (3, 4, 5) (6,)
(0, 1, 2, 3) (4, 5, 6) ()
    Note that *n* must be at least 0 and at most equal to the length of
*iterable*.
This function will exhaust the iterable and may require significant
storage.
"""
if n < 0:
raise ValueError('n must be >= 0')
seq = tuple(iterable)
size = len(seq)
if n > size:
raise ValueError('n must be <= len(seq)')
for i in range(size - n + 1):
beginning = seq[:i]
middle = seq[i : i + n]
end = seq[i + n :]
yield beginning, middle, end
def all_unique(iterable, key=None):
"""
Returns ``True`` if all the elements of *iterable* are unique (no two
elements are equal).
>>> all_unique('ABCB')
False
If a *key* function is specified, it will be used to make comparisons.
>>> all_unique('ABCb')
True
>>> all_unique('ABCb', str.lower)
False
The function returns as soon as the first non-unique element is
encountered. Iterables with a mix of hashable and unhashable items can
be used, but the function will be slower for unhashable items.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
for element in map(key, iterable) if key else iterable:
try:
if element in seenset:
return False
seenset_add(element)
except TypeError:
if element in seenlist:
return False
seenlist_add(element)
return True
def nth_product(index, *args):
"""Equivalent to ``list(product(*args))[index]``.
The products of *args* can be ordered lexicographically.
:func:`nth_product` computes the product at sort position *index* without
computing the previous products.
>>> nth_product(8, range(2), range(2), range(2), range(2))
(1, 0, 0, 0)
``IndexError`` will be raised if the given *index* is invalid.
"""
pools = list(map(tuple, reversed(args)))
ns = list(map(len, pools))
c = reduce(mul, ns)
if index < 0:
index += c
if not 0 <= index < c:
raise IndexError
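    # Decompose *index* as a mixed-radix number whose digits each select one
    # item from a pool, least-significant pool first (the pools were
    # reversed above).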
result = []
for pool, n in zip(pools, ns):
result.append(pool[index % n])
index //= n
return tuple(reversed(result))
def nth_permutation(iterable, r, index):
"""Equivalent to ``list(permutations(iterable, r))[index]```
The subsequences of *iterable* that are of length *r* where order is
important can be ordered lexicographically. :func:`nth_permutation`
computes the subsequence at sort position *index* directly, without
computing the previous subsequences.
>>> nth_permutation('ghijk', 2, 5)
('h', 'i')
    ``ValueError`` will be raised if *r* is negative or greater than the length
of *iterable*.
``IndexError`` will be raised if the given *index* is invalid.
"""
pool = list(iterable)
n = len(pool)
if r is None or r == n:
r, c = n, factorial(n)
elif not 0 <= r < n:
raise ValueError
else:
c = factorial(n) // factorial(n - r)
if index < 0:
index += c
if not 0 <= index < c:
raise IndexError
if c == 0:
return tuple()
result = [0] * r
q = index * factorial(n) // c if r < n else index
for d in range(1, n + 1):
q, i = divmod(q, d)
if 0 <= n - d < r:
result[n - d] = i
if q == 0:
break
return tuple(map(pool.pop, result))
def value_chain(*args):
"""Yield all arguments passed to the function in the same order in which
they were passed. If an argument itself is iterable then iterate over its
values.
>>> list(value_chain(1, 2, 3, [4, 5, 6]))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and are emitted
as-is:
>>> list(value_chain('12', '34', ['56', '78']))
['12', '34', '56', '78']
Multiple levels of nesting are not flattened.
"""
for value in args:
if isinstance(value, (str, bytes)):
yield value
continue
try:
yield from value
except TypeError:
yield value
def product_index(element, *args):
"""Equivalent to ``list(product(*args)).index(element)``
The products of *args* can be ordered lexicographically.
:func:`product_index` computes the first index of *element* without
computing the previous products.
>>> product_index([8, 2], range(10), range(5))
42
``ValueError`` will be raised if the given *element* isn't in the product
of *args*.
"""
index = 0
for x, pool in zip_longest(element, args, fillvalue=_marker):
if x is _marker or pool is _marker:
raise ValueError('element is not a product of args')
pool = tuple(pool)
index = index * len(pool) + pool.index(x)
return index
def combination_index(element, iterable):
"""Equivalent to ``list(combinations(iterable, r)).index(element)``
The subsequences of *iterable* that are of length *r* can be ordered
lexicographically. :func:`combination_index` computes the index of the
first *element*, without computing the previous combinations.
>>> combination_index('adf', 'abcdefg')
10
``ValueError`` will be raised if the given *element* isn't one of the
combinations of *iterable*.
"""
element = enumerate(element)
k, y = next(element, (None, None))
if k is None:
return 0
indexes = []
pool = enumerate(iterable)
for n, x in pool:
if x == y:
indexes.append(n)
tmp, y = next(element, (None, None))
if tmp is None:
break
else:
k = tmp
else:
raise ValueError('element is not a combination of iterable')
n, _ = last(pool, default=(n, None))
    # Python versions below 3.8 don't have math.comb, so compute the
    # binomial coefficients with factorials instead
index = 1
for i, j in enumerate(reversed(indexes), start=1):
j = n - j
if i <= j:
index += factorial(j) // (factorial(i) * factorial(j - i))
return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
def permutation_index(element, iterable):
"""Equivalent to ``list(permutations(iterable, r)).index(element)```
The subsequences of *iterable* that are of length *r* where order is
important can be ordered lexicographically. :func:`permutation_index`
computes the index of the first *element* directly, without computing
the previous permutations.
>>> permutation_index([1, 3, 2], range(5))
19
``ValueError`` will be raised if the given *element* isn't one of the
permutations of *iterable*.
"""
index = 0
pool = list(iterable)
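    # Factorial number system: with i items remaining there are i choices,
    # so the rank of each chosen element among the remaining pool becomes
    # one "digit" of the final index.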
for i, x in zip(range(len(pool), -1, -1), element):
r = pool.index(x)
index = index * i + r
del pool[r]
return index
class countable:
"""Wrap *iterable* and keep a count of how many items have been consumed.
The ``items_seen`` attribute starts at ``0`` and increments as the iterable
is consumed:
>>> iterable = map(str, range(10))
>>> it = countable(iterable)
>>> it.items_seen
0
>>> next(it), next(it)
('0', '1')
>>> list(it)
['2', '3', '4', '5', '6', '7', '8', '9']
>>> it.items_seen
10
"""
def __init__(self, iterable):
self._it = iter(iterable)
self.items_seen = 0
def __iter__(self):
return self
def __next__(self):
item = next(self._it)
self.items_seen += 1
return item
def chunked_even(iterable, n):
"""Break *iterable* into lists of approximately length *n*.
    Items are distributed such that the lengths of the lists differ by at most
1 item.
>>> iterable = [1, 2, 3, 4, 5, 6, 7]
>>> n = 3
>>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2
[[1, 2, 3], [4, 5], [6, 7]]
>>> list(chunked(iterable, n)) # List lengths: 3, 3, 1
[[1, 2, 3], [4, 5, 6], [7]]
"""
len_method = getattr(iterable, '__len__', None)
if len_method is None:
return _chunked_even_online(iterable, n)
else:
return _chunked_even_finite(iterable, len_method(), n)
def _chunked_even_online(iterable, n):
buffer = []
maxbuf = n + (n - 2) * (n - 1)
for x in iterable:
buffer.append(x)
if len(buffer) == maxbuf:
yield buffer[:n]
buffer = buffer[n:]
yield from _chunked_even_finite(buffer, len(buffer), n)
def _chunked_even_finite(iterable, N, n):
if N < 1:
return
# Lists are either size `full_size <= n` or `partial_size = full_size - 1`
q, r = divmod(N, n)
num_lists = q + (1 if r > 0 else 0)
q, r = divmod(N, num_lists)
full_size = q + (1 if r > 0 else 0)
partial_size = full_size - 1
num_full = N - partial_size * num_lists
num_partial = num_lists - num_full
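    # Worked example: N=7, n=3 gives num_lists=3, full_size=3,
    # partial_size=2, num_full=1, num_partial=2 -> chunk sizes [3, 2, 2]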
buffer = []
iterator = iter(iterable)
# Yield num_full lists of full_size
for x in iterator:
buffer.append(x)
if len(buffer) == full_size:
yield buffer
buffer = []
num_full -= 1
if num_full <= 0:
break
# Yield num_partial lists of partial_size
for x in iterator:
buffer.append(x)
if len(buffer) == partial_size:
yield buffer
buffer = []
num_partial -= 1
def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
"""A version of :func:`zip` that "broadcasts" any scalar
(i.e., non-iterable) items into output tuples.
>>> iterable_1 = [1, 2, 3]
>>> iterable_2 = ['a', 'b', 'c']
>>> scalar = '_'
>>> list(zip_broadcast(iterable_1, iterable_2, scalar))
[(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')]
The *scalar_types* keyword argument determines what types are considered
scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to
treat strings and byte strings as iterable:
>>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None))
[('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')]
If the *strict* keyword argument is ``True``, then
``UnequalIterablesError`` will be raised if any of the iterables have
different lengths.
"""
def is_scalar(obj):
if scalar_types and isinstance(obj, scalar_types):
return True
try:
iter(obj)
except TypeError:
return True
else:
return False
size = len(objects)
if not size:
return
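    # Separate the arguments into iterables and scalars, remembering each
    # original position so scalars can be re-inserted into every output tuple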
iterables, iterable_positions = [], []
scalars, scalar_positions = [], []
for i, obj in enumerate(objects):
if is_scalar(obj):
scalars.append(obj)
scalar_positions.append(i)
else:
iterables.append(iter(obj))
iterable_positions.append(i)
if len(scalars) == size:
yield tuple(objects)
return
zipper = _zip_equal if strict else zip
for item in zipper(*iterables):
new_item = [None] * size
for i, elem in zip(iterable_positions, item):
new_item[i] = elem
for i, elem in zip(scalar_positions, scalars):
new_item[i] = elem
yield tuple(new_item)
def unique_in_window(iterable, n, key=None):
"""Yield the items from *iterable* that haven't been seen recently.
*n* is the size of the lookback window.
>>> iterable = [0, 1, 0, 2, 3, 0]
>>> n = 3
>>> list(unique_in_window(iterable, n))
[0, 1, 2, 3, 0]
The *key* function, if provided, will be used to determine uniqueness:
>>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
['a', 'b', 'c', 'd', 'a']
The items in *iterable* must be hashable.
"""
if n <= 0:
raise ValueError('n must be greater than 0')
window = deque(maxlen=n)
uniques = set()
use_key = key is not None
for item in iterable:
k = key(item) if use_key else item
if k in uniques:
continue
if len(uniques) == n:
uniques.discard(window[0])
uniques.add(k)
window.append(k)
yield item
def duplicates_everseen(iterable, key=None):
"""Yield duplicate elements after their first appearance.
>>> list(duplicates_everseen('mississippi'))
['s', 'i', 's', 's', 'i', 'p', 'i']
>>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
    This function is analogous to :func:`unique_everseen` and is subject to
the same performance considerations.
"""
seen_set = set()
seen_list = []
use_key = key is not None
for element in iterable:
k = key(element) if use_key else element
try:
if k not in seen_set:
seen_set.add(k)
else:
yield element
except TypeError:
if k not in seen_list:
seen_list.append(k)
else:
yield element
def duplicates_justseen(iterable, key=None):
"""Yields serially-duplicate elements after their first appearance.
>>> list(duplicates_justseen('mississippi'))
['s', 's', 'p']
>>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
    This function is analogous to :func:`unique_justseen`.
"""
return flatten(
map(
lambda group_tuple: islice_extended(group_tuple[1])[1:],
groupby(iterable, key),
)
)
def minmax(iterable_or_value, *others, key=None, default=_marker):
"""Returns both the smallest and largest items in an iterable
or the largest of two or more arguments.
>>> minmax([3, 1, 5])
(1, 5)
>>> minmax(4, 2, 6)
(2, 6)
If a *key* function is provided, it will be used to transform the input
items for comparison.
>>> minmax([5, 30], key=str) # '30' sorts before '5'
(30, 5)
If a *default* value is provided, it will be returned if there are no
input items.
>>> minmax([], default=(0, 0))
(0, 0)
Otherwise ``ValueError`` is raised.
This function is based on the
`recipe <http://code.activestate.com/recipes/577916/>`__ by
Raymond Hettinger and takes care to minimize the number of comparisons
performed.
"""
iterable = (iterable_or_value, *others) if others else iterable_or_value
it = iter(iterable)
try:
lo = hi = next(it)
except StopIteration as e:
if default is _marker:
raise ValueError(
'`minmax()` argument is an empty iterable. '
'Provide a `default` value to suppress this error.'
) from e
return default
# Different branches depending on the presence of key. This saves a lot
# of unimportant copies which would slow the "key=None" branch
# significantly down.
if key is None:
for x, y in zip_longest(it, it, fillvalue=lo):
if y < x:
x, y = y, x
if x < lo:
lo = x
if hi < y:
hi = y
else:
lo_key = hi_key = key(lo)
for x, y in zip_longest(it, it, fillvalue=lo):
x_key, y_key = key(x), key(y)
if y_key < x_key:
x, y, x_key, y_key = y, x, y_key, x_key
if x_key < lo_key:
lo, lo_key = x, x_key
if hi_key < y_key:
hi, hi_key = y, y_key
return lo, hi
def constrained_batches(
iterable, max_size, max_count=None, get_len=len, strict=True
):
"""Yield batches of items from *iterable* with a combined size limited by
*max_size*.
>>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
>>> list(constrained_batches(iterable, 10))
[(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
If a *max_count* is supplied, the number of items per batch is also
limited:
>>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
>>> list(constrained_batches(iterable, 10, max_count = 2))
[(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
If a *get_len* function is supplied, use that instead of :func:`len` to
determine item size.
If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
than *max_size*. Otherwise, allow single items to exceed *max_size*.
"""
if max_size <= 0:
raise ValueError('maximum size must be greater than zero')
batch = []
batch_size = 0
batch_count = 0
for item in iterable:
item_len = get_len(item)
if strict and item_len > max_size:
raise ValueError('item size exceeds maximum size')
reached_count = batch_count == max_count
reached_size = item_len + batch_size > max_size
if batch_count and (reached_size or reached_count):
yield tuple(batch)
batch.clear()
batch_size = 0
batch_count = 0
batch.append(item)
batch_size += item_len
batch_count += 1
if batch:
yield tuple(batch)
def gray_product(*iterables):
"""Like :func:`itertools.product`, but return tuples in an order such
that only one element in the generated tuple changes from one iteration
to the next.
>>> list(gray_product('AB','CD'))
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')]
This function consumes all of the input iterables before producing output.
If any of the input iterables have fewer than two items, ``ValueError``
is raised.
For information on the algorithm, see
`this section <https://www-cs-faculty.stanford.edu/~knuth/fasc2a.ps.gz>`__
of Donald Knuth's *The Art of Computer Programming*.
"""
all_iterables = tuple(tuple(x) for x in iterables)
iterable_count = len(all_iterables)
for iterable in all_iterables:
if len(iterable) < 2:
raise ValueError("each iterable must have two or more items")
# This is based on "Algorithm H" from section 7.2.1.1, page 20.
# a holds the indexes of the source iterables for the n-tuple to be yielded
# f is the array of "focus pointers"
# o is the array of "directions"
a = [0] * iterable_count
f = list(range(iterable_count + 1))
o = [1] * iterable_count
while True:
yield tuple(all_iterables[i][a[i]] for i in range(iterable_count))
j = f[0]
f[0] = 0
if j == iterable_count:
break
a[j] = a[j] + o[j]
if a[j] == 0 or a[j] == len(all_iterables[j]) - 1:
o[j] = -o[j]
f[j] = f[j + 1]
f[j + 1] = j + 1
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/android.py | from __future__ import annotations
import os
import re
import sys
from functools import lru_cache
from typing import cast
from .api import PlatformDirsABC
class Android(PlatformDirsABC):
"""
Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>` and
`version <platformdirs.api.PlatformDirsABC.version>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "files")
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
"""
return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `user_config_dir`"""
return self.user_config_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``
"""
return _android_documents_folder()
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "tmp")
return path
@lru_cache(maxsize=1)
def _android_folder() -> str | None:
""":return: base folder for the Android OS or None if cannot be found"""
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exception:
        # If that fails, look on sys.path for a path that matches an Android app folder
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
""":return: documents folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
except Exception:
documents_dir = "/storage/emulated/0/Documents"
return documents_dir
__all__ = [
"Android",
]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/__init__.py | """
Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and
usage.
"""
from __future__ import annotations
import os
import sys
from pathlib import Path
if sys.version_info >= (3, 8): # pragma: no cover (py38+)
from typing import Literal
else: # pragma: no cover (py38+)
from ..typing_extensions import Literal
from .api import PlatformDirsABC
from .version import __version__
from .version import __version_tuple__ as __version_info__
def _set_platform_dir_class() -> type[PlatformDirsABC]:
if sys.platform == "win32":
from .windows import Windows as Result
elif sys.platform == "darwin":
from .macos import MacOS as Result
else:
from .unix import Unix as Result
if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
if os.getenv("SHELL") or os.getenv("PREFIX"):
return Result
from .android import _android_folder
if _android_folder() is not None:
from .android import Android
return Android # return to avoid redefinition of result
return Result
PlatformDirs = _set_platform_dir_class() #: Currently active platform
AppDirs = PlatformDirs #: Backwards compatibility with appdirs
def user_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir
def site_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data directory shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir
def user_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir
def site_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config directory shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir
def user_cache_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir
def user_state_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir
def user_log_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir
def user_documents_dir() -> str:
"""
:returns: documents directory tied to the user
"""
return PlatformDirs().user_documents_dir
def user_runtime_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir
def user_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path
def site_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data path shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path
def user_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path
def site_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config path shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path
def user_cache_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path
def user_state_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path
def user_log_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path
def user_documents_path() -> Path:
"""
:returns: documents path tied to the user
"""
return PlatformDirs().user_documents_path
def user_runtime_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path
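# A minimal usage sketch of the convenience wrappers above; "MyApp" and
# "MyCompany" are hypothetical placeholder values:
def _example_paths() -> None:
    # Each wrapper constructs a throwaway PlatformDirs instance and returns a
    # single property, so results are not cached between calls.
    print(user_config_path("MyApp", "MyCompany", version="1.0"))
    print(user_cache_path("MyApp", "MyCompany", version="1.0", opinion=False))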
__all__ = [
"__version__",
"__version_info__",
"PlatformDirs",
"AppDirs",
"PlatformDirsABC",
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"user_documents_dir",
"user_runtime_dir",
"site_data_dir",
"site_config_dir",
"user_data_path",
"user_config_path",
"user_cache_path",
"user_state_path",
"user_log_path",
"user_documents_path",
"user_runtime_path",
"site_data_path",
"site_config_path",
]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/version.py | # file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '2.6.2'
__version_tuple__ = version_tuple = (2, 6, 2)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/unix.py | from __future__ import annotations
import os
import sys
from configparser import ConfigParser
from pathlib import Path
from .api import PlatformDirsABC
if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker
from os import getuid
else:
def getuid() -> int:
raise RuntimeError("should only be used on Linux")
class Unix(PlatformDirsABC):
"""
On Unix/Linux, we follow the
`XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec allows
overriding directories with environment variables. The examples shown are the default values, alongside the name of
the environment variable that overrides them. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`multipath <platformdirs.api.PlatformDirsABC.multipath>`,
`opinion <platformdirs.api.PlatformDirsABC.opinion>`.
"""
@property
def user_data_dir(self) -> str:
"""
:return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
``$XDG_DATA_HOME/$appname/$version``
"""
path = os.environ.get("XDG_DATA_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.local/share")
return self._append_app_name_and_version(path)
@property
def site_data_dir(self) -> str:
"""
:return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
enabled and ``XDG_DATA_DIRS`` is set to a multi-path value, the response is also a multi-path string separated by the OS
path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
"""
# XDG default for $XDG_DATA_DIRS; only first, if multipath is False
path = os.environ.get("XDG_DATA_DIRS", "")
if not path.strip():
path = f"/usr/local/share{os.pathsep}/usr/share"
return self._with_multi_path(path)
def _with_multi_path(self, path: str) -> str:
path_list = path.split(os.pathsep)
if not self.multipath:
path_list = path_list[0:1]
path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
return os.pathsep.join(path_list)
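# Behaviour sketch for _with_multi_path, assuming a hypothetical environment
# with XDG_DATA_DIRS="/usr/local/share:/usr/share" and appname="MyApp":
# multipath=True yields "/usr/local/share/MyApp:/usr/share/MyApp", while
# multipath=False keeps only the first entry, "/usr/local/share/MyApp".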
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
``$XDG_CONFIG_HOME/$appname/$version``
"""
path = os.environ.get("XDG_CONFIG_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.config")
return self._append_app_name_and_version(path)
@property
def site_config_dir(self) -> str:
"""
:return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
is enabled and ``XDG_CONFIG_DIRS`` is set to a multi-path value, the response is also a multi-path string separated by the OS
path separator), e.g. ``/etc/xdg/$appname/$version``
"""
# XDG default for $XDG_CONFIG_DIRS only first, if multipath is False
path = os.environ.get("XDG_CONFIG_DIRS", "")
if not path.strip():
path = "/etc/xdg"
return self._with_multi_path(path)
@property
def user_cache_dir(self) -> str:
"""
:return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
``$XDG_CACHE_HOME/$appname/$version``
"""
path = os.environ.get("XDG_CACHE_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.cache")
return self._append_app_name_and_version(path)
@property
def user_state_dir(self) -> str:
"""
:return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
``$XDG_STATE_HOME/$appname/$version``
"""
path = os.environ.get("XDG_STATE_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.local/state")
return self._append_app_name_and_version(path)
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_state_dir` if not opinionated, else the ``log`` subdirectory inside it
"""
path = self.user_state_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user, e.g. ``~/Documents``
"""
documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
if documents_dir is None:
documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
if not documents_dir:
documents_dir = os.path.expanduser("~/Documents")
return documents_dir
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
``$XDG_RUNTIME_DIR/$appname/$version``
"""
path = os.environ.get("XDG_RUNTIME_DIR", "")
if not path.strip():
path = f"/run/user/{getuid()}"
return self._append_app_name_and_version(path)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_data_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_config_dir)
def _first_item_as_path_if_multipath(self, directory: str) -> Path:
if self.multipath:
# If multipath is enabled, the directory is a multi-path string; keep only the first entry.
directory = directory.split(os.pathsep)[0]
return Path(directory)
def _get_user_dirs_folder(key: str) -> str | None:
"""Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/"""
user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs")
if os.path.exists(user_dirs_config_path):
parser = ConfigParser()
with open(user_dirs_config_path) as stream:
# Add fake section header, so ConfigParser doesn't complain
parser.read_string(f"[top]\n{stream.read()}")
if key not in parser["top"]:
return None
path = parser["top"][key].strip('"')
# Expand the $HOME placeholder to the user's home directory
path = path.replace("$HOME", os.path.expanduser("~"))
return path
return None
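# Sketch of the file format this helper parses, with hypothetical contents:
#   # ~/.config/user-dirs.dirs
#   XDG_DOCUMENTS_DIR="$HOME/Documents"
# _get_user_dirs_folder("XDG_DOCUMENTS_DIR") would then return the value with
# $HOME expanded, e.g. "/home/alice/Documents" for a hypothetical user.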
__all__ = [
"Unix",
]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/windows.py | from __future__ import annotations
import ctypes
import os
import sys
from functools import lru_cache
from typing import Callable
from .api import PlatformDirsABC
class Windows(PlatformDirsABC):
"""`MSDN on where to store app data files
<http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`roaming <platformdirs.api.PlatformDirsABC.roaming>`,
`opinion <platformdirs.api.PlatformDirsABC.opinion>`."""
@property
def user_data_dir(self) -> str:
"""
:return: data directory tied to the user, e.g.
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
"""
const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(get_win_folder(const))
return self._append_parts(path)
def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
params = []
if self.appname:
if self.appauthor is not False:
author = self.appauthor or self.appname
params.append(author)
params.append(self.appname)
if opinion_value is not None and self.opinion:
params.append(opinion_value)
if self.version:
params.append(self.version)
return os.path.join(path, *params)
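# Composition sketch for _append_parts, with hypothetical values: given
# appname="MyApp", appauthor="MyCompany", version="1.0" and
# opinion_value="Cache", the parts are appended in the order author, app
# name, opinion folder, version, e.g. <path>\MyCompany\MyApp\Cache\1.0.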
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
return self._append_parts(path)
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `site_data_dir`"""
return self.site_data_dir
@property
def user_cache_dir(self) -> str:
"""
:return: cache directory tied to the user (a ``Cache`` folder is appended within ``$appname`` when opinionated), e.g.
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
"""
path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
return self._append_parts(path, opinion_value="Cache")
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_data_dir` if not opinionated, else the ``Logs`` subdirectory inside it
"""
path = self.user_data_dir
if self.opinion:
path = os.path.join(path, "Logs")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user, e.g. ``%USERPROFILE%\\Documents``
"""
return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, e.g.
``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
"""
path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
return self._append_parts(path)
def get_win_folder_from_env_vars(csidl_name: str) -> str:
"""Get folder from environment variables."""
if csidl_name == "CSIDL_PERSONAL": # does not have an environment name
return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")
env_var_name = {
"CSIDL_APPDATA": "APPDATA",
"CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
"CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
}.get(csidl_name)
if env_var_name is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
result = os.environ.get(env_var_name)
if result is None:
raise ValueError(f"Unset environment variable: {env_var_name}")
return result
def get_win_folder_from_registry(csidl_name: str) -> str:
"""Get folder from the registry.
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
"CSIDL_PERSONAL": "Personal",
}.get(csidl_name)
if shell_folder_name is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows
raise NotImplementedError
import winreg
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
directory, _ = winreg.QueryValueEx(key, shell_folder_name)
return str(directory)
def get_win_folder_via_ctypes(csidl_name: str) -> str:
"""Get folder with ctypes."""
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
"CSIDL_PERSONAL": 5,
}.get(csidl_name)
if csidl_const is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
buf = ctypes.create_unicode_buffer(1024)
windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker
windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if it has highbit chars.
if any(ord(c) > 255 for c in buf):
buf2 = ctypes.create_unicode_buffer(1024)
if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _pick_get_win_folder() -> Callable[[str], str]:
if hasattr(ctypes, "windll"):
return get_win_folder_via_ctypes
try:
import winreg # noqa: F401
except ImportError:
return get_win_folder_from_env_vars
else:
return get_win_folder_from_registry
get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())
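# Strategy sketch: folders are resolved via ctypes when windll is available
# (CPython on Windows), falling back to the registry, then to environment
# variables. The lru_cache wrapper memoises results, so a hypothetical call
# like get_win_folder("CSIDL_LOCAL_APPDATA") performs the lookup only once
# per process.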
__all__ = [
"Windows",
]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/api.py | from __future__ import annotations
import os
import sys
from abc import ABC, abstractmethod
from pathlib import Path
if sys.version_info >= (3, 8): # pragma: no branch
from typing import Literal # pragma: no cover
class PlatformDirsABC(ABC):
"""
Abstract base class for platform directories.
"""
def __init__(
self,
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
multipath: bool = False,
opinion: bool = True,
):
"""
Create a new platform directory.
:param appname: See `appname`.
:param appauthor: See `appauthor`.
:param version: See `version`.
:param roaming: See `roaming`.
:param multipath: See `multipath`.
:param opinion: See `opinion`.
"""
self.appname = appname #: The name of the application.
self.appauthor = appauthor
"""
The name of the app author or distributing body for this application. Typically, it is the owning company name.
Defaults to `appname`. You may pass ``False`` to disable it.
"""
self.version = version
"""
An optional version path element to append to the path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
"""
self.roaming = roaming
"""
Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
for roaming profiles, this user data will be synced on login (see
`here <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
"""
self.multipath = multipath
"""
An optional parameter, only applicable to Unix/Linux, which indicates that the entire list of data dirs should be
returned. By default, only the first item is returned.
"""
self.opinion = opinion #: A flag indicating whether to use opinionated values.
def _append_app_name_and_version(self, *base: str) -> str:
params = list(base[1:])
if self.appname:
params.append(self.appname)
if self.version:
params.append(self.version)
return os.path.join(base[0], *params)
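# Sketch with hypothetical values: with appname="MyApp" and version="1.0",
# _append_app_name_and_version("/usr/local/share") returns
# "/usr/local/share/MyApp/1.0"; any extra positional parts are inserted
# between the base and the app name.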
@property
@abstractmethod
def user_data_dir(self) -> str:
""":return: data directory tied to the user"""
@property
@abstractmethod
def site_data_dir(self) -> str:
""":return: data directory shared by users"""
@property
@abstractmethod
def user_config_dir(self) -> str:
""":return: config directory tied to the user"""
@property
@abstractmethod
def site_config_dir(self) -> str:
""":return: config directory shared by the users"""
@property
@abstractmethod
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user"""
@property
@abstractmethod
def user_state_dir(self) -> str:
""":return: state directory tied to the user"""
@property
@abstractmethod
def user_log_dir(self) -> str:
""":return: log directory tied to the user"""
@property
@abstractmethod
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user"""
@property
@abstractmethod
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user"""
@property
def user_data_path(self) -> Path:
""":return: data path tied to the user"""
return Path(self.user_data_dir)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users"""
return Path(self.site_data_dir)
@property
def user_config_path(self) -> Path:
""":return: config path tied to the user"""
return Path(self.user_config_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users"""
return Path(self.site_config_dir)
@property
def user_cache_path(self) -> Path:
""":return: cache path tied to the user"""
return Path(self.user_cache_dir)
@property
def user_state_path(self) -> Path:
""":return: state path tied to the user"""
return Path(self.user_state_dir)
@property
def user_log_path(self) -> Path:
""":return: log path tied to the user"""
return Path(self.user_log_dir)
@property
def user_documents_path(self) -> Path:
""":return: documents path tied to the user"""
return Path(self.user_documents_dir)
@property
def user_runtime_path(self) -> Path:
""":return: runtime path tied to the user"""
return Path(self.user_runtime_dir)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/__main__.py | from __future__ import annotations
from platformdirs import PlatformDirs, __version__
PROPS = (
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"user_documents_dir",
"user_runtime_dir",
"site_data_dir",
"site_config_dir",
)
def main() -> None:
app_name = "MyApp"
app_author = "MyCompany"
print(f"-- platformdirs {__version__} --")
print("-- app dirs (with optional 'version')")
dirs = PlatformDirs(app_name, app_author, version="1.0")
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (without optional 'version')")
dirs = PlatformDirs(app_name, app_author)
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (without optional 'appauthor')")
dirs = PlatformDirs(app_name)
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
print("\n-- app dirs (with disabled 'appauthor')")
dirs = PlatformDirs(app_name, appauthor=False)
for prop in PROPS:
print(f"{prop}: {getattr(dirs, prop)}")
if __name__ == "__main__":
main()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/platformdirs/macos.py | from __future__ import annotations
import os
from .api import PlatformDirsABC
class MacOS(PlatformDirsABC):
"""
Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
<https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>` and
`version <platformdirs.api.PlatformDirsABC.version>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/"))
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version("/Library/Application Support")
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/"))
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, e.g. ``/Library/Preferences/$appname/$version``"""
return self._append_app_name_and_version("/Library/Preferences")
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
""":return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))
@property
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user, e.g. ``~/Documents``"""
return os.path.expanduser("~/Documents")
@property
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))
__all__ = [
"MacOS",
]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_structures.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
class InfinityType:
def __repr__(self) -> str:
return "Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return False
def __le__(self, other: object) -> bool:
return False
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return True
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
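# A minimal sketch of how these sentinels behave: each compares below or
# above any other object, which lets version-comparison keys use them as
# padding values.
def _sentinel_example() -> bool:
    # True: NegativeInfinity sorts before, and Infinity after, everything.
    return NegativeInfinity < 0 < Infinity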
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/requirements.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import urllib.parse
from typing import Any, List, Optional, Set
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found; users should refer to PEP 508.
"""
class Requirement:
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string: str) -> None:
try:
parsed = _parse_requirement(requirement_string)
except ParserSyntaxError as e:
raise InvalidRequirement(str(e)) from e
self.name: str = parsed.name
if parsed.url:
parsed_url = urllib.parse.urlparse(parsed.url)
if parsed_url.scheme == "file":
if urllib.parse.urlunparse(parsed_url) != parsed.url:
raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc
):
raise InvalidRequirement(f"Invalid URL: {parsed.url}")
self.url: Optional[str] = parsed.url
else:
self.url = None
self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: Optional[Marker] = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
def __str__(self) -> str:
parts: List[str] = [self.name]
if self.extras:
formatted_extras = ",".join(sorted(self.extras))
parts.append(f"[{formatted_extras}]")
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append(f"@ {self.url}")
if self.marker:
parts.append(" ")
if self.marker:
parts.append(f"; {self.marker}")
return "".join(parts)
def __repr__(self) -> str:
return f"<Requirement('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Requirement):
return NotImplemented
return (
self.name == other.name
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
)
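# A minimal usage sketch; the requirement string is a hypothetical example:
def _requirement_example() -> None:
    req = Requirement("name[extra1,extra2]>=1.0,!=1.3 ; python_version < '3.11'")
    # name='name', extras={'extra1', 'extra2'}, specifier='!=1.3,>=1.0',
    # marker=python_version < "3.11"
    print(req.name, sorted(req.extras), req.specifier, req.marker)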
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/_tokenizer.py | import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
from .specifiers import Specifier
@dataclass
class Token:
name: str
text: str
position: int
class ParserSyntaxError(Exception):
"""The provided source text could not be parsed correctly."""
def __init__(
self,
message: str,
*,
source: str,
span: Tuple[int, int],
) -> None:
self.span = span
self.message = message
self.source = source
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The matching token is read and returned.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
span_start=open_position,
)
self.read()
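# A minimal usage sketch of the check/read protocol, using a hypothetical
# marker fragment as input:
def _tokenizer_example() -> None:
    tokenizer = Tokenizer("extra == 'dev'", rules=DEFAULT_RULES)
    if tokenizer.check("VARIABLE"):
        print(tokenizer.read())  # Token(name='VARIABLE', text='extra', position=0)
    tokenizer.consume("WS")  # skip the whitespace before the operator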
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/specifiers.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
from packaging.version import Version
"""
import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
UnparsedVersion = Union[Version, str]
UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
CallableOperator = Callable[[Version, str], bool]
def _coerce_version(version: UnparsedVersion) -> Version:
if not isinstance(version, Version):
version = Version(version)
return version
class InvalidSpecifier(ValueError):
"""
Raised when attempting to create a :class:`Specifier` with a specifier
string that is invalid.
>>> Specifier("lolwat")
Traceback (most recent call last):
...
packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
"""
class BaseSpecifier(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __str__(self) -> str:
"""
Returns the str representation of this Specifier-like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Returns a hash value for this Specifier-like object.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier-like
objects are equal.
:param other: The other object to check against.
"""
@property
@abc.abstractmethod
def prereleases(self) -> Optional[bool]:
"""Whether or not pre-releases as a whole are allowed.
This can be set to either ``True`` or ``False`` to explicitly enable or disable
prereleases or it can be set to ``None`` (the default) to use default semantics.
"""
@prereleases.setter
def prereleases(self, value: bool) -> None:
"""Setter for :attr:`prereleases`.
:param value: The value to set.
"""
@abc.abstractmethod
def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class Specifier(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
It is generally not required to instantiate this manually. You should instead
prefer to work with :class:`SpecifierSet`, which can parse
comma-separated version specifiers (which is what package metadata contains).
"""
_operator_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(
r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
"""Initialize a Specifier instance.
:param spec:
The string representation of a specifier which will be parsed and
normalized before use.
:param prereleases:
This tells the specifier if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given specifier is invalid (i.e. bad syntax).
"""
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
# https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
@property # type: ignore[override]
def prereleases(self) -> bool:
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if Version(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
@property
def operator(self) -> str:
"""The operator of this specifier.
>>> Specifier("==1.2.3").operator
'=='
"""
return self._spec[0]
@property
def version(self) -> str:
"""The version of this specifier.
>>> Specifier("==1.2.3").version
'1.2.3'
"""
return self._spec[1]
def __repr__(self) -> str:
"""A representation of the Specifier that shows all internal state.
>>> Specifier('>=1.0.0')
<Specifier('>=1.0.0')>
>>> Specifier('>=1.0.0', prereleases=False)
<Specifier('>=1.0.0', prereleases=False)>
>>> Specifier('>=1.0.0', prereleases=True)
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the Specifier that can be round-tripped.
>>> str(Specifier('>=1.0.0'))
'>=1.0.0'
>>> str(Specifier('>=1.0.0', prereleases=False))
'>=1.0.0'
"""
return "{}{}".format(*self._spec)
@property
def _canonical_spec(self) -> Tuple[str, str]:
canonical_version = canonicalize_version(
self._spec[1],
strip_trailing_zero=(self._spec[0] != "~="),
)
return self._spec[0], canonical_version
def __hash__(self) -> int:
return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
"""Whether or not the two Specifier-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
True
>>> (Specifier("==1.2.3", prereleases=False) ==
... Specifier("==1.2.3", prereleases=True))
True
>>> Specifier("==1.2.3") == "==1.2.3"
True
>>> Specifier("==1.2.3") == Specifier("==1.2.4")
False
>>> Specifier("==1.2.3") == Specifier("~=1.2.3")
False
"""
if isinstance(other, str):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, f"_compare_{self._operators[op]}"
)
return operator_callable
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore suffix segments.
prefix = ".".join(
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
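# Sketch: Specifier("~=2.2").contains("2.3") is True (it satisfies
# >=2.2,==2.*), while Specifier("~=2.2").contains("3.0") is False.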
def _compare_equal(self, prospective: Version, spec: str) -> bool:
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
normalized_prospective = canonicalize_version(
prospective.public, strip_trailing_zero=False
)
# Get the normalized version string ignoring the trailing .*
normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
split_spec = _version_split(normalized_spec)
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
split_prospective = _version_split(normalized_prospective)
# 0-pad the prospective version before shortening it to get the correct
# shortened version.
padded_prospective, _ = _pad_version(split_prospective, split_spec)
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
shortened_prospective = padded_prospective[: len(split_spec)]
return shortened_prospective == split_spec
else:
# Convert our spec string into a Version
spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec_version.local:
prospective = Version(prospective.public)
return prospective == spec_version
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
return not self._compare_equal(prospective, spec)
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) <= Version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) >= Version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
def __contains__(self, item: Union[str, Version]) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in Specifier(">=1.2.3")
True
>>> Version("1.2.3") in Specifier(">=1.2.3")
True
>>> "1.0.0" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
True
"""
return self.contains(item)
def contains(
self, item: UnparsedVersion, prereleases: Optional[bool] = None
) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this Specifier. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> Specifier(">=1.2.3").contains("1.2.3")
True
>>> Specifier(">=1.2.3").contains(Version("1.2.3"))
True
>>> Specifier(">=1.2.3").contains("1.0.0")
False
>>> Specifier(">=1.2.3").contains("1.3.0a1")
False
>>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
True
>>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
True
"""
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version, this allows us to have a shortcut for
# "2.0" in Specifier(">=2")
normalized_item = _coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not; if we do not support prereleases then we can short-circuit the
# logic when this version is a prerelease.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable: CallableOperator = self._get_operator(self.operator)
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifier.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(Specifier().contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
['1.2.3', '1.3', <Version('1.4')>]
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
['1.5a1']
>>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
"""
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = _coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> List[str]:
result: List[str] = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
)
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
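# Worked example: _pad_version(["1", "2"], ["1", "2", "3"]) returns
# (["1", "2", "0"], ["1", "2", "3"]); the shorter release segment is
# right-padded with zeros so the two versions compare position by position.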
class SpecifierSet(BaseSpecifier):
"""This class abstracts handling of a set of version specifiers.
It can be passed a single specifier (``>=3.0``), a comma-separated list of
specifiers (``>=3.0,!=3.1``), or no specifier at all.
"""
def __init__(
self, specifiers: str = "", prereleases: Optional[bool] = None
) -> None:
"""Initialize a SpecifierSet instance.
:param specifiers:
The string representation of a specifier or a comma-separated list of
specifiers which will be parsed and normalized before use.
:param prereleases:
This tells the SpecifierSet if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given ``specifiers`` are not parseable, then this exception will be
raised.
"""
# Split on `,` to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, making each one a
# Specifier.
parsed: Set[Specifier] = set()
for specifier in split_specifiers:
parsed.add(Specifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@property
def prereleases(self) -> Optional[bool]:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __repr__(self) -> str:
"""A representation of the specifier set that shows all internal state.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> SpecifierSet('>=1.0.0,!=2.0.0')
<SpecifierSet('!=2.0.0,>=1.0.0')>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<SpecifierSet({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
'!=1.0.1,>=1.0.0'
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
>>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
"""
if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other: object) -> bool:
"""Whether or not the two SpecifierSet-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
False
"""
if isinstance(other, (str, Specifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __len__(self) -> int:
"""Returns the number of specifiers in this specifier set."""
return len(self._specs)
def __iter__(self) -> Iterator[Specifier]:
"""
Returns an iterator over all the underlying :class:`Specifier` instances
in this specifier set.
>>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
[<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
"""
return iter(self._specs)
def __contains__(self, item: UnparsedVersion) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
True
"""
return self.contains(item)
def contains(
self,
item: UnparsedVersion,
prereleases: Optional[bool] = None,
installed: Optional[bool] = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this SpecifierSet. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
True
"""
# Ensure that our item is a Version instance.
if not isinstance(item, Version):
item = Version(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
if installed and item.is_prerelease:
item = Version(item.base_version)
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
['1.3', <Version('1.4')>]
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
[]
>>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
An "empty" SpecifierSet will filter items based on the presence of prerelease
versions in the set.
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet("").filter(["1.5a1"]))
['1.5a1']
>>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
"""
# Determine if we're forcing a prerelease or not. If we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks about whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one; this will act as a logical AND amongst
# the specifiers.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iter(iterable)
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases.
else:
filtered: List[UnparsedVersionVar] = []
found_prereleases: List[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return iter(found_prereleases)
return iter(filtered)
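# Editor's sketch (hypothetical, not part of the vendored packaging source):
# demonstrates the two prerelease behaviours implemented above. contains()
# rejects prereleases unless they are enabled (with installed=True checking
# only the base version), and an empty set's filter() only falls back to
# prereleases when no final release matched. Defined only; never called at
# import time.
def _demo_specifierset_prereleases() -> None:
    specs = SpecifierSet(">=1.0.0,!=1.0.1")
    # Prereleases are rejected unless explicitly enabled.
    assert not specs.contains("1.3.0a1")
    assert specs.contains("1.3.0a1", prereleases=True)
    # installed=True compares a prerelease by its base version only.
    assert specs.contains("1.3.0a1", prereleases=True, installed=True)
    # An empty set keeps final releases when any exist, else the prereleases.
    assert list(SpecifierSet("").filter(["1.3", "1.5a1"])) == ["1.3"]
    assert list(SpecifierSet("").filter(["1.5a1"])) == ["1.5a1"]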
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/markers.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import (
MarkerAtom,
MarkerList,
Op,
Value,
Variable,
parse_marker as _parse_marker,
)
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
Operator = Callable[[str, str], bool]
class InvalidMarker(ValueError):
"""
An invalid marker was found; users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was used that does not exist inside the environment.
"""
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
def _format_marker(
marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself its own list. In that case we want to
# skip the rest of this function so that we don't get extraneous () on
# the outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
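# Editor's sketch (hypothetical, not part of the vendored packaging source):
# how _format_marker serializes parsed atoms, using the Variable/Op/Value
# node types imported from ._parser above. Nested groups regain parentheses
# except at the top level. Defined only; never called at import time.
def _demo_format_marker() -> None:
    atom = (Variable("os_name"), Op("=="), Value("posix"))
    assert _format_marker(atom) == 'os_name == "posix"'
    assert _format_marker([atom, "or", [atom, "and", atom]]) == (
        'os_name == "posix" or (os_name == "posix" and os_name == "posix")'
    )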
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs, prereleases=True)
oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
# PEP 685 – Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
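# Editor's sketch (hypothetical, not part of the vendored packaging source):
# extra names are canonicalized per PEP 685, while every other environment
# key passes through untouched. Defined only; never called at import time.
def _demo_normalize() -> None:
    assert _normalize("Foo.Bar", "foo-bar", key="extra") == ("foo-bar", "foo-bar")
    assert _normalize("Linux", key="os_name") == ("Linux",)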
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
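# Editor's sketch (hypothetical, not part of the vendored packaging source):
# evaluating a parsed "and" group against an explicit environment, using the
# Variable/Op/Value node types imported from ._parser. Defined only; never
# called at import time.
def _demo_evaluate_markers() -> None:
    markers = [
        (Variable("os_name"), Op("=="), Value("posix")),
        "and",
        (Variable("python_version"), Op(">="), Value("3.7")),
    ]
    env = {"os_name": "posix", "python_version": "3.8"}
    assert _evaluate_markers(markers, env) is True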
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
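# Editor's sketch (hypothetical, not part of the vendored packaging source):
# default_environment() supplies the values a bare Marker.evaluate() sees, so
# a marker pinned to the current interpreter always evaluates to True.
# Defined only; never called at import time (Marker is defined below).
def _demo_default_environment() -> None:
    env = default_environment()
    # python_version is the two-component feature release, e.g. "3.10".
    assert env["python_version"] == ".".join(platform.python_version_tuple()[:2])
    assert Marker(f'sys_platform == "{sys.platform}"').evaluate()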
class Marker:
def __init__(self, marker: str) -> None:
# Note: We create a Marker object without calling this constructor in
# packaging.requirements.Requirement. If any additional logic is
# added here, make sure to mirror/adapt Requirement.
try:
self._markers = _normalize_extra_values(_parse_marker(marker))
# The attribute `_markers` can be described in terms of a recursive type:
# MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
#
# For example, the following expression:
# python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
#
# is parsed into:
# [
# (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
# 'and',
# [
# (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
# 'or',
# (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
# ]
# ]
except ParserSyntaxError as e:
raise InvalidMarker(str(e)) from e
def __str__(self) -> str:
return _format_marker(self._markers)
def __repr__(self) -> str:
return f"<Marker('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Marker):
return NotImplemented
return str(self) == str(other)
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. ``environment`` is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
current_environment["extra"] = ""
if environment is not None:
current_environment.update(environment)
# The API used to allow setting extra to None. We need to handle this
# case for backwards compatibility.
if current_environment["extra"] is None:
current_environment["extra"] = ""
return _evaluate_markers(self._markers, current_environment)
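# Editor's sketch (hypothetical, not part of the vendored packaging source):
# typical Marker usage. evaluate() merges the supplied overrides into the
# values from default_environment(). Defined only; never called at import
# time.
def _demo_marker() -> None:
    m = Marker('python_version >= "3.7" and os_name == "posix"')
    # str() round-trips through the parsed form.
    assert str(m) == 'python_version >= "3.7" and os_name == "posix"'
    assert m.evaluate({"python_version": "3.8", "os_name": "posix"})
    assert not m.evaluate({"python_version": "3.6", "os_name": "posix"})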
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/__init__.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "23.1"
__author__ = "Donald Stufft and individual contributors"
__email__ = "[email protected]"
__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014-2019 %s" % __author__
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pkg_resources/_vendor/packaging/version.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.version import parse, Version
"""
import collections
import itertools
import re
from typing import Any, Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
NegativeInfinityType,
Tuple[
Union[
SubLocalType,
Tuple[SubLocalType, str],
Tuple[NegativeInfinityType, SubLocalType],
],
...,
],
]
CmpKey = Tuple[
int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
def parse(version: str) -> "Version":
"""Parse the given version string.
>>> parse('1.0.dev1')
<Version('1.0.dev1')>
:param version: The version string to parse.
:raises InvalidVersion: When the version string is not a valid version.
"""
return Version(version)
class InvalidVersion(ValueError):
"""Raised when a version string is not a valid version.
>>> Version("invalid")
Traceback (most recent call last):
...
packaging.version.InvalidVersion: Invalid version: 'invalid'
"""
class _BaseVersion:
_key: Tuple[Any, ...]
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
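# Editor's sketch (hypothetical, not part of the vendored packaging source):
# _BaseVersion delegates all six rich comparisons to the precomputed _key,
# so sorting Version objects follows PEP 440 ordering. Defined only; never
# called at import time (Version is defined below).
def _demo_version_ordering() -> None:
    versions = [
        Version("1.0.post1"), Version("1.0"), Version("1.0a1"), Version("1.0.dev0")
    ]
    assert sorted(versions) == [
        Version("1.0.dev0"), Version("1.0a1"), Version("1.0"), Version("1.0.post1")
    ]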
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.
The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.
:meta hide-value:
"""
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_key: CmpKey
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
@property
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
@property
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
@property
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
@property
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
@property
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume that if we are given a number but not a letter, then this is
# using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
return None
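# Editor's sketch (hypothetical, not part of the vendored packaging source):
# spot-checks of the normalization rules above. Defined only; never called
# at import time.
def _demo_parse_letter_version() -> None:
    # Alternate spellings normalize, and a missing number is an implicit 0.
    assert _parse_letter_version("alpha", None) == ("a", 0)
    assert _parse_letter_version("preview", "4") == ("rc", 4)
    assert _parse_letter_version("rev", "2") == ("post", 2)
    # A bare number (the "1" in "1.0-1") is an implicit post release.
    assert _parse_letter_version(None, "1") == ("post", 1)
    assert _parse_letter_version(None, None) is None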
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
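# Editor's sketch (hypothetical, not part of the vendored packaging source):
# the docstring example above, plus separator and case handling. Defined
# only; never called at import time.
def _demo_parse_local_version() -> None:
    assert _parse_local_version("abc.1.twelve") == ("abc", 1, "twelve")
    # ".", "_" and "-" are interchangeable separators; text is lowercased.
    assert _parse_local_version("Ubuntu_1-LTS") == ("ubuntu", 1, "lts")
    assert _parse_local_version(None) is None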
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll reverse the list, drop all the now-leading
# zeros until we come to something non-zero, then re-reverse the rest back
# into the correct order, make it a tuple, and use that for our sorting key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: PrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: PrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
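# Editor's sketch (hypothetical, not part of the vendored packaging source):
# a few consequences of the key built above, observed through Version
# comparisons. Defined only; never called at import time.
def _demo_cmpkey_ordering() -> None:
    # Trailing zeros are dropped from the release before comparison.
    assert Version("1.0") == Version("1.0.0")
    # A dev release sorts before a pre-release of the same version.
    assert Version("1.0.dev0") < Version("1.0a0")
    # A local segment sorts after the same version without one.
    assert Version("1.0") < Version("1.0+abc")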
|