file_path | content
---|---
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pytz-2022.7.1.dist-info/LICENSE.txt | Copyright (c) 2003-2019 Stuart Bishop <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/vengine_gen.py | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, os
import types
from . import model
from .error import VerificationError
class VGenericEngine(object):
_class_key = 'g'
_gen_python_module = False
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self.export_symbols = []
self._struct_pending_verification = {}
def patch_extension_kwds(self, kwds):
# add 'export_symbols' to the dictionary. Note that we add the
# list before filling it. When we fill it, it will thus also show
# up in kwds['export_symbols'].
kwds.setdefault('export_symbols', self.export_symbols)
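# Illustrative sketch (not part of cffi): dict.setdefault() stores a
# reference, not a copy, so symbols appended to the list afterwards are
# visible through the kwds dict as well.  '_cffi_f_foo' is a hypothetical
# symbol name:
_demo_kwds, _demo_syms = {}, []
_demo_kwds.setdefault('export_symbols', _demo_syms)
_demo_syms.append('_cffi_f_foo')
assert _demo_kwds['export_symbols'] == ['_cffi_f_foo']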
def find_module(self, module_name, path, so_suffixes):
for so_suffix in so_suffixes:
basename = module_name + so_suffix
if path is None:
path = sys.path
for dirname in path:
filename = os.path.join(dirname, basename)
if os.path.isfile(filename):
return filename
def collect_types(self):
pass # not needed in the generic engine
def _prnt(self, what=''):
self._f.write(what + '\n')
def write_source_to_f(self):
prnt = self._prnt
# first paste a standard set of lines that are mostly '#include's
prnt(cffimod_header)
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
#
# call generate_gen_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate('decl')
#
# on Windows, distutils insists on putting init_cffi_xyz in
# 'export_symbols', so instead of fighting it, just give up and
# give it one
if sys.platform == 'win32':
if sys.version_info >= (3,):
prefix = 'PyInit_'
else:
prefix = 'init'
modname = self.verifier.get_module_name()
prnt("void %s%s(void) { }\n" % (prefix, modname))
def load_library(self, flags=0):
# import it with the CFFI backend
backend = self.ffi._backend
# on Posix, the path must contain a '/' so that dlopen() treats it
# as a filename and does not search the standard library directories
filename = os.path.join(os.curdir, self.verifier.modulefilename)
module = backend.load_library(filename, flags)
#
# call loading_gen_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
# build the FFILibrary class and instance; this is a module subclass
# because modules are expected to have usually-constant attributes, and
# on PyPy this lets the JIT treat those attributes as constants,
# which is what we want.
class FFILibrary(types.ModuleType):
_cffi_generic_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir
library = FFILibrary("")
#
# finally, call the loaded_gen_xxx() functions. This will set
# up the 'library' object.
self._load(module, 'loaded', library=library)
return library
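# Illustrative sketch (not part of cffi): the minimal pattern used above.
# Subclassing types.ModuleType gives the library object module semantics;
# on PyPy the JIT can then treat its attributes as quasi-constant.  The
# names below ('_DemoLibrary', 'answer') are hypothetical:
import types as _demo_types
class _DemoLibrary(_demo_types.ModuleType):
    _demo_dir = []
    def __dir__(self):
        return _DemoLibrary._demo_dir
_demo_lib = _DemoLibrary("")
_demo_lib.answer = 42
_DemoLibrary._demo_dir.append('answer')
assert dir(_demo_lib) == ['answer']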
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_gen_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_gen_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
# typedefs: generates no code so far
_generate_gen_typedef_decl = _generate_nothing
_loading_gen_typedef = _loaded_noop
_loaded_gen_typedef = _loaded_noop
# ----------
# function declarations
def _generate_gen_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no _cffi_f_%s wrapper)
self._generate_gen_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
argnames = []
for i, type in enumerate(tp.args):
indirection = ''
if isinstance(type, model.StructOrUnion):
indirection = '*'
argnames.append('%sx%d' % (indirection, i))
context = 'argument of %s' % name
arglist = [type.get_c_name(' %s' % arg, context)
for type, arg in zip(tp.args, argnames)]
tpresult = tp.result
if isinstance(tpresult, model.StructOrUnion):
arglist.insert(0, tpresult.get_c_name(' *r', context))
tpresult = model.void_type
arglist = ', '.join(arglist) or 'void'
wrappername = '_cffi_f_%s' % name
self.export_symbols.append(wrappername)
if tp.abi:
abi = tp.abi + ' '
else:
abi = ''
funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist)
context = 'result of %s' % name
prnt(tpresult.get_c_name(funcdecl, context))
prnt('{')
#
if isinstance(tp.result, model.StructOrUnion):
result_code = '*r = '
elif not isinstance(tp.result, model.VoidType):
result_code = 'return '
else:
result_code = ''
prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames)))
prnt('}')
prnt()
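# Illustrative sketch (not part of cffi): what the generated wrapper looks
# like for a hypothetical 'struct pt add(struct pt a, struct pt b);'.
# Struct arguments are received by pointer ('*x0', '*x1') and the struct
# result is written through the extra leading '*r' argument, matching the
# code above:
_demo_generated_wrapper = '''
void _cffi_f_add(struct pt *r, struct pt *x0, struct pt *x1)
{
  *r = add(*x0, *x1);
}
'''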
_loading_gen_function = _loaded_noop
def _loaded_gen_function(self, tp, name, module, library):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
newfunction = self._load_constant(False, tp, name, module)
else:
indirections = []
base_tp = tp
if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args)
or isinstance(tp.result, model.StructOrUnion)):
indirect_args = []
for i, typ in enumerate(tp.args):
if isinstance(typ, model.StructOrUnion):
typ = model.PointerType(typ)
indirections.append((i, typ))
indirect_args.append(typ)
indirect_result = tp.result
if isinstance(indirect_result, model.StructOrUnion):
if indirect_result.fldtypes is None:
raise TypeError("'%s' is used as result type, "
"but is opaque" % (
indirect_result._get_c_name(),))
indirect_result = model.PointerType(indirect_result)
indirect_args.insert(0, indirect_result)
indirections.insert(0, ("result", indirect_result))
indirect_result = model.void_type
tp = model.FunctionPtrType(tuple(indirect_args),
indirect_result, tp.ellipsis)
BFunc = self.ffi._get_cached_btype(tp)
wrappername = '_cffi_f_%s' % name
newfunction = module.load_function(BFunc, wrappername)
for i, typ in indirections:
newfunction = self._make_struct_wrapper(newfunction, i, typ,
base_tp)
setattr(library, name, newfunction)
type(library)._cffi_dir.append(name)
def _make_struct_wrapper(self, oldfunc, i, tp, base_tp):
backend = self.ffi._backend
BType = self.ffi._get_cached_btype(tp)
if i == "result":
ffi = self.ffi
def newfunc(*args):
res = ffi.new(BType)
oldfunc(res, *args)
return res[0]
else:
def newfunc(*args):
args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:]
return oldfunc(*args)
newfunc._cffi_base_type = base_tp
return newfunc
# ----------
# named structs
def _generate_gen_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _loading_gen_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_gen_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_gen_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _loading_gen_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_gen_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
self.export_symbols.append(layoutfuncname)
prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static intptr_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' return nums[i];')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
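# Illustrative sketch (not part of cffi): the layout function emitted above,
# for a hypothetical 'struct pt { int x; int y; };'.  Querying it with
# i = 0, 1, 2, ... yields size, alignment, then (offset, size) per field,
# terminated by -1:
_demo_generated_layout = '''
intptr_t _cffi_layout_struct_pt(intptr_t i)
{
  struct _cffi_aligncheck { char x; struct pt y; };
  static intptr_t nums[] = {
    sizeof(struct pt),
    offsetof(struct _cffi_aligncheck, y),
    offsetof(struct pt, x),
    sizeof(((struct pt *)0)->x),
    offsetof(struct pt, y),
    sizeof(((struct pt *)0)->y),
    -1
  };
  return nums[i];
  /* the next line is not executed, but compiled */
  _cffi_check_struct_pt(0);
}
'''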
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0]
function = module.load_function(BFunc, layoutfuncname)
layout = []
num = 0
while True:
x = function(num)
if x < 0: break
layout.append(x)
num += 1
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
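# Illustrative sketch (not part of cffi): decoding the layout list read by
# the loop above, for the hypothetical 'struct pt' with two 4-byte ints:
_demo_layout = [8, 4, 0, 4, 4, 4]   # values returned before the -1 marker
_demo_size, _demo_align = _demo_layout[0], _demo_layout[1]
_demo_fieldofs, _demo_fieldsize = _demo_layout[2::2], _demo_layout[3::2]
assert (_demo_size, _demo_align) == (8, 4)
assert _demo_fieldofs == [0, 4] and _demo_fieldsize == [4, 4]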
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
def _generate_gen_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_gen_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _loading_gen_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_gen_enum(tp, name, module, '')
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_gen_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_gen_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_gen_const(self, is_int, name, tp=None, category='const',
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
self.export_symbols.append(funcname)
if check_value is not None:
assert is_int
assert category == 'const'
prnt('int %s(char *out_error)' % funcname)
prnt('{')
self._check_int_constant_value(name, check_value)
prnt(' return 0;')
prnt('}')
elif is_int:
assert category == 'const'
prnt('int %s(long long *out_value)' % funcname)
prnt('{')
prnt(' *out_value = (long long)(%s);' % (name,))
prnt(' return (%s) <= 0;' % (name,))
prnt('}')
else:
assert tp is not None
assert check_value is None
if category == 'var':
ampersand = '&'
else:
ampersand = ''
extra = ''
if category == 'const' and isinstance(tp, model.StructOrUnion):
extra = 'const *'
ampersand = '&'
prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name))
prnt('{')
prnt(' return (%s%s);' % (ampersand, name))
prnt('}')
prnt()
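# Illustrative sketch (not part of cffi): for a hypothetical integer
# constant FOO, the code above emits an accessor like this; the return
# value tells the loader whether the constant is non-positive, i.e.
# whether the 'long long' slot may need an unsigned reinterpretation:
_demo_generated_const = '''
int _cffi_const_FOO(long long *out_value)
{
  *out_value = (long long)(FOO);
  return (FOO) <= 0;
}
'''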
def _generate_gen_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_gen_const(is_int, name, tp)
_loading_gen_constant = _loaded_noop
def _load_constant(self, is_int, tp, name, module, check_value=None):
funcname = '_cffi_const_%s' % name
if check_value is not None:
assert is_int
self._load_known_int_constant(module, funcname)
value = check_value
elif is_int:
BType = self.ffi._typeof_locked("long long*")[0]
BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0]
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType)
negative = function(p)
value = int(p[0])
if value < 0 and not negative:
BLongLong = self.ffi._typeof_locked("long long")[0]
value += (1 << (8*self.ffi.sizeof(BLongLong)))
else:
assert check_value is None
fntypeextra = '(*)(void)'
if isinstance(tp, model.StructOrUnion):
fntypeextra = '*' + fntypeextra
BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0]
function = module.load_function(BFunc, funcname)
value = function()
if isinstance(tp, model.StructOrUnion):
value = value[0]
return value
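# Illustrative sketch (not part of cffi): the sign fix-up above.  A constant
# like 0xFFFFFFFFFFFFFFFF read back through a signed 'long long' slot comes
# out as -1; since the C side reported it as positive, 2**64 is added back:
_demo_value = -1                     # as read from the 'long long *' slot
_demo_value += (1 << 64)             # 1 << (8 * sizeof(long long))
assert _demo_value == 0xFFFFFFFFFFFFFFFF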
def _loaded_gen_constant(self, tp, name, module, library):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
value = self._load_constant(is_int, tp, name, module)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
# ----------
# enums
def _check_int_constant_value(self, name, value):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' sprintf(buf, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' %
name)
prnt(' sprintf(out_error, "%s has the real value %s, not %s",')
prnt(' "%s", buf, "%d");' % (name[:100], value))
prnt(' return -1;')
prnt(' }')
def _load_known_int_constant(self, module, funcname):
BType = self.ffi._typeof_locked("char[]")[0]
BFunc = self.ffi._typeof_locked("int(*)(char*)")[0]
function = module.load_function(BFunc, funcname)
p = self.ffi.new(BType, 256)
if function(p) < 0:
error = self.ffi.string(p)
if sys.version_info >= (3,):
error = str(error, 'utf-8')
raise VerificationError(error)
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
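# Illustrative sketch (not part of cffi): the mangling above in action, for
# the anonymous-enum name '$enum_$1' mentioned in the comment:
assert '$enum_$1'.replace('$', '___D_') == '___D_enum____D_1'
assert '_cffi_e_%s_%s' % ('enum', '___D_enum____D_1') == \
    '_cffi_e_enum____D_enum____D_1'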
def _generate_gen_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_gen_const(True, enumerator)
return
#
funcname = self._enum_funcname(prefix, name)
self.export_symbols.append(funcname)
prnt = self._prnt
prnt('int %s(char *out_error)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue)
prnt(' return 0;')
prnt('}')
prnt()
def _loading_gen_enum(self, tp, name, module, prefix='enum'):
if tp.partial:
enumvalues = [self._load_constant(True, tp, enumerator, module)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
else:
funcname = self._enum_funcname(prefix, name)
self._load_known_int_constant(module, funcname)
def _loaded_gen_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
type(library)._cffi_dir.append(enumerator)
# ----------
# macros: for now only for integers
def _generate_gen_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_gen_const(True, name, check_value=check_value)
_loading_gen_macro = _loaded_noop
def _loaded_gen_macro(self, tp, name, module, library):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
value = self._load_constant(True, tp, name, module,
check_value=check_value)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
# ----------
# global variables
def _generate_gen_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
if tp.length_is_unknown():
prnt = self._prnt
funcname = '_cffi_sizeof_%s' % (name,)
self.export_symbols.append(funcname)
prnt("size_t %s(void)" % funcname)
prnt("{")
prnt(" return sizeof(%s);" % (name,))
prnt("}")
tp_ptr = model.PointerType(tp.item)
self._generate_gen_const(False, name, tp_ptr)
else:
tp_ptr = model.PointerType(tp)
self._generate_gen_const(False, name, tp_ptr, category='var')
_loading_gen_variable = _loaded_noop
def _loaded_gen_variable(self, tp, name, module, library):
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length_is_unknown():
funcname = '_cffi_sizeof_%s' % (name,)
BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0]
function = module.load_function(BFunc, funcname)
size = function()
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
tp_ptr = model.PointerType(tp.item)
value = self._load_constant(False, tp_ptr, name, module)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
type(library)._cffi_dir.append(name)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
funcname = '_cffi_var_%s' % name
BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0]
function = module.load_function(BFunc, funcname)
ptr = function()
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
cffimod_header = r'''
#include <stdio.h>
#include <stddef.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/types.h> /* XXX for ssize_t on some platforms */
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
'''
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/pkgconfig.py | # pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi
import sys, os, subprocess
from .error import PkgConfigError
def merge_flags(cfg1, cfg2):
"""Merge values from cffi config flags cfg2 to cf1
Example:
merge_flags({"libraries": ["one"]}, {"libraries": ["two"]})
{"libraries": ["one", "two"]}
"""
for key, value in cfg2.items():
if key not in cfg1:
cfg1[key] = value
else:
if not isinstance(cfg1[key], list):
raise TypeError("cfg1[%r] should be a list of strings" % (key,))
if not isinstance(value, list):
raise TypeError("cfg2[%r] should be a list of strings" % (key,))
cfg1[key].extend(value)
return cfg1
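# Illustrative usage sketch (not part of cffi); the flag values here are
# hypothetical:
_demo_cfg = {"libraries": ["one"]}
merge_flags(_demo_cfg, {"libraries": ["two"], "include_dirs": ["/opt/foo"]})
assert _demo_cfg == {"libraries": ["one", "two"],
                     "include_dirs": ["/opt/foo"]}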
def call(libname, flag, encoding=sys.getfilesystemencoding()):
"""Calls pkg-config and returns the output if found
"""
a = ["pkg-config", "--print-errors"]
a.append(flag)
a.append(libname)
try:
pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except EnvironmentError as e:
raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),))
bout, berr = pc.communicate()
if pc.returncode != 0:
try:
berr = berr.decode(encoding)
except Exception:
pass
raise PkgConfigError(berr.strip())
if sys.version_info >= (3,) and not isinstance(bout, str): # Python 3.x
try:
bout = bout.decode(encoding)
except UnicodeDecodeError:
raise PkgConfigError("pkg-config %s %s returned bytes that cannot "
"be decoded with encoding %r:\n%r" %
(flag, libname, encoding, bout))
if os.altsep != '\\' and '\\' in bout:
raise PkgConfigError("pkg-config %s %s returned an unsupported "
"backslash-escaped output:\n%r" %
(flag, libname, bout))
return bout
def flags_from_pkgconfig(libs):
r"""Return compiler line flags for FFI.set_source based on pkg-config output
Usage
...
ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"])
If pkg-config is installed on the build machine, then the arguments
include_dirs, library_dirs, libraries, define_macros, extra_compile_args
and extra_link_args are extended with the output of pkg-config for libfoo
and libbar.
Raises PkgConfigError in case the pkg-config call fails.
"""
def get_include_dirs(string):
return [x[2:] for x in string.split() if x.startswith("-I")]
def get_library_dirs(string):
return [x[2:] for x in string.split() if x.startswith("-L")]
def get_libraries(string):
return [x[2:] for x in string.split() if x.startswith("-l")]
# convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils
def get_macros(string):
def _macro(x):
x = x[2:] # drop "-D"
if '=' in x:
return tuple(x.split("=", 1)) # "-Dfoo=bar" => ("foo", "bar")
else:
return (x, None) # "-Dfoo" => ("foo", None)
return [_macro(x) for x in string.split() if x.startswith("-D")]
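# Illustrative sketch (not part of cffi), a self-contained mirror of
# get_macros() above: "-Dfoo=bar" becomes ("foo", "bar") and a bare
# "-Dfoo" becomes ("foo", None), the tuple forms distutils expects:
def _demo_get_macros(string):
    out = []
    for x in string.split():
        if x.startswith("-D"):
            x = x[2:]
            out.append(tuple(x.split("=", 1)) if "=" in x else (x, None))
    return out
assert _demo_get_macros("-DFOO=1 -DBAR -I/x") == [("FOO", "1"), ("BAR", None)]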
def get_other_cflags(string):
return [x for x in string.split() if not x.startswith("-I") and
not x.startswith("-D")]
def get_other_libs(string):
return [x for x in string.split() if not x.startswith("-L") and
not x.startswith("-l")]
# return kwargs for given libname
def kwargs(libname):
fse = sys.getfilesystemencoding()
all_cflags = call(libname, "--cflags")
all_libs = call(libname, "--libs")
return {
"include_dirs": get_include_dirs(all_cflags),
"library_dirs": get_library_dirs(all_libs),
"libraries": get_libraries(all_libs),
"define_macros": get_macros(all_cflags),
"extra_compile_args": get_other_cflags(all_cflags),
"extra_link_args": get_other_libs(all_libs),
}
# merge all arguments together
ret = {}
for libname in libs:
lib_flags = kwargs(libname)
merge_flags(ret, lib_flags)
return ret
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/_cffi_include.h | #define _CFFI_
/* We try to define Py_LIMITED_API before including Python.h.
Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and
Py_REF_DEBUG are not defined. This is a best-effort approximation:
we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
the same works for the other two macros. Py_DEBUG implies them,
but not the other way around.
The implementation is messy (issue #350): on Windows, with _MSC_VER,
we have to define Py_LIMITED_API even before including pyconfig.h.
In that case, we guess what pyconfig.h will do to the macros above,
and check our guess after the #include.
Note that on Windows, with CPython 3.x, you need >= 3.5 and virtualenv
version >= 16.0.0. With older versions of either, you don't get a
copy of PYTHON3.DLL in the virtualenv. We can't check the version of
CPython *before* we even include pyconfig.h. ffi.set_source() puts
a ``#define _CFFI_NO_LIMITED_API'' at the start of this file if it is
running on Windows < 3.5, as an attempt at fixing it, but that's
arguably wrong because it may not be the target version of Python.
Still better than nothing I guess. As another workaround, you can
remove the definition of Py_LIMITED_API here.
See also 'py_limited_api' in cffi/setuptools_ext.py.
*/
#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
# ifdef _MSC_VER
# if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
# define Py_LIMITED_API
# endif
# include <pyconfig.h>
/* sanity-check: Py_LIMITED_API will cause crashes if any of these
are also defined. Normally, the Python file PC/pyconfig.h does not
cause any of these to be defined, with the exception that _DEBUG
causes Py_DEBUG. Double-check that. */
# ifdef Py_LIMITED_API
# if defined(Py_DEBUG)
# error "pyconfig.h unexpectedly defines Py_DEBUG, but Py_LIMITED_API is set"
# endif
# if defined(Py_TRACE_REFS)
# error "pyconfig.h unexpectedly defines Py_TRACE_REFS, but Py_LIMITED_API is set"
# endif
# if defined(Py_REF_DEBUG)
# error "pyconfig.h unexpectedly defines Py_REF_DEBUG, but Py_LIMITED_API is set"
# endif
# endif
# else
# include <pyconfig.h>
# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
# define Py_LIMITED_API
# endif
# endif
#endif
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#include "parse_c_type.h"
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
#ifdef __GNUC__
# define _CFFI_UNUSED_FN __attribute__((unused))
#else
# define _CFFI_UNUSED_FN /* nothing */
#endif
#ifdef __cplusplus
# ifndef _Bool
typedef bool _Bool; /* semi-hackish: C++ has no _Bool; bool is builtin */
# endif
#endif
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_from_c__Bool PyBool_FromLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, struct _cffi_ctypedescr *))_cffi_exports[11])
#define _cffi_get_struct_layout \
not used any more
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(struct _cffi_ctypedescr *, \
PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[24])
#define _CFFI_CPIDX 25
#define _cffi_call_python \
((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX])
#define _cffi_to_c_wchar3216_t \
((int(*)(PyObject *))_cffi_exports[26])
#define _cffi_from_c_wchar3216_t \
((PyObject *(*)(int))_cffi_exports[27])
#define _CFFI_NUM_EXPORTS 28
struct _cffi_ctypedescr;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
#define _cffi_type(index) ( \
assert((((uintptr_t)_cffi_types[index]) & 1) == 0), \
(struct _cffi_ctypedescr *)_cffi_types[index])
static PyObject *_cffi_init(const char *module_name, Py_ssize_t version,
const struct _cffi_type_context_s *ctx)
{
PyObject *module, *o_arg, *new_module;
void *raw[] = {
(void *)module_name,
(void *)version,
(void *)_cffi_exports,
(void *)ctx,
};
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
o_arg = PyLong_FromVoidPtr((void *)raw);
if (o_arg == NULL)
goto failure;
new_module = PyObject_CallMethod(
module, (char *)"_init_cffi_1_0_external_module", (char *)"O", o_arg);
Py_DECREF(o_arg);
Py_DECREF(module);
return new_module;
failure:
Py_XDECREF(module);
return NULL;
}
#ifdef HAVE_WCHAR_H
typedef wchar_t _cffi_wchar_t;
#else
typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */
#endif
_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o)
{
if (sizeof(_cffi_wchar_t) == 2)
return (uint16_t)_cffi_to_c_wchar_t(o);
else
return (uint16_t)_cffi_to_c_wchar3216_t(o);
}
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x)
{
if (sizeof(_cffi_wchar_t) == 2)
return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t((int)x);
}
_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o)
{
if (sizeof(_cffi_wchar_t) == 4)
return (int)_cffi_to_c_wchar_t(o);
else
return (int)_cffi_to_c_wchar3216_t(o);
}
_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(unsigned int x)
{
if (sizeof(_cffi_wchar_t) == 4)
return _cffi_from_c_wchar_t((_cffi_wchar_t)x);
else
return _cffi_from_c_wchar3216_t((int)x);
}
union _cffi_union_alignment_u {
unsigned char m_char;
unsigned short m_short;
unsigned int m_int;
unsigned long m_long;
unsigned long long m_longlong;
float m_float;
double m_double;
long double m_longdouble;
};
struct _cffi_freeme_s {
struct _cffi_freeme_s *next;
union _cffi_union_alignment_u alignment;
};
_CFFI_UNUSED_FN static int
_cffi_convert_array_argument(struct _cffi_ctypedescr *ctptr, PyObject *arg,
char **output_data, Py_ssize_t datasize,
struct _cffi_freeme_s **freeme)
{
char *p;
if (datasize < 0)
return -1;
p = *output_data;
if (p == NULL) {
struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc(
offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize);
if (fp == NULL)
return -1;
fp->next = *freeme;
*freeme = fp;
p = *output_data = (char *)&fp->alignment;
}
memset((void *)p, 0, (size_t)datasize);
return _cffi_convert_array_from_object(p, ctptr, arg);
}
_CFFI_UNUSED_FN static void
_cffi_free_array_arguments(struct _cffi_freeme_s *freeme)
{
do {
void *p = (void *)freeme;
freeme = freeme->next;
PyObject_Free(p);
} while (freeme != NULL);
}
/********** end CPython-specific section **********/
#else
_CFFI_UNUSED_FN
static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *);
# define _cffi_call_python _cffi_call_python_org
#endif
#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0]))
#define _cffi_prim_int(size, sign) \
((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \
(size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \
(size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \
(size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \
_CFFI__UNKNOWN_PRIM)
#define _cffi_prim_float(size) \
((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \
(size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \
(size) == sizeof(long double) ? _CFFI__UNKNOWN_LONG_DOUBLE : \
_CFFI__UNKNOWN_FLOAT_PRIM)
#define _cffi_check_int(got, got_nonpos, expected) \
((got_nonpos) == (expected <= 0) && \
(got) == (unsigned long long)expected)
#ifdef MS_WIN32
# define _cffi_stdcall __stdcall
#else
# define _cffi_stdcall /* nothing */
#endif
#ifdef __cplusplus
}
#endif
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/cparser.py | from . import model
from .commontypes import COMMON_TYPES, resolve_common_type
from .error import FFIError, CDefError
try:
from . import _pycparser as pycparser
except ImportError:
import pycparser
import weakref, re, sys
try:
if sys.version_info < (3,):
import thread as _thread
else:
import _thread
lock = _thread.allocate_lock()
except ImportError:
lock = None
def _workaround_for_static_import_finders():
# Issue #392: packaging tools like cx_Freeze cannot find these
# modules because pycparser imports them dynamically with exec. This
# is an obscure workaround. This function is never called.
import pycparser.yacctab
import pycparser.lextab
CDEF_SOURCE_STRING = "<cdef source string>"
_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$",
re.DOTALL | re.MULTILINE)
_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)"
r"\b((?:[^\n\\]|\\.)*?)$",
re.DOTALL | re.MULTILINE)
_r_line_directive = re.compile(r"^[ \t]*#[ \t]*(?:line|\d+)\b.*$", re.MULTILINE)
_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}")
_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$")
_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]")
_r_words = re.compile(r"\w+|\S")
_parser_cache = None
_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE)
_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
_r_cdecl = re.compile(r"\b__cdecl\b")
_r_extern_python = re.compile(r'\bextern\s*"'
r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
_r_star_const_space = re.compile( # matches "* const "
r"[*]\s*((const|volatile|restrict)\b\s*)+")
_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+"
r"\.\.\.")
_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.")
def _get_parser():
global _parser_cache
if _parser_cache is None:
_parser_cache = pycparser.CParser()
return _parser_cache
def _workaround_for_old_pycparser(csource):
# Workaround for a pycparser issue (fixed between pycparser 2.10 and
# 2.14): "char*const***" gives us a wrong syntax tree, the same as
# for "char***(*const)". This means we can't tell the difference
# afterwards. But "char(*const(***))" gives us the right syntax
# tree. The issue only occurs if there are several stars in
# sequence with no parenthesis inbetween, just possibly qualifiers.
# Attempt to fix it by adding some parentheses in the source: each
# time we see "* const" or "* const *", we add an opening
# parenthesis before each star---the hard part is figuring out where
# to close them.
parts = []
while True:
match = _r_star_const_space.search(csource)
if not match:
break
#print repr(''.join(parts)+csource), '=>',
parts.append(csource[:match.start()])
parts.append('('); closing = ')'
parts.append(match.group()) # e.g. "* const "
endpos = match.end()
if csource.startswith('*', endpos):
parts.append('('); closing += ')'
level = 0
i = endpos
while i < len(csource):
c = csource[i]
if c == '(':
level += 1
elif c == ')':
if level == 0:
break
level -= 1
elif c in ',;=':
if level == 0:
break
i += 1
csource = csource[endpos:i] + closing + csource[i:]
#print repr(''.join(parts)+csource)
parts.append(csource)
return ''.join(parts)
def _preprocess_extern_python(csource):
# input: `extern "Python" int foo(int);` or
# `extern "Python" { int foo(int); }`
# output:
# void __cffi_extern_python_start;
# int foo(int);
# void __cffi_extern_python_stop;
#
# input: `extern "Python+C" int foo(int);`
# output:
# void __cffi_extern_python_plus_c_start;
# int foo(int);
# void __cffi_extern_python_stop;
parts = []
while True:
match = _r_extern_python.search(csource)
if not match:
break
endpos = match.end() - 1
#print
#print ''.join(parts)+csource
#print '=>'
parts.append(csource[:match.start()])
if 'C' in match.group(1):
parts.append('void __cffi_extern_python_plus_c_start; ')
else:
parts.append('void __cffi_extern_python_start; ')
if csource[endpos] == '{':
# grouping variant
closing = csource.find('}', endpos)
if closing < 0:
raise CDefError("'extern \"Python\" {': no '}' found")
if csource.find('{', endpos + 1, closing) >= 0:
raise NotImplementedError("cannot use { } inside a block "
"'extern \"Python\" { ... }'")
parts.append(csource[endpos+1:closing])
csource = csource[closing+1:]
else:
# non-grouping variant
semicolon = csource.find(';', endpos)
if semicolon < 0:
raise CDefError("'extern \"Python\": no ';' found")
parts.append(csource[endpos:semicolon+1])
csource = csource[semicolon+1:]
parts.append(' void __cffi_extern_python_stop;')
#print ''.join(parts)+csource
#print
parts.append(csource)
return ''.join(parts)
def _warn_for_string_literal(csource):
if '"' not in csource:
return
for line in csource.splitlines():
if '"' in line and not line.lstrip().startswith('#'):
import warnings
warnings.warn("String literal found in cdef() or type source. "
"String literals are ignored here, but you should "
"remove them anyway because some character sequences "
"confuse pre-parsing.")
break
def _warn_for_non_extern_non_static_global_variable(decl):
if not decl.storage:
import warnings
warnings.warn("Global variable '%s' in cdef(): for consistency "
"with C it should have a storage class specifier "
"(usually 'extern')" % (decl.name,))
def _remove_line_directives(csource):
# _r_line_directive matches whole lines, without the final \n, if they
# start with '#line' with some spacing allowed, or '#NUMBER'. This
# function stores them away and replaces them with exactly the string
# '#line@N', where N is the index in the list 'line_directives'.
line_directives = []
def replace(m):
i = len(line_directives)
line_directives.append(m.group())
return '#line@%d' % i
csource = _r_line_directive.sub(replace, csource)
return csource, line_directives
def _put_back_line_directives(csource, line_directives):
def replace(m):
s = m.group()
if not s.startswith('#line@'):
raise AssertionError("unexpected #line directive "
"(should have been processed and removed")
return line_directives[int(s[6:])]
return _r_line_directive.sub(replace, csource)
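# Illustrative sketch (not part of cffi): round-tripping a hypothetical
# source through the two helpers above.  Both the '#line N' form and the
# short '# N "file"' form are stored away and restored verbatim:
_demo_src = '# 42 "foo.h"\nint x;\n#line 7\n'
_demo_out, _demo_saved = _remove_line_directives(_demo_src)
assert _demo_out == '#line@0\nint x;\n#line@1\n'
assert _put_back_line_directives(_demo_out, _demo_saved) == _demo_src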
def _preprocess(csource):
# First, remove the lines of the form '#line N "filename"' because
# the "filename" part could confuse the rest
csource, line_directives = _remove_line_directives(csource)
# Remove comments. NOTE: this only works because the cdef() section
# should not contain any string literals (except in line directives)!
def replace_keeping_newlines(m):
return ' ' + m.group().count('\n') * '\n'
csource = _r_comment.sub(replace_keeping_newlines, csource)
# Remove the "#define FOO x" lines
macros = {}
for match in _r_define.finditer(csource):
macroname, macrovalue = match.groups()
macrovalue = macrovalue.replace('\\\n', '').strip()
macros[macroname] = macrovalue
csource = _r_define.sub('', csource)
#
if pycparser.__version__ < '2.14':
csource = _workaround_for_old_pycparser(csource)
#
# BIG HACK: replace WINAPI or __stdcall with "volatile const".
# It doesn't make sense for the return type of a function to be
# "volatile volatile const", so we abuse it to detect __stdcall...
# Hack number 2 is that "int(volatile *fptr)();" is not valid C
# syntax, so we place the "volatile" before the opening parenthesis.
csource = _r_stdcall2.sub(' volatile volatile const(', csource)
csource = _r_stdcall1.sub(' volatile volatile const ', csource)
csource = _r_cdecl.sub(' ', csource)
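# Illustrative sketch (not part of cffi): the substitution above in action
# on a hypothetical declaration; '__stdcall' is smuggled through pycparser
# as the otherwise-nonsensical qualifier pair 'volatile volatile const':
_demo_decl = _r_stdcall1.sub(' volatile volatile const ',
                             'int __stdcall f(int);')
assert _demo_decl == 'int  volatile volatile const  f(int);'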
#
# Replace `extern "Python"` with start/end markers
csource = _preprocess_extern_python(csource)
#
# Now there should not be any string literal left; warn if we get one
_warn_for_string_literal(csource)
#
# Replace "[...]" with "[__dotdotdotarray__]"
csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
#
# Replace "...}" with "__dotdotdotNUM__}". This construction should
# occur only at the end of enums; at the end of structs we have "...;}"
# and at the end of vararg functions "...);". Also replace "=...[,}]"
# with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when
# giving an unknown value.
matches = list(_r_partial_enum.finditer(csource))
for number, match in enumerate(reversed(matches)):
p = match.start()
if csource[p] == '=':
p2 = csource.find('...', p, match.end())
assert p2 > p
csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number,
csource[p2+3:])
else:
assert csource[p:p+3] == '...'
csource = '%s __dotdotdot%d__ %s' % (csource[:p], number,
csource[p+3:])
# Replace "int ..." or "unsigned long int..." with "__dotdotdotint__"
csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource)
# Replace "float ..." or "double..." with "__dotdotdotfloat__"
csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource)
# Replace all remaining "..." with the same name, "__dotdotdot__",
# which is declared with a typedef for the purpose of C parsing.
csource = csource.replace('...', ' __dotdotdot__ ')
# Finally, put back the line directives
csource = _put_back_line_directives(csource, line_directives)
return csource, macros
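# Illustrative sketch (not part of cffi): '#define' lines are removed from
# the source and returned separately in the macros dict:
_demo_csource, _demo_macros = _preprocess("#define X 42\nint f(int);")
assert _demo_macros == {"X": "42"}
assert "#define" not in _demo_csource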
def _common_type_names(csource):
# Look in the source for what looks like usages of types from the
# list of common types. A "usage" is approximated here as the
# appearance of the word, minus a "definition" of the type, which
# is the last word in a "typedef" statement. Approximative only
# but should be fine for all the common types.
look_for_words = set(COMMON_TYPES)
look_for_words.add(';')
look_for_words.add(',')
look_for_words.add('(')
look_for_words.add(')')
look_for_words.add('typedef')
words_used = set()
is_typedef = False
paren = 0
previous_word = ''
for word in _r_words.findall(csource):
if word in look_for_words:
if word == ';':
if is_typedef:
words_used.discard(previous_word)
look_for_words.discard(previous_word)
is_typedef = False
elif word == 'typedef':
is_typedef = True
paren = 0
elif word == '(':
paren += 1
elif word == ')':
paren -= 1
elif word == ',':
if is_typedef and paren == 0:
words_used.discard(previous_word)
look_for_words.discard(previous_word)
else: # word in COMMON_TYPES
words_used.add(word)
previous_word = word
return words_used
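# Illustrative sketch (not part of cffi; assumes 'FILE' is in COMMON_TYPES,
# as it is in cffi.commontypes): a word counts as a usage unless it is the
# name being defined by a typedef:
assert 'FILE' in _common_type_names('int f(FILE *);')
assert 'FILE' not in _common_type_names('typedef int FILE;')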
class Parser(object):
def __init__(self):
self._declarations = {}
self._included_declarations = set()
self._anonymous_counter = 0
self._structnode2type = weakref.WeakKeyDictionary()
self._options = {}
self._int_constants = {}
self._recomplete = []
self._uses_new_feature = None
def _parse(self, csource):
csource, macros = _preprocess(csource)
# XXX: for more efficiency we would need to poke into the
# internals of CParser... the following registers the
# typedefs, because their presence or absence influences the
# parsing itself (but what they are typedef'ed to plays no role)
ctn = _common_type_names(csource)
typenames = []
for name in sorted(self._declarations):
if name.startswith('typedef '):
name = name[8:]
typenames.append(name)
ctn.discard(name)
typenames += sorted(ctn)
#
csourcelines = []
csourcelines.append('# 1 "<cdef automatic initialization code>"')
for typename in typenames:
csourcelines.append('typedef int %s;' % typename)
csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,'
' __dotdotdot__;')
# this forces pycparser to consider the following in the file
# called <cdef source string> from line 1
csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,))
csourcelines.append(csource)
fullcsource = '\n'.join(csourcelines)
if lock is not None:
lock.acquire() # pycparser is not thread-safe...
try:
ast = _get_parser().parse(fullcsource)
except pycparser.c_parser.ParseError as e:
self.convert_pycparser_error(e, csource)
finally:
if lock is not None:
lock.release()
# csource will be used to find buggy source text
return ast, macros, csource
def _convert_pycparser_error(self, e, csource):
# xxx look for "<cdef source string>:NUM:" at the start of str(e)
# and interpret that as a line number. This will not work if
# the user gives explicit ``# NUM "FILE"`` directives.
line = None
msg = str(e)
match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg)
if match:
linenum = int(match.group(1), 10)
csourcelines = csource.splitlines()
if 1 <= linenum <= len(csourcelines):
line = csourcelines[linenum-1]
return line
def convert_pycparser_error(self, e, csource):
line = self._convert_pycparser_error(e, csource)
msg = str(e)
if line:
msg = 'cannot parse "%s"\n%s' % (line.strip(), msg)
else:
msg = 'parse error\n%s' % (msg,)
raise CDefError(msg)
def parse(self, csource, override=False, packed=False, pack=None,
dllexport=False):
if packed:
if packed != True:
raise ValueError("'packed' should be False or True; use "
"'pack' to give another value")
if pack:
raise ValueError("cannot give both 'pack' and 'packed'")
pack = 1
elif pack:
if pack & (pack - 1):
raise ValueError("'pack' must be a power of two, not %r" %
(pack,))
else:
pack = 0
prev_options = self._options
try:
self._options = {'override': override,
'packed': pack,
'dllexport': dllexport}
self._internal_parse(csource)
finally:
self._options = prev_options
def _internal_parse(self, csource):
ast, macros, csource = self._parse(csource)
# add the macros
self._process_macros(macros)
# find the first "__dotdotdot__" and use that as a separator
# between the repeated typedefs and the real csource
iterator = iter(ast.ext)
for decl in iterator:
if decl.name == '__dotdotdot__':
break
else:
assert 0
current_decl = None
#
try:
self._inside_extern_python = '__cffi_extern_python_stop'
for decl in iterator:
current_decl = decl
if isinstance(decl, pycparser.c_ast.Decl):
self._parse_decl(decl)
elif isinstance(decl, pycparser.c_ast.Typedef):
if not decl.name:
raise CDefError("typedef does not declare any name",
decl)
quals = 0
if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and
decl.type.type.names[-1].startswith('__dotdotdot')):
realtype = self._get_unknown_type(decl)
elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and
isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and
isinstance(decl.type.type.type,
pycparser.c_ast.IdentifierType) and
decl.type.type.type.names[-1].startswith('__dotdotdot')):
realtype = self._get_unknown_ptr_type(decl)
else:
realtype, quals = self._get_type_and_quals(
decl.type, name=decl.name, partial_length_ok=True,
typedef_example="*(%s *)0" % (decl.name,))
self._declare('typedef ' + decl.name, realtype, quals=quals)
elif decl.__class__.__name__ == 'Pragma':
pass # skip pragma, only in pycparser 2.15
else:
raise CDefError("unexpected <%s>: this construct is valid "
"C but not valid in cdef()" %
decl.__class__.__name__, decl)
except CDefError as e:
if len(e.args) == 1:
e.args = e.args + (current_decl,)
raise
except FFIError as e:
msg = self._convert_pycparser_error(e, csource)
if msg:
e.args = (e.args[0] + "\n *** Err: %s" % msg,)
raise
def _add_constants(self, key, val):
if key in self._int_constants:
if self._int_constants[key] == val:
return # ignore identical double declarations
raise FFIError(
"multiple declarations of constant: %s" % (key,))
self._int_constants[key] = val
def _add_integer_constant(self, name, int_str):
int_str = int_str.lower().rstrip("ul")
neg = int_str.startswith('-')
if neg:
int_str = int_str[1:]
# "010" is not valid oct in py3
if (int_str.startswith("0") and int_str != '0'
and not int_str.startswith("0x")):
int_str = "0o" + int_str[1:]
pyvalue = int(int_str, 0)
if neg:
pyvalue = -pyvalue
self._add_constants(name, pyvalue)
self._declare('macro ' + name, pyvalue)
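# Illustrative sketch (not part of cffi): the rewriting above makes C
# integer literals digestible by Python 3's int(..., 0), which rejects
# the bare-leading-zero octal form:
assert int('0o10', 0) == 8                  # C '010' after the rewrite
assert int('0x10', 0) == 16                 # hex literals pass through
assert '123UL'.lower().rstrip('ul') == '123'   # suffixes are stripped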
def _process_macros(self, macros):
for key, value in macros.items():
value = value.strip()
if _r_int_literal.match(value):
self._add_integer_constant(key, value)
elif value == '...':
self._declare('macro ' + key, value)
else:
raise CDefError(
'only supports one of the following syntaxes:\n'
' #define %s ... (literally dot-dot-dot)\n'
' #define %s NUMBER (with NUMBER an integer'
' constant, decimal/hex/octal)\n'
'got:\n'
' #define %s %s'
% (key, key, key, value))
def _declare_function(self, tp, quals, decl):
tp = self._get_type_pointer(tp, quals)
if self._options.get('dllexport'):
tag = 'dllexport_python '
elif self._inside_extern_python == '__cffi_extern_python_start':
tag = 'extern_python '
elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
tag = 'extern_python_plus_c '
else:
tag = 'function '
self._declare(tag + decl.name, tp)
def _parse_decl(self, decl):
node = decl.type
if isinstance(node, pycparser.c_ast.FuncDecl):
tp, quals = self._get_type_and_quals(node, name=decl.name)
assert isinstance(tp, model.RawFunctionType)
self._declare_function(tp, quals, decl)
else:
if isinstance(node, pycparser.c_ast.Struct):
self._get_struct_union_enum_type('struct', node)
elif isinstance(node, pycparser.c_ast.Union):
self._get_struct_union_enum_type('union', node)
elif isinstance(node, pycparser.c_ast.Enum):
self._get_struct_union_enum_type('enum', node)
elif not decl.name:
raise CDefError("construct does not declare any variable",
decl)
#
if decl.name:
tp, quals = self._get_type_and_quals(node,
partial_length_ok=True)
if tp.is_raw_function:
self._declare_function(tp, quals, decl)
elif (tp.is_integer_type() and
hasattr(decl, 'init') and
hasattr(decl.init, 'value') and
_r_int_literal.match(decl.init.value)):
self._add_integer_constant(decl.name, decl.init.value)
elif (tp.is_integer_type() and
isinstance(decl.init, pycparser.c_ast.UnaryOp) and
decl.init.op == '-' and
hasattr(decl.init.expr, 'value') and
_r_int_literal.match(decl.init.expr.value)):
self._add_integer_constant(decl.name,
'-' + decl.init.expr.value)
elif (tp is model.void_type and
decl.name.startswith('__cffi_extern_python_')):
# hack: `extern "Python"` in the C source is replaced
# with "void __cffi_extern_python_start;" and
# "void __cffi_extern_python_stop;"
self._inside_extern_python = decl.name
else:
if self._inside_extern_python != '__cffi_extern_python_stop':
raise CDefError(
"cannot declare constants or "
"variables with 'extern \"Python\"'")
if (quals & model.Q_CONST) and not tp.is_array_type:
self._declare('constant ' + decl.name, tp, quals=quals)
else:
_warn_for_non_extern_non_static_global_variable(decl)
self._declare('variable ' + decl.name, tp, quals=quals)
def parse_type(self, cdecl):
return self.parse_type_and_quals(cdecl)[0]
def parse_type_and_quals(self, cdecl):
ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2]
assert not macros
exprnode = ast.ext[-1].type.args.params[0]
if isinstance(exprnode, pycparser.c_ast.ID):
raise CDefError("unknown identifier '%s'" % (exprnode.name,))
return self._get_type_and_quals(exprnode.type)
def _declare(self, name, obj, included=False, quals=0):
if name in self._declarations:
prevobj, prevquals = self._declarations[name]
if prevobj is obj and prevquals == quals:
return
if not self._options.get('override'):
raise FFIError(
"multiple declarations of %s (for interactive usage, "
"try cdef(xx, override=True))" % (name,))
assert '__dotdotdot__' not in name.split()
self._declarations[name] = (obj, quals)
if included:
self._included_declarations.add(obj)
def _extract_quals(self, type):
quals = 0
if isinstance(type, (pycparser.c_ast.TypeDecl,
pycparser.c_ast.PtrDecl)):
if 'const' in type.quals:
quals |= model.Q_CONST
if 'volatile' in type.quals:
quals |= model.Q_VOLATILE
if 'restrict' in type.quals:
quals |= model.Q_RESTRICT
return quals
def _get_type_pointer(self, type, quals, declname=None):
if isinstance(type, model.RawFunctionType):
return type.as_function_pointer()
if (isinstance(type, model.StructOrUnionOrEnum) and
type.name.startswith('$') and type.name[1:].isdigit() and
type.forcename is None and declname is not None):
return model.NamedPointerType(type, declname, quals)
return model.PointerType(type, quals)
def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False,
typedef_example=None):
# first, dereference typedefs, if we have it already parsed, we're good
if (isinstance(typenode, pycparser.c_ast.TypeDecl) and
isinstance(typenode.type, pycparser.c_ast.IdentifierType) and
len(typenode.type.names) == 1 and
('typedef ' + typenode.type.names[0]) in self._declarations):
tp, quals = self._declarations['typedef ' + typenode.type.names[0]]
quals |= self._extract_quals(typenode)
return tp, quals
#
if isinstance(typenode, pycparser.c_ast.ArrayDecl):
# array type
if typenode.dim is None:
length = None
else:
length = self._parse_constant(
typenode.dim, partial_length_ok=partial_length_ok)
            # a hack: in 'typedef int foo_t[...][...];', don't use '...' as
            # the length, but directly use the C expression that would be
            # generated by recompiler.py.  This lets the typedef be used in
            # many more places within recompiler.py.
if typedef_example is not None:
if length == '...':
length = '_cffi_array_len(%s)' % (typedef_example,)
typedef_example = "*" + typedef_example
#
tp, quals = self._get_type_and_quals(typenode.type,
partial_length_ok=partial_length_ok,
typedef_example=typedef_example)
return model.ArrayType(tp, length), quals
#
if isinstance(typenode, pycparser.c_ast.PtrDecl):
# pointer type
itemtype, itemquals = self._get_type_and_quals(typenode.type)
tp = self._get_type_pointer(itemtype, itemquals, declname=name)
quals = self._extract_quals(typenode)
return tp, quals
#
if isinstance(typenode, pycparser.c_ast.TypeDecl):
quals = self._extract_quals(typenode)
type = typenode.type
if isinstance(type, pycparser.c_ast.IdentifierType):
# assume a primitive type. get it from .names, but reduce
# synonyms to a single chosen combination
names = list(type.names)
if names != ['signed', 'char']: # keep this unmodified
prefixes = {}
while names:
name = names[0]
if name in ('short', 'long', 'signed', 'unsigned'):
prefixes[name] = prefixes.get(name, 0) + 1
del names[0]
else:
break
# ignore the 'signed' prefix below, and reorder the others
newnames = []
for prefix in ('unsigned', 'short', 'long'):
for i in range(prefixes.get(prefix, 0)):
newnames.append(prefix)
if not names:
names = ['int'] # implicitly
if names == ['int']: # but kill it if 'short' or 'long'
if 'short' in prefixes or 'long' in prefixes:
names = []
names = newnames + names
ident = ' '.join(names)
if ident == 'void':
return model.void_type, quals
if ident == '__dotdotdot__':
raise FFIError(':%d: bad usage of "..."' %
typenode.coord.line)
tp0, quals0 = resolve_common_type(self, ident)
return tp0, (quals | quals0)
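        # Illustrative note (hedged): the normalization above maps
        # spelling variants to one canonical form, e.g.
        # ['long', 'unsigned', 'int'] -> 'unsigned long',
        # ['long', 'long'] -> 'long long', and a bare ['unsigned'] ->
        # 'unsigned int'.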
#
if isinstance(type, pycparser.c_ast.Struct):
# 'struct foobar'
tp = self._get_struct_union_enum_type('struct', type, name)
return tp, quals
#
if isinstance(type, pycparser.c_ast.Union):
# 'union foobar'
tp = self._get_struct_union_enum_type('union', type, name)
return tp, quals
#
if isinstance(type, pycparser.c_ast.Enum):
# 'enum foobar'
tp = self._get_struct_union_enum_type('enum', type, name)
return tp, quals
#
if isinstance(typenode, pycparser.c_ast.FuncDecl):
# a function type
return self._parse_function_type(typenode, name), 0
#
# nested anonymous structs or unions end up here
if isinstance(typenode, pycparser.c_ast.Struct):
return self._get_struct_union_enum_type('struct', typenode, name,
nested=True), 0
if isinstance(typenode, pycparser.c_ast.Union):
return self._get_struct_union_enum_type('union', typenode, name,
nested=True), 0
#
raise FFIError(":%d: bad or unsupported type declaration" %
typenode.coord.line)
def _parse_function_type(self, typenode, funcname=None):
params = list(getattr(typenode.args, 'params', []))
for i, arg in enumerate(params):
if not hasattr(arg, 'type'):
raise CDefError("%s arg %d: unknown type '%s'"
" (if you meant to use the old C syntax of giving"
" untyped arguments, it is not supported)"
% (funcname or 'in expression', i + 1,
getattr(arg, 'name', '?')))
ellipsis = (
len(params) > 0 and
isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and
isinstance(params[-1].type.type,
pycparser.c_ast.IdentifierType) and
params[-1].type.type.names == ['__dotdotdot__'])
if ellipsis:
params.pop()
if not params:
raise CDefError(
"%s: a function with only '(...)' as argument"
" is not correct C" % (funcname or 'in expression'))
args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
for argdeclnode in params]
if not ellipsis and args == [model.void_type]:
args = []
result, quals = self._get_type_and_quals(typenode.type)
        # the 'quals' on the result type are ignored.  HACK: we abuse them
# to detect __stdcall functions: we textually replace "__stdcall"
# with "volatile volatile const" above.
abi = None
if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway
if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']:
abi = '__stdcall'
return model.RawFunctionType(tuple(args), result, ellipsis, abi)
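    # Hedged examples: "int printf(const char *, ...)" yields a
    # RawFunctionType with ellipsis=True (the '__dotdotdot__' parameter
    # is popped above), while "long f(void)" yields an empty argument
    # tuple because a lone 'void' argument list is dropped.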
def _as_func_arg(self, type, quals):
if isinstance(type, model.ArrayType):
return model.PointerType(type.item, quals)
elif isinstance(type, model.RawFunctionType):
return type.as_function_pointer()
else:
return type
def _get_struct_union_enum_type(self, kind, type, name=None, nested=False):
# First, a level of caching on the exact 'type' node of the AST.
# This is obscure, but needed because pycparser "unrolls" declarations
# such as "typedef struct { } foo_t, *foo_p" and we end up with
# an AST that is not a tree, but a DAG, with the "type" node of the
# two branches foo_t and foo_p of the trees being the same node.
# It's a bit silly but detecting "DAG-ness" in the AST tree seems
# to be the only way to distinguish this case from two independent
# structs. See test_struct_with_two_usages.
try:
return self._structnode2type[type]
except KeyError:
pass
#
# Note that this must handle parsing "struct foo" any number of
# times and always return the same StructType object. Additionally,
# one of these times (not necessarily the first), the fields of
# the struct can be specified with "struct foo { ...fields... }".
# If no name is given, then we have to create a new anonymous struct
# with no caching; in this case, the fields are either specified
# right now or never.
#
force_name = name
name = type.name
#
# get the type or create it if needed
if name is None:
# 'force_name' is used to guess a more readable name for
# anonymous structs, for the common case "typedef struct { } foo".
if force_name is not None:
explicit_name = '$%s' % force_name
else:
self._anonymous_counter += 1
explicit_name = '$%d' % self._anonymous_counter
tp = None
else:
explicit_name = name
key = '%s %s' % (kind, name)
tp, _ = self._declarations.get(key, (None, None))
#
if tp is None:
if kind == 'struct':
tp = model.StructType(explicit_name, None, None, None)
elif kind == 'union':
tp = model.UnionType(explicit_name, None, None, None)
elif kind == 'enum':
if explicit_name == '__dotdotdot__':
raise CDefError("Enums cannot be declared with ...")
tp = self._build_enum_type(explicit_name, type.values)
else:
raise AssertionError("kind = %r" % (kind,))
if name is not None:
self._declare(key, tp)
else:
if kind == 'enum' and type.values is not None:
raise NotImplementedError(
"enum %s: the '{}' declaration should appear on the first "
"time the enum is mentioned, not later" % explicit_name)
if not tp.forcename:
tp.force_the_name(force_name)
if tp.forcename and '$' in tp.name:
self._declare('anonymous %s' % tp.forcename, tp)
#
self._structnode2type[type] = tp
#
# enums: done here
if kind == 'enum':
return tp
#
# is there a 'type.decls'? If yes, then this is the place in the
# C sources that declare the fields. If no, then just return the
# existing type, possibly still incomplete.
if type.decls is None:
return tp
#
if tp.fldnames is not None:
raise CDefError("duplicate declaration of struct %s" % name)
fldnames = []
fldtypes = []
fldbitsize = []
fldquals = []
for decl in type.decls:
if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and
''.join(decl.type.names) == '__dotdotdot__'):
# XXX pycparser is inconsistent: 'names' should be a list
# of strings, but is sometimes just one string. Use
# str.join() as a way to cope with both.
self._make_partial(tp, nested)
continue
if decl.bitsize is None:
bitsize = -1
else:
bitsize = self._parse_constant(decl.bitsize)
self._partial_length = False
type, fqual = self._get_type_and_quals(decl.type,
partial_length_ok=True)
if self._partial_length:
self._make_partial(tp, nested)
if isinstance(type, model.StructType) and type.partial:
self._make_partial(tp, nested)
fldnames.append(decl.name or '')
fldtypes.append(type)
fldbitsize.append(bitsize)
fldquals.append(fqual)
tp.fldnames = tuple(fldnames)
tp.fldtypes = tuple(fldtypes)
tp.fldbitsize = tuple(fldbitsize)
tp.fldquals = tuple(fldquals)
if fldbitsize != [-1] * len(fldbitsize):
if isinstance(tp, model.StructType) and tp.partial:
raise NotImplementedError("%s: using both bitfields and '...;'"
% (tp,))
tp.packed = self._options.get('packed')
if tp.completed: # must be re-completed: it is not opaque any more
tp.completed = 0
self._recomplete.append(tp)
return tp
def _make_partial(self, tp, nested):
if not isinstance(tp, model.StructOrUnion):
raise CDefError("%s cannot be partial" % (tp,))
if not tp.has_c_name() and not nested:
raise NotImplementedError("%s is partial but has no C name" %(tp,))
tp.partial = True
def _parse_constant(self, exprnode, partial_length_ok=False):
        # for now, limited to expressions that are an immediate number
        # or a positive/negative number
if isinstance(exprnode, pycparser.c_ast.Constant):
s = exprnode.value
if '0' <= s[0] <= '9':
s = s.rstrip('uUlL')
try:
if s.startswith('0'):
return int(s, 8)
else:
return int(s, 10)
except ValueError:
if len(s) > 1:
if s.lower()[0:2] == '0x':
return int(s, 16)
elif s.lower()[0:2] == '0b':
return int(s, 2)
raise CDefError("invalid constant %r" % (s,))
elif s[0] == "'" and s[-1] == "'" and (
len(s) == 3 or (len(s) == 4 and s[1] == "\\")):
return ord(s[-2])
else:
raise CDefError("invalid constant %r" % (s,))
#
if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and
exprnode.op == '+'):
return self._parse_constant(exprnode.expr)
#
if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and
exprnode.op == '-'):
return -self._parse_constant(exprnode.expr)
# load previously defined int constant
if (isinstance(exprnode, pycparser.c_ast.ID) and
exprnode.name in self._int_constants):
return self._int_constants[exprnode.name]
#
if (isinstance(exprnode, pycparser.c_ast.ID) and
exprnode.name == '__dotdotdotarray__'):
if partial_length_ok:
self._partial_length = True
return '...'
raise FFIError(":%d: unsupported '[...]' here, cannot derive "
"the actual array length in this context"
% exprnode.coord.line)
#
if isinstance(exprnode, pycparser.c_ast.BinaryOp):
left = self._parse_constant(exprnode.left)
right = self._parse_constant(exprnode.right)
if exprnode.op == '+':
return left + right
elif exprnode.op == '-':
return left - right
elif exprnode.op == '*':
return left * right
elif exprnode.op == '/':
return self._c_div(left, right)
elif exprnode.op == '%':
return left - self._c_div(left, right) * right
elif exprnode.op == '<<':
return left << right
elif exprnode.op == '>>':
return left >> right
elif exprnode.op == '&':
return left & right
elif exprnode.op == '|':
return left | right
elif exprnode.op == '^':
return left ^ right
#
raise FFIError(":%d: unsupported expression: expected a "
"simple numeric constant" % exprnode.coord.line)
def _c_div(self, a, b):
result = a // b
if ((a < 0) ^ (b < 0)) and (a % b) != 0:
result += 1
return result
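    # _c_div() reproduces C's truncate-toward-zero division, which
    # differs from Python's floor division when the signs differ; a
    # quick illustrative check (hedged):
    #   _c_div(-7, 2) == -3    (while -7 // 2 == -4 in Python)
    #   _c_div(7, -2) == -3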
def _build_enum_type(self, explicit_name, decls):
if decls is not None:
partial = False
enumerators = []
enumvalues = []
nextenumvalue = 0
for enum in decls.enumerators:
if _r_enum_dotdotdot.match(enum.name):
partial = True
continue
if enum.value is not None:
nextenumvalue = self._parse_constant(enum.value)
enumerators.append(enum.name)
enumvalues.append(nextenumvalue)
self._add_constants(enum.name, nextenumvalue)
nextenumvalue += 1
enumerators = tuple(enumerators)
enumvalues = tuple(enumvalues)
tp = model.EnumType(explicit_name, enumerators, enumvalues)
tp.partial = partial
else: # opaque enum
tp = model.EnumType(explicit_name, (), ())
return tp
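    # Hedged example: "enum e { A, B=5, C };" produces enumerators
    # ('A', 'B', 'C') with values (0, 5, 6); an enumerator name matching
    # _r_enum_dotdotdot instead marks the EnumType as partial and is not
    # recorded in the list.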
def include(self, other):
for name, (tp, quals) in other._declarations.items():
if name.startswith('anonymous $enum_$'):
continue # fix for test_anonymous_enum_include
kind = name.split(' ', 1)[0]
if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'):
self._declare(name, tp, included=True, quals=quals)
for k, v in other._int_constants.items():
self._add_constants(k, v)
def _get_unknown_type(self, decl):
typenames = decl.type.type.names
if typenames == ['__dotdotdot__']:
return model.unknown_type(decl.name)
if typenames == ['__dotdotdotint__']:
if self._uses_new_feature is None:
self._uses_new_feature = "'typedef int... %s'" % decl.name
return model.UnknownIntegerType(decl.name)
if typenames == ['__dotdotdotfloat__']:
# note: not for 'long double' so far
if self._uses_new_feature is None:
self._uses_new_feature = "'typedef float... %s'" % decl.name
return model.UnknownFloatType(decl.name)
raise FFIError(':%d: unsupported usage of "..." in typedef'
% decl.coord.line)
def _get_unknown_ptr_type(self, decl):
if decl.type.type.type.names == ['__dotdotdot__']:
return model.unknown_ptr_type(decl.name)
raise FFIError(':%d: unsupported usage of "..." in typedef'
% decl.coord.line)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/recompiler.py | import os, sys, io
from . import ffiplatform, model
from .error import VerificationError
from .cffi_opcode import *
VERSION_BASE = 0x2601
VERSION_EMBEDDED = 0x2701
VERSION_CHAR16CHAR32 = 0x2801
USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or
sys.version_info >= (3, 5))
class GlobalExpr:
def __init__(self, name, address, type_op, size=0, check_value=0):
self.name = name
self.address = address
self.type_op = type_op
self.size = size
self.check_value = check_value
def as_c_expr(self):
return ' { "%s", (void *)%s, %s, (void *)%s },' % (
self.name, self.address, self.type_op.as_c_expr(), self.size)
def as_python_expr(self):
return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name,
self.check_value)
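# Illustrative note (hedged): with a method op and type index such as
# CffiOp(OP_CPYTHON_BLTN_V, 12), GlobalExpr('f', '_cffi_f_f', op,
# size='_cffi_d_f').as_c_expr() renders roughly
#   { "f", (void *)_cffi_f_f, _CFFI_OP(_CFFI_OP_CPYTHON_BLTN_V, 12), (void *)_cffi_d_f },
# i.e. one row of the generated C table of globals.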
class FieldExpr:
def __init__(self, name, field_offset, field_size, fbitsize, field_type_op):
self.name = name
self.field_offset = field_offset
self.field_size = field_size
self.fbitsize = fbitsize
self.field_type_op = field_type_op
def as_c_expr(self):
spaces = " " * len(self.name)
return (' { "%s", %s,\n' % (self.name, self.field_offset) +
' %s %s,\n' % (spaces, self.field_size) +
' %s %s },' % (spaces, self.field_type_op.as_c_expr()))
def as_python_expr(self):
raise NotImplementedError
def as_field_python_expr(self):
if self.field_type_op.op == OP_NOOP:
size_expr = ''
elif self.field_type_op.op == OP_BITFIELD:
size_expr = format_four_bytes(self.fbitsize)
else:
raise NotImplementedError
return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(),
size_expr,
self.name)
class StructUnionExpr:
def __init__(self, name, type_index, flags, size, alignment, comment,
first_field_index, c_fields):
self.name = name
self.type_index = type_index
self.flags = flags
self.size = size
self.alignment = alignment
self.comment = comment
self.first_field_index = first_field_index
self.c_fields = c_fields
def as_c_expr(self):
return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags)
+ '\n %s, %s, ' % (self.size, self.alignment)
+ '%d, %d ' % (self.first_field_index, len(self.c_fields))
+ ('/* %s */ ' % self.comment if self.comment else '')
+ '},')
def as_python_expr(self):
flags = eval(self.flags, G_FLAGS)
fields_expr = [c_field.as_field_python_expr()
for c_field in self.c_fields]
return "(b'%s%s%s',%s)" % (
format_four_bytes(self.type_index),
format_four_bytes(flags),
self.name,
','.join(fields_expr))
class EnumExpr:
def __init__(self, name, type_index, size, signed, allenums):
self.name = name
self.type_index = type_index
self.size = size
self.signed = signed
self.allenums = allenums
def as_c_expr(self):
return (' { "%s", %d, _cffi_prim_int(%s, %s),\n'
' "%s" },' % (self.name, self.type_index,
self.size, self.signed, self.allenums))
def as_python_expr(self):
prim_index = {
(1, 0): PRIM_UINT8, (1, 1): PRIM_INT8,
(2, 0): PRIM_UINT16, (2, 1): PRIM_INT16,
(4, 0): PRIM_UINT32, (4, 1): PRIM_INT32,
(8, 0): PRIM_UINT64, (8, 1): PRIM_INT64,
}[self.size, self.signed]
return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index),
format_four_bytes(prim_index),
self.name, self.allenums)
class TypenameExpr:
def __init__(self, name, type_index):
self.name = name
self.type_index = type_index
def as_c_expr(self):
return ' { "%s", %d },' % (self.name, self.type_index)
def as_python_expr(self):
return "b'%s%s'" % (format_four_bytes(self.type_index), self.name)
# ____________________________________________________________
class Recompiler:
_num_externpy = 0
def __init__(self, ffi, module_name, target_is_python=False):
self.ffi = ffi
self.module_name = module_name
self.target_is_python = target_is_python
self._version = VERSION_BASE
def needs_version(self, ver):
self._version = max(self._version, ver)
def collect_type_table(self):
self._typesdict = {}
self._generate("collecttype")
#
all_decls = sorted(self._typesdict, key=str)
#
# prepare all FUNCTION bytecode sequences first
self.cffi_types = []
for tp in all_decls:
if tp.is_raw_function:
assert self._typesdict[tp] is None
self._typesdict[tp] = len(self.cffi_types)
self.cffi_types.append(tp) # placeholder
for tp1 in tp.args:
assert isinstance(tp1, (model.VoidType,
model.BasePrimitiveType,
model.PointerType,
model.StructOrUnionOrEnum,
model.FunctionPtrType))
if self._typesdict[tp1] is None:
self._typesdict[tp1] = len(self.cffi_types)
self.cffi_types.append(tp1) # placeholder
self.cffi_types.append('END') # placeholder
#
# prepare all OTHER bytecode sequences
for tp in all_decls:
if not tp.is_raw_function and self._typesdict[tp] is None:
self._typesdict[tp] = len(self.cffi_types)
self.cffi_types.append(tp) # placeholder
if tp.is_array_type and tp.length is not None:
self.cffi_types.append('LEN') # placeholder
assert None not in self._typesdict.values()
#
# collect all structs and unions and enums
self._struct_unions = {}
self._enums = {}
for tp in all_decls:
if isinstance(tp, model.StructOrUnion):
self._struct_unions[tp] = None
elif isinstance(tp, model.EnumType):
self._enums[tp] = None
for i, tp in enumerate(sorted(self._struct_unions,
key=lambda tp: tp.name)):
self._struct_unions[tp] = i
for i, tp in enumerate(sorted(self._enums,
key=lambda tp: tp.name)):
self._enums[tp] = i
#
# emit all bytecode sequences now
for tp in all_decls:
method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__)
method(tp, self._typesdict[tp])
#
# consistency check
for op in self.cffi_types:
assert isinstance(op, CffiOp)
self.cffi_types = tuple(self.cffi_types) # don't change any more
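    # Hedged sketch of the table built above: for a single declaration
    # like "int add(int, int);" the result is roughly
    #   _cffi_types[0] = OP_FUNCTION     (result type at index 1)
    #   _cffi_types[1] = OP_PRIMITIVE    (int)
    #   _cffi_types[2] = OP_PRIMITIVE    (int, re-emitted for arg 2)
    #   _cffi_types[3] = OP_FUNCTION_END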
def _enum_fields(self, tp):
# When producing C, expand all anonymous struct/union fields.
# That's necessary to have C code checking the offsets of the
# individual fields contained in them. When producing Python,
# don't do it and instead write it like it is, with the
# corresponding fields having an empty name. Empty names are
# recognized at runtime when we import the generated Python
# file.
expand_anonymous_struct_union = not self.target_is_python
return tp.enumfields(expand_anonymous_struct_union)
def _do_collect_type(self, tp):
if not isinstance(tp, model.BaseTypeByIdentity):
if isinstance(tp, tuple):
for x in tp:
self._do_collect_type(x)
return
if tp not in self._typesdict:
self._typesdict[tp] = None
if isinstance(tp, model.FunctionPtrType):
self._do_collect_type(tp.as_raw_function())
elif isinstance(tp, model.StructOrUnion):
if tp.fldtypes is not None and (
tp not in self.ffi._parser._included_declarations):
for name1, tp1, _, _ in self._enum_fields(tp):
self._do_collect_type(self._field_type(tp, name1, tp1))
else:
for _, x in tp._get_items():
self._do_collect_type(x)
def _generate(self, step_name):
lst = self.ffi._parser._declarations.items()
for name, (tp, quals) in sorted(lst):
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in recompile(): %r" % name)
try:
self._current_quals = quals
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
# ----------
ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"]
def collect_step_tables(self):
# collect the declarations for '_cffi_globals', '_cffi_typenames', etc.
self._lsts = {}
for step_name in self.ALL_STEPS:
self._lsts[step_name] = []
self._seen_struct_unions = set()
self._generate("ctx")
self._add_missing_struct_unions()
#
for step_name in self.ALL_STEPS:
lst = self._lsts[step_name]
if step_name != "field":
lst.sort(key=lambda entry: entry.name)
self._lsts[step_name] = tuple(lst) # don't change any more
#
# check for a possible internal inconsistency: _cffi_struct_unions
# should have been generated with exactly self._struct_unions
lst = self._lsts["struct_union"]
for tp, i in self._struct_unions.items():
assert i < len(lst)
assert lst[i].name == tp.name
assert len(lst) == len(self._struct_unions)
# same with enums
lst = self._lsts["enum"]
for tp, i in self._enums.items():
assert i < len(lst)
assert lst[i].name == tp.name
assert len(lst) == len(self._enums)
# ----------
def _prnt(self, what=''):
self._f.write(what + '\n')
def write_source_to_f(self, f, preamble):
if self.target_is_python:
assert preamble is None
self.write_py_source_to_f(f)
else:
assert preamble is not None
self.write_c_source_to_f(f, preamble)
def _rel_readlines(self, filename):
g = open(os.path.join(os.path.dirname(__file__), filename), 'r')
lines = g.readlines()
g.close()
return lines
def write_c_source_to_f(self, f, preamble):
self._f = f
prnt = self._prnt
if self.ffi._embedding is not None:
prnt('#define _CFFI_USE_EMBEDDING')
if not USE_LIMITED_API:
prnt('#define _CFFI_NO_LIMITED_API')
#
# first the '#include' (actually done by inlining the file's content)
lines = self._rel_readlines('_cffi_include.h')
i = lines.index('#include "parse_c_type.h"\n')
lines[i:i+1] = self._rel_readlines('parse_c_type.h')
prnt(''.join(lines))
#
# if we have ffi._embedding != None, we give it here as a macro
# and include an extra file
base_module_name = self.module_name.split('.')[-1]
if self.ffi._embedding is not None:
prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,))
prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {')
self._print_string_literal_in_array(self.ffi._embedding)
prnt('0 };')
prnt('#ifdef PYPY_VERSION')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % (
base_module_name,))
prnt('#elif PY_MAJOR_VERSION >= 3')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % (
base_module_name,))
prnt('#else')
prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % (
base_module_name,))
prnt('#endif')
lines = self._rel_readlines('_embedding.h')
i = lines.index('#include "_cffi_errors.h"\n')
lines[i:i+1] = self._rel_readlines('_cffi_errors.h')
prnt(''.join(lines))
self.needs_version(VERSION_EMBEDDED)
#
# then paste the C source given by the user, verbatim.
prnt('/************************************************************/')
prnt()
prnt(preamble)
prnt()
prnt('/************************************************************/')
prnt()
#
# the declaration of '_cffi_types'
prnt('static void *_cffi_types[] = {')
typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
for i, op in enumerate(self.cffi_types):
comment = ''
if i in typeindex2type:
comment = ' // ' + typeindex2type[i]._get_c_name()
prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment))
if not self.cffi_types:
prnt(' 0')
prnt('};')
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._seen_constants = set()
self._generate("decl")
#
# the declaration of '_cffi_globals' and '_cffi_typenames'
nums = {}
for step_name in self.ALL_STEPS:
lst = self._lsts[step_name]
nums[step_name] = len(lst)
if nums[step_name] > 0:
prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % (
step_name, step_name))
for entry in lst:
prnt(entry.as_c_expr())
prnt('};')
prnt()
#
# the declaration of '_cffi_includes'
if self.ffi._included_ffis:
prnt('static const char * const _cffi_includes[] = {')
for ffi_to_include in self.ffi._included_ffis:
try:
included_module_name, included_source = (
ffi_to_include._assigned_source[:2])
except AttributeError:
raise VerificationError(
"ffi object %r includes %r, but the latter has not "
"been prepared with set_source()" % (
self.ffi, ffi_to_include,))
if included_source is None:
raise VerificationError(
"not implemented yet: ffi.include() of a Python-based "
"ffi inside a C-based ffi")
prnt(' "%s",' % (included_module_name,))
prnt(' NULL')
prnt('};')
prnt()
#
# the declaration of '_cffi_type_context'
prnt('static const struct _cffi_type_context_s _cffi_type_context = {')
prnt(' _cffi_types,')
for step_name in self.ALL_STEPS:
if nums[step_name] > 0:
prnt(' _cffi_%ss,' % step_name)
else:
prnt(' NULL, /* no %ss */' % step_name)
for step_name in self.ALL_STEPS:
if step_name != "field":
prnt(' %d, /* num_%ss */' % (nums[step_name], step_name))
if self.ffi._included_ffis:
prnt(' _cffi_includes,')
else:
prnt(' NULL, /* no includes */')
prnt(' %d, /* num_types */' % (len(self.cffi_types),))
flags = 0
if self._num_externpy > 0 or self.ffi._embedding is not None:
flags |= 1 # set to mean that we use extern "Python"
prnt(' %d, /* flags */' % flags)
prnt('};')
prnt()
#
# the init function
prnt('#ifdef __GNUC__')
prnt('# pragma GCC visibility push(default) /* for -fvisibility= */')
prnt('#endif')
prnt()
prnt('#ifdef PYPY_VERSION')
prnt('PyMODINIT_FUNC')
prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,))
prnt('{')
if flags & 1:
prnt(' if (((intptr_t)p[0]) >= 0x0A03) {')
prnt(' _cffi_call_python_org = '
'(void(*)(struct _cffi_externpy_s *, char *))p[1];')
prnt(' }')
prnt(' p[0] = (const void *)0x%x;' % self._version)
prnt(' p[1] = &_cffi_type_context;')
prnt('#if PY_MAJOR_VERSION >= 3')
prnt(' return NULL;')
prnt('#endif')
prnt('}')
# on Windows, distutils insists on putting init_cffi_xyz in
# 'export_symbols', so instead of fighting it, just give up and
# give it one
prnt('# ifdef _MSC_VER')
prnt(' PyMODINIT_FUNC')
prnt('# if PY_MAJOR_VERSION >= 3')
prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,))
prnt('# else')
prnt(' init%s(void) { }' % (base_module_name,))
prnt('# endif')
prnt('# endif')
prnt('#elif PY_MAJOR_VERSION >= 3')
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % (base_module_name,))
prnt('{')
prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
self.module_name, self._version))
prnt('}')
prnt('#else')
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % (base_module_name,))
prnt('{')
prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % (
self.module_name, self._version))
prnt('}')
prnt('#endif')
prnt()
prnt('#ifdef __GNUC__')
prnt('# pragma GCC visibility pop')
prnt('#endif')
self._version = None
def _to_py(self, x):
if isinstance(x, str):
return "b'%s'" % (x,)
if isinstance(x, (list, tuple)):
rep = [self._to_py(item) for item in x]
if len(rep) == 1:
rep.append('')
return "(%s)" % (','.join(rep),)
return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp.
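    # Hedged example of _to_py(): the string 'ab' becomes "b'ab'", and a
    # one-element tuple ('ab',) becomes "(b'ab',)" -- the trailing comma
    # comes from the empty element appended above.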
def write_py_source_to_f(self, f):
self._f = f
prnt = self._prnt
#
# header
prnt("# auto-generated file")
prnt("import _cffi_backend")
#
# the 'import' of the included ffis
num_includes = len(self.ffi._included_ffis or ())
for i in range(num_includes):
ffi_to_include = self.ffi._included_ffis[i]
try:
included_module_name, included_source = (
ffi_to_include._assigned_source[:2])
except AttributeError:
raise VerificationError(
"ffi object %r includes %r, but the latter has not "
"been prepared with set_source()" % (
self.ffi, ffi_to_include,))
if included_source is not None:
raise VerificationError(
"not implemented yet: ffi.include() of a C-based "
"ffi inside a Python-based ffi")
prnt('from %s import ffi as _ffi%d' % (included_module_name, i))
prnt()
prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,))
prnt(" _version = 0x%x," % (self._version,))
self._version = None
#
# the '_types' keyword argument
self.cffi_types = tuple(self.cffi_types) # don't change any more
types_lst = [op.as_python_bytes() for op in self.cffi_types]
prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),))
typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
#
# the keyword arguments from ALL_STEPS
for step_name in self.ALL_STEPS:
lst = self._lsts[step_name]
if len(lst) > 0 and step_name != "field":
prnt(' _%ss = %s,' % (step_name, self._to_py(lst)))
#
# the '_includes' keyword argument
if num_includes > 0:
prnt(' _includes = (%s,),' % (
', '.join(['_ffi%d' % i for i in range(num_includes)]),))
#
# the footer
prnt(')')
# ----------
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type():
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
elif isinstance(tp, model.UnknownFloatType):
# don't check with is_float_type(): it may be a 'long
            # double' here, and _cffi_to_c_double would lose precision
converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
else:
cname = tp.get_c_name('')
converter = '(%s)_cffi_to_c_%s' % (cname,
tp.name.replace(' ', '_'))
if cname in ('char16_t', 'char32_t'):
self.needs_version(VERSION_CHAR16CHAR32)
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif (isinstance(tp, model.StructOrUnionOrEnum) or
isinstance(tp, model.BasePrimitiveType)):
# a struct (not a struct pointer) as a function argument;
# or, a complex (the same code works)
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars, freelines):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
freelines.add('if (large_args_free != NULL)'
' _cffi_free_array_arguments(large_args_free);')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' %s = ((size_t)datasize) <= 640 ? '
'(%s)alloca((size_t)datasize) : NULL;' % (
tovar, tp.get_c_name('')))
self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
'(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
self._prnt(' datasize, &large_args_free) < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.BasePrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif isinstance(tp, model.UnknownFloatType):
return '_cffi_from_c_double(%s)' % (var,)
elif tp.name != 'long double' and not tp.is_complex_type():
cname = tp.name.replace(' ', '_')
if cname in ('char16_t', 'char32_t'):
self.needs_version(VERSION_CHAR16CHAR32)
return '_cffi_from_c_%s(%s)' % (cname, var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructOrUnion):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs
def _typedef_type(self, tp, name):
return self._global_type(tp, "(*(%s *)0)" % (name,))
def _generate_cpy_typedef_collecttype(self, tp, name):
self._do_collect_type(self._typedef_type(tp, name))
def _generate_cpy_typedef_decl(self, tp, name):
pass
def _typedef_ctx(self, tp, name):
type_index = self._typesdict[tp]
self._lsts["typename"].append(TypenameExpr(name, type_index))
def _generate_cpy_typedef_ctx(self, tp, name):
tp = self._typedef_type(tp, name)
self._typedef_ctx(tp, name)
if getattr(tp, "origin", None) == "unknown_type":
self._struct_ctx(tp, tp.name, approxname=None)
elif isinstance(tp, model.NamedPointerType):
self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name,
named_ptr=tp)
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
self._do_collect_type(tp.as_raw_function())
if tp.ellipsis and not self.target_is_python:
self._do_collect_type(tp)
def _generate_cpy_function_decl(self, tp, name):
assert not self.target_is_python
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_constant_decl(tp, name)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
#
# ------------------------------
# the 'd' version of the function, only for addressof(lib, 'func')
arguments = []
call_arguments = []
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
arguments.append(type.get_c_name(' x%d' % i, context))
call_arguments.append('x%d' % i)
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
if tp.abi:
abi = tp.abi + ' '
else:
abi = ''
name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments)
prnt('static %s' % (tp.result.get_c_name(name_and_arguments),))
prnt('{')
call_arguments = ', '.join(call_arguments)
result_code = 'return '
if isinstance(tp.result, model.VoidType):
result_code = ''
prnt(' %s%s(%s);' % (result_code, name, call_arguments))
prnt('}')
#
prnt('#ifndef PYPY_VERSION') # ------------------------------
#
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
arg = type.get_c_name(' x%d' % i, context)
prnt(' %s;' % arg)
#
localvars = set()
freelines = set()
for type in tp.args:
self._extra_local_variables(type, localvars, freelines)
for decl in sorted(localvars):
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
result_decl = ' %s;' % tp.result.get_c_name(' result', context)
prnt(result_decl)
prnt(' PyObject *pyresult;')
else:
result_decl = None
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % (
name, len(rng), len(rng),
', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
call_arguments = ['x%d' % i for i in range(len(tp.args))]
call_arguments = ', '.join(call_arguments)
prnt(' { %s%s(%s); }' % (result_code, name, call_arguments))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' pyresult = %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
for freeline in freelines:
prnt(' ' + freeline)
prnt(' return pyresult;')
else:
for freeline in freelines:
prnt(' ' + freeline)
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
#
prnt('#else') # ------------------------------
#
# the PyPy version: need to replace struct/union arguments with
# pointers, and if the result is a struct/union, insert a first
# arg that is a pointer to the result. We also do that for
# complex args and return type.
def need_indirection(type):
return (isinstance(type, model.StructOrUnion) or
(isinstance(type, model.PrimitiveType) and
type.is_complex_type()))
difference = False
arguments = []
call_arguments = []
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
indirection = ''
if need_indirection(type):
indirection = '*'
difference = True
arg = type.get_c_name(' %sx%d' % (indirection, i), context)
arguments.append(arg)
call_arguments.append('%sx%d' % (indirection, i))
tp_result = tp.result
if need_indirection(tp_result):
context = 'result of %s' % name
arg = tp_result.get_c_name(' *result', context)
arguments.insert(0, arg)
tp_result = model.void_type
result_decl = None
result_code = '*result = '
difference = True
if difference:
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name,
repr_arguments)
prnt('static %s' % (tp_result.get_c_name(name_and_arguments),))
prnt('{')
if result_decl:
prnt(result_decl)
call_arguments = ', '.join(call_arguments)
prnt(' { %s%s(%s); }' % (result_code, name, call_arguments))
if result_decl:
prnt(' return result;')
prnt('}')
else:
prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name))
#
prnt('#endif') # ------------------------------
prnt()
def _generate_cpy_function_ctx(self, tp, name):
if tp.ellipsis and not self.target_is_python:
self._generate_cpy_constant_ctx(tp, name)
return
type_index = self._typesdict[tp.as_raw_function()]
numargs = len(tp.args)
if self.target_is_python:
meth_kind = OP_DLOPEN_FUNC
elif numargs == 0:
meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS'
elif numargs == 1:
meth_kind = OP_CPYTHON_BLTN_O # 'METH_O'
else:
meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS'
self._lsts["global"].append(
GlobalExpr(name, '_cffi_f_%s' % name,
CffiOp(meth_kind, type_index),
size='_cffi_d_%s' % name))
# ----------
# named structs or unions
def _field_type(self, tp_struct, field_name, tp_field):
if isinstance(tp_field, model.ArrayType):
actual_length = tp_field.length
if actual_length == '...':
ptr_struct_name = tp_struct.get_c_name('*')
actual_length = '_cffi_array_len(((%s)0)->%s)' % (
ptr_struct_name, field_name)
tp_item = self._field_type(tp_struct, '%s[0]' % field_name,
tp_field.item)
tp_field = model.ArrayType(tp_item, actual_length)
return tp_field
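    # Hedged example: for "struct s { int a[...]; }" the '...' length of
    # field 'a' is replaced by the C expression
    # _cffi_array_len(((struct s *)0)->a), so the real length is taken
    # from the C compiler rather than from the cdef.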
def _struct_collecttype(self, tp):
self._do_collect_type(tp)
if self.target_is_python:
# also requires nested anon struct/unions in ABI mode, recursively
for fldtype in tp.anonymous_struct_fields():
self._struct_collecttype(fldtype)
def _struct_decl(self, tp, cname, approxname):
if tp.fldtypes is None:
return
prnt = self._prnt
checkfuncname = '_cffi_checkfld_%s' % (approxname,)
prnt('_CFFI_UNUSED_FN')
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in self._enum_fields(tp):
try:
if ftype.is_integer_type() or fbitsize >= 0:
# accept all integers, but complain on float or double
if fname != '':
prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is "
"an integer */" % (fname, cname, fname))
continue
# only accept exactly the type declared, except that '[]'
# is interpreted as a '*' and so will match any array length.
# (It would also match '*', but that's harder to detect...)
while (isinstance(ftype, model.ArrayType)
and (ftype.length is None or ftype.length == '...')):
ftype = ftype.item
fname = fname + '[0]'
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname))
prnt()
def _struct_ctx(self, tp, cname, approxname, named_ptr=None):
type_index = self._typesdict[tp]
reason_for_not_expanding = None
flags = []
if isinstance(tp, model.UnionType):
flags.append("_CFFI_F_UNION")
if tp.fldtypes is None:
flags.append("_CFFI_F_OPAQUE")
reason_for_not_expanding = "opaque"
if (tp not in self.ffi._parser._included_declarations and
(named_ptr is None or
named_ptr not in self.ffi._parser._included_declarations)):
if tp.fldtypes is None:
pass # opaque
elif tp.partial or any(tp.anonymous_struct_fields()):
pass # field layout obtained silently from the C compiler
else:
flags.append("_CFFI_F_CHECK_FIELDS")
if tp.packed:
if tp.packed > 1:
raise NotImplementedError(
"%r is declared with 'pack=%r'; only 0 or 1 are "
"supported in API mode (try to use \"...;\", which "
"does not require a 'pack' declaration)" %
(tp, tp.packed))
flags.append("_CFFI_F_PACKED")
else:
flags.append("_CFFI_F_EXTERNAL")
reason_for_not_expanding = "external"
flags = '|'.join(flags) or '0'
c_fields = []
if reason_for_not_expanding is None:
enumfields = list(self._enum_fields(tp))
for fldname, fldtype, fbitsize, fqual in enumfields:
fldtype = self._field_type(tp, fldname, fldtype)
self._check_not_opaque(fldtype,
"field '%s.%s'" % (tp.name, fldname))
# cname is None for _add_missing_struct_unions() only
op = OP_NOOP
if fbitsize >= 0:
op = OP_BITFIELD
size = '%d /* bits */' % fbitsize
elif cname is None or (
isinstance(fldtype, model.ArrayType) and
fldtype.length is None):
size = '(size_t)-1'
else:
size = 'sizeof(((%s)0)->%s)' % (
tp.get_c_name('*') if named_ptr is None
else named_ptr.name,
fldname)
if cname is None or fbitsize >= 0:
offset = '(size_t)-1'
elif named_ptr is not None:
offset = '((char *)&((%s)0)->%s) - (char *)0' % (
named_ptr.name, fldname)
else:
offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname)
c_fields.append(
FieldExpr(fldname, offset, size, fbitsize,
CffiOp(op, self._typesdict[fldtype])))
first_field_index = len(self._lsts["field"])
self._lsts["field"].extend(c_fields)
#
if cname is None: # unknown name, for _add_missing_struct_unions
size = '(size_t)-2'
align = -2
comment = "unnamed"
else:
if named_ptr is not None:
size = 'sizeof(*(%s)0)' % (named_ptr.name,)
align = '-1 /* unknown alignment */'
else:
size = 'sizeof(%s)' % (cname,)
align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,)
comment = None
else:
size = '(size_t)-1'
align = -1
first_field_index = -1
comment = reason_for_not_expanding
self._lsts["struct_union"].append(
StructUnionExpr(tp.name, type_index, flags, size, align, comment,
first_field_index, c_fields))
self._seen_struct_unions.add(tp)
def _check_not_opaque(self, tp, location):
while isinstance(tp, model.ArrayType):
tp = tp.item
if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None:
raise TypeError(
"%s is of an opaque type (not declared in cdef())" % location)
def _add_missing_struct_unions(self):
# not very nice, but some struct declarations might be missing
# because they don't have any known C name. Check that they are
# not partial (we can't complete or verify them!) and emit them
# anonymously.
lst = list(self._struct_unions.items())
lst.sort(key=lambda tp_order: tp_order[1])
for tp, order in lst:
if tp not in self._seen_struct_unions:
if tp.partial:
raise NotImplementedError("internal inconsistency: %r is "
"partial but was not seen at "
"this point" % (tp,))
if tp.name.startswith('$') and tp.name[1:].isdigit():
approxname = tp.name[1:]
elif tp.name == '_IO_FILE' and tp.forcename == 'FILE':
approxname = 'FILE'
self._typedef_ctx(tp, 'FILE')
else:
raise NotImplementedError("internal inconsistency: %r" %
(tp,))
self._struct_ctx(tp, None, approxname)
def _generate_cpy_struct_collecttype(self, tp, name):
self._struct_collecttype(tp)
_generate_cpy_union_collecttype = _generate_cpy_struct_collecttype
def _struct_names(self, tp):
cname = tp.get_c_name('')
if ' ' in cname:
return cname, cname.replace(' ', '_')
else:
return cname, '_' + cname
def _generate_cpy_struct_decl(self, tp, name):
self._struct_decl(tp, *self._struct_names(tp))
_generate_cpy_union_decl = _generate_cpy_struct_decl
def _generate_cpy_struct_ctx(self, tp, name):
self._struct_ctx(tp, *self._struct_names(tp))
_generate_cpy_union_ctx = _generate_cpy_struct_ctx
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
def _generate_cpy_anonymous_collecttype(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_collecttype(tp, name)
else:
self._struct_collecttype(tp)
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp)
else:
self._struct_decl(tp, name, 'typedef_' + name)
def _generate_cpy_anonymous_ctx(self, tp, name):
if isinstance(tp, model.EnumType):
self._enum_ctx(tp, name)
else:
self._struct_ctx(tp, name, 'typedef_' + name)
# ----------
# constants, declared with "static const ..."
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
check_value=None):
if (category, name) in self._seen_constants:
raise VerificationError(
"duplicate declaration of %s '%s'" % (category, name))
self._seen_constants.add((category, name))
#
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
if is_int:
prnt('static int %s(unsigned long long *o)' % funcname)
prnt('{')
prnt(' int n = (%s) <= 0;' % (name,))
prnt(' *o = (unsigned long long)((%s) | 0);'
' /* check that %s is an integer */' % (name, name))
if check_value is not None:
if check_value > 0:
check_value = '%dU' % (check_value,)
prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,))
prnt(' n |= 2;')
prnt(' return n;')
prnt('}')
else:
assert check_value is None
prnt('static void %s(char *o)' % funcname)
prnt('{')
prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name))
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = tp.is_integer_type()
if not is_int or self.target_is_python:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
def _generate_cpy_constant_ctx(self, tp, name):
if not self.target_is_python and tp.is_integer_type():
type_op = CffiOp(OP_CONSTANT_INT, -1)
else:
if self.target_is_python:
const_kind = OP_DLOPEN_CONST
else:
const_kind = OP_CONSTANT
type_index = self._typesdict[tp]
type_op = CffiOp(const_kind, type_index)
self._lsts["global"].append(
GlobalExpr(name, '_cffi_const_%s' % name, type_op))
# ----------
# enums
def _generate_cpy_enum_collecttype(self, tp, name):
self._do_collect_type(tp)
def _generate_cpy_enum_decl(self, tp, name=None):
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator)
def _enum_ctx(self, tp, cname):
type_index = self._typesdict[tp]
type_op = CffiOp(OP_ENUM, -1)
if self.target_is_python:
tp.check_not_partial()
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._lsts["global"].append(
GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
check_value=enumvalue))
#
if cname is not None and '$' not in cname and not self.target_is_python:
size = "sizeof(%s)" % cname
signed = "((%s)-1) <= 0" % cname
else:
basetp = tp.build_baseinttype(self.ffi, [])
size = self.ffi.sizeof(basetp)
signed = int(int(self.ffi.cast(basetp, -1)) < 0)
allenums = ",".join(tp.enumerators)
self._lsts["enum"].append(
EnumExpr(tp.name, type_index, size, signed, allenums))
def _generate_cpy_enum_ctx(self, tp, name):
self._enum_ctx(tp, tp._get_c_name())
# ----------
# macros: for now only for integers
def _generate_cpy_macro_collecttype(self, tp, name):
pass
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
def _generate_cpy_macro_ctx(self, tp, name):
if tp == '...':
if self.target_is_python:
raise VerificationError(
"cannot use the syntax '...' in '#define %s ...' when "
"using the ABI mode" % (name,))
check_value = None
else:
check_value = tp # an integer
type_op = CffiOp(OP_CONSTANT_INT, -1)
self._lsts["global"].append(
GlobalExpr(name, '_cffi_const_%s' % name, type_op,
check_value=check_value))
# ----------
# global variables
def _global_type(self, tp, global_name):
if isinstance(tp, model.ArrayType):
actual_length = tp.length
if actual_length == '...':
actual_length = '_cffi_array_len(%s)' % (global_name,)
tp_item = self._global_type(tp.item, '%s[0]' % global_name)
tp = model.ArrayType(tp_item, actual_length)
return tp
def _generate_cpy_variable_collecttype(self, tp, name):
self._do_collect_type(self._global_type(tp, name))
def _generate_cpy_variable_decl(self, tp, name):
prnt = self._prnt
tp = self._global_type(tp, name)
if isinstance(tp, model.ArrayType) and tp.length is None:
tp = tp.item
ampersand = ''
else:
ampersand = '&'
# This code assumes that casts from "tp *" to "void *" is a
# no-op, i.e. a function that returns a "tp *" can be called
# as if it returned a "void *". This should be generally true
# on any modern machine. The only exception to that rule (on
# uncommon architectures, and as far as I can tell) might be
# if 'tp' were a function type, but that is not possible here.
# (If 'tp' is a function _pointer_ type, then casts from "fn_t
# **" to "void *" are again no-ops, as far as I can tell.)
decl = '*_cffi_var_%s(void)' % (name,)
prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
prnt('{')
prnt(' return %s(%s);' % (ampersand, name))
prnt('}')
prnt()
def _generate_cpy_variable_ctx(self, tp, name):
tp = self._global_type(tp, name)
type_index = self._typesdict[tp]
if self.target_is_python:
op = OP_GLOBAL_VAR
else:
op = OP_GLOBAL_VAR_F
self._lsts["global"].append(
GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
# ----------
# extern "Python"
def _generate_cpy_extern_python_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
self._do_collect_type(tp)
_generate_cpy_dllexport_python_collecttype = \
_generate_cpy_extern_python_plus_c_collecttype = \
_generate_cpy_extern_python_collecttype
def _extern_python_decl(self, tp, name, tag_and_space):
prnt = self._prnt
if isinstance(tp.result, model.VoidType):
size_of_result = '0'
else:
context = 'result of %s' % name
size_of_result = '(int)sizeof(%s)' % (
tp.result.get_c_name('', context),)
prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name)
prnt(' { "%s.%s", %s, 0, 0 };' % (
self.module_name, name, size_of_result))
prnt()
#
arguments = []
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
arg = type.get_c_name(' a%d' % i, context)
arguments.append(arg)
#
repr_arguments = ', '.join(arguments)
repr_arguments = repr_arguments or 'void'
name_and_arguments = '%s(%s)' % (name, repr_arguments)
if tp.abi == "__stdcall":
name_and_arguments = '_cffi_stdcall ' + name_and_arguments
#
def may_need_128_bits(tp):
return (isinstance(tp, model.PrimitiveType) and
tp.name == 'long double')
#
size_of_a = max(len(tp.args)*8, 8)
if may_need_128_bits(tp.result):
size_of_a = max(size_of_a, 16)
if isinstance(tp.result, model.StructOrUnion):
size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
tp.result.get_c_name(''), size_of_a,
tp.result.get_c_name(''), size_of_a)
prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
prnt('{')
prnt(' char a[%s];' % size_of_a)
prnt(' char *p = a;')
for i, type in enumerate(tp.args):
arg = 'a%d' % i
if (isinstance(type, model.StructOrUnion) or
may_need_128_bits(type)):
arg = '&' + arg
type = model.PointerType(type)
prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg))
prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name)
if not isinstance(tp.result, model.VoidType):
prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),))
prnt('}')
prnt()
self._num_externpy += 1
def _generate_cpy_extern_python_decl(self, tp, name):
self._extern_python_decl(tp, name, 'static ')
def _generate_cpy_dllexport_python_decl(self, tp, name):
self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
self._extern_python_decl(tp, name, '')
def _generate_cpy_extern_python_ctx(self, tp, name):
if self.target_is_python:
raise VerificationError(
"cannot use 'extern \"Python\"' in the ABI mode")
if tp.ellipsis:
raise NotImplementedError("a vararg function is extern \"Python\"")
type_index = self._typesdict[tp]
type_op = CffiOp(OP_EXTERN_PYTHON, type_index)
self._lsts["global"].append(
GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
_generate_cpy_dllexport_python_ctx = \
_generate_cpy_extern_python_plus_c_ctx = \
_generate_cpy_extern_python_ctx
def _print_string_literal_in_array(self, s):
prnt = self._prnt
prnt('// # NB. this is not a string because of a size limit in MSVC')
if not isinstance(s, bytes): # unicode
s = s.encode('utf-8') # -> bytes
else:
s.decode('utf-8') # got bytes, check for valid utf-8
try:
s.decode('ascii')
except UnicodeDecodeError:
s = b'# -*- encoding: utf8 -*-\n' + s
for line in s.splitlines(True):
comment = line
if type('//') is bytes: # python2
line = map(ord, line) # make a list of integers
else: # python3
# type(line) is bytes, which enumerates like a list of integers
comment = ascii(comment)[1:-1]
prnt(('// ' + comment).rstrip())
printed_line = ''
for c in line:
if len(printed_line) >= 76:
prnt(printed_line)
printed_line = ''
printed_line += '%d,' % (c,)
prnt(printed_line)
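    # Hedged example: for the bytes b'hi\n' the loop above first prints
    # the escaped text as a '//' comment line and then the
    # comma-separated byte values "104,105,10,", wrapping the output at
    # 76 columns.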
# ----------
# emitting the opcodes for individual types
def _emit_bytecode_VoidType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID)
def _emit_bytecode_PrimitiveType(self, tp, index):
prim_index = PRIMITIVE_TO_INDEX[tp.name]
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index)
def _emit_bytecode_UnknownIntegerType(self, tp, index):
s = ('_cffi_prim_int(sizeof(%s), (\n'
' ((%s)-1) | 0 /* check that %s is an integer type */\n'
' ) <= 0)' % (tp.name, tp.name, tp.name))
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
def _emit_bytecode_UnknownFloatType(self, tp, index):
s = ('_cffi_prim_float(sizeof(%s) *\n'
' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n'
' )' % (tp.name, tp.name))
self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
def _emit_bytecode_RawFunctionType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result])
index += 1
for tp1 in tp.args:
realindex = self._typesdict[tp1]
if index != realindex:
if isinstance(tp1, model.PrimitiveType):
self._emit_bytecode_PrimitiveType(tp1, index)
else:
self.cffi_types[index] = CffiOp(OP_NOOP, realindex)
index += 1
flags = int(tp.ellipsis)
if tp.abi is not None:
if tp.abi == '__stdcall':
flags |= 2
else:
raise NotImplementedError("abi=%r" % (tp.abi,))
self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags)
def _emit_bytecode_PointerType(self, tp, index):
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype])
_emit_bytecode_ConstPointerType = _emit_bytecode_PointerType
_emit_bytecode_NamedPointerType = _emit_bytecode_PointerType
def _emit_bytecode_FunctionPtrType(self, tp, index):
raw = tp.as_raw_function()
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw])
def _emit_bytecode_ArrayType(self, tp, index):
item_index = self._typesdict[tp.item]
if tp.length is None:
self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index)
elif tp.length == '...':
raise VerificationError(
"type %s badly placed: the '...' array length can only be "
"used on global arrays or on fields of structures" % (
str(tp).replace('/*...*/', '...'),))
else:
assert self.cffi_types[index + 1] == 'LEN'
self.cffi_types[index] = CffiOp(OP_ARRAY, item_index)
self.cffi_types[index + 1] = CffiOp(None, str(tp.length))
def _emit_bytecode_StructType(self, tp, index):
struct_index = self._struct_unions[tp]
self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index)
_emit_bytecode_UnionType = _emit_bytecode_StructType
def _emit_bytecode_EnumType(self, tp, index):
enum_index = self._enums[tp]
self.cffi_types[index] = CffiOp(OP_ENUM, enum_index)
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
if verbose:
print("generating %s" % (target_file,))
recompiler = Recompiler(ffi, module_name,
target_is_python=(preamble is None))
recompiler.collect_type_table()
recompiler.collect_step_tables()
f = NativeIO()
recompiler.write_source_to_f(f, preamble)
output = f.getvalue()
try:
with open(target_file, 'r') as f1:
if f1.read(len(output) + 1) != output:
raise IOError
if verbose:
print("(already up-to-date)")
return False # already up-to-date
except IOError:
tmp_file = '%s.~%d' % (target_file, os.getpid())
with open(tmp_file, 'w') as f1:
f1.write(output)
try:
os.rename(tmp_file, target_file)
except OSError:
os.unlink(target_file)
os.rename(tmp_file, target_file)
return True
def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
assert preamble is not None
return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
verbose)
def make_py_source(ffi, module_name, target_py_file, verbose=False):
return _make_c_or_py_source(ffi, module_name, None, target_py_file,
verbose)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
try:
os.makedirs(os.path.join(outputdir, *parts[:-1]))
except OSError:
pass
parts[-1] += extension
return os.path.join(outputdir, *parts), parts
# Aaargh. Distutils is not tested at all for the purpose of compiling
# DLLs that are not extension modules. Here are some hacks to work
# around that, in the _patch_for_*() functions...
def _patch_meth(patchlist, cls, name, new_meth):
old = getattr(cls, name)
patchlist.append((cls, name, old))
setattr(cls, name, new_meth)
return old
def _unpatch_meths(patchlist):
for cls, name, old_meth in reversed(patchlist):
setattr(cls, name, old_meth)
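# Hedged illustration (not part of upstream cffi) of how the patch/unpatch
# helpers above are meant to be used; the class '_Example' and its method
# 'fmt' are hypothetical placeholders.
def _example_patch_usage():   # pragma: no cover
    class _Example(object):
        def fmt(self):
            return 'original'
    patchlist = []
    _patch_meth(patchlist, _Example, 'fmt', lambda self: 'patched')
    try:
        assert _Example().fmt() == 'patched'   # patched behavior in effect
    finally:
        _unpatch_meths(patchlist)              # restore in reverse order
    assert _Example().fmt() == 'original'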
def _patch_for_embedding(patchlist):
if sys.platform == 'win32':
# we must not remove the manifest when building for embedding!
from distutils.msvc9compiler import MSVCCompiler
_patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref',
lambda self, manifest_file: manifest_file)
if sys.platform == 'darwin':
# we must not make a '-bundle', but a '-dynamiclib' instead
from distutils.ccompiler import CCompiler
def my_link_shared_object(self, *args, **kwds):
if '-bundle' in self.linker_so:
self.linker_so = list(self.linker_so)
i = self.linker_so.index('-bundle')
self.linker_so[i] = '-dynamiclib'
return old_link_shared_object(self, *args, **kwds)
old_link_shared_object = _patch_meth(patchlist, CCompiler,
'link_shared_object',
my_link_shared_object)
def _patch_for_target(patchlist, target):
from distutils.command.build_ext import build_ext
# if 'target' is different from '*', we need to patch some internal
# method to just return this 'target' value, instead of having it
# built from module_name
if target.endswith('.*'):
target = target[:-2]
if sys.platform == 'win32':
target += '.dll'
elif sys.platform == 'darwin':
target += '.dylib'
else:
target += '.so'
_patch_meth(patchlist, build_ext, 'get_ext_filename',
lambda self, ext_name: target)
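# recompile() drives the whole pipeline: with a C 'preamble' it writes the
# .c source and either compiles it (returning the output filename) or, with
# call_c_compiler=False, returns (extension, updated); with preamble=None
# it writes a pure-Python source instead.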
def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
c_file=None, source_extension='.c', extradir=None,
compiler_verbose=1, target=None, debug=None, **kwds):
if not isinstance(module_name, str):
module_name = module_name.encode('ascii')
if ffi._windows_unicode:
ffi._apply_windows_unicode(kwds)
if preamble is not None:
embedding = (ffi._embedding is not None)
if embedding:
ffi._apply_embedding_fix(kwds)
if c_file is None:
c_file, parts = _modname_to_file(tmpdir, module_name,
source_extension)
if extradir:
parts = [extradir] + parts
ext_c_file = os.path.join(*parts)
else:
ext_c_file = c_file
#
if target is None:
if embedding:
target = '%s.*' % module_name
else:
target = '*'
#
ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
updated = make_c_source(ffi, module_name, preamble, c_file,
verbose=compiler_verbose)
if call_c_compiler:
patchlist = []
cwd = os.getcwd()
try:
if embedding:
_patch_for_embedding(patchlist)
if target != '*':
_patch_for_target(patchlist, target)
if compiler_verbose:
if tmpdir == '.':
msg = 'the current directory is'
else:
msg = 'setting the current directory to'
print('%s %r' % (msg, os.path.abspath(tmpdir)))
os.chdir(tmpdir)
outputfilename = ffiplatform.compile('.', ext,
compiler_verbose, debug)
finally:
os.chdir(cwd)
_unpatch_meths(patchlist)
return outputfilename
else:
return ext, updated
else:
if c_file is None:
c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
updated = make_py_source(ffi, module_name, c_file,
verbose=compiler_verbose)
if call_c_compiler:
return c_file
else:
return None, updated
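# Hedged usage sketch (illustration only, not part of upstream cffi):
# emit the C source for a one-function FFI without invoking a compiler.
# The module name '_example' and the temporary directory are assumptions
# of this example; it requires an installed 'cffi' package.
if __name__ == '__main__':   # pragma: no cover
    import cffi, tempfile
    _ffi = cffi.FFI()
    _ffi.cdef("int add(int, int);")
    _preamble = "int add(int a, int b) { return a + b; }"
    _ext, _updated = recompile(_ffi, '_example', _preamble,
                               tmpdir=tempfile.mkdtemp(),
                               call_c_compiler=False)
    print(_ext.sources, _updated)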
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/backend_ctypes.py | import ctypes, ctypes.util, operator, sys
from . import model
if sys.version_info < (3,):
bytechr = chr
else:
unicode = str
long = int
xrange = range
bytechr = lambda num: bytes([num])
class CTypesType(type):
pass
class CTypesData(object):
__metaclass__ = CTypesType
__slots__ = ['__weakref__']
__name__ = '<cdata>'
def __init__(self, *args):
raise TypeError("cannot instantiate %r" % (self.__class__,))
@classmethod
def _newp(cls, init):
raise TypeError("expected a pointer or array ctype, got '%s'"
% (cls._get_c_name(),))
@staticmethod
def _to_ctypes(value):
raise TypeError
@classmethod
def _arg_to_ctypes(cls, *value):
try:
ctype = cls._ctype
except AttributeError:
raise TypeError("cannot create an instance of %r" % (cls,))
if value:
res = cls._to_ctypes(*value)
if not isinstance(res, ctype):
res = cls._ctype(res)
else:
res = cls._ctype()
return res
@classmethod
def _create_ctype_obj(cls, init):
if init is None:
return cls._arg_to_ctypes()
else:
return cls._arg_to_ctypes(init)
@staticmethod
def _from_ctypes(ctypes_value):
raise TypeError
@classmethod
def _get_c_name(cls, replace_with=''):
return cls._reftypename.replace(' &', replace_with)
@classmethod
def _fix_class(cls):
cls.__name__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__module__ = 'ffi'
def _get_own_repr(self):
raise NotImplementedError
def _addr_repr(self, address):
if address == 0:
return 'NULL'
else:
if address < 0:
address += 1 << (8*ctypes.sizeof(ctypes.c_void_p))
return '0x%x' % address
def __repr__(self, c_name=None):
own = self._get_own_repr()
return '<cdata %r %s>' % (c_name or self._get_c_name(), own)
def _convert_to_address(self, BClass):
if BClass is None:
raise TypeError("cannot convert %r to an address" % (
self._get_c_name(),))
else:
raise TypeError("cannot convert %r to %r" % (
self._get_c_name(), BClass._get_c_name()))
@classmethod
def _get_size(cls):
return ctypes.sizeof(cls._ctype)
def _get_size_of_instance(self):
return ctypes.sizeof(self._ctype)
@classmethod
def _cast_from(cls, source):
raise TypeError("cannot cast to %r" % (cls._get_c_name(),))
def _cast_to_integer(self):
return self._convert_to_address(None)
@classmethod
def _alignment(cls):
return ctypes.alignment(cls._ctype)
def __iter__(self):
raise TypeError("cdata %r does not support iteration" % (
self._get_c_name()),)
def _make_cmp(name):
cmpfunc = getattr(operator, name)
def cmp(self, other):
v_is_ptr = not isinstance(self, CTypesGenericPrimitive)
w_is_ptr = (isinstance(other, CTypesData) and
not isinstance(other, CTypesGenericPrimitive))
if v_is_ptr and w_is_ptr:
return cmpfunc(self._convert_to_address(None),
other._convert_to_address(None))
elif v_is_ptr or w_is_ptr:
return NotImplemented
else:
if isinstance(self, CTypesGenericPrimitive):
self = self._value
if isinstance(other, CTypesGenericPrimitive):
other = other._value
return cmpfunc(self, other)
cmp.func_name = name
return cmp
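    # the six rich-comparison methods are generated from _make_cmp():
    # two non-primitives compare by address, two primitives by value,
    # and a mixed comparison returns NotImplemented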
__eq__ = _make_cmp('__eq__')
__ne__ = _make_cmp('__ne__')
__lt__ = _make_cmp('__lt__')
__le__ = _make_cmp('__le__')
__gt__ = _make_cmp('__gt__')
__ge__ = _make_cmp('__ge__')
def __hash__(self):
return hash(self._convert_to_address(None))
def _to_string(self, maxlen):
raise TypeError("string(): %r" % (self,))
class CTypesGenericPrimitive(CTypesData):
__slots__ = []
def __hash__(self):
return hash(self._value)
def _get_own_repr(self):
return repr(self._from_ctypes(self._value))
class CTypesGenericArray(CTypesData):
__slots__ = []
@classmethod
def _newp(cls, init):
return cls(init)
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
class CTypesGenericPtr(CTypesData):
__slots__ = ['_address', '_as_ctype_ptr']
_automatic_casts = False
kind = "pointer"
@classmethod
def _newp(cls, init):
return cls(init)
@classmethod
def _cast_from(cls, source):
if source is None:
address = 0
elif isinstance(source, CTypesData):
address = source._cast_to_integer()
elif isinstance(source, (int, long)):
address = source
else:
raise TypeError("bad type for cast to %r: %r" %
(cls, type(source).__name__))
return cls._new_pointer_at(address)
@classmethod
def _new_pointer_at(cls, address):
self = cls.__new__(cls)
self._address = address
self._as_ctype_ptr = ctypes.cast(address, cls._ctype)
return self
def _get_own_repr(self):
try:
return self._addr_repr(self._address)
except AttributeError:
return '???'
def _cast_to_integer(self):
return self._address
def __nonzero__(self):
return bool(self._address)
__bool__ = __nonzero__
@classmethod
def _to_ctypes(cls, value):
if not isinstance(value, CTypesData):
raise TypeError("unexpected %s object" % type(value).__name__)
address = value._convert_to_address(cls)
return ctypes.cast(address, cls._ctype)
@classmethod
def _from_ctypes(cls, ctypes_ptr):
address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0
return cls._new_pointer_at(address)
@classmethod
def _initialize(cls, ctypes_ptr, value):
if value:
ctypes_ptr.contents = cls._to_ctypes(value).contents
def _convert_to_address(self, BClass):
if (BClass in (self.__class__, None) or BClass._automatic_casts
or self._automatic_casts):
return self._address
else:
return CTypesData._convert_to_address(self, BClass)
class CTypesBaseStructOrUnion(CTypesData):
__slots__ = ['_blob']
@classmethod
def _create_ctype_obj(cls, init):
# may be overridden
raise TypeError("cannot instantiate opaque type %s" % (cls,))
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
@classmethod
def _offsetof(cls, fieldname):
return getattr(cls._ctype, fieldname).offset
def _convert_to_address(self, BClass):
if getattr(BClass, '_BItem', None) is self.__class__:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@classmethod
def _from_ctypes(cls, ctypes_struct_or_union):
self = cls.__new__(cls)
self._blob = ctypes_struct_or_union
return self
@classmethod
def _to_ctypes(cls, value):
return value._blob
def __repr__(self, c_name=None):
return CTypesData.__repr__(self, c_name or self._get_c_name(' &'))
class CTypesBackend(object):
PRIMITIVE_TYPES = {
'char': ctypes.c_char,
'short': ctypes.c_short,
'int': ctypes.c_int,
'long': ctypes.c_long,
'long long': ctypes.c_longlong,
'signed char': ctypes.c_byte,
'unsigned char': ctypes.c_ubyte,
'unsigned short': ctypes.c_ushort,
'unsigned int': ctypes.c_uint,
'unsigned long': ctypes.c_ulong,
'unsigned long long': ctypes.c_ulonglong,
'float': ctypes.c_float,
'double': ctypes.c_double,
'_Bool': ctypes.c_bool,
}
for _name in ['unsigned long long', 'unsigned long',
'unsigned int', 'unsigned short', 'unsigned char']:
_size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_void_p):
PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_size_t):
PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name]
for _name in ['long long', 'long', 'int', 'short', 'signed char']:
_size = ctypes.sizeof(PRIMITIVE_TYPES[_name])
PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_void_p):
PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name]
PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name]
if _size == ctypes.sizeof(ctypes.c_size_t):
PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name]
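    # the loops above alias the fixed-width names (uint8_t, int32_t,
    # uintptr_t, size_t, ...) to whichever native ctypes type has the
    # matching size on this platform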
def __init__(self):
self.RTLD_LAZY = 0 # not supported anyway by ctypes
self.RTLD_NOW = 0
self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL
self.RTLD_LOCAL = ctypes.RTLD_LOCAL
def set_ffi(self, ffi):
self.ffi = ffi
def _get_types(self):
return CTypesData, CTypesType
def load_library(self, path, flags=0):
cdll = ctypes.CDLL(path, flags)
return CTypesLibrary(self, cdll)
def new_void_type(self):
class CTypesVoid(CTypesData):
__slots__ = []
_reftypename = 'void &'
@staticmethod
def _from_ctypes(novalue):
return None
@staticmethod
def _to_ctypes(novalue):
if novalue is not None:
raise TypeError("None expected, got %s object" %
(type(novalue).__name__,))
return None
CTypesVoid._fix_class()
return CTypesVoid
def new_primitive_type(self, name):
if name == 'wchar_t':
raise NotImplementedError(name)
ctype = self.PRIMITIVE_TYPES[name]
if name == 'char':
kind = 'char'
elif name in ('float', 'double'):
kind = 'float'
else:
if name in ('signed char', 'unsigned char'):
kind = 'byte'
elif name == '_Bool':
kind = 'bool'
else:
kind = 'int'
is_signed = (ctype(-1).value == -1)
#
def _cast_source_to_int(source):
if isinstance(source, (int, long, float)):
source = int(source)
elif isinstance(source, CTypesData):
source = source._cast_to_integer()
elif isinstance(source, bytes):
source = ord(source)
elif source is None:
source = 0
else:
raise TypeError("bad type for cast to %r: %r" %
(CTypesPrimitive, type(source).__name__))
return source
#
kind1 = kind
class CTypesPrimitive(CTypesGenericPrimitive):
__slots__ = ['_value']
_ctype = ctype
_reftypename = '%s &' % name
kind = kind1
def __init__(self, value):
self._value = value
@staticmethod
def _create_ctype_obj(init):
if init is None:
return ctype()
return ctype(CTypesPrimitive._to_ctypes(init))
if kind == 'int' or kind == 'byte':
@classmethod
def _cast_from(cls, source):
source = _cast_source_to_int(source)
source = ctype(source).value # cast within range
return cls(source)
def __int__(self):
return self._value
if kind == 'bool':
@classmethod
def _cast_from(cls, source):
if not isinstance(source, (int, long, float)):
source = _cast_source_to_int(source)
return cls(bool(source))
def __int__(self):
return int(self._value)
if kind == 'char':
@classmethod
def _cast_from(cls, source):
source = _cast_source_to_int(source)
source = bytechr(source & 0xFF)
return cls(source)
def __int__(self):
return ord(self._value)
if kind == 'float':
@classmethod
def _cast_from(cls, source):
if isinstance(source, float):
pass
elif isinstance(source, CTypesGenericPrimitive):
if hasattr(source, '__float__'):
source = float(source)
else:
source = int(source)
else:
source = _cast_source_to_int(source)
source = ctype(source).value # fix precision
return cls(source)
def __int__(self):
return int(self._value)
def __float__(self):
return self._value
_cast_to_integer = __int__
if kind == 'int' or kind == 'byte' or kind == 'bool':
@staticmethod
def _to_ctypes(x):
if not isinstance(x, (int, long)):
if isinstance(x, CTypesData):
x = int(x)
else:
raise TypeError("integer expected, got %s" %
type(x).__name__)
if ctype(x).value != x:
if not is_signed and x < 0:
raise OverflowError("%s: negative integer" % name)
else:
raise OverflowError("%s: integer out of bounds"
% name)
return x
if kind == 'char':
@staticmethod
def _to_ctypes(x):
if isinstance(x, bytes) and len(x) == 1:
return x
if isinstance(x, CTypesPrimitive): # <CData <char>>
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
def __nonzero__(self):
return ord(self._value) != 0
else:
def __nonzero__(self):
return self._value != 0
__bool__ = __nonzero__
if kind == 'float':
@staticmethod
def _to_ctypes(x):
if not isinstance(x, (int, long, float, CTypesData)):
raise TypeError("float expected, got %s" %
type(x).__name__)
return ctype(x).value
@staticmethod
def _from_ctypes(value):
return getattr(value, 'value', value)
@staticmethod
def _initialize(blob, init):
blob.value = CTypesPrimitive._to_ctypes(init)
if kind == 'char':
def _to_string(self, maxlen):
return self._value
if kind == 'byte':
def _to_string(self, maxlen):
return chr(self._value & 0xff)
#
CTypesPrimitive._fix_class()
return CTypesPrimitive
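    # new_pointer_type() specializes the generated class by 'kind':
    # 'charp' keeps its string buffer alive, 'charp'/'voidp' accept raw
    # bytes as call arguments, and 'charp'/'bytep' support _to_string()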
def new_pointer_type(self, BItem):
getbtype = self.ffi._get_cached_btype
if BItem is getbtype(model.PrimitiveType('char')):
kind = 'charp'
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'bytep'
elif BItem is getbtype(model.void_type):
kind = 'voidp'
else:
kind = 'generic'
#
class CTypesPtr(CTypesGenericPtr):
__slots__ = ['_own']
if kind == 'charp':
__slots__ += ['__as_strbuf']
_BItem = BItem
if hasattr(BItem, '_ctype'):
_ctype = ctypes.POINTER(BItem._ctype)
_bitem_size = ctypes.sizeof(BItem._ctype)
else:
_ctype = ctypes.c_void_p
if issubclass(BItem, CTypesGenericArray):
_reftypename = BItem._get_c_name('(* &)')
else:
_reftypename = BItem._get_c_name(' * &')
def __init__(self, init):
ctypeobj = BItem._create_ctype_obj(init)
if kind == 'charp':
self.__as_strbuf = ctypes.create_string_buffer(
ctypeobj.value + b'\x00')
self._as_ctype_ptr = ctypes.cast(
self.__as_strbuf, self._ctype)
else:
self._as_ctype_ptr = ctypes.pointer(ctypeobj)
self._address = ctypes.cast(self._as_ctype_ptr,
ctypes.c_void_p).value
self._own = True
def __add__(self, other):
if isinstance(other, (int, long)):
return self._new_pointer_at(self._address +
other * self._bitem_size)
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, (int, long)):
return self._new_pointer_at(self._address -
other * self._bitem_size)
elif type(self) is type(other):
return (self._address - other._address) // self._bitem_size
else:
return NotImplemented
def __getitem__(self, index):
if getattr(self, '_own', False) and index != 0:
raise IndexError
return BItem._from_ctypes(self._as_ctype_ptr[index])
def __setitem__(self, index, value):
self._as_ctype_ptr[index] = BItem._to_ctypes(value)
if kind == 'charp' or kind == 'voidp':
@classmethod
def _arg_to_ctypes(cls, *value):
if value and isinstance(value[0], bytes):
return ctypes.c_char_p(value[0])
else:
return super(CTypesPtr, cls)._arg_to_ctypes(*value)
if kind == 'charp' or kind == 'bytep':
def _to_string(self, maxlen):
if maxlen < 0:
maxlen = sys.maxsize
p = ctypes.cast(self._as_ctype_ptr,
ctypes.POINTER(ctypes.c_char))
n = 0
while n < maxlen and p[n] != b'\x00':
n += 1
return b''.join([p[i] for i in range(n)])
def _get_own_repr(self):
if getattr(self, '_own', False):
return 'owning %d bytes' % (
ctypes.sizeof(self._as_ctype_ptr.contents),)
return super(CTypesPtr, self)._get_own_repr()
#
if (BItem is self.ffi._get_cached_btype(model.void_type) or
BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))):
CTypesPtr._automatic_casts = True
#
CTypesPtr._fix_class()
return CTypesPtr
def new_array_type(self, CTypesPtr, length):
if length is None:
brackets = ' &[]'
else:
brackets = ' &[%d]' % length
BItem = CTypesPtr._BItem
getbtype = self.ffi._get_cached_btype
if BItem is getbtype(model.PrimitiveType('char')):
kind = 'char'
elif BItem in (getbtype(model.PrimitiveType('signed char')),
getbtype(model.PrimitiveType('unsigned char'))):
kind = 'byte'
else:
kind = 'generic'
#
class CTypesArray(CTypesGenericArray):
__slots__ = ['_blob', '_own']
if length is not None:
_ctype = BItem._ctype * length
else:
__slots__.append('_ctype')
_reftypename = BItem._get_c_name(brackets)
_declared_length = length
_CTPtr = CTypesPtr
def __init__(self, init):
if length is None:
if isinstance(init, (int, long)):
len1 = init
init = None
elif kind == 'char' and isinstance(init, bytes):
len1 = len(init) + 1 # extra null
else:
init = tuple(init)
len1 = len(init)
self._ctype = BItem._ctype * len1
self._blob = self._ctype()
self._own = True
if init is not None:
self._initialize(self._blob, init)
@staticmethod
def _initialize(blob, init):
if isinstance(init, bytes):
init = [init[i:i+1] for i in range(len(init))]
else:
if isinstance(init, CTypesGenericArray):
if (len(init) != len(blob) or
not isinstance(init, CTypesArray)):
raise TypeError("length/type mismatch: %s" % (init,))
init = tuple(init)
if len(init) > len(blob):
raise IndexError("too many initializers")
addr = ctypes.cast(blob, ctypes.c_void_p).value
PTR = ctypes.POINTER(BItem._ctype)
itemsize = ctypes.sizeof(BItem._ctype)
for i, value in enumerate(init):
p = ctypes.cast(addr + i * itemsize, PTR)
BItem._initialize(p.contents, value)
def __len__(self):
return len(self._blob)
def __getitem__(self, index):
if not (0 <= index < len(self._blob)):
raise IndexError
return BItem._from_ctypes(self._blob[index])
def __setitem__(self, index, value):
if not (0 <= index < len(self._blob)):
raise IndexError
self._blob[index] = BItem._to_ctypes(value)
if kind == 'char' or kind == 'byte':
def _to_string(self, maxlen):
if maxlen < 0:
maxlen = len(self._blob)
p = ctypes.cast(self._blob,
ctypes.POINTER(ctypes.c_char))
n = 0
while n < maxlen and p[n] != b'\x00':
n += 1
return b''.join([p[i] for i in range(n)])
def _get_own_repr(self):
if getattr(self, '_own', False):
return 'owning %d bytes' % (ctypes.sizeof(self._blob),)
return super(CTypesArray, self)._get_own_repr()
def _convert_to_address(self, BClass):
if BClass in (CTypesPtr, None) or BClass._automatic_casts:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@staticmethod
def _from_ctypes(ctypes_array):
self = CTypesArray.__new__(CTypesArray)
self._blob = ctypes_array
return self
@staticmethod
def _arg_to_ctypes(value):
return CTypesPtr._arg_to_ctypes(value)
def __add__(self, other):
if isinstance(other, (int, long)):
return CTypesPtr._new_pointer_at(
ctypes.addressof(self._blob) +
other * ctypes.sizeof(BItem._ctype))
else:
return NotImplemented
@classmethod
def _cast_from(cls, source):
raise NotImplementedError("casting to %r" % (
cls._get_c_name(),))
#
CTypesArray._fix_class()
return CTypesArray
def _new_struct_or_union(self, kind, name, base_ctypes_class):
#
class struct_or_union(base_ctypes_class):
pass
struct_or_union.__name__ = '%s_%s' % (kind, name)
kind1 = kind
#
class CTypesStructOrUnion(CTypesBaseStructOrUnion):
__slots__ = ['_blob']
_ctype = struct_or_union
_reftypename = '%s &' % (name,)
_kind = kind = kind1
#
CTypesStructOrUnion._fix_class()
return CTypesStructOrUnion
def new_struct_type(self, name):
return self._new_struct_or_union('struct', name, ctypes.Structure)
def new_union_type(self, name):
return self._new_struct_or_union('union', name, ctypes.Union)
def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp,
totalsize=-1, totalalignment=-1, sflags=0,
pack=0):
if totalsize >= 0 or totalalignment >= 0:
raise NotImplementedError("the ctypes backend of CFFI does not support "
"structures completed by verify(); please "
"compile and install the _cffi_backend module.")
struct_or_union = CTypesStructOrUnion._ctype
fnames = [fname for (fname, BField, bitsize) in fields]
btypes = [BField for (fname, BField, bitsize) in fields]
bitfields = [bitsize for (fname, BField, bitsize) in fields]
#
bfield_types = {}
cfields = []
for (fname, BField, bitsize) in fields:
if bitsize < 0:
cfields.append((fname, BField._ctype))
bfield_types[fname] = BField
else:
cfields.append((fname, BField._ctype, bitsize))
bfield_types[fname] = Ellipsis
if sflags & 8:
struct_or_union._pack_ = 1
elif pack:
struct_or_union._pack_ = pack
struct_or_union._fields_ = cfields
CTypesStructOrUnion._bfield_types = bfield_types
#
@staticmethod
def _create_ctype_obj(init):
result = struct_or_union()
if init is not None:
initialize(result, init)
return result
CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj
#
def initialize(blob, init):
if is_union:
if len(init) > 1:
raise ValueError("union initializer: %d items given, but "
"only one supported (use a dict if needed)"
% (len(init),))
if not isinstance(init, dict):
if isinstance(init, (bytes, unicode)):
raise TypeError("union initializer: got a str")
init = tuple(init)
if len(init) > len(fnames):
raise ValueError("too many values for %s initializer" %
CTypesStructOrUnion._get_c_name())
init = dict(zip(fnames, init))
addr = ctypes.addressof(blob)
for fname, value in init.items():
BField, bitsize = name2fieldtype[fname]
assert bitsize < 0, \
"not implemented: initializer with bit fields"
offset = CTypesStructOrUnion._offsetof(fname)
PTR = ctypes.POINTER(BField._ctype)
p = ctypes.cast(addr + offset, PTR)
BField._initialize(p.contents, value)
is_union = CTypesStructOrUnion._kind == 'union'
name2fieldtype = dict(zip(fnames, zip(btypes, bitfields)))
#
for fname, BField, bitsize in fields:
if fname == '':
raise NotImplementedError("nested anonymous structs/unions")
if hasattr(CTypesStructOrUnion, fname):
raise ValueError("the field name %r conflicts in "
"the ctypes backend" % fname)
if bitsize < 0:
def getter(self, fname=fname, BField=BField,
offset=CTypesStructOrUnion._offsetof(fname),
PTR=ctypes.POINTER(BField._ctype)):
addr = ctypes.addressof(self._blob)
p = ctypes.cast(addr + offset, PTR)
return BField._from_ctypes(p.contents)
def setter(self, value, fname=fname, BField=BField):
setattr(self._blob, fname, BField._to_ctypes(value))
#
if issubclass(BField, CTypesGenericArray):
setter = None
if BField._declared_length == 0:
def getter(self, fname=fname, BFieldPtr=BField._CTPtr,
offset=CTypesStructOrUnion._offsetof(fname),
PTR=ctypes.POINTER(BField._ctype)):
addr = ctypes.addressof(self._blob)
p = ctypes.cast(addr + offset, PTR)
return BFieldPtr._from_ctypes(p)
#
else:
def getter(self, fname=fname, BField=BField):
return BField._from_ctypes(getattr(self._blob, fname))
def setter(self, value, fname=fname, BField=BField):
                        # obscure workaround: ctypes silently truncates a
                        # value that does not fit in the bitfield, so write
                        # it, read it back, and restore + raise on mismatch
value = BField._to_ctypes(value)
oldvalue = getattr(self._blob, fname)
setattr(self._blob, fname, value)
if value != getattr(self._blob, fname):
setattr(self._blob, fname, oldvalue)
raise OverflowError("value too large for bitfield")
setattr(CTypesStructOrUnion, fname, property(getter, setter))
#
CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp))
for fname in fnames:
if hasattr(CTypesPtr, fname):
raise ValueError("the field name %r conflicts in "
"the ctypes backend" % fname)
def getter(self, fname=fname):
return getattr(self[0], fname)
def setter(self, value, fname=fname):
setattr(self[0], fname, value)
setattr(CTypesPtr, fname, property(getter, setter))
def new_function_type(self, BArgs, BResult, has_varargs):
nameargs = [BArg._get_c_name() for BArg in BArgs]
if has_varargs:
nameargs.append('...')
nameargs = ', '.join(nameargs)
#
class CTypesFunctionPtr(CTypesGenericPtr):
__slots__ = ['_own_callback', '_name']
_ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None),
*[BArg._ctype for BArg in BArgs],
use_errno=True)
_reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,))
def __init__(self, init, error=None):
# create a callback to the Python callable init()
import traceback
assert not has_varargs, "varargs not supported for callbacks"
if getattr(BResult, '_ctype', None) is not None:
error = BResult._from_ctypes(
BResult._create_ctype_obj(error))
else:
error = None
def callback(*args):
args2 = []
for arg, BArg in zip(args, BArgs):
args2.append(BArg._from_ctypes(arg))
try:
res2 = init(*args2)
res2 = BResult._to_ctypes(res2)
except:
traceback.print_exc()
res2 = error
if issubclass(BResult, CTypesGenericPtr):
if res2:
res2 = ctypes.cast(res2, ctypes.c_void_p).value
# .value: http://bugs.python.org/issue1574593
else:
res2 = None
#print repr(res2)
return res2
if issubclass(BResult, CTypesGenericPtr):
# The only pointers callbacks can return are void*s:
# http://bugs.python.org/issue5710
callback_ctype = ctypes.CFUNCTYPE(
ctypes.c_void_p,
*[BArg._ctype for BArg in BArgs],
use_errno=True)
else:
callback_ctype = CTypesFunctionPtr._ctype
self._as_ctype_ptr = callback_ctype(callback)
self._address = ctypes.cast(self._as_ctype_ptr,
ctypes.c_void_p).value
self._own_callback = init
@staticmethod
def _initialize(ctypes_ptr, value):
if value:
raise NotImplementedError("ctypes backend: not supported: "
"initializers for function pointers")
def __repr__(self):
c_name = getattr(self, '_name', None)
if c_name:
i = self._reftypename.index('(* &)')
if self._reftypename[i-1] not in ' )*':
c_name = ' ' + c_name
c_name = self._reftypename.replace('(* &)', c_name)
return CTypesData.__repr__(self, c_name)
def _get_own_repr(self):
if getattr(self, '_own_callback', None) is not None:
return 'calling %r' % (self._own_callback,)
return super(CTypesFunctionPtr, self)._get_own_repr()
def __call__(self, *args):
if has_varargs:
assert len(args) >= len(BArgs)
extraargs = args[len(BArgs):]
args = args[:len(BArgs)]
else:
assert len(args) == len(BArgs)
ctypes_args = []
for arg, BArg in zip(args, BArgs):
ctypes_args.append(BArg._arg_to_ctypes(arg))
if has_varargs:
for i, arg in enumerate(extraargs):
if arg is None:
ctypes_args.append(ctypes.c_void_p(0)) # NULL
continue
if not isinstance(arg, CTypesData):
raise TypeError(
"argument %d passed in the variadic part "
"needs to be a cdata object (got %s)" %
(1 + len(BArgs) + i, type(arg).__name__))
ctypes_args.append(arg._arg_to_ctypes(arg))
result = self._as_ctype_ptr(*ctypes_args)
return BResult._from_ctypes(result)
#
CTypesFunctionPtr._fix_class()
return CTypesFunctionPtr
def new_enum_type(self, name, enumerators, enumvalues, CTypesInt):
assert isinstance(name, str)
reverse_mapping = dict(zip(reversed(enumvalues),
reversed(enumerators)))
#
class CTypesEnum(CTypesInt):
__slots__ = []
_reftypename = '%s &' % name
def _get_own_repr(self):
value = self._value
try:
return '%d: %s' % (value, reverse_mapping[value])
except KeyError:
return str(value)
def _to_string(self, maxlen):
value = self._value
try:
return reverse_mapping[value]
except KeyError:
return str(value)
#
CTypesEnum._fix_class()
return CTypesEnum
def get_errno(self):
return ctypes.get_errno()
def set_errno(self, value):
ctypes.set_errno(value)
def string(self, b, maxlen=-1):
return b._to_string(maxlen)
def buffer(self, bptr, size=-1):
raise NotImplementedError("buffer() with ctypes backend")
def sizeof(self, cdata_or_BType):
if isinstance(cdata_or_BType, CTypesData):
return cdata_or_BType._get_size_of_instance()
else:
assert issubclass(cdata_or_BType, CTypesData)
return cdata_or_BType._get_size()
def alignof(self, BType):
assert issubclass(BType, CTypesData)
return BType._alignment()
def newp(self, BType, source):
if not issubclass(BType, CTypesData):
raise TypeError
return BType._newp(source)
def cast(self, BType, source):
return BType._cast_from(source)
def callback(self, BType, source, error, onerror):
assert onerror is None # XXX not implemented
return BType(source, error)
_weakref_cache_ref = None
def gcp(self, cdata, destructor, size=0):
if self._weakref_cache_ref is None:
import weakref
class MyRef(weakref.ref):
def __eq__(self, other):
myref = self()
return self is other or (
myref is not None and myref is other())
def __ne__(self, other):
return not (self == other)
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(self())
return self._hash
self._weakref_cache_ref = {}, MyRef
weak_cache, MyRef = self._weakref_cache_ref
if destructor is None:
try:
del weak_cache[MyRef(cdata)]
except KeyError:
raise TypeError("Can remove destructor only on a object "
"previously returned by ffi.gc()")
return None
def remove(k):
cdata, destructor = weak_cache.pop(k, (None, None))
if destructor is not None:
destructor(cdata)
new_cdata = self.cast(self.typeof(cdata), cdata)
assert new_cdata is not cdata
weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor)
return new_cdata
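    # in this backend a BType is an ordinary Python class, so typeof()
    # of a cdata instance is plain type()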
typeof = type
def getcname(self, BType, replace_with):
return BType._get_c_name(replace_with)
def typeoffsetof(self, BType, fieldname, num=0):
if isinstance(fieldname, str):
if num == 0 and issubclass(BType, CTypesGenericPtr):
BType = BType._BItem
if not issubclass(BType, CTypesBaseStructOrUnion):
raise TypeError("expected a struct or union ctype")
BField = BType._bfield_types[fieldname]
if BField is Ellipsis:
raise TypeError("not supported for bitfields")
return (BField, BType._offsetof(fieldname))
elif isinstance(fieldname, (int, long)):
if issubclass(BType, CTypesGenericArray):
BType = BType._CTPtr
if not issubclass(BType, CTypesGenericPtr):
raise TypeError("expected an array or ptr ctype")
BItem = BType._BItem
offset = BItem._get_size() * fieldname
if offset > sys.maxsize:
raise OverflowError
return (BItem, offset)
else:
raise TypeError(type(fieldname))
def rawaddressof(self, BTypePtr, cdata, offset=None):
if isinstance(cdata, CTypesBaseStructOrUnion):
ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
elif isinstance(cdata, CTypesGenericPtr):
if offset is None or not issubclass(type(cdata)._BItem,
CTypesBaseStructOrUnion):
raise TypeError("unexpected cdata type")
ptr = type(cdata)._to_ctypes(cdata)
elif isinstance(cdata, CTypesGenericArray):
ptr = type(cdata)._to_ctypes(cdata)
else:
raise TypeError("expected a <cdata 'struct-or-union'>")
if offset:
ptr = ctypes.cast(
ctypes.c_void_p(
ctypes.cast(ptr, ctypes.c_void_p).value + offset),
type(ptr))
return BTypePtr._from_ctypes(ptr)
class CTypesLibrary(object):
def __init__(self, backend, cdll):
self.backend = backend
self.cdll = cdll
def load_function(self, BType, name):
c_func = getattr(self.cdll, name)
funcobj = BType._from_ctypes(c_func)
funcobj._name = name
return funcobj
def read_variable(self, BType, name):
try:
ctypes_obj = BType._ctype.in_dll(self.cdll, name)
except AttributeError as e:
raise NotImplementedError(e)
return BType._from_ctypes(ctypes_obj)
def write_variable(self, BType, name, value):
new_ctypes_obj = BType._to_ctypes(value)
ctypes_obj = BType._ctype.in_dll(self.cdll, name)
ctypes.memmove(ctypes.addressof(ctypes_obj),
ctypes.addressof(new_ctypes_obj),
ctypes.sizeof(BType._ctype))
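# Hedged usage sketch (illustration only, not part of upstream cffi):
# exercising the ctypes backend directly.  The primitive name 'int' comes
# from PRIMITIVE_TYPES above; everything else is example scaffolding.
if __name__ == '__main__':   # pragma: no cover
    _backend = CTypesBackend()
    _BInt = _backend.new_primitive_type('int')
    assert _backend.sizeof(_BInt) == ctypes.sizeof(ctypes.c_int)
    _x = _backend.cast(_BInt, 42)
    assert int(_x) == 42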
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/vengine_cpy.py | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, imp
from . import model
from .error import VerificationError
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
# The following two 'chained_list_constants' items contains
# the head of these two chained lists, as a string that gives the
# call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
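        # illustration (not literal output): with two constants FOO and
        # BAR, the generated chain reads
        #     static int _cffi_const_BAR(PyObject *lib) {
        #         ... return _cffi_const_FOO(lib); }
        # and the head stored here becomes '_cffi_const_BAR(lib)'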
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars, freelines):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
freelines.add('if (large_args_free != NULL)'
' _cffi_free_array_arguments(large_args_free);')
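    # pointer/array arguments of at most 640 bytes are copied by the
    # generated code below into alloca()'d stack space; larger ones are
    # heap-allocated and released through the 'large_args_free' chain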
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' %s = ((size_t)datasize) <= 640 ? '
'alloca((size_t)datasize) : NULL;' % (tovar,))
self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, '
'(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar))
self._prnt(' datasize, &large_args_free) < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructOrUnion):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
freelines = set()
for type in tp.args:
self._extra_local_variables(type, localvars, freelines)
for decl in sorted(localvars):
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
prnt(' PyObject *pyresult;')
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' pyresult = %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
for freeline in freelines:
prnt(' ' + freeline)
prnt(' return pyresult;')
else:
for freeline in freelines:
prnt(' ' + freeline)
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
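    # for an integer constant FOO, the method above emits C of roughly
    # this shape (illustrative, not literal output):
    #     static int _cffi_const_FOO(PyObject *lib) {
    #         PyObject *o = _cffi_from_c_int_const(FOO);
    #         if (o == NULL) return -1;
    #         if (PyObject_SetAttrString(lib, "FOO", o) < 0) return -1;
    #         return <previous head of the chained list>;
    #     }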
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = tp.length_is_unknown())
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length_is_unknown():
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
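    # Editor's sketch of the property trick above as standalone Python,
    # with a plain list standing in for the <cdata 'int *'> (an assumption,
    # not the real cdata object):
    #
    #     class Lib(object):
    #         _cffi_dir = []
    #     lib = Lib()
    #     ptr = [0]
    #     def getter(library): return ptr[0]
    #     def setter(library, value): ptr[0] = value
    #     type(lib).x = property(getter, setter)
    #     lib.x = 7          # writes ptr[0]
    #     assert ptr[0] == 7 and lib.x == 7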
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
and cffi/_cffi_include.h */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
# ifndef __cplusplus
typedef unsigned char _Bool;
# endif
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_from_c__Bool PyBool_FromLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
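/* Editor's example (informative only): for a 2-byte unsigned type such
   as 'unsigned short', _cffi_to_c_int(o, unsigned short) reduces to
   (unsigned short)_cffi_to_c_u16(o), because sizeof(type) == 2 and
   ((type)-1) > 0 selects the unsigned branch. */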
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
union _cffi_union_alignment_u {
unsigned char m_char;
unsigned short m_short;
unsigned int m_int;
unsigned long m_long;
unsigned long long m_longlong;
float m_float;
double m_double;
long double m_longdouble;
};
struct _cffi_freeme_s {
struct _cffi_freeme_s *next;
union _cffi_union_alignment_u alignment;
};
#ifdef __GNUC__
__attribute__((unused))
#endif
static int _cffi_convert_array_argument(CTypeDescrObject *ctptr, PyObject *arg,
char **output_data, Py_ssize_t datasize,
struct _cffi_freeme_s **freeme)
{
char *p;
if (datasize < 0)
return -1;
p = *output_data;
if (p == NULL) {
struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc(
offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize);
if (fp == NULL)
return -1;
fp->next = *freeme;
*freeme = fp;
p = *output_data = (char *)&fp->alignment;
}
memset((void *)p, 0, (size_t)datasize);
return _cffi_convert_array_from_object(p, ctptr, arg);
}
#ifdef __GNUC__
__attribute__((unused))
#endif
static void _cffi_free_array_arguments(struct _cffi_freeme_s *freeme)
{
do {
void *p = (void *)freeme;
freeme = freeme->next;
PyObject_Free(p);
} while (freeme != NULL);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/verifier.py | #
# DEPRECATED: implementation for ffi.verify()
#
import sys, os, binascii, shutil, io
from . import __version_verifier_modules__
from . import ffiplatform
from .error import VerificationError
if sys.version_info >= (3, 3):
import importlib.machinery
def _extension_suffixes():
return importlib.machinery.EXTENSION_SUFFIXES[:]
else:
import imp
def _extension_suffixes():
return [suffix for suffix, _, type in imp.get_suffixes()
if type == imp.C_EXTENSION]
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
class Verifier(object):
def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
ext_package=None, tag='', force_generic_engine=False,
source_extension='.c', flags=None, relative_to=None, **kwds):
if ffi._parser._uses_new_feature:
raise VerificationError(
"feature not supported with ffi.verify(), but only "
"with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,))
self.ffi = ffi
self.preamble = preamble
if not modulename:
flattened_kwds = ffiplatform.flatten(kwds)
vengine_class = _locate_engine_class(ffi, force_generic_engine)
self._vengine = vengine_class(self)
self._vengine.patch_extension_kwds(kwds)
self.flags = flags
self.kwds = self.make_relative_to(kwds, relative_to)
#
if modulename:
if tag:
raise TypeError("can't specify both 'modulename' and 'tag'")
else:
key = '\x00'.join(['%d.%d' % sys.version_info[:2],
__version_verifier_modules__,
preamble, flattened_kwds] +
ffi._cdefsources)
if sys.version_info >= (3,):
key = key.encode('utf-8')
            k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
            k1 = k1.lstrip('0x').rstrip('L')  # drops leading '0'/'x' chars
            k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
            k2 = k2.lstrip('0').rstrip('L')   # keeps the 'x' as a separator
modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
k1, k2)
suffix = _get_so_suffixes()[0]
self.tmpdir = tmpdir or _caller_dir_pycache()
self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension)
self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
self.ext_package = ext_package
self._has_source = False
self._has_module = False
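    # Editor's sketch of the CRC32-based naming above, assuming only the
    # standard library (the key content is illustrative, not the real
    # layout):
    #
    #     import binascii
    #     key = b'3.8\x000.8.6\x00<preamble>\x00<cdef sources>'
    #     k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)  # even bytes
    #     k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)  # odd bytes
    #     # the module is then named '_cffi_<tag><class_key><k1><k2>'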
def write_source(self, file=None):
"""Write the C source code. It is produced in 'self.sourcefilename',
which can be tweaked beforehand."""
with self.ffi._lock:
if self._has_source and file is None:
raise VerificationError(
"source code already written")
self._write_source(file)
def compile_module(self):
"""Write the C source code (if not done already) and compile it.
This produces a dynamic link library in 'self.modulefilename'."""
with self.ffi._lock:
if self._has_module:
raise VerificationError("module already compiled")
if not self._has_source:
self._write_source()
self._compile_module()
def load_library(self):
"""Get a C module from this Verifier instance.
        Returns an instance of an FFILibrary class that behaves like the
objects returned by ffi.dlopen(), but that delegates all
operations to the C module. If necessary, the C code is written
and compiled first.
"""
with self.ffi._lock:
if not self._has_module:
self._locate_module()
if not self._has_module:
if not self._has_source:
self._write_source()
self._compile_module()
return self._load_library()
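    # Hedged usage sketch (editor's note) of the deprecated flow that this
    # class backs; it needs a C compiler available at runtime:
    #
    #     from cffi import FFI
    #     ffi = FFI()
    #     ffi.cdef("double sin(double x);")
    #     lib = ffi.verify("#include <math.h>", libraries=['m'])
    #     assert abs(lib.sin(0.0)) < 1e-9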
def get_module_name(self):
basename = os.path.basename(self.modulefilename)
# kill both the .so extension and the other .'s, as introduced
# by Python 3: 'basename.cpython-33m.so'
basename = basename.split('.', 1)[0]
# and the _d added in Python 2 debug builds --- but try to be
# conservative and not kill a legitimate _d
if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
basename = basename[:-2]
return basename
def get_extension(self):
ffiplatform._hack_at_distutils() # backward compatibility hack
if not self._has_source:
with self.ffi._lock:
if not self._has_source:
self._write_source()
sourcename = ffiplatform.maybe_relative_path(self.sourcefilename)
modname = self.get_module_name()
return ffiplatform.get_extension(sourcename, modname, **self.kwds)
def generates_python_module(self):
return self._vengine._gen_python_module
def make_relative_to(self, kwds, relative_to):
if relative_to and os.path.dirname(relative_to):
dirname = os.path.dirname(relative_to)
kwds = kwds.copy()
for key in ffiplatform.LIST_OF_FILE_NAMES:
if key in kwds:
lst = kwds[key]
if not isinstance(lst, (list, tuple)):
raise TypeError("keyword '%s' should be a list or tuple"
% (key,))
lst = [os.path.join(dirname, fn) for fn in lst]
kwds[key] = lst
return kwds
# ----------
def _locate_module(self):
if not os.path.isfile(self.modulefilename):
if self.ext_package:
try:
pkg = __import__(self.ext_package, None, None, ['__doc__'])
except ImportError:
return # cannot import the package itself, give up
# (e.g. it might be called differently before installation)
path = pkg.__path__
else:
path = None
filename = self._vengine.find_module(self.get_module_name(), path,
_get_so_suffixes())
if filename is None:
return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
def _write_source_to(self, file):
self._vengine._f = file
try:
self._vengine.write_source_to_f()
finally:
del self._vengine._f
def _write_source(self, file=None):
if file is not None:
self._write_source_to(file)
else:
            # Write our source file to an in-memory file.
f = NativeIO()
self._write_source_to(f)
source_data = f.getvalue()
# Determine if this matches the current file
if os.path.exists(self.sourcefilename):
with open(self.sourcefilename, "r") as fp:
                    needs_written = (fp.read() != source_data)
else:
needs_written = True
# Actually write the file out if it doesn't match
if needs_written:
_ensure_dir(self.sourcefilename)
with open(self.sourcefilename, "w") as fp:
fp.write(source_data)
# Set this flag
self._has_source = True
def _compile_module(self):
# compile this C source
tmpdir = os.path.dirname(self.sourcefilename)
outputfilename = ffiplatform.compile(tmpdir, self.get_extension())
try:
same = ffiplatform.samefile(outputfilename, self.modulefilename)
except OSError:
same = False
if not same:
_ensure_dir(self.modulefilename)
shutil.move(outputfilename, self.modulefilename)
self._has_module = True
def _load_library(self):
assert self._has_module
if self.flags is not None:
return self._vengine.load_library(self.flags)
else:
return self._vengine.load_library()
# ____________________________________________________________
_FORCE_GENERIC_ENGINE = False # for tests
def _locate_engine_class(ffi, force_generic_engine):
if _FORCE_GENERIC_ENGINE:
force_generic_engine = True
if not force_generic_engine:
if '__pypy__' in sys.builtin_module_names:
force_generic_engine = True
else:
try:
import _cffi_backend
except ImportError:
_cffi_backend = '?'
if ffi._backend is not _cffi_backend:
force_generic_engine = True
if force_generic_engine:
from . import vengine_gen
return vengine_gen.VGenericEngine
else:
from . import vengine_cpy
return vengine_cpy.VCPythonEngine
# ____________________________________________________________
_TMPDIR = None
def _caller_dir_pycache():
if _TMPDIR:
return _TMPDIR
result = os.environ.get('CFFI_TMPDIR')
if result:
return result
filename = sys._getframe(2).f_code.co_filename
return os.path.abspath(os.path.join(os.path.dirname(filename),
'__pycache__'))
def set_tmpdir(dirname):
"""Set the temporary directory to use instead of __pycache__."""
global _TMPDIR
_TMPDIR = dirname
def cleanup_tmpdir(tmpdir=None, keep_so=False):
"""Clean up the temporary directory by removing all files in it
called `_cffi_*.{c,so}` as well as the `build` subdirectory."""
tmpdir = tmpdir or _caller_dir_pycache()
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c' # only remove .c files
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if fn.lower().startswith('_cffi_') and (
fn.lower().endswith(suffix) or fn.lower().endswith('.c')):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
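# Editor's sketch (illustrative paths) of the two helpers above:
#
#     import cffi.verifier
#     cffi.verifier.set_tmpdir('/tmp/my_cffi_cache')  # hypothetical dir
#     cffi.verifier.cleanup_tmpdir(keep_so=True)      # drop only .c files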
def _get_so_suffixes():
suffixes = _extension_suffixes()
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def _ensure_dir(filename):
dirname = os.path.dirname(filename)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/parse_c_type.h |
/* This part is from file 'cffi/parse_c_type.h'. It is copied at the
beginning of C sources generated by CFFI's ffi.set_source(). */
typedef void *_cffi_opcode_t;
#define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8))
#define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode)
#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8)
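/* Editor's example (informative): _CFFI_OP(_CFFI_OP_POINTER, 42) packs
   to (42 << 8) | 3 == 0x2a03; _CFFI_GETOP() recovers the opcode 3 and
   _CFFI_GETARG() recovers the argument 42. */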
#define _CFFI_OP_PRIMITIVE 1
#define _CFFI_OP_POINTER 3
#define _CFFI_OP_ARRAY 5
#define _CFFI_OP_OPEN_ARRAY 7
#define _CFFI_OP_STRUCT_UNION 9
#define _CFFI_OP_ENUM 11
#define _CFFI_OP_FUNCTION 13
#define _CFFI_OP_FUNCTION_END 15
#define _CFFI_OP_NOOP 17
#define _CFFI_OP_BITFIELD 19
#define _CFFI_OP_TYPENAME 21
#define _CFFI_OP_CPYTHON_BLTN_V 23 // varargs
#define _CFFI_OP_CPYTHON_BLTN_N 25 // noargs
#define _CFFI_OP_CPYTHON_BLTN_O 27 // O (i.e. a single arg)
#define _CFFI_OP_CONSTANT 29
#define _CFFI_OP_CONSTANT_INT 31
#define _CFFI_OP_GLOBAL_VAR 33
#define _CFFI_OP_DLOPEN_FUNC 35
#define _CFFI_OP_DLOPEN_CONST 37
#define _CFFI_OP_GLOBAL_VAR_F 39
#define _CFFI_OP_EXTERN_PYTHON 41
#define _CFFI_PRIM_VOID 0
#define _CFFI_PRIM_BOOL 1
#define _CFFI_PRIM_CHAR 2
#define _CFFI_PRIM_SCHAR 3
#define _CFFI_PRIM_UCHAR 4
#define _CFFI_PRIM_SHORT 5
#define _CFFI_PRIM_USHORT 6
#define _CFFI_PRIM_INT 7
#define _CFFI_PRIM_UINT 8
#define _CFFI_PRIM_LONG 9
#define _CFFI_PRIM_ULONG 10
#define _CFFI_PRIM_LONGLONG 11
#define _CFFI_PRIM_ULONGLONG 12
#define _CFFI_PRIM_FLOAT 13
#define _CFFI_PRIM_DOUBLE 14
#define _CFFI_PRIM_LONGDOUBLE 15
#define _CFFI_PRIM_WCHAR 16
#define _CFFI_PRIM_INT8 17
#define _CFFI_PRIM_UINT8 18
#define _CFFI_PRIM_INT16 19
#define _CFFI_PRIM_UINT16 20
#define _CFFI_PRIM_INT32 21
#define _CFFI_PRIM_UINT32 22
#define _CFFI_PRIM_INT64 23
#define _CFFI_PRIM_UINT64 24
#define _CFFI_PRIM_INTPTR 25
#define _CFFI_PRIM_UINTPTR 26
#define _CFFI_PRIM_PTRDIFF 27
#define _CFFI_PRIM_SIZE 28
#define _CFFI_PRIM_SSIZE 29
#define _CFFI_PRIM_INT_LEAST8 30
#define _CFFI_PRIM_UINT_LEAST8 31
#define _CFFI_PRIM_INT_LEAST16 32
#define _CFFI_PRIM_UINT_LEAST16 33
#define _CFFI_PRIM_INT_LEAST32 34
#define _CFFI_PRIM_UINT_LEAST32 35
#define _CFFI_PRIM_INT_LEAST64 36
#define _CFFI_PRIM_UINT_LEAST64 37
#define _CFFI_PRIM_INT_FAST8 38
#define _CFFI_PRIM_UINT_FAST8 39
#define _CFFI_PRIM_INT_FAST16 40
#define _CFFI_PRIM_UINT_FAST16 41
#define _CFFI_PRIM_INT_FAST32 42
#define _CFFI_PRIM_UINT_FAST32 43
#define _CFFI_PRIM_INT_FAST64 44
#define _CFFI_PRIM_UINT_FAST64 45
#define _CFFI_PRIM_INTMAX 46
#define _CFFI_PRIM_UINTMAX 47
#define _CFFI_PRIM_FLOATCOMPLEX 48
#define _CFFI_PRIM_DOUBLECOMPLEX 49
#define _CFFI_PRIM_CHAR16 50
#define _CFFI_PRIM_CHAR32 51
#define _CFFI__NUM_PRIM 52
#define _CFFI__UNKNOWN_PRIM (-1)
#define _CFFI__UNKNOWN_FLOAT_PRIM (-2)
#define _CFFI__UNKNOWN_LONG_DOUBLE (-3)
#define _CFFI__IO_FILE_STRUCT (-1)
struct _cffi_global_s {
const char *name;
void *address;
_cffi_opcode_t type_op;
void *size_or_direct_fn; // OP_GLOBAL_VAR: size, or 0 if unknown
// OP_CPYTHON_BLTN_*: addr of direct function
};
struct _cffi_getconst_s {
unsigned long long value;
const struct _cffi_type_context_s *ctx;
int gindex;
};
struct _cffi_struct_union_s {
const char *name;
int type_index; // -> _cffi_types, on a OP_STRUCT_UNION
int flags; // _CFFI_F_* flags below
size_t size;
int alignment;
int first_field_index; // -> _cffi_fields array
int num_fields;
};
#define _CFFI_F_UNION 0x01 // is a union, not a struct
#define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the
// "standard layout" or if some are missing
#define _CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a packed struct
#define _CFFI_F_EXTERNAL 0x08 // in some other ffi.include()
#define _CFFI_F_OPAQUE 0x10 // opaque
struct _cffi_field_s {
const char *name;
size_t field_offset;
size_t field_size;
_cffi_opcode_t field_type_op;
};
struct _cffi_enum_s {
const char *name;
int type_index; // -> _cffi_types, on a OP_ENUM
int type_prim; // _CFFI_PRIM_xxx
const char *enumerators; // comma-delimited string
};
struct _cffi_typename_s {
const char *name;
int type_index; /* if opaque, points to a possibly artificial
OP_STRUCT which is itself opaque */
};
struct _cffi_type_context_s {
_cffi_opcode_t *types;
const struct _cffi_global_s *globals;
const struct _cffi_field_s *fields;
const struct _cffi_struct_union_s *struct_unions;
const struct _cffi_enum_s *enums;
const struct _cffi_typename_s *typenames;
int num_globals;
int num_struct_unions;
int num_enums;
int num_typenames;
const char *const *includes;
int num_types;
int flags; /* future extension */
};
struct _cffi_parse_info_s {
const struct _cffi_type_context_s *ctx;
_cffi_opcode_t *output;
unsigned int output_size;
size_t error_location;
const char *error_message;
};
struct _cffi_externpy_s {
const char *name;
size_t size_of_result;
void *reserved1, *reserved2;
};
#ifdef _CFFI_INTERNAL
static int parse_c_type(struct _cffi_parse_info_s *info, const char *input);
static int search_in_globals(const struct _cffi_type_context_s *ctx,
const char *search, size_t search_len);
static int search_in_struct_unions(const struct _cffi_type_context_s *ctx,
const char *search, size_t search_len);
#endif
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/__init__.py | __all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
'FFIError']
from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
from .error import PkgConfigError
__version__ = "1.15.1"
__version_info__ = (1, 15, 1)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/commontypes.py | import sys
from . import model
from .error import FFIError
COMMON_TYPES = {}
try:
# fetch "bool" and all simple Windows types
from _cffi_backend import _get_common_types
_get_common_types(COMMON_TYPES)
except ImportError:
pass
COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE')
COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above
for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
if _type.endswith('_t'):
COMMON_TYPES[_type] = _type
del _type
_CACHE = {}
def resolve_common_type(parser, commontype):
try:
return _CACHE[commontype]
except KeyError:
cdecl = COMMON_TYPES.get(commontype, commontype)
if not isinstance(cdecl, str):
result, quals = cdecl, 0 # cdecl is already a BaseType
elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
result, quals = model.PrimitiveType(cdecl), 0
elif cdecl == 'set-unicode-needed':
raise FFIError("The Windows type %r is only available after "
"you call ffi.set_unicode()" % (commontype,))
else:
if commontype == cdecl:
raise FFIError(
"Unsupported type: %r. Please look at "
"http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
"and file an issue if you think this type should really "
"be supported." % (commontype,))
result, quals = parser.parse_type_and_quals(cdecl) # recursive
assert isinstance(result, model.BaseTypeByIdentity)
_CACHE[commontype] = result, quals
return result, quals
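# Editor's illustration of the memoization above (informative):
#   resolve_common_type(parser, 'int32_t') -> (PrimitiveType('int32_t'), 0)
#   and the pair is cached in _CACHE under the key 'int32_t', so later
#   lookups skip the parsing path entirely.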
# ____________________________________________________________
# extra types for Windows (most of them are in commontypes.c)
def win_common_types():
return {
"UNICODE_STRING": model.StructType(
"_UNICODE_STRING",
["Length",
"MaximumLength",
"Buffer"],
[model.PrimitiveType("unsigned short"),
model.PrimitiveType("unsigned short"),
model.PointerType(model.PrimitiveType("wchar_t"))],
[-1, -1, -1]),
"PUNICODE_STRING": "UNICODE_STRING *",
"PCUNICODE_STRING": "const UNICODE_STRING *",
"TBYTE": "set-unicode-needed",
"TCHAR": "set-unicode-needed",
"LPCTSTR": "set-unicode-needed",
"PCTSTR": "set-unicode-needed",
"LPTSTR": "set-unicode-needed",
"PTSTR": "set-unicode-needed",
"PTBYTE": "set-unicode-needed",
"PTCHAR": "set-unicode-needed",
}
if sys.platform == 'win32':
COMMON_TYPES.update(win_common_types())
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/model.py | import types
import weakref
from .lock import allocate_lock
from .error import CDefError, VerificationError, VerificationMissing
# type qualifiers
Q_CONST = 0x01
Q_RESTRICT = 0x02
Q_VOLATILE = 0x04
def qualify(quals, replace_with):
if quals & Q_CONST:
replace_with = ' const ' + replace_with.lstrip()
if quals & Q_VOLATILE:
replace_with = ' volatile ' + replace_with.lstrip()
if quals & Q_RESTRICT:
# It seems that __restrict is supported by gcc and msvc.
# If you hit some different compiler, add a #define in
# _cffi_include.h for it (and in its copies, documented there)
replace_with = ' __restrict ' + replace_with.lstrip()
return replace_with
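# A small illustration (editor's note) of qualify():
#   qualify(Q_CONST, '*p')              -> ' const *p'
#   qualify(Q_CONST | Q_VOLATILE, '*p') -> ' volatile const *p'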
class BaseTypeByIdentity(object):
is_array_type = False
is_raw_function = False
def get_c_name(self, replace_with='', context='a C file', quals=0):
result = self.c_name_with_marker
assert result.count('&') == 1
# some logic duplication with ffi.getctype()... :-(
replace_with = replace_with.strip()
if replace_with:
if replace_with.startswith('*') and '&[' in result:
replace_with = '(%s)' % replace_with
elif not replace_with[0] in '[(':
replace_with = ' ' + replace_with
replace_with = qualify(quals, replace_with)
result = result.replace('&', replace_with)
if '$' in result:
raise VerificationError(
"cannot generate '%s' in %s: unknown type name"
% (self._get_c_name(), context))
return result
def _get_c_name(self):
return self.c_name_with_marker.replace('&', '')
def has_c_name(self):
return '$' not in self._get_c_name()
def is_integer_type(self):
return False
def get_cached_btype(self, ffi, finishlist, can_delay=False):
try:
BType = ffi._cached_btypes[self]
except KeyError:
BType = self.build_backend_type(ffi, finishlist)
BType2 = ffi._cached_btypes.setdefault(self, BType)
assert BType2 is BType
return BType
def __repr__(self):
return '<%s>' % (self._get_c_name(),)
def _get_items(self):
return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._get_items() == other._get_items())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
_attrs_ = ()
def __init__(self):
self.c_name_with_marker = 'void&'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_void_type')
void_type = VoidType()
class BasePrimitiveType(BaseType):
def is_complex_type(self):
return False
class PrimitiveType(BasePrimitiveType):
_attrs_ = ('name',)
ALL_PRIMITIVE_TYPES = {
'char': 'c',
'short': 'i',
'int': 'i',
'long': 'i',
'long long': 'i',
'signed char': 'i',
'unsigned char': 'i',
'unsigned short': 'i',
'unsigned int': 'i',
'unsigned long': 'i',
'unsigned long long': 'i',
'float': 'f',
'double': 'f',
'long double': 'f',
'float _Complex': 'j',
'double _Complex': 'j',
'_Bool': 'i',
# the following types are not primitive in the C sense
'wchar_t': 'c',
'char16_t': 'c',
'char32_t': 'c',
'int8_t': 'i',
'uint8_t': 'i',
'int16_t': 'i',
'uint16_t': 'i',
'int32_t': 'i',
'uint32_t': 'i',
'int64_t': 'i',
'uint64_t': 'i',
'int_least8_t': 'i',
'uint_least8_t': 'i',
'int_least16_t': 'i',
'uint_least16_t': 'i',
'int_least32_t': 'i',
'uint_least32_t': 'i',
'int_least64_t': 'i',
'uint_least64_t': 'i',
'int_fast8_t': 'i',
'uint_fast8_t': 'i',
'int_fast16_t': 'i',
'uint_fast16_t': 'i',
'int_fast32_t': 'i',
'uint_fast32_t': 'i',
'int_fast64_t': 'i',
'uint_fast64_t': 'i',
'intptr_t': 'i',
'uintptr_t': 'i',
'intmax_t': 'i',
'uintmax_t': 'i',
'ptrdiff_t': 'i',
'size_t': 'i',
'ssize_t': 'i',
}
def __init__(self, name):
assert name in self.ALL_PRIMITIVE_TYPES
self.name = name
self.c_name_with_marker = name + '&'
def is_char_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'
def is_integer_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'
def is_float_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
def is_complex_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'j'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_primitive_type', self.name)
class UnknownIntegerType(BasePrimitiveType):
_attrs_ = ('name',)
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def is_integer_type(self):
return True
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("integer type '%s' can only be used after "
"compilation" % self.name)
class UnknownFloatType(BasePrimitiveType):
_attrs_ = ('name', )
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("float type '%s' can only be used after "
"compilation" % self.name)
class BaseFunctionType(BaseType):
_attrs_ = ('args', 'result', 'ellipsis', 'abi')
def __init__(self, args, result, ellipsis, abi=None):
self.args = args
self.result = result
self.ellipsis = ellipsis
self.abi = abi
#
reprargs = [arg._get_c_name() for arg in self.args]
if self.ellipsis:
reprargs.append('...')
reprargs = reprargs or ['void']
replace_with = self._base_pattern % (', '.join(reprargs),)
if abi is not None:
replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
self.c_name_with_marker = (
self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
# Corresponds to a C type like 'int(int)', which is the C type of
# a function, but not a pointer-to-function. The backend has no
# notion of such a type; it's used temporarily by parsing.
_base_pattern = '(&)(%s)'
is_raw_function = True
def build_backend_type(self, ffi, finishlist):
raise CDefError("cannot render the type %r: it is a function "
"type, not a pointer-to-function type" % (self,))
def as_function_pointer(self):
return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
class FunctionPtrType(BaseFunctionType):
_base_pattern = '(*&)(%s)'
def build_backend_type(self, ffi, finishlist):
result = self.result.get_cached_btype(ffi, finishlist)
args = []
for tp in self.args:
args.append(tp.get_cached_btype(ffi, finishlist))
abi_args = ()
if self.abi == "__stdcall":
if not self.ellipsis: # __stdcall ignored for variadic funcs
try:
abi_args = (ffi._backend.FFI_STDCALL,)
except AttributeError:
pass
return global_cache(self, ffi, 'new_function_type',
tuple(args), result, self.ellipsis, *abi_args)
def as_raw_function(self):
return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
class PointerType(BaseType):
_attrs_ = ('totype', 'quals')
def __init__(self, totype, quals=0):
self.totype = totype
self.quals = quals
extra = qualify(quals, " *&")
if totype.is_array_type:
extra = "(%s)" % (extra.lstrip(),)
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
def build_backend_type(self, ffi, finishlist):
BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
return global_cache(self, ffi, 'new_pointer_type', BItem)
voidp_type = PointerType(void_type)
def ConstPointerType(totype):
return PointerType(totype, Q_CONST)
const_voidp_type = ConstPointerType(void_type)
class NamedPointerType(PointerType):
_attrs_ = ('totype', 'name')
def __init__(self, totype, name, quals=0):
PointerType.__init__(self, totype, quals)
self.name = name
self.c_name_with_marker = name + '&'
class ArrayType(BaseType):
_attrs_ = ('item', 'length')
is_array_type = True
def __init__(self, item, length):
self.item = item
self.length = length
#
if length is None:
brackets = '&[]'
elif length == '...':
brackets = '&[/*...*/]'
else:
brackets = '&[%s]' % length
self.c_name_with_marker = (
self.item.c_name_with_marker.replace('&', brackets))
def length_is_unknown(self):
return isinstance(self.length, str)
def resolve_length(self, newlength):
return ArrayType(self.item, newlength)
def build_backend_type(self, ffi, finishlist):
if self.length_is_unknown():
raise CDefError("cannot render the type %r: unknown length" %
(self,))
self.item.get_cached_btype(ffi, finishlist) # force the item BType
BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)
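# Editor's illustration of the '&' marker convention used by the classes
# above:
#   ArrayType(PrimitiveType('int'), 5).c_name_with_marker == 'int&[5]'
#   and calling get_c_name('x') on it yields 'int x[5]'.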
char_array_type = ArrayType(PrimitiveType('char'), None)
class StructOrUnionOrEnum(BaseTypeByIdentity):
_attrs_ = ('name',)
forcename = None
def build_c_name_with_marker(self):
name = self.forcename or '%s %s' % (self.kind, self.name)
self.c_name_with_marker = name + '&'
def force_the_name(self, forcename):
self.forcename = forcename
self.build_c_name_with_marker()
def get_official_name(self):
assert self.c_name_with_marker.endswith('&')
return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
fixedlayout = None
completed = 0
partial = False
packed = 0
def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
self.fldquals = fldquals
self.build_c_name_with_marker()
def anonymous_struct_fields(self):
if self.fldtypes is not None:
for name, type in zip(self.fldnames, self.fldtypes):
if name == '' and isinstance(type, StructOrUnion):
yield type
def enumfields(self, expand_anonymous_struct_union=True):
fldquals = self.fldquals
if fldquals is None:
fldquals = (0,) * len(self.fldnames)
for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
self.fldbitsize, fldquals):
if (name == '' and isinstance(type, StructOrUnion)
and expand_anonymous_struct_union):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
yield (name, type, bitsize, quals)
def force_flatten(self):
# force the struct or union to have a declaration that lists
# directly all fields returned by enumfields(), flattening
# nested anonymous structs/unions.
names = []
types = []
bitsizes = []
fldquals = []
for name, type, bitsize, quals in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
fldquals.append(quals)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
self.fldquals = tuple(fldquals)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
can_delay)
if not can_delay:
self.finish_backend_type(ffi, finishlist)
return BType
def finish_backend_type(self, ffi, finishlist):
if self.completed:
if self.completed != 2:
raise NotImplementedError("recursive structure declaration "
"for '%s'" % (self.name,))
return
BType = ffi._cached_btypes[self]
#
self.completed = 1
#
if self.fldtypes is None:
pass # not completing it: it's an opaque struct
#
elif self.fixedlayout is None:
fldtypes = [tp.get_cached_btype(ffi, finishlist)
for tp in self.fldtypes]
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
extra_flags = ()
if self.packed:
if self.packed == 1:
extra_flags = (8,) # SF_PACKED
else:
extra_flags = (0, self.packed)
ffi._backend.complete_struct_or_union(BType, lst, self,
-1, -1, *extra_flags)
#
else:
fldtypes = []
fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
for i in range(len(self.fldnames)):
fsize = fieldsize[i]
ftype = self.fldtypes[i]
#
if isinstance(ftype, ArrayType) and ftype.length_is_unknown():
# fix the length to match the total size
BItemType = ftype.item.get_cached_btype(ffi, finishlist)
nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
if nrest != 0:
self._verification_error(
"field '%s.%s' has a bogus size?" % (
self.name, self.fldnames[i] or '{}'))
ftype = ftype.resolve_length(nlen)
self.fldtypes = (self.fldtypes[:i] + (ftype,) +
self.fldtypes[i+1:])
#
BFieldType = ftype.get_cached_btype(ffi, finishlist)
if isinstance(ftype, ArrayType) and ftype.length is None:
assert fsize == 0
else:
bitemsize = ffi.sizeof(BFieldType)
if bitemsize != fsize:
self._verification_error(
"field '%s.%s' is declared as %d bytes, but is "
"really %d bytes" % (self.name,
self.fldnames[i] or '{}',
bitemsize, fsize))
fldtypes.append(BFieldType)
#
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
ffi._backend.complete_struct_or_union(BType, lst, self,
totalsize, totalalignment)
self.completed = 2
def _verification_error(self, msg):
raise VerificationError(msg)
def check_not_partial(self):
if self.partial and self.fixedlayout is None:
raise VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
finishlist.append(self)
#
return global_cache(self, ffi, 'new_%s_type' % self.kind,
self.get_official_name(), key=self)
class StructType(StructOrUnion):
kind = 'struct'
class UnionType(StructOrUnion):
kind = 'union'
class EnumType(StructOrUnionOrEnum):
kind = 'enum'
partial = False
partial_resolved = False
def __init__(self, name, enumerators, enumvalues, baseinttype=None):
self.name = name
self.enumerators = enumerators
self.enumvalues = enumvalues
self.baseinttype = baseinttype
self.build_c_name_with_marker()
def force_the_name(self, forcename):
StructOrUnionOrEnum.force_the_name(self, forcename)
if self.forcename is None:
name = self.get_official_name()
self.forcename = '$' + name.replace(' ', '_')
def check_not_partial(self):
if self.partial and not self.partial_resolved:
raise VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
base_btype = self.build_baseinttype(ffi, finishlist)
return global_cache(self, ffi, 'new_enum_type',
self.get_official_name(),
self.enumerators, self.enumvalues,
base_btype, key=self)
def build_baseinttype(self, ffi, finishlist):
if self.baseinttype is not None:
return self.baseinttype.get_cached_btype(ffi, finishlist)
#
if self.enumvalues:
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
import warnings
try:
# XXX! The goal is to ensure that the warnings.warn()
# will not suppress the warning. We want to get it
# several times if we reach this point several times.
__warningregistry__.clear()
except NameError:
pass
warnings.warn("%r has no values explicitly defined; "
"guessing that it is equivalent to 'unsigned int'"
% self._get_c_name())
smallest_value = largest_value = 0
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
candidate2 = PrimitiveType("long")
else:
sign = 0
candidate1 = PrimitiveType("unsigned int")
candidate2 = PrimitiveType("unsigned long")
btype1 = candidate1.get_cached_btype(ffi, finishlist)
btype2 = candidate2.get_cached_btype(ffi, finishlist)
size1 = ffi.sizeof(btype1)
size2 = ffi.sizeof(btype2)
if (smallest_value >= ((-1) << (8*size1-1)) and
largest_value < (1 << (8*size1-sign))):
return btype1
if (smallest_value >= ((-1) << (8*size2-1)) and
largest_value < (1 << (8*size2-sign))):
return btype2
raise CDefError("%s values don't all fit into either 'long' "
"or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
if structname is None:
structname = '$%s' % name
tp = StructType(structname, None, None, None)
tp.force_the_name(name)
tp.origin = "unknown_type"
return tp
def unknown_ptr_type(name, structname=None):
if structname is None:
structname = '$$%s' % name
tp = StructType(structname, None, None, None)
return NamedPointerType(tp, name)
global_lock = allocate_lock()
_typecache_cffi_backend = weakref.WeakValueDictionary()
def get_typecache(backend):
# returns _typecache_cffi_backend if backend is the _cffi_backend
# module, or type(backend).__typecache if backend is an instance of
# CTypesBackend (or some FakeBackend class during tests)
if isinstance(backend, types.ModuleType):
return _typecache_cffi_backend
with global_lock:
if not hasattr(type(backend), '__typecache'):
type(backend).__typecache = weakref.WeakValueDictionary()
return type(backend).__typecache
def global_cache(srctype, ffi, funcname, *args, **kwds):
key = kwds.pop('key', (funcname, args))
assert not kwds
try:
return ffi._typecache[key]
except KeyError:
pass
try:
res = getattr(ffi._backend, funcname)(*args)
except NotImplementedError as e:
raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
# note that setdefault() on WeakValueDictionary is not atomic
# and contains a rare bug (http://bugs.python.org/issue19542);
# we have to use a lock and do it ourselves
cache = ffi._typecache
with global_lock:
res1 = cache.get(key)
if res1 is None:
cache[key] = res
return res
else:
return res1
def pointer_cache(ffi, BType):
return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
if e.args and type(e.args[0]) is str:
e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/error.py |
class FFIError(Exception):
__module__ = 'cffi'
class CDefError(Exception):
__module__ = 'cffi'
def __str__(self):
try:
current_decl = self.args[1]
filename = current_decl.coord.file
linenum = current_decl.coord.line
prefix = '%s:%d: ' % (filename, linenum)
except (AttributeError, TypeError, IndexError):
prefix = ''
return '%s%s' % (prefix, self.args[0])
class VerificationError(Exception):
""" An error raised when verification fails
"""
__module__ = 'cffi'
class VerificationMissing(Exception):
""" An error raised when incomplete structures are passed into
cdef, but no verification has been done
"""
__module__ = 'cffi'
class PkgConfigError(Exception):
""" An error raised for missing modules in pkg-config
"""
__module__ = 'cffi'
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/cffi_opcode.py | from .error import VerificationError
class CffiOp(object):
def __init__(self, op, arg):
self.op = op
self.arg = arg
def as_c_expr(self):
if self.op is None:
assert isinstance(self.arg, str)
return '(_cffi_opcode_t)(%s)' % (self.arg,)
classname = CLASS_NAME[self.op]
return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
def as_python_bytes(self):
if self.op is None and self.arg.isdigit():
value = int(self.arg) # non-negative: '-' not in self.arg
if value >= 2**31:
raise OverflowError("cannot emit %r: limited to 2**31-1"
% (self.arg,))
return format_four_bytes(value)
if isinstance(self.arg, str):
raise VerificationError("cannot emit to Python: %r" % (self.arg,))
return format_four_bytes((self.arg << 8) | self.op)
def __str__(self):
classname = CLASS_NAME.get(self.op, self.op)
return '(%s %s)' % (classname, self.arg)
def format_four_bytes(num):
return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
(num >> 24) & 0xFF,
(num >> 16) & 0xFF,
(num >> 8) & 0xFF,
(num ) & 0xFF)
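# Editor's example: format_four_bytes(0x01020304) returns the text
# r'\x01\x02\x03\x04', i.e. four escaped bytes in big-endian order,
# ready to be embedded in generated Python source.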
OP_PRIMITIVE = 1
OP_POINTER = 3
OP_ARRAY = 5
OP_OPEN_ARRAY = 7
OP_STRUCT_UNION = 9
OP_ENUM = 11
OP_FUNCTION = 13
OP_FUNCTION_END = 15
OP_NOOP = 17
OP_BITFIELD = 19
OP_TYPENAME = 21
OP_CPYTHON_BLTN_V = 23 # varargs
OP_CPYTHON_BLTN_N = 25 # noargs
OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg)
OP_CONSTANT = 29
OP_CONSTANT_INT = 31
OP_GLOBAL_VAR = 33
OP_DLOPEN_FUNC = 35
OP_DLOPEN_CONST = 37
OP_GLOBAL_VAR_F = 39
OP_EXTERN_PYTHON = 41
PRIM_VOID = 0
PRIM_BOOL = 1
PRIM_CHAR = 2
PRIM_SCHAR = 3
PRIM_UCHAR = 4
PRIM_SHORT = 5
PRIM_USHORT = 6
PRIM_INT = 7
PRIM_UINT = 8
PRIM_LONG = 9
PRIM_ULONG = 10
PRIM_LONGLONG = 11
PRIM_ULONGLONG = 12
PRIM_FLOAT = 13
PRIM_DOUBLE = 14
PRIM_LONGDOUBLE = 15
PRIM_WCHAR = 16
PRIM_INT8 = 17
PRIM_UINT8 = 18
PRIM_INT16 = 19
PRIM_UINT16 = 20
PRIM_INT32 = 21
PRIM_UINT32 = 22
PRIM_INT64 = 23
PRIM_UINT64 = 24
PRIM_INTPTR = 25
PRIM_UINTPTR = 26
PRIM_PTRDIFF = 27
PRIM_SIZE = 28
PRIM_SSIZE = 29
PRIM_INT_LEAST8 = 30
PRIM_UINT_LEAST8 = 31
PRIM_INT_LEAST16 = 32
PRIM_UINT_LEAST16 = 33
PRIM_INT_LEAST32 = 34
PRIM_UINT_LEAST32 = 35
PRIM_INT_LEAST64 = 36
PRIM_UINT_LEAST64 = 37
PRIM_INT_FAST8 = 38
PRIM_UINT_FAST8 = 39
PRIM_INT_FAST16 = 40
PRIM_UINT_FAST16 = 41
PRIM_INT_FAST32 = 42
PRIM_UINT_FAST32 = 43
PRIM_INT_FAST64 = 44
PRIM_UINT_FAST64 = 45
PRIM_INTMAX = 46
PRIM_UINTMAX = 47
PRIM_FLOATCOMPLEX = 48
PRIM_DOUBLECOMPLEX = 49
PRIM_CHAR16 = 50
PRIM_CHAR32 = 51
_NUM_PRIM = 52
_UNKNOWN_PRIM = -1
_UNKNOWN_FLOAT_PRIM = -2
_UNKNOWN_LONG_DOUBLE = -3
_IO_FILE_STRUCT = -1
PRIMITIVE_TO_INDEX = {
'char': PRIM_CHAR,
'short': PRIM_SHORT,
'int': PRIM_INT,
'long': PRIM_LONG,
'long long': PRIM_LONGLONG,
'signed char': PRIM_SCHAR,
'unsigned char': PRIM_UCHAR,
'unsigned short': PRIM_USHORT,
'unsigned int': PRIM_UINT,
'unsigned long': PRIM_ULONG,
'unsigned long long': PRIM_ULONGLONG,
'float': PRIM_FLOAT,
'double': PRIM_DOUBLE,
'long double': PRIM_LONGDOUBLE,
'float _Complex': PRIM_FLOATCOMPLEX,
'double _Complex': PRIM_DOUBLECOMPLEX,
'_Bool': PRIM_BOOL,
'wchar_t': PRIM_WCHAR,
'char16_t': PRIM_CHAR16,
'char32_t': PRIM_CHAR32,
'int8_t': PRIM_INT8,
'uint8_t': PRIM_UINT8,
'int16_t': PRIM_INT16,
'uint16_t': PRIM_UINT16,
'int32_t': PRIM_INT32,
'uint32_t': PRIM_UINT32,
'int64_t': PRIM_INT64,
'uint64_t': PRIM_UINT64,
'intptr_t': PRIM_INTPTR,
'uintptr_t': PRIM_UINTPTR,
'ptrdiff_t': PRIM_PTRDIFF,
'size_t': PRIM_SIZE,
'ssize_t': PRIM_SSIZE,
'int_least8_t': PRIM_INT_LEAST8,
'uint_least8_t': PRIM_UINT_LEAST8,
'int_least16_t': PRIM_INT_LEAST16,
'uint_least16_t': PRIM_UINT_LEAST16,
'int_least32_t': PRIM_INT_LEAST32,
'uint_least32_t': PRIM_UINT_LEAST32,
'int_least64_t': PRIM_INT_LEAST64,
'uint_least64_t': PRIM_UINT_LEAST64,
'int_fast8_t': PRIM_INT_FAST8,
'uint_fast8_t': PRIM_UINT_FAST8,
'int_fast16_t': PRIM_INT_FAST16,
'uint_fast16_t': PRIM_UINT_FAST16,
'int_fast32_t': PRIM_INT_FAST32,
'uint_fast32_t': PRIM_UINT_FAST32,
'int_fast64_t': PRIM_INT_FAST64,
'uint_fast64_t': PRIM_UINT_FAST64,
'intmax_t': PRIM_INTMAX,
'uintmax_t': PRIM_UINTMAX,
}
F_UNION = 0x01
F_CHECK_FIELDS = 0x02
F_PACKED = 0x04
F_EXTERNAL = 0x08
F_OPAQUE = 0x10
G_FLAGS = dict([('_CFFI_' + _key, globals()[_key])
for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED',
'F_EXTERNAL', 'F_OPAQUE']])
CLASS_NAME = {}
for _name, _value in list(globals().items()):
if _name.startswith('OP_') and isinstance(_value, int):
CLASS_NAME[_value] = _name[3:]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/api.py | import sys, types
from .lock import allocate_lock
from .error import CDefError
from . import model
try:
callable
except NameError:
# Python 3.1
from collections import Callable
callable = lambda x: isinstance(x, Callable)
try:
basestring
except NameError:
# Python 3.x
basestring = str
_unspecified = object()
class FFI(object):
r'''
The main top-level class that you instantiate once, or once per module.
Example usage:
ffi = FFI()
ffi.cdef("""
int printf(const char *, ...);
""")
C = ffi.dlopen(None) # standard library
-or-
C = ffi.verify() # use a C compiler: verify the decl above is right
C.printf("hello, %s!\n", ffi.new("char[]", "world"))
'''
def __init__(self, backend=None):
"""Create an FFI instance. The 'backend' argument is used to
select a non-default backend, mostly for tests.
"""
if backend is None:
# You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with
# _cffi_backend.so compiled.
import _cffi_backend as backend
from . import __version__
if backend.__version__ != __version__:
# bad version! Try to be as explicit as possible.
if hasattr(backend, '__file__'):
# CPython
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % (
__version__, __file__,
backend.__version__, backend.__file__))
else:
# PyPy
raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % (
__version__, __file__, backend.__version__))
# (If you insist you can also try to pass the option
# 'backend=backend_ctypes.CTypesBackend()', but don't
# rely on it! It's probably not going to work well.)
from . import cparser
self._backend = backend
self._lock = allocate_lock()
self._parser = cparser.Parser()
self._cached_btypes = {}
self._parsed_types = types.ModuleType('parsed_types').__dict__
self._new_types = types.ModuleType('new_types').__dict__
self._function_caches = []
self._libraries = []
self._cdefsources = []
self._included_ffis = []
self._windows_unicode = None
self._init_once_cache = {}
self._cdef_version = None
self._embedding = None
self._typecache = model.get_typecache(backend)
if hasattr(backend, 'set_ffi'):
backend.set_ffi(self)
for name in list(backend.__dict__):
if name.startswith('RTLD_'):
setattr(self, name, getattr(backend, name))
#
with self._lock:
self.BVoidP = self._get_cached_btype(model.voidp_type)
self.BCharA = self._get_cached_btype(model.char_array_type)
if isinstance(backend, types.ModuleType):
# _cffi_backend: attach these constants to the class
if not hasattr(FFI, 'NULL'):
FFI.NULL = self.cast(self.BVoidP, 0)
FFI.CData, FFI.CType = backend._get_types()
else:
# ctypes backend: attach these constants to the instance
self.NULL = self.cast(self.BVoidP, 0)
self.CData, self.CType = backend._get_types()
self.buffer = backend.buffer
def cdef(self, csource, override=False, packed=False, pack=None):
"""Parse the given C source. This registers all declared functions,
types, and global variables. The functions and global variables can
then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'.
The types can be used in 'ffi.new()' and other functions.
If 'packed' is specified as True, all structs declared inside this
cdef are packed, i.e. laid out without any field alignment at all.
Alternatively, 'pack' can be a small integer, and requests for
alignment greater than that are ignored (pack=1 is equivalent to
packed=True).
"""
self._cdef(csource, override=override, packed=packed, pack=pack)
def embedding_api(self, csource, packed=False, pack=None):
self._cdef(csource, packed=packed, pack=pack, dllexport=True)
if self._embedding is None:
self._embedding = ''
def _cdef(self, csource, override=False, **options):
if not isinstance(csource, str): # unicode, on Python 2
if not isinstance(csource, basestring):
raise TypeError("cdef() argument must be a string")
csource = csource.encode('ascii')
with self._lock:
self._cdef_version = object()
self._parser.parse(csource, override=override, **options)
self._cdefsources.append(csource)
if override:
for cache in self._function_caches:
cache.clear()
finishlist = self._parser._recomplete
if finishlist:
self._parser._recomplete = []
for tp in finishlist:
tp.finish_backend_type(self, finishlist)
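    # Hedged usage sketch (editor's note; 'point_t' is a hypothetical name):
    #
    #     ffi = FFI()
    #     ffi.cdef("typedef struct { int x, y; } point_t;")
    #     p = ffi.new("point_t *")
    #     p.x = 3    # fields are zero-initialized, then writable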
def dlopen(self, name, flags=0):
"""Load and return a dynamic library identified by 'name'.
The standard C library can be loaded by passing None.
Note that functions and types declared by 'ffi.cdef()' are not
linked to a particular library, just like C headers; in the
library we only look for the actual (untyped) symbols.
"""
if not (isinstance(name, basestring) or
name is None or
isinstance(name, self.CData)):
raise TypeError("dlopen(name): name must be a file name, None, "
"or an already-opened 'void *' handle")
with self._lock:
lib, function_cache = _make_ffi_library(self, name, flags)
self._function_caches.append(function_cache)
self._libraries.append(lib)
return lib
def dlclose(self, lib):
"""Close a library obtained with ffi.dlopen(). After this call,
access to functions or variables from the library will fail
(possibly with a segmentation fault).
"""
type(lib).__cffi_close__(lib)
def _typeof_locked(self, cdecl):
# call me with the lock!
key = cdecl
if key in self._parsed_types:
return self._parsed_types[key]
#
if not isinstance(cdecl, str): # unicode, on Python 2
cdecl = cdecl.encode('ascii')
#
type = self._parser.parse_type(cdecl)
really_a_function_type = type.is_raw_function
if really_a_function_type:
type = type.as_function_pointer()
btype = self._get_cached_btype(type)
result = btype, really_a_function_type
self._parsed_types[key] = result
return result
def _typeof(self, cdecl, consider_function_as_funcptr=False):
# string -> ctype object
try:
result = self._parsed_types[cdecl]
except KeyError:
with self._lock:
result = self._typeof_locked(cdecl)
#
btype, really_a_function_type = result
if really_a_function_type and not consider_function_as_funcptr:
raise CDefError("the type %r is a function type, not a "
"pointer-to-function type" % (cdecl,))
return btype
def typeof(self, cdecl):
"""Parse the C type given as a string and return the
corresponding <ctype> object.
        It can also be used on a 'cdata' instance to get its C type.
"""
if isinstance(cdecl, basestring):
return self._typeof(cdecl)
if isinstance(cdecl, self.CData):
return self._backend.typeof(cdecl)
if isinstance(cdecl, types.BuiltinFunctionType):
res = _builtin_function_type(cdecl)
if res is not None:
return res
if (isinstance(cdecl, types.FunctionType)
and hasattr(cdecl, '_cffi_base_type')):
with self._lock:
return self._get_cached_btype(cdecl._cffi_base_type)
raise TypeError(type(cdecl))
def sizeof(self, cdecl):
"""Return the size in bytes of the argument. It can be a
string naming a C type, or a 'cdata' instance.
"""
if isinstance(cdecl, basestring):
BType = self._typeof(cdecl)
return self._backend.sizeof(BType)
else:
return self._backend.sizeof(cdecl)
def alignof(self, cdecl):
"""Return the natural alignment size in bytes of the C type
given as a string.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.alignof(cdecl)
def offsetof(self, cdecl, *fields_or_indexes):
"""Return the offset of the named field inside the given
structure or array, which must be given as a C type name.
You can give several field names in case of nested structures.
You can also give numeric values which correspond to array
items, in case of an array type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._typeoffsetof(cdecl, *fields_or_indexes)[1]
def new(self, cdecl, init=None):
"""Allocate an instance according to the specified C type and
return a pointer to it. The specified C type must be either a
pointer or an array: ``new('X *')`` allocates an X and returns
a pointer to it, whereas ``new('X[n]')`` allocates an array of
n X'es and returns an array referencing it (which works
mostly like a pointer, like in C). You can also use
``new('X[]', n)`` to allocate an array of a non-constant
length n.
The memory is initialized following the rules of declaring a
global variable in C: by default it is zero-initialized, but
an explicit initializer can be given which can be used to
fill all or part of the memory.
When the returned <cdata> object goes out of scope, the memory
is freed. In other words the returned <cdata> object has
ownership of the value of type 'cdecl' that it points to. This
means that the raw data can be used as long as this object is
kept alive, but must not be used for a longer time. Be careful
about that when copying the pointer to the memory somewhere
else, e.g. into another structure.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.newp(cdecl, init)
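    # Editor's sketch of the allocation forms described above:
    #
    #     ffi = FFI()
    #     p = ffi.new("int *")           # one zero-initialized int
    #     a = ffi.new("int[4]")          # array of four ints
    #     b = ffi.new("int[]", 10)       # non-constant length n == 10
    #     s = ffi.new("char[]", b"hi")   # sized and filled from initializer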
def new_allocator(self, alloc=None, free=None,
should_clear_after_alloc=True):
"""Return a new allocator, i.e. a function that behaves like ffi.new()
but uses the provided low-level 'alloc' and 'free' functions.
'alloc' is called with the size as argument. If it returns NULL, a
MemoryError is raised. 'free' is called with the result of 'alloc'
        as argument. Both can be either Python functions or C functions
        directly. If 'free' is None, then no free function is called.
If both 'alloc' and 'free' are None, the default is used.
If 'should_clear_after_alloc' is set to False, then the memory
returned by 'alloc' is assumed to be already cleared (or you are
fine with garbage); otherwise CFFI will clear it.
"""
compiled_ffi = self._backend.FFI()
allocator = compiled_ffi.new_allocator(alloc, free,
should_clear_after_alloc)
def allocate(cdecl, init=None):
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return allocator(cdecl, init)
return allocate
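    # Illustrative sketch, assuming a 'lib' object that exposes
    # hypothetical my_malloc()/my_free() functions:
    #     alloc = ffi.new_allocator(lib.my_malloc, lib.my_free)
    #     p = alloc("int[10]")    # used like ffi.new("int[10]")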
def cast(self, cdecl, source):
"""Similar to a C cast: returns an instance of the named C
type initialized with the given 'source'. The source is
        cast between integers or pointers of any type.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.cast(cdecl, source)
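    # Illustrative sketch:
    #     ffi.cast("int", 3.75)         # -> <cdata 'int' 3> (truncates)
    #     p = ffi.new("int *")
    #     q = ffi.cast("char *", p)     # reinterpret the pointer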
def string(self, cdata, maxlen=-1):
"""Return a Python string (or unicode string) from the 'cdata'.
If 'cdata' is a pointer or array of characters or bytes, returns
the null-terminated string. The returned string extends until
the first null character, or at most 'maxlen' characters. If
'cdata' is an array then 'maxlen' defaults to its length.
If 'cdata' is a pointer or array of wchar_t, returns a unicode
string following the same rules.
If 'cdata' is a single character or byte or a wchar_t, returns
it as a string or unicode string.
If 'cdata' is an enum, returns the value of the enumerator as a
string, or 'NUMBER' if the value is out of range.
"""
return self._backend.string(cdata, maxlen)
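    # Illustrative sketch:
    #     buf = ffi.new("char[]", b"hello")
    #     ffi.string(buf)       # -> b'hello' (stops at the first '\0')
    #     ffi.string(buf, 3)    # -> b'hel'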
def unpack(self, cdata, length):
"""Unpack an array of C data of the given length,
returning a Python string/unicode/list.
If 'cdata' is a pointer to 'char', returns a byte string.
It does not stop at the first null. This is equivalent to:
ffi.buffer(cdata, length)[:]
If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
'length' is measured in wchar_t's; it is not the size in bytes.
If 'cdata' is a pointer to anything else, returns a list of
'length' items. This is a faster equivalent to:
[cdata[i] for i in range(length)]
"""
return self._backend.unpack(cdata, length)
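    # Illustrative sketch:
    #     p = ffi.new("int[4]", [10, 20, 30, 40])
    #     ffi.unpack(p, 3)                    # -> [10, 20, 30]
    #     c = ffi.new("char[]", b"ab\x00cd")
    #     ffi.unpack(c, 5)                    # -> b'ab\x00cd'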
#def buffer(self, cdata, size=-1):
# """Return a read-write buffer object that references the raw C data
# pointed to by the given 'cdata'. The 'cdata' must be a pointer or
# an array. Can be passed to functions expecting a buffer, or directly
# manipulated with:
#
# buf[:] get a copy of it in a regular string, or
# buf[idx] as a single character
# buf[:] = ...
# buf[idx] = ... change the content
# """
# note that 'buffer' is a type, set on this instance by __init__
def from_buffer(self, cdecl, python_buffer=_unspecified,
require_writable=False):
"""Return a cdata of the given type pointing to the data of the
given Python object, which must support the buffer interface.
Note that this is not meant to be used on the built-in types
str or unicode (you can build 'char[]' arrays explicitly)
but only on objects containing large quantities of raw data
in some other format, like 'array.array' or numpy arrays.
        The first argument is optional and defaults to 'char[]'.
"""
if python_buffer is _unspecified:
cdecl, python_buffer = self.BCharA, cdecl
elif isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
return self._backend.from_buffer(cdecl, python_buffer,
require_writable)
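    # Illustrative sketch:
    #     import array
    #     a = array.array('i', [1, 2, 3])
    #     p = ffi.from_buffer("int[]", a)   # shares memory with 'a'
    #     p[0] = 42                         # 'a[0]' becomes 42 too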
def memmove(self, dest, src, n):
"""ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.
Like the C function memmove(), the memory areas may overlap;
apart from that it behaves like the C function memcpy().
'src' can be any cdata ptr or array, or any Python buffer object.
'dest' can be any cdata ptr or array, or a writable Python buffer
object. The size to copy, 'n', is always measured in bytes.
        Unlike other methods, this one supports all Python buffer objects,
        including byte strings and bytearrays---but it still does not support
non-contiguous buffers.
"""
return self._backend.memmove(dest, src, n)
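    # Illustrative sketch:
    #     p = ffi.new("char[]", 6)
    #     ffi.memmove(p, b"abc", 3)     # from a byte string into cdata
    #     ba = bytearray(6)
    #     ffi.memmove(ba, p, 6)         # from cdata into a bytearray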
def callback(self, cdecl, python_callable=None, error=None, onerror=None):
"""Return a callback object or a decorator making such a
callback object. 'cdecl' must name a C function pointer type.
The callback invokes the specified 'python_callable' (which may
be provided either directly or via a decorator). Important: the
callback object must be manually kept alive for as long as the
callback may be invoked from the C level.
"""
def callback_decorator_wrap(python_callable):
if not callable(python_callable):
raise TypeError("the 'python_callable' argument "
"is not callable")
return self._backend.callback(cdecl, python_callable,
error, onerror)
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
if python_callable is None:
return callback_decorator_wrap # decorator mode
else:
return callback_decorator_wrap(python_callable) # direct mode
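    # Illustrative sketch (decorator mode); 'lib.register_handler' is a
    # hypothetical C-side function, and 'on_event' must be kept alive for
    # as long as C code may call it:
    #     @ffi.callback("int(int, int)")
    #     def on_event(x, y):
    #         return x + y
    #     lib.register_handler(on_event)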
def getctype(self, cdecl, replace_with=''):
"""Return a string giving the C type 'cdecl', which may be itself
a string or a <ctype> object. If 'replace_with' is given, it gives
extra text to append (or insert for more complicated C types), like
        a variable name, or '*' to actually get the C type 'pointer-to-cdecl'.
"""
if isinstance(cdecl, basestring):
cdecl = self._typeof(cdecl)
replace_with = replace_with.strip()
if (replace_with.startswith('*')
and '&[' in self._backend.getcname(cdecl, '&')):
replace_with = '(%s)' % replace_with
elif replace_with and not replace_with[0] in '[(':
replace_with = ' ' + replace_with
return self._backend.getcname(cdecl, replace_with)
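    # Illustrative sketch:
    #     ffi.getctype("int*")           # -> 'int *'
    #     ffi.getctype("int*", "p")      # -> 'int *p'
    #     ffi.getctype("int[5]", "x")    # -> 'int x[5]'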
def gc(self, cdata, destructor, size=0):
"""Return a new cdata object that points to the same
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
The optional 'size' gives an estimate of the size, used to
trigger the garbage collection more eagerly. So far only used
on PyPy. It tells the GC that the returned object keeps alive
roughly 'size' bytes of external memory.
"""
return self._backend.gcp(cdata, destructor, size)
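    # Illustrative sketch, assuming 'lib' exposes hypothetical matching
    # make_foo()/free_foo() functions:
    #     p = ffi.gc(lib.make_foo(), lib.free_foo)
    #     # lib.free_foo(p) runs automatically when 'p' is collected;
    #     # ffi.gc(p, None) on the result detaches the destructor again.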
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
# call me with the lock!
try:
BType = self._cached_btypes[type]
except KeyError:
finishlist = []
BType = type.get_cached_btype(self, finishlist)
for type in finishlist:
type.finish_backend_type(self, finishlist)
return BType
def verify(self, source='', tmpdir=None, **kwargs):
"""Verify that the current ffi signatures compile on this
machine, and return a dynamic library object. The dynamic
library can be used to call functions and access global
variables declared in this 'ffi'. The library is compiled
by the C compiler: it gives you C-level API compatibility
(including calling macros). This is unlike 'ffi.dlopen()',
which requires binary compatibility in the signatures.
"""
from .verifier import Verifier, _caller_dir_pycache
#
# If set_unicode(True) was called, insert the UNICODE and
# _UNICODE macro declarations
if self._windows_unicode:
self._apply_windows_unicode(kwargs)
#
# Set the tmpdir here, and not in Verifier.__init__: it picks
# up the caller's directory, which we want to be the caller of
        # ffi.verify(), as opposed to the caller of Verifier().
tmpdir = tmpdir or _caller_dir_pycache()
#
# Make a Verifier() and use it to load the library.
self.verifier = Verifier(self, source, tmpdir, **kwargs)
lib = self.verifier.load_library()
#
# Save the loaded library for keep-alive purposes, even
# if the caller doesn't keep it alive itself (it should).
self._libraries.append(lib)
return lib
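    # Illustrative sketch (ffi.verify() is deprecated in favor of
    # set_source() + compile()):
    #     ffi = FFI()
    #     ffi.cdef("double sqrt(double x);")
    #     lib = ffi.verify("#include <math.h>", libraries=["m"])
    #     assert lib.sqrt(4.0) == 2.0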
def _get_errno(self):
return self._backend.get_errno()
def _set_errno(self, errno):
self._backend.set_errno(errno)
errno = property(_get_errno, _set_errno, None,
"the value of 'errno' from/to the C calls")
def getwinerror(self, code=-1):
return self._backend.getwinerror(code)
def _pointer_to(self, ctype):
with self._lock:
return model.pointer_cache(self, ctype)
def addressof(self, cdata, *fields_or_indexes):
"""Return the address of a <cdata 'struct-or-union'>.
If 'fields_or_indexes' are given, returns the address of that
field or array item in the structure or array, recursively in
case of nested structures.
"""
try:
ctype = self._backend.typeof(cdata)
except TypeError:
if '__addressof__' in type(cdata).__dict__:
return type(cdata).__addressof__(cdata, *fields_or_indexes)
raise
if fields_or_indexes:
ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes)
else:
if ctype.kind == "pointer":
raise TypeError("addressof(pointer)")
offset = 0
ctypeptr = self._pointer_to(ctype)
return self._backend.rawaddressof(ctypeptr, cdata, offset)
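    # Illustrative sketch:
    #     ffi.cdef("struct pt { int x, y; };")
    #     p = ffi.new("struct pt *")
    #     py = ffi.addressof(p[0], "y")    # -> <cdata 'int *'>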
def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes):
ctype, offset = self._backend.typeoffsetof(ctype, field_or_index)
for field1 in fields_or_indexes:
ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1)
offset += offset1
return ctype, offset
def include(self, ffi_to_include):
"""Includes the typedefs, structs, unions and enums defined
in another FFI instance. Usage is similar to a #include in C,
where a part of the program might include types defined in
another part for its own usage. Note that the include()
method has no effect on functions, constants and global
variables, which must anyway be accessed directly from the
lib object returned by the original FFI instance.
"""
if not isinstance(ffi_to_include, FFI):
raise TypeError("ffi.include() expects an argument that is also of"
" type cffi.FFI, not %r" % (
type(ffi_to_include).__name__,))
if ffi_to_include is self:
raise ValueError("self.include(self)")
with ffi_to_include._lock:
with self._lock:
self._parser.include(ffi_to_include._parser)
self._cdefsources.append('[')
self._cdefsources.extend(ffi_to_include._cdefsources)
self._cdefsources.append(']')
self._included_ffis.append(ffi_to_include)
def new_handle(self, x):
return self._backend.newp_handle(self.BVoidP, x)
def from_handle(self, x):
return self._backend.from_handle(x)
def release(self, x):
self._backend.release(x)
def set_unicode(self, enabled_flag):
"""Windows: if 'enabled_flag' is True, enable the UNICODE and
_UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
to be (pointers to) wchar_t. If 'enabled_flag' is False,
declare these types to be (pointers to) plain 8-bit characters.
This is mostly for backward compatibility; you usually want True.
"""
if self._windows_unicode is not None:
raise ValueError("set_unicode() can only be called once")
enabled_flag = bool(enabled_flag)
if enabled_flag:
self.cdef("typedef wchar_t TBYTE;"
"typedef wchar_t TCHAR;"
"typedef const wchar_t *LPCTSTR;"
"typedef const wchar_t *PCTSTR;"
"typedef wchar_t *LPTSTR;"
"typedef wchar_t *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
else:
self.cdef("typedef char TBYTE;"
"typedef char TCHAR;"
"typedef const char *LPCTSTR;"
"typedef const char *PCTSTR;"
"typedef char *LPTSTR;"
"typedef char *PTSTR;"
"typedef TBYTE *PTBYTE;"
"typedef TCHAR *PTCHAR;")
self._windows_unicode = enabled_flag
def _apply_windows_unicode(self, kwds):
defmacros = kwds.get('define_macros', ())
if not isinstance(defmacros, (list, tuple)):
raise TypeError("'define_macros' must be a list or tuple")
defmacros = list(defmacros) + [('UNICODE', '1'),
('_UNICODE', '1')]
kwds['define_macros'] = defmacros
def _apply_embedding_fix(self, kwds):
# must include an argument like "-lpython2.7" for the compiler
def ensure(key, value):
lst = kwds.setdefault(key, [])
if value not in lst:
lst.append(value)
#
if '__pypy__' in sys.builtin_module_names:
import os
if sys.platform == "win32":
# we need 'libpypy-c.lib'. Current distributions of
# pypy (>= 4.1) contain it as 'libs/python27.lib'.
pythonlib = "python{0[0]}{0[1]}".format(sys.version_info)
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'libs'))
else:
# we need 'libpypy-c.{so,dylib}', which should be by
# default located in 'sys.prefix/bin' for installed
# systems.
if sys.version_info < (3,):
pythonlib = "pypy-c"
else:
pythonlib = "pypy3-c"
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'bin'))
# On uninstalled pypy's, the libpypy-c is typically found in
# .../pypy/goal/.
if hasattr(sys, 'prefix'):
ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal'))
else:
if sys.platform == "win32":
template = "python%d%d"
if hasattr(sys, 'gettotalrefcount'):
template += '_d'
else:
try:
import sysconfig
except ImportError: # 2.6
from distutils import sysconfig
template = "python%d.%d"
if sysconfig.get_config_var('DEBUG_EXT'):
template += sysconfig.get_config_var('DEBUG_EXT')
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
if hasattr(sys, 'abiflags'):
pythonlib += sys.abiflags
ensure('libraries', pythonlib)
if sys.platform == "win32":
ensure('extra_link_args', '/MANIFEST')
def set_source(self, module_name, source, source_extension='.c', **kwds):
import os
if hasattr(self, '_assigned_source'):
raise ValueError("set_source() cannot be called several times "
"per ffi object")
if not isinstance(module_name, basestring):
raise TypeError("'module_name' must be a string")
if os.sep in module_name or (os.altsep and os.altsep in module_name):
raise ValueError("'module_name' must not contain '/': use a dotted "
"name to make a 'package.module' location")
self._assigned_source = (str(module_name), source,
source_extension, kwds)
def set_source_pkgconfig(self, module_name, pkgconfig_libs, source,
source_extension='.c', **kwds):
from . import pkgconfig
if not isinstance(pkgconfig_libs, list):
raise TypeError("the pkgconfig_libs argument must be a list "
"of package names")
kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs)
pkgconfig.merge_flags(kwds, kwds2)
self.set_source(module_name, source, source_extension, **kwds)
def distutils_extension(self, tmpdir='build', verbose=True):
from distutils.dir_util import mkpath
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored
return self.verifier.get_extension()
raise ValueError("set_source() must be called before"
" distutils_extension()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("distutils_extension() is only for C extension "
"modules, not for dlopen()-style pure Python "
"modules")
mkpath(tmpdir)
ext, updated = recompile(self, module_name,
source, tmpdir=tmpdir, extradir=tmpdir,
source_extension=source_extension,
call_c_compiler=False, **kwds)
if verbose:
if updated:
sys.stderr.write("regenerated: %r\n" % (ext.sources[0],))
else:
sys.stderr.write("not modified: %r\n" % (ext.sources[0],))
return ext
def emit_c_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is None:
raise TypeError("emit_c_code() is only for C extension modules, "
"not for dlopen()-style pure Python modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def emit_python_code(self, filename):
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before emit_c_code()")
module_name, source, source_extension, kwds = self._assigned_source
if source is not None:
raise TypeError("emit_python_code() is only for dlopen()-style "
"pure Python modules, not for C extension modules")
recompile(self, module_name, source,
c_file=filename, call_c_compiler=False, **kwds)
def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
"""The 'target' argument gives the final file name of the
compiled DLL. Use '*' to force distutils' choice, suitable for
regular CPython C API modules. Use a file name ending in '.*'
to ask for the system's default extension for dynamic libraries
(.so/.dll/.dylib).
The default is '*' when building a non-embedded C API extension,
and (module_name + '.*') when building an embedded library.
"""
from .recompiler import recompile
#
if not hasattr(self, '_assigned_source'):
raise ValueError("set_source() must be called before compile()")
module_name, source, source_extension, kwds = self._assigned_source
return recompile(self, module_name, source, tmpdir=tmpdir,
target=target, source_extension=source_extension,
compiler_verbose=verbose, debug=debug, **kwds)
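    # Illustrative out-of-line API-mode sketch ('_example' is a
    # hypothetical module name):
    #     ffi = FFI()
    #     ffi.cdef("int add(int, int);")
    #     ffi.set_source("_example",
    #                    "int add(int a, int b) { return a + b; }")
    #     ffi.compile(verbose=True)    # writes and builds _example.c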
def init_once(self, func, tag):
# Read _init_once_cache[tag], which is either (False, lock) if
# we're calling the function now in some thread, or (True, result).
# Don't call setdefault() in most cases, to avoid allocating and
        # immediately freeing a lock; but still use setdefault() to avoid
# races.
try:
x = self._init_once_cache[tag]
except KeyError:
x = self._init_once_cache.setdefault(tag, (False, allocate_lock()))
# Common case: we got (True, result), so we return the result.
if x[0]:
return x[1]
# Else, it's a lock. Acquire it to serialize the following tests.
with x[1]:
# Read again from _init_once_cache the current status.
x = self._init_once_cache[tag]
if x[0]:
return x[1]
# Call the function and store the result back.
result = func()
self._init_once_cache[tag] = (True, result)
return result
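    # Illustrative sketch: run one-time setup exactly once across threads,
    # caching its result under the tag ('lib.initialize' is hypothetical):
    #     def _setup():
    #         return lib.initialize()
    #     handle = ffi.init_once(_setup, "init")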
def embedding_init_code(self, pysource):
if self._embedding:
raise ValueError("embedding_init_code() can only be called once")
# fix 'pysource' before it gets dumped into the C file:
# - remove empty lines at the beginning, so it starts at "line 1"
# - dedent, if all non-empty lines are indented
# - check for SyntaxErrors
import re
match = re.match(r'\s*\n', pysource)
if match:
pysource = pysource[match.end():]
lines = pysource.splitlines() or ['']
prefix = re.match(r'\s*', lines[0]).group()
for i in range(1, len(lines)):
line = lines[i]
if line.rstrip():
while not line.startswith(prefix):
prefix = prefix[:-1]
i = len(prefix)
lines = [line[i:]+'\n' for line in lines]
pysource = ''.join(lines)
#
compile(pysource, "cffi_init", "exec")
#
self._embedding = pysource
def def_extern(self, *args, **kwds):
raise ValueError("ffi.def_extern() is only available on API-mode FFI "
"objects")
def list_types(self):
"""Returns the user type names known to this FFI instance.
This returns a tuple containing three lists of names:
(typedef_names, names_of_structs, names_of_unions)
"""
typedefs = []
structs = []
unions = []
for key in self._parser._declarations:
if key.startswith('typedef '):
typedefs.append(key[8:])
elif key.startswith('struct '):
structs.append(key[7:])
elif key.startswith('union '):
unions.append(key[6:])
typedefs.sort()
structs.sort()
unions.sort()
return (typedefs, structs, unions)
def _load_backend_lib(backend, name, flags):
import os
if not isinstance(name, basestring):
if sys.platform != "win32" or name is not None:
return backend.load_library(name, flags)
name = "c" # Windows: load_library(None) fails, but this works
# on Python 2 (backward compatibility hack only)
first_error = None
if '.' in name or '/' in name or os.sep in name:
try:
return backend.load_library(name, flags)
except OSError as e:
first_error = e
import ctypes.util
path = ctypes.util.find_library(name)
if path is None:
if name == "c" and sys.platform == "win32" and sys.version_info >= (3,):
raise OSError("dlopen(None) cannot work on Windows for Python 3 "
"(see http://bugs.python.org/issue23606)")
msg = ("ctypes.util.find_library() did not manage "
"to locate a library called %r" % (name,))
if first_error is not None:
msg = "%s. Additionally, %s" % (first_error, msg)
raise OSError(msg)
return backend.load_library(path, flags)
def _make_ffi_library(ffi, libname, flags):
backend = ffi._backend
backendlib = _load_backend_lib(backend, libname, flags)
#
def accessor_function(name):
key = 'function ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
value = backendlib.load_function(BType, name)
library.__dict__[name] = value
#
def accessor_variable(name):
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
read_variable = backendlib.read_variable
write_variable = backendlib.write_variable
setattr(FFILibrary, name, property(
lambda self: read_variable(BType, name),
lambda self, value: write_variable(BType, name, value)))
#
def addressof_var(name):
try:
return addr_variables[name]
except KeyError:
with ffi._lock:
if name not in addr_variables:
key = 'variable ' + name
tp, _ = ffi._parser._declarations[key]
BType = ffi._get_cached_btype(tp)
if BType.kind != 'array':
BType = model.pointer_cache(ffi, BType)
p = backendlib.load_function(BType, name)
addr_variables[name] = p
return addr_variables[name]
#
def accessor_constant(name):
raise NotImplementedError("non-integer constant '%s' cannot be "
"accessed from a dlopen() library" % (name,))
#
def accessor_int_constant(name):
library.__dict__[name] = ffi._parser._int_constants[name]
#
accessors = {}
accessors_version = [False]
addr_variables = {}
#
def update_accessors():
if accessors_version[0] is ffi._cdef_version:
return
#
for key, (tp, _) in ffi._parser._declarations.items():
if not isinstance(tp, model.EnumType):
tag, name = key.split(' ', 1)
if tag == 'function':
accessors[name] = accessor_function
elif tag == 'variable':
accessors[name] = accessor_variable
elif tag == 'constant':
accessors[name] = accessor_constant
else:
for i, enumname in enumerate(tp.enumerators):
def accessor_enum(name, tp=tp, i=i):
tp.check_not_partial()
library.__dict__[name] = tp.enumvalues[i]
accessors[enumname] = accessor_enum
for name in ffi._parser._int_constants:
accessors.setdefault(name, accessor_int_constant)
accessors_version[0] = ffi._cdef_version
#
def make_accessor(name):
with ffi._lock:
if name in library.__dict__ or name in FFILibrary.__dict__:
return # added by another thread while waiting for the lock
if name not in accessors:
update_accessors()
if name not in accessors:
raise AttributeError(name)
accessors[name](name)
#
class FFILibrary(object):
def __getattr__(self, name):
make_accessor(name)
return getattr(self, name)
def __setattr__(self, name, value):
try:
property = getattr(self.__class__, name)
except AttributeError:
make_accessor(name)
setattr(self, name, value)
else:
property.__set__(self, value)
def __dir__(self):
with ffi._lock:
update_accessors()
return accessors.keys()
def __addressof__(self, name):
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
make_accessor(name)
if name in library.__dict__:
return library.__dict__[name]
if name in FFILibrary.__dict__:
return addressof_var(name)
raise AttributeError("cffi library has no function or "
"global variable named '%s'" % (name,))
def __cffi_close__(self):
backendlib.close_lib()
self.__dict__.clear()
#
if isinstance(libname, basestring):
try:
if not isinstance(libname, str): # unicode, on Python 2
libname = libname.encode('utf-8')
FFILibrary.__name__ = 'FFILibrary_%s' % libname
except UnicodeError:
pass
library = FFILibrary()
return library, library.__dict__
def _builtin_function_type(func):
# a hack to make at least ffi.typeof(builtin_function) work,
# if the builtin function was obtained by 'vengine_cpy'.
import sys
try:
module = sys.modules[func.__module__]
ffi = module._cffi_original_ffi
types_of_builtin_funcs = module._cffi_types_of_builtin_funcs
tp = types_of_builtin_funcs[func]
except (KeyError, AttributeError, TypeError):
return None
else:
with ffi._lock:
return ffi._get_cached_btype(tp)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/setuptools_ext.py | import os
import sys
try:
basestring
except NameError:
# Python 3.x
basestring = str
def error(msg):
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError(msg)
def execfile(filename, glob):
# We use execfile() (here rewritten for Python 3) instead of
# __import__() to load the build script. The problem with
# a normal import is that in some packages, the intermediate
# __init__.py files may already try to import the file that
# we are generating.
with open(filename) as f:
src = f.read()
src += '\n' # Python 2.6 compatibility
code = compile(src, filename, 'exec')
exec(code, glob, glob)
def add_cffi_module(dist, mod_spec):
from cffi.api import FFI
if not isinstance(mod_spec, basestring):
error("argument to 'cffi_modules=...' must be a str or a list of str,"
" not %r" % (type(mod_spec).__name__,))
mod_spec = str(mod_spec)
try:
build_file_name, ffi_var_name = mod_spec.split(':')
except ValueError:
error("%r must be of the form 'path/build.py:ffi_variable'" %
(mod_spec,))
if not os.path.exists(build_file_name):
ext = ''
rewritten = build_file_name.replace('.', '/') + '.py'
if os.path.exists(rewritten):
ext = ' (rewrite cffi_modules to [%r])' % (
rewritten + ':' + ffi_var_name,)
error("%r does not name an existing file%s" % (build_file_name, ext))
mod_vars = {'__name__': '__cffi__', '__file__': build_file_name}
execfile(build_file_name, mod_vars)
try:
ffi = mod_vars[ffi_var_name]
except KeyError:
error("%r: object %r not found in module" % (mod_spec,
ffi_var_name))
if not isinstance(ffi, FFI):
ffi = ffi() # maybe it's a function instead of directly an ffi
if not isinstance(ffi, FFI):
error("%r is not an FFI instance (got %r)" % (mod_spec,
type(ffi).__name__))
if not hasattr(ffi, '_assigned_source'):
error("%r: the set_source() method was not called" % (mod_spec,))
module_name, source, source_extension, kwds = ffi._assigned_source
if ffi._windows_unicode:
kwds = kwds.copy()
ffi._apply_windows_unicode(kwds)
if source is None:
_add_py_module(dist, ffi, module_name)
else:
_add_c_module(dist, ffi, module_name, source, source_extension, kwds)
def _set_py_limited_api(Extension, kwds):
"""
Add py_limited_api to kwds if setuptools >= 26 is in use.
Do not alter the setting if it already exists.
Setuptools takes care of ignoring the flag on Python 2 and PyPy.
CPython itself should ignore the flag in a debugging version
(by not listing .abi3.so in the extensions it supports), but
    it doesn't so far, causing trouble.  That's why we check
for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent
of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401)
On Windows, with CPython <= 3.4, it's better not to use py_limited_api
because virtualenv *still* doesn't copy PYTHON3.DLL on these versions.
Recently (2020) we started shipping only >= 3.5 wheels, though. So
we'll give it another try and set py_limited_api on Windows >= 3.5.
"""
from cffi import recompiler
if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount')
and recompiler.USE_LIMITED_API):
import setuptools
try:
setuptools_major_version = int(setuptools.__version__.partition('.')[0])
if setuptools_major_version >= 26:
kwds['py_limited_api'] = True
except ValueError: # certain development versions of setuptools
# If we don't know the version number of setuptools, we
# try to set 'py_limited_api' anyway. At worst, we get a
# warning.
kwds['py_limited_api'] = True
return kwds
def _add_c_module(dist, ffi, module_name, source, source_extension, kwds):
from distutils.core import Extension
# We are a setuptools extension. Need this build_ext for py_limited_api.
from setuptools.command.build_ext import build_ext
from distutils.dir_util import mkpath
from distutils import log
from cffi import recompiler
allsources = ['$PLACEHOLDER']
allsources.extend(kwds.pop('sources', []))
kwds = _set_py_limited_api(Extension, kwds)
ext = Extension(name=module_name, sources=allsources, **kwds)
def make_mod(tmpdir, pre_run=None):
c_file = os.path.join(tmpdir, module_name + source_extension)
log.info("generating cffi module %r" % c_file)
mkpath(tmpdir)
# a setuptools-only, API-only hook: called with the "ext" and "ffi"
# arguments just before we turn the ffi into C code. To use it,
# subclass the 'distutils.command.build_ext.build_ext' class and
# add a method 'def pre_run(self, ext, ffi)'.
if pre_run is not None:
pre_run(ext, ffi)
updated = recompiler.make_c_source(ffi, module_name, source, c_file)
if not updated:
log.info("already up-to-date")
return c_file
if dist.ext_modules is None:
dist.ext_modules = []
dist.ext_modules.append(ext)
base_class = dist.cmdclass.get('build_ext', build_ext)
class build_ext_make_mod(base_class):
def run(self):
if ext.sources[0] == '$PLACEHOLDER':
pre_run = getattr(self, 'pre_run', None)
ext.sources[0] = make_mod(self.build_temp, pre_run)
base_class.run(self)
dist.cmdclass['build_ext'] = build_ext_make_mod
# NB. multiple runs here will create multiple 'build_ext_make_mod'
# classes. Even in this case the 'build_ext' command should be
# run once; but just in case, the logic above does nothing if
# called again.
def _add_py_module(dist, ffi, module_name):
from distutils.dir_util import mkpath
from setuptools.command.build_py import build_py
from setuptools.command.build_ext import build_ext
from distutils import log
from cffi import recompiler
def generate_mod(py_file):
log.info("generating cffi module %r" % py_file)
mkpath(os.path.dirname(py_file))
updated = recompiler.make_py_source(ffi, module_name, py_file)
if not updated:
log.info("already up-to-date")
base_class = dist.cmdclass.get('build_py', build_py)
class build_py_make_mod(base_class):
def run(self):
base_class.run(self)
module_path = module_name.split('.')
module_path[-1] += '.py'
generate_mod(os.path.join(self.build_lib, *module_path))
def get_source_files(self):
# This is called from 'setup.py sdist' only. Exclude
            # the generated .py module in this case.
saved_py_modules = self.py_modules
try:
if saved_py_modules:
self.py_modules = [m for m in saved_py_modules
if m != module_name]
return base_class.get_source_files(self)
finally:
self.py_modules = saved_py_modules
dist.cmdclass['build_py'] = build_py_make_mod
# distutils and setuptools have no notion I could find of a
# generated python module. If we don't add module_name to
# dist.py_modules, then things mostly work but there are some
# combination of options (--root and --record) that will miss
# the module. So we add it here, which gives a few apparently
# harmless warnings about not finding the file outside the
# build directory.
# Then we need to hack more in get_source_files(); see above.
if dist.py_modules is None:
dist.py_modules = []
dist.py_modules.append(module_name)
# the following is only for "build_ext -i"
base_class_2 = dist.cmdclass.get('build_ext', build_ext)
class build_ext_make_mod(base_class_2):
def run(self):
base_class_2.run(self)
if self.inplace:
# from get_ext_fullpath() in distutils/command/build_ext.py
module_path = module_name.split('.')
package = '.'.join(module_path[:-1])
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
file_name = module_path[-1] + '.py'
generate_mod(os.path.join(package_dir, file_name))
dist.cmdclass['build_ext'] = build_ext_make_mod
def cffi_modules(dist, attr, value):
assert attr == 'cffi_modules'
if isinstance(value, basestring):
value = [value]
for cffi_module in value:
add_cffi_module(dist, cffi_module)
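# Illustrative sketch of the setup.py side of this entry point, where
# 'pkg/foo_build.py' is a hypothetical build script defining an FFI
# object named 'ffibuilder':
#     from setuptools import setup
#     setup(
#         name="example",
#         setup_requires=["cffi>=1.0.0"],
#         cffi_modules=["pkg/foo_build.py:ffibuilder"],
#         install_requires=["cffi>=1.0.0"],
#     )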
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/_cffi_errors.h | #ifndef CFFI_MESSAGEBOX
# ifdef _MSC_VER
# define CFFI_MESSAGEBOX 1
# else
# define CFFI_MESSAGEBOX 0
# endif
#endif
#if CFFI_MESSAGEBOX
/* Windows only: logic to take the Python-CFFI embedding logic
initialization errors and display them in a background thread
with MessageBox. The idea is that if the whole program closes
as a result of this problem, then likely it is already a console
program and you can read the stderr output in the console too.
If it is not a console program, then it will likely show its own
dialog to complain, or generally not abruptly close, and for this
case the background thread should stay alive.
*/
static void *volatile _cffi_bootstrap_text;
static PyObject *_cffi_start_error_capture(void)
{
PyObject *result = NULL;
PyObject *x, *m, *bi;
if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text,
(void *)1, NULL) != NULL)
return (PyObject *)1;
m = PyImport_AddModule("_cffi_error_capture");
if (m == NULL)
goto error;
result = PyModule_GetDict(m);
if (result == NULL)
goto error;
#if PY_MAJOR_VERSION >= 3
bi = PyImport_ImportModule("builtins");
#else
bi = PyImport_ImportModule("__builtin__");
#endif
if (bi == NULL)
goto error;
PyDict_SetItemString(result, "__builtins__", bi);
Py_DECREF(bi);
x = PyRun_String(
"import sys\n"
"class FileLike:\n"
" def write(self, x):\n"
" try:\n"
" of.write(x)\n"
" except: pass\n"
" self.buf += x\n"
" def flush(self):\n"
" pass\n"
"fl = FileLike()\n"
"fl.buf = ''\n"
"of = sys.stderr\n"
"sys.stderr = fl\n"
"def done():\n"
" sys.stderr = of\n"
" return fl.buf\n", /* make sure the returned value stays alive */
Py_file_input,
result, result);
Py_XDECREF(x);
error:
if (PyErr_Occurred())
{
PyErr_WriteUnraisable(Py_None);
PyErr_Clear();
}
return result;
}
#pragma comment(lib, "user32.lib")
static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored)
{
Sleep(666); /* may be interrupted if the whole process is closing */
#if PY_MAJOR_VERSION >= 3
MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text,
L"Python-CFFI error",
MB_OK | MB_ICONERROR);
#else
MessageBoxA(NULL, (char *)_cffi_bootstrap_text,
"Python-CFFI error",
MB_OK | MB_ICONERROR);
#endif
_cffi_bootstrap_text = NULL;
return 0;
}
static void _cffi_stop_error_capture(PyObject *ecap)
{
PyObject *s;
void *text;
if (ecap == (PyObject *)1)
return;
if (ecap == NULL)
goto error;
s = PyRun_String("done()", Py_eval_input, ecap, ecap);
if (s == NULL)
goto error;
/* Show a dialog box, but in a background thread, and
never show multiple dialog boxes at once. */
#if PY_MAJOR_VERSION >= 3
text = PyUnicode_AsWideCharString(s, NULL);
#else
text = PyString_AsString(s);
#endif
_cffi_bootstrap_text = text;
if (text != NULL)
{
HANDLE h;
h = CreateThread(NULL, 0, _cffi_bootstrap_dialog,
NULL, 0, NULL);
if (h != NULL)
CloseHandle(h);
}
/* decref the string, but it should stay alive as 'fl.buf'
in the small module above. It will really be freed only if
we later get another similar error. So it's a leak of at
most one copy of the small module. That's fine for this
situation which is usually a "fatal error" anyway. */
Py_DECREF(s);
PyErr_Clear();
return;
error:
_cffi_bootstrap_text = NULL;
PyErr_Clear();
}
#else
static PyObject *_cffi_start_error_capture(void) { return NULL; }
static void _cffi_stop_error_capture(PyObject *ecap) { }
#endif
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/lock.py | import sys
if sys.version_info < (3,):
try:
from thread import allocate_lock
except ImportError:
from dummy_thread import allocate_lock
else:
try:
from _thread import allocate_lock
except ImportError:
from _dummy_thread import allocate_lock
##import sys
##l1 = allocate_lock
##class allocate_lock(object):
## def __init__(self):
## self._real = l1()
## def __enter__(self):
## for i in range(4, 0, -1):
## print sys._getframe(i).f_code
## print
## return self._real.__enter__()
## def __exit__(self, *args):
## return self._real.__exit__(*args)
## def acquire(self, f):
## assert f is False
## return self._real.acquire(f)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/ffiplatform.py | import sys, os
from .error import VerificationError
LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs',
'extra_objects', 'depends']
def get_extension(srcfilename, modname, sources=(), **kwds):
_hack_at_distutils()
from distutils.core import Extension
allsources = [srcfilename]
for src in sources:
allsources.append(os.path.normpath(src))
return Extension(name=modname, sources=allsources, **kwds)
def compile(tmpdir, ext, compiler_verbose=0, debug=None):
"""Compile a C extension module using distutils."""
_hack_at_distutils()
saved_environ = os.environ.copy()
try:
outputfilename = _build(tmpdir, ext, compiler_verbose, debug)
outputfilename = os.path.abspath(outputfilename)
finally:
        # workaround for a distutils bug where some env vars can
        # become longer and longer every time they are used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
def _build(tmpdir, ext, compiler_verbose=0, debug=None):
# XXX compact but horrible :-(
from distutils.core import Distribution
import distutils.errors, distutils.log
#
dist = Distribution({'ext_modules': [ext]})
dist.parse_config_files()
options = dist.get_option_dict('build_ext')
if debug is None:
debug = sys.flags.debug
options['debug'] = ('ffiplatform', debug)
options['force'] = ('ffiplatform', True)
options['build_lib'] = ('ffiplatform', tmpdir)
options['build_temp'] = ('ffiplatform', tmpdir)
#
try:
old_level = distutils.log.set_threshold(0) or 0
try:
distutils.log.set_verbosity(compiler_verbose)
dist.run_command('build_ext')
cmd_obj = dist.get_command_obj('build_ext')
[soname] = cmd_obj.get_outputs()
finally:
distutils.log.set_threshold(old_level)
except (distutils.errors.CompileError,
distutils.errors.LinkError) as e:
raise VerificationError('%s: %s' % (e.__class__.__name__, e))
#
return soname
try:
from os.path import samefile
except ImportError:
def samefile(f1, f2):
return os.path.abspath(f1) == os.path.abspath(f2)
def maybe_relative_path(path):
if not os.path.isabs(path):
return path # already relative
dir = path
names = []
while True:
prevdir = dir
dir, name = os.path.split(prevdir)
if dir == prevdir or not dir:
return path # failed to make it relative
names.append(name)
try:
if samefile(dir, os.curdir):
names.reverse()
return os.path.join(*names)
except OSError:
pass
# ____________________________________________________________
try:
int_or_long = (int, long)
import cStringIO
except NameError:
int_or_long = int # Python 3
import io as cStringIO
def _flatten(x, f):
if isinstance(x, str):
f.write('%ds%s' % (len(x), x))
elif isinstance(x, dict):
keys = sorted(x.keys())
f.write('%dd' % len(keys))
for key in keys:
_flatten(key, f)
_flatten(x[key], f)
elif isinstance(x, (list, tuple)):
f.write('%dl' % len(x))
for value in x:
_flatten(value, f)
elif isinstance(x, int_or_long):
f.write('%di' % (x,))
else:
        raise TypeError(
            "the keywords to verify() contain an unsupported object %r" % (x,))
def flatten(x):
f = cStringIO.StringIO()
_flatten(x, f)
return f.getvalue()
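# Illustrative sketch of the encoding produced by flatten(), which is
# used when hashing the keyword arguments of verify():
#     flatten({'libraries': ['m']})    # -> '1d9slibraries1l1sm'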
def _hack_at_distutils():
# Windows-only workaround for some configurations: see
# https://bugs.python.org/issue23246 (Python 2.7 with
# a specific MS compiler suite download)
if sys.platform == "win32":
try:
import setuptools # for side-effects, patches distutils
except ImportError:
pass
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/_embedding.h |
/***** Support code for embedding *****/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_WIN32)
# define CFFI_DLLEXPORT __declspec(dllexport)
#elif defined(__GNUC__)
# define CFFI_DLLEXPORT __attribute__((visibility("default")))
#else
# define CFFI_DLLEXPORT /* nothing */
#endif
/* There are two global variables of type _cffi_call_python_fnptr:
* _cffi_call_python, which we declare just below, is the one called
by ``extern "Python"`` implementations.
* _cffi_call_python_org, which on CPython is actually part of the
_cffi_exports[] array, is the function pointer copied from
_cffi_backend. If _cffi_start_python() fails, then this is set
to NULL; otherwise, it should never be NULL.
After initialization is complete, both are equal. However, the
first one remains equal to &_cffi_start_and_call_python until the
very end of initialization, when we are (or should be) sure that
concurrent threads also see a completely initialized world, and
only then is it changed.
*/
#undef _cffi_call_python
typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *);
static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *);
static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python;
#ifndef _MSC_VER
/* --- Assuming a GCC not infinitely old --- */
# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n)
# define cffi_write_barrier() __sync_synchronize()
# if !defined(__amd64__) && !defined(__x86_64__) && \
!defined(__i386__) && !defined(__i386)
# define cffi_read_barrier() __sync_synchronize()
# else
# define cffi_read_barrier() (void)0
# endif
#else
/* --- Windows threads version --- */
# include <Windows.h>
# define cffi_compare_and_swap(l,o,n) \
(InterlockedCompareExchangePointer(l,n,o) == (o))
# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0)
# define cffi_read_barrier() (void)0
static volatile LONG _cffi_dummy;
#endif
#ifdef WITH_THREAD
# ifndef _MSC_VER
# include <pthread.h>
static pthread_mutex_t _cffi_embed_startup_lock;
# else
static CRITICAL_SECTION _cffi_embed_startup_lock;
# endif
static char _cffi_embed_startup_lock_ready = 0;
#endif
static void _cffi_acquire_reentrant_mutex(void)
{
static void *volatile lock = NULL;
while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) {
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: pthread_mutex_init() should be very fast, and
this is only run at start-up anyway. */
}
#ifdef WITH_THREAD
if (!_cffi_embed_startup_lock_ready) {
# ifndef _MSC_VER
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&_cffi_embed_startup_lock, &attr);
# else
InitializeCriticalSection(&_cffi_embed_startup_lock);
# endif
_cffi_embed_startup_lock_ready = 1;
}
#endif
while (!cffi_compare_and_swap(&lock, (void *)1, NULL))
;
#ifndef _MSC_VER
pthread_mutex_lock(&_cffi_embed_startup_lock);
#else
EnterCriticalSection(&_cffi_embed_startup_lock);
#endif
}
static void _cffi_release_reentrant_mutex(void)
{
#ifndef _MSC_VER
pthread_mutex_unlock(&_cffi_embed_startup_lock);
#else
LeaveCriticalSection(&_cffi_embed_startup_lock);
#endif
}
/********** CPython-specific section **********/
#ifndef PYPY_VERSION
#include "_cffi_errors.h"
#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX]
PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */
static void _cffi_py_initialize(void)
{
/* XXX use initsigs=0, which "skips initialization registration of
signal handlers, which might be useful when Python is
embedded" according to the Python docs. But review and think
if it should be a user-controllable setting.
XXX we should also give a way to write errors to a buffer
instead of to stderr.
XXX if importing 'site' fails, CPython (any version) calls
exit(). Should we try to work around this behavior here?
*/
Py_InitializeEx(0);
}
static int _cffi_initialize_python(void)
{
/* This initializes Python, imports _cffi_backend, and then the
present .dll/.so is set up as a CPython C extension module.
*/
int result;
PyGILState_STATE state;
PyObject *pycode=NULL, *global_dict=NULL, *x;
PyObject *builtins;
state = PyGILState_Ensure();
/* Call the initxxx() function from the present module. It will
create and initialize us as a CPython extension module, instead
of letting the startup Python code do it---it might reimport
the same .dll/.so and get maybe confused on some platforms.
It might also have troubles locating the .dll/.so again for all
I know.
*/
(void)_CFFI_PYTHON_STARTUP_FUNC();
if (PyErr_Occurred())
goto error;
/* Now run the Python code provided to ffi.embedding_init_code().
*/
pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE,
"<init code for '" _CFFI_MODULE_NAME "'>",
Py_file_input);
if (pycode == NULL)
goto error;
global_dict = PyDict_New();
if (global_dict == NULL)
goto error;
builtins = PyEval_GetBuiltins();
if (builtins == NULL)
goto error;
if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0)
goto error;
x = PyEval_EvalCode(
#if PY_MAJOR_VERSION < 3
(PyCodeObject *)
#endif
pycode, global_dict, global_dict);
if (x == NULL)
goto error;
Py_DECREF(x);
/* Done! Now if we've been called from
_cffi_start_and_call_python() in an ``extern "Python"``, we can
only hope that the Python code did correctly set up the
corresponding @ffi.def_extern() function. Otherwise, the
general logic of ``extern "Python"`` functions (inside the
_cffi_backend module) will find that the reference is still
missing and print an error.
*/
result = 0;
done:
Py_XDECREF(pycode);
Py_XDECREF(global_dict);
PyGILState_Release(state);
return result;
error:;
{
        /* Print as much information as is potentially useful.
           Debugging load-time failures with embedding is not fun.
*/
PyObject *ecap;
PyObject *exception, *v, *tb, *f, *modules, *mod;
PyErr_Fetch(&exception, &v, &tb);
ecap = _cffi_start_error_capture();
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString(
"Failed to initialize the Python-CFFI embedding logic:\n\n", f);
}
if (exception != NULL) {
PyErr_NormalizeException(&exception, &v, &tb);
PyErr_Display(exception, v, tb);
}
Py_XDECREF(exception);
Py_XDECREF(v);
Py_XDECREF(tb);
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
"\ncompiled with cffi version: 1.15.1"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
if (mod == NULL) {
PyFile_WriteString("not loaded", f);
}
else {
v = PyObject_GetAttrString(mod, "__file__");
PyFile_WriteObject(v, f, 0);
Py_XDECREF(v);
}
PyFile_WriteString("\nsys.path: ", f);
PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0);
PyFile_WriteString("\n\n", f);
}
_cffi_stop_error_capture(ecap);
}
result = -1;
goto done;
}
#if PY_VERSION_HEX < 0x03080000
PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */
#endif
static int _cffi_carefully_make_gil(void)
{
/* This does the basic initialization of Python. It can be called
completely concurrently from unrelated threads. It assumes
that we don't hold the GIL before (if it exists), and we don't
hold it afterwards.
(What it really does used to be completely different in Python 2
and Python 3, with the Python 2 solution avoiding the spin-lock
around the Py_InitializeEx() call. However, after recent changes
to CPython 2.7 (issue #358) it no longer works. So we use the
Python 3 solution everywhere.)
This initializes Python by calling Py_InitializeEx().
Important: this must not be called concurrently at all.
So we use a global variable as a simple spin lock. This global
variable must be from 'libpythonX.Y.so', not from this
cffi-based extension module, because it must be shared from
different cffi-based extension modules.
In Python < 3.8, we choose
_PyParser_TokenNames[0] as a completely arbitrary pointer value
that is never written to. The default is to point to the
string "ENDMARKER". We change it temporarily to point to the
next character in that string. (Yes, I know it's REALLY
obscure.)
In Python >= 3.8, this string array is no longer writable, so
instead we pick PyCapsuleType.tp_version_tag. We can't change
Python < 3.8 because someone might use a mixture of cffi
embedded modules, some of which were compiled before this file
changed.
*/
#ifdef WITH_THREAD
# if PY_VERSION_HEX < 0x03080000
char *volatile *lock = (char *volatile *)_PyParser_TokenNames;
char *old_value, *locked_value;
while (1) { /* spin loop */
old_value = *lock;
locked_value = old_value + 1;
if (old_value[0] == 'E') {
assert(old_value[1] == 'N');
if (cffi_compare_and_swap(lock, old_value, locked_value))
break;
}
else {
assert(old_value[0] == 'N');
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: PyEval_InitThreads() should be very fast, and
this is only run at start-up anyway. */
}
}
# else
int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag;
int old_value, locked_value;
assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG));
while (1) { /* spin loop */
old_value = *lock;
locked_value = -42;
if (old_value == 0) {
if (cffi_compare_and_swap(lock, old_value, locked_value))
break;
}
else {
assert(old_value == locked_value);
/* should ideally do a spin loop instruction here, but
hard to do it portably and doesn't really matter I
think: PyEval_InitThreads() should be very fast, and
this is only run at start-up anyway. */
}
}
# endif
#endif
/* call Py_InitializeEx() */
if (!Py_IsInitialized()) {
_cffi_py_initialize();
#if PY_VERSION_HEX < 0x03070000
PyEval_InitThreads();
#endif
PyEval_SaveThread(); /* release the GIL */
/* the returned tstate must be the one that has been stored into the
autoTLSkey by _PyGILState_Init() called from Py_Initialize(). */
}
else {
#if PY_VERSION_HEX < 0x03070000
/* PyEval_InitThreads() is always a no-op from CPython 3.7 */
PyGILState_STATE state = PyGILState_Ensure();
PyEval_InitThreads();
PyGILState_Release(state);
#endif
}
#ifdef WITH_THREAD
/* release the lock */
while (!cffi_compare_and_swap(lock, locked_value, old_value))
;
#endif
return 0;
}
/********** end CPython-specific section **********/
#else
/********** PyPy-specific section **********/
PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */
static struct _cffi_pypy_init_s {
const char *name;
void *func; /* function pointer */
const char *code;
} _cffi_pypy_init = {
_CFFI_MODULE_NAME,
_CFFI_PYTHON_STARTUP_FUNC,
_CFFI_PYTHON_STARTUP_CODE,
};
extern int pypy_carefully_make_gil(const char *);
extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *);
static int _cffi_carefully_make_gil(void)
{
return pypy_carefully_make_gil(_CFFI_MODULE_NAME);
}
static int _cffi_initialize_python(void)
{
return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init);
}
/********** end PyPy-specific section **********/
#endif
#ifdef __GNUC__
__attribute__((noinline))
#endif
static _cffi_call_python_fnptr _cffi_start_python(void)
{
/* Delicate logic to initialize Python. This function can be
called multiple times concurrently, e.g. when the process calls
its first ``extern "Python"`` functions in multiple threads at
once. It can also be called recursively, in which case we must
ignore it. We also have to consider what occurs if several
different cffi-based extensions reach this code in parallel
threads---it is a different copy of the code, then, and we
can't have any shared global variable unless it comes from
'libpythonX.Y.so'.
Idea:
* _cffi_carefully_make_gil(): "carefully" call
PyEval_InitThreads() (possibly with Py_InitializeEx() first).
* then we use a (local) custom lock to make sure that a call to this
cffi-based extension will wait if another call to the *same*
extension is running the initialization in another thread.
It is reentrant, so that a recursive call will not block, but
only one from a different thread.
* then we grab the GIL and (Python 2) we call Py_InitializeEx().
At this point, concurrent calls to Py_InitializeEx() are not
possible: we have the GIL.
* do the rest of the specific initialization, which may
temporarily release the GIL but not the custom lock.
Only release the custom lock when we are done.
*/
static char called = 0;
if (_cffi_carefully_make_gil() != 0)
return NULL;
_cffi_acquire_reentrant_mutex();
/* Here the GIL exists, but we don't have it. We're only protected
from concurrency by the reentrant mutex. */
/* This file only initializes the embedded module once, the first
time this is called, even if there are subinterpreters. */
if (!called) {
called = 1; /* invoke _cffi_initialize_python() only once,
but don't set '_cffi_call_python' right now,
otherwise concurrent threads won't call
this function at all (we need them to wait) */
if (_cffi_initialize_python() == 0) {
/* now initialization is finished. Switch to the fast-path. */
/* We would like nobody to see the new value of
'_cffi_call_python' without also seeing the rest of the
data initialized. However, this is not possible. But
the new value of '_cffi_call_python' is the function
'cffi_call_python()' from _cffi_backend. So: */
cffi_write_barrier();
/* ^^^ we put a write barrier here, and a corresponding
read barrier at the start of cffi_call_python(). This
ensures that after that read barrier, we see everything
done here before the write barrier.
*/
assert(_cffi_call_python_org != NULL);
_cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org;
}
else {
/* initialization failed. Reset this to NULL, even if it was
already set to some other value. Future calls to
_cffi_start_python() are still forced to occur, and will
always return NULL from now on. */
_cffi_call_python_org = NULL;
}
}
_cffi_release_reentrant_mutex();
return (_cffi_call_python_fnptr)_cffi_call_python_org;
}
static
void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args)
{
_cffi_call_python_fnptr fnptr;
int current_err = errno;
#ifdef _MSC_VER
int current_lasterr = GetLastError();
#endif
fnptr = _cffi_start_python();
if (fnptr == NULL) {
fprintf(stderr, "function %s() called, but initialization code "
"failed. Returning 0.\n", externpy->name);
memset(args, 0, externpy->size_of_result);
}
#ifdef _MSC_VER
SetLastError(current_lasterr);
#endif
errno = current_err;
if (fnptr != NULL)
fnptr(externpy, args);
}
/* The cffi_start_python() function makes sure Python is initialized
and our cffi module is set up. It can be called manually from the
user C code. The same effect is obtained automatically from any
dll-exported ``extern "Python"`` function. This function returns
-1 if initialization failed, 0 if all is OK. */
_CFFI_UNUSED_FN
static int cffi_start_python(void)
{
if (_cffi_call_python == &_cffi_start_and_call_python) {
if (_cffi_start_python() == NULL)
return -1;
}
cffi_read_barrier();
return 0;
}
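/* Illustrative sketch: from user C code, force initialization before
   calling anything else from this module:

       if (cffi_start_python() != 0) {
           fprintf(stderr, "embedded Python initialization failed\n");
           abort();
       }
*/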
#undef cffi_compare_and_swap
#undef cffi_write_barrier
#undef cffi_read_barrier
#ifdef __cplusplus
}
#endif
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy-1.23.5.dist-info/top_level.txt | numpy
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy-1.23.5.dist-info/entry_points.txt | [array_api]
numpy = numpy.array_api
[console_scripts]
f2py = numpy.f2py.f2py2e:main
[pyinstaller40]
hook-dirs = numpy:_pyinstaller_hooks_dir
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy-1.23.5.dist-info/LICENSES_bundled.txt | The NumPy repository and source distributions bundle several libraries that are
compatibly licensed. We list these here.
Name: lapack-lite
Files: numpy/linalg/lapack_lite/*
License: BSD-3-Clause
For details, see numpy/linalg/lapack_lite/LICENSE.txt
Name: tempita
Files: tools/npy_tempita/*
License: MIT
For details, see tools/npy_tempita/license.txt
Name: dragon4
Files: numpy/core/src/multiarray/dragon4.c
License: MIT
For license text, see numpy/core/src/multiarray/dragon4.c
Name: libdivide
Files: numpy/core/include/numpy/libdivide/*
License: Zlib
For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy-1.23.5.dist-info/LICENSE.txt | Copyright (c) 2005-2022, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NumPy Developers nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----
This binary distribution of NumPy also bundles the following software:
Name: OpenBLAS
Files: extra-dll\libopenb*.dll
Description: bundled as a dynamically linked library
Availability: https://github.com/xianyi/OpenBLAS/
License: 3-clause BSD
Copyright (c) 2011-2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Name: LAPACK
Files: extra-dll\libopenb*.dll
Description: bundled in OpenBLAS
Availability: https://github.com/xianyi/OpenBLAS/
License: 3-clause BSD
Copyright (c) 1992-2013 The University of Tennessee and The University
of Tennessee Research Foundation. All rights
reserved.
Copyright (c) 2000-2013 The University of California Berkeley. All
rights reserved.
Copyright (c) 2006-2013 The University of Colorado Denver. All rights
reserved.
$COPYRIGHT$
Additional copyrights may follow
$HEADER$
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer listed
in this license in the documentation and/or other materials
provided with the distribution.
- Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
The copyright holders provide no reassurances that the source code
provided does not infringe any patent, copyright, or any other
intellectual property rights of third parties. The copyright holders
disclaim any liability to any recipient for claims brought against
recipient by any third party for infringement of that party's
intellectual property rights.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Name: GCC runtime library
Files: extra-dll\*.dll
Description: statically linked, in DLL files compiled with gfortran only
Availability: https://gcc.gnu.org/viewcvs/gcc/
License: GPLv3 + runtime exception
Copyright (C) 2002-2017 Free Software Foundation, Inc.
Libgfortran is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>.
Name: Microsoft Visual C++ Runtime Files
Files: extra-dll\msvcp140.dll
License: MSVC
https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime
Subject to the License Terms for the software, you may copy and
distribute with your program any of the files within the following
folder and its subfolders except as noted below. You may not modify
these files.
C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist
You may not distribute the contents of the following folders:
C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist
C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist
Subject to the License Terms for the software, you may copy and
distribute the following files with your program in your program’s
application local folder or by deploying them into the Global
Assembly Cache (GAC):
VC\atlmfc\lib\mfcmifc80.dll
VC\atlmfc\lib\amd64\mfcmifc80.dll
Name: Microsoft Visual C++ Runtime Files
Files: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest
License: MSVC
For your convenience, we have provided the following folders for
use when redistributing VC++ runtime files. Subject to the license
terms for the software, you may redistribute the folder
(unmodified) in the application local folder as a sub-folder with
no change to the folder name. You may also redistribute all the
files (*.dll and *.manifest) within a folder, listed below the
folder for your convenience, as an entire set.
\VC\redist\x86\Microsoft.VC90.ATL\
atl90.dll
Microsoft.VC90.ATL.manifest
\VC\redist\ia64\Microsoft.VC90.ATL\
atl90.dll
Microsoft.VC90.ATL.manifest
\VC\redist\amd64\Microsoft.VC90.ATL\
atl90.dll
Microsoft.VC90.ATL.manifest
\VC\redist\x86\Microsoft.VC90.CRT\
msvcm90.dll
msvcp90.dll
msvcr90.dll
Microsoft.VC90.CRT.manifest
\VC\redist\ia64\Microsoft.VC90.CRT\
msvcm90.dll
msvcp90.dll
msvcr90.dll
Microsoft.VC90.CRT.manifest
----
Full text of the licenses referred to above follows (the fact that they
are listed below does not necessarily imply that their conditions apply
to the present binary release):
----
GCC RUNTIME LIBRARY EXCEPTION
Version 3.1, 31 March 2009
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
This GCC Runtime Library Exception ("Exception") is an additional
permission under section 7 of the GNU General Public License, version
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
bears a notice placed by the copyright holder of the file stating that
the file is governed by GPLv3 along with this Exception.
When you use GCC to compile a program, GCC may combine portions of
certain GCC header files and runtime libraries with the compiled
program. The purpose of this Exception is to allow compilation of
non-GPL (including proprietary) programs to use, in this way, the
header files and runtime libraries covered by this Exception.
0. Definitions.
A file is an "Independent Module" if it either requires the Runtime
Library for execution after a Compilation Process, or makes use of an
interface provided by the Runtime Library, but is not otherwise based
on the Runtime Library.
"GCC" means a version of the GNU Compiler Collection, with or without
modifications, governed by version 3 (or a specified later version) of
the GNU General Public License (GPL) with the option of using any
subsequent versions published by the FSF.
"GPL-compatible Software" is software whose conditions of propagation,
modification and use would permit combination with GCC in accord with
the license of GCC.
"Target Code" refers to output from any compiler for a real or virtual
target processor architecture, in executable form or suitable for
input to an assembler, loader, linker and/or execution
phase. Notwithstanding that, Target Code does not include data in any
format that is used as a compiler intermediate representation, or used
for producing a compiler intermediate representation.
The "Compilation Process" transforms code entirely represented in
non-intermediate languages designed for human-written code, and/or in
Java Virtual Machine byte code, into Target Code. Thus, for example,
use of source code generators and preprocessors need not be considered
part of the Compilation Process, since the Compilation Process can be
understood as starting with the output of the generators or
preprocessors.
A Compilation Process is "Eligible" if it is done using GCC, alone or
with other GPL-compatible software, or if it is done without using any
work based on GCC. For example, using non-GPL-compatible Software to
optimize any GCC intermediate representations would not qualify as an
Eligible Compilation Process.
1. Grant of Additional Permission.
You have permission to propagate a work of Target Code formed by
combining the Runtime Library with Independent Modules, even if such
propagation would otherwise violate the terms of GPLv3, provided that
all Target Code was generated by Eligible Compilation Processes. You
may then convey such a combination under terms of your choice,
consistent with the licensing of the Independent Modules.
2. No Weakening of GCC Copyleft.
The availability of this Exception does not imply any general
presumption that third-party software is unaffected by the copyleft
requirements of the license of GCC.
----
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>. |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/_version.py |
__version__ = '3.1.1'
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/__init__.py |
from ._cares import ffi as _ffi, lib as _lib
import _cffi_backend # hint for bundler tools
if _lib.ARES_SUCCESS != _lib.ares_library_init(_lib.ARES_LIB_INIT_ALL):
raise RuntimeError('Could not initialize c-ares')
from . import errno
from .utils import ascii_bytes, maybe_str, parse_name
from ._version import __version__
import collections.abc
import socket
import math
import functools
import sys
exported_pycares_symbols = [
# Flag values
'ARES_FLAG_USEVC',
'ARES_FLAG_PRIMARY',
'ARES_FLAG_IGNTC',
'ARES_FLAG_NORECURSE',
'ARES_FLAG_STAYOPEN',
'ARES_FLAG_NOSEARCH',
'ARES_FLAG_NOALIASES',
'ARES_FLAG_NOCHECKRESP',
# Nameinfo flag values
'ARES_NI_NOFQDN',
'ARES_NI_NUMERICHOST',
'ARES_NI_NAMEREQD',
'ARES_NI_NUMERICSERV',
'ARES_NI_DGRAM',
'ARES_NI_TCP',
'ARES_NI_UDP',
'ARES_NI_SCTP',
'ARES_NI_DCCP',
'ARES_NI_NUMERICSCOPE',
'ARES_NI_LOOKUPHOST',
'ARES_NI_LOOKUPSERVICE',
'ARES_NI_IDN',
'ARES_NI_IDN_ALLOW_UNASSIGNED',
'ARES_NI_IDN_USE_STD3_ASCII_RULES',
# Bad socket
'ARES_SOCKET_BAD',
]
for symbol in exported_pycares_symbols:
globals()[symbol] = getattr(_lib, symbol)
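# The loop above re-exports each c-ares constant from the compiled CFFI
# library object as a module-level attribute, so callers can use
# pycares.ARES_FLAG_USEVC and friends without touching the private _lib.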
exported_pycares_symbols_map = {
# Query types
"QUERY_TYPE_A" : "T_A",
"QUERY_TYPE_AAAA" : "T_AAAA",
"QUERY_TYPE_ANY" : "T_ANY",
"QUERY_TYPE_CNAME" : "T_CNAME",
"QUERY_TYPE_MX" : "T_MX",
"QUERY_TYPE_NAPTR" : "T_NAPTR",
"QUERY_TYPE_NS" : "T_NS",
"QUERY_TYPE_PTR" : "T_PTR",
"QUERY_TYPE_SOA" : "T_SOA",
"QUERY_TYPE_SRV" : "T_SRV",
"QUERY_TYPE_TXT" : "T_TXT",
}
for k, v in exported_pycares_symbols_map.items():
globals()[k] = getattr(_lib, v)
globals()['ARES_VERSION'] = maybe_str(_ffi.string(_lib.ares_version(_ffi.NULL)))
PYCARES_ADDRTTL_SIZE = 256
class AresError(Exception):
pass
# callback helpers
_global_set = set()
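# _global_set keeps the CFFI handles passed as userdata to in-flight
# c-ares requests alive: a handle from _ffi.new_handle() is only valid
# while a Python reference to it exists, so each handle stays in this
# set until its callback runs and discards it.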
@_ffi.def_extern()
def _sock_state_cb(data, socket_fd, readable, writable):
sock_state_cb = _ffi.from_handle(data)
sock_state_cb(socket_fd, readable, writable)
@_ffi.def_extern()
def _host_cb(arg, status, timeouts, hostent):
callback = _ffi.from_handle(arg)
_global_set.discard(arg)
if status != _lib.ARES_SUCCESS:
result = None
else:
result = ares_host_result(hostent)
status = None
callback(result, status)
@_ffi.def_extern()
def _nameinfo_cb(arg, status, timeouts, node, service):
callback = _ffi.from_handle(arg)
_global_set.discard(arg)
if status != _lib.ARES_SUCCESS:
result = None
else:
result = ares_nameinfo_result(node, service)
status = None
callback(result, status)
@_ffi.def_extern()
def _query_cb(arg, status, timeouts, abuf, alen):
callback, query_type = _ffi.from_handle(arg)
_global_set.discard(arg)
if status == _lib.ARES_SUCCESS:
if query_type == _lib.T_ANY:
result = []
for qtype in (_lib.T_A, _lib.T_AAAA, _lib.T_CNAME, _lib.T_MX, _lib.T_NAPTR, _lib.T_NS, _lib.T_PTR, _lib.T_SOA, _lib.T_SRV, _lib.T_TXT):
r, status = parse_result(qtype, abuf, alen)
if status not in (None, _lib.ARES_ENODATA, _lib.ARES_EBADRESP):
result = None
break
if r is not None:
if isinstance(r, collections.abc.Iterable):
result.extend(r)
else:
result.append(r)
else:
status = None
else:
result, status = parse_result(query_type, abuf, alen)
else:
result = None
callback(result, status)
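# For T_ANY queries, _query_cb re-parses the same response buffer once
# per supported record type and concatenates whatever parses cleanly;
# ENODATA/EBADRESP for an individual type just means "no records of that
# type" and is not treated as a failure of the whole query.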
def parse_result(query_type, abuf, alen):
if query_type == _lib.T_A:
addrttls = _ffi.new("struct ares_addrttl[]", PYCARES_ADDRTTL_SIZE)
naddrttls = _ffi.new("int*", PYCARES_ADDRTTL_SIZE)
parse_status = _lib.ares_parse_a_reply(abuf, alen, _ffi.NULL, addrttls, naddrttls)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = [ares_query_a_result(addrttls[i]) for i in range(naddrttls[0])]
status = None
elif query_type == _lib.T_AAAA:
addrttls = _ffi.new("struct ares_addr6ttl[]", PYCARES_ADDRTTL_SIZE)
naddrttls = _ffi.new("int*", PYCARES_ADDRTTL_SIZE)
parse_status = _lib.ares_parse_aaaa_reply(abuf, alen, _ffi.NULL, addrttls, naddrttls)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = [ares_query_aaaa_result(addrttls[i]) for i in range(naddrttls[0])]
status = None
elif query_type == _lib.T_CNAME:
host = _ffi.new("struct hostent **")
parse_status = _lib.ares_parse_a_reply(abuf, alen, host, _ffi.NULL, _ffi.NULL)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = ares_query_cname_result(host[0])
_lib.ares_free_hostent(host[0])
status = None
elif query_type == _lib.T_MX:
mx_reply = _ffi.new("struct ares_mx_reply **")
        parse_status = _lib.ares_parse_mx_reply(abuf, alen, mx_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
mx_reply_ptr = mx_reply[0]
while mx_reply_ptr != _ffi.NULL:
result.append(ares_query_mx_result(mx_reply_ptr))
mx_reply_ptr = mx_reply_ptr.next
_lib.ares_free_data(mx_reply[0])
status = None
elif query_type == _lib.T_NAPTR:
naptr_reply = _ffi.new("struct ares_naptr_reply **")
        parse_status = _lib.ares_parse_naptr_reply(abuf, alen, naptr_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
naptr_reply_ptr = naptr_reply[0]
while naptr_reply_ptr != _ffi.NULL:
result.append(ares_query_naptr_result(naptr_reply_ptr))
naptr_reply_ptr = naptr_reply_ptr.next
_lib.ares_free_data(naptr_reply[0])
status = None
elif query_type == _lib.T_NS:
hostent = _ffi.new("struct hostent **")
        parse_status = _lib.ares_parse_ns_reply(abuf, alen, hostent)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
host = hostent[0]
i = 0
while host.h_aliases[i] != _ffi.NULL:
result.append(ares_query_ns_result(host.h_aliases[i]))
i += 1
_lib.ares_free_hostent(host)
status = None
elif query_type == _lib.T_PTR:
hostent = _ffi.new("struct hostent **")
hostttl = _ffi.new("int*", PYCARES_ADDRTTL_SIZE)
        parse_status = _lib.ares_parse_ptr_reply(abuf, alen, _ffi.NULL, 0, socket.AF_UNSPEC, hostent, hostttl)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
aliases = []
host = hostent[0]
i = 0
while host.h_aliases[i] != _ffi.NULL:
aliases.append(maybe_str(_ffi.string(host.h_aliases[i])))
i += 1
result = ares_query_ptr_result(host, hostttl[0], aliases)
_lib.ares_free_hostent(host)
status = None
elif query_type == _lib.T_SOA:
soa_reply = _ffi.new("struct ares_soa_reply **")
        parse_status = _lib.ares_parse_soa_reply(abuf, alen, soa_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = ares_query_soa_result(soa_reply[0])
_lib.ares_free_data(soa_reply[0])
status = None
elif query_type == _lib.T_SRV:
srv_reply = _ffi.new("struct ares_srv_reply **")
        parse_status = _lib.ares_parse_srv_reply(abuf, alen, srv_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
srv_reply_ptr = srv_reply[0]
while srv_reply_ptr != _ffi.NULL:
result.append(ares_query_srv_result(srv_reply_ptr))
srv_reply_ptr = srv_reply_ptr.next
_lib.ares_free_data(srv_reply[0])
status = None
elif query_type == _lib.T_TXT:
txt_reply = _ffi.new("struct ares_txt_ext **")
        parse_status = _lib.ares_parse_txt_reply_ext(abuf, alen, txt_reply)
if parse_status != _lib.ARES_SUCCESS:
result = None
status = parse_status
else:
result = []
txt_reply_ptr = txt_reply[0]
tmp_obj = None
while True:
if txt_reply_ptr == _ffi.NULL:
if tmp_obj is not None:
result.append(ares_query_txt_result(tmp_obj))
break
if txt_reply_ptr.record_start == 1:
if tmp_obj is not None:
result.append(ares_query_txt_result(tmp_obj))
tmp_obj = ares_query_txt_result_chunk(txt_reply_ptr)
else:
new_chunk = ares_query_txt_result_chunk(txt_reply_ptr)
tmp_obj.text += new_chunk.text
txt_reply_ptr = txt_reply_ptr.next
_lib.ares_free_data(txt_reply[0])
status = None
else:
raise ValueError("invalid query type specified")
return result, status
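# parse_result() follows the same (result, status) convention as the
# callbacks above: on success, status is None and result holds the parsed
# records; on a parse error, result is None and status carries the c-ares
# error code.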
class Channel:
__qtypes__ = (_lib.T_A, _lib.T_AAAA, _lib.T_ANY, _lib.T_CNAME, _lib.T_MX, _lib.T_NAPTR, _lib.T_NS, _lib.T_PTR, _lib.T_SOA, _lib.T_SRV, _lib.T_TXT)
def __init__(self,
flags = None,
timeout = None,
tries = None,
ndots = None,
tcp_port = None,
udp_port = None,
servers = None,
domains = None,
lookups = None,
sock_state_cb = None,
socket_send_buffer_size = None,
socket_receive_buffer_size = None,
rotate = False,
local_ip = None,
local_dev = None,
resolvconf_path = None):
channel = _ffi.new("ares_channel *")
options = _ffi.new("struct ares_options *")
optmask = 0
if flags is not None:
options.flags = flags
optmask = optmask | _lib.ARES_OPT_FLAGS
if timeout is not None:
options.timeout = int(timeout * 1000)
optmask = optmask | _lib.ARES_OPT_TIMEOUTMS
if tries is not None:
options.tries = tries
optmask = optmask | _lib.ARES_OPT_TRIES
if ndots is not None:
options.ndots = ndots
optmask = optmask | _lib.ARES_OPT_NDOTS
if tcp_port is not None:
options.tcp_port = tcp_port
optmask = optmask | _lib.ARES_OPT_TCP_PORT
if udp_port is not None:
options.udp_port = udp_port
optmask = optmask | _lib.ARES_OPT_UDP_PORT
if socket_send_buffer_size is not None:
options.socket_send_buffer_size = socket_send_buffer_size
optmask = optmask | _lib.ARES_OPT_SOCK_SNDBUF
if socket_receive_buffer_size is not None:
options.socket_receive_buffer_size = socket_receive_buffer_size
optmask = optmask | _lib.ARES_OPT_SOCK_RCVBUF
if sock_state_cb:
if not callable(sock_state_cb):
raise TypeError("sock_state_cb is not callable")
userdata = _ffi.new_handle(sock_state_cb)
# This must be kept alive while the channel is alive.
self._sock_state_cb_handle = userdata
options.sock_state_cb = _lib._sock_state_cb
options.sock_state_cb_data = userdata
optmask = optmask | _lib.ARES_OPT_SOCK_STATE_CB
if lookups:
options.lookups = _ffi.new('char[]', ascii_bytes(lookups))
optmask = optmask | _lib.ARES_OPT_LOOKUPS
if domains:
strs = [_ffi.new("char[]", ascii_bytes(i)) for i in domains]
c = _ffi.new("char *[%d]" % (len(domains) + 1))
for i in range(len(domains)):
c[i] = strs[i]
options.domains = c
options.ndomains = len(domains)
optmask = optmask | _lib.ARES_OPT_DOMAINS
if rotate:
optmask = optmask | _lib.ARES_OPT_ROTATE
if resolvconf_path is not None:
optmask = optmask | _lib.ARES_OPT_RESOLVCONF
options.resolvconf_path = _ffi.new('char[]', ascii_bytes(resolvconf_path))
r = _lib.ares_init_options(channel, options, optmask)
if r != _lib.ARES_SUCCESS:
raise AresError('Failed to initialize c-ares channel')
self._channel = _ffi.gc(channel, lambda x: _lib.ares_destroy(x[0]))
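        # _ffi.gc() attaches ares_destroy() as a destructor to the channel
        # pointer, so the underlying ares_channel is torn down automatically
        # when this object is garbage-collected; no explicit close is needed.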
if servers:
self.servers = servers
if local_ip:
self.set_local_ip(local_ip)
if local_dev:
self.set_local_dev(local_dev)
def cancel(self):
_lib.ares_cancel(self._channel[0])
@property
def servers(self):
servers = _ffi.new("struct ares_addr_node **")
r = _lib.ares_get_servers(self._channel[0], servers)
if r != _lib.ARES_SUCCESS:
raise AresError(r, errno.strerror(r))
server_list = []
server = _ffi.new("struct ares_addr_node **", servers[0])
while True:
if server == _ffi.NULL:
break
ip = _ffi.new("char []", _lib.INET6_ADDRSTRLEN)
s = server[0]
if _ffi.NULL != _lib.ares_inet_ntop(s.family, _ffi.addressof(s.addr), ip, _lib.INET6_ADDRSTRLEN):
server_list.append(maybe_str(_ffi.string(ip, _lib.INET6_ADDRSTRLEN)))
server = s.next
return server_list
@servers.setter
def servers(self, servers):
c = _ffi.new("struct ares_addr_node[%d]" % len(servers))
for i, server in enumerate(servers):
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(server), _ffi.addressof(c[i].addr.addr4)) == 1:
c[i].family = socket.AF_INET
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(server), _ffi.addressof(c[i].addr.addr6)) == 1:
c[i].family = socket.AF_INET6
else:
raise ValueError("invalid IP address")
if i > 0:
c[i - 1].next = _ffi.addressof(c[i])
r = _lib.ares_set_servers(self._channel[0], c)
if r != _lib.ARES_SUCCESS:
raise AresError(r, errno.strerror(r))
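    # The setter above lays the ares_addr_node structs out in one C array
    # and threads their .next pointers together so c-ares sees a singly
    # linked list; ares_set_servers() copies the data, so the temporary
    # array only needs to live for the duration of the call.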
def getsock(self):
rfds = []
wfds = []
socks = _ffi.new("ares_socket_t [%d]" % _lib.ARES_GETSOCK_MAXNUM)
bitmask = _lib.ares_getsock(self._channel[0], socks, _lib.ARES_GETSOCK_MAXNUM)
for i in range(_lib.ARES_GETSOCK_MAXNUM):
if _lib.ARES_GETSOCK_READABLE(bitmask, i):
rfds.append(socks[i])
if _lib.ARES_GETSOCK_WRITABLE(bitmask, i):
wfds.append(socks[i])
return rfds, wfds
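    # ares_getsock() packs the readable/writable state of up to
    # ARES_GETSOCK_MAXNUM sockets into one bitmask; the macro calls above
    # unpack it into separate read/write fd lists for select()-style loops.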
def process_fd(self, read_fd, write_fd):
_lib.ares_process_fd(self._channel[0], _ffi.cast("ares_socket_t", read_fd), _ffi.cast("ares_socket_t", write_fd))
def timeout(self, t = None):
maxtv = _ffi.NULL
tv = _ffi.new("struct timeval*")
if t is not None:
if t >= 0.0:
maxtv = _ffi.new("struct timeval*")
maxtv.tv_sec = int(math.floor(t))
maxtv.tv_usec = int(math.fmod(t, 1.0) * 1000000)
else:
raise ValueError("timeout needs to be a positive number or None")
_lib.ares_timeout(self._channel[0], maxtv, tv)
if tv == _ffi.NULL:
return 0.0
return (tv.tv_sec + tv.tv_usec / 1000000.0)
def gethostbyaddr(self, addr, callback):
if not callable(callback):
raise TypeError("a callable is required")
addr4 = _ffi.new("struct in_addr*")
addr6 = _ffi.new("struct ares_in6_addr*")
        if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(addr), addr4) == 1:
            address = addr4
            family = socket.AF_INET
        elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(addr), addr6) == 1:
address = addr6
family = socket.AF_INET6
else:
raise ValueError("invalid IP address")
userdata = _ffi.new_handle(callback)
_global_set.add(userdata)
_lib.ares_gethostbyaddr(self._channel[0], address, _ffi.sizeof(address[0]), family, _lib._host_cb, userdata)
def gethostbyname(self, name, family, callback):
if not callable(callback):
raise TypeError("a callable is required")
userdata = _ffi.new_handle(callback)
_global_set.add(userdata)
_lib.ares_gethostbyname(self._channel[0], parse_name(name), family, _lib._host_cb, userdata)
def query(self, name, query_type, callback):
self._do_query(_lib.ares_query, name, query_type, callback)
def search(self, name, query_type, callback):
self._do_query(_lib.ares_search, name, query_type, callback)
def _do_query(self, func, name, query_type, callback):
if not callable(callback):
raise TypeError('a callable is required')
if query_type not in self.__qtypes__:
raise ValueError('invalid query type specified')
userdata = _ffi.new_handle((callback, query_type))
_global_set.add(userdata)
func(self._channel[0], parse_name(name), _lib.C_IN, query_type, _lib._query_cb, userdata)
def set_local_ip(self, ip):
addr4 = _ffi.new("struct in_addr*")
addr6 = _ffi.new("struct ares_in6_addr*")
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(ip), addr4) == 1:
_lib.ares_set_local_ip4(self._channel[0], socket.ntohl(addr4.s_addr))
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(ip), addr6) == 1:
_lib.ares_set_local_ip6(self._channel[0], addr6)
else:
raise ValueError("invalid IP address")
def getnameinfo(self, ip_port, flags, callback):
if not callable(callback):
raise TypeError("a callable is required")
ip, port = ip_port
if port < 0 or port > 65535:
raise ValueError("port must be between 0 and 65535")
sa4 = _ffi.new("struct sockaddr_in*")
sa6 = _ffi.new("struct sockaddr_in6*")
if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(ip), _ffi.addressof(sa4.sin_addr)) == 1:
sa4.sin_family = socket.AF_INET
sa4.sin_port = socket.htons(port)
sa = sa4
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(ip), _ffi.addressof(sa6.sin6_addr)) == 1:
sa6.sin6_family = socket.AF_INET6
sa6.sin6_port = socket.htons(port)
sa = sa6
else:
raise ValueError("invalid IP address")
userdata = _ffi.new_handle(callback)
_global_set.add(userdata)
_lib.ares_getnameinfo(self._channel[0], _ffi.cast("struct sockaddr*", sa), _ffi.sizeof(sa[0]), flags, _lib._nameinfo_cb, userdata)
def set_local_dev(self, dev):
_lib.ares_set_local_dev(self._channel[0], dev)
class AresResult:
__slots__ = ()
def __repr__(self):
attrs = ['%s=%s' % (a, getattr(self, a)) for a in self.__slots__]
return '<%s> %s' % (self.__class__.__name__, ', '.join(attrs))
# DNS query result types
#
class ares_query_a_result(AresResult):
__slots__ = ('host', 'ttl')
type = 'A'
def __init__(self, ares_addrttl):
buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN)
_lib.ares_inet_ntop(socket.AF_INET, _ffi.addressof(ares_addrttl.ipaddr), buf, _lib.INET6_ADDRSTRLEN)
self.host = maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN))
self.ttl = ares_addrttl.ttl
class ares_query_aaaa_result(AresResult):
__slots__ = ('host', 'ttl')
type = 'AAAA'
def __init__(self, ares_addrttl):
buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN)
_lib.ares_inet_ntop(socket.AF_INET6, _ffi.addressof(ares_addrttl.ip6addr), buf, _lib.INET6_ADDRSTRLEN)
self.host = maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN))
self.ttl = ares_addrttl.ttl
class ares_query_cname_result(AresResult):
__slots__ = ('cname', 'ttl')
type = 'CNAME'
def __init__(self, host):
self.cname = maybe_str(_ffi.string(host.h_name))
self.ttl = -1
class ares_query_mx_result(AresResult):
__slots__ = ('host', 'priority', 'ttl')
type = 'MX'
def __init__(self, mx):
self.host = maybe_str(_ffi.string(mx.host))
self.priority = mx.priority
self.ttl = mx.ttl
class ares_query_naptr_result(AresResult):
__slots__ = ('order', 'preference', 'flags', 'service', 'regex', 'replacement', 'ttl')
type = 'NAPTR'
def __init__(self, naptr):
self.order = naptr.order
self.preference = naptr.preference
self.flags = maybe_str(_ffi.string(naptr.flags))
self.service = maybe_str(_ffi.string(naptr.service))
self.regex = maybe_str(_ffi.string(naptr.regexp))
self.replacement = maybe_str(_ffi.string(naptr.replacement))
self.ttl = naptr.ttl
class ares_query_ns_result(AresResult):
__slots__ = ('host', 'ttl')
type = 'NS'
def __init__(self, ns):
self.host = maybe_str(_ffi.string(ns))
self.ttl = -1
class ares_query_ptr_result(AresResult):
__slots__ = ('name', 'ttl', 'aliases')
type = 'PTR'
def __init__(self, hostent, ttl, aliases):
self.name = maybe_str(_ffi.string(hostent.h_name))
self.ttl = ttl
self.aliases = aliases
class ares_query_soa_result(AresResult):
__slots__ = ('nsname', 'hostmaster', 'serial', 'refresh', 'retry', 'expires', 'minttl', 'ttl')
type = 'SOA'
def __init__(self, soa):
self.nsname = maybe_str(_ffi.string(soa.nsname))
self.hostmaster = maybe_str(_ffi.string(soa.hostmaster))
self.serial = soa.serial
self.refresh = soa.refresh
self.retry = soa.retry
self.expires = soa.expire
self.minttl = soa.minttl
self.ttl = soa.ttl
class ares_query_srv_result(AresResult):
__slots__ = ('host', 'port', 'priority', 'weight', 'ttl')
type = 'SRV'
def __init__(self, srv):
self.host = maybe_str(_ffi.string(srv.host))
self.port = srv.port
self.priority = srv.priority
self.weight = srv.weight
self.ttl = srv.ttl
class ares_query_txt_result(AresResult):
__slots__ = ('text', 'ttl')
type = 'TXT'
def __init__(self, txt_chunk):
self.text = maybe_str(txt_chunk.text)
self.ttl = txt_chunk.ttl
class ares_query_txt_result_chunk(AresResult):
__slots__ = ('text', 'ttl')
type = 'TXT'
def __init__(self, txt):
self.text = _ffi.string(txt.txt)
self.ttl = txt.ttl
# Other result types
#
class ares_host_result(AresResult):
__slots__ = ('name', 'aliases', 'addresses')
def __init__(self, hostent):
self.name = maybe_str(_ffi.string(hostent.h_name))
self.aliases = []
self.addresses = []
i = 0
while hostent.h_aliases[i] != _ffi.NULL:
self.aliases.append(maybe_str(_ffi.string(hostent.h_aliases[i])))
i += 1
i = 0
while hostent.h_addr_list[i] != _ffi.NULL:
buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN)
if _ffi.NULL != _lib.ares_inet_ntop(hostent.h_addrtype, hostent.h_addr_list[i], buf, _lib.INET6_ADDRSTRLEN):
self.addresses.append(maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN)))
i += 1
class ares_nameinfo_result(AresResult):
__slots__ = ('node', 'service')
def __init__(self, node, service):
self.node = maybe_str(_ffi.string(node))
self.service = maybe_str(_ffi.string(service)) if service != _ffi.NULL else None
__all__ = exported_pycares_symbols + list(exported_pycares_symbols_map.keys()) + ['AresError', 'Channel', 'errno', '__version__']
del exported_pycares_symbols, exported_pycares_symbols_map
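# A minimal usage sketch (illustrative only, not part of the pycares API):
# point a channel at example resolver addresses via the `servers` setter and
# read the configured list back. The addresses below are assumptions, not
# defaults of this library.
def _demo_servers_roundtrip():
    channel = Channel()
    channel.servers = ['8.8.8.8', '2001:4860:4860::8888']
    print(channel.servers)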
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/utils.py |
try:
import idna as idna2008
except ImportError:
idna2008 = None
def ascii_bytes(data):
if isinstance(data, str):
return data.encode('ascii')
if isinstance(data, bytes):
return data
raise TypeError('only str (ascii encoding) and bytes are supported')
def maybe_str(data):
if isinstance(data, str):
return data
if isinstance(data, bytes):
try:
return data.decode('ascii')
except UnicodeDecodeError:
return data
raise TypeError('only str (ascii encoding) and bytes are supported')
def is_all_ascii(text):
for c in text:
if ord(c) > 0x7f:
return False
return True
def parse_name_idna2008(name):
parts = name.split('.')
r = []
for part in parts:
if is_all_ascii(part):
r.append(part.encode('ascii'))
else:
r.append(idna2008.encode(part))
return b'.'.join(r)
def parse_name(name):
if isinstance(name, str):
if is_all_ascii(name):
return name.encode('ascii')
if idna2008 is not None:
return parse_name_idna2008(name)
return name.encode('idna')
if isinstance(name, bytes):
return name
raise TypeError('only str and bytes are supported')
__all__ = ['ascii_bytes', 'maybe_str', 'parse_name']
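# A minimal sketch of the helpers above (illustrative only): ascii_bytes and
# maybe_str convert between str and bytes, while parse_name IDNA-encodes
# non-ASCII hostnames.
def _demo_name_helpers():
    print(ascii_bytes('example.com'))    # b'example.com'
    print(maybe_str(b'example.com'))     # 'example.com'
    print(parse_name('bücher.example'))  # b'xn--bcher-kva.example'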
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/errno.py |
from ._cares import ffi as _ffi, lib as _lib
from .utils import maybe_str
exported_pycares_symbols = [
'ARES_SUCCESS',
# error codes
'ARES_ENODATA',
'ARES_EFORMERR',
'ARES_ESERVFAIL',
'ARES_ENOTFOUND',
'ARES_ENOTIMP',
'ARES_EREFUSED',
'ARES_EBADQUERY',
'ARES_EBADNAME',
'ARES_EBADFAMILY',
'ARES_EBADRESP',
'ARES_ECONNREFUSED',
'ARES_ETIMEOUT',
'ARES_EOF',
'ARES_EFILE',
'ARES_ENOMEM',
'ARES_EDESTRUCTION',
'ARES_EBADSTR',
'ARES_EBADFLAGS',
'ARES_ENONAME',
'ARES_EBADHINTS',
'ARES_ENOTINITIALIZED',
'ARES_ELOADIPHLPAPI',
'ARES_EADDRGETNETWORKPARAMS',
'ARES_ECANCELLED',
]
errorcode = {}
for symbol in exported_pycares_symbols:
value = getattr(_lib, symbol)
globals()[symbol] = value
globals()["errorcode"][value] = symbol
def strerror(code):
return maybe_str(_ffi.string(_lib.ares_strerror(code)))
__all__ = exported_pycares_symbols + ['errorcode', 'strerror']
del exported_pycares_symbols
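# A minimal sketch (illustrative only): map a c-ares status code back to its
# symbolic name via `errorcode` and to a human-readable message via strerror().
# ARES_ENOTFOUND is one of the constants injected into the module globals above.
def _demo_strerror():
    code = ARES_ENOTFOUND
    print(errorcode[code], strerror(code))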
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/__main__.py |
import collections.abc
import pycares
import select
import socket
import sys
def wait_channel(channel):
while True:
read_fds, write_fds = channel.getsock()
if not read_fds and not write_fds:
break
timeout = channel.timeout()
if not timeout:
channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
continue
rlist, wlist, xlist = select.select(read_fds, write_fds, [], timeout)
for fd in rlist:
channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
for fd in wlist:
channel.process_fd(pycares.ARES_SOCKET_BAD, fd)
def cb(result, error):
if error is not None:
print('Error: (%d) %s' % (error, pycares.errno.strerror(error)))
else:
parts = [
';; QUESTION SECTION:',
';%s\t\t\tIN\t%s' % (hostname, qtype.upper()),
'',
';; ANSWER SECTION:'
]
if not isinstance(result, collections.abc.Iterable):
result = [result]
for r in result:
txt = '%s\t\t%d\tIN\t%s' % (hostname, r.ttl, r.type)
if r.type in ('A', 'AAAA'):
parts.append('%s\t%s' % (txt, r.host))
elif r.type == 'CNAME':
parts.append('%s\t%s' % (txt, r.cname))
elif r.type == 'MX':
parts.append('%s\t%d %s' % (txt, r.priority, r.host))
elif r.type == 'NAPTR':
parts.append('%s\t%d %d "%s" "%s" "%s" %s' % (txt, r.order, r.preference, r.flags, r.service, r.regex, r.replacement))
elif r.type == 'NS':
parts.append('%s\t%s' % (txt, r.host))
elif r.type == 'PTR':
parts.append('%s\t%s' % (txt, r.name))
elif r.type == 'SOA':
parts.append('%s\t%s %s %d %d %d %d %d' % (txt, r.nsname, r.hostmaster, r.serial, r.refresh, r.retry, r.expires, r.minttl))
elif r.type == 'SRV':
parts.append('%s\t%d %d %d %s' % (txt, r.priority, r.weight, r.port, r.host))
elif r.type == 'TXT':
parts.append('%s\t"%s"' % (txt, r.text))
print('\n'.join(parts))
channel = pycares.Channel()
if len(sys.argv) not in (2, 3):
print('Invalid arguments! Usage: python -m pycares [query_type] hostname')
sys.exit(1)
if len(sys.argv) == 2:
_, hostname = sys.argv
qtype = 'A'
else:
_, qtype, hostname = sys.argv
try:
query_type = getattr(pycares, 'QUERY_TYPE_%s' % qtype.upper())
except Exception:
print('Invalid query type: %s' % qtype)
sys.exit(1)
channel.query(hostname, query_type, cb)
wait_channel(channel)
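# Example invocations (illustrative):
#   python -m pycares example.com       # defaults to an A query
#   python -m pycares MX example.com    # explicit query type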
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/bin/watchmedo-script.py | #!C:\buildAgent\work\kit\kit\_build\target-deps\python\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'watchdog==0.10.4','console_scripts','watchmedo'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'watchdog==0.10.4'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('watchdog==0.10.4', 'console_scripts', 'watchmedo')())
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/bin/jp.py | #!C:\buildAgent\work\kit\kit\_build\target-deps\python\python.exe
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4, ensure_ascii=False))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
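# Example invocations (illustrative; data.json is a hypothetical input file):
#   echo '{"foo": {"bar": ["a", "b"]}}' | python jp.py 'foo.bar[0]'
#   python jp.py --ast 'foo.bar[0]'
#   python jp.py -f data.json 'foo.bar'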
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog-0.10.4-py3.10.egg-info/SOURCES.txt | AUTHORS
COPYING
LICENSE
MANIFEST.in
README.rst
changelog.rst
setup.cfg
setup.py
docs/Makefile
docs/echo.py.txt
docs/eclipse_cdt_style.xml
docs/make.bat
docs/requirements.txt
docs/source/api.rst
docs/source/conf.py
docs/source/global.rst.inc
docs/source/hacking.rst
docs/source/index.rst
docs/source/installation.rst
docs/source/quickstart.rst
docs/source/examples/logger.py
docs/source/examples/patterns.py
docs/source/examples/simple.py
docs/source/examples/tricks.json
docs/source/examples/tricks.yaml
src/watchdog_fsevents.c
src/watchdog/__init__.py
src/watchdog/events.py
src/watchdog/version.py
src/watchdog/watchmedo.py
src/watchdog.egg-info/PKG-INFO
src/watchdog.egg-info/SOURCES.txt
src/watchdog.egg-info/dependency_links.txt
src/watchdog.egg-info/entry_points.txt
src/watchdog.egg-info/not-zip-safe
src/watchdog.egg-info/requires.txt
src/watchdog.egg-info/top_level.txt
src/watchdog/observers/__init__.py
src/watchdog/observers/api.py
src/watchdog/observers/fsevents.py
src/watchdog/observers/fsevents2.py
src/watchdog/observers/inotify.py
src/watchdog/observers/inotify_buffer.py
src/watchdog/observers/inotify_c.py
src/watchdog/observers/kqueue.py
src/watchdog/observers/polling.py
src/watchdog/observers/read_directory_changes.py
src/watchdog/observers/winapi.py
src/watchdog/tricks/__init__.py
src/watchdog/utils/__init__.py
src/watchdog/utils/bricks.py
src/watchdog/utils/compat.py
src/watchdog/utils/delayed_queue.py
src/watchdog/utils/dirsnapshot.py
src/watchdog/utils/echo.py
src/watchdog/utils/platform.py
src/watchdog/utils/unicode_paths.py
src/watchdog/utils/win32stat.py
tests/__init__.py
tests/conftest.py
tests/markers.py
tests/shell.py
tests/test_delayed_queue.py
tests/test_emitter.py
tests/test_events.py
tests/test_fsevents.py
tests/test_inotify_buffer.py
tests/test_inotify_c.py
tests/test_logging_event_handler.py
tests/test_observer.py
tests/test_observers_api.py
tests/test_observers_polling.py
tests/test_observers_winapi.py
tests/test_pattern_matching_event_handler.py
tests/test_regex_matching_event_handler.py
tests/test_skip_repeats_queue.py
tests/test_snapshot_diff.py
tests/test_watchmedo.py |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog-0.10.4-py3.10.egg-info/top_level.txt | watchdog
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog-0.10.4-py3.10.egg-info/requires.txt | pathtools>=0.1.1
[watchmedo]
PyYAML>=3.10
argh>=0.24.1
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog-0.10.4-py3.10.egg-info/entry_points.txt | [console_scripts]
watchmedo = watchdog.watchmedo:main [watchmedo]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog-0.10.4-py3.10.egg-info/installed-files.txt | ..\..\..\bin\watchmedo-script.py
..\..\..\bin\watchmedo.exe
..\watchdog\__init__.py
..\watchdog\__pycache__\__init__.cpython-310.pyc
..\watchdog\__pycache__\events.cpython-310.pyc
..\watchdog\__pycache__\version.cpython-310.pyc
..\watchdog\__pycache__\watchmedo.cpython-310.pyc
..\watchdog\events.py
..\watchdog\observers\__init__.py
..\watchdog\observers\__pycache__\__init__.cpython-310.pyc
..\watchdog\observers\__pycache__\api.cpython-310.pyc
..\watchdog\observers\__pycache__\fsevents.cpython-310.pyc
..\watchdog\observers\__pycache__\fsevents2.cpython-310.pyc
..\watchdog\observers\__pycache__\inotify.cpython-310.pyc
..\watchdog\observers\__pycache__\inotify_buffer.cpython-310.pyc
..\watchdog\observers\__pycache__\inotify_c.cpython-310.pyc
..\watchdog\observers\__pycache__\kqueue.cpython-310.pyc
..\watchdog\observers\__pycache__\polling.cpython-310.pyc
..\watchdog\observers\__pycache__\read_directory_changes.cpython-310.pyc
..\watchdog\observers\__pycache__\winapi.cpython-310.pyc
..\watchdog\observers\api.py
..\watchdog\observers\fsevents.py
..\watchdog\observers\fsevents2.py
..\watchdog\observers\inotify.py
..\watchdog\observers\inotify_buffer.py
..\watchdog\observers\inotify_c.py
..\watchdog\observers\kqueue.py
..\watchdog\observers\polling.py
..\watchdog\observers\read_directory_changes.py
..\watchdog\observers\winapi.py
..\watchdog\tricks\__init__.py
..\watchdog\tricks\__pycache__\__init__.cpython-310.pyc
..\watchdog\utils\__init__.py
..\watchdog\utils\__pycache__\__init__.cpython-310.pyc
..\watchdog\utils\__pycache__\bricks.cpython-310.pyc
..\watchdog\utils\__pycache__\compat.cpython-310.pyc
..\watchdog\utils\__pycache__\delayed_queue.cpython-310.pyc
..\watchdog\utils\__pycache__\dirsnapshot.cpython-310.pyc
..\watchdog\utils\__pycache__\echo.cpython-310.pyc
..\watchdog\utils\__pycache__\platform.cpython-310.pyc
..\watchdog\utils\__pycache__\unicode_paths.cpython-310.pyc
..\watchdog\utils\__pycache__\win32stat.cpython-310.pyc
..\watchdog\utils\bricks.py
..\watchdog\utils\compat.py
..\watchdog\utils\delayed_queue.py
..\watchdog\utils\dirsnapshot.py
..\watchdog\utils\echo.py
..\watchdog\utils\platform.py
..\watchdog\utils\unicode_paths.py
..\watchdog\utils\win32stat.py
..\watchdog\version.py
..\watchdog\watchmedo.py
PKG-INFO
SOURCES.txt
dependency_links.txt
entry_points.txt
not-zip-safe
requires.txt
top_level.txt
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/watchdog-0.10.4-py3.10.egg-info/dependency_links.txt | |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/__init__.py | # -*- coding: utf-8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, this package tries to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.
Basic usage:
>>> from charset_normalizer import from_bytes
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
>>> best_guess = results.best()
>>> str(best_guess)
'Bсеки човек има право на образование. Oбразованието!'
Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.
:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging
from .api import from_bytes, from_fp, from_path, normalize
from .legacy import (
CharsetDetector,
CharsetDoctor,
CharsetNormalizerMatch,
CharsetNormalizerMatches,
detect,
)
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__
__all__ = (
"from_fp",
"from_path",
"from_bytes",
"normalize",
"detect",
"CharsetMatch",
"CharsetMatches",
"CharsetNormalizerMatch",
"CharsetNormalizerMatches",
"CharsetDetector",
"CharsetDoctor",
"__version__",
"VERSION",
"set_logging_handler",
)
# Attach a NullHandler to the top level logger by default
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/version.py | """
Expose version
"""
__version__ = "2.1.1"
VERSION = __version__.split(".")
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/utils.py | try:
# WARNING: unicodedata2 support is going to be removed in 3.0
# Python is quickly catching up.
import unicodedata2 as unicodedata
except ImportError:
import unicodedata # type: ignore[no-redef]
import importlib
import logging
from codecs import IncrementalDecoder
from encodings.aliases import aliases
from functools import lru_cache
from re import findall
from typing import Generator, List, Optional, Set, Tuple, Union
from _multibytecodec import MultibyteIncrementalDecoder
from .constant import (
ENCODING_MARKS,
IANA_SUPPORTED_SIMILAR,
RE_POSSIBLE_ENCODING_INDICATION,
UNICODE_RANGES_COMBINED,
UNICODE_SECONDARY_RANGE_KEYWORD,
UTF8_MAXIMAL_ALLOCATION,
)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
try:
description: str = unicodedata.name(character)
except ValueError:
return False
return (
"WITH GRAVE" in description
or "WITH ACUTE" in description
or "WITH CEDILLA" in description
or "WITH DIAERESIS" in description
or "WITH CIRCUMFLEX" in description
or "WITH TILDE" in description
)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
decomposed: str = unicodedata.decomposition(character)
if not decomposed:
return character
codes: List[str] = decomposed.split(" ")
return chr(int(codes[0], 16))
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> Optional[str]:
"""
Retrieve the Unicode range official name from a single character.
"""
character_ord: int = ord(character)
for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
if character_ord in ord_range:
return range_name
return None
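# A minimal sketch (illustrative only): a plain ASCII letter resolves to the
# "Basic Latin" block name used elsewhere in this package.
def _demo_unicode_range() -> None:
    print(unicode_range("A"))  # 'Basic Latin'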
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
try:
description: str = unicodedata.name(character)
except ValueError:
return False
return "LATIN" in description
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_ascii(character: str) -> bool:
try:
character.encode("ascii")
except UnicodeEncodeError:
return False
return True
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
character_category: str = unicodedata.category(character)
if "P" in character_category:
return True
character_range: Optional[str] = unicode_range(character)
if character_range is None:
return False
return "Punctuation" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
character_category: str = unicodedata.category(character)
if "S" in character_category or "N" in character_category:
return True
character_range: Optional[str] = unicode_range(character)
if character_range is None:
return False
return "Forms" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
character_range: Optional[str] = unicode_range(character)
if character_range is None:
return False
return "Emoticons" in character_range
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
if character.isspace() or character in {"|", "+", ",", ";", "<", ">"}:
return True
character_category: str = unicodedata.category(character)
return "Z" in character_category
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
return character.islower() != character.isupper()
def is_private_use_only(character: str) -> bool:
character_category: str = unicodedata.category(character)
return character_category == "Co"
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "CJK" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "HIRAGANA" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "KATAKANA" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "HANGUL" in character_name
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
try:
character_name = unicodedata.name(character)
except ValueError:
return False
return "THAI" in character_name
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_unprintable(character: str) -> bool:
return (
character.isspace() is False # includes \n \t \r \v
and character.isprintable() is False
and character != "\x1A" # Why? Its the ASCII substitute character.
and character != "\ufeff" # bug discovered in Python,
# Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
)
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
"""
Extract any declared encoding from the first n bytes, using an ASCII-only decoder.
"""
if not isinstance(sequence, bytes):
raise TypeError
seq_len: int = len(sequence)
results: List[str] = findall(
RE_POSSIBLE_ENCODING_INDICATION,
sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
)
if len(results) == 0:
return None
for specified_encoding in results:
specified_encoding = specified_encoding.lower().replace("-", "_")
encoding_alias: str
encoding_iana: str
for encoding_alias, encoding_iana in aliases.items():
if encoding_alias == specified_encoding:
return encoding_iana
if encoding_iana == specified_encoding:
return encoding_iana
return None
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
"""
Verify whether a specific encoding is a multi-byte one, based on its IANA name.
"""
return name in {
"utf_8",
"utf_8_sig",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_32",
"utf_32_le",
"utf_32_be",
"utf_7",
} or issubclass(
importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
MultibyteIncrementalDecoder,
)
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
"""
Identify and extract any SIG/BOM in the given sequence.
"""
for iana_encoding in ENCODING_MARKS:
marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]
if isinstance(marks, bytes):
marks = [marks]
for mark in marks:
if sequence.startswith(mark):
return iana_encoding, mark
return None, b""
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
return iana_encoding not in {"utf_16", "utf_32"}
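# A minimal sketch (illustrative only): a UTF-8 BOM-prefixed payload should be
# identified by its 3-byte mark, and utf_8 is an encoding whose SIG is stripped.
def _demo_sig_or_bom() -> None:
    encoding, mark = identify_sig_or_bom(b"\xef\xbb\xbfhello")
    print(encoding, mark)  # expected: 'utf_8' and b'\xef\xbb\xbf'
    if encoding is not None:
        print(should_strip_sig_or_bom(encoding))  # expected: True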
def iana_name(cp_name: str, strict: bool = True) -> str:
cp_name = cp_name.lower().replace("-", "_")
encoding_alias: str
encoding_iana: str
for encoding_alias, encoding_iana in aliases.items():
if cp_name in [encoding_alias, encoding_iana]:
return encoding_iana
if strict:
raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
return cp_name
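# A minimal sketch (illustrative only): spellings are normalized (lower-cased,
# "-" replaced by "_") and resolved through the stdlib alias table.
def _demo_iana_name() -> None:
    name = iana_name("UTF-8")
    print(name)                          # 'utf_8'
    print(is_multi_byte_encoding(name))  # True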
def range_scan(decoded_sequence: str) -> List[str]:
ranges: Set[str] = set()
for character in decoded_sequence:
character_range: Optional[str] = unicode_range(character)
if character_range is None:
continue
ranges.add(character_range)
return list(ranges)
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
return 0.0
decoder_a = importlib.import_module(
"encodings.{}".format(iana_name_a)
).IncrementalDecoder
decoder_b = importlib.import_module(
"encodings.{}".format(iana_name_b)
).IncrementalDecoder
id_a: IncrementalDecoder = decoder_a(errors="ignore")
id_b: IncrementalDecoder = decoder_b(errors="ignore")
character_match_count: int = 0
for i in range(255):
to_be_decoded: bytes = bytes([i])
if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
character_match_count += 1
return character_match_count / 254
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
"""
Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR dict was generated using
the function cp_similarity.
"""
return (
iana_name_a in IANA_SUPPORTED_SIMILAR
and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
)
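# A minimal sketch (illustrative only): score how often two single-byte code
# pages decode the same byte to the same character; the result lies in [0, 1].
def _demo_cp_similarity() -> None:
    print(cp_similarity("cp1252", "latin_1"))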
def set_logging_handler(
name: str = "charset_normalizer",
level: int = logging.INFO,
format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(handler)
def cut_sequence_chunks(
sequences: bytes,
encoding_iana: str,
offsets: range,
chunk_size: int,
bom_or_sig_available: bool,
strip_sig_or_bom: bool,
sig_payload: bytes,
is_multi_byte_decoder: bool,
decoded_payload: Optional[str] = None,
) -> Generator[str, None, None]:
if decoded_payload and is_multi_byte_decoder is False:
for i in offsets:
chunk = decoded_payload[i : i + chunk_size]
if not chunk:
break
yield chunk
else:
for i in offsets:
chunk_end = i + chunk_size
if chunk_end > len(sequences) + 8:
continue
cut_sequence = sequences[i : i + chunk_size]
if bom_or_sig_available and strip_sig_or_bom is False:
cut_sequence = sig_payload + cut_sequence
chunk = cut_sequence.decode(
encoding_iana,
errors="ignore" if is_multi_byte_decoder else "strict",
)
# multi-byte bad cutting detector and adjustment
# not the cleanest way to perform that fix but clever enough for now.
if is_multi_byte_decoder and i > 0 and sequences[i] >= 0x80:
chunk_partial_size_chk: int = min(chunk_size, 16)
if (
decoded_payload
and chunk[:chunk_partial_size_chk] not in decoded_payload
):
for j in range(i, i - 4, -1):
cut_sequence = sequences[j:chunk_end]
if bom_or_sig_available and strip_sig_or_bom is False:
cut_sequence = sig_payload + cut_sequence
chunk = cut_sequence.decode(encoding_iana, errors="ignore")
if chunk[:chunk_partial_size_chk] in decoded_payload:
break
yield chunk
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/md.py | from functools import lru_cache
from typing import List, Optional
from .constant import COMMON_SAFE_ASCII_CHARACTERS, UNICODE_SECONDARY_RANGE_KEYWORD
from .utils import (
is_accentuated,
is_ascii,
is_case_variable,
is_cjk,
is_emoticon,
is_hangul,
is_hiragana,
is_katakana,
is_latin,
is_punctuation,
is_separator,
is_symbol,
is_thai,
is_unprintable,
remove_accent,
unicode_range,
)
class MessDetectorPlugin:
"""
Base abstract class used for mess detection plugins.
All detectors MUST extend and implement given methods.
"""
def eligible(self, character: str) -> bool:
"""
Determine if the given character should be fed in.
"""
raise NotImplementedError # pragma: nocover
def feed(self, character: str) -> None:
"""
The main routine to be executed upon each character.
Insert the logic by which the text would be considered chaotic.
"""
raise NotImplementedError # pragma: nocover
def reset(self) -> None: # pragma: no cover
"""
Reset the plugin to its initial state.
"""
raise NotImplementedError
@property
def ratio(self) -> float:
"""
Compute the chaos ratio based on what your feed() has seen.
Must NOT be lower than 0.0; there is no upper bound.
"""
raise NotImplementedError # pragma: nocover
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._punctuation_count: int = 0
self._symbol_count: int = 0
self._character_count: int = 0
self._last_printable_char: Optional[str] = None
self._frenzy_symbol_in_word: bool = False
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character != self._last_printable_char
and character not in COMMON_SAFE_ASCII_CHARACTERS
):
if is_punctuation(character):
self._punctuation_count += 1
elif (
character.isdigit() is False
and is_symbol(character)
and is_emoticon(character) is False
):
self._symbol_count += 2
self._last_printable_char = character
def reset(self) -> None: # pragma: no cover
self._punctuation_count = 0
self._character_count = 0
self._symbol_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_punctuation: float = (
self._punctuation_count + self._symbol_count
) / self._character_count
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
class TooManyAccentuatedPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._character_count: int = 0
self._accentuated_count: int = 0
def eligible(self, character: str) -> bool:
return character.isalpha()
def feed(self, character: str) -> None:
self._character_count += 1
if is_accentuated(character):
self._accentuated_count += 1
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._accentuated_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_accentuation: float = self._accentuated_count / self._character_count
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
class UnprintablePlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._unprintable_count: int = 0
self._character_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if is_unprintable(character):
self._unprintable_count += 1
self._character_count += 1
def reset(self) -> None: # pragma: no cover
self._unprintable_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._successive_count: int = 0
self._character_count: int = 0
self._last_latin_character: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isalpha() and is_latin(character)
def feed(self, character: str) -> None:
self._character_count += 1
if (
self._last_latin_character is not None
and is_accentuated(character)
and is_accentuated(self._last_latin_character)
):
if character.isupper() and self._last_latin_character.isupper():
self._successive_count += 1
# Worse if it's the same character duplicated with a different accent.
if remove_accent(character) == remove_accent(self._last_latin_character):
self._successive_count += 1
self._last_latin_character = character
def reset(self) -> None: # pragma: no cover
self._successive_count = 0
self._character_count = 0
self._last_latin_character = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return (self._successive_count * 2) / self._character_count
class SuspiciousRange(MessDetectorPlugin):
def __init__(self) -> None:
self._suspicious_successive_range_count: int = 0
self._character_count: int = 0
self._last_printable_seen: Optional[str] = None
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if (
character.isspace()
or is_punctuation(character)
or character in COMMON_SAFE_ASCII_CHARACTERS
):
self._last_printable_seen = None
return
if self._last_printable_seen is None:
self._last_printable_seen = character
return
unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen)
unicode_range_b: Optional[str] = unicode_range(character)
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
self._suspicious_successive_range_count += 1
self._last_printable_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._suspicious_successive_range_count = 0
self._last_printable_seen = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
ratio_of_suspicious_range_usage: float = (
self._suspicious_successive_range_count * 2
) / self._character_count
if ratio_of_suspicious_range_usage < 0.1:
return 0.0
return ratio_of_suspicious_range_usage
class SuperWeirdWordPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._word_count: int = 0
self._bad_word_count: int = 0
self._foreign_long_count: int = 0
self._is_current_word_bad: bool = False
self._foreign_long_watch: bool = False
self._character_count: int = 0
self._bad_character_count: int = 0
self._buffer: str = ""
self._buffer_accent_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character.isalpha():
self._buffer += character
if is_accentuated(character):
self._buffer_accent_count += 1
if (
self._foreign_long_watch is False
and (is_latin(character) is False or is_accentuated(character))
and is_cjk(character) is False
and is_hangul(character) is False
and is_katakana(character) is False
and is_hiragana(character) is False
and is_thai(character) is False
):
self._foreign_long_watch = True
return
if not self._buffer:
return
if (
character.isspace() or is_punctuation(character) or is_separator(character)
) and self._buffer:
self._word_count += 1
buffer_length: int = len(self._buffer)
self._character_count += buffer_length
if buffer_length >= 4:
if self._buffer_accent_count / buffer_length > 0.34:
self._is_current_word_bad = True
# Words/buffers ending with an upper-case accentuated letter are so rare
# that we consider them all suspicious. Same weight as a foreign_long suspicion.
if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper():
self._foreign_long_count += 1
self._is_current_word_bad = True
if buffer_length >= 24 and self._foreign_long_watch:
self._foreign_long_count += 1
self._is_current_word_bad = True
if self._is_current_word_bad:
self._bad_word_count += 1
self._bad_character_count += len(self._buffer)
self._is_current_word_bad = False
self._foreign_long_watch = False
self._buffer = ""
self._buffer_accent_count = 0
elif (
character not in {"<", ">", "-", "=", "~", "|", "_"}
and character.isdigit() is False
and is_symbol(character)
):
self._is_current_word_bad = True
self._buffer += character
def reset(self) -> None: # pragma: no cover
self._buffer = ""
self._is_current_word_bad = False
self._foreign_long_watch = False
self._bad_word_count = 0
self._word_count = 0
self._character_count = 0
self._bad_character_count = 0
self._foreign_long_count = 0
@property
def ratio(self) -> float:
if self._word_count <= 10 and self._foreign_long_count == 0:
return 0.0
return self._bad_character_count / self._character_count
class CjkInvalidStopPlugin(MessDetectorPlugin):
"""
GB (Chinese) based encodings often render the full stop incorrectly when the content does not fit, which
can be easily detected by searching for the overuse of '丅' and '丄'.
"""
def __init__(self) -> None:
self._wrong_stop_count: int = 0
self._cjk_character_count: int = 0
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character in {"丅", "丄"}:
self._wrong_stop_count += 1
return
if is_cjk(character):
self._cjk_character_count += 1
def reset(self) -> None: # pragma: no cover
self._wrong_stop_count = 0
self._cjk_character_count = 0
@property
def ratio(self) -> float:
if self._cjk_character_count < 16:
return 0.0
return self._wrong_stop_count / self._cjk_character_count
class ArchaicUpperLowerPlugin(MessDetectorPlugin):
def __init__(self) -> None:
self._buf: bool = False
self._character_count_since_last_sep: int = 0
self._successive_upper_lower_count: int = 0
self._successive_upper_lower_count_final: int = 0
self._character_count: int = 0
self._last_alpha_seen: Optional[str] = None
self._current_ascii_only: bool = True
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
is_concerned = character.isalpha() and is_case_variable(character)
chunk_sep = is_concerned is False
if chunk_sep and self._character_count_since_last_sep > 0:
if (
self._character_count_since_last_sep <= 64
and character.isdigit() is False
and self._current_ascii_only is False
):
self._successive_upper_lower_count_final += (
self._successive_upper_lower_count
)
self._successive_upper_lower_count = 0
self._character_count_since_last_sep = 0
self._last_alpha_seen = None
self._buf = False
self._character_count += 1
self._current_ascii_only = True
return
if self._current_ascii_only is True and is_ascii(character) is False:
self._current_ascii_only = False
if self._last_alpha_seen is not None:
if (character.isupper() and self._last_alpha_seen.islower()) or (
character.islower() and self._last_alpha_seen.isupper()
):
if self._buf is True:
self._successive_upper_lower_count += 2
self._buf = False
else:
self._buf = True
else:
self._buf = False
self._character_count += 1
self._character_count_since_last_sep += 1
self._last_alpha_seen = character
def reset(self) -> None: # pragma: no cover
self._character_count = 0
self._character_count_since_last_sep = 0
self._successive_upper_lower_count = 0
self._successive_upper_lower_count_final = 0
self._last_alpha_seen = None
self._buf = False
self._current_ascii_only = True
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.0
return self._successive_upper_lower_count_final / self._character_count
@lru_cache(maxsize=1024)
def is_suspiciously_successive_range(
unicode_range_a: Optional[str], unicode_range_b: Optional[str]
) -> bool:
"""
Determine if two Unicode ranges seen next to each other can be considered suspicious.
"""
if unicode_range_a is None or unicode_range_b is None:
return True
if unicode_range_a == unicode_range_b:
return False
if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
return False
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
return False
# Latin characters can be accompanied by a combining diacritical mark,
# e.g. in Vietnamese.
if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
"Combining" in unicode_range_a or "Combining" in unicode_range_b
):
return False
keywords_range_a, keywords_range_b = unicode_range_a.split(
" "
), unicode_range_b.split(" ")
for el in keywords_range_a:
if el in UNICODE_SECONDARY_RANGE_KEYWORD:
continue
if el in keywords_range_b:
return False
# Japanese Exception
range_a_jp_chars, range_b_jp_chars = (
unicode_range_a
in (
"Hiragana",
"Katakana",
),
unicode_range_b in ("Hiragana", "Katakana"),
)
if (range_a_jp_chars or range_b_jp_chars) and (
"CJK" in unicode_range_a or "CJK" in unicode_range_b
):
return False
if range_a_jp_chars and range_b_jp_chars:
return False
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
return False
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
return False
# Chinese/Japanese use dedicated range for punctuation and/or separators.
if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
unicode_range_a in ["Katakana", "Hiragana"]
and unicode_range_b in ["Katakana", "Hiragana"]
):
if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
return False
if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
return False
return True
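# A minimal sketch (illustrative only), grounded in the early exits above:
# identical ranges are never suspicious, while an unresolvable (None) range
# always is.
def _demo_suspicious_range() -> None:
    print(is_suspiciously_successive_range("Basic Latin", "Basic Latin"))  # False
    print(is_suspiciously_successive_range(None, "Basic Latin"))           # True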
@lru_cache(maxsize=2048)
def mess_ratio(
decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
) -> float:
"""
Compute a mess ratio for a given decoded byte sequence. Reaching the maximum threshold stops the computation early.
"""
detectors: List[MessDetectorPlugin] = [
md_class() for md_class in MessDetectorPlugin.__subclasses__()
]
length: int = len(decoded_sequence) + 1
mean_mess_ratio: float = 0.0
if length < 512:
intermediary_mean_mess_ratio_calc: int = 32
elif length <= 1024:
intermediary_mean_mess_ratio_calc = 64
else:
intermediary_mean_mess_ratio_calc = 128
for character, index in zip(decoded_sequence + "\n", range(length)):
for detector in detectors:
if detector.eligible(character):
detector.feed(character)
if (
index > 0 and index % intermediary_mean_mess_ratio_calc == 0
) or index == length - 1:
mean_mess_ratio = sum(dt.ratio for dt in detectors)
if mean_mess_ratio >= maximum_threshold:
break
if debug:
for dt in detectors: # pragma: nocover
print(dt.__class__, dt.ratio)
return round(mean_mess_ratio, 3)
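# A minimal sketch (illustrative only): ordinary ASCII prose should score close
# to 0.0, while symbol-heavy noise pushes the ratio up. Exact values depend on
# the plugin set above.
def _demo_mess_ratio() -> None:
    print(mess_ratio("This is a perfectly ordinary sentence."))
    print(mess_ratio("{:]$^ {:]$^ {:]$^ {:]$^"))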
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/legacy.py | import warnings
from typing import Dict, Optional, Union
from .api import from_bytes, from_fp, from_path, normalize
from .constant import CHARDET_CORRESPONDENCE
from .models import CharsetMatch, CharsetMatches
def detect(byte_str: bytes) -> Dict[str, Optional[Union[str, float]]]:
"""
chardet legacy method.
Detect the encoding of the given byte string. It should be mostly backward-compatible.
The encoding name will match chardet's own spelling whenever possible (but not for encoding names chardet does not support).
This function is deprecated; it exists so you can migrate your project easily. Consult the documentation for
further information. Not planned for removal.
:param byte_str: The byte sequence to examine.
"""
if not isinstance(byte_str, (bytearray, bytes)):
raise TypeError( # pragma: nocover
"Expected object of type bytes or bytearray, got: "
"{0}".format(type(byte_str))
)
if isinstance(byte_str, bytearray):
byte_str = bytes(byte_str)
r = from_bytes(byte_str).best()
encoding = r.encoding if r is not None else None
language = r.language if r is not None and r.language != "Unknown" else ""
confidence = 1.0 - r.chaos if r is not None else None
# Note: CharsetNormalizer does not return 'UTF-8-SIG' as the SIG gets stripped during the detection/normalization
# process, but chardet does return 'utf-8-sig', which is a valid codec name.
if r is not None and encoding == "utf_8" and r.bom:
encoding += "_sig"
return {
"encoding": encoding
if encoding not in CHARDET_CORRESPONDENCE
else CHARDET_CORRESPONDENCE[encoding],
"language": language,
"confidence": confidence,
}
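# A minimal sketch (illustrative only) of the chardet-compatible return shape:
# a dict with 'encoding', 'language' and 'confidence' keys.
def _demo_detect() -> None:
    result = detect("Bсеки човек има право на образование.".encode("utf_8"))
    print(result["encoding"], result["language"], result["confidence"])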
class CharsetNormalizerMatch(CharsetMatch):
pass
class CharsetNormalizerMatches(CharsetMatches):
@staticmethod
def from_fp(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_fp(*args, **kwargs) # pragma: nocover
@staticmethod
def from_bytes(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_bytes(*args, **kwargs) # pragma: nocover
@staticmethod
def from_path(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return from_path(*args, **kwargs) # pragma: nocover
@staticmethod
def normalize(*args, **kwargs): # type: ignore
warnings.warn( # pragma: nocover
"staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
"and scheduled to be removed in 3.0",
DeprecationWarning,
)
return normalize(*args, **kwargs) # pragma: nocover
class CharsetDetector(CharsetNormalizerMatches):
pass
class CharsetDoctor(CharsetNormalizerMatches):
pass
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/api.py | import logging
import warnings
from os import PathLike
from os.path import basename, splitext
from typing import Any, BinaryIO, List, Optional, Set
from .cd import (
coherence_ratio,
encoding_languages,
mb_encoding_languages,
merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
any_specified_encoding,
cut_sequence_chunks,
iana_name,
identify_sig_or_bom,
is_cp_similar,
is_multi_byte_encoding,
should_strip_sig_or_bom,
)
# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
def from_bytes(
sequences: bytes,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.2,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
Given a raw byte sequence, return the best possible charsets usable to render str objects.
If there are no results, it is a strong indicator that the source is binary, not text.
By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
and will give up on a particular code page after 20% of measured mess. These criteria are customizable at will.
The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
but never takes it for granted. It can improve performance.
If you want to focus your attention on some code pages and/or exclude others, use cp_isolation and cp_exclusion
for that purpose.
This function will strip the SIG from the payload/sequence every time, except for UTF-16 and UTF-32.
By default the library does not set up any handler other than the NullHandler. If you set the 'explain' toggle
to True, it will alter the logger configuration by adding a StreamHandler suitable for debugging.
A custom logging format and handler can be set manually.
"""
if not isinstance(sequences, (bytearray, bytes)):
raise TypeError(
"Expected object of type bytes or bytearray, got: {0}".format(
type(sequences)
)
)
if explain:
previous_logger_level: int = logger.level
logger.addHandler(explain_handler)
logger.setLevel(TRACE)
length: int = len(sequences)
if length == 0:
logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level or logging.WARNING)
return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
if cp_isolation is not None:
logger.log(
TRACE,
"cp_isolation is set. use this flag for debugging purpose. "
"limited list of encoding allowed : %s.",
", ".join(cp_isolation),
)
cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
else:
cp_isolation = []
if cp_exclusion is not None:
logger.log(
TRACE,
"cp_exclusion is set. use this flag for debugging purpose. "
"limited list of encoding excluded : %s.",
", ".join(cp_exclusion),
)
cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
else:
cp_exclusion = []
if length <= (chunk_size * steps):
logger.log(
TRACE,
"override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
steps,
chunk_size,
length,
)
steps = 1
chunk_size = length
if steps > 1 and length / steps < chunk_size:
chunk_size = int(length / steps)
is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
if is_too_small_sequence:
logger.log(
TRACE,
"Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
length
),
)
elif is_too_large_sequence:
logger.log(
TRACE,
"Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
length
),
)
prioritized_encodings: List[str] = []
specified_encoding: Optional[str] = (
any_specified_encoding(sequences) if preemptive_behaviour else None
)
if specified_encoding is not None:
prioritized_encodings.append(specified_encoding)
logger.log(
TRACE,
"Detected declarative mark in sequence. Priority +1 given for %s.",
specified_encoding,
)
tested: Set[str] = set()
tested_but_hard_failure: List[str] = []
tested_but_soft_failure: List[str] = []
fallback_ascii: Optional[CharsetMatch] = None
fallback_u8: Optional[CharsetMatch] = None
fallback_specified: Optional[CharsetMatch] = None
results: CharsetMatches = CharsetMatches()
sig_encoding, sig_payload = identify_sig_or_bom(sequences)
if sig_encoding is not None:
prioritized_encodings.append(sig_encoding)
logger.log(
TRACE,
"Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
len(sig_payload),
sig_encoding,
)
prioritized_encodings.append("ascii")
if "utf_8" not in prioritized_encodings:
prioritized_encodings.append("utf_8")
for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
if cp_isolation and encoding_iana not in cp_isolation:
continue
if cp_exclusion and encoding_iana in cp_exclusion:
continue
if encoding_iana in tested:
continue
tested.add(encoding_iana)
decoded_payload: Optional[str] = None
bom_or_sig_available: bool = sig_encoding == encoding_iana
strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
encoding_iana
)
if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
logger.log(
TRACE,
"Encoding %s wont be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
encoding_iana,
)
continue
try:
is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
except (ModuleNotFoundError, ImportError):
logger.log(
TRACE,
"Encoding %s does not provide an IncrementalDecoder",
encoding_iana,
)
continue
try:
if is_too_large_sequence and is_multi_byte_decoder is False:
str(
sequences[: int(50e4)]
if strip_sig_or_bom is False
else sequences[len(sig_payload) : int(50e4)],
encoding=encoding_iana,
)
else:
decoded_payload = str(
sequences
if strip_sig_or_bom is False
else sequences[len(sig_payload) :],
encoding=encoding_iana,
)
except (UnicodeDecodeError, LookupError) as e:
if not isinstance(e, LookupError):
logger.log(
TRACE,
"Code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
similar_soft_failure_test: bool = False
for encoding_soft_failed in tested_but_soft_failure:
if is_cp_similar(encoding_iana, encoding_soft_failed):
similar_soft_failure_test = True
break
if similar_soft_failure_test:
logger.log(
TRACE,
"%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
encoding_iana,
encoding_soft_failed,
)
continue
r_ = range(
0 if not bom_or_sig_available else len(sig_payload),
length,
int(length / steps),
)
multi_byte_bonus: bool = (
is_multi_byte_decoder
and decoded_payload is not None
and len(decoded_payload) < length
)
if multi_byte_bonus:
logger.log(
TRACE,
"Code page %s is a multi byte encoding table and it appear that at least one character "
"was encoded using n-bytes.",
encoding_iana,
)
max_chunk_gave_up: int = int(len(r_) / 4)
max_chunk_gave_up = max(max_chunk_gave_up, 2)
early_stop_count: int = 0
lazy_str_hard_failure = False
md_chunks: List[str] = []
md_ratios = []
try:
for chunk in cut_sequence_chunks(
sequences,
encoding_iana,
r_,
chunk_size,
bom_or_sig_available,
strip_sig_or_bom,
sig_payload,
is_multi_byte_decoder,
decoded_payload,
):
md_chunks.append(chunk)
md_ratios.append(mess_ratio(chunk, threshold))
if md_ratios[-1] >= threshold:
early_stop_count += 1
if (early_stop_count >= max_chunk_gave_up) or (
bom_or_sig_available and strip_sig_or_bom is False
):
break
except UnicodeDecodeError as e: # Lazy str loading may have missed something there
logger.log(
TRACE,
"LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
early_stop_count = max_chunk_gave_up
lazy_str_hard_failure = True
# We might want to check the sequence again with the whole content,
# but only if the initial MD tests pass.
if (
not lazy_str_hard_failure
and is_too_large_sequence
and not is_multi_byte_decoder
):
try:
sequences[int(50e3) :].decode(encoding_iana, errors="strict")
except UnicodeDecodeError as e:
logger.log(
TRACE,
"LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
encoding_iana,
str(e),
)
tested_but_hard_failure.append(encoding_iana)
continue
mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
tested_but_soft_failure.append(encoding_iana)
logger.log(
TRACE,
"%s was excluded because of initial chaos probing. Gave up %i time(s). "
"Computed mean chaos is %f %%.",
encoding_iana,
early_stop_count,
round(mean_mess_ratio * 100, ndigits=3),
)
# Preparing those fallbacks in case we got nothing.
if (
encoding_iana in ["ascii", "utf_8", specified_encoding]
and not lazy_str_hard_failure
):
fallback_entry = CharsetMatch(
sequences, encoding_iana, threshold, False, [], decoded_payload
)
if encoding_iana == specified_encoding:
fallback_specified = fallback_entry
elif encoding_iana == "ascii":
fallback_ascii = fallback_entry
else:
fallback_u8 = fallback_entry
continue
logger.log(
TRACE,
"%s passed initial chaos probing. Mean measured chaos is %f %%",
encoding_iana,
round(mean_mess_ratio * 100, ndigits=3),
)
if not is_multi_byte_decoder:
target_languages: List[str] = encoding_languages(encoding_iana)
else:
target_languages = mb_encoding_languages(encoding_iana)
if target_languages:
logger.log(
TRACE,
"{} should target any language(s) of {}".format(
encoding_iana, str(target_languages)
),
)
cd_ratios = []
        # We shall skip the CD when it's about ASCII
        # Most of the time it's not relevant to run "language-detection" on it.
if encoding_iana != "ascii":
for chunk in md_chunks:
chunk_languages = coherence_ratio(
chunk, 0.1, ",".join(target_languages) if target_languages else None
)
cd_ratios.append(chunk_languages)
cd_ratios_merged = merge_coherence_ratios(cd_ratios)
if cd_ratios_merged:
logger.log(
TRACE,
"We detected language {} using {}".format(
cd_ratios_merged, encoding_iana
),
)
results.append(
CharsetMatch(
sequences,
encoding_iana,
mean_mess_ratio,
bom_or_sig_available,
cd_ratios_merged,
decoded_payload,
)
)
if (
encoding_iana in [specified_encoding, "ascii", "utf_8"]
and mean_mess_ratio < 0.1
):
logger.debug(
"Encoding detection: %s is most likely the one.", encoding_iana
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if encoding_iana == sig_encoding:
logger.debug(
"Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
"the beginning of the sequence.",
encoding_iana,
)
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return CharsetMatches([results[encoding_iana]])
if len(results) == 0:
if fallback_u8 or fallback_ascii or fallback_specified:
logger.log(
TRACE,
"Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
)
if fallback_specified:
logger.debug(
"Encoding detection: %s will be used as a fallback match",
fallback_specified.encoding,
)
results.append(fallback_specified)
elif (
(fallback_u8 and fallback_ascii is None)
or (
fallback_u8
and fallback_ascii
and fallback_u8.fingerprint != fallback_ascii.fingerprint
)
or (fallback_u8 is not None)
):
logger.debug("Encoding detection: utf_8 will be used as a fallback match")
results.append(fallback_u8)
elif fallback_ascii:
logger.debug("Encoding detection: ascii will be used as a fallback match")
results.append(fallback_ascii)
if results:
logger.debug(
"Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
results.best().encoding, # type: ignore
len(results) - 1,
)
else:
logger.debug("Encoding detection: Unable to determine any suitable charset.")
if explain:
logger.removeHandler(explain_handler)
logger.setLevel(previous_logger_level)
return results
def from_fp(
fp: BinaryIO,
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Same as the function from_bytes but using a file pointer that is already ready.
    Will not close the file pointer.
"""
return from_bytes(
fp.read(),
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
)
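# Illustrative usage sketch (not part of the library; the file name below is
# hypothetical):
#
#   with open("unknown.txt", "rb") as fp:
#       best = from_fp(fp).best()
#       if best is not None:
#           print(best.encoding, best.language)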
def from_path(
path: "PathLike[Any]",
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
explain: bool = False,
) -> CharsetMatches:
"""
    Same as the function from_bytes but with one extra step: opening and reading the given file path in binary mode.
    Can raise IOError.
"""
with open(path, "rb") as fp:
return from_fp(
fp,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
explain,
)
def normalize(
path: "PathLike[Any]",
steps: int = 5,
chunk_size: int = 512,
threshold: float = 0.20,
cp_isolation: Optional[List[str]] = None,
cp_exclusion: Optional[List[str]] = None,
preemptive_behaviour: bool = True,
) -> CharsetMatch:
"""
Take a (text-based) file path and try to create another file next to it, this time using UTF-8.
"""
warnings.warn(
"normalize is deprecated and will be removed in 3.0",
DeprecationWarning,
)
results = from_path(
path,
steps,
chunk_size,
threshold,
cp_isolation,
cp_exclusion,
preemptive_behaviour,
)
filename = basename(path)
target_extensions = list(splitext(filename))
if len(results) == 0:
raise IOError(
'Unable to normalize "{}", no encoding charset seems to fit.'.format(
filename
)
)
result = results.best()
target_extensions[0] += "-" + result.encoding # type: ignore
with open(
"{}".format(str(path).replace(filename, "".join(target_extensions))), "wb"
) as fp:
fp.write(result.output()) # type: ignore
return result # type: ignore
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/models.py | import warnings
from collections import Counter
from encodings.aliases import aliases
from hashlib import sha256
from json import dumps
from re import sub
from typing import (
Any,
Counter as TypeCounter,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE
from .md import mess_ratio
from .utils import iana_name, is_multi_byte_encoding, unicode_range
class CharsetMatch:
def __init__(
self,
payload: bytes,
guessed_encoding: str,
mean_mess_ratio: float,
has_sig_or_bom: bool,
languages: "CoherenceMatches",
decoded_payload: Optional[str] = None,
):
self._payload: bytes = payload
self._encoding: str = guessed_encoding
self._mean_mess_ratio: float = mean_mess_ratio
self._languages: CoherenceMatches = languages
self._has_sig_or_bom: bool = has_sig_or_bom
self._unicode_ranges: Optional[List[str]] = None
self._leaves: List[CharsetMatch] = []
self._mean_coherence_ratio: float = 0.0
self._output_payload: Optional[bytes] = None
self._output_encoding: Optional[str] = None
self._string: Optional[str] = decoded_payload
def __eq__(self, other: object) -> bool:
if not isinstance(other, CharsetMatch):
raise TypeError(
"__eq__ cannot be invoked on {} and {}.".format(
str(other.__class__), str(self.__class__)
)
)
return self.encoding == other.encoding and self.fingerprint == other.fingerprint
def __lt__(self, other: object) -> bool:
"""
        Implemented to make sorted() available on CharsetMatch items.
"""
if not isinstance(other, CharsetMatch):
raise ValueError
chaos_difference: float = abs(self.chaos - other.chaos)
coherence_difference: float = abs(self.coherence - other.coherence)
        # Below 1% difference --> Use Coherence
if chaos_difference < 0.01 and coherence_difference > 0.02:
            # When having a tough decision, use the result that decoded as many multi-byte characters as possible.
if chaos_difference == 0.0 and self.coherence == other.coherence:
return self.multi_byte_usage > other.multi_byte_usage
return self.coherence > other.coherence
return self.chaos < other.chaos
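    # Illustrative note (sketch, not library code): because __lt__ is defined,
    # sorted() orders CharsetMatch items by chaos first, falling back to
    # coherence when the chaos difference is below 1%:
    #
    #   sorted(list_of_matches)[0]  # -> the most plausible CharsetMatch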
@property
def multi_byte_usage(self) -> float:
return 1.0 - len(str(self)) / len(self.raw)
@property
def chaos_secondary_pass(self) -> float:
"""
        Check the chaos in the decoded text once again, this time with the full content.
        Use with caution, this can be very slow.
Notice: Will be removed in 3.0
"""
warnings.warn(
"chaos_secondary_pass is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return mess_ratio(str(self), 1.0)
@property
def coherence_non_latin(self) -> float:
"""
        Coherence ratio of the first non-Latin language detected, if ANY.
Notice: Will be removed in 3.0
"""
warnings.warn(
"coherence_non_latin is deprecated and will be removed in 3.0",
DeprecationWarning,
)
return 0.0
@property
def w_counter(self) -> TypeCounter[str]:
"""
Word counter instance on decoded text.
Notice: Will be removed in 3.0
"""
warnings.warn(
"w_counter is deprecated and will be removed in 3.0", DeprecationWarning
)
string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower())
return Counter(string_printable_only.split())
def __str__(self) -> str:
# Lazy Str Loading
if self._string is None:
self._string = str(self._payload, self._encoding, "strict")
return self._string
def __repr__(self) -> str:
return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
def add_submatch(self, other: "CharsetMatch") -> None:
if not isinstance(other, CharsetMatch) or other == self:
raise ValueError(
"Unable to add instance <{}> as a submatch of a CharsetMatch".format(
other.__class__
)
)
        other._string = None  # Free the decoded string to save RAM; dirty trick.
self._leaves.append(other)
@property
def encoding(self) -> str:
return self._encoding
@property
def encoding_aliases(self) -> List[str]:
"""
        Encodings are known by many names; using this could help when searching for IBM855 when it's listed as CP855.
"""
also_known_as: List[str] = []
for u, p in aliases.items():
if self.encoding == u:
also_known_as.append(p)
elif self.encoding == p:
also_known_as.append(u)
return also_known_as
@property
def bom(self) -> bool:
return self._has_sig_or_bom
@property
def byte_order_mark(self) -> bool:
return self._has_sig_or_bom
@property
def languages(self) -> List[str]:
"""
        Return the complete list of possible languages found in the decoded sequence.
        Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
"""
return [e[0] for e in self._languages]
@property
def language(self) -> str:
"""
Most probable language found in decoded sequence. If none were detected or inferred, the property will return
"Unknown".
"""
if not self._languages:
# Trying to infer the language based on the given encoding
            # It's either English or we should not pronounce ourselves in certain cases.
if "ascii" in self.could_be_from_charset:
return "English"
# doing it there to avoid circular import
from charset_normalizer.cd import encoding_languages, mb_encoding_languages
languages = (
mb_encoding_languages(self.encoding)
if is_multi_byte_encoding(self.encoding)
else encoding_languages(self.encoding)
)
if len(languages) == 0 or "Latin Based" in languages:
return "Unknown"
return languages[0]
return self._languages[0][0]
@property
def chaos(self) -> float:
return self._mean_mess_ratio
@property
def coherence(self) -> float:
if not self._languages:
return 0.0
return self._languages[0][1]
@property
def percent_chaos(self) -> float:
return round(self.chaos * 100, ndigits=3)
@property
def percent_coherence(self) -> float:
return round(self.coherence * 100, ndigits=3)
@property
def raw(self) -> bytes:
"""
Original untouched bytes.
"""
return self._payload
@property
def submatch(self) -> List["CharsetMatch"]:
return self._leaves
@property
def has_submatch(self) -> bool:
return len(self._leaves) > 0
@property
def alphabets(self) -> List[str]:
if self._unicode_ranges is not None:
return self._unicode_ranges
# list detected ranges
detected_ranges: List[Optional[str]] = [
unicode_range(char) for char in str(self)
]
# filter and sort
self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
return self._unicode_ranges
@property
def could_be_from_charset(self) -> List[str]:
"""
        The complete list of encodings that output the exact SAME str result and therefore could be the originating
        encoding.
This list does include the encoding available in property 'encoding'.
"""
return [self._encoding] + [m.encoding for m in self._leaves]
def first(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def best(self) -> "CharsetMatch":
"""
Kept for BC reasons. Will be removed in 3.0.
"""
return self
def output(self, encoding: str = "utf_8") -> bytes:
"""
        Method to get the re-encoded bytes payload using the given target encoding. Defaults to UTF-8.
        Characters that cannot be encoded are replaced by the encoder, not raised as errors.
"""
if self._output_encoding is None or self._output_encoding != encoding:
self._output_encoding = encoding
self._output_payload = str(self).encode(encoding, "replace")
return self._output_payload # type: ignore
@property
def fingerprint(self) -> str:
"""
Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
"""
return sha256(self.output()).hexdigest()
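# Illustrative sketch (assuming a variable 'match' holds a CharsetMatch):
#
#   data = match.output()       # payload re-encoded as UTF-8, errors replaced
#   digest = match.fingerprint  # SHA256 hex digest of that re-encoded payload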
class CharsetMatches:
"""
    Container of every CharsetMatch item, ordered by default from the most probable to the least.
    Acts like a list (iterable) but does not implement all related methods.
"""
def __init__(self, results: Optional[List[CharsetMatch]] = None):
self._results: List[CharsetMatch] = sorted(results) if results else []
def __iter__(self) -> Iterator[CharsetMatch]:
yield from self._results
def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
"""
        Retrieve a single item either by its position or encoding name (an alias may be used here).
        Raise KeyError when the given encoding is not present in results.
"""
if isinstance(item, int):
return self._results[item]
if isinstance(item, str):
item = iana_name(item, False)
for result in self._results:
if item in result.could_be_from_charset:
return result
raise KeyError
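    # Illustrative sketch: items can be retrieved by position or by encoding
    # name/alias, e.g. matches[0], matches["latin_1"] or matches["latin-1"]
    # (iana_name normalizes the alias before the lookup).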
def __len__(self) -> int:
return len(self._results)
def __bool__(self) -> bool:
return len(self._results) > 0
def append(self, item: CharsetMatch) -> None:
"""
        Insert a single match. It will be inserted in order to preserve sorting.
        It can be attached as a submatch.
"""
if not isinstance(item, CharsetMatch):
raise ValueError(
"Cannot append instance '{}' to CharsetMatches".format(
str(item.__class__)
)
)
# We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
if len(item.raw) <= TOO_BIG_SEQUENCE:
for match in self._results:
if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
match.add_submatch(item)
return
self._results.append(item)
self._results = sorted(self._results)
def best(self) -> Optional["CharsetMatch"]:
"""
Simply return the first match. Strict equivalent to matches[0].
"""
if not self._results:
return None
return self._results[0]
def first(self) -> Optional["CharsetMatch"]:
"""
Redundant method, call the method best(). Kept for BC reasons.
"""
return self.best()
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
class CliDetectionResult:
def __init__(
self,
path: str,
encoding: Optional[str],
encoding_aliases: List[str],
alternative_encodings: List[str],
language: str,
alphabets: List[str],
has_sig_or_bom: bool,
chaos: float,
coherence: float,
unicode_path: Optional[str],
is_preferred: bool,
):
self.path: str = path
self.unicode_path: Optional[str] = unicode_path
self.encoding: Optional[str] = encoding
self.encoding_aliases: List[str] = encoding_aliases
self.alternative_encodings: List[str] = alternative_encodings
self.language: str = language
self.alphabets: List[str] = alphabets
self.has_sig_or_bom: bool = has_sig_or_bom
self.chaos: float = chaos
self.coherence: float = coherence
self.is_preferred: bool = is_preferred
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
return {
"path": self.path,
"encoding": self.encoding,
"encoding_aliases": self.encoding_aliases,
"alternative_encodings": self.alternative_encodings,
"language": self.language,
"alphabets": self.alphabets,
"has_sig_or_bom": self.has_sig_or_bom,
"chaos": self.chaos,
"coherence": self.coherence,
"unicode_path": self.unicode_path,
"is_preferred": self.is_preferred,
}
def to_json(self) -> str:
return dumps(self.__dict__, ensure_ascii=True, indent=4)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cd.py | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
from .assets import FREQUENCIES
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
)
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
    Return the unicode ranges associated with a single-byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
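# Illustrative sketch (hedged example; the exact list depends on the decoder):
#
#   encoding_unicode_range("cp1251")  # -> e.g. ["Basic Latin", "Cyrillic"]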
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
    Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function does the correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
    Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
    This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese", "Classical Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
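# Illustrative sketch, following directly from the rules above:
#
#   mb_encoding_languages("cp932")    # -> ["Japanese"]
#   mb_encoding_languages("gb18030")  # -> ["Chinese", "Classical Chinese"]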
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
    Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True
if target_pure_latin and is_latin(character) is False:
target_pure_latin = False
return target_have_accents, target_pure_latin
def alphabet_languages(
characters: List[str], ignore_non_latin: bool = False
) -> List[str]:
"""
    Return the languages associated with the given characters.
"""
languages: List[Tuple[str, float]] = []
source_have_accents = any(is_accentuated(character) for character in characters)
for language, language_characters in FREQUENCIES.items():
target_have_accents, target_pure_latin = get_target_features(language)
if ignore_non_latin and target_pure_latin is False:
continue
if target_have_accents is False and source_have_accents:
continue
character_count: int = len(language_characters)
character_match_count: int = len(
[c for c in language_characters if c in characters]
)
ratio: float = character_match_count / character_count
if ratio >= 0.2:
languages.append((language, ratio))
languages = sorted(languages, key=lambda x: x[1], reverse=True)
return [compatible_language[0] for compatible_language in languages]
def characters_popularity_compare(
language: str, ordered_characters: List[str]
) -> float:
"""
    Determine if an ordered character list (by occurrence, from most frequent to rarest) matches a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
    Beware that this function is not strict on the match in order to ease the detection. (Meaning a close match is 1.)
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count: int = 0
FREQUENCIES_language_set = set(FREQUENCIES[language])
for character in ordered_characters:
if character not in FREQUENCIES_language_set:
continue
characters_before_source: List[str] = FREQUENCIES[language][
0 : FREQUENCIES[language].index(character)
]
characters_after_source: List[str] = FREQUENCIES[language][
FREQUENCIES[language].index(character) :
]
characters_before: List[str] = ordered_characters[
0 : ordered_characters.index(character)
]
characters_after: List[str] = ordered_characters[
ordered_characters.index(character) :
]
before_match_count: int = len(
set(characters_before) & set(characters_before_source)
)
after_match_count: int = len(
set(characters_after) & set(characters_after_source)
)
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
    Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
    one containing the Latin letters and the other the Hebrew ones.
"""
layers: Dict[str, str] = {}
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range: Optional[str] = unicode_range(character)
if character_range is None:
continue
layer_target_range: Optional[str] = None
for discovered_range in layers:
if (
is_suspiciously_successive_range(discovered_range, character_range)
is False
):
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
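# Illustrative sketch: mixed Latin/Hebrew input splits into two lowercased
# layers (ordered by first appearance):
#
#   alpha_unicode_split("Hello שלום")  # -> e.g. ["hello", "שלום"]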
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
    This function merges results previously given by the function coherence_ratio.
    The return type is the same as coherence_ratio.
"""
per_language_ratios: Dict[str, List[float]] = {}
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
merge = [
(
language,
round(
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
4,
),
)
for language in per_language_ratios
]
return sorted(merge, key=lambda x: x[1], reverse=True)
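# Illustrative sketch: per-chunk ratios are averaged per language.
#
#   merge_coherence_ratios([[("English", 0.8)], [("English", 0.6)]])
#   # -> [("English", 0.7)]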
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
    Detect ANY language that can be identified in the given sequence. The sequence will be analysed by layers.
    A layer = character extraction by alphabets/ranges.
"""
results: List[Tuple[str, float]] = []
ignore_non_latin: bool = False
sufficient_match_count: int = 0
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
if "Latin Based" in lg_inclusion_list:
ignore_non_latin = True
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies: TypeCounter[str] = Counter(layer)
most_common = sequence_frequencies.most_common()
character_count: int = sum(o for c, o in most_common)
if character_count <= TOO_SMALL_SEQUENCE:
continue
popular_character_ordered: List[str] = [c for c, o in most_common]
for language in lg_inclusion_list or alphabet_languages(
popular_character_ordered, ignore_non_latin
):
ratio: float = characters_popularity_compare(
language, popular_character_ordered
)
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(results, key=lambda x: x[1], reverse=True)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/constant.py | from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
from encodings.aliases import aliases
from re import IGNORECASE, compile as re_compile
from typing import Dict, List, Set, Union
from .assets import FREQUENCIES
# Contains, for each eligible encoding, its SIG/BOM marker bytes (a single item or a list).
ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = {
"utf_8": BOM_UTF8,
"utf_7": [
b"\x2b\x2f\x76\x38",
b"\x2b\x2f\x76\x39",
b"\x2b\x2f\x76\x2b",
b"\x2b\x2f\x76\x2f",
b"\x2b\x2f\x76\x38\x2d",
],
"gb18030": b"\x84\x31\x95\x33",
"utf_32": [BOM_UTF32_BE, BOM_UTF32_LE],
"utf_16": [BOM_UTF16_BE, BOM_UTF16_LE],
}
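# Illustrative sketch (an assumption about intended use): a payload that
# starts with one of these marks hints at the corresponding encoding, e.g.:
#
#   b"\xef\xbb\xbfhello".startswith(BOM_UTF8)  # -> True, hints utf_8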
TOO_SMALL_SEQUENCE: int = 32
TOO_BIG_SEQUENCE: int = int(10e6)
UTF8_MAXIMAL_ALLOCATION: int = 1112064
UNICODE_RANGES_COMBINED: Dict[str, range] = {
"Control character": range(31 + 1),
"Basic Latin": range(32, 127 + 1),
"Latin-1 Supplement": range(128, 255 + 1),
"Latin Extended-A": range(256, 383 + 1),
"Latin Extended-B": range(384, 591 + 1),
"IPA Extensions": range(592, 687 + 1),
"Spacing Modifier Letters": range(688, 767 + 1),
"Combining Diacritical Marks": range(768, 879 + 1),
"Greek and Coptic": range(880, 1023 + 1),
"Cyrillic": range(1024, 1279 + 1),
"Cyrillic Supplement": range(1280, 1327 + 1),
"Armenian": range(1328, 1423 + 1),
"Hebrew": range(1424, 1535 + 1),
"Arabic": range(1536, 1791 + 1),
"Syriac": range(1792, 1871 + 1),
"Arabic Supplement": range(1872, 1919 + 1),
"Thaana": range(1920, 1983 + 1),
"NKo": range(1984, 2047 + 1),
"Samaritan": range(2048, 2111 + 1),
"Mandaic": range(2112, 2143 + 1),
"Syriac Supplement": range(2144, 2159 + 1),
"Arabic Extended-A": range(2208, 2303 + 1),
"Devanagari": range(2304, 2431 + 1),
"Bengali": range(2432, 2559 + 1),
"Gurmukhi": range(2560, 2687 + 1),
"Gujarati": range(2688, 2815 + 1),
"Oriya": range(2816, 2943 + 1),
"Tamil": range(2944, 3071 + 1),
"Telugu": range(3072, 3199 + 1),
"Kannada": range(3200, 3327 + 1),
"Malayalam": range(3328, 3455 + 1),
"Sinhala": range(3456, 3583 + 1),
"Thai": range(3584, 3711 + 1),
"Lao": range(3712, 3839 + 1),
"Tibetan": range(3840, 4095 + 1),
"Myanmar": range(4096, 4255 + 1),
"Georgian": range(4256, 4351 + 1),
"Hangul Jamo": range(4352, 4607 + 1),
"Ethiopic": range(4608, 4991 + 1),
"Ethiopic Supplement": range(4992, 5023 + 1),
"Cherokee": range(5024, 5119 + 1),
"Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1),
"Ogham": range(5760, 5791 + 1),
"Runic": range(5792, 5887 + 1),
"Tagalog": range(5888, 5919 + 1),
"Hanunoo": range(5920, 5951 + 1),
"Buhid": range(5952, 5983 + 1),
"Tagbanwa": range(5984, 6015 + 1),
"Khmer": range(6016, 6143 + 1),
"Mongolian": range(6144, 6319 + 1),
"Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1),
"Limbu": range(6400, 6479 + 1),
"Tai Le": range(6480, 6527 + 1),
"New Tai Lue": range(6528, 6623 + 1),
"Khmer Symbols": range(6624, 6655 + 1),
"Buginese": range(6656, 6687 + 1),
"Tai Tham": range(6688, 6831 + 1),
"Combining Diacritical Marks Extended": range(6832, 6911 + 1),
"Balinese": range(6912, 7039 + 1),
"Sundanese": range(7040, 7103 + 1),
"Batak": range(7104, 7167 + 1),
"Lepcha": range(7168, 7247 + 1),
"Ol Chiki": range(7248, 7295 + 1),
"Cyrillic Extended C": range(7296, 7311 + 1),
"Sundanese Supplement": range(7360, 7375 + 1),
"Vedic Extensions": range(7376, 7423 + 1),
"Phonetic Extensions": range(7424, 7551 + 1),
"Phonetic Extensions Supplement": range(7552, 7615 + 1),
"Combining Diacritical Marks Supplement": range(7616, 7679 + 1),
"Latin Extended Additional": range(7680, 7935 + 1),
"Greek Extended": range(7936, 8191 + 1),
"General Punctuation": range(8192, 8303 + 1),
"Superscripts and Subscripts": range(8304, 8351 + 1),
"Currency Symbols": range(8352, 8399 + 1),
"Combining Diacritical Marks for Symbols": range(8400, 8447 + 1),
"Letterlike Symbols": range(8448, 8527 + 1),
"Number Forms": range(8528, 8591 + 1),
"Arrows": range(8592, 8703 + 1),
"Mathematical Operators": range(8704, 8959 + 1),
"Miscellaneous Technical": range(8960, 9215 + 1),
"Control Pictures": range(9216, 9279 + 1),
"Optical Character Recognition": range(9280, 9311 + 1),
"Enclosed Alphanumerics": range(9312, 9471 + 1),
"Box Drawing": range(9472, 9599 + 1),
"Block Elements": range(9600, 9631 + 1),
"Geometric Shapes": range(9632, 9727 + 1),
"Miscellaneous Symbols": range(9728, 9983 + 1),
"Dingbats": range(9984, 10175 + 1),
"Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1),
"Supplemental Arrows-A": range(10224, 10239 + 1),
"Braille Patterns": range(10240, 10495 + 1),
"Supplemental Arrows-B": range(10496, 10623 + 1),
"Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1),
"Supplemental Mathematical Operators": range(10752, 11007 + 1),
"Miscellaneous Symbols and Arrows": range(11008, 11263 + 1),
"Glagolitic": range(11264, 11359 + 1),
"Latin Extended-C": range(11360, 11391 + 1),
"Coptic": range(11392, 11519 + 1),
"Georgian Supplement": range(11520, 11567 + 1),
"Tifinagh": range(11568, 11647 + 1),
"Ethiopic Extended": range(11648, 11743 + 1),
"Cyrillic Extended-A": range(11744, 11775 + 1),
"Supplemental Punctuation": range(11776, 11903 + 1),
"CJK Radicals Supplement": range(11904, 12031 + 1),
"Kangxi Radicals": range(12032, 12255 + 1),
"Ideographic Description Characters": range(12272, 12287 + 1),
"CJK Symbols and Punctuation": range(12288, 12351 + 1),
"Hiragana": range(12352, 12447 + 1),
"Katakana": range(12448, 12543 + 1),
"Bopomofo": range(12544, 12591 + 1),
"Hangul Compatibility Jamo": range(12592, 12687 + 1),
"Kanbun": range(12688, 12703 + 1),
"Bopomofo Extended": range(12704, 12735 + 1),
"CJK Strokes": range(12736, 12783 + 1),
"Katakana Phonetic Extensions": range(12784, 12799 + 1),
"Enclosed CJK Letters and Months": range(12800, 13055 + 1),
"CJK Compatibility": range(13056, 13311 + 1),
"CJK Unified Ideographs Extension A": range(13312, 19903 + 1),
"Yijing Hexagram Symbols": range(19904, 19967 + 1),
"CJK Unified Ideographs": range(19968, 40959 + 1),
"Yi Syllables": range(40960, 42127 + 1),
"Yi Radicals": range(42128, 42191 + 1),
"Lisu": range(42192, 42239 + 1),
"Vai": range(42240, 42559 + 1),
"Cyrillic Extended-B": range(42560, 42655 + 1),
"Bamum": range(42656, 42751 + 1),
"Modifier Tone Letters": range(42752, 42783 + 1),
"Latin Extended-D": range(42784, 43007 + 1),
"Syloti Nagri": range(43008, 43055 + 1),
"Common Indic Number Forms": range(43056, 43071 + 1),
"Phags-pa": range(43072, 43135 + 1),
"Saurashtra": range(43136, 43231 + 1),
"Devanagari Extended": range(43232, 43263 + 1),
"Kayah Li": range(43264, 43311 + 1),
"Rejang": range(43312, 43359 + 1),
"Hangul Jamo Extended-A": range(43360, 43391 + 1),
"Javanese": range(43392, 43487 + 1),
"Myanmar Extended-B": range(43488, 43519 + 1),
"Cham": range(43520, 43615 + 1),
"Myanmar Extended-A": range(43616, 43647 + 1),
"Tai Viet": range(43648, 43743 + 1),
"Meetei Mayek Extensions": range(43744, 43775 + 1),
"Ethiopic Extended-A": range(43776, 43823 + 1),
"Latin Extended-E": range(43824, 43887 + 1),
"Cherokee Supplement": range(43888, 43967 + 1),
"Meetei Mayek": range(43968, 44031 + 1),
"Hangul Syllables": range(44032, 55215 + 1),
"Hangul Jamo Extended-B": range(55216, 55295 + 1),
"High Surrogates": range(55296, 56191 + 1),
"High Private Use Surrogates": range(56192, 56319 + 1),
"Low Surrogates": range(56320, 57343 + 1),
"Private Use Area": range(57344, 63743 + 1),
"CJK Compatibility Ideographs": range(63744, 64255 + 1),
"Alphabetic Presentation Forms": range(64256, 64335 + 1),
"Arabic Presentation Forms-A": range(64336, 65023 + 1),
"Variation Selectors": range(65024, 65039 + 1),
"Vertical Forms": range(65040, 65055 + 1),
"Combining Half Marks": range(65056, 65071 + 1),
"CJK Compatibility Forms": range(65072, 65103 + 1),
"Small Form Variants": range(65104, 65135 + 1),
"Arabic Presentation Forms-B": range(65136, 65279 + 1),
"Halfwidth and Fullwidth Forms": range(65280, 65519 + 1),
"Specials": range(65520, 65535 + 1),
"Linear B Syllabary": range(65536, 65663 + 1),
"Linear B Ideograms": range(65664, 65791 + 1),
"Aegean Numbers": range(65792, 65855 + 1),
"Ancient Greek Numbers": range(65856, 65935 + 1),
"Ancient Symbols": range(65936, 65999 + 1),
"Phaistos Disc": range(66000, 66047 + 1),
"Lycian": range(66176, 66207 + 1),
"Carian": range(66208, 66271 + 1),
"Coptic Epact Numbers": range(66272, 66303 + 1),
"Old Italic": range(66304, 66351 + 1),
"Gothic": range(66352, 66383 + 1),
"Old Permic": range(66384, 66431 + 1),
"Ugaritic": range(66432, 66463 + 1),
"Old Persian": range(66464, 66527 + 1),
"Deseret": range(66560, 66639 + 1),
"Shavian": range(66640, 66687 + 1),
"Osmanya": range(66688, 66735 + 1),
"Osage": range(66736, 66815 + 1),
"Elbasan": range(66816, 66863 + 1),
"Caucasian Albanian": range(66864, 66927 + 1),
"Linear A": range(67072, 67455 + 1),
"Cypriot Syllabary": range(67584, 67647 + 1),
"Imperial Aramaic": range(67648, 67679 + 1),
"Palmyrene": range(67680, 67711 + 1),
"Nabataean": range(67712, 67759 + 1),
"Hatran": range(67808, 67839 + 1),
"Phoenician": range(67840, 67871 + 1),
"Lydian": range(67872, 67903 + 1),
"Meroitic Hieroglyphs": range(67968, 67999 + 1),
"Meroitic Cursive": range(68000, 68095 + 1),
"Kharoshthi": range(68096, 68191 + 1),
"Old South Arabian": range(68192, 68223 + 1),
"Old North Arabian": range(68224, 68255 + 1),
"Manichaean": range(68288, 68351 + 1),
"Avestan": range(68352, 68415 + 1),
"Inscriptional Parthian": range(68416, 68447 + 1),
"Inscriptional Pahlavi": range(68448, 68479 + 1),
"Psalter Pahlavi": range(68480, 68527 + 1),
"Old Turkic": range(68608, 68687 + 1),
"Old Hungarian": range(68736, 68863 + 1),
"Rumi Numeral Symbols": range(69216, 69247 + 1),
"Brahmi": range(69632, 69759 + 1),
"Kaithi": range(69760, 69839 + 1),
"Sora Sompeng": range(69840, 69887 + 1),
"Chakma": range(69888, 69967 + 1),
"Mahajani": range(69968, 70015 + 1),
"Sharada": range(70016, 70111 + 1),
"Sinhala Archaic Numbers": range(70112, 70143 + 1),
"Khojki": range(70144, 70223 + 1),
"Multani": range(70272, 70319 + 1),
"Khudawadi": range(70320, 70399 + 1),
"Grantha": range(70400, 70527 + 1),
"Newa": range(70656, 70783 + 1),
"Tirhuta": range(70784, 70879 + 1),
"Siddham": range(71040, 71167 + 1),
"Modi": range(71168, 71263 + 1),
"Mongolian Supplement": range(71264, 71295 + 1),
"Takri": range(71296, 71375 + 1),
"Ahom": range(71424, 71487 + 1),
"Warang Citi": range(71840, 71935 + 1),
"Zanabazar Square": range(72192, 72271 + 1),
"Soyombo": range(72272, 72367 + 1),
"Pau Cin Hau": range(72384, 72447 + 1),
"Bhaiksuki": range(72704, 72815 + 1),
"Marchen": range(72816, 72895 + 1),
"Masaram Gondi": range(72960, 73055 + 1),
"Cuneiform": range(73728, 74751 + 1),
"Cuneiform Numbers and Punctuation": range(74752, 74879 + 1),
"Early Dynastic Cuneiform": range(74880, 75087 + 1),
"Egyptian Hieroglyphs": range(77824, 78895 + 1),
"Anatolian Hieroglyphs": range(82944, 83583 + 1),
"Bamum Supplement": range(92160, 92735 + 1),
"Mro": range(92736, 92783 + 1),
"Bassa Vah": range(92880, 92927 + 1),
"Pahawh Hmong": range(92928, 93071 + 1),
"Miao": range(93952, 94111 + 1),
"Ideographic Symbols and Punctuation": range(94176, 94207 + 1),
"Tangut": range(94208, 100351 + 1),
"Tangut Components": range(100352, 101119 + 1),
"Kana Supplement": range(110592, 110847 + 1),
"Kana Extended-A": range(110848, 110895 + 1),
"Nushu": range(110960, 111359 + 1),
"Duployan": range(113664, 113823 + 1),
"Shorthand Format Controls": range(113824, 113839 + 1),
"Byzantine Musical Symbols": range(118784, 119039 + 1),
"Musical Symbols": range(119040, 119295 + 1),
"Ancient Greek Musical Notation": range(119296, 119375 + 1),
"Tai Xuan Jing Symbols": range(119552, 119647 + 1),
"Counting Rod Numerals": range(119648, 119679 + 1),
"Mathematical Alphanumeric Symbols": range(119808, 120831 + 1),
"Sutton SignWriting": range(120832, 121519 + 1),
"Glagolitic Supplement": range(122880, 122927 + 1),
"Mende Kikakui": range(124928, 125151 + 1),
"Adlam": range(125184, 125279 + 1),
"Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1),
"Mahjong Tiles": range(126976, 127023 + 1),
"Domino Tiles": range(127024, 127135 + 1),
"Playing Cards": range(127136, 127231 + 1),
"Enclosed Alphanumeric Supplement": range(127232, 127487 + 1),
"Enclosed Ideographic Supplement": range(127488, 127743 + 1),
"Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1),
"Emoticons range(Emoji)": range(128512, 128591 + 1),
"Ornamental Dingbats": range(128592, 128639 + 1),
"Transport and Map Symbols": range(128640, 128767 + 1),
"Alchemical Symbols": range(128768, 128895 + 1),
"Geometric Shapes Extended": range(128896, 129023 + 1),
"Supplemental Arrows-C": range(129024, 129279 + 1),
"Supplemental Symbols and Pictographs": range(129280, 129535 + 1),
"CJK Unified Ideographs Extension B": range(131072, 173791 + 1),
"CJK Unified Ideographs Extension C": range(173824, 177983 + 1),
"CJK Unified Ideographs Extension D": range(177984, 178207 + 1),
"CJK Unified Ideographs Extension E": range(178208, 183983 + 1),
"CJK Unified Ideographs Extension F": range(183984, 191471 + 1),
"CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1),
"Tags": range(917504, 917631 + 1),
"Variation Selectors Supplement": range(917760, 917999 + 1),
}
UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [
"Supplement",
"Extended",
"Extensions",
"Modifier",
"Marks",
"Punctuation",
"Symbols",
"Forms",
"Operators",
"Miscellaneous",
"Drawing",
"Block",
"Shapes",
"Supplemental",
"Tags",
]
RE_POSSIBLE_ENCODING_INDICATION = re_compile(
r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
IGNORECASE,
)
IANA_SUPPORTED: List[str] = sorted(
filter(
lambda x: x.endswith("_codec") is False
and x not in {"rot_13", "tactis", "mbcs"},
list(set(aliases.values())),
)
)
IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED)
# Pre-computed code pages that are similar, using the function cp_similarity.
IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = {
"cp037": ["cp1026", "cp1140", "cp273", "cp500"],
"cp1026": ["cp037", "cp1140", "cp273", "cp500"],
"cp1125": ["cp866"],
"cp1140": ["cp037", "cp1026", "cp273", "cp500"],
"cp1250": ["iso8859_2"],
"cp1251": ["kz1048", "ptcp154"],
"cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
"cp1253": ["iso8859_7"],
"cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
"cp1257": ["iso8859_13"],
"cp273": ["cp037", "cp1026", "cp1140", "cp500"],
"cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
"cp500": ["cp037", "cp1026", "cp1140", "cp273"],
"cp850": ["cp437", "cp857", "cp858", "cp865"],
"cp857": ["cp850", "cp858", "cp865"],
"cp858": ["cp437", "cp850", "cp857", "cp865"],
"cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
"cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
"cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
"cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
"cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
"cp866": ["cp1125"],
"iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
"iso8859_11": ["tis_620"],
"iso8859_13": ["cp1257"],
"iso8859_14": [
"iso8859_10",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_15": [
"cp1252",
"cp1254",
"iso8859_10",
"iso8859_14",
"iso8859_16",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_16": [
"iso8859_14",
"iso8859_15",
"iso8859_2",
"iso8859_3",
"iso8859_9",
"latin_1",
],
"iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
"iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
"iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
"iso8859_7": ["cp1253"],
"iso8859_9": [
"cp1252",
"cp1254",
"cp1258",
"iso8859_10",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_4",
"latin_1",
],
"kz1048": ["cp1251", "ptcp154"],
"latin_1": [
"cp1252",
"cp1254",
"cp1258",
"iso8859_10",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_3",
"iso8859_4",
"iso8859_9",
],
"mac_iceland": ["mac_roman", "mac_turkish"],
"mac_roman": ["mac_iceland", "mac_turkish"],
"mac_turkish": ["mac_iceland", "mac_roman"],
"ptcp154": ["cp1251", "kz1048"],
"tis_620": ["iso8859_11"],
}
CHARDET_CORRESPONDENCE: Dict[str, str] = {
"iso2022_kr": "ISO-2022-KR",
"iso2022_jp": "ISO-2022-JP",
"euc_kr": "EUC-KR",
"tis_620": "TIS-620",
"utf_32": "UTF-32",
"euc_jp": "EUC-JP",
"koi8_r": "KOI8-R",
"iso8859_1": "ISO-8859-1",
"iso8859_2": "ISO-8859-2",
"iso8859_5": "ISO-8859-5",
"iso8859_6": "ISO-8859-6",
"iso8859_7": "ISO-8859-7",
"iso8859_8": "ISO-8859-8",
"utf_16": "UTF-16",
"cp855": "IBM855",
"mac_cyrillic": "MacCyrillic",
"gb2312": "GB2312",
"gb18030": "GB18030",
"cp932": "CP932",
"cp866": "IBM866",
"utf_8": "utf-8",
"utf_8_sig": "UTF-8-SIG",
"shift_jis": "SHIFT_JIS",
"big5": "Big5",
"cp1250": "windows-1250",
"cp1251": "windows-1251",
"cp1252": "Windows-1252",
"cp1253": "windows-1253",
"cp1255": "windows-1255",
"cp1256": "windows-1256",
"cp1254": "Windows-1254",
"cp949": "CP949",
}
COMMON_SAFE_ASCII_CHARACTERS: Set[str] = {
"<",
">",
"=",
":",
"/",
"&",
";",
"{",
"}",
"[",
"]",
",",
"|",
'"',
"-",
}
KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"}
ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"}
NOT_PRINTABLE_PATTERN = re_compile(r"[0-9\W\n\r\t]+")
LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)
# Logging LEVEL below DEBUG
TRACE: int = 5
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cli/__init__.py | |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cli/normalizer.py | import argparse
import sys
from json import dumps
from os.path import abspath
from platform import python_version
from typing import List, Optional
try:
from unicodedata2 import unidata_version
except ImportError:
from unicodedata import unidata_version
from charset_normalizer import from_fp
from charset_normalizer.models import CliDetectionResult
from charset_normalizer.version import __version__
def query_yes_no(question: str, default: str = "yes") -> bool:
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == "":
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
def cli_detect(argv: Optional[List[str]] = None) -> int:
"""
CLI assistant using ARGV and ArgumentParser
:param argv:
    :return: 0 if everything is fine, anything else equals trouble
"""
parser = argparse.ArgumentParser(
description="The Real First Universal Charset Detector. "
"Discover originating encoding used on text file. "
"Normalize text to unicode."
)
parser.add_argument(
"files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
dest="verbose",
help="Display complementary information about file if any. "
"Stdout will contain logs about the detection process.",
)
parser.add_argument(
"-a",
"--with-alternative",
action="store_true",
default=False,
dest="alternatives",
help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
)
parser.add_argument(
"-n",
"--normalize",
action="store_true",
default=False,
dest="normalize",
help="Permit to normalize input file. If not set, program does not write anything.",
)
parser.add_argument(
"-m",
"--minimal",
action="store_true",
default=False,
dest="minimal",
help="Only output the charset detected to STDOUT. Disabling JSON output.",
)
parser.add_argument(
"-r",
"--replace",
action="store_true",
default=False,
dest="replace",
help="Replace file when trying to normalize it instead of creating a new one.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
dest="force",
help="Replace file without asking if you are sure, use this flag with caution.",
)
parser.add_argument(
"-t",
"--threshold",
action="store",
default=0.2,
type=float,
dest="threshold",
help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.",
)
parser.add_argument(
"--version",
action="version",
version="Charset-Normalizer {} - Python {} - Unicode {}".format(
__version__, python_version(), unidata_version
),
help="Show version information and exit.",
)
args = parser.parse_args(argv)
if args.replace is True and args.normalize is False:
print("Use --replace in addition of --normalize only.", file=sys.stderr)
return 1
if args.force is True and args.replace is False:
print("Use --force in addition of --replace only.", file=sys.stderr)
return 1
if args.threshold < 0.0 or args.threshold > 1.0:
print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
return 1
x_ = []
for my_file in args.files:
matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)
best_guess = matches.best()
if best_guess is None:
print(
'Unable to identify originating encoding for "{}". {}'.format(
my_file.name,
"Maybe try increasing maximum amount of chaos."
if args.threshold < 1.0
else "",
),
file=sys.stderr,
)
x_.append(
CliDetectionResult(
abspath(my_file.name),
None,
[],
[],
"Unknown",
[],
False,
1.0,
0.0,
None,
True,
)
)
else:
x_.append(
CliDetectionResult(
abspath(my_file.name),
best_guess.encoding,
best_guess.encoding_aliases,
[
cp
for cp in best_guess.could_be_from_charset
if cp != best_guess.encoding
],
best_guess.language,
best_guess.alphabets,
best_guess.bom,
best_guess.percent_chaos,
best_guess.percent_coherence,
None,
True,
)
)
if len(matches) > 1 and args.alternatives:
for el in matches:
if el != best_guess:
x_.append(
CliDetectionResult(
abspath(my_file.name),
el.encoding,
el.encoding_aliases,
[
cp
for cp in el.could_be_from_charset
if cp != el.encoding
],
el.language,
el.alphabets,
el.bom,
el.percent_chaos,
el.percent_coherence,
None,
False,
)
)
if args.normalize is True:
if best_guess.encoding.startswith("utf") is True:
print(
'"{}" file does not need to be normalized, as it already came from unicode.'.format(
my_file.name
),
file=sys.stderr,
)
if my_file.closed is False:
my_file.close()
continue
o_: List[str] = my_file.name.split(".")
if args.replace is False:
o_.insert(-1, best_guess.encoding)
if my_file.closed is False:
my_file.close()
elif (
args.force is False
and query_yes_no(
                    'Are you sure you want to normalize "{}" by replacing it?'.format(
my_file.name
),
"no",
)
is False
):
if my_file.closed is False:
my_file.close()
continue
try:
x_[0].unicode_path = abspath("./{}".format(".".join(o_)))
with open(x_[0].unicode_path, "w", encoding="utf-8") as fp:
fp.write(str(best_guess))
except IOError as e:
print(str(e), file=sys.stderr)
if my_file.closed is False:
my_file.close()
return 2
if my_file.closed is False:
my_file.close()
if args.minimal is False:
print(
dumps(
[el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
ensure_ascii=True,
indent=4,
)
)
else:
for my_file in args.files:
print(
", ".join(
[
el.encoding or "undefined"
for el in x_
if el.path == abspath(my_file.name)
]
)
)
return 0
if __name__ == "__main__":
cli_detect()
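# Illustrative invocation sketch (hypothetical file name; the printed charset
# depends on the file's content):
#
#   $ python -m charset_normalizer.cli.normalizer --minimal mystery.txt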
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/assets/__init__.py | # -*- coding: utf-8 -*-
from typing import Dict, List
FREQUENCIES: Dict[str, List[str]] = {
"English": [
"e",
"a",
"t",
"i",
"o",
"n",
"s",
"r",
"h",
"l",
"d",
"c",
"u",
"m",
"f",
"p",
"g",
"w",
"y",
"b",
"v",
"k",
"x",
"j",
"z",
"q",
],
"German": [
"e",
"n",
"i",
"r",
"s",
"t",
"a",
"d",
"h",
"u",
"l",
"g",
"o",
"c",
"m",
"b",
"f",
"k",
"w",
"z",
"p",
"v",
"ü",
"ä",
"ö",
"j",
],
"French": [
"e",
"a",
"s",
"n",
"i",
"t",
"r",
"l",
"u",
"o",
"d",
"c",
"p",
"m",
"é",
"v",
"g",
"f",
"b",
"h",
"q",
"à",
"x",
"è",
"y",
"j",
],
"Dutch": [
"e",
"n",
"a",
"i",
"r",
"t",
"o",
"d",
"s",
"l",
"g",
"h",
"v",
"m",
"u",
"k",
"c",
"p",
"b",
"w",
"j",
"z",
"f",
"y",
"x",
"ë",
],
"Italian": [
"e",
"i",
"a",
"o",
"n",
"l",
"t",
"r",
"s",
"c",
"d",
"u",
"p",
"m",
"g",
"v",
"f",
"b",
"z",
"h",
"q",
"è",
"à",
"k",
"y",
"ò",
],
"Polish": [
"a",
"i",
"o",
"e",
"n",
"r",
"z",
"w",
"s",
"c",
"t",
"k",
"y",
"d",
"p",
"m",
"u",
"l",
"j",
"ł",
"g",
"b",
"h",
"ą",
"ę",
"ó",
],
"Spanish": [
"e",
"a",
"o",
"n",
"s",
"r",
"i",
"l",
"d",
"t",
"c",
"u",
"m",
"p",
"b",
"g",
"v",
"f",
"y",
"ó",
"h",
"q",
"í",
"j",
"z",
"á",
],
"Russian": [
"о",
"а",
"е",
"и",
"н",
"с",
"т",
"р",
"в",
"л",
"к",
"м",
"д",
"п",
"у",
"г",
"я",
"ы",
"з",
"б",
"й",
"ь",
"ч",
"х",
"ж",
"ц",
],
"Japanese": [
"の",
"に",
"る",
"た",
"は",
"ー",
"と",
"し",
"を",
"で",
"て",
"が",
"い",
"ン",
"れ",
"な",
"年",
"ス",
"っ",
"ル",
"か",
"ら",
"あ",
"さ",
"も",
"り",
],
"Portuguese": [
"a",
"e",
"o",
"s",
"i",
"r",
"d",
"n",
"t",
"m",
"u",
"c",
"l",
"p",
"g",
"v",
"b",
"f",
"h",
"ã",
"q",
"é",
"ç",
"á",
"z",
"í",
],
"Swedish": [
"e",
"a",
"n",
"r",
"t",
"s",
"i",
"l",
"d",
"o",
"m",
"k",
"g",
"v",
"h",
"f",
"u",
"p",
"ä",
"c",
"b",
"ö",
"å",
"y",
"j",
"x",
],
"Chinese": [
"的",
"一",
"是",
"不",
"了",
"在",
"人",
"有",
"我",
"他",
"这",
"个",
"们",
"中",
"来",
"上",
"大",
"为",
"和",
"国",
"地",
"到",
"以",
"说",
"时",
"要",
"就",
"出",
"会",
],
"Ukrainian": [
"о",
"а",
"н",
"і",
"и",
"р",
"в",
"т",
"е",
"с",
"к",
"л",
"у",
"д",
"м",
"п",
"з",
"я",
"ь",
"б",
"г",
"й",
"ч",
"х",
"ц",
"ї",
],
"Norwegian": [
"e",
"r",
"n",
"t",
"a",
"s",
"i",
"o",
"l",
"d",
"g",
"k",
"m",
"v",
"f",
"p",
"u",
"b",
"h",
"å",
"y",
"j",
"ø",
"c",
"æ",
"w",
],
"Finnish": [
"a",
"i",
"n",
"t",
"e",
"s",
"l",
"o",
"u",
"k",
"ä",
"m",
"r",
"v",
"j",
"h",
"p",
"y",
"d",
"ö",
"g",
"c",
"b",
"f",
"w",
"z",
],
"Vietnamese": [
"n",
"h",
"t",
"i",
"c",
"g",
"a",
"o",
"u",
"m",
"l",
"r",
"à",
"đ",
"s",
"e",
"v",
"p",
"b",
"y",
"ư",
"d",
"á",
"k",
"ộ",
"ế",
],
"Czech": [
"o",
"e",
"a",
"n",
"t",
"s",
"i",
"l",
"v",
"r",
"k",
"d",
"u",
"m",
"p",
"í",
"c",
"h",
"z",
"á",
"y",
"j",
"b",
"ě",
"é",
"ř",
],
"Hungarian": [
"e",
"a",
"t",
"l",
"s",
"n",
"k",
"r",
"i",
"o",
"z",
"á",
"é",
"g",
"m",
"b",
"y",
"v",
"d",
"h",
"u",
"p",
"j",
"ö",
"f",
"c",
],
"Korean": [
"이",
"다",
"에",
"의",
"는",
"로",
"하",
"을",
"가",
"고",
"지",
"서",
"한",
"은",
"기",
"으",
"년",
"대",
"사",
"시",
"를",
"리",
"도",
"인",
"스",
"일",
],
"Indonesian": [
"a",
"n",
"e",
"i",
"r",
"t",
"u",
"s",
"d",
"k",
"m",
"l",
"g",
"p",
"b",
"o",
"h",
"y",
"j",
"c",
"w",
"f",
"v",
"z",
"x",
"q",
],
"Turkish": [
"a",
"e",
"i",
"n",
"r",
"l",
"ı",
"k",
"d",
"t",
"s",
"m",
"y",
"u",
"o",
"b",
"ü",
"ş",
"v",
"g",
"z",
"h",
"c",
"p",
"ç",
"ğ",
],
"Romanian": [
"e",
"i",
"a",
"r",
"n",
"t",
"u",
"l",
"o",
"c",
"s",
"d",
"p",
"m",
"ă",
"f",
"v",
"î",
"g",
"b",
"ș",
"ț",
"z",
"h",
"â",
"j",
],
"Farsi": [
"ا",
"ی",
"ر",
"د",
"ن",
"ه",
"و",
"م",
"ت",
"ب",
"س",
"ل",
"ک",
"ش",
"ز",
"ف",
"گ",
"ع",
"خ",
"ق",
"ج",
"آ",
"پ",
"ح",
"ط",
"ص",
],
"Arabic": [
"ا",
"ل",
"ي",
"م",
"و",
"ن",
"ر",
"ت",
"ب",
"ة",
"ع",
"د",
"س",
"ف",
"ه",
"ك",
"ق",
"أ",
"ح",
"ج",
"ش",
"ط",
"ص",
"ى",
"خ",
"إ",
],
"Danish": [
"e",
"r",
"n",
"t",
"a",
"i",
"s",
"d",
"l",
"o",
"g",
"m",
"k",
"f",
"v",
"u",
"b",
"h",
"p",
"å",
"y",
"ø",
"æ",
"c",
"j",
"w",
],
"Serbian": [
"а",
"и",
"о",
"е",
"н",
"р",
"с",
"у",
"т",
"к",
"ј",
"в",
"д",
"м",
"п",
"л",
"г",
"з",
"б",
"a",
"i",
"e",
"o",
"n",
"ц",
"ш",
],
"Lithuanian": [
"i",
"a",
"s",
"o",
"r",
"e",
"t",
"n",
"u",
"k",
"m",
"l",
"p",
"v",
"d",
"j",
"g",
"ė",
"b",
"y",
"ų",
"š",
"ž",
"c",
"ą",
"į",
],
"Slovene": [
"e",
"a",
"i",
"o",
"n",
"r",
"s",
"l",
"t",
"j",
"v",
"k",
"d",
"p",
"m",
"u",
"z",
"b",
"g",
"h",
"č",
"c",
"š",
"ž",
"f",
"y",
],
"Slovak": [
"o",
"a",
"e",
"n",
"i",
"r",
"v",
"t",
"s",
"l",
"k",
"d",
"m",
"p",
"u",
"c",
"h",
"j",
"b",
"z",
"á",
"y",
"ý",
"í",
"č",
"é",
],
"Hebrew": [
"י",
"ו",
"ה",
"ל",
"ר",
"ב",
"ת",
"מ",
"א",
"ש",
"נ",
"ע",
"ם",
"ד",
"ק",
"ח",
"פ",
"ס",
"כ",
"ג",
"ט",
"צ",
"ן",
"ז",
"ך",
],
"Bulgarian": [
"а",
"и",
"о",
"е",
"н",
"т",
"р",
"с",
"в",
"л",
"к",
"д",
"п",
"м",
"з",
"г",
"я",
"ъ",
"у",
"б",
"ч",
"ц",
"й",
"ж",
"щ",
"х",
],
"Croatian": [
"a",
"i",
"o",
"e",
"n",
"r",
"j",
"s",
"t",
"u",
"k",
"l",
"v",
"d",
"m",
"p",
"g",
"z",
"b",
"c",
"č",
"h",
"š",
"ž",
"ć",
"f",
],
"Hindi": [
"क",
"र",
"स",
"न",
"त",
"म",
"ह",
"प",
"य",
"ल",
"व",
"ज",
"द",
"ग",
"ब",
"श",
"ट",
"अ",
"ए",
"थ",
"भ",
"ड",
"च",
"ध",
"ष",
"इ",
],
"Estonian": [
"a",
"i",
"e",
"s",
"t",
"l",
"u",
"n",
"o",
"k",
"r",
"d",
"m",
"v",
"g",
"p",
"j",
"h",
"ä",
"b",
"õ",
"ü",
"f",
"c",
"ö",
"y",
],
"Simple English": [
"e",
"a",
"t",
"i",
"o",
"n",
"s",
"r",
"h",
"l",
"d",
"c",
"m",
"u",
"f",
"p",
"g",
"w",
"b",
"y",
"v",
"k",
"j",
"x",
"z",
"q",
],
"Thai": [
"า",
"น",
"ร",
"อ",
"ก",
"เ",
"ง",
"ม",
"ย",
"ล",
"ว",
"ด",
"ท",
"ส",
"ต",
"ะ",
"ป",
"บ",
"ค",
"ห",
"แ",
"จ",
"พ",
"ช",
"ข",
"ใ",
],
"Greek": [
"α",
"τ",
"ο",
"ι",
"ε",
"ν",
"ρ",
"σ",
"κ",
"η",
"π",
"ς",
"υ",
"μ",
"λ",
"ί",
"ό",
"ά",
"γ",
"έ",
"δ",
"ή",
"ω",
"χ",
"θ",
"ύ",
],
"Tamil": [
"க",
"த",
"ப",
"ட",
"ர",
"ம",
"ல",
"ன",
"வ",
"ற",
"ய",
"ள",
"ச",
"ந",
"இ",
"ண",
"அ",
"ஆ",
"ழ",
"ங",
"எ",
"உ",
"ஒ",
"ஸ",
],
"Classical Chinese": [
"之",
"年",
"為",
"也",
"以",
"一",
"人",
"其",
"者",
"國",
"有",
"二",
"十",
"於",
"曰",
"三",
"不",
"大",
"而",
"子",
"中",
"五",
"四",
],
"Kazakh": [
"а",
"ы",
"е",
"н",
"т",
"р",
"л",
"і",
"д",
"с",
"м",
"қ",
"к",
"о",
"б",
"и",
"у",
"ғ",
"ж",
"ң",
"з",
"ш",
"й",
"п",
"г",
"ө",
],
}
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles-0.4.0.dist-info/top_level.txt | aiofiles
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiofiles-0.4.0.dist-info/DESCRIPTION.rst | aiofiles: file support for asyncio
==================================
.. image:: https://img.shields.io/pypi/v/aiofiles.svg
:target: https://pypi.python.org/pypi/aiofiles
.. image:: https://travis-ci.org/Tinche/aiofiles.svg?branch=master
:target: https://travis-ci.org/Tinche/aiofiles
.. image:: https://codecov.io/gh/Tinche/aiofiles/branch/master/graph/badge.svg
:target: https://codecov.io/gh/Tinche/aiofiles
**aiofiles** is an Apache2 licensed library, written in Python, for handling local
disk files in asyncio applications.
Ordinary local file IO is blocking, and cannot easily and portably be made
asynchronous. This means doing file IO may interfere with asyncio applications,
which shouldn't block the executing thread. aiofiles helps with this by
introducing asynchronous versions of files that support delegating operations to
a separate thread pool.
.. code-block:: python
async with aiofiles.open('filename', mode='r') as f:
contents = await f.read()
print(contents)
'My file contents'
Asynchronous iteration is also supported.
.. code-block:: python
async with aiofiles.open('filename') as f:
async for line in f:
...
Features
--------
- a file API very similar to Python's standard, blocking API
- support for buffered and unbuffered binary files, and buffered text files
- support for ``async``/``await`` (:PEP:`492`) constructs
Installation
------------
To install aiofiles, simply:
.. code-block:: bash
$ pip install aiofiles
Usage
-----
Files are opened using the ``aiofiles.open()`` coroutine, which in addition to
mirroring the builtin ``open`` accepts optional ``loop`` and ``executor``
arguments. If ``loop`` is absent, the default loop will be used, as per the
set asyncio policy. If ``executor`` is not specified, the default event loop
executor will be used.
In case of success, an asynchronous file object is returned with an
API identical to an ordinary file, except the following methods are coroutines
and delegate to an executor:
* ``close``
* ``flush``
* ``isatty``
* ``read``
* ``readall``
* ``read1``
* ``readinto``
* ``readline``
* ``readlines``
* ``seek``
* ``seekable``
* ``tell``
* ``truncate``
* ``writable``
* ``write``
* ``writelines``
In case of failure, one of the usual exceptions will be raised.
The ``aiofiles.os`` module contains executor-enabled coroutine versions of
several useful ``os`` functions that deal with files:
* ``stat``
* ``sendfile``
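For example (an illustrative sketch; the path is hypothetical), a coroutine
can stat a file without blocking the event loop:

.. code-block:: python

    import aiofiles.os

    async def get_size(path):
        stat_result = await aiofiles.os.stat(path)
        return stat_result.st_size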
Writing tests for aiofiles
~~~~~~~~~~~~~~~~~~~~~~~~~~
Real file IO can be mocked by patching ``aiofiles.threadpool.sync_open``
as desired. The return type also needs to be registered with the
``aiofiles.threadpool.wrap`` dispatcher:
.. code-block:: python
aiofiles.threadpool.wrap.register(mock.MagicMock)(
lambda *args, **kwargs: threadpool.AsyncBufferedIOBase(*args, **kwargs))
async def test_stuff():
data = 'data'
mock_file = mock.MagicMock()
with mock.patch('aiofiles.threadpool.sync_open', return_value=mock_file) as mock_open:
async with aiofiles.open('filename', 'w') as f:
await f.write(data)
mock_file.write.assert_called_once_with(data)
History
~~~~~~~
0.4.0 (2018-08-11)
``````````````````
- Python 3.7 support.
- Removed Python 3.3/3.4 support. If you use these versions, stick to aiofiles 0.3.x.
0.3.2 (2017-09-23)
``````````````````
- The LICENSE is now included in the sdist.
`#31 <https://github.com/Tinche/aiofiles/pull/31>`_
0.3.1 (2017-03-10)
``````````````````
- Introduced a changelog.
- ``aiofiles.os.sendfile`` will now work if the standard ``os`` module contains a ``sendfile`` function.
Contributing
~~~~~~~~~~~~
Contributions are very welcome. Tests can be run with ``tox``, please ensure
the coverage at least stays the same before you submit a pull request.
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt-1.10.10-py3.10.egg-info/SOURCES.txt | LICENSE
README.rst
setup.py
src/wrapt/_wrappers.c
src/wrapt/__init__.py
src/wrapt/arguments.py
src/wrapt/decorators.py
src/wrapt/importer.py
src/wrapt/wrappers.py
src/wrapt.egg-info/PKG-INFO
src/wrapt.egg-info/SOURCES.txt
src/wrapt.egg-info/dependency_links.txt
src/wrapt.egg-info/top_level.txt |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt-1.10.10-py3.10.egg-info/top_level.txt | wrapt
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt-1.10.10-py3.10.egg-info/installed-files.txt | ..\wrapt\__init__.py
..\wrapt\__pycache__\__init__.cpython-310.pyc
..\wrapt\__pycache__\arguments.cpython-310.pyc
..\wrapt\__pycache__\decorators.cpython-310.pyc
..\wrapt\__pycache__\importer.cpython-310.pyc
..\wrapt\__pycache__\wrappers.cpython-310.pyc
..\wrapt\_wrappers.cp310-win_amd64.pyd
..\wrapt\arguments.py
..\wrapt\decorators.py
..\wrapt\importer.py
..\wrapt\wrappers.py
PKG-INFO
SOURCES.txt
dependency_links.txt
top_level.txt
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/wrapt-1.10.10-py3.10.egg-info/dependency_links.txt | |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/_endpoint_helpers.py | import aiohttp.http_exceptions
from aiohttp.client_reqrep import ClientResponse
import asyncio
import botocore.retryhandler
import wrapt
# Monkey patching: We need to insert the aiohttp exception equivalents
# The only other way to do this would be to have another config file :(
_aiohttp_retryable_exceptions = [
aiohttp.ClientConnectionError,
aiohttp.ClientPayloadError,
aiohttp.ServerDisconnectedError,
aiohttp.http_exceptions.HttpProcessingError,
asyncio.TimeoutError,
]
botocore.retryhandler.EXCEPTION_MAP['GENERAL_CONNECTION_ERROR'].extend(
_aiohttp_retryable_exceptions
)
def _text(s, encoding='utf-8', errors='strict'):
if isinstance(s, bytes):
return s.decode(encoding, errors)
return s # pragma: no cover
# Unfortunately aiohttp changed the behavior of streams:
# github.com/aio-libs/aiohttp/issues/1907
# We need this wrapper until we have a final resolution
class _IOBaseWrapper(wrapt.ObjectProxy):
def close(self):
# this stream should not be closed by aiohttp, like 1.x
pass
# This is similar to botocore.response.StreamingBody
class ClientResponseContentProxy(wrapt.ObjectProxy):
"""Proxy object for content stream of http response. This is here in case
you want to pass around the "Body" of the response without closing the
response itself."""
def __init__(self, response):
super().__init__(response.__wrapped__.content)
self._self_response = response
# Note: we don't have a __del__ method as the ClientResponse has a __del__
# which will warn the user if they didn't close/release the response
# explicitly. A release here would mean reading all the unread data
# (which could be very large), and a close would mean being unable to re-
    # use the connection, so the user MUST choose. Default is to warn + close
async def __aenter__(self):
await self._self_response.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self._self_response.__aexit__(exc_type, exc_val, exc_tb)
@property
def url(self):
return self._self_response.url
def close(self):
self._self_response.close()
class ClientResponseProxy(wrapt.ObjectProxy):
"""Proxy object for http response useful for porting from
botocore underlying http library."""
def __init__(self, *args, **kwargs):
super().__init__(ClientResponse(*args, **kwargs))
# this matches ClientResponse._body
self._self_body = None
@property
def status_code(self):
return self.status
@status_code.setter
def status_code(self, value):
# botocore tries to set this, see:
# https://github.com/aio-libs/aiobotocore/issues/190
# Luckily status is an attribute we can set
self.status = value
@property
def content(self):
return self._self_body
@property
def raw(self):
return ClientResponseContentProxy(self)
async def read(self):
self._self_body = await self.__wrapped__.read()
return self._self_body
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/config.py | import copy
import botocore.client
from botocore.exceptions import ParamValidationError
class AioConfig(botocore.client.Config):
def __init__(self, connector_args=None, **kwargs):
super().__init__(**kwargs)
self._validate_connector_args(connector_args)
self.connector_args = copy.copy(connector_args)
if not self.connector_args:
self.connector_args = dict()
if 'keepalive_timeout' not in self.connector_args:
# AWS has a 20 second idle timeout:
# https://forums.aws.amazon.com/message.jspa?messageID=215367
# and aiohttp default timeout is 30s so we set it to something
# reasonable here
self.connector_args['keepalive_timeout'] = 12
def merge(self, other_config):
# Adapted from parent class
config_options = copy.copy(self._user_provided_options)
config_options.update(other_config._user_provided_options)
return AioConfig(self.connector_args, **config_options)
@staticmethod
def _validate_connector_args(connector_args):
if connector_args is None:
return
for k, v in connector_args.items():
# verify_ssl is handled by verify parameter to create_client
if k == 'use_dns_cache':
if not isinstance(v, bool):
raise ParamValidationError(
report='{} value must be a boolean'.format(k))
elif k in ['keepalive_timeout']:
if not isinstance(v, (float, int)):
raise ParamValidationError(
report='{} value must be a float/int'.format(k))
elif k == 'force_close':
if not isinstance(v, bool):
raise ParamValidationError(
report='{} value must be a boolean'.format(k))
# limit is handled by max_pool_connections
elif k == 'ssl_context':
import ssl
if not isinstance(v, ssl.SSLContext):
raise ParamValidationError(
report='{} must be an SSLContext instance'.format(k))
else:
raise ParamValidationError(
report='invalid connector_arg:{}'.format(k))
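# Usage sketch (illustrative values, not part of the library API): the
# validated connector_args are later handed to the aiohttp.TCPConnector
# created by AioEndpointCreator, e.g.
#
#   config = AioConfig(connector_args={'keepalive_timeout': 10},
#                      read_timeout=60)
#   # then: session.create_client('s3', config=config)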
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/response.py | import asyncio
import wrapt
from botocore.exceptions import IncompleteReadError, ReadTimeoutError
class AioReadTimeoutError(ReadTimeoutError, asyncio.TimeoutError):
pass
class StreamingBody(wrapt.ObjectProxy):
"""Wrapper class for an http response body.
This provides a few additional conveniences that do not exist
in the urllib3 model:
* Set the timeout on the socket (i.e read() timeouts)
* Auto validation of content length, if the amount of bytes
we read does not match the content length, an exception
is raised.
"""
_DEFAULT_CHUNK_SIZE = 1024
def __init__(self, raw_stream, content_length):
super().__init__(raw_stream)
self._self_content_length = content_length
self._self_amount_read = 0
# https://github.com/GrahamDumpleton/wrapt/issues/73
async def __aenter__(self):
return await self.__wrapped__.__aenter__()
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)
# NOTE: set_socket_timeout was only for when requests didn't support
# read timeouts, so not needed
def tell(self):
return self._self_amount_read
async def read(self, amt=None):
"""Read at most amt bytes from the stream.
If the amt argument is omitted, read all data.
"""
# botocore to aiohttp mapping
try:
chunk = await self.__wrapped__.read(amt if amt is not None else -1)
except asyncio.TimeoutError as e:
raise AioReadTimeoutError(endpoint_url=self.__wrapped__.url,
error=e)
self._self_amount_read += len(chunk)
if amt is None or (not chunk and amt > 0):
# If the server sends empty contents or
# we ask to read all of the contents, then we know
# we need to verify the content length.
self._verify_content_length()
return chunk
def __aiter__(self):
"""Return an iterator to yield 1k chunks from the raw stream.
"""
return self.iter_chunks(self._DEFAULT_CHUNK_SIZE)
async def __anext__(self):
"""Return the next 1k chunk from the raw stream.
"""
current_chunk = await self.read(self._DEFAULT_CHUNK_SIZE)
if current_chunk:
return current_chunk
raise StopAsyncIteration
anext = __anext__
async def iter_lines(self, chunk_size=1024, keepends=False):
"""Return an iterator to yield lines from the raw stream.
        This is achieved by reading chunks of bytes (of size chunk_size) at a
time from the raw stream, and then yielding lines from there.
"""
pending = b''
async for chunk in self.iter_chunks(chunk_size):
lines = (pending + chunk).splitlines(True)
for line in lines[:-1]:
yield line.splitlines(keepends)[0]
pending = lines[-1]
if pending:
yield pending.splitlines(keepends)[0]
async def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
"""Return an iterator to yield chunks of chunk_size bytes from the raw
stream.
"""
while True:
current_chunk = await self.read(chunk_size)
if current_chunk == b"":
break
yield current_chunk
def _verify_content_length(self):
# See: https://github.com/kennethreitz/requests/issues/1855
# Basically, our http library doesn't do this for us, so we have
        # to do this ourselves.
if self._self_content_length is not None and \
self._self_amount_read != int(self._self_content_length):
raise IncompleteReadError(
actual_bytes=self._self_amount_read,
expected_bytes=int(self._self_content_length))
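# Usage sketch (illustrative names): a StreamingBody typically arrives as the
# 'Body' of a streaming service response and is consumed asynchronously:
#
#   async for chunk in body.iter_chunks(chunk_size=4096):
#       handle(chunk)  # `body` and `handle` are placeholders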
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/signers.py | import datetime
import botocore
import botocore.auth
from botocore.signers import RequestSigner, UnknownSignatureVersionError, \
UnsupportedSignatureVersionError, create_request_object, prepare_request_dict, \
_should_use_global_endpoint, S3PostPresigner
from botocore.exceptions import UnknownClientMethodError
class AioRequestSigner(RequestSigner):
async def handler(self, operation_name=None, request=None, **kwargs):
# This is typically hooked up to the "request-created" event
# from a client's event emitter. When a new request is created
# this method is invoked to sign the request.
# Don't call this method directly.
return await self.sign(operation_name, request)
async def sign(self, operation_name, request, region_name=None,
signing_type='standard', expires_in=None,
signing_name=None):
explicit_region_name = region_name
if region_name is None:
region_name = self._region_name
if signing_name is None:
signing_name = self._signing_name
signature_version = await self._choose_signer(
operation_name, signing_type, request.context)
# Allow mutating request before signing
await self._event_emitter.emit(
'before-sign.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
request=request, signing_name=signing_name,
region_name=self._region_name,
signature_version=signature_version, request_signer=self,
operation_name=operation_name
)
if signature_version != botocore.UNSIGNED:
kwargs = {
'signing_name': signing_name,
'region_name': region_name,
'signature_version': signature_version
}
if expires_in is not None:
kwargs['expires'] = expires_in
signing_context = request.context.get('signing', {})
if not explicit_region_name and signing_context.get('region'):
kwargs['region_name'] = signing_context['region']
if signing_context.get('signing_name'):
kwargs['signing_name'] = signing_context['signing_name']
try:
auth = await self.get_auth_instance(**kwargs)
except UnknownSignatureVersionError as e:
if signing_type != 'standard':
raise UnsupportedSignatureVersionError(
signature_version=signature_version)
else:
raise e
auth.add_auth(request)
async def get_auth_instance(self, signing_name, region_name,
signature_version=None, **kwargs):
if signature_version is None:
signature_version = self._signature_version
cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
if cls is None:
raise UnknownSignatureVersionError(
signature_version=signature_version)
frozen_credentials = None
if self._credentials is not None:
frozen_credentials = await self._credentials.get_frozen_credentials()
kwargs['credentials'] = frozen_credentials
if cls.REQUIRES_REGION:
if self._region_name is None:
raise botocore.exceptions.NoRegionError()
kwargs['region_name'] = region_name
kwargs['service_name'] = signing_name
auth = cls(**kwargs)
return auth
# Alias get_auth for backwards compatibility.
get_auth = get_auth_instance
async def _choose_signer(self, operation_name, signing_type, context):
signing_type_suffix_map = {
'presign-post': '-presign-post',
'presign-url': '-query'
}
suffix = signing_type_suffix_map.get(signing_type, '')
signature_version = self._signature_version
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
handler, response = await self._event_emitter.emit_until_response(
'choose-signer.{0}.{1}'.format(
self._service_id.hyphenize(), operation_name),
signing_name=self._signing_name, region_name=self._region_name,
signature_version=signature_version, context=context)
if response is not None:
signature_version = response
# The suffix needs to be checked again in case we get an improper
# signature version from choose-signer.
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
return signature_version
async def generate_presigned_url(self, request_dict, operation_name,
expires_in=3600, region_name=None,
signing_name=None):
request = create_request_object(request_dict)
await self.sign(operation_name, request, region_name,
'presign-url', expires_in, signing_name)
request.prepare()
return request.url
def add_generate_db_auth_token(class_attributes, **kwargs):
class_attributes['generate_db_auth_token'] = generate_db_auth_token
async def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
"""Generates an auth token used to connect to a db with IAM credentials.
:type DBHostname: str
:param DBHostname: The hostname of the database to connect to.
:type Port: int
:param Port: The port number the database is listening on.
:type DBUsername: str
:param DBUsername: The username to log in as.
:type Region: str
:param Region: The region the database is in. If None, the client
region will be used.
:return: A presigned url which can be used as an auth token.
"""
region = Region
if region is None:
region = self.meta.region_name
params = {
'Action': 'connect',
'DBUser': DBUsername,
}
request_dict = {
'url_path': '/',
'query_string': '',
'headers': {},
'body': params,
'method': 'GET'
}
# RDS requires that the scheme not be set when sent over. This can cause
# issues when signing because the Python url parsing libraries follow
# RFC 1808 closely, which states that a netloc must be introduced by `//`.
# Otherwise the url is presumed to be relative, and thus the whole
# netloc would be treated as a path component. To work around this we
# introduce https here and remove it once we're done processing it.
scheme = 'https://'
endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port)
prepare_request_dict(request_dict, endpoint_url)
presigned_url = await self._request_signer.generate_presigned_url(
operation_name='connect', request_dict=request_dict,
region_name=region, expires_in=900, signing_name='rds-db'
)
return presigned_url[len(scheme):]
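# Usage sketch (illustrative host/user): once attached to an RDS client by
# add_generate_db_auth_token below, this is called as
#
#   token = await client.generate_db_auth_token(
#       'db.example.com', 3306, 'db_user')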
def add_generate_presigned_url(class_attributes, **kwargs):
class_attributes['generate_presigned_url'] = generate_presigned_url
async def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600,
HttpMethod=None):
"""Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
:returns: The presigned url
"""
client_method = ClientMethod
params = Params
if params is None:
params = {}
expires_in = ExpiresIn
http_method = HttpMethod
context = {
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
}
request_signer = self._request_signer
serializer = self._serializer
try:
operation_name = self._PY_TO_OP_NAME[client_method]
except KeyError:
raise UnknownClientMethodError(method_name=client_method)
operation_model = self.meta.service_model.operation_model(
operation_name)
params = await self._emit_api_params(params, operation_model, context)
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
params, operation_model)
# Switch out the http method if user specified it.
if http_method is not None:
request_dict['method'] = http_method
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url, context=context)
# Generate the presigned url.
return await request_signer.generate_presigned_url(
request_dict=request_dict, expires_in=expires_in,
operation_name=operation_name)
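# Usage sketch (illustrative bucket/key): once attached to a client by
# add_generate_presigned_url above, this coroutine is invoked as
#
#   url = await client.generate_presigned_url(
#       'get_object', Params={'Bucket': 'a-bucket', 'Key': 'a-key'},
#       ExpiresIn=300)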
class AioS3PostPresigner(S3PostPresigner):
async def generate_presigned_post(self, request_dict, fields=None,
conditions=None, expires_in=3600,
region_name=None):
if fields is None:
fields = {}
if conditions is None:
conditions = []
# Create the policy for the post.
policy = {}
# Create an expiration date for the policy
datetime_now = datetime.datetime.utcnow()
expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
# Append all of the conditions that the user supplied.
policy['conditions'] = []
for condition in conditions:
policy['conditions'].append(condition)
# Store the policy and the fields in the request for signing
request = create_request_object(request_dict)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
await self._request_signer.sign(
'PutObject', request, region_name, 'presign-post')
        # Return the url and the fields for the form to post.
return {'url': request.url, 'fields': fields}
def add_generate_presigned_post(class_attributes, **kwargs):
class_attributes['generate_presigned_post'] = generate_presigned_post
async def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None,
ExpiresIn=3600):
bucket = Bucket
key = Key
fields = Fields
conditions = Conditions
expires_in = ExpiresIn
if fields is None:
fields = {}
else:
fields = fields.copy()
if conditions is None:
conditions = []
post_presigner = AioS3PostPresigner(self._request_signer)
serializer = self._serializer
# We choose the CreateBucket operation model because its url gets
# serialized to what a presign post requires.
operation_model = self.meta.service_model.operation_model(
'CreateBucket')
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
{'Bucket': bucket}, operation_model)
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url,
context={
'is_presign_request': True,
'use_global_endpoint': _should_use_global_endpoint(self),
},
)
    # Append the bucket name to the list of conditions.
conditions.append({'bucket': bucket})
# If the key ends with filename, the only constraint that can be
# imposed is if it starts with the specified prefix.
if key.endswith('${filename}'):
conditions.append(["starts-with", '$key', key[:-len('${filename}')]])
else:
conditions.append({'key': key})
# Add the key to the fields.
fields['key'] = key
return await post_presigner.generate_presigned_post(
request_dict=request_dict, fields=fields, conditions=conditions,
expires_in=expires_in)
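# Usage sketch (illustrative names): once attached to a client by
# add_generate_presigned_post above, the returned dict feeds a multipart
# upload form:
#
#   post = await client.generate_presigned_post('a-bucket', 'a-key')
#   # post['url'] and post['fields'] are then used to build the POST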
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/hooks.py | import asyncio
from botocore.hooks import HierarchicalEmitter, logger
class AioHierarchicalEmitter(HierarchicalEmitter):
async def _emit(self, event_name, kwargs, stop_on_response=False):
responses = []
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
handlers_to_call = self._lookup_cache.get(event_name)
if handlers_to_call is None:
handlers_to_call = self._handlers.prefix_search(event_name)
self._lookup_cache[event_name] = handlers_to_call
elif not handlers_to_call:
            # Short circuit and return an empty response if we have
# no handlers to call. This is the common case where
# for the majority of signals, nothing is listening.
return []
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('Event %s: calling handler %s', event_name, handler)
            # Await the handler if it's a coroutine.
if asyncio.iscoroutinefunction(handler):
response = await handler(**kwargs)
else:
response = handler(**kwargs)
responses.append((handler, response))
if stop_on_response and response is not None:
return responses
return responses
async def emit_until_response(self, event_name, **kwargs):
responses = await self._emit(event_name, kwargs, stop_on_response=True)
if responses:
return responses[-1]
else:
return None, None
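# Usage sketch (illustrative event name/handler): registration is inherited
# from HierarchicalEmitter; _emit above awaits coroutine handlers and calls
# plain functions directly, so both styles work:
#
#   emitter = AioHierarchicalEmitter()
#   async def on_created(**kwargs): ...
#   emitter.register('request-created.s3.PutObject', on_created)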
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/args.py | import copy
from botocore.args import ClientArgsCreator
import botocore.serialize
import botocore.parsers
from .config import AioConfig
from .endpoint import AioEndpointCreator
from .signers import AioRequestSigner
class AioClientArgsCreator(ClientArgsCreator):
# NOTE: we override this so we can pull out the custom AioConfig params and
# use an AioEndpointCreator
def get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials, scoped_config,
client_config, endpoint_bridge):
final_args = self.compute_client_args(
service_model, client_config, endpoint_bridge, region_name,
endpoint_url, is_secure, scoped_config)
# service_name = final_args['service_name']
parameter_validation = final_args['parameter_validation']
endpoint_config = final_args['endpoint_config']
protocol = final_args['protocol']
config_kwargs = final_args['config_kwargs']
s3_config = final_args['s3_config']
partition = endpoint_config['metadata'].get('partition', None)
socket_options = final_args['socket_options']
signing_region = endpoint_config['signing_region']
endpoint_region_name = endpoint_config['region_name']
event_emitter = copy.copy(self._event_emitter)
signer = AioRequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
# aiobotocore addition
if isinstance(client_config, AioConfig):
connector_args = client_config.connector_args
else:
connector_args = None
new_config = AioConfig(connector_args, **config_kwargs)
endpoint_creator = AioEndpointCreator(event_emitter)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
connector_args=new_config.connector_args)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation)
response_parser = botocore.parsers.create_parser(protocol)
return {
'serializer': serializer,
'endpoint': endpoint,
'response_parser': response_parser,
'event_emitter': event_emitter,
'request_signer': signer,
'service_model': service_model,
'loader': self._loader,
'client_config': new_config,
'partition': partition,
'exceptions_factory': self._exceptions_factory
}
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/__init__.py | from .session import get_session, AioSession
__all__ = ['get_session', 'AioSession']
__version__ = '1.2.0'
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/waiter.py | import asyncio
# WaiterModel is required for client.py import
from botocore.exceptions import ClientError
from botocore.waiter import WaiterModel # noqa: F401, lgtm[py/unused-import]
from botocore.waiter import Waiter, xform_name, logger, WaiterError, \
NormalizedOperationMethod as _NormalizedOperationMethod
from botocore.docs.docstring import WaiterDocstring
from botocore.utils import get_service_module_name
class NormalizedOperationMethod(_NormalizedOperationMethod):
async def __call__(self, **kwargs):
try:
return await self._client_method(**kwargs)
except ClientError as e:
return e.response
class AIOWaiter(Waiter):
async def wait(self, **kwargs):
acceptors = list(self.config.acceptors)
current_state = 'waiting'
# pop the invocation specific config
config = kwargs.pop('WaiterConfig', {})
sleep_amount = config.get('Delay', self.config.delay)
max_attempts = config.get('MaxAttempts', self.config.max_attempts)
last_matched_acceptor = None
num_attempts = 0
while True:
response = await self._operation_method(**kwargs)
num_attempts += 1
for acceptor in acceptors:
if acceptor.matcher_func(response):
last_matched_acceptor = acceptor
current_state = acceptor.state
break
else:
# If none of the acceptors matched, we should
# transition to the failure state if an error
# response was received.
if 'Error' in response:
# Transition to a failure state, which we
# can just handle here by raising an exception.
raise WaiterError(
name=self.name,
reason='An error occurred (%s): %s' % (
response['Error'].get('Code', 'Unknown'),
response['Error'].get('Message', 'Unknown'),
),
last_response=response,
)
if current_state == 'success':
logger.debug("Waiting complete, waiter matched the "
"success state.")
return
if current_state == 'failure':
reason = 'Waiter encountered a terminal failure state: %s' % (
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
if num_attempts >= max_attempts:
if last_matched_acceptor is None:
reason = 'Max attempts exceeded'
else:
reason = 'Max attempts exceeded. Previously accepted state: %s' % (
acceptor.explanation
)
raise WaiterError(
name=self.name,
reason=reason,
last_response=response,
)
await asyncio.sleep(sleep_amount)
def create_waiter_with_client(waiter_name, waiter_model, client):
"""
:type waiter_name: str
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiter_model: botocore.waiter.WaiterModel
:param waiter_model: The model for the waiter configuration.
:type client: botocore.client.BaseClient
:param client: The botocore client associated with the service.
:rtype: botocore.waiter.Waiter
:return: The waiter object.
"""
single_waiter_config = waiter_model.get_waiter(waiter_name)
operation_name = xform_name(single_waiter_config.operation)
operation_method = NormalizedOperationMethod(
getattr(client, operation_name))
# Create a new wait method that will serve as a proxy to the underlying
# Waiter.wait method. This is needed to attach a docstring to the
# method.
async def wait(self, **kwargs):
await AIOWaiter.wait(self, **kwargs)
wait.__doc__ = WaiterDocstring(
waiter_name=waiter_name,
event_emitter=client.meta.events,
service_model=client.meta.service_model,
service_waiter_model=waiter_model,
include_signature=False
)
# Rename the waiter class based on the type of waiter.
waiter_class_name = str('%s.AIOWaiter.%s' % (
get_service_module_name(client.meta.service_model),
waiter_name))
# Create the new waiter class
documented_waiter_cls = type(
waiter_class_name, (AIOWaiter,), {'wait': wait})
# Return an instance of the new waiter class.
return documented_waiter_cls(
waiter_name, single_waiter_config, operation_method
)
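# Usage sketch (illustrative waiter and bucket names): a waiter built by
# create_waiter_with_client is awaited like
#
#   waiter = create_waiter_with_client('BucketExists', waiter_model, client)
#   await waiter.wait(Bucket='a-bucket')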
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/endpoint.py | import aiohttp
import asyncio
import io
import ssl
import aiohttp.http_exceptions
from aiohttp.client import URL
from botocore.endpoint import EndpointCreator, Endpoint, DEFAULT_TIMEOUT, \
MAX_POOL_CONNECTIONS, logger, history_recorder, create_request_object
from botocore.exceptions import ConnectionClosedError
from botocore.hooks import first_non_none_response
from botocore.utils import is_valid_endpoint_url
from multidict import MultiDict
from urllib.parse import urlparse
from urllib3.response import HTTPHeaderDict
from aiobotocore.response import StreamingBody
from aiobotocore._endpoint_helpers import _text, _IOBaseWrapper, \
ClientResponseProxy
async def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
# botocore converts keys to str, so make sure that they are in
# the expected case. See detailed discussion here:
# https://github.com/aio-libs/aiobotocore/pull/116
# aiohttp's CIMultiDict camel cases the headers :(
'headers': HTTPHeaderDict(
{k.decode('utf-8').lower(): v.decode('utf-8')
for k, v in http_response.raw_headers}),
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = await http_response.read()
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = await http_response.read()
return response_dict
class AioEndpoint(Endpoint):
def __init__(self, *args, proxies=None, **kwargs):
super().__init__(*args, **kwargs)
self.proxies = proxies or {}
async def create_request(self, params, operation_model=None):
request = create_request_object(params)
if operation_model:
request.stream_output = any([
operation_model.has_streaming_output,
operation_model.has_event_stream_output
])
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'request-created.{service_id}.{op_name}'.format(
service_id=service_id,
op_name=operation_model.name)
await self._event_emitter.emit(event_name, request=request,
operation_name=operation_model.name)
prepared_request = self.prepare_request(request)
return prepared_request
async def _send_request(self, request_dict, operation_model):
attempts = 1
request = await self.create_request(request_dict, operation_model)
context = request_dict['context']
success_response, exception = await self._get_response(
request, operation_model, context)
while await self._needs_retry(attempts, operation_model,
request_dict, success_response,
exception):
attempts += 1
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
request.reset_stream()
# Create a new request when retried (including a new signature).
request = await self.create_request(
request_dict, operation_model)
success_response, exception = await self._get_response(
request, operation_model, context)
if success_response is not None and \
'ResponseMetadata' in success_response[1]:
# We want to share num retries, not num attempts.
total_retries = attempts - 1
success_response[1]['ResponseMetadata']['RetryAttempts'] = \
total_retries
if exception is not None:
raise exception
else:
return success_response
async def _get_response(self, request, operation_model, context):
# This will return a tuple of (success_response, exception)
# and success_response is itself a tuple of
# (http_response, parsed_dict).
# If an exception occurs then the success_response is None.
# If no exception occurs then exception is None.
success_response, exception = await self._do_get_response(
request, operation_model)
kwargs_to_emit = {
'response_dict': None,
'parsed_response': None,
'context': context,
'exception': exception,
}
if success_response is not None:
http_response, parsed_response = success_response
kwargs_to_emit['parsed_response'] = parsed_response
kwargs_to_emit['response_dict'] = await convert_to_response_dict(
http_response, operation_model)
service_id = operation_model.service_model.service_id.hyphenize()
await self._event_emitter.emit(
'response-received.%s.%s' % (
service_id, operation_model.name), **kwargs_to_emit)
return success_response, exception
async def _do_get_response(self, request, operation_model):
try:
logger.debug("Sending http request: %s", request)
history_recorder.record('HTTP_REQUEST', {
'method': request.method,
'headers': request.headers,
'streaming': operation_model.has_streaming_input,
'url': request.url,
'body': request.body
})
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'before-send.%s.%s' % (
service_id, operation_model.name)
responses = await self._event_emitter.emit(event_name,
request=request)
http_response = first_non_none_response(responses)
if http_response is None:
http_response = await self._send(request)
except aiohttp.ClientConnectionError as e:
e.request = request # botocore expects the request property
return None, e
except aiohttp.http_exceptions.BadStatusLine:
better_exception = ConnectionClosedError(
endpoint_url=request.url, request=request)
return None, better_exception
except Exception as e:
logger.debug("Exception received when sending HTTP request.",
exc_info=True)
return None, e
# This returns the http_response and the parsed_data.
response_dict = await convert_to_response_dict(http_response,
operation_model)
http_response_record_dict = response_dict.copy()
http_response_record_dict['streaming'] = \
operation_model.has_streaming_output
history_recorder.record('HTTP_RESPONSE', http_response_record_dict)
protocol = operation_model.metadata['protocol']
parser = self._response_parser_factory.create_parser(protocol)
parsed_response = parser.parse(
response_dict, operation_model.output_shape)
if http_response.status_code >= 300:
self._add_modeled_error_fields(
response_dict, parsed_response,
operation_model, parser,
)
history_recorder.record('PARSED_RESPONSE', parsed_response)
return (http_response, parsed_response), None
    # NOTE: The only change here is time.sleep being replaced by asyncio.sleep
async def _needs_retry(self, attempts, operation_model, request_dict,
response=None, caught_exception=None):
service_id = operation_model.service_model.service_id.hyphenize()
event_name = 'needs-retry.%s.%s' % (
service_id,
operation_model.name)
responses = await self._event_emitter.emit(
event_name, response=response, endpoint=self,
operation=operation_model, attempts=attempts,
caught_exception=caught_exception, request_dict=request_dict)
handler_response = first_non_none_response(responses)
if handler_response is None:
return False
else:
# Request needs to be retried, and we need to sleep
# for the specified number of times.
logger.debug("Response received to retry, sleeping for "
"%s seconds", handler_response)
await asyncio.sleep(handler_response)
return True
async def _send(self, request):
# Note: When using aiobotocore with dynamodb, requests fail on crc32
# checksum computation as soon as the response data reaches ~5KB.
# When AWS response is gzip compressed:
# 1. aiohttp is automatically decompressing the data
# (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
# 2. botocore computes crc32 on the uncompressed data bytes and fails
        # because crc32 has been computed on the compressed data.
        # The following line forces AWS not to use gzip compression;
# if there is a way to configure aiohttp not to perform decompression,
# we can remove the following line and take advantage of
# aws gzip compression.
# https://github.com/boto/botocore/issues/1255
url = request.url
headers = request.headers
data = request.body
headers['Accept-Encoding'] = 'identity'
headers_ = MultiDict(
(z[0], _text(z[1], encoding='utf-8')) for z in headers.items())
# botocore does this during the request so we do this here as well
# TODO: this should be part of the ClientSession, perhaps make wrapper
proxy = self.proxies.get(urlparse(url.lower()).scheme)
if isinstance(data, io.IOBase):
data = _IOBaseWrapper(data)
url = URL(url, encoded=True)
resp = await self.http_session.request(
request.method, url=url, headers=headers_, data=data, proxy=proxy)
# If we're not streaming, read the content so we can retry any timeout
# errors, see:
# https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604
if not request.stream_output:
await resp.read()
return resp
class AioEndpointCreator(EndpointCreator):
# TODO: handle socket_options
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS,
http_session_cls=aiohttp.ClientSession,
proxies=None,
socket_options=None,
client_cert=None,
connector_args=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
endpoint_prefix = service_model.endpoint_prefix
logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout)
if isinstance(timeout, (list, tuple)):
conn_timeout, read_timeout = timeout
else:
conn_timeout = read_timeout = timeout
if connector_args is None:
# AWS has a 20 second idle timeout:
# https://forums.aws.amazon.com/message.jspa?messageID=215367
# aiohttp default timeout is 30s so set something reasonable here
connector_args = dict(keepalive_timeout=12)
timeout = aiohttp.ClientTimeout(
sock_connect=conn_timeout,
sock_read=read_timeout
)
ssl_context = None
if client_cert:
if isinstance(client_cert, str):
key_file = None
cert_file = client_cert
elif isinstance(client_cert, tuple):
cert_file, key_file = client_cert
else:
assert False
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(cert_file, key_file)
connector = aiohttp.TCPConnector(
limit=max_pool_connections,
verify_ssl=self._get_verify_value(verify),
ssl_context=ssl_context,
**connector_args)
aio_session = http_session_cls(
connector=connector,
timeout=timeout,
skip_auto_headers={'CONTENT-TYPE'},
response_class=ClientResponseProxy,
auto_decompress=False)
return AioEndpoint(
endpoint_url,
endpoint_prefix=endpoint_prefix,
event_emitter=self._event_emitter,
response_parser_factory=response_parser_factory,
http_session=aio_session,
proxies=proxies)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/utils.py | import asyncio
import logging
import json
import aiohttp
import aiohttp.client_exceptions
from botocore.utils import ContainerMetadataFetcher, InstanceMetadataFetcher, \
IMDSFetcher, get_environ_proxies, BadIMDSRequestError, S3RegionRedirector, \
ClientError
from botocore.exceptions import (
InvalidIMDSEndpointError, MetadataRetrievalError,
)
import botocore.awsrequest
logger = logging.getLogger(__name__)
RETRYABLE_HTTP_ERRORS = (aiohttp.client_exceptions.ClientError, asyncio.TimeoutError)
class AioIMDSFetcher(IMDSFetcher):
class Response(object):
def __init__(self, status_code, text, url):
self.status_code = status_code
self.url = url
self.text = text
self.content = text
def __init__(self, *args, session=None, **kwargs):
super(AioIMDSFetcher, self).__init__(*args, **kwargs)
self._trust_env = bool(get_environ_proxies(self._base_url))
self._session = session or aiohttp.ClientSession
async def _fetch_metadata_token(self):
self._assert_enabled()
url = self._base_url + self._TOKEN_PATH
headers = {
'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL,
}
self._add_user_agent(headers)
request = botocore.awsrequest.AWSRequest(
method='PUT', url=url, headers=headers)
timeout = aiohttp.ClientTimeout(total=self._timeout)
async with self._session(timeout=timeout,
trust_env=self._trust_env) as session:
for i in range(self._num_attempts):
try:
async with session.put(url, headers=headers) as resp:
text = await resp.text()
if resp.status == 200:
return text
elif resp.status in (404, 403, 405):
return None
elif resp.status in (400,):
raise BadIMDSRequestError(request)
except asyncio.TimeoutError:
return None
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
except aiohttp.client_exceptions.ClientConnectorError as e:
if getattr(e, 'errno', None) == 8 or \
str(getattr(e, 'os_error', None)) == \
'Domain name not found': # threaded vs async resolver
raise InvalidIMDSEndpointError(endpoint=url, error=e)
else:
raise
return None
async def _get_request(self, url_path, retry_func, token=None):
self._assert_enabled()
if retry_func is None:
retry_func = self._default_retry
url = self._base_url + url_path
headers = {}
if token is not None:
headers['x-aws-ec2-metadata-token'] = token
self._add_user_agent(headers)
timeout = aiohttp.ClientTimeout(total=self._timeout)
async with self._session(timeout=timeout,
trust_env=self._trust_env) as session:
for i in range(self._num_attempts):
try:
async with session.get(url, headers=headers) as resp:
text = await resp.text()
response = self.Response(resp.status, text, resp.url)
if not retry_func(response):
return response
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
raise self._RETRIES_EXCEEDED_ERROR_CLS()
class AioInstanceMetadataFetcher(AioIMDSFetcher, InstanceMetadataFetcher):
async def retrieve_iam_role_credentials(self):
try:
token = await self._fetch_metadata_token()
role_name = await self._get_iam_role(token)
credentials = await self._get_credentials(role_name, token)
if self._contains_all_credential_fields(credentials):
return {
'role_name': role_name,
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['Token'],
'expiry_time': credentials['Expiration'],
}
else:
if 'Code' in credentials and 'Message' in credentials:
                logger.debug('Error response received when retrieving '
'credentials: %s.', credentials)
return {}
except self._RETRIES_EXCEEDED_ERROR_CLS:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
self._num_attempts)
except BadIMDSRequestError as e:
logger.debug("Bad IMDS request: %s", e.request)
return {}
async def _get_iam_role(self, token=None):
r = await self._get_request(
url_path=self._URL_PATH,
retry_func=self._needs_retry_for_role_name,
token=token
)
return r.text
async def _get_credentials(self, role_name, token=None):
r = await self._get_request(
url_path=self._URL_PATH + role_name,
retry_func=self._needs_retry_for_credentials,
token=token
)
return json.loads(r.text)
class AioS3RegionRedirector(S3RegionRedirector):
async def redirect_from_error(self, request_dict, response, operation, **kwargs):
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
if self._is_s3_accesspoint(request_dict.get('context', {})):
logger.debug(
'S3 request was previously to an accesspoint, not redirecting.'
)
return
if request_dict.get('context', {}).get('s3_redirected'):
logger.debug(
'S3 request was previously redirected, not redirecting.')
return
error = response[1].get('Error', {})
error_code = error.get('Code')
response_metadata = response[1].get('ResponseMetadata', {})
# We have to account for 400 responses because
# if we sign a Head* request with the wrong region,
# we'll get a 400 Bad Request but we won't get a
# body saying it's an "AuthorizationHeaderMalformed".
is_special_head_object = (
error_code in ['301', '400'] and
operation.name == 'HeadObject'
)
is_special_head_bucket = (
error_code in ['301', '400'] and
operation.name == 'HeadBucket' and
'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
)
is_wrong_signing_region = (
error_code == 'AuthorizationHeaderMalformed' and
'Region' in error
)
is_redirect_status = response[0] is not None and \
response[0].status_code in [301, 302, 307]
is_permanent_redirect = error_code == 'PermanentRedirect'
if not any([is_special_head_object, is_wrong_signing_region,
is_permanent_redirect, is_special_head_bucket,
is_redirect_status]):
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = await self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
request_dict['context']['s3_redirected'] = True
# Return 0 so it doesn't wait to retry
return 0
async def get_bucket_region(self, bucket, response):
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = await self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
class AioContainerMetadataFetcher(ContainerMetadataFetcher):
def __init__(self, session=None, sleep=asyncio.sleep):
if session is None:
session = aiohttp.ClientSession
super(AioContainerMetadataFetcher, self).__init__(session, sleep)
async def retrieve_full_uri(self, full_url, headers=None):
self._validate_allowed_url(full_url)
return await self._retrieve_credentials(full_url, headers)
async def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return await self._retrieve_credentials(full_url)
async def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return await self._get_response(
full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
await self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
async def _get_response(self, full_url, headers, timeout):
try:
timeout = aiohttp.ClientTimeout(total=self.TIMEOUT_SECONDS)
async with self._session(timeout=timeout) as session:
async with session.get(full_url, headers=headers) as resp:
if resp.status != 200:
text = await resp.text()
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%d) "
"from ECS metadata: %s"
) % (resp.status, text))
try:
return await resp.json()
except ValueError:
text = await resp.text()
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, text)
raise MetadataRetrievalError(error_msg=error_msg)
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/credentials.py | import asyncio
import datetime
import logging
import subprocess
import json
from copy import deepcopy
from typing import Optional
from hashlib import sha1
from dateutil.tz import tzutc
from botocore import UNSIGNED
from botocore.config import Config
import botocore.compat
from botocore.credentials import EnvProvider, Credentials, RefreshableCredentials, \
ReadOnlyCredentials, ContainerProvider, ContainerMetadataFetcher, \
_parse_if_needed, InstanceMetadataProvider, _get_client_creator, \
ProfileProviderBuilder, ConfigProvider, SharedCredentialProvider, \
ProcessProvider, AssumeRoleWithWebIdentityProvider, _local_now, \
CachedCredentialFetcher, _serialize_if_needed, BaseAssumeRoleCredentialFetcher, \
AssumeRoleProvider, AssumeRoleCredentialFetcher, CredentialResolver, \
CanonicalNameCredentialSourcer, BotoProvider, OriginalEC2Provider, \
SSOProvider
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.exceptions import MetadataRetrievalError, CredentialRetrievalError, \
InvalidConfigError, PartialCredentialsError, RefreshWithMFAUnsupportedError, \
UnknownCredentialError
from botocore.compat import compat_shell_split
from botocore.utils import SSOTokenLoader
from aiobotocore.utils import AioContainerMetadataFetcher, AioInstanceMetadataFetcher
from aiobotocore.config import AioConfig
logger = logging.getLogger(__name__)
def create_credential_resolver(session, cache=None, region_name=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
disable_env_vars = session.instance_variables().get('profile') is not None
imds_config = {
'ec2_metadata_service_endpoint': session.get_config_variable(
'ec2_metadata_service_endpoint'),
'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
}
if cache is None:
cache = {}
env_provider = AioEnvProvider()
container_provider = AioContainerProvider()
instance_metadata_provider = AioInstanceMetadataProvider(
iam_role_fetcher=AioInstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts,
user_agent=session.user_agent(),
config=imds_config)
)
profile_provider_builder = AioProfileProviderBuilder(
session, cache=cache, region_name=region_name)
assume_role_provider = AioAssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=_get_client_creator(session, region_name),
cache=cache,
profile_name=profile_name,
credential_sourcer=AioCanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
]),
profile_provider_builder=profile_provider_builder,
)
pre_profile = [
env_provider,
assume_role_provider,
]
profile_providers = profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
post_profile = [
AioOriginalEC2Provider(),
AioBotoProvider(),
container_provider,
instance_metadata_provider,
]
providers = pre_profile + profile_providers + post_profile
if disable_env_vars:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = AioCredentialResolver(providers=providers)
return resolver
class AioProfileProviderBuilder(ProfileProviderBuilder):
def _create_process_provider(self, profile_name):
return AioProcessProvider(
profile_name=profile_name,
load_config=lambda: self._session.full_config,
)
def _create_shared_credential_provider(self, profile_name):
credential_file = self._session.get_config_variable('credentials_file')
return AioSharedCredentialProvider(
profile_name=profile_name,
creds_filename=credential_file,
)
def _create_config_provider(self, profile_name):
config_file = self._session.get_config_variable('config_file')
return AioConfigProvider(
profile_name=profile_name,
config_filename=config_file,
)
def _create_web_identity_provider(self, profile_name, disable_env_vars):
return AioAssumeRoleWithWebIdentityProvider(
load_config=lambda: self._session.full_config,
client_creator=_get_client_creator(
self._session, self._region_name),
cache=self._cache,
profile_name=profile_name,
disable_env_vars=disable_env_vars,
)
def _create_sso_provider(self, profile_name):
return AioSSOProvider(
load_config=lambda: self._session.full_config,
client_creator=self._session.create_client,
profile_name=profile_name,
cache=self._cache,
token_cache=self._sso_token_cache,
)
async def get_credentials(session):
resolver = create_credential_resolver(session)
return await resolver.load_credentials()
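# Usage sketch (illustrative): refreshable credentials must be frozen through
# the async accessor before their values are read:
#
#   creds = await get_credentials(session)
#   frozen = await creds.get_frozen_credentials()
#   # frozen.access_key / frozen.secret_key / frozen.token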
def create_assume_role_refresher(client, params):
async def refresh():
async with client as sts:
response = await sts.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
def create_aio_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
async def call(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return await self._refresh()
return _Refresher(actual_refresh).call
class AioCredentials(Credentials):
async def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
@classmethod
def from_credentials(cls, obj: Optional[Credentials]):
if obj is None:
return None
return cls(
obj.access_key, obj.secret_key,
obj.token, obj.method)
class AioRefreshableCredentials(RefreshableCredentials):
def __init__(self, *args, **kwargs):
super(AioRefreshableCredentials, self).__init__(*args, **kwargs)
self._refresh_lock = asyncio.Lock()
@classmethod
def from_refreshable_credentials(cls, obj: Optional[RefreshableCredentials]):
if obj is None:
return None
return cls( # Using interval values here to skip property calling .refresh()
obj._access_key, obj._secret_key,
obj._token, obj._expiry_time,
obj._refresh_using, obj.method,
obj._time_fetcher
)
    # Redeclaring the properties so it doesn't call refresh
# Have to redeclare setter as we're overriding the getter
@property
def access_key(self):
# TODO: this needs to be resolved
raise NotImplementedError("missing call to self._refresh. "
"Use get_frozen_credentials instead")
        return self._access_key  # unreachable; kept to mirror the sync property
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
# TODO: this needs to be resolved
raise NotImplementedError("missing call to self._refresh. "
"Use get_frozen_credentials instead")
        return self._secret_key  # unreachable; kept to mirror the sync property
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
# TODO: this needs to be resolved
raise NotImplementedError("missing call to self._refresh. "
"Use get_frozen_credentials instead")
        return self._token  # unreachable; kept to mirror the sync property
@token.setter
def token(self, value):
self._token = value
async def _refresh(self):
if not self.refresh_needed(self._advisory_refresh_timeout):
return
# By this point we need a refresh but its not critical
if not self._refresh_lock.locked():
async with self._refresh_lock:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
await self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're here, we absolutely need a refresh and the
# lock is held so wait for it
async with self._refresh_lock:
# Might have refreshed by now
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
await self._protected_refresh(is_mandatory=True)
async def _protected_refresh(self, is_mandatory):
try:
metadata = await self._refresh_using()
except Exception:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
if self._is_expired():
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
async def get_frozen_credentials(self):
await self._refresh()
return self._frozen_credentials
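# Hedged usage note: the access_key/secret_key/token properties above raise
# deliberately; the supported async access pattern is the frozen snapshot:
#
#     frozen = await creds.get_frozen_credentials()
#     print(frozen.access_key, frozen.secret_key, frozen.token)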
class AioDeferredRefreshableCredentials(AioRefreshableCredentials):
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = asyncio.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if self._frozen_credentials is None:
return True
return super(AioDeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class AioCachedCredentialFetcher(CachedCredentialFetcher):
async def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
async def fetch_credentials(self):
return await self._get_cached_credentials()
async def _get_cached_credentials(self):
"""Get up-to-date credentials.
This will check the cache for up-to-date credentials, calling assume
role if none are available.
"""
response = self._load_from_cache()
if response is None:
response = await self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
class AioBaseAssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher,
AioCachedCredentialFetcher):
pass
class AioAssumeRoleCredentialFetcher(AssumeRoleCredentialFetcher,
AioBaseAssumeRoleCredentialFetcher):
async def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = await self._create_client()
async with client as sts:
return await sts.assume_role(**kwargs)
async def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = await self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
class AioAssumeRoleWithWebIdentityCredentialFetcher(
AioBaseAssumeRoleCredentialFetcher
):
def __init__(self, client_creator, web_identity_token_loader, role_arn,
extra_args=None, cache=None, expiry_window_seconds=None):
self._web_identity_token_loader = web_identity_token_loader
super(AioAssumeRoleWithWebIdentityCredentialFetcher, self).__init__(
client_creator, role_arn, extra_args=extra_args,
cache=cache, expiry_window_seconds=expiry_window_seconds
)
async def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
        # Assume role with web identity does not require credentials other
        # than the token, so explicitly configure the client not to sign
        # requests.
config = AioConfig(signature_version=UNSIGNED)
async with self._client_creator('sts', config=config) as client:
return await client.assume_role_with_web_identity(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = deepcopy(self._assume_kwargs)
identity_token = self._web_identity_token_loader()
assume_role_kwargs['WebIdentityToken'] = identity_token
return assume_role_kwargs
class AioProcessProvider(ProcessProvider):
def __init__(self, *args, popen=asyncio.create_subprocess_exec, **kwargs):
super(AioProcessProvider, self).__init__(*args, **kwargs, popen=popen)
async def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = await self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return AioRefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return AioCredentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
async def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
process_list = compat_shell_split(credential_process)
p = await self._popen(*process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = await p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
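# Hedged example (inferred from the parsing above): the external credential
# process is expected to print a JSON document of this shape to stdout:
#
#     {
#         "Version": 1,
#         "AccessKeyId": "AKIDEXAMPLE",
#         "SecretAccessKey": "...",
#         "SessionToken": "...",
#         "Expiration": "2099-01-01T00:00:00Z"
#     }
#
# SessionToken and Expiration are optional; when Expiration is omitted,
# load() above returns plain AioCredentials rather than refreshable ones.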
class AioInstanceMetadataProvider(InstanceMetadataProvider):
async def load(self):
fetcher = self._role_fetcher
metadata = await fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
creds = AioRefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class AioEnvProvider(EnvProvider):
async def load(self):
# It gets credentials from an env var,
# so just convert the response to Aio variants
result = super().load()
if isinstance(result, RefreshableCredentials):
            return AioRefreshableCredentials.from_refreshable_credentials(
                result)
elif isinstance(result, Credentials):
return AioCredentials.from_credentials(result)
return None
class AioOriginalEC2Provider(OriginalEC2Provider):
async def load(self):
result = super(AioOriginalEC2Provider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioSharedCredentialProvider(SharedCredentialProvider):
async def load(self):
result = super(AioSharedCredentialProvider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioConfigProvider(ConfigProvider):
async def load(self):
result = super(AioConfigProvider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioBotoProvider(BotoProvider):
async def load(self):
result = super(AioBotoProvider, self).load()
if isinstance(result, Credentials):
result = AioCredentials.from_credentials(result)
return result
class AioAssumeRoleProvider(AssumeRoleProvider):
async def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return await self._load_creds_via_assume_role(self._profile_name)
async def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = await self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
duration_seconds = role_config.get('duration_seconds')
if duration_seconds is not None:
extra_args['DurationSeconds'] = duration_seconds
fetcher = AioAssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_aio_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return AioDeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
async def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return await self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return await self._resolve_credentials_from_profile(source_profile)
async def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile) and \
not self._profile_provider_builder:
return self._resolve_static_credentials_from_profile(profile)
elif self._has_static_credentials(profile) or \
not self._has_assume_role_config_vars(profile):
profile_providers = self._profile_provider_builder.providers(
profile_name=profile_name,
disable_env_vars=True,
)
profile_chain = AioCredentialResolver(profile_providers)
credentials = await profile_chain.load_credentials()
if credentials is None:
error_message = (
'The source profile "%s" must have credentials.'
)
raise InvalidConfigError(
error_msg=error_message % profile_name,
)
return credentials
        return await self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return AioCredentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
async def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = await self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
class AioAssumeRoleWithWebIdentityProvider(AssumeRoleWithWebIdentityProvider):
async def load(self):
return await self._assume_role_with_web_identity()
async def _assume_role_with_web_identity(self):
token_path = self._get_config('web_identity_token_file')
if not token_path:
return None
token_loader = self._token_loader_cls(token_path)
role_arn = self._get_config('role_arn')
if not role_arn:
error_msg = (
'The provided profile or the current environment is '
'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var '
                'is set.'
)
raise InvalidConfigError(error_msg=error_msg)
extra_args = {}
role_session_name = self._get_config('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
fetcher = AioAssumeRoleWithWebIdentityCredentialFetcher(
client_creator=self._client_creator,
web_identity_token_loader=token_loader,
role_arn=role_arn,
extra_args=extra_args,
cache=self.cache,
)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return AioDeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=fetcher.fetch_credentials,
)
class AioCanonicalNameCredentialSourcer(CanonicalNameCredentialSourcer):
async def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
"""
source = self._get_provider(source_name)
if isinstance(source, AioCredentialResolver):
return await source.load_credentials()
return await source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return AioCredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
class AioContainerProvider(ContainerProvider):
def __init__(self, *args, **kwargs):
super(AioContainerProvider, self).__init__(*args, **kwargs)
# This will always run if no fetcher arg is provided
if isinstance(self._fetcher, ContainerMetadataFetcher):
self._fetcher = AioContainerMetadataFetcher()
async def load(self):
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return await self._retrieve_or_fail()
async def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = await fetcher()
return AioRefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
def _create_fetcher(self, full_uri, headers):
async def fetch_creds():
try:
response = await self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
class AioCredentialResolver(CredentialResolver):
async def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = await provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
class AioSSOCredentialFetcher(AioCachedCredentialFetcher):
_UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, start_url, sso_region, role_name, account_id,
client_creator, token_loader=None, cache=None,
expiry_window_seconds=None):
self._client_creator = client_creator
self._sso_region = sso_region
self._role_name = role_name
self._account_id = account_id
self._start_url = start_url
self._token_loader = token_loader
super(AioSSOCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
args = {
'startUrl': self._start_url,
'roleName': self._role_name,
'accountId': self._account_id,
}
args = json.dumps(args, sort_keys=True, separators=(',', ':'))
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _parse_timestamp(self, timestamp_ms):
# fromtimestamp expects seconds so: milliseconds / 1000 = seconds
timestamp_seconds = timestamp_ms / 1000.0
timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc())
return timestamp.strftime(self._UTC_DATE_FORMAT)
async def _get_credentials(self):
"""Get credentials by calling SSO get role credentials."""
config = Config(
signature_version=UNSIGNED,
region_name=self._sso_region,
)
async with self._client_creator('sso', config=config) as client:
kwargs = {
'roleName': self._role_name,
'accountId': self._account_id,
'accessToken': self._token_loader(self._start_url),
}
try:
response = await client.get_role_credentials(**kwargs)
except client.exceptions.UnauthorizedException:
raise UnauthorizedSSOTokenError()
credentials = response['roleCredentials']
credentials = {
'ProviderType': 'sso',
'Credentials': {
'AccessKeyId': credentials['accessKeyId'],
'SecretAccessKey': credentials['secretAccessKey'],
'SessionToken': credentials['sessionToken'],
'Expiration': self._parse_timestamp(credentials['expiration']),
}
}
return credentials
class AioSSOProvider(SSOProvider):
async def load(self):
sso_config = self._load_sso_config()
if not sso_config:
return None
sso_fetcher = AioSSOCredentialFetcher(
sso_config['sso_start_url'],
sso_config['sso_region'],
sso_config['sso_role_name'],
sso_config['sso_account_id'],
self._client_creator,
token_loader=SSOTokenLoader(cache=self._token_cache),
cache=self.cache,
)
return AioDeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=sso_fetcher.fetch_credentials,
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/paginate.py | from botocore.exceptions import PaginationError
from botocore.paginate import Paginator, PageIterator
from botocore.utils import set_value_from_jmespath, merge_dicts
from botocore.compat import six
import jmespath
import aioitertools
class AioPageIterator(PageIterator):
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = await self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def result_key_iters(self):
teed_results = aioitertools.tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
async def build_full_result(self):
complete_result = {}
async for response in self:
page = response
            # We want to try to catch operation object pagination
            # and format correctly for those. They come in the form
            # of a tuple of two elements: (http_response, parsed_response).
            # We want the parsed_response as that is what the page iterator
            # uses. We can remove this once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
async def search(self, expression):
compiled = jmespath.compile(expression)
async for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
yield results
class AioPaginator(Paginator):
PAGE_ITERATOR_CLS = AioPageIterator
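# Hedged usage sketch (assumes an aiobotocore client for a pageable
# operation; the S3 operation and bucket name are illustrative):
#
#     paginator = client.get_paginator('list_objects_v2')
#     async for page in paginator.paginate(Bucket='example-bucket'):
#         for obj in page.get('Contents', []):
#             print(obj['Key'])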
class ResultKeyIterator:
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
async for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/eventstream.py | from botocore.eventstream import EventStream, EventStreamBuffer
class AioEventStream(EventStream):
async def _create_raw_event_generator(self):
event_stream_buffer = EventStreamBuffer()
async for chunk, _ in self._raw_stream.iter_chunks():
event_stream_buffer.add_data(chunk)
for event in event_stream_buffer:
yield event
def __iter__(self):
raise NotImplementedError('Use async-for instead')
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
async for event in self._event_generator:
parsed_event = self._parse_event(event)
if parsed_event:
yield parsed_event
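# Hedged usage note: AioEventStream is async-only (plain iteration raises
# NotImplementedError above), so it must be consumed with async-for:
#
#     async for event in event_stream:
#         handle(event)  # `handle` is a hypothetical callback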
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/client.py | from botocore.awsrequest import prepare_request_dict
from botocore.client import logger, PaginatorDocstring, ClientCreator, \
BaseClient, ClientEndpointBridge, S3ArnParamHandler, S3EndpointSetter
from botocore.exceptions import OperationNotPageableError
from botocore.history import get_global_history_recorder
from botocore.utils import get_service_module_name
from botocore.waiter import xform_name
from botocore.hooks import first_non_none_response
from .paginate import AioPaginator
from .args import AioClientArgsCreator
from .utils import AioS3RegionRedirector
from . import waiter
history_recorder = get_global_history_recorder()
class AioClientCreator(ClientCreator):
async def create_client(self, service_name, region_name, is_secure=True,
endpoint_url=None, verify=None,
credentials=None, scoped_config=None,
api_version=None,
client_config=None):
responses = await self._event_emitter.emit(
'choose-service-name', service_name=service_name)
service_name = first_non_none_response(responses, default=service_name)
service_model = self._load_service_model(service_name, api_version)
cls = await self._create_client_class(service_name, service_model)
endpoint_bridge = ClientEndpointBridge(
self._endpoint_resolver, scoped_config, client_config,
service_signing_name=service_model.metadata.get('signingName'))
client_args = self._get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
service_client = cls(**client_args)
self._register_retries(service_client)
self._register_s3_events(
service_client, endpoint_bridge, endpoint_url, client_config,
scoped_config)
self._register_endpoint_discovery(
service_client, endpoint_url, client_config
)
return service_client
async def _create_client_class(self, service_name, service_model):
class_attributes = self._create_methods(service_model)
py_name_to_operation_name = self._create_name_mapping(service_model)
class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name
bases = [AioBaseClient]
service_id = service_model.service_id.hyphenize()
await self._event_emitter.emit(
'creating-client-class.%s' % service_id,
class_attributes=class_attributes,
base_classes=bases)
class_name = get_service_module_name(service_model)
cls = type(str(class_name), tuple(bases), class_attributes)
return cls
def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
client_config, scoped_config):
if client.meta.service_model.service_name != 's3':
return
AioS3RegionRedirector(endpoint_bridge, client).register()
S3ArnParamHandler().register(client.meta.events)
S3EndpointSetter(
endpoint_resolver=self._endpoint_resolver,
region=client.meta.region_name,
s3_config=client.meta.config.s3,
endpoint_url=endpoint_url,
partition=client.meta.partition
).register(client.meta.events)
self._set_s3_presign_signature_version(
client.meta, client_config, scoped_config)
def _get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials,
scoped_config, client_config, endpoint_bridge):
        # This is a near copy of ClientCreator._get_client_args; the only
        # change is ClientArgsCreator -> AioClientArgsCreator.
args_creator = AioClientArgsCreator(
self._event_emitter, self._user_agent,
self._response_parser_factory, self._loader,
self._exceptions_factory, config_store=self._config_store)
return args_creator.get_client_args(
service_model, region_name, is_secure, endpoint_url,
verify, credentials, scoped_config, client_config, endpoint_bridge)
class AioBaseClient(BaseClient):
async def _async_getattr(self, item):
event_name = 'getattr.%s.%s' % (
self._service_model.service_id.hyphenize(), item
)
handler, event_response = await self.meta.events.emit_until_response(
event_name, client=self)
return event_response
def __getattr__(self, item):
        # NOTE: we cannot reliably support this because if we were to make
        # this a deferred attrgetter (see #803), it would result in hasattr
        # always returning True. This ends up breaking ddtrace, for example,
        # when it tries to set a pin.
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, item))
async def _make_api_call(self, operation_name, api_params):
operation_model = self._service_model.operation_model(operation_name)
service_name = self._service_model.service_name
history_recorder.record('API_CALL', {
'service': service_name,
'operation': operation_name,
'params': api_params,
})
if operation_model.deprecated:
logger.debug('Warning: %s.%s() is deprecated',
service_name, operation_name)
request_context = {
'client_region': self.meta.region_name,
'client_config': self.meta.config,
'has_streaming_input': operation_model.has_streaming_input,
'auth_type': operation_model.auth_type,
}
request_dict = await self._convert_to_request_dict(
api_params, operation_model, context=request_context)
service_id = self._service_model.service_id.hyphenize()
handler, event_response = await self.meta.events.emit_until_response(
'before-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
model=operation_model, params=request_dict,
request_signer=self._request_signer, context=request_context)
if event_response is not None:
http, parsed_response = event_response
else:
http, parsed_response = await self._make_request(
operation_model, request_dict, request_context)
await self.meta.events.emit(
'after-call.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
http_response=http, parsed=parsed_response,
model=operation_model, context=request_context
)
if http.status_code >= 300:
error_code = parsed_response.get("Error", {}).get("Code")
error_class = self.exceptions.from_code(error_code)
raise error_class(parsed_response, operation_name)
else:
return parsed_response
async def _make_request(self, operation_model, request_dict, request_context):
try:
return await self._endpoint.make_request(operation_model, request_dict)
except Exception as e:
await self.meta.events.emit(
'after-call-error.{service_id}.{operation_name}'.format(
service_id=self._service_model.service_id.hyphenize(),
operation_name=operation_model.name),
exception=e, context=request_context
)
raise
async def _convert_to_request_dict(self, api_params, operation_model,
context=None):
api_params = await self._emit_api_params(
api_params, operation_model, context)
request_dict = self._serializer.serialize_to_request(
api_params, operation_model)
if not self._client_config.inject_host_prefix:
request_dict.pop('host_prefix', None)
prepare_request_dict(request_dict, endpoint_url=self._endpoint.host,
user_agent=self._client_config.user_agent,
context=context)
return request_dict
async def _emit_api_params(self, api_params, operation_model, context):
# Given the API params provided by the user and the operation_model
# we can serialize the request to a request_dict.
operation_name = operation_model.name
# Emit an event that allows users to modify the parameters at the
# beginning of the method. It allows handlers to modify existing
# parameters or return a new set of parameters to use.
service_id = self._service_model.service_id.hyphenize()
responses = await self.meta.events.emit(
'provide-client-params.{service_id}.{operation_name}'.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
api_params = first_non_none_response(responses, default=api_params)
event_name = (
'before-parameter-build.{service_id}.{operation_name}')
await self.meta.events.emit(
event_name.format(
service_id=service_id,
operation_name=operation_name),
params=api_params, model=operation_model, context=context)
return api_params
def get_paginator(self, operation_name):
"""Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you'd normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator("create_foo")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
if not self.can_paginate(operation_name):
raise OperationNotPageableError(operation_name=operation_name)
else:
actual_operation_name = self._PY_TO_OP_NAME[operation_name]
# Create a new paginate method that will serve as a proxy to
# the underlying Paginator.paginate method. This is needed to
# attach a docstring to the method.
def paginate(self, **kwargs):
return AioPaginator.paginate(self, **kwargs)
paginator_config = self._cache['page_config'][
actual_operation_name]
# Add the docstring for the paginate method.
paginate.__doc__ = PaginatorDocstring(
paginator_name=actual_operation_name,
event_emitter=self.meta.events,
service_model=self.meta.service_model,
paginator_config=paginator_config,
include_signature=False
)
# Rename the paginator class based on the type of paginator.
paginator_class_name = str('%s.Paginator.%s' % (
get_service_module_name(self.meta.service_model),
actual_operation_name))
# Create the new paginator class
documented_paginator_cls = type(
paginator_class_name, (AioPaginator,), {'paginate': paginate})
operation_model = self._service_model.operation_model(actual_operation_name)
paginator = documented_paginator_cls(
getattr(self, operation_name),
paginator_config,
operation_model)
return paginator
    # NOTE: this method does not differ from botocore; however, it's
    # important to keep because the "waiter" value points to our own
    # asyncio waiter module
def get_waiter(self, waiter_name):
"""Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
config = self._get_waiter_config()
if not config:
raise ValueError("Waiter does not exist: %s" % waiter_name)
model = waiter.WaiterModel(config)
mapping = {}
for name in model.waiter_names:
mapping[xform_name(name)] = name
if waiter_name not in mapping:
raise ValueError("Waiter does not exist: %s" % waiter_name)
return waiter.create_waiter_with_client(
mapping[waiter_name], model, self)
async def __aenter__(self):
await self._endpoint.http_session.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._endpoint.http_session.__aexit__(exc_type, exc_val, exc_tb)
async def close(self):
"""Close all http connections."""
return await self._endpoint.http_session.close()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/session.py | from botocore.session import Session, EVENT_ALIASES, ServiceModel, UnknownServiceError
from botocore import UNSIGNED
from botocore import retryhandler, translate
from botocore.exceptions import PartialCredentialsError
from .client import AioClientCreator, AioBaseClient
from .hooks import AioHierarchicalEmitter
from .parsers import AioResponseParserFactory
from .signers import add_generate_presigned_url, add_generate_presigned_post, \
add_generate_db_auth_token
from .credentials import create_credential_resolver, AioCredentials
class ClientCreatorContext:
def __init__(self, coro):
self._coro = coro
self._client = None
async def __aenter__(self) -> AioBaseClient:
self._client = await self._coro
return await self._client.__aenter__()
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._client.__aexit__(exc_type, exc_val, exc_tb)
class AioSession(Session):
# noinspection PyMissingConstructor
def __init__(self, session_vars=None, event_hooks=None,
include_builtin_handlers=True, profile=None):
if event_hooks is None:
event_hooks = AioHierarchicalEmitter()
super().__init__(session_vars, event_hooks, include_builtin_handlers, profile)
# Register our own handlers. These normally happen via
# `botocore.handlers.BUILTIN_HANDLERS`
self.register('creating-client-class', add_generate_presigned_url)
self.register('creating-client-class.s3', add_generate_presigned_post)
        self.register('creating-client-class.rds', add_generate_db_auth_token)
def _register_response_parser_factory(self):
self._components.register_component('response_parser_factory',
AioResponseParserFactory())
def create_client(self, *args, **kwargs):
return ClientCreatorContext(self._create_client(*args, **kwargs))
async def _create_client(self, service_name, region_name=None,
api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
default_client_config = self.get_default_client_config()
# If a config is provided and a default config is set, then
# use the config resulting from merging the two.
if config is not None and default_client_config is not None:
config = default_client_config.merge(config)
# If a config was not provided then use the default
# client config from the session
elif default_client_config is not None:
config = default_client_config
region_name = self._resolve_region_name(region_name, config)
# Figure out the verify value base on the various
# configuration options.
if verify is None:
verify = self.get_config_variable('ca_bundle')
if api_version is None:
api_version = self.get_config_variable('api_versions').get(
service_name, None)
loader = self.get_component('data_loader')
event_emitter = self.get_component('event_emitter')
response_parser_factory = self.get_component(
'response_parser_factory')
if config is not None and config.signature_version is UNSIGNED:
credentials = None
elif aws_access_key_id is not None and \
aws_secret_access_key is not None:
credentials = AioCredentials(
access_key=aws_access_key_id,
secret_key=aws_secret_access_key,
token=aws_session_token)
elif self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key):
raise PartialCredentialsError(
provider='explicit',
cred_var=self._missing_cred_vars(aws_access_key_id,
aws_secret_access_key))
else:
credentials = await self.get_credentials()
endpoint_resolver = self._get_internal_component('endpoint_resolver')
exceptions_factory = self._get_internal_component('exceptions_factory')
config_store = self.get_component('config_store')
client_creator = AioClientCreator(
loader, endpoint_resolver, self.user_agent(), event_emitter,
retryhandler, translate, response_parser_factory,
exceptions_factory, config_store)
client = await client_creator.create_client(
service_name=service_name, region_name=region_name,
is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
credentials=credentials, scoped_config=self.get_scoped_config(),
client_config=config, api_version=api_version)
monitor = self._get_internal_component('monitor')
if monitor is not None:
monitor.register(client.meta.events)
return client
def _create_credential_resolver(self):
return create_credential_resolver(
self, region_name=self._last_client_region_used)
async def get_credentials(self):
if self._credentials is None:
self._credentials = await (self._components.get_component(
'credential_provider').load_credentials())
return self._credentials
def set_credentials(self, access_key, secret_key, token=None):
self._credentials = AioCredentials(access_key, secret_key, token)
async def get_service_model(self, service_name, api_version=None):
service_description = await self.get_service_data(service_name, api_version)
return ServiceModel(service_description, service_name=service_name)
async def get_service_data(self, service_name, api_version=None):
"""
Retrieve the fully merged data associated with a service.
"""
data_path = service_name
service_data = self.get_component('data_loader').load_service_model(
data_path,
type_name='service-2',
api_version=api_version
)
service_id = EVENT_ALIASES.get(service_name, service_name)
await self._events.emit('service-data-loaded.%s' % service_id,
service_data=service_data,
service_name=service_name, session=self)
return service_data
async def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
resolver = self._get_internal_component('endpoint_resolver')
results = []
try:
service_data = await self.get_service_data(service_name)
endpoint_prefix = service_data['metadata'].get(
'endpointPrefix', service_name)
results = resolver.get_available_endpoints(
endpoint_prefix, partition_name, allow_non_regional)
except UnknownServiceError:
pass
return results
def get_session(env_vars=None):
"""
Return a new session object.
"""
return AioSession(env_vars)
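# Hedged usage sketch: create_client() returns a ClientCreatorContext, so a
# client is obtained with async-with (service and region are illustrative):
#
#     import asyncio
#
#     async def main():
#         session = get_session()
#         async with session.create_client(
#                 's3', region_name='us-east-1') as client:
#             response = await client.list_buckets()
#             print(response['Buckets'])
#
#     asyncio.run(main())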
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/parsers.py | from botocore.parsers import ResponseParserFactory, RestXMLParser, \
RestJSONParser, JSONParser, QueryParser, EC2QueryParser
from .eventstream import AioEventStream
class AioRestXMLParser(RestXMLParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioEC2QueryParser(EC2QueryParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioQueryParser(QueryParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioJSONParser(JSONParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
class AioRestJSONParser(RestJSONParser):
def _create_event_stream(self, response, shape):
parser = self._event_stream_parser
name = response['context'].get('operation_name')
return AioEventStream(response['body'], shape, parser, name)
PROTOCOL_PARSERS = {
'ec2': AioEC2QueryParser,
'query': AioQueryParser,
'json': AioJSONParser,
'rest-json': AioRestJSONParser,
'rest-xml': AioRestXMLParser,
}
class AioResponseParserFactory(ResponseParserFactory):
def create_parser(self, protocol_name):
parser_cls = PROTOCOL_PARSERS[protocol_name]
return parser_cls(**self._defaults)
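# Hedged usage sketch: pick the async parser matching a service protocol
# (protocol names come from a service model's metadata):
#
#     factory = AioResponseParserFactory()
#     parser = factory.create_parser('rest-json')  # -> AioRestJSONParser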
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/sentry_sdk-1.14.0.dist-info/top_level.txt | sentry_sdk
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp-3.8.3.dist-info/top_level.txt | aiohttp
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp-3.8.3.dist-info/LICENSE.txt | Copyright aio-libs contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/__init__.py | from .core import contents, where
__all__ = ["contents", "where"]
__version__ = "2023.05.07"
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/core.py | """
certifi.py
~~~~~~~~~~
This module returns the installation location of cacert.pem or its contents.
"""
import sys
if sys.version_info >= (3, 11):
from importlib.resources import as_file, files
_CACERT_CTX = None
_CACERT_PATH = None
def where() -> str:
# This is slightly terrible, but we want to delay extracting the file
# in cases where we're inside of a zipimport situation until someone
# actually calls where(), but we don't want to re-extract the file
# on every call of where(), so we'll do it once then store it in a
# global variable.
global _CACERT_CTX
global _CACERT_PATH
if _CACERT_PATH is None:
# This is slightly janky, the importlib.resources API wants you to
# manage the cleanup of this file, so it doesn't actually return a
# path, it returns a context manager that will give you the path
# when you enter it and will do any cleanup when you leave it. In
# the common case of not needing a temporary file, it will just
# return the file system location and the __exit__() is a no-op.
#
# We also have to hold onto the actual context manager, because
# it will do the cleanup whenever it gets garbage collected, so
# we will also store that at the global level as well.
_CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
_CACERT_PATH = str(_CACERT_CTX.__enter__())
return _CACERT_PATH
def contents() -> str:
return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
elif sys.version_info >= (3, 7):
from importlib.resources import path as get_path, read_text
_CACERT_CTX = None
_CACERT_PATH = None
def where() -> str:
# This is slightly terrible, but we want to delay extracting the
# file in cases where we're inside of a zipimport situation until
# someone actually calls where(), but we don't want to re-extract
# the file on every call of where(), so we'll do it once then store
# it in a global variable.
global _CACERT_CTX
global _CACERT_PATH
if _CACERT_PATH is None:
# This is slightly janky, the importlib.resources API wants you
# to manage the cleanup of this file, so it doesn't actually
# return a path, it returns a context manager that will give
# you the path when you enter it and will do any cleanup when
# you leave it. In the common case of not needing a temporary
# file, it will just return the file system location and the
# __exit__() is a no-op.
#
# We also have to hold onto the actual context manager, because
# it will do the cleanup whenever it gets garbage collected, so
# we will also store that at the global level as well.
_CACERT_CTX = get_path("certifi", "cacert.pem")
_CACERT_PATH = str(_CACERT_CTX.__enter__())
return _CACERT_PATH
def contents() -> str:
return read_text("certifi", "cacert.pem", encoding="ascii")
else:
import os
import types
from typing import Union
Package = Union[types.ModuleType, str]
Resource = Union[str, "os.PathLike"]
    # This fallback will work for Python versions prior to 3.7 that lack
    # the importlib.resources module. It relies on the existing `where`
    # function, so it won't address issues with environments like
    # PyOxidizer that don't set __file__ on modules.
def read_text(
package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict'
) -> str:
with open(where(), encoding=encoding) as data:
return data.read()
# If we don't have importlib.resources, then we will just do the old logic
# of assuming we're on the filesystem and munge the path directly.
def where() -> str:
f = os.path.dirname(__file__)
return os.path.join(f, "cacert.pem")
def contents() -> str:
return read_text("certifi", "cacert.pem", encoding="ascii")
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/__main__.py | import argparse
from certifi import contents, where
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()
if args.contents:
print(contents())
else:
print(where())
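# Hedged CLI examples based on the flags defined above:
#
#     python -m certifi             # prints the path to cacert.pem
#     python -m certifi --contents  # prints the PEM bundle itself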
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pypng-0.20220715.0.dist-info/top_level.txt | png
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi-1.15.1.dist-info/top_level.txt | _cffi_backend
cffi
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi-1.15.1.dist-info/entry_points.txt | [distutils.setup_keywords]
cffi_modules = cffi.setuptools_ext:cffi_modules
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/validators.py | """
Creation and extension of validators, with implementations for existing drafts.
"""
from __future__ import division
from warnings import warn
import contextlib
import json
import numbers
from six import add_metaclass
from jsonschema import (
_legacy_validators,
_types,
_utils,
_validators,
exceptions,
)
from jsonschema.compat import (
Sequence,
int_types,
iteritems,
lru_cache,
str_types,
unquote,
urldefrag,
urljoin,
urlopen,
urlsplit,
)
# Sigh. https://gitlab.com/pycqa/flake8/issues/280
# https://github.com/pyga/ebb-lint/issues/7
# Imported for backwards compatibility.
from jsonschema.exceptions import ErrorTree
ErrorTree
class _DontDoThat(Exception):
"""
    Raised when a Validator with a non-default type checker is misused.

    Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
    exist precisely for the cases that DEFAULT_TYPES can't represent.
"""
def __str__(self):
return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
validators = {}
meta_schemas = _utils.URIDict()
def _generate_legacy_type_checks(types=()):
"""
Generate newer-style type checks out of JSON-type-name-to-type mappings.
Arguments:
types (dict):
A mapping of type names to their Python types
Returns:
A dictionary of definitions to pass to `TypeChecker`
"""
types = dict(types)
def gen_type_check(pytypes):
pytypes = _utils.flatten(pytypes)
def type_check(checker, instance):
if isinstance(instance, bool):
if bool not in pytypes:
return False
return isinstance(instance, pytypes)
return type_check
definitions = {}
for typename, pytypes in iteritems(types):
definitions[typename] = gen_type_check(pytypes)
return definitions
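# Hedged usage sketch: turning a legacy JSON-type-to-Python-type mapping
# into TypeChecker definitions (bool is excluded unless listed, per the
# isinstance special-casing above):
#
#     defs = _generate_legacy_type_checks({u"string": str_types})
#     checker = _types.TypeChecker(type_checkers=defs)
#     checker.is_type(u"abc", u"string")  # True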
_DEPRECATED_DEFAULT_TYPES = {
u"array": list,
u"boolean": bool,
u"integer": int_types,
u"null": type(None),
u"number": numbers.Number,
u"object": dict,
u"string": str_types,
}
_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
)
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
Arguments:
version (str):
An identifier to use as the version's name
Returns:
collections.Callable:
a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
if meta_schema_id:
meta_schemas[meta_schema_id] = cls
return cls
return _validates
def _DEFAULT_TYPES(self):
if self._CREATED_WITH_DEFAULT_TYPES is None:
raise _DontDoThat()
warn(
(
"The DEFAULT_TYPES attribute is deprecated. "
"See the type checker attached to this validator instead."
),
DeprecationWarning,
stacklevel=2,
)
return self._DEFAULT_TYPES
class _DefaultTypesDeprecatingMetaClass(type):
DEFAULT_TYPES = property(_DEFAULT_TYPES)
def _id_of(schema):
if schema is True or schema is False:
return u""
return schema.get(u"$id", u"")
def create(
meta_schema,
validators=(),
version=None,
default_types=None,
type_checker=None,
id_of=_id_of,
):
"""
Create a new validator class.
Arguments:
meta_schema (collections.Mapping):
the meta schema for the new validator class
validators (collections.Mapping):
a mapping from names to callables, where each callable will
validate the schema property with the given name.
Each callable should take 4 arguments:
1. a validator instance,
2. the value of the property being validated within the
instance
3. the instance
4. the schema
version (str):
an identifier for the version that this validator class will
validate. If provided, the returned validator class will
have its ``__name__`` set to include the version, and also
will have `jsonschema.validators.validates` automatically
called for the given version.
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, a `jsonschema.TypeChecker` will be created
with a set of default types typical of JSON Schema drafts.
default_types (collections.Mapping):
.. deprecated:: 3.0.0
Please use the type_checker argument instead.
If set, it provides mappings of JSON types to Python types
that will be converted to functions and redefined in this
object's `jsonschema.TypeChecker`.
id_of (collections.Callable):
A function that given a schema, returns its ID.
Returns:
a new `jsonschema.IValidator` class
"""
if default_types is not None:
if type_checker is not None:
raise TypeError(
"Do not specify default_types when providing a type checker.",
)
_created_with_default_types = True
warn(
(
"The default_types argument is deprecated. "
"Use the type_checker argument instead."
),
DeprecationWarning,
stacklevel=2,
)
type_checker = _types.TypeChecker(
type_checkers=_generate_legacy_type_checks(default_types),
)
else:
default_types = _DEPRECATED_DEFAULT_TYPES
if type_checker is None:
_created_with_default_types = False
type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
_created_with_default_types = False
else:
_created_with_default_types = None
@add_metaclass(_DefaultTypesDeprecatingMetaClass)
class Validator(object):
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
TYPE_CHECKER = type_checker
ID_OF = staticmethod(id_of)
DEFAULT_TYPES = property(_DEFAULT_TYPES)
_DEFAULT_TYPES = dict(default_types)
_CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
def __init__(
self,
schema,
types=(),
resolver=None,
format_checker=None,
):
if types:
warn(
(
"The types argument is deprecated. Provide "
"a type_checker to jsonschema.validators.extend "
"instead."
),
DeprecationWarning,
stacklevel=2,
)
self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
_generate_legacy_type_checks(types),
)
if resolver is None:
resolver = RefResolver.from_schema(schema, id_of=id_of)
self.resolver = resolver
self.format_checker = format_checker
self.schema = schema
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
raise exceptions.SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
if _schema is True:
return
elif _schema is False:
yield exceptions.ValidationError(
"False schema does not allow %r" % (instance,),
validator=None,
validator_value=None,
instance=instance,
schema=_schema,
)
return
scope = id_of(_schema)
if scope:
self.resolver.push_scope(scope)
try:
ref = _schema.get(u"$ref")
if ref is not None:
validators = [(u"$ref", ref)]
else:
validators = iteritems(_schema)
for k, v in validators:
validator = self.VALIDATORS.get(k)
if validator is None:
continue
errors = validator(self, v, instance, _schema) or ()
for error in errors:
# set details if not already set by the called fn
error._set(
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
if k != u"$ref":
error.schema_path.appendleft(k)
yield error
finally:
if scope:
self.resolver.pop_scope()
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
if path is not None:
error.path.appendleft(path)
if schema_path is not None:
error.schema_path.appendleft(schema_path)
yield error
def validate(self, *args, **kwargs):
for error in self.iter_errors(*args, **kwargs):
raise error
def is_type(self, instance, type):
try:
return self.TYPE_CHECKER.is_type(instance, type)
except exceptions.UndefinedTypeCheck:
raise exceptions.UnknownType(type, instance, self.schema)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
return error is None
if version is not None:
Validator = validates(version)(Validator)
Validator.__name__ = version.title().replace(" ", "") + "Validator"
return Validator
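# A minimal usage sketch for `create`; the one-validator mapping below is a
# hypothetical example for illustration, not a real draft:
#
#     >>> MiniValidator = create(
#     ...     meta_schema={},
#     ...     validators={u"type": _validators.type},
#     ...     type_checker=_types.draft7_type_checker,
#     ... )
#     >>> MiniValidator({"type": "string"}).is_valid("a string")
#     True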
def extend(validator, validators=(), version=None, type_checker=None):
"""
Create a new validator class by extending an existing one.
Arguments:
validator (jsonschema.IValidator):
an existing validator class
validators (collections.Mapping):
a mapping of new validator callables to extend with, whose
structure is as in `create`.
.. note::
Any validator callables with the same name as an
existing one will (silently) replace the old validator
callable entirely, effectively overriding any validation
done in the "parent" validator class.
If you wish to instead extend the behavior of a parent's
validator callable, delegate and call it directly in
the new validator function by retrieving it using
``OldValidator.VALIDATORS["validator_name"]``.
version (str):
a version for the new validator class
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, the type checker of the extended
`jsonschema.IValidator` will be carried along.
Returns:
a new `jsonschema.IValidator` class extending the one provided
.. note:: Meta Schemas
The new validator class will have its parent's meta schema.
If you wish to change or extend the meta schema in the new
validator class, modify ``META_SCHEMA`` directly on the returned
class. Note that no implicit copying is done, so a copy should
likely be made before modifying it, in order to not affect the
old validator.
"""
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
if type_checker is None:
type_checker = validator.TYPE_CHECKER
elif validator._CREATED_WITH_DEFAULT_TYPES:
raise TypeError(
"Cannot extend a validator created with default_types "
"with a type_checker. Update the validator to use a "
"type_checker when created."
)
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
type_checker=type_checker,
id_of=validator.ID_OF,
)
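# A hedged sketch of `extend` in practice: adding a hypothetical "isEven"
# validator (its name and semantics are invented here purely for
# illustration):
#
#     >>> def is_even(validator, value, instance, schema):
#     ...     if value and isinstance(instance, int) and instance % 2:
#     ...         yield exceptions.ValidationError(
#     ...             "%r is not even" % (instance,))
#     >>> EvenValidator = extend(Draft7Validator, {u"isEven": is_even})
#     >>> EvenValidator({"isEven": True}).is_valid(3)
#     False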
Draft3Validator = create(
meta_schema=_utils.load_schema("draft3"),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
u"dependencies": _legacy_validators.dependencies_draft3,
u"disallow": _legacy_validators.disallow_draft3,
u"divisibleBy": _validators.multipleOf,
u"enum": _validators.enum,
u"extends": _legacy_validators.extends_draft3,
u"format": _validators.format,
u"items": _legacy_validators.items_draft3_draft4,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
u"maximum": _legacy_validators.maximum_draft3_draft4,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
u"minimum": _legacy_validators.minimum_draft3_draft4,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
u"properties": _legacy_validators.properties_draft3,
u"type": _legacy_validators.type_draft3,
u"uniqueItems": _validators.uniqueItems,
},
type_checker=_types.draft3_type_checker,
version="draft3",
id_of=lambda schema: schema.get(u"id", ""),
)
Draft4Validator = create(
meta_schema=_utils.load_schema("draft4"),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
u"allOf": _validators.allOf,
u"anyOf": _validators.anyOf,
u"dependencies": _validators.dependencies,
u"enum": _validators.enum,
u"format": _validators.format,
u"items": _legacy_validators.items_draft3_draft4,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
u"maxProperties": _validators.maxProperties,
u"maximum": _legacy_validators.maximum_draft3_draft4,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
u"minProperties": _validators.minProperties,
u"minimum": _legacy_validators.minimum_draft3_draft4,
u"multipleOf": _validators.multipleOf,
u"not": _validators.not_,
u"oneOf": _validators.oneOf,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
u"properties": _validators.properties,
u"required": _validators.required,
u"type": _validators.type,
u"uniqueItems": _validators.uniqueItems,
},
type_checker=_types.draft4_type_checker,
version="draft4",
id_of=lambda schema: schema.get(u"id", ""),
)
Draft6Validator = create(
meta_schema=_utils.load_schema("draft6"),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
u"allOf": _validators.allOf,
u"anyOf": _validators.anyOf,
u"const": _validators.const,
u"contains": _validators.contains,
u"dependencies": _validators.dependencies,
u"enum": _validators.enum,
u"exclusiveMaximum": _validators.exclusiveMaximum,
u"exclusiveMinimum": _validators.exclusiveMinimum,
u"format": _validators.format,
u"items": _validators.items,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
u"maxProperties": _validators.maxProperties,
u"maximum": _validators.maximum,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
u"minProperties": _validators.minProperties,
u"minimum": _validators.minimum,
u"multipleOf": _validators.multipleOf,
u"not": _validators.not_,
u"oneOf": _validators.oneOf,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
u"properties": _validators.properties,
u"propertyNames": _validators.propertyNames,
u"required": _validators.required,
u"type": _validators.type,
u"uniqueItems": _validators.uniqueItems,
},
type_checker=_types.draft6_type_checker,
version="draft6",
)
Draft7Validator = create(
meta_schema=_utils.load_schema("draft7"),
validators={
u"$ref": _validators.ref,
u"additionalItems": _validators.additionalItems,
u"additionalProperties": _validators.additionalProperties,
u"allOf": _validators.allOf,
u"anyOf": _validators.anyOf,
u"const": _validators.const,
u"contains": _validators.contains,
u"dependencies": _validators.dependencies,
u"enum": _validators.enum,
u"exclusiveMaximum": _validators.exclusiveMaximum,
u"exclusiveMinimum": _validators.exclusiveMinimum,
u"format": _validators.format,
u"if": _validators.if_,
u"items": _validators.items,
u"maxItems": _validators.maxItems,
u"maxLength": _validators.maxLength,
u"maxProperties": _validators.maxProperties,
u"maximum": _validators.maximum,
u"minItems": _validators.minItems,
u"minLength": _validators.minLength,
u"minProperties": _validators.minProperties,
u"minimum": _validators.minimum,
u"multipleOf": _validators.multipleOf,
u"oneOf": _validators.oneOf,
u"not": _validators.not_,
u"pattern": _validators.pattern,
u"patternProperties": _validators.patternProperties,
u"properties": _validators.properties,
u"propertyNames": _validators.propertyNames,
u"required": _validators.required,
u"type": _validators.type,
u"uniqueItems": _validators.uniqueItems,
},
type_checker=_types.draft7_type_checker,
version="draft7",
)
_LATEST_VERSION = Draft7Validator
class RefResolver(object):
"""
Resolve JSON References.
Arguments:
base_uri (str):
The URI of the referring document
referrer:
The actual referring document
store (dict):
A mapping from URIs to documents to cache
cache_remote (bool):
Whether remote refs should be cached after first resolution
handlers (dict):
A mapping from URI schemes to functions that should be used
to retrieve them
urljoin_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of joining
the resolution scope to subscopes.
remote_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of
resolved remote URLs.
Attributes:
cache_remote (bool):
Whether remote refs should be cached after first resolution
"""
def __init__(
self,
base_uri,
referrer,
store=(),
cache_remote=True,
handlers=(),
urljoin_cache=None,
remote_cache=None,
):
if urljoin_cache is None:
urljoin_cache = lru_cache(1024)(urljoin)
if remote_cache is None:
remote_cache = lru_cache(1024)(self.resolve_from_url)
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self._scopes_stack = [base_uri]
self.store = _utils.URIDict(
(id, validator.META_SCHEMA)
for id, validator in iteritems(meta_schemas)
)
self.store.update(store)
self.store[base_uri] = referrer
self._urljoin_cache = urljoin_cache
self._remote_cache = remote_cache
@classmethod
def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
Arguments:
schema:
the referring schema
Returns:
`RefResolver`
"""
return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
def push_scope(self, scope):
"""
Enter a given sub-scope.
Treats further dereferences as being performed underneath the
given scope.
"""
self._scopes_stack.append(
self._urljoin_cache(self.resolution_scope, scope),
)
def pop_scope(self):
"""
Exit the most recently entered scope.
Treats further dereferences as being performed underneath the
original scope.
Don't call this method more times than `push_scope` has been
called.
"""
try:
self._scopes_stack.pop()
except IndexError:
raise exceptions.RefResolutionError(
"Failed to pop the scope from an empty stack. "
"`pop_scope()` should only be called once for every "
"`push_scope()`"
)
@property
def resolution_scope(self):
"""
Retrieve the current resolution scope.
"""
return self._scopes_stack[-1]
@property
def base_uri(self):
"""
Retrieve the current base URI, not including any fragment.
"""
uri, _ = urldefrag(self.resolution_scope)
return uri
@contextlib.contextmanager
def in_scope(self, scope):
"""
Temporarily enter the given scope for the duration of the context.
"""
self.push_scope(scope)
try:
yield
finally:
self.pop_scope()
@contextlib.contextmanager
def resolving(self, ref):
"""
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
"""
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
def resolve(self, ref):
"""
Resolve the given reference.
"""
url = self._urljoin_cache(self.resolution_scope, ref)
return url, self._remote_cache(url)
def resolve_from_url(self, url):
"""
Resolve the given remote URL.
"""
url, fragment = urldefrag(url)
try:
document = self.store[url]
except KeyError:
try:
document = self.resolve_remote(url)
except Exception as exc:
raise exceptions.RefResolutionError(exc)
return self.resolve_fragment(document, fragment)
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
Arguments:
document:
The referent document
fragment (str):
a URI fragment to resolve within it
"""
fragment = fragment.lstrip(u"/")
parts = unquote(fragment).split(u"/") if fragment else []
for part in parts:
part = part.replace(u"~1", u"/").replace(u"~0", u"~")
if isinstance(document, Sequence):
# Array indexes should be turned into integers
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except (TypeError, LookupError):
raise exceptions.RefResolutionError(
"Unresolvable JSON pointer: %r" % fragment
)
return document
def resolve_remote(self, uri):
"""
Resolve a remote ``uri``.
If called directly, does not check the store first, but after
retrieving the document at the specified URI it will be saved in
the store if :attr:`cache_remote` is True.
.. note::
If the requests_ library is present, ``jsonschema`` will use it to
request the remote ``uri``, so that the correct encoding is
detected and used.
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
Arguments:
uri (str):
The URI to resolve
Returns:
The retrieved document
.. _requests: https://pypi.org/project/requests/
"""
try:
import requests
except ImportError:
requests = None
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
elif scheme in [u"http", u"https"] and requests:
# Requests has support for detecting the correct encoding of
# json over http
result = requests.get(uri).json()
else:
# Otherwise, pass off to urllib and assume utf-8
with urlopen(uri) as url:
result = json.loads(url.read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
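# A brief, hedged sketch of `RefResolver` resolution (the documents and
# pointers below are invented for illustration):
#
#     >>> resolver = RefResolver.from_schema(
#     ...     {"definitions": {"a": {"type": "integer"}}})
#     >>> resolver.resolve_fragment({"foo": ["bar"]}, u"foo/0")
#     'bar'
#     >>> url, resolved = resolver.resolve("#/definitions/a")
#     >>> resolved
#     {'type': 'integer'}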
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems": 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is
itself valid, since not doing so can lead to less obvious error
messages and failures in less obvious or consistent ways.
If you know you have a valid schema already, especially if you
intend to validate multiple instances with the same schema, you
likely would prefer using the `IValidator.validate` method directly
on a specific validator (e.g. ``Draft7Validator.validate``).
Arguments:
instance:
The instance to validate
schema:
The schema to validate with
cls (IValidator):
The class that will be used to validate the instance.
If the ``cls`` argument is not provided, two things will happen
in accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_
then the proper validator will be used. The specification recommends
that all schemas contain :validator:`$schema` properties for this
reason. If no :validator:`$schema` property is found, the default
validator class is the latest released draft.
Any other provided positional and keyword arguments will be passed
on when instantiating the ``cls``.
Raises:
`jsonschema.exceptions.ValidationError` if the instance
is invalid
`jsonschema.exceptions.SchemaError` if the schema itself
is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with
`jsonschema.validators.validates`
"""
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
validator = cls(schema, *args, **kwargs)
error = exceptions.best_match(validator.iter_errors(instance))
if error is not None:
raise error
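# For example (a passing counterpart to the failing doctest above; the schema
# and instance are arbitrary):
#
#     >>> validate(instance={"name": "x"}, schema={"type": "object"})
#     >>> validate(3, {"maximum": 10}, cls=Draft7Validator)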
def validator_for(schema, default=_LATEST_VERSION):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the
given schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class
cannot be determined.
If unprovided, the default is to return the latest supported
draft.
"""
if schema is True or schema is False or u"$schema" not in schema:
return default
if schema[u"$schema"] not in meta_schemas:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
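# For example (hedged sketch; the URI is the draft 7 meta-schema id that
# `validates` registers above):
#
#     >>> schema = {"$schema": "http://json-schema.org/draft-07/schema#"}
#     >>> validator_for(schema) is Draft7Validator
#     True
#     >>> validator_for({}) is _LATEST_VERSION
#     True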
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_format.py | import datetime
import re
import socket
import struct
from jsonschema.compat import str_types
from jsonschema.exceptions import FormatError
class FormatChecker(object):
"""
A ``format`` property checker.
JSON Schema does not mandate that the ``format`` property actually do any
validation. If validation is desired however, instances of this class can
be hooked into validators to enable format validation.
`FormatChecker` objects always return ``True`` when asked about
formats that they do not know how to validate.
To check a custom format using a function that takes an instance and
returns a ``bool``, use the `FormatChecker.checks` or
`FormatChecker.cls_checks` decorators.
Arguments:
formats (~collections.Iterable):
The known formats to validate. This argument can be used to
limit which formats will be used during validation.
"""
checkers = {}
def __init__(self, formats=None):
if formats is None:
self.checkers = self.checkers.copy()
else:
self.checkers = dict((k, self.checkers[k]) for k in formats)
def __repr__(self):
return "<FormatChecker checkers={}>".format(sorted(self.checkers))
def checks(self, format, raises=()):
"""
Register a decorated function as validating a new format.
Arguments:
format (str):
The format that the decorated function will check.
raises (Exception):
The exception(s) raised by the decorated function when an
invalid instance is found.
The exception object will be accessible as the
`jsonschema.exceptions.ValidationError.cause` attribute of the
resulting validation error.
"""
def _checks(func):
self.checkers[format] = (func, raises)
return func
return _checks
cls_checks = classmethod(checks)
def check(self, instance, format):
"""
Check whether the instance conforms to the given format.
Arguments:
instance (*any primitive type*, i.e. str, number, bool):
The instance to check
format (str):
The format that instance should conform to
Raises:
FormatError: if the instance does not conform to ``format``
"""
if format not in self.checkers:
return
func, raises = self.checkers[format]
result, cause = None, None
try:
result = func(instance)
except raises as e:
cause = e
if not result:
raise FormatError(
"%r is not a %r" % (instance, format), cause=cause,
)
def conforms(self, instance, format):
"""
Check whether the instance conforms to the given format.
Arguments:
instance (*any primitive type*, i.e. str, number, bool):
The instance to check
format (str):
The format that instance should conform to
Returns:
bool: whether it conformed
"""
try:
self.check(instance, format)
except FormatError:
return False
else:
return True
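# A short usage sketch (the values checked are arbitrary examples):
#
#     >>> checker = FormatChecker()
#     >>> checker.conforms("[email protected]", "email")
#     True
#     >>> checker.conforms("no-at-sign", "email")
#     False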
draft3_format_checker = FormatChecker()
draft4_format_checker = FormatChecker()
draft6_format_checker = FormatChecker()
draft7_format_checker = FormatChecker()
_draft_checkers = dict(
draft3=draft3_format_checker,
draft4=draft4_format_checker,
draft6=draft6_format_checker,
draft7=draft7_format_checker,
)
def _checks_drafts(
name=None,
draft3=None,
draft4=None,
draft6=None,
draft7=None,
raises=(),
):
draft3 = draft3 or name
draft4 = draft4 or name
draft6 = draft6 or name
draft7 = draft7 or name
def wrap(func):
if draft3:
func = _draft_checkers["draft3"].checks(draft3, raises)(func)
if draft4:
func = _draft_checkers["draft4"].checks(draft4, raises)(func)
if draft6:
func = _draft_checkers["draft6"].checks(draft6, raises)(func)
if draft7:
func = _draft_checkers["draft7"].checks(draft7, raises)(func)
# Oy. This is bad global state, but relied upon for now, until
# deprecation. See https://github.com/Julian/jsonschema/issues/519
# and test_format_checkers_come_with_defaults
FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)(
func,
)
return func
return wrap
@_checks_drafts(name="idn-email")
@_checks_drafts(name="email")
def is_email(instance):
if not isinstance(instance, str_types):
return True
return "@" in instance
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
@_checks_drafts(
draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4",
)
def is_ipv4(instance):
if not isinstance(instance, str_types):
return True
if not _ipv4_re.match(instance):
return False
return all(0 <= int(component) <= 255 for component in instance.split("."))
if hasattr(socket, "inet_pton"):
# FIXME: Really this only should raise struct.error, but see the sadness
# that is https://twistedmatrix.com/trac/ticket/9409
@_checks_drafts(
name="ipv6", raises=(socket.error, struct.error, ValueError),
)
def is_ipv6(instance):
if not isinstance(instance, str_types):
return True
return socket.inet_pton(socket.AF_INET6, instance)
_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
@_checks_drafts(
draft3="host-name",
draft4="hostname",
draft6="hostname",
draft7="hostname",
)
def is_host_name(instance):
if not isinstance(instance, str_types):
return True
if not _host_name_re.match(instance):
return False
components = instance.split(".")
for component in components:
if len(component) > 63:
return False
return True
try:
# The built-in `idna` codec only implements RFC 3490 (IDNA 2003), so we go
# elsewhere.
import idna
except ImportError:
pass
else:
@_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
def is_idn_host_name(instance):
if not isinstance(instance, str_types):
return True
idna.encode(instance)
return True
try:
import rfc3987
except ImportError:
try:
from rfc3986_validator import validate_rfc3986
except ImportError:
pass
else:
@_checks_drafts(name="uri")
def is_uri(instance):
if not isinstance(instance, str_types):
return True
return validate_rfc3986(instance, rule="URI")
@_checks_drafts(
draft6="uri-reference",
draft7="uri-reference",
raises=ValueError,
)
def is_uri_reference(instance):
if not isinstance(instance, str_types):
return True
return validate_rfc3986(instance, rule="URI_reference")
else:
@_checks_drafts(draft7="iri", raises=ValueError)
def is_iri(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="IRI")
@_checks_drafts(draft7="iri-reference", raises=ValueError)
def is_iri_reference(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="IRI_reference")
@_checks_drafts(name="uri", raises=ValueError)
def is_uri(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="URI")
@_checks_drafts(
draft6="uri-reference",
draft7="uri-reference",
raises=ValueError,
)
def is_uri_reference(instance):
if not isinstance(instance, str_types):
return True
return rfc3987.parse(instance, rule="URI_reference")
try:
from strict_rfc3339 import validate_rfc3339
except ImportError:
try:
from rfc3339_validator import validate_rfc3339
except ImportError:
validate_rfc3339 = None
if validate_rfc3339:
@_checks_drafts(name="date-time")
def is_datetime(instance):
if not isinstance(instance, str_types):
return True
return validate_rfc3339(instance)
@_checks_drafts(draft7="time")
def is_time(instance):
if not isinstance(instance, str_types):
return True
return is_datetime("1970-01-01T" + instance)
@_checks_drafts(name="regex", raises=re.error)
def is_regex(instance):
if not isinstance(instance, str_types):
return True
return re.compile(instance)
@_checks_drafts(draft3="date", draft7="date", raises=ValueError)
def is_date(instance):
if not isinstance(instance, str_types):
return True
return datetime.datetime.strptime(instance, "%Y-%m-%d")
@_checks_drafts(draft3="time", raises=ValueError)
def is_draft3_time(instance):
if not isinstance(instance, str_types):
return True
return datetime.datetime.strptime(instance, "%H:%M:%S")
try:
import webcolors
except ImportError:
pass
else:
def is_css_color_code(instance):
return webcolors.normalize_hex(instance)
@_checks_drafts(draft3="color", raises=(ValueError, TypeError))
def is_css21_color(instance):
if (
not isinstance(instance, str_types) or
instance.lower() in webcolors.css21_names_to_hex
):
return True
return is_css_color_code(instance)
def is_css3_color(instance):
if instance.lower() in webcolors.css3_names_to_hex:
return True
return is_css_color_code(instance)
try:
import jsonpointer
except ImportError:
pass
else:
@_checks_drafts(
draft6="json-pointer",
draft7="json-pointer",
raises=jsonpointer.JsonPointerException,
)
def is_json_pointer(instance):
if not isinstance(instance, str_types):
return True
return jsonpointer.JsonPointer(instance)
# TODO: I don't want to maintain this, so it
# needs to go either into jsonpointer (pending
# https://github.com/stefankoegl/python-json-pointer/issues/34) or
# into a new external library.
@_checks_drafts(
draft7="relative-json-pointer",
raises=jsonpointer.JsonPointerException,
)
def is_relative_json_pointer(instance):
# Definition taken from:
# https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
if not isinstance(instance, str_types):
return True
non_negative_integer, rest = [], ""
for i, character in enumerate(instance):
if character.isdigit():
non_negative_integer.append(character)
continue
if not non_negative_integer:
return False
rest = instance[i:]
break
return (rest == "#") or jsonpointer.JsonPointer(rest)
try:
import uritemplate.exceptions
except ImportError:
pass
else:
@_checks_drafts(
draft6="uri-template",
draft7="uri-template",
raises=uritemplate.exceptions.InvalidTemplate,
)
def is_uri_template(
instance,
template_validator=uritemplate.Validator().force_balanced_braces(),
):
template = uritemplate.URITemplate(instance)
return template_validator.validate(template)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/exceptions.py | """
Validation errors, and some surrounding helpers.
"""
from collections import defaultdict, deque
import itertools
import pprint
import textwrap
import attr
from jsonschema import _utils
from jsonschema.compat import PY3, iteritems
WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
STRONG_MATCHES = frozenset()
_unset = _utils.Unset()
class _Error(Exception):
def __init__(
self,
message,
validator=_unset,
path=(),
cause=None,
context=(),
validator_value=_unset,
instance=_unset,
schema=_unset,
schema_path=(),
parent=None,
):
super(_Error, self).__init__(
message,
validator,
path,
cause,
context,
validator_value,
instance,
schema,
schema_path,
parent,
)
self.message = message
self.path = self.relative_path = deque(path)
self.schema_path = self.relative_schema_path = deque(schema_path)
self.context = list(context)
self.cause = self.__cause__ = cause
self.validator = validator
self.validator_value = validator_value
self.instance = instance
self.schema = schema
self.parent = parent
for error in context:
error.parent = self
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.message)
def __unicode__(self):
essential_for_verbose = (
self.validator, self.validator_value, self.instance, self.schema,
)
if any(m is _unset for m in essential_for_verbose):
return self.message
pschema = pprint.pformat(self.schema, width=72)
pinstance = pprint.pformat(self.instance, width=72)
return self.message + textwrap.dedent("""
Failed validating %r in %s%s:
%s
On %s%s:
%s
""".rstrip()
) % (
self.validator,
self._word_for_schema_in_error_message,
_utils.format_as_index(list(self.relative_schema_path)[:-1]),
_utils.indent(pschema),
self._word_for_instance_in_error_message,
_utils.format_as_index(self.relative_path),
_utils.indent(pinstance),
)
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode("utf-8")
@classmethod
def create_from(cls, other):
return cls(**other._contents())
@property
def absolute_path(self):
parent = self.parent
if parent is None:
return self.relative_path
path = deque(self.relative_path)
path.extendleft(reversed(parent.absolute_path))
return path
@property
def absolute_schema_path(self):
parent = self.parent
if parent is None:
return self.relative_schema_path
path = deque(self.relative_schema_path)
path.extendleft(reversed(parent.absolute_schema_path))
return path
def _set(self, **kwargs):
for k, v in iteritems(kwargs):
if getattr(self, k) is _unset:
setattr(self, k, v)
def _contents(self):
attrs = (
"message", "cause", "context", "validator", "validator_value",
"path", "schema_path", "instance", "schema", "parent",
)
return dict((attr, getattr(self, attr)) for attr in attrs)
class ValidationError(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
class SchemaError(_Error):
"""
A schema was invalid under its corresponding metaschema.
"""
_word_for_schema_in_error_message = "metaschema"
_word_for_instance_in_error_message = "schema"
@attr.s(hash=True)
class RefResolutionError(Exception):
"""
A ref could not be resolved.
"""
_cause = attr.ib()
def __str__(self):
return str(self._cause)
class UndefinedTypeCheck(Exception):
"""
A type checker was asked to check a type it did not have registered.
"""
def __init__(self, type):
self.type = type
def __unicode__(self):
return "Type %r is unknown to this type checker" % self.type
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode("utf-8")
class UnknownType(Exception):
"""
A validator was asked to validate an instance against an unknown type.
"""
def __init__(self, type, instance, schema):
self.type = type
self.instance = instance
self.schema = schema
def __unicode__(self):
pschema = pprint.pformat(self.schema, width=72)
pinstance = pprint.pformat(self.instance, width=72)
return textwrap.dedent("""
Unknown type %r for validator with schema:
%s
While checking instance:
%s
""".rstrip()
) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode("utf-8")
class FormatError(Exception):
"""
Validating a format failed.
"""
def __init__(self, message, cause=None):
super(FormatError, self).__init__(message, cause)
self.message = message
self.cause = self.__cause__ = cause
def __unicode__(self):
return self.message
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return self.message.encode("utf-8")
class ErrorTree(object):
"""
ErrorTrees make it easier to check which validations failed.
"""
_instance = _unset
def __init__(self, errors=()):
self.errors = {}
self._contents = defaultdict(self.__class__)
for error in errors:
container = self
for element in error.path:
container = container[element]
container.errors[error.validator] = error
container._instance = error.instance
def __contains__(self, index):
"""
Check whether ``instance[index]`` has any errors.
"""
return index in self._contents
def __getitem__(self, index):
"""
Retrieve the child tree one level down at the given ``index``.
If the index is not in the instance that this tree corresponds to and
is not known by this tree, whatever error would be raised by
``instance.__getitem__`` will be propagated (usually this is some
subclass of `exceptions.LookupError`).
"""
if self._instance is not _unset and index not in self:
self._instance[index]
return self._contents[index]
def __setitem__(self, index, value):
"""
Add an error to the tree at the given ``index``.
"""
self._contents[index] = value
def __iter__(self):
"""
Iterate (non-recursively) over the indices in the instance with errors.
"""
return iter(self._contents)
def __len__(self):
"""
Return the `total_errors`.
"""
return self.total_errors
def __repr__(self):
return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
@property
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
"""
child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
return len(self.errors) + child_errors
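# A hedged usage sketch (the schema and instance are invented for
# illustration):
#
#     >>> from jsonschema import Draft7Validator
#     >>> v = Draft7Validator({"items": {"type": "integer"}})
#     >>> tree = ErrorTree(v.iter_errors([1, "spam", 3]))
#     >>> tree.total_errors
#     1
#     >>> 1 in tree
#     True
#     >>> tree[1].errors["type"].message
#     "'spam' is not of type 'integer'"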
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
"""
Create a key function that can be used to sort errors by relevance.
Arguments:
weak (set):
a collection of validator names to consider to be "weak".
If there are two errors at the same level of the instance
and one is in the set of weak validator names, the other
error will take priority. By default, :validator:`anyOf` and
:validator:`oneOf` are considered weak validators and will
be superseded by other same-level validation errors.
strong (set):
a collection of validator names to consider to be "strong"
"""
def relevance(error):
validator = error.validator
return -len(error.path), validator not in weak, validator in strong
return relevance
relevance = by_relevance()
def best_match(errors, key=relevance):
"""
Try to find an error that appears to be the best match among given errors.
In general, errors that are higher up in the instance (i.e. for which
`ValidationError.path` is shorter) are considered better matches,
since they indicate "more" is wrong with the instance.
If the resulting match is either :validator:`oneOf` or :validator:`anyOf`,
the *opposite* assumption is made -- i.e. the deepest error is picked,
since these validators only need to match once, and any other errors may
not be relevant.
Arguments:
errors (collections.Iterable):
the errors to select from. Do not provide a mixture of
errors from different validation attempts (i.e. from
different instances or schemas), since it won't produce
sensible output.
key (collections.Callable):
the key to use when sorting errors. See `relevance` and
transitively `by_relevance` for more details (the default is
to sort with the defaults of that function). Changing the
default is only useful if you want to change the function
that rates errors but still want the error context descent
done by this function.
Returns:
the best matching error, or ``None`` if the iterable was empty
.. note::
This function is a heuristic. Its return value may change for a given
set of inputs from version to version if better heuristics are added.
"""
errors = iter(errors)
best = next(errors, None)
if best is None:
return
best = max(itertools.chain([best], errors), key=key)
while best.context:
best = min(best.context, key=key)
return best
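# A small sketch (schema and instance are arbitrary; note how the "anyOf"
# error itself is passed over in favor of an error from its context):
#
#     >>> from jsonschema import Draft7Validator
#     >>> v = Draft7Validator({"anyOf": [{"type": "string"}, {"maximum": 3}]})
#     >>> best_match(v.iter_errors(11)).validator
#     'type'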
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_types.py | import numbers
from pyrsistent import pmap
import attr
from jsonschema.compat import int_types, str_types
from jsonschema.exceptions import UndefinedTypeCheck
def is_array(checker, instance):
return isinstance(instance, list)
def is_bool(checker, instance):
return isinstance(instance, bool)
def is_integer(checker, instance):
# bool inherits from int, so ensure bools aren't reported as ints
if isinstance(instance, bool):
return False
return isinstance(instance, int_types)
def is_null(checker, instance):
return instance is None
def is_number(checker, instance):
# bool inherits from int, so ensure bools aren't reported as ints
if isinstance(instance, bool):
return False
return isinstance(instance, numbers.Number)
def is_object(checker, instance):
return isinstance(instance, dict)
def is_string(checker, instance):
return isinstance(instance, str_types)
def is_any(checker, instance):
return True
@attr.s(frozen=True)
class TypeChecker(object):
"""
A ``type`` property checker.
A `TypeChecker` performs type checking for an `IValidator`. Type
checks to perform are updated using `TypeChecker.redefine` or
`TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
Each of these return a new `TypeChecker` object.
Arguments:
type_checkers (dict):
The initial mapping of types to their checking functions.
"""
_type_checkers = attr.ib(default=pmap(), converter=pmap)
def is_type(self, instance, type):
"""
Check if the instance is of the appropriate type.
Arguments:
instance (object):
The instance to check
type (str):
The name of the type that is expected.
Returns:
bool: Whether it conformed.
Raises:
`jsonschema.exceptions.UndefinedTypeCheck`:
if type is unknown to this object.
"""
try:
fn = self._type_checkers[type]
except KeyError:
raise UndefinedTypeCheck(type)
return fn(self, instance)
def redefine(self, type, fn):
"""
Produce a new checker with the given type redefined.
Arguments:
type (str):
The name of the type to check.
fn (collections.Callable):
A function taking exactly two parameters - the type
checker calling the function and the instance to check.
The function should return true if instance is of this
type and false otherwise.
Returns:
A new `TypeChecker` instance.
"""
return self.redefine_many({type: fn})
def redefine_many(self, definitions=()):
"""
Produce a new checker with the given types redefined.
Arguments:
definitions (dict):
A dictionary mapping types to their checking functions.
Returns:
A new `TypeChecker` instance.
"""
return attr.evolve(
self, type_checkers=self._type_checkers.update(definitions),
)
def remove(self, *types):
"""
Produce a new checker with the given types forgotten.
Arguments:
types (~collections.Iterable):
the names of the types to remove.
Returns:
A new `TypeChecker` instance
Raises:
`jsonschema.exceptions.UndefinedTypeCheck`:
if any given type is unknown to this object
"""
checkers = self._type_checkers
for each in types:
try:
checkers = checkers.remove(each)
except KeyError:
raise UndefinedTypeCheck(each)
return attr.evolve(self, type_checkers=checkers)
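# A hedged sketch (Python 3 assumed; accepting bytes as "string" is an
# invented policy, shown only to illustrate `redefine`):
#
#     >>> checker = draft7_type_checker.redefine(
#     ...     u"string",
#     ...     lambda checker, instance: isinstance(instance, (str, bytes)),
#     ... )
#     >>> checker.is_type(b"raw", u"string")
#     True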
draft3_type_checker = TypeChecker(
{
u"any": is_any,
u"array": is_array,
u"boolean": is_bool,
u"integer": is_integer,
u"object": is_object,
u"null": is_null,
u"number": is_number,
u"string": is_string,
},
)
draft4_type_checker = draft3_type_checker.remove(u"any")
draft6_type_checker = draft4_type_checker.redefine(
u"integer",
lambda checker, instance: (
is_integer(checker, instance) or
isinstance(instance, float) and instance.is_integer()
),
)
draft7_type_checker = draft6_type_checker
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_reflect.py | # -*- test-case-name: twisted.test.test_reflect -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standardized versions of various cool and/or strange things that you can do
with Python's reflection capabilities.
"""
import sys
from jsonschema.compat import PY3
class _NoModuleFound(Exception):
"""
No module was found because none exists.
"""
class InvalidName(ValueError):
"""
The given name is not a dot-separated list of Python objects.
"""
class ModuleNotFound(InvalidName):
"""
The module associated with the given name doesn't exist and it can't be
imported.
"""
class ObjectNotFound(InvalidName):
"""
The object associated with the given name doesn't exist and it can't be
imported.
"""
if PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
def _importAndCheckStack(importName):
"""
Import the given name as a module, then walk the stack to determine whether
the failure was the module not existing, or some code in the module (for
example a dependent import) failing. This can be helpful to determine
whether any actual application code was run. For example, to distinguish
administrative error (entering the wrong module name) from programmer
error (writing buggy code in a module that fails to import).
@param importName: The name of the module to import.
@type importName: C{str}
@raise Exception: if something bad happens. This can be any type of
exception, since nobody knows what loading some arbitrary code might
do.
@raise _NoModuleFound: if no module was found.
"""
try:
return __import__(importName)
except ImportError:
excType, excValue, excTraceback = sys.exc_info()
while excTraceback:
execName = excTraceback.tb_frame.f_globals["__name__"]
# in Python 2 execName is None when an ImportError is encountered,
# whereas in Python 3 execName is equal to the importName.
if execName is None or execName == importName:
reraise(excValue, excTraceback)
excTraceback = excTraceback.tb_next
raise _NoModuleFound()
def namedAny(name):
"""
Retrieve a Python object by its fully qualified name from the global Python
module namespace. The first part of the name, that describes a module,
will be discovered and imported. Each subsequent part of the name is
treated as the name of an attribute of the object specified by all of the
name which came before it. For example, the fully-qualified name of this
object is 'twisted.python.reflect.namedAny'.
@type name: L{str}
@param name: The name of the object to return.
@raise InvalidName: If the name is an empty string, starts or ends with
a '.', or is otherwise syntactically incorrect.
@raise ModuleNotFound: If the name is syntactically correct but the
module it specifies cannot be imported because it does not appear to
exist.
@raise ObjectNotFound: If the name is syntactically correct, includes at
least one '.', but the module it specifies cannot be imported because
it does not appear to exist.
@raise AttributeError: If an attribute of an object along the way cannot be
accessed, or a module along the way is not found.
@return: the Python object identified by 'name'.
"""
if not name:
raise InvalidName('Empty module name')
names = name.split('.')
# if the name starts or ends with a '.' or contains '..', the __import__
# will raise an 'Empty module name' error. This will provide a better error
# message.
if '' in names:
raise InvalidName(
"name must be a string giving a '.'-separated list of Python "
"identifiers, not %r" % (name,))
topLevelPackage = None
moduleNames = names[:]
while not topLevelPackage:
if moduleNames:
trialname = '.'.join(moduleNames)
try:
topLevelPackage = _importAndCheckStack(trialname)
except _NoModuleFound:
moduleNames.pop()
else:
if len(names) == 1:
raise ModuleNotFound("No module named %r" % (name,))
else:
raise ObjectNotFound('%r does not name an object' % (name,))
obj = topLevelPackage
for n in names[1:]:
obj = getattr(obj, n)
return obj
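# For example (a standard-library name, used purely for illustration):
#
#     >>> namedAny("os.path.join") is __import__("os").path.join
#     True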
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_validators.py | import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
types_msg,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
def patternProperties(validator, patternProperties, instance, schema):
if not validator.is_type(instance, "object"):
return
for pattern, subschema in iteritems(patternProperties):
for k, v in iteritems(instance):
if re.search(pattern, k):
for error in validator.descend(
v, subschema, path=k, schema_path=pattern,
):
yield error
def propertyNames(validator, propertyNames, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in instance:
for error in validator.descend(
instance=property,
schema=propertyNames,
):
yield error
def additionalProperties(validator, aP, instance, schema):
if not validator.is_type(instance, "object"):
return
extras = set(find_additional_properties(instance, schema))
if validator.is_type(aP, "object"):
for extra in extras:
for error in validator.descend(instance[extra], aP, path=extra):
yield error
elif not aP and extras:
if "patternProperties" in schema:
patterns = sorted(schema["patternProperties"])
if len(extras) == 1:
verb = "does"
else:
verb = "do"
error = "%s %s not match any of the regexes: %s" % (
", ".join(map(repr, sorted(extras))),
verb,
", ".join(map(repr, patterns)),
)
yield ValidationError(error)
else:
error = "Additional properties are not allowed (%s %s unexpected)"
yield ValidationError(error % extras_msg(extras))
def items(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
if validator.is_type(items, "array"):
for (index, item), subschema in zip(enumerate(instance), items):
for error in validator.descend(
item, subschema, path=index, schema_path=index,
):
yield error
else:
for index, item in enumerate(instance):
for error in validator.descend(item, items, path=index):
yield error
def additionalItems(validator, aI, instance, schema):
if (
not validator.is_type(instance, "array") or
validator.is_type(schema.get("items", {}), "object")
):
return
len_items = len(schema.get("items", []))
if validator.is_type(aI, "object"):
for index, item in enumerate(instance[len_items:], start=len_items):
for error in validator.descend(item, aI, path=index):
yield error
elif not aI and len(instance) > len(schema.get("items", [])):
error = "Additional items are not allowed (%s %s unexpected)"
yield ValidationError(
error %
extras_msg(instance[len(schema.get("items", [])):])
)
def const(validator, const, instance, schema):
if not equal(instance, const):
yield ValidationError("%r was expected" % (const,))
def contains(validator, contains, instance, schema):
if not validator.is_type(instance, "array"):
return
if not any(validator.is_valid(element, contains) for element in instance):
yield ValidationError(
"None of %r are valid under the given schema" % (instance,)
)
def exclusiveMinimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance <= minimum:
yield ValidationError(
"%r is less than or equal to the minimum of %r" % (
instance, minimum,
),
)
def exclusiveMaximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance >= maximum:
yield ValidationError(
"%r is greater than or equal to the maximum of %r" % (
instance, maximum,
),
)
def minimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance < minimum:
yield ValidationError(
"%r is less than the minimum of %r" % (instance, minimum)
)
def maximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if instance > maximum:
yield ValidationError(
"%r is greater than the maximum of %r" % (instance, maximum)
)
def multipleOf(validator, dB, instance, schema):
if not validator.is_type(instance, "number"):
return
if isinstance(dB, float):
quotient = instance / dB
failed = int(quotient) != quotient
else:
failed = instance % dB
if failed:
yield ValidationError("%r is not a multiple of %r" % (instance, dB))
def minItems(validator, mI, instance, schema):
if validator.is_type(instance, "array") and len(instance) < mI:
yield ValidationError("%r is too short" % (instance,))
def maxItems(validator, mI, instance, schema):
if validator.is_type(instance, "array") and len(instance) > mI:
yield ValidationError("%r is too long" % (instance,))
def uniqueItems(validator, uI, instance, schema):
if (
uI and
validator.is_type(instance, "array") and
not uniq(instance)
):
yield ValidationError("%r has non-unique elements" % (instance,))
def pattern(validator, patrn, instance, schema):
if (
validator.is_type(instance, "string") and
not re.search(patrn, instance)
):
yield ValidationError("%r does not match %r" % (instance, patrn))
def format(validator, format, instance, schema):
if validator.format_checker is not None:
try:
validator.format_checker.check(instance, format)
except FormatError as error:
yield ValidationError(error.message, cause=error.cause)
def minLength(validator, mL, instance, schema):
if validator.is_type(instance, "string") and len(instance) < mL:
yield ValidationError("%r is too short" % (instance,))
def maxLength(validator, mL, instance, schema):
if validator.is_type(instance, "string") and len(instance) > mL:
yield ValidationError("%r is too long" % (instance,))
def dependencies(validator, dependencies, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, dependency in iteritems(dependencies):
if property not in instance:
continue
if validator.is_type(dependency, "array"):
for each in dependency:
if each not in instance:
message = "%r is a dependency of %r"
yield ValidationError(message % (each, property))
else:
for error in validator.descend(
instance, dependency, schema_path=property,
):
yield error
def enum(validator, enums, instance, schema):
if instance == 0 or instance == 1:
unbooled = unbool(instance)
if all(unbooled != unbool(each) for each in enums):
yield ValidationError("%r is not one of %r" % (instance, enums))
elif instance not in enums:
yield ValidationError("%r is not one of %r" % (instance, enums))
def ref(validator, ref, instance, schema):
resolve = getattr(validator.resolver, "resolve", None)
if resolve is None:
with validator.resolver.resolving(ref) as resolved:
for error in validator.descend(instance, resolved):
yield error
else:
scope, resolved = validator.resolver.resolve(ref)
validator.resolver.push_scope(scope)
try:
for error in validator.descend(instance, resolved):
yield error
finally:
validator.resolver.pop_scope()
def type(validator, types, instance, schema):
types = ensure_list(types)
if not any(validator.is_type(instance, type) for type in types):
yield ValidationError(types_msg(instance, types))
def properties(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
def required(validator, required, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in required:
if property not in instance:
yield ValidationError("%r is a required property" % property)
def minProperties(validator, mP, instance, schema):
if validator.is_type(instance, "object") and len(instance) < mP:
yield ValidationError(
"%r does not have enough properties" % (instance,)
)
def maxProperties(validator, mP, instance, schema):
if not validator.is_type(instance, "object"):
return
if len(instance) > mP:
yield ValidationError("%r has too many properties" % (instance,))
def allOf(validator, allOf, instance, schema):
for index, subschema in enumerate(allOf):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
def anyOf(validator, anyOf, instance, schema):
all_errors = []
for index, subschema in enumerate(anyOf):
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
break
all_errors.extend(errs)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
def oneOf(validator, oneOf, instance, schema):
subschemas = enumerate(oneOf)
all_errors = []
for index, subschema in subschemas:
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
first_valid = subschema
break
all_errors.extend(errs)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
if more_valid:
more_valid.append(first_valid)
reprs = ", ".join(repr(schema) for schema in more_valid)
yield ValidationError(
"%r is valid under each of %s" % (instance, reprs)
)
def not_(validator, not_schema, instance, schema):
if validator.is_valid(instance, not_schema):
yield ValidationError(
"%r is not allowed for %r" % (not_schema, instance)
)
def if_(validator, if_schema, instance, schema):
if validator.is_valid(instance, if_schema):
if u"then" in schema:
then = schema[u"then"]
for error in validator.descend(instance, then, schema_path="then"):
yield error
elif u"else" in schema:
else_ = schema[u"else"]
for error in validator.descend(instance, else_, schema_path="else"):
yield error
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/__init__.py | """
An implementation of JSON Schema for Python
The main functionality is provided by the validator classes for each of the
supported JSON Schema versions.
Most commonly, `validate` is the quickest way to simply validate a given
instance under a schema, and will create a validator for you.
"""
from jsonschema.exceptions import (
ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
)
from jsonschema._format import (
FormatChecker,
draft3_format_checker,
draft4_format_checker,
draft6_format_checker,
draft7_format_checker,
)
from jsonschema._types import TypeChecker
from jsonschema.validators import (
Draft3Validator,
Draft4Validator,
Draft6Validator,
Draft7Validator,
RefResolver,
validate,
)
try:
from importlib import metadata
except ImportError: # for Python<3.8
import importlib_metadata as metadata
__version__ = metadata.version("jsonschema")
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_legacy_validators.py | from jsonschema import _utils
from jsonschema.compat import iteritems
from jsonschema.exceptions import ValidationError
def dependencies_draft3(validator, dependencies, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, dependency in iteritems(dependencies):
if property not in instance:
continue
if validator.is_type(dependency, "object"):
for error in validator.descend(
instance, dependency, schema_path=property,
):
yield error
elif validator.is_type(dependency, "string"):
if dependency not in instance:
yield ValidationError(
"%r is a dependency of %r" % (dependency, property)
)
else:
for each in dependency:
if each not in instance:
message = "%r is a dependency of %r"
yield ValidationError(message % (each, property))
def disallow_draft3(validator, disallow, instance, schema):
for disallowed in _utils.ensure_list(disallow):
if validator.is_valid(instance, {"type": [disallowed]}):
yield ValidationError(
"%r is disallowed for %r" % (disallowed, instance)
)
def extends_draft3(validator, extends, instance, schema):
if validator.is_type(extends, "object"):
for error in validator.descend(instance, extends):
yield error
return
for index, subschema in enumerate(extends):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
def items_draft3_draft4(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
if validator.is_type(items, "object"):
for index, item in enumerate(instance):
for error in validator.descend(item, items, path=index):
yield error
else:
for (index, item), subschema in zip(enumerate(instance), items):
for error in validator.descend(
item, subschema, path=index, schema_path=index,
):
yield error
def minimum_draft3_draft4(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMinimum", False):
failed = instance <= minimum
cmp = "less than or equal to"
else:
failed = instance < minimum
cmp = "less than"
if failed:
yield ValidationError(
"%r is %s the minimum of %r" % (instance, cmp, minimum)
)
def maximum_draft3_draft4(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMaximum", False):
failed = instance >= maximum
cmp = "greater than or equal to"
else:
failed = instance > maximum
cmp = "greater than"
if failed:
yield ValidationError(
"%r is %s the maximum of %r" % (instance, cmp, maximum)
)
def properties_draft3(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
elif subschema.get("required", False):
error = ValidationError("%r is a required property" % property)
error._set(
validator="required",
validator_value=subschema["required"],
instance=instance,
schema=schema,
)
error.path.appendleft(property)
error.schema_path.extend([property, "required"])
yield error
def type_draft3(validator, types, instance, schema):
types = _utils.ensure_list(types)
all_errors = []
for index, type in enumerate(types):
if validator.is_type(type, "object"):
errors = list(validator.descend(instance, type, schema_path=index))
if not errors:
return
all_errors.extend(errors)
else:
if validator.is_type(instance, type):
return
else:
yield ValidationError(
_utils.types_msg(instance, types), context=all_errors,
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_utils.py | import itertools
import json
import pkgutil
import re
from jsonschema.compat import MutableMapping, str_types, urlsplit
class URIDict(MutableMapping):
"""
Dictionary which uses normalized URIs as keys.
"""
def normalize(self, uri):
return urlsplit(uri).geturl()
def __init__(self, *args, **kwargs):
self.store = dict()
self.store.update(*args, **kwargs)
def __getitem__(self, uri):
return self.store[self.normalize(uri)]
def __setitem__(self, uri, value):
self.store[self.normalize(uri)] = value
def __delitem__(self, uri):
del self.store[self.normalize(uri)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __repr__(self):
return repr(self.store)
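# A minimal usage sketch (not part of this module): keys that normalize to
# the same URI collide, since urlsplit(...).geturl() round-trips away
# cosmetic differences such as an empty fragment.
#
#     d = URIDict()
#     d["http://example.com/#"] = "a"
#     d["http://example.com/"]  # -> "a"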
class Unset(object):
"""
An as-of-yet unset attribute or unprovided default parameter.
"""
def __repr__(self):
return "<unset>"
def load_schema(name):
"""
Load a schema from ./schemas/``name``.json and return it.
"""
data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name))
return json.loads(data.decode("utf-8"))
def indent(string, times=1):
"""
A dumb version of `textwrap.indent` from Python 3.3.
"""
return "\n".join(" " * (4 * times) + line for line in string.splitlines())
def format_as_index(indices):
"""
Construct a single string containing indexing operations for the indices.
For example, [1, 2, "foo"] -> [1][2]["foo"]
Arguments:
indices (sequence):
The indices to format.
"""
if not indices:
return ""
return "[%s]" % "][".join(repr(index) for index in indices)
def find_additional_properties(instance, schema):
"""
Return the set of additional properties for the given ``instance``.
Weeds out properties that should have been validated by ``properties`` and
/ or ``patternProperties``.
Assumes ``instance`` is dict-like already.
"""
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
for property in instance:
if property not in properties:
if patterns and re.search(patterns, property):
continue
yield property
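# A minimal usage sketch (not part of this module):
#
#     schema = {"properties": {"a": {}}, "patternProperties": {"^b": {}}}
#     list(find_additional_properties({"a": 1, "b1": 2, "c": 3}, schema))
#     # -> ["c"]: "a" is declared, "b1" matches a pattern, "c" is additional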
def extras_msg(extras):
"""
Create an error message for extra items or properties.
"""
if len(extras) == 1:
verb = "was"
else:
verb = "were"
return ", ".join(repr(extra) for extra in extras), verb
def types_msg(instance, types):
"""
Create an error message for a failure to match the given types.
    If any of the given ``types`` is an object containing a ``name`` property
    (as draft 3 allows), that name is used to describe the type in the
    message.
    Otherwise the message is simply the reprs of the given ``types``.
"""
reprs = []
for type in types:
try:
reprs.append(repr(type["name"]))
except Exception:
reprs.append(repr(type))
return "%r is not of type %s" % (instance, ", ".join(reprs))
def flatten(suitable_for_isinstance):
"""
isinstance() can accept a bunch of really annoying different types:
* a single type
* a tuple of types
* an arbitrary nested tree of tuples
Return a flattened tuple of the given argument.
"""
types = set()
if not isinstance(suitable_for_isinstance, tuple):
suitable_for_isinstance = (suitable_for_isinstance,)
for thing in suitable_for_isinstance:
if isinstance(thing, tuple):
types.update(flatten(thing))
else:
types.add(thing)
return tuple(types)
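# A minimal usage sketch (not part of this module):
#
#     flatten((int, (str, (bool, float))))
#     # -> a flat tuple of int, str, bool and float (order unspecified,
#     #    since the result is built from a set)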
def ensure_list(thing):
"""
Wrap ``thing`` in a list if it's a single str.
Otherwise, return it unchanged.
"""
if isinstance(thing, str_types):
return [thing]
return thing
def equal(one, two):
"""
Check if two things are equal, but evade booleans and ints being equal.
"""
return unbool(one) == unbool(two)
def unbool(element, true=object(), false=object()):
"""
A hack to make True and 1 and False and 0 unique for ``uniq``.
"""
if element is True:
return true
elif element is False:
return false
return element
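# A minimal usage sketch (not part of this module):
#
#     equal(True, 1)  # -> False, although True == 1 in plain Python
#     equal(1.0, 1)   # -> True: only booleans are special-cased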
def uniq(container):
"""
Check if all of a container's elements are unique.
    First tries to rely on the elements being hashable, then falls back
    on them being sortable, and finally falls back on brute force.
"""
try:
return len(set(unbool(i) for i in container)) == len(container)
except TypeError:
try:
sort = sorted(unbool(i) for i in container)
sliced = itertools.islice(sort, 1, None)
for i, j in zip(sort, sliced):
if i == j:
return False
except (NotImplementedError, TypeError):
seen = []
for e in container:
e = unbool(e)
if e in seen:
return False
seen.append(e)
return True
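# A minimal usage sketch (not part of this module):
#
#     uniq([1, True])  # -> True: 1 and True count as distinct values here
#     uniq([1, 1.0])   # -> False: 1 and 1.0 are genuinely equal
#     uniq([[], [1]])  # -> True, via the sorting fallback (lists are unhashable)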
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/compat.py | """
Python 2/3 compatibility helpers.
Note: This module is *not* public API.
"""
import contextlib
import operator
import sys
try:
from collections.abc import MutableMapping, Sequence # noqa
except ImportError:
from collections import MutableMapping, Sequence # noqa
PY3 = sys.version_info[0] >= 3
if PY3:
zip = zip
from functools import lru_cache
from io import StringIO as NativeIO
from urllib.parse import (
unquote, urljoin, urlunsplit, SplitResult, urlsplit
)
from urllib.request import pathname2url, urlopen
str_types = str,
int_types = int,
iteritems = operator.methodcaller("items")
else:
from itertools import izip as zip # noqa
from io import BytesIO as NativeIO
from urlparse import urljoin, urlunsplit, SplitResult, urlsplit
from urllib import pathname2url, unquote # noqa
import urllib2 # noqa
def urlopen(*args, **kwargs):
return contextlib.closing(urllib2.urlopen(*args, **kwargs))
str_types = basestring
int_types = int, long
iteritems = operator.methodcaller("iteritems")
from functools32 import lru_cache
def urldefrag(url):
if "#" in url:
s, n, p, q, frag = urlsplit(url)
defrag = urlunsplit((s, n, p, q, ""))
else:
defrag = url
frag = ""
return defrag, frag
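# A minimal usage sketch (not part of this module):
#
#     urldefrag("http://example.com/a#frag")  # -> ("http://example.com/a", "frag")
#     urldefrag("http://example.com/a")       # -> ("http://example.com/a", "")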
# flake8: noqa
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/__main__.py | from jsonschema.cli import main
main()
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/cli.py | """
The ``jsonschema`` command line.
"""
from __future__ import absolute_import
import argparse
import json
import sys
from jsonschema import __version__
from jsonschema._reflect import namedAny
from jsonschema.validators import validator_for
def _namedAnyWithDefault(name):
if "." not in name:
name = "jsonschema." + name
return namedAny(name)
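# A minimal usage sketch (not part of this module): bare names are looked
# up inside the jsonschema package, while dotted names resolve as given
# ("mypkg" below is hypothetical).
#
#     _namedAnyWithDefault("Draft4Validator")    # -> jsonschema.Draft4Validator
#     _namedAnyWithDefault("mypkg.MyValidator")  # -> mypkg.MyValidator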
def _json_file(path):
with open(path) as file:
return json.load(file)
parser = argparse.ArgumentParser(
description="JSON Schema Validation CLI",
)
parser.add_argument(
"-i", "--instance",
action="append",
dest="instances",
type=_json_file,
help=(
"a path to a JSON instance (i.e. filename.json) "
"to validate (may be specified multiple times)"
),
)
parser.add_argument(
"-F", "--error-format",
default="{error.instance}: {error.message}\n",
help=(
"the format to use for each error output message, specified in "
"a form suitable for passing to str.format, which will be called "
"with 'error' for each error"
),
)
parser.add_argument(
"-V", "--validator",
type=_namedAnyWithDefault,
help=(
"the fully qualified object name of a validator to use, or, for "
"validators that are registered with jsonschema, simply the name "
"of the class."
),
)
parser.add_argument(
"--version",
action="version",
version=__version__,
)
parser.add_argument(
"schema",
help="the JSON Schema to validate with (i.e. schema.json)",
type=_json_file,
)
def parse_args(args):
arguments = vars(parser.parse_args(args=args or ["--help"]))
if arguments["validator"] is None:
arguments["validator"] = validator_for(arguments["schema"])
return arguments
def main(args=sys.argv[1:]):
sys.exit(run(arguments=parse_args(args=args)))
def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
error_format = arguments["error_format"]
validator = arguments["validator"](schema=arguments["schema"])
validator.check_schema(arguments["schema"])
errored = False
for instance in arguments["instances"] or ():
for error in validator.iter_errors(instance):
stderr.write(error_format.format(error=error))
errored = True
return errored
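# An illustrative invocation (the file names are hypothetical).  ``main``
# passes ``run``'s boolean result to ``sys.exit``, so the process exits 0
# when every instance validates and 1 otherwise:
#
#     $ python -m jsonschema -i sample.json sample.schema.json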
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_jsonschema_test_suite.py | """
Test runner for the JSON Schema official test suite
Tests comprehensive correctness of each draft's validator.
See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details.
"""
import sys
import warnings
from jsonschema import (
Draft3Validator,
Draft4Validator,
Draft6Validator,
Draft7Validator,
draft3_format_checker,
draft4_format_checker,
draft6_format_checker,
draft7_format_checker,
)
from jsonschema.tests._helpers import bug
from jsonschema.tests._suite import Suite
from jsonschema.validators import _DEPRECATED_DEFAULT_TYPES, create
SUITE = Suite()
DRAFT3 = SUITE.version(name="draft3")
DRAFT4 = SUITE.version(name="draft4")
DRAFT6 = SUITE.version(name="draft6")
DRAFT7 = SUITE.version(name="draft7")
def skip(message, **kwargs):
def skipper(test):
if all(value == getattr(test, attr) for attr, value in kwargs.items()):
return message
return skipper
def missing_format(checker):
def missing_format(test):
schema = test.schema
if schema is True or schema is False or "format" not in schema:
return
if schema["format"] not in checker.checkers:
return "Format checker {0!r} not found.".format(schema["format"])
return missing_format
is_narrow_build = sys.maxunicode == 2 ** 16 - 1
if is_narrow_build: # pragma: no cover
message = "Not running surrogate Unicode case, this Python is narrow."
def narrow_unicode_build(test): # pragma: no cover
return skip(
message=message,
description="one supplementary Unicode code point is not long enough",
)(test) or skip(
message=message,
description="two supplementary Unicode code points is long enough",
)(test)
else:
def narrow_unicode_build(test): # pragma: no cover
return
TestDraft3 = DRAFT3.to_unittest_testcase(
DRAFT3.tests(),
DRAFT3.optional_tests_of(name="bignum"),
DRAFT3.optional_tests_of(name="format"),
DRAFT3.optional_tests_of(name="zeroTerminatedFloats"),
Validator=Draft3Validator,
format_checker=draft3_format_checker,
skip=lambda test: (
narrow_unicode_build(test)
or missing_format(draft3_format_checker)(test)
or skip(
message="Upstream bug in strict_rfc3339",
subject="format",
description="case-insensitive T and Z",
)(test)
),
)
TestDraft4 = DRAFT4.to_unittest_testcase(
DRAFT4.tests(),
DRAFT4.optional_tests_of(name="bignum"),
DRAFT4.optional_tests_of(name="format"),
DRAFT4.optional_tests_of(name="zeroTerminatedFloats"),
Validator=Draft4Validator,
format_checker=draft4_format_checker,
skip=lambda test: (
narrow_unicode_build(test)
or missing_format(draft4_format_checker)(test)
or skip(
message=bug(),
subject="ref",
case_description="Recursive references between schemas",
)(test)
or skip(
message=bug(371),
subject="ref",
case_description="Location-independent identifier",
)(test)
or skip(
message=bug(371),
subject="ref",
case_description=(
"Location-independent identifier with absolute URI"
),
)(test)
or skip(
message=bug(371),
subject="ref",
case_description=(
"Location-independent identifier with base URI change in subschema"
),
)(test)
or skip(
message=bug(),
subject="refRemote",
case_description="base URI change - change folder in subschema",
)(test)
or skip(
message="Upstream bug in strict_rfc3339",
subject="format",
description="case-insensitive T and Z",
)(test)
),
)
TestDraft6 = DRAFT6.to_unittest_testcase(
DRAFT6.tests(),
DRAFT6.optional_tests_of(name="bignum"),
DRAFT6.optional_tests_of(name="format"),
DRAFT6.optional_tests_of(name="zeroTerminatedFloats"),
Validator=Draft6Validator,
format_checker=draft6_format_checker,
skip=lambda test: (
narrow_unicode_build(test)
or missing_format(draft6_format_checker)(test)
or skip(
message=bug(),
subject="ref",
case_description="Recursive references between schemas",
)(test)
or skip(
message=bug(371),
subject="ref",
case_description="Location-independent identifier",
)(test)
or skip(
message=bug(371),
subject="ref",
case_description=(
"Location-independent identifier with absolute URI"
),
)(test)
or skip(
message=bug(371),
subject="ref",
case_description=(
"Location-independent identifier with base URI change in subschema"
),
)(test)
or skip(
message=bug(),
subject="refRemote",
case_description="base URI change - change folder in subschema",
)(test)
or skip(
message="Upstream bug in strict_rfc3339",
subject="format",
description="case-insensitive T and Z",
)(test)
),
)
TestDraft7 = DRAFT7.to_unittest_testcase(
DRAFT7.tests(),
DRAFT7.format_tests(),
DRAFT7.optional_tests_of(name="bignum"),
DRAFT7.optional_tests_of(name="content"),
DRAFT7.optional_tests_of(name="zeroTerminatedFloats"),
Validator=Draft7Validator,
format_checker=draft7_format_checker,
skip=lambda test: (
narrow_unicode_build(test)
or missing_format(draft7_format_checker)(test)
or skip(
message=bug(),
subject="ref",
case_description="Recursive references between schemas",
)(test)
or skip(
message=bug(371),
subject="ref",
case_description="Location-independent identifier",
)(test)
or skip(
message=bug(371),
subject="ref",
case_description=(
"Location-independent identifier with absolute URI"
),
)(test)
or skip(
message=bug(371),
subject="ref",
case_description=(
"Location-independent identifier with base URI change in subschema"
),
)(test)
or skip(
message=bug(),
subject="refRemote",
case_description="base URI change - change folder in subschema",
)(test)
or skip(
message="Upstream bug in strict_rfc3339",
subject="date-time",
description="case-insensitive T and Z",
)(test)
or skip(
message=bug(593),
subject="content",
case_description=(
"validation of string-encoded content based on media type"
),
)(test)
or skip(
message=bug(593),
subject="content",
case_description="validation of binary string-encoding",
)(test)
or skip(
message=bug(593),
subject="content",
case_description=(
"validation of binary-encoded media type documents"
),
)(test)
),
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
TestDraft3LegacyTypeCheck = DRAFT3.to_unittest_testcase(
        # Interestingly, the "any" part couldn't really be done with the old API.
(
(test for test in each if test.schema != {"type": "any"})
for each in DRAFT3.tests_of(name="type")
),
name="TestDraft3LegacyTypeCheck",
Validator=create(
meta_schema=Draft3Validator.META_SCHEMA,
validators=Draft3Validator.VALIDATORS,
default_types=_DEPRECATED_DEFAULT_TYPES,
),
)
TestDraft4LegacyTypeCheck = DRAFT4.to_unittest_testcase(
DRAFT4.tests_of(name="type"),
name="TestDraft4LegacyTypeCheck",
Validator=create(
meta_schema=Draft4Validator.META_SCHEMA,
validators=Draft4Validator.VALIDATORS,
default_types=_DEPRECATED_DEFAULT_TYPES,
),
)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_cli.py | from unittest import TestCase
import json
import subprocess
import sys
from jsonschema import Draft4Validator, ValidationError, cli, __version__
from jsonschema.compat import NativeIO
from jsonschema.exceptions import SchemaError
def fake_validator(*errors):
errors = list(reversed(errors))
class FakeValidator(object):
def __init__(self, *args, **kwargs):
pass
def iter_errors(self, instance):
if errors:
return errors.pop()
return []
def check_schema(self, schema):
pass
return FakeValidator
class TestParser(TestCase):
FakeValidator = fake_validator()
instance_file = "foo.json"
schema_file = "schema.json"
def setUp(self):
cli.open = self.fake_open
self.addCleanup(delattr, cli, "open")
def fake_open(self, path):
if path == self.instance_file:
contents = ""
elif path == self.schema_file:
contents = {}
else: # pragma: no cover
self.fail("What is {!r}".format(path))
return NativeIO(json.dumps(contents))
def test_find_validator_by_fully_qualified_object_name(self):
arguments = cli.parse_args(
[
"--validator",
"jsonschema.tests.test_cli.TestParser.FakeValidator",
"--instance", self.instance_file,
self.schema_file,
]
)
self.assertIs(arguments["validator"], self.FakeValidator)
def test_find_validator_in_jsonschema(self):
arguments = cli.parse_args(
[
"--validator", "Draft4Validator",
"--instance", self.instance_file,
self.schema_file,
]
)
self.assertIs(arguments["validator"], Draft4Validator)
class TestCLI(TestCase):
def test_draft3_schema_draft4_validator(self):
stdout, stderr = NativeIO(), NativeIO()
with self.assertRaises(SchemaError):
cli.run(
{
"validator": Draft4Validator,
"schema": {
"anyOf": [
{"minimum": 20},
{"type": "string"},
{"required": True},
],
},
"instances": [1],
"error_format": "{error.message}",
},
stdout=stdout,
stderr=stderr,
)
def test_successful_validation(self):
stdout, stderr = NativeIO(), NativeIO()
exit_code = cli.run(
{
"validator": fake_validator(),
"schema": {},
"instances": [1],
"error_format": "{error.message}",
},
stdout=stdout,
stderr=stderr,
)
self.assertFalse(stdout.getvalue())
self.assertFalse(stderr.getvalue())
self.assertEqual(exit_code, 0)
def test_unsuccessful_validation(self):
error = ValidationError("I am an error!", instance=1)
stdout, stderr = NativeIO(), NativeIO()
exit_code = cli.run(
{
"validator": fake_validator([error]),
"schema": {},
"instances": [1],
"error_format": "{error.instance} - {error.message}",
},
stdout=stdout,
stderr=stderr,
)
self.assertFalse(stdout.getvalue())
self.assertEqual(stderr.getvalue(), "1 - I am an error!")
self.assertEqual(exit_code, 1)
def test_unsuccessful_validation_multiple_instances(self):
first_errors = [
ValidationError("9", instance=1),
ValidationError("8", instance=1),
]
second_errors = [ValidationError("7", instance=2)]
stdout, stderr = NativeIO(), NativeIO()
exit_code = cli.run(
{
"validator": fake_validator(first_errors, second_errors),
"schema": {},
"instances": [1, 2],
"error_format": "{error.instance} - {error.message}\t",
},
stdout=stdout,
stderr=stderr,
)
self.assertFalse(stdout.getvalue())
self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
self.assertEqual(exit_code, 1)
def test_version(self):
version = subprocess.check_output(
[sys.executable, "-m", "jsonschema", "--version"],
stderr=subprocess.STDOUT,
)
version = version.decode("utf-8").strip()
self.assertEqual(version, __version__)
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/_suite.py | """
Python representations of the JSON Schema Test Suite tests.
"""
from functools import partial
import json
import os
import re
import subprocess
import sys
import unittest
from twisted.python.filepath import FilePath
import attr
from jsonschema.compat import PY3
from jsonschema.validators import validators
import jsonschema
def _find_suite():
root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
if root is not None:
return FilePath(root)
root = FilePath(jsonschema.__file__).parent().sibling("json")
if not root.isdir(): # pragma: no cover
raise ValueError(
(
"Can't find the JSON-Schema-Test-Suite directory. "
"Set the 'JSON_SCHEMA_TEST_SUITE' environment "
"variable or run the tests from alongside a checkout "
"of the suite."
),
)
return root
@attr.s(hash=True)
class Suite(object):
_root = attr.ib(default=attr.Factory(_find_suite))
def _remotes(self):
jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"])
remotes = subprocess.check_output(
[sys.executable, jsonschema_suite.path, "remotes"],
)
return {
"http://localhost:1234/" + name: schema
for name, schema in json.loads(remotes.decode("utf-8")).items()
}
def benchmark(self, runner): # pragma: no cover
for name in validators:
self.version(name=name).benchmark(runner=runner)
def version(self, name):
return Version(
name=name,
path=self._root.descendant(["tests", name]),
remotes=self._remotes(),
)
@attr.s(hash=True)
class Version(object):
_path = attr.ib()
_remotes = attr.ib()
name = attr.ib()
def benchmark(self, runner, **kwargs): # pragma: no cover
for suite in self.tests():
for test in suite:
runner.bench_func(
test.fully_qualified_name,
partial(test.validate_ignoring_errors, **kwargs),
)
def tests(self):
return (
test
for child in self._path.globChildren("*.json")
for test in self._tests_in(
subject=child.basename()[:-5],
path=child,
)
)
def format_tests(self):
path = self._path.descendant(["optional", "format"])
return (
test
for child in path.globChildren("*.json")
for test in self._tests_in(
subject=child.basename()[:-5],
path=child,
)
)
def tests_of(self, name):
return self._tests_in(
subject=name,
path=self._path.child(name + ".json"),
)
def optional_tests_of(self, name):
return self._tests_in(
subject=name,
path=self._path.descendant(["optional", name + ".json"]),
)
def to_unittest_testcase(self, *suites, **kwargs):
name = kwargs.pop("name", "Test" + self.name.title())
methods = {
test.method_name: test.to_unittest_method(**kwargs)
for suite in suites
for tests in suite
for test in tests
}
cls = type(name, (unittest.TestCase,), methods)
try:
cls.__module__ = _someone_save_us_the_module_of_the_caller()
except Exception: # pragma: no cover
# We're doing crazy things, so if they go wrong, like a function
# behaving differently on some other interpreter, just make them
# not happen.
pass
return cls
def _tests_in(self, subject, path):
for each in json.loads(path.getContent().decode("utf-8")):
yield (
_Test(
version=self,
subject=subject,
case_description=each["description"],
schema=each["schema"],
remotes=self._remotes,
**test
) for test in each["tests"]
)
@attr.s(hash=True, repr=False)
class _Test(object):
version = attr.ib()
subject = attr.ib()
case_description = attr.ib()
description = attr.ib()
data = attr.ib()
schema = attr.ib(repr=False)
valid = attr.ib()
_remotes = attr.ib()
def __repr__(self): # pragma: no cover
return "<Test {}>".format(self.fully_qualified_name)
@property
def fully_qualified_name(self): # pragma: no cover
return " > ".join(
[
self.version.name,
self.subject,
self.case_description,
self.description,
]
)
@property
def method_name(self):
delimiters = r"[\W\- ]+"
name = "test_%s_%s_%s" % (
re.sub(delimiters, "_", self.subject),
re.sub(delimiters, "_", self.case_description),
re.sub(delimiters, "_", self.description),
)
if not PY3: # pragma: no cover
name = name.encode("utf-8")
return name
def to_unittest_method(self, skip=lambda test: None, **kwargs):
if self.valid:
def fn(this):
self.validate(**kwargs)
else:
def fn(this):
with this.assertRaises(jsonschema.ValidationError):
self.validate(**kwargs)
fn.__name__ = self.method_name
reason = skip(self)
return unittest.skipIf(reason is not None, reason)(fn)
def validate(self, Validator, **kwargs):
resolver = jsonschema.RefResolver.from_schema(
schema=self.schema,
store=self._remotes,
id_of=Validator.ID_OF,
)
jsonschema.validate(
instance=self.data,
schema=self.schema,
cls=Validator,
resolver=resolver,
**kwargs
)
def validate_ignoring_errors(self, Validator): # pragma: no cover
try:
self.validate(Validator=Validator)
except jsonschema.ValidationError:
pass
def _someone_save_us_the_module_of_the_caller():
"""
    The FQON (fully qualified object name) of the module two stack frames
    up from here.
    This is intended to allow us to dynamically return test case classes that
are indistinguishable from being defined in the module that wants them.
    Otherwise, trial will mis-print the FQON, and copy-pasting it won't re-run
the class that really is running.
Save us all, this is all so so so so so terrible.
"""
return sys._getframe(2).f_globals["__name__"]
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_exceptions.py | from unittest import TestCase
import textwrap
from jsonschema import Draft4Validator, exceptions
from jsonschema.compat import PY3
class TestBestMatch(TestCase):
def best_match(self, errors):
errors = list(errors)
best = exceptions.best_match(errors)
reversed_best = exceptions.best_match(reversed(errors))
msg = "Didn't return a consistent best match!\nGot: {0}\n\nThen: {1}"
self.assertEqual(
best._contents(), reversed_best._contents(),
msg=msg.format(best, reversed_best),
)
return best
def test_shallower_errors_are_better_matches(self):
validator = Draft4Validator(
{
"properties": {
"foo": {
"minProperties": 2,
"properties": {"bar": {"type": "object"}},
},
},
},
)
best = self.best_match(validator.iter_errors({"foo": {"bar": []}}))
self.assertEqual(best.validator, "minProperties")
def test_oneOf_and_anyOf_are_weak_matches(self):
"""
A property you *must* match is probably better than one you have to
match a part of.
"""
validator = Draft4Validator(
{
"minProperties": 2,
"anyOf": [{"type": "string"}, {"type": "number"}],
"oneOf": [{"type": "string"}, {"type": "number"}],
}
)
best = self.best_match(validator.iter_errors({}))
self.assertEqual(best.validator, "minProperties")
def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
"""
If the most relevant error is an anyOf, then we traverse its context
and select the otherwise *least* relevant error, since in this case
that means the most specific, deep, error inside the instance.
I.e. since only one of the schemas must match, we look for the most
relevant one.
"""
validator = Draft4Validator(
{
"properties": {
"foo": {
"anyOf": [
{"type": "string"},
{"properties": {"bar": {"type": "array"}}},
],
},
},
},
)
best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
self.assertEqual(best.validator_value, "array")
def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
"""
        If the most relevant error is a oneOf, then we traverse its context
and select the otherwise *least* relevant error, since in this case
that means the most specific, deep, error inside the instance.
I.e. since only one of the schemas must match, we look for the most
relevant one.
"""
validator = Draft4Validator(
{
"properties": {
"foo": {
"oneOf": [
{"type": "string"},
{"properties": {"bar": {"type": "array"}}},
],
},
},
},
)
best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
self.assertEqual(best.validator_value, "array")
def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
"""
Now, if the error is allOf, we traverse but select the *most* relevant
error from the context, because all schemas here must match anyways.
"""
validator = Draft4Validator(
{
"properties": {
"foo": {
"allOf": [
{"type": "string"},
{"properties": {"bar": {"type": "array"}}},
],
},
},
},
)
best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
self.assertEqual(best.validator_value, "string")
def test_nested_context_for_oneOf(self):
validator = Draft4Validator(
{
"properties": {
"foo": {
"oneOf": [
{"type": "string"},
{
"oneOf": [
{"type": "string"},
{
"properties": {
"bar": {"type": "array"},
},
},
],
},
],
},
},
},
)
best = self.best_match(validator.iter_errors({"foo": {"bar": 12}}))
self.assertEqual(best.validator_value, "array")
def test_one_error(self):
validator = Draft4Validator({"minProperties": 2})
error, = validator.iter_errors({})
self.assertEqual(
exceptions.best_match(validator.iter_errors({})).validator,
"minProperties",
)
def test_no_errors(self):
validator = Draft4Validator({})
self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
class TestByRelevance(TestCase):
def test_short_paths_are_better_matches(self):
shallow = exceptions.ValidationError("Oh no!", path=["baz"])
deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
match = max([shallow, deep], key=exceptions.relevance)
self.assertIs(match, shallow)
match = max([deep, shallow], key=exceptions.relevance)
self.assertIs(match, shallow)
def test_global_errors_are_even_better_matches(self):
shallow = exceptions.ValidationError("Oh no!", path=[])
deep = exceptions.ValidationError("Oh yes!", path=["foo"])
errors = sorted([shallow, deep], key=exceptions.relevance)
self.assertEqual(
[list(error.path) for error in errors],
[["foo"], []],
)
errors = sorted([deep, shallow], key=exceptions.relevance)
self.assertEqual(
[list(error.path) for error in errors],
[["foo"], []],
)
def test_weak_validators_are_lower_priority(self):
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
best_match = exceptions.by_relevance(weak="a")
match = max([weak, normal], key=best_match)
self.assertIs(match, normal)
match = max([normal, weak], key=best_match)
self.assertIs(match, normal)
def test_strong_validators_are_higher_priority(self):
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
best_match = exceptions.by_relevance(weak="a", strong="c")
match = max([weak, normal, strong], key=best_match)
self.assertIs(match, strong)
match = max([strong, normal, weak], key=best_match)
self.assertIs(match, strong)
class TestErrorTree(TestCase):
def test_it_knows_how_many_total_errors_it_contains(self):
# FIXME: https://github.com/Julian/jsonschema/issues/442
errors = [
exceptions.ValidationError("Something", validator=i)
for i in range(8)
]
tree = exceptions.ErrorTree(errors)
self.assertEqual(tree.total_errors, 8)
def test_it_contains_an_item_if_the_item_had_an_error(self):
errors = [exceptions.ValidationError("a message", path=["bar"])]
tree = exceptions.ErrorTree(errors)
self.assertIn("bar", tree)
def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
errors = [exceptions.ValidationError("a message", path=["bar"])]
tree = exceptions.ErrorTree(errors)
self.assertNotIn("foo", tree)
def test_validators_that_failed_appear_in_errors_dict(self):
error = exceptions.ValidationError("a message", validator="foo")
tree = exceptions.ErrorTree([error])
self.assertEqual(tree.errors, {"foo": error})
def test_it_creates_a_child_tree_for_each_nested_path(self):
errors = [
exceptions.ValidationError("a bar message", path=["bar"]),
exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
]
tree = exceptions.ErrorTree(errors)
self.assertIn(0, tree["bar"])
self.assertNotIn(1, tree["bar"])
def test_children_have_their_errors_dicts_built(self):
e1, e2 = (
exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
)
tree = exceptions.ErrorTree([e1, e2])
self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2})
def test_multiple_errors_with_instance(self):
e1, e2 = (
exceptions.ValidationError(
"1",
validator="foo",
path=["bar", "bar2"],
instance="i1"),
exceptions.ValidationError(
"2",
validator="quux",
path=["foobar", 2],
instance="i2"),
)
exceptions.ErrorTree([e1, e2])
def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
error = exceptions.ValidationError("123", validator="foo", instance=[])
tree = exceptions.ErrorTree([error])
with self.assertRaises(IndexError):
tree[0]
def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
"""
If a validator is dumb (like :validator:`required` in draft 3) and
refers to a path that isn't in the instance, the tree still properly
returns a subtree for that path.
"""
error = exceptions.ValidationError(
"a message", validator="foo", instance={}, path=["foo"],
)
tree = exceptions.ErrorTree([error])
self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
class TestErrorInitReprStr(TestCase):
def make_error(self, **kwargs):
defaults = dict(
message=u"hello",
validator=u"type",
validator_value=u"string",
instance=5,
schema={u"type": u"string"},
)
defaults.update(kwargs)
return exceptions.ValidationError(**defaults)
def assertShows(self, expected, **kwargs):
if PY3: # pragma: no cover
expected = expected.replace("u'", "'")
expected = textwrap.dedent(expected).rstrip("\n")
error = self.make_error(**kwargs)
message_line, _, rest = str(error).partition("\n")
self.assertEqual(message_line, error.message)
self.assertEqual(rest, expected)
def test_it_calls_super_and_sets_args(self):
error = self.make_error()
self.assertGreater(len(error.args), 1)
def test_repr(self):
self.assertEqual(
repr(exceptions.ValidationError(message="Hello!")),
"<ValidationError: %r>" % "Hello!",
)
def test_unset_error(self):
error = exceptions.ValidationError("message")
self.assertEqual(str(error), "message")
kwargs = {
"validator": "type",
"validator_value": "string",
"instance": 5,
"schema": {"type": "string"},
}
# Just the message should show if any of the attributes are unset
for attr in kwargs:
k = dict(kwargs)
del k[attr]
error = exceptions.ValidationError("message", **k)
self.assertEqual(str(error), "message")
def test_empty_paths(self):
self.assertShows(
"""
Failed validating u'type' in schema:
{u'type': u'string'}
On instance:
5
""",
path=[],
schema_path=[],
)
def test_one_item_paths(self):
self.assertShows(
"""
Failed validating u'type' in schema:
{u'type': u'string'}
On instance[0]:
5
""",
path=[0],
schema_path=["items"],
)
def test_multiple_item_paths(self):
self.assertShows(
"""
Failed validating u'type' in schema[u'items'][0]:
{u'type': u'string'}
On instance[0][u'a']:
5
""",
path=[0, u"a"],
schema_path=[u"items", 0, 1],
)
def test_uses_pprint(self):
self.assertShows(
"""
Failed validating u'maxLength' in schema:
{0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 12,
13: 13,
14: 14,
15: 15,
16: 16,
17: 17,
18: 18,
19: 19}
On instance:
[0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24]
""",
instance=list(range(25)),
schema=dict(zip(range(20), range(20))),
validator=u"maxLength",
)
def test_str_works_with_instances_having_overriden_eq_operator(self):
"""
Check for https://github.com/Julian/jsonschema/issues/164 which
rendered exceptions unusable when a `ValidationError` involved
instances with an `__eq__` method that returned truthy values.
"""
class DontEQMeBro(object):
def __eq__(this, other): # pragma: no cover
self.fail("Don't!")
def __ne__(this, other): # pragma: no cover
self.fail("Don't!")
instance = DontEQMeBro()
error = exceptions.ValidationError(
"a message",
validator="foo",
instance=instance,
validator_value="some",
schema="schema",
)
self.assertIn(repr(instance), str(error))
class TestHashable(TestCase):
def test_hashable(self):
set([exceptions.ValidationError("")])
set([exceptions.SchemaError("")])
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/_helpers.py | def bug(issue=None):
message = "A known bug."
if issue is not None:
message += " See issue #{issue}.".format(issue=issue)
return message
|
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/__init__.py | |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_format.py | """
Tests for the parts of jsonschema related to the :validator:`format` property.
"""
from unittest import TestCase
from jsonschema import FormatError, ValidationError, FormatChecker
from jsonschema.validators import Draft4Validator
BOOM = ValueError("Boom!")
BANG = ZeroDivisionError("Bang!")
def boom(thing):
if thing == "bang":
raise BANG
raise BOOM
class TestFormatChecker(TestCase):
def test_it_can_validate_no_formats(self):
checker = FormatChecker(formats=())
self.assertFalse(checker.checkers)
def test_it_raises_a_key_error_for_unknown_formats(self):
with self.assertRaises(KeyError):
FormatChecker(formats=["o noes"])
def test_it_can_register_cls_checkers(self):
original = dict(FormatChecker.checkers)
self.addCleanup(FormatChecker.checkers.pop, "boom")
FormatChecker.cls_checks("boom")(boom)
self.assertEqual(
FormatChecker.checkers,
dict(original, boom=(boom, ())),
)
def test_it_can_register_checkers(self):
checker = FormatChecker()
checker.checks("boom")(boom)
self.assertEqual(
checker.checkers,
dict(FormatChecker.checkers, boom=(boom, ()))
)
def test_it_catches_registered_errors(self):
checker = FormatChecker()
checker.checks("boom", raises=type(BOOM))(boom)
with self.assertRaises(FormatError) as cm:
checker.check(instance=12, format="boom")
self.assertIs(cm.exception.cause, BOOM)
self.assertIs(cm.exception.__cause__, BOOM)
# Unregistered errors should not be caught
with self.assertRaises(type(BANG)):
checker.check(instance="bang", format="boom")
def test_format_error_causes_become_validation_error_causes(self):
checker = FormatChecker()
checker.checks("boom", raises=ValueError)(boom)
validator = Draft4Validator({"format": "boom"}, format_checker=checker)
with self.assertRaises(ValidationError) as cm:
validator.validate("BOOM")
self.assertIs(cm.exception.cause, BOOM)
self.assertIs(cm.exception.__cause__, BOOM)
def test_format_checkers_come_with_defaults(self):
# This is bad :/ but relied upon.
        # The docs for quite a while recommended people do things like
# validate(..., format_checker=FormatChecker())
# We should change that, but we can't without deprecation...
checker = FormatChecker()
with self.assertRaises(FormatError):
checker.check(instance="not-an-ipv4", format="ipv4")
def test_repr(self):
checker = FormatChecker(formats=())
checker.checks("foo")(lambda thing: True)
checker.checks("bar")(lambda thing: True)
checker.checks("baz")(lambda thing: True)
self.assertEqual(
repr(checker),
"<FormatChecker checkers=['bar', 'baz', 'foo']>",
)
|