language | original_string
---|---
Python | def _minimize_cobyla(fun, x0, args=(), constraints=(),
rhobeg=1.0, tol=1e-4, iprint=1, maxiter=1000,
disp=False, catol=2e-4, **unknown_options):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
Options
-------
rhobeg : float
Reasonable initial changes to the variables.
tol : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
disp : bool
Set to True to print convergence messages. If False,
`verbosity` is ignored and set to 0.
maxiter : int
Maximum number of function evaluations.
catol : float
Tolerance (absolute) for constraint violations
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
rhoend = tol
if not disp:
iprint = 0
# check constraints
if isinstance(constraints, dict):
constraints = (constraints, )
for ic, con in enumerate(constraints):
# check type
try:
ctype = con['type'].lower()
except KeyError:
raise KeyError('Constraint %d has no type defined.' % ic)
except TypeError:
raise TypeError('Constraints must be defined using a '
'dictionary.')
except AttributeError:
raise TypeError("Constraint's type must be a string.")
else:
if ctype != 'ineq':
raise ValueError("Constraints of type '%s' not handled by "
"COBYLA." % con['type'])
# check function
if 'fun' not in con:
raise KeyError('Constraint %d has no function defined.' % ic)
# check extra arguments
if 'args' not in con:
con['args'] = ()
# m is the total number of constraint values
# it takes into account that some constraints may be vector-valued
cons_lengths = []
for c in constraints:
f = c['fun'](x0, *c['args'])
try:
cons_length = len(f)
except TypeError:
cons_length = 1
cons_lengths.append(cons_length)
m = sum(cons_lengths)
def calcfc(x, con):
f = fun(x, *args)
i = 0
for size, c in izip(cons_lengths, constraints):
con[i: i + size] = c['fun'](x, *c['args'])
i += size
return f
info = np.zeros(4, np.float64)
xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
rhoend=rhoend, iprint=iprint, maxfun=maxfun,
dinfo=info)
if info[3] > catol:
# Check constraint violation
info[0] = 4
return OptimizeResult(x=xopt,
status=int(info[0]),
success=info[0] == 1,
message={1: 'Optimization terminated successfully.',
2: 'Maximum number of function evaluations has '
'been exceeded.',
3: 'Rounding errors are becoming damaging in '
'COBYLA subroutine.',
4: 'Did not converge to a solution satisfying '
'the constraints. See `maxcv` for magnitude '
'of violation.'
}.get(info[0], 'Unknown exit status.'),
nfev=int(info[1]),
fun=info[2],
maxcv=info[3]) |
Python | def release(options):
"""Automate everything to be done for a release with numpy-vendor"""
# Source tarballs
sdist()
# Windows .exe installers
options.python_version = '2.7'
bdist_superpack(options)
options.python_version = '3.4'
bdist_superpack(options)
# README (gpg signed) and Changelog
write_release_and_log() |
Python | def bdist_superpack(options):
"""Build all arch specific wininst installers."""
pyver = options.python_version
def copy_bdist(arch):
# Copy the wininst in dist into the release directory
source = os.path.join('dist', wininst_name(pyver))
target = os.path.join(options.superpack.bindir, internal_wininst_name(arch))
if os.path.exists(target):
os.remove(target)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
try:
os.rename(source, target)
except OSError:
# May be due to dev version having 'Unknown' in name, if git isn't
# found. This can be the case when compiling under Wine.
ix = source.find('.dev0+') + 6
source = source[:ix] + 'Unknown' + source[ix+7:]
os.rename(source, target)
bdist_wininst_arch(pyver, 'nosse')
copy_bdist("nosse")
bdist_wininst_arch(pyver, 'sse2')
copy_bdist("sse2")
bdist_wininst_arch(pyver, 'sse3')
copy_bdist("sse3")
prepare_nsis_script(pyver, FULLVERSION)
subprocess.check_call(MAKENSIS + ['scipy-superinstaller.nsi'],
cwd=options.superpack.builddir)
# Copy the superpack into installers dir
if not os.path.exists(options.installers.installersdir):
os.makedirs(options.installers.installersdir)
source = os.path.join(options.superpack.builddir,
superpack_name(pyver, FULLVERSION))
target = os.path.join(options.installers.installersdir,
superpack_name(pyver, FULLVERSION))
shutil.copy(source, target) |
Python | def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
"""
Check that strong Wolfe conditions apply
"""
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
s, phi0, phi1, derphi0, derphi1, err_msg)
assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg)
assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg) |
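A small illustrative check of the helper above, assuming `assert_wolfe` (and the `assert_` it relies on from `numpy.testing`) is importable; the quadratic line-search function here satisfies both strong Wolfe conditions at s = 1.5.

```python
# Hedged example: phi(s) = (s - 2)^2 along the search direction.
phi = lambda s: (s - 2.0) ** 2
derphi = lambda s: 2.0 * (s - 2.0)

# phi(1.5) = 0.25 <= phi(0) + c1*1.5*phi'(0), and |phi'(1.5)| = 1 <= 0.9*|phi'(0)| = 3.6
assert_wolfe(1.5, phi, derphi, err_msg="quadratic line-search example")
```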
Python | def _resolve_arg_value_types(self, argnames, indirect):
"""Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg"
to the function, based on the ``indirect`` parameter of the parametrize() call.
:param List[str] argnames: list of argument names passed to ``parametrize()``.
:param indirect: same ``indirect`` parameter of ``parametrize()``.
:rtype: Dict[str, str]
A dict mapping each arg name to either:
* "params" if the argname should be the parameter of a fixture of the same name.
* "funcargs" if the argname should be a parameter to the parametrized test function.
"""
valtypes = {}
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
fail(
"In {}: indirect fixture '{}' doesn't exist".format(
self.function.__name__, arg
),
pytrace=False,
)
valtypes[arg] = "params"
return valtypes |
Python | def addcall(self, funcargs=None, id=NOTSET, param=NOTSET):
""" Add a new call to the underlying test function during the collection phase of a test run.
.. deprecated:: 3.3
Use :meth:`parametrize` instead.
Note that request.addcall() is called during the test collection phase, prior
to and independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
warnings.warn(deprecated.METAFUNC_ADD_CALL, stacklevel=2)
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is NOTSET:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs) |
Python | def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason) |
Python | def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except EnvironmentError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port |
Python | def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False |
Python | def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except IOError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec |
Python | def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
dir_created = False
if path is None:
import tempfile
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
if (have_unicode and isinstance(path, unicode) and
not os.path.supports_unicode_filenames):
try:
path = path.encode(sys.getfilesystemencoding() or 'ascii')
except UnicodeEncodeError:
if not quiet:
raise unittest.SkipTest('unable to encode the cwd name with '
'the filesystem encoding.')
try:
os.mkdir(path)
dir_created = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to create temp dir: ' + path,
RuntimeWarning, stacklevel=3)
if dir_created:
pid = os.getpid()
try:
yield path
finally:
# In case the process forks, let only the parent remove the
# directory. The child has a different process id. (bpo-30028)
if dir_created and pid == os.getpid():
rmtree(path) |
Python | def findfile(file, subdir=None):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
if os.path.isabs(file):
return file
if subdir is not None:
file = os.path.join(subdir, file)
path = [TEST_HOME_DIR] + sys.path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file |
Python | def transient_internet(resource_name, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
# socket.create_connection() fails randomly with
# EADDRNOTAVAIL on Travis CI.
('EADDRNOTAVAIL', 99),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Windows defines EAI_NODATA as 11001 but idiotic getaddrinfo()
# implementation actually returns WSANO_DATA i.e. 11004.
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource '%s' is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except IOError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], IOError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], IOError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout) |
Python | def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
failfast=failfast)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.testsRun and not result.skipped:
raise TestDidNotRun
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose:
err += "; run in verbose mode for details"
raise TestFailed(err) |
Python | def wait_threads_exit(timeout=60.0):
"""
bpo-31234: Context manager to wait until all threads created in the with
statement exit.
Use thread.count() to check if threads exited. Indirectly, wait until
threads exit the internal t_bootstrap() C function of the thread module.
threading_setup() and threading_cleanup() are designed to emit a warning
if a test leaves running threads in the background. This context manager
is designed to cleanup threads started by the thread.start_new_thread()
which doesn't allow to wait for thread exit, whereas thread.Thread has a
join() method.
"""
old_count = thread._count()
try:
yield
finally:
start_time = time.time()
deadline = start_time + timeout
while True:
count = thread._count()
if count <= old_count:
break
if time.time() > deadline:
dt = time.time() - start_time
msg = ("wait_threads() failed to cleanup %s "
"threads after %.1f seconds "
"(count: %s, old count: %s)"
% (count - old_count, dt, count, old_count))
raise AssertionError(msg)
time.sleep(0.010)
gc_collect() |
Python | def _crash_python():
"""Deliberate crash of Python.
Python can be killed by a segmentation fault (SIGSEGV), a bus error
(SIGBUS), or a different error depending on the platform.
Use SuppressCrashReport() to prevent a crash report from popping up.
"""
import _testcapi
with SuppressCrashReport():
_testcapi._read_null() |
Python | def fd_count():
"""Count the number of open file descriptors.
"""
if sys.platform.startswith(('linux', 'freebsd')):
try:
names = os.listdir("/proc/self/fd")
# Subtract one because listdir() internally opens a file
# descriptor to list the content of the /proc/self/fd/ directory.
return len(names) - 1
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
MAXFD = 256
if hasattr(os, 'sysconf'):
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except OSError:
pass
old_modes = None
if sys.platform == 'win32':
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
# on invalid file descriptor if Python is compiled in debug mode
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
old_modes = {}
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
try:
count = 0
for fd in range(MAXFD):
try:
# Prefer dup() over fstat(). fstat() can require input/output
# whereas dup() doesn't.
fd2 = os.dup(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
os.close(fd2)
count += 1
finally:
if old_modes is not None:
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
return count |
Python | def generate_fused_type(codes):
"""
Generate name of and cython code for a fused type.
Parameters
----------
codes : str
Valid inputs to CY_TYPES (i.e. f, d, g, ...).
"""
cytypes = map(lambda x: CY_TYPES[x], codes)
name = codes + "_number_t"
declaration = ["ctypedef fused " + name + ":"]
for cytype in cytypes:
declaration.append(" " + cytype)
declaration = "\n".join(declaration)
return name, declaration |
Python | def iter_variants(inputs, outputs):
"""
Generate variants of UFunc signatures, by changing variable types,
within the limitation that the corresponding C types casts still
work out.
This does not generate all possibilities, just the ones required
for the ufunc to work properly with the most common data types.
Parameters
----------
inputs, outputs : str
UFunc input and output signature strings
Yields
------
new_input, new_output : str
Modified input and output strings.
Also the original input/output pair is yielded.
"""
maps = [
# always use long instead of int (more common type on 64-bit)
('i', 'l'),
]
# float32-preserving signatures
if not ('i' in inputs or 'l' in inputs):
# Don't add float32 versions of ufuncs with integer arguments, as this
# can lead to incorrect dtype selection if the integer arguments are
# arrays, but float arguments are scalars.
# For instance sph_harm(0,[0],0,0).dtype == complex64
# This may be a Numpy bug, but we need to work around it.
# cf. gh-4895, https://github.com/numpy/numpy/issues/5895
maps = maps + [(a + 'dD', b + 'fF') for a, b in maps]
# do the replacements
for src, dst in maps:
new_inputs = inputs
new_outputs = outputs
for a, b in zip(src, dst):
new_inputs = new_inputs.replace(a, b)
new_outputs = new_outputs.replace(a, b)
yield new_inputs, new_outputs |
Python | def _get_conditional(self, types, codes, adverb):
"""Generate an if/elif/else clause that selects a specialization of
fused types.
"""
clauses = []
seen = set()
for (typ, typcode), code in zip(types, codes):
if len(typcode) == 1:
continue
if typ not in seen:
clauses.append("{} is {}".format(typ, underscore(CY_TYPES[code])))
seen.add(typ)
if clauses and adverb != "else":
line = "{} {}:".format(adverb, " and ".join(clauses))
elif clauses and adverb == "else":
line = "else:"
else:
line = None
return line |
Python | def _get_incallvars(self, intypes, c):
"""Generate pure input variables to a specialization,
i.e. variables that aren't used to return a value.
"""
incallvars = []
for n, intype in enumerate(intypes):
var = self.invars[n]
if c and intype == "double complex":
var = npy_cdouble_from_double_complex(var)
incallvars.append(var)
return incallvars |
Python | def _get_outcallvars(self, outtypes, c):
"""Generate output variables to a specialization,
i.e. pointers that are used to return values.
"""
outcallvars, tmpvars, casts = [], [], []
# If there are more out variables than out types, we want the
# tail of the out variables
start = len(self.outvars) - len(outtypes)
outvars = self.outvars[start:]
for n, (var, outtype) in enumerate(zip(outvars, outtypes)):
if c and outtype == "double complex":
tmp = "tmp{}".format(n)
tmpvars.append(tmp)
outcallvars.append("&{}".format(tmp))
tmpcast = double_complex_from_npy_cdouble(tmp)
casts.append("{}[0] = {}".format(var, tmpcast))
else:
outcallvars.append("{}".format(var))
return outcallvars, tmpvars, casts |
Python | def _get_nan_decs(self):
"""Set all variables to nan for specializations of fused types for
which don't have signatures.
"""
# Set non fused-type variables to nan
tab = " "*4
fused_types, lines = [], [tab + "else:"]
seen = set()
for outvar, outtype, code in zip(self.outvars, self.outtypes, self.outcodes):
if len(code) == 1:
line = "{}[0] = {}".format(outvar, NAN_VALUE[code])
lines.append(2*tab + line)
else:
fused_type = outtype
name, _ = fused_type
if name not in seen:
fused_types.append(fused_type)
seen.add(name)
if not fused_types:
return lines
# Set fused-type variables to nan
all_codes = []
for fused_type in fused_types:
_, codes = fused_type
all_codes.append(codes)
all_codes = tuple(all_codes)
codelens = list(map(lambda x: len(x), all_codes))
last = numpy.product(codelens) - 1
for m, codes in enumerate(itertools.product(*all_codes)):
fused_codes, decs = [], []
for n, fused_type in enumerate(fused_types):
code = codes[n]
fused_codes.append(underscore(CY_TYPES[code]))
for n, outvar in enumerate(self.outvars):
if self.outtypes[n] == fused_type:
line = "{}[0] = {}".format(outvar, NAN_VALUE[code])
decs.append(line)
if m == 0:
adverb = "if"
elif m == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(fused_types, codes, adverb)
lines.append(2*tab + cond)
lines.extend(map(lambda x: 3*tab + x, decs))
return lines |
Python | def _get_tmp_decs(self, all_tmpvars):
"""Generate the declarations of any necessary temporary
variables.
"""
tab = " "*4
tmpvars = list(all_tmpvars)
tmpvars.sort()
tmpdecs = []
for tmpvar in tmpvars:
line = "cdef npy_cdouble {}".format(tmpvar)
tmpdecs.append(tab + line)
return tmpdecs |
Python | def _get_python_wrap(self):
"""Generate a python wrapper for functions which pass their
arguments as pointers.
"""
tab = " "*4
body, callvars = [], []
for (intype, _), invar in zip(self.intypes, self.invars):
callvars.append("{} {}".format(intype, invar))
line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars))
body.append(line)
for (outtype, _), outvar in zip(self.outtypes, self.outvars):
line = "cdef {} {}".format(outtype, outvar)
body.append(tab + line)
addr_outvars = map(lambda x: "&{}".format(x), self.outvars)
line = "{}({}, {})".format(self.name, ", ".join(self.invars),
", ".join(addr_outvars))
body.append(tab + line)
line = "return {}".format(", ".join(self.outvars))
body.append(tab + line)
body = "\n".join(body)
return body |
Python | def _get_common(self, signum, sig):
"""Generate code common to all the _generate_* methods."""
tab = " "*4
func_name, incodes, outcodes, retcode, header = sig
# Convert ints to longs; cf. iter_variants()
incodes = incodes.replace('i', 'l')
outcodes = outcodes.replace('i', 'l')
retcode = retcode.replace('i', 'l')
if header.endswith("h"):
c = True
else:
c = False
if header.endswith("++"):
cpp = True
else:
cpp = False
intypes = list(map(lambda x: CY_TYPES[x], incodes))
outtypes = list(map(lambda x: CY_TYPES[x], outcodes))
retcode = re.sub(r'\*.*', '', retcode)
if not retcode:
retcode = 'v'
rettype = CY_TYPES[retcode]
if cpp:
# Functions from _ufuncs_cxx are exported as void*
# pointers; cast them to the correct types
func_name = "scipy.special._ufuncs_cxx._export_{}".format(func_name)
func_name = "(<{}(*)({}) nogil>{})"\
.format(rettype, ", ".join(intypes + outtypes), func_name)
else:
func_name = self.cython_func_name(func_name, specialized=True)
if signum == 0:
adverb = "if"
else:
adverb = "elif"
cond = self._get_conditional(self.intypes, incodes, adverb)
if cond:
lines = [tab + cond]
sp = 2*tab
else:
lines = []
sp = tab
return func_name, incodes, outcodes, retcode, \
intypes, outtypes, rettype, c, lines, sp |
Python | def _is_safe_size(n):
"""
Is the size of FFT such that FFTPACK can handle it in single precision
with sufficient accuracy?
Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those
"""
n = int(n)
if n == 0:
return True
# Divide by 3 until you can't, then by 5 until you can't
for c in (3, 5):
while n % c == 0:
n //= c
# Return True if the remainder is a power of 2
return not n & (n-1) |
Python | def _asfarray(x):
"""Like numpy asfarray, except that it does not modify x dtype if x is
already an array with a float dtype, and does not cast complex types to
real."""
if hasattr(x, "dtype") and x.dtype.char in numpy.typecodes["AllFloat"]:
# 'dtype' attribute does not ensure that the
# object is an ndarray (e.g. Series class
# from the pandas library)
return numpy.asarray(x, dtype=x.dtype)
else:
# We cannot use asfarray directly because it converts sequences of
# complex to sequence of real
ret = numpy.asarray(x)
if ret.dtype.char not in numpy.typecodes["AllFloat"]:
return numpy.asfarray(x)
return ret |
Python | def _raw_fft(x, n, axis, direction, overwrite_x, work_function):
""" Internal auxiliary function for fft, ifft, rfft, irfft."""
if n is None:
n = x.shape[axis]
elif n != x.shape[axis]:
x, copy_made = _fix_shape(x,n,axis)
overwrite_x = overwrite_x or copy_made
if n < 1:
raise ValueError("Invalid number of FFT data points "
"(%d) specified." % n)
if axis == -1 or axis == len(x.shape)-1:
r = work_function(x,n,direction,overwrite_x=overwrite_x)
else:
x = swapaxes(x, axis, -1)
r = work_function(x,n,direction,overwrite_x=overwrite_x)
r = swapaxes(r, axis, -1)
return r |
Python | def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function):
""" Internal auxiliary function for fftnd, ifftnd."""
if s is None:
if axes is None:
s = x.shape
else:
s = numpy.take(x.shape, axes)
s = tuple(s)
if axes is None:
noaxes = True
axes = list(range(-x.ndim, 0))
else:
noaxes = False
if len(axes) != len(s):
raise ValueError("when given, axes and shape arguments "
"have to be of the same length")
for dim in s:
if dim < 1:
raise ValueError("Invalid number of FFT data points "
"(%s) specified." % (s,))
# No need to swap axes, array is in C order
if noaxes:
for i in axes:
x, copy_made = _fix_shape(x, s[i], i)
overwrite_x = overwrite_x or copy_made
return work_function(x,s,direction,overwrite_x=overwrite_x)
# We order the axes because the code below, which pushes the axes to the end
# of the array, assumes the axes argument is in ascending order.
id = numpy.argsort(axes)
axes = [axes[i] for i in id]
s = [s[i] for i in id]
# Swap the requested axes, last first (i.e. first swap the axis which ends up
# at -1, then at -2, etc.), so that the requested axes on which the
# operation is carried out become the last ones
for i in range(1, len(axes)+1):
x = numpy.swapaxes(x, axes[-i], -i)
# We can now operate on the axes waxes, the p last axes (p = len(axes)), by
# fixing the shape of the input array to 1 for any axis the fft is not
# carried upon.
waxes = list(range(x.ndim - len(axes), x.ndim))
shape = numpy.ones(x.ndim)
shape[waxes] = s
for i in range(len(waxes)):
x, copy_made = _fix_shape(x, s[i], waxes[i])
overwrite_x = overwrite_x or copy_made
r = work_function(x, shape, direction, overwrite_x=overwrite_x)
# reswap in the reverse order (first axis first, etc...) to get original
# order
for i in range(len(axes), 0, -1):
r = numpy.swapaxes(r, -i, axes[-i])
return r |
Python | def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
"""
2-D discrete inverse Fourier transform of real or complex sequence.
Return inverse two-dimensional discrete Fourier transform of
arbitrary type sequence x.
See `ifft` for more information.
See also
--------
fft2, ifft
"""
return ifftn(x,shape,axes,overwrite_x) |
Python | def load_setuptools_entrypoints(self, group, name=None):
""" Load modules from querying the specified setuptools ``group``.
:param str group: entry point group to load plugins
:param str name: if given, loads only plugins with the given ``name``.
:rtype: int
:return: return the number of loaded plugins by this call.
"""
from pkg_resources import (
iter_entry_points,
DistributionNotFound,
VersionConflict,
)
count = 0
for ep in iter_entry_points(group, name=name):
# is the plugin registered or blocked?
if self.get_plugin(ep.name) or self.is_blocked(ep.name):
continue
try:
plugin = ep.load()
except DistributionNotFound:
continue
except VersionConflict as e:
raise PluginValidationError(
plugin=None,
message="Plugin %r could not be loaded: %s!" % (ep.name, e),
)
self.register(plugin, name=ep.name)
self._plugin_distinfo.append((plugin, ep.dist))
count += 1
return count |
Python | def add_hookcall_monitoring(self, before, after):
""" add before/after tracing functions for all hooks
and return an undo function which, when called,
will remove the added tracers.
``before(hook_name, hook_impls, kwargs)`` will be called ahead
of all hook calls and receive a hookcaller instance, a list
of HookImpl instances and the keyword arguments for the hook call.
``after(outcome, hook_name, hook_impls, kwargs)`` receives the
same arguments as ``before`` but also a :py:class:`_Result` object
which represents the result of the overall hook call.
"""
    return _tracing._TracedHookExecution(self, before, after).undo
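# Minimal, self-contained sketch (illustrative names) of the before/after
# tracer protocol documented above, using pluggy's public API.
def _example_hookcall_monitoring():
    import pluggy

    hookspec = pluggy.HookspecMarker("example")
    hookimpl = pluggy.HookimplMarker("example")

    class Spec(object):
        @hookspec
        def myhook(self, arg):
            """Return a value derived from ``arg``."""

    class Plugin(object):
        @hookimpl
        def myhook(self, arg):
            return arg + 1

    pm = pluggy.PluginManager("example")
    pm.add_hookspecs(Spec)
    pm.register(Plugin())

    def before(hook_name, hook_impls, kwargs):
        print("calling %s with %r" % (hook_name, kwargs))

    def after(outcome, hook_name, hook_impls, kwargs):
        print("%s returned %r" % (hook_name, outcome.get_result()))

    undo = pm.add_hookcall_monitoring(before, after)
    pm.hook.myhook(arg=1)   # both tracers fire around this call
    undo()                  # tracing removed again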
def _mutate(self, candidate):
"""
create a trial vector based on a mutation strategy
"""
trial = np.copy(self.population[candidate])
rng = self.random_number_generator
fill_point = rng.randint(0, self.parameter_count)
if (self.strategy == 'randtobest1exp' or
self.strategy == 'randtobest1bin'):
bprime = self.mutation_func(candidate,
self._select_samples(candidate, 5))
else:
bprime = self.mutation_func(self._select_samples(candidate, 5))
if self.strategy in self._binomial:
crossovers = rng.rand(self.parameter_count)
crossovers = crossovers < self.cross_over_probability
# the last one is always from the bprime vector for binomial
# If you fill in modulo with a loop you have to set the last one to
# true. If you don't use a loop then you can have any random entry
# be True.
crossovers[fill_point] = True
trial = np.where(crossovers, bprime, trial)
return trial
elif self.strategy in self._exponential:
i = 0
while (i < self.parameter_count and
rng.rand() < self.cross_over_probability):
trial[fill_point] = bprime[fill_point]
fill_point = (fill_point + 1) % self.parameter_count
i += 1
        return trial
def ascii_escaped(val):
"""In py2 bytes and str are the same type, so return if it's a bytes
object, return it unchanged if it is a full ascii string,
otherwise escape it into its binary form.
If it's a unicode string, change the unicode characters into
unicode escapes.
"""
if isinstance(val, bytes):
try:
return val.encode("utf-8")
except UnicodeDecodeError:
return val.decode("utf-8", "ignore").encode("utf-8", "replace")
else:
        return val.encode("utf-8", "replace")
def mktemp(self, basename, numbered=True):
"""makes a temporary directory managed by the factory"""
if not numbered:
p = self.getbasetemp().joinpath(basename)
p.mkdir()
else:
p = make_numbered_dir(root=self.getbasetemp(), prefix=basename)
self._trace("mktemp", p)
    return p
def ensuretemp(self, string, dir=1):
""" (deprecated) return temporary directory path with
the given string as the trailing part. It is usually
better to use the 'tmpdir' function argument which
provides an empty unique-per-test-invocation directory
and is guaranteed to be empty.
"""
# py.log._apiwarn(">1.1", "use tmpdir function argument")
from .deprecated import PYTEST_ENSURETEMP
warnings.warn(PYTEST_ENSURETEMP, stacklevel=2)
    return self.getbasetemp().ensure(string, dir=dir)
def mktemp(self, basename, numbered=True):
"""Create a subdirectory of the base temporary directory and return it.
If ``numbered``, ensure the directory is unique by adding a number
prefix greater than any existing one.
"""
    return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())
def tmpdir(request, tmpdir_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
"""
    return _mk_tmp(request, tmpdir_factory)
def tmp_path(request, tmp_path_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a :class:`pathlib.Path`
object.
.. note::
in python < 3.6 this is a pathlib2.Path
"""
    return _mk_tmp(request, tmp_path_factory)
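# Illustrative test (not part of the plugin itself) showing how the fixtures
# above are consumed; pytest injects ``tmp_path`` when collecting this function.
def test_tmp_path_example(tmp_path):
    target = tmp_path / "hello.txt"
    target.write_text(u"content")
    assert target.read_text() == u"content"
    assert len(list(tmp_path.iterdir())) == 1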
def filesystem_loader():
'''returns FileSystemLoader initialized to res/templates directory
'''
try:
import yatest.common
here = yatest.common.test_source_path("..")
except ImportError:
here = os.path.dirname(os.path.abspath(__file__))
    return loaders.FileSystemLoader(here + '/res/templates')
def resize(self, shape):
""" Resize the matrix in-place to dimensions given by 'shape'.
Any non-zero elements that lie outside the new shape are removed.
"""
if not isshape(shape):
raise TypeError("dimensions must be a 2-tuple of positive"
" integers")
newM, newN = shape
M, N = self.shape
if newM < M or newN < N:
# Remove all elements outside new dimensions
for (i, j) in list(self.keys()):
if i >= newM or j >= newN:
del self[i, j]
    self._shape = shape
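# Hedged sketch through the public scipy.sparse.dok_matrix class, where a
# resize method like the one above is exposed.
def _example_dok_resize():
    from scipy.sparse import dok_matrix
    m = dok_matrix((4, 4))
    m[3, 3] = 5.0
    m.resize((2, 2))            # the (3, 3) entry lies outside and is dropped
    assert m.shape == (2, 2)
    assert len(list(m.keys())) == 0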
def _list(x):
"""Force x to a list."""
if not isinstance(x, list):
x = list(x)
    return x
def _prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
    return functools.reduce(operator.mul, x)
def minidump_file_to_core(file_name):
"""
Convert minidump `file_name` to gdb stacktrace
:return stacktrace as string
"""
    return minidump_text_to_core(open(file_name).read())
def minidump_text_to_core(minidump_text):
"""
Convert minidump text to gdb stacktrace
:return stacktrace as string
"""
core_text = ''
threads = minidump_text_to_threads(minidump_text)
for thread in threads:
for line in thread:
core_text += line + '\n'
    return core_text
def minidump_text_to_threads(minidump_text):
"""
Convert minidump text to threads list
:return list of threads
"""
minidump_lines = minidump_text.splitlines()
threads = []
def process_thread(stack, active_thread_id):
"""
0|0|libpthread-2.15.so|pthread_join|/build/buildd/eglibc-2.15/nptl/pthread_join.c|89|0x15
0|1|libpthread-2.15.so||||0x9070
0|2|srch-base-7332|TThread::Join|/place/sandbox-data/srcdir/arcadia_cache_3/util/system/thread.cpp|153|0x5
0|3|srch-base-7332|THttpServer::Wait|/place/sandbox-data/srcdir/arcadia_cache_3/library/http/server/http.cpp|191|0x5
Thread 583 (Thread 0x7ff317fa2700 (LWP 2125)):
#0 0x00007ff369cf8d84 in pthread_cond_wait@@GLIBC_2.3.2 () from /lib/x86_64-linux-gnu/libpthread.so.0
#1 0x0000000000fc6453 in TCondVar::WaitD(TMutex&, TInstant) () at /place/sandbox-data/srcdir/arcadia_cache_1/util/system/condvar.cpp:104
#2 0x0000000000fd612b in TMtpQueue::TImpl::DoExecute() () at /place/sandbox-data/srcdir/arcadia_cache_1/util/system/condvar.h:33
"""
if not stack:
return
output = []
thread_id = stack[0][0]
for frame in stack:
frame_no = frame[1]
func = frame[3] if frame[3] else '??'
line = frame[4] if frame[4] else '??'
line_no = ':' + frame[5] if frame[5] else ''
output.append("#{} 0x0 in {} () from {}{}".format(frame_no, func, line, line_no))
# generate fake thread id for gdb: active thread (that is coredumped)
# should be 1st one, marked as "Thread 1"
if thread_id == active_thread_id:
gdb_thread_id = '1'
else:
gdb_thread_id = str(len(threads) + 1)
output.insert(0, "Thread {} (Thread {})".format(gdb_thread_id, hex(int(thread_id))))
output.append('')
threads.append(output)
fake_thread = None
active_thread_id = '0'
thread_id = ''
stack = []
for line in minidump_lines:
line = line.strip()
if line.startswith('OS|'):
continue
if line.startswith('CPU|'):
continue
if line.startswith('Crash|'):
# detect active thread id
# "Crash|SIGSEGV|0x452e|0"
crash = line.split('|')
signal = crash[1]
fake_thread = [
'Program terminated with signal ' + signal,
'',
]
active_thread_id = crash[3]
continue
if line.startswith('Module|'):
continue
if not line:
continue
frame = line.split('|')
if frame[0] != thread_id:
process_thread(stack, active_thread_id)
stack = []
thread_id = frame[0]
stack.append(frame)
"""
OS|Linux|0.0.0 Linux 3.10.69-25 #28 SMP Fri Feb 20 15:46:36 MSK 2015 x86_64
CPU|amd64|family 6 model 45 stepping 7|32
Crash|SIGSEGV|0x452e|0
Module|srch-base-7332||srch-base-7332|4EB3424E24B86D22FABA35D0F8D672770|0x00400000|0x1c7c6fff|1
0|0|libpthread-2.15.so|pthread_join|/build/buildd/eglibc-2.15/nptl/pthread_join.c|89|0x15
0|1|libpthread-2.15.so||||0x9070
0|2|srch-base-7332|TThread::Join|/place/sandbox-data/srcdir/arcadia_cache_3/util/system/thread.cpp|153|0x5
0|3|srch-base-7332|THttpServer::Wait|/place/sandbox-data/srcdir/arcadia_cache_3/library/http/server/http.cpp|191|0x5
0|4|srch-base-7332|THttpService::Run|/place/sandbox-data/srcdir/arcadia_cache_3/search/daemons/httpsearch/httpsearch.cpp|278|0x5
0|5|srch-base-7332|RunPureMain|/place/sandbox-data/srcdir/arcadia_cache_3/search/daemons/httpsearch/httpsearch.cpp|347|0x12
0|6|libc-2.15.so|__libc_start_main|/build/buildd/eglibc-2.15/csu/libc-start.c|226|0x17
0|7|srch-base-7332||||0x69e320
0|8|srch-base-7332|_init|||0x1130
1|0|libpthread-2.15.so||||0xbd84
"""
process_thread(stack, active_thread_id)
threads.append(fake_thread)
threads.reverse()
    return threads
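# Illustrative conversion built from the sample lines quoted in the docstring
# above (paths shortened); assumes the two converter functions above are in scope.
def _example_minidump_to_gdb():
    sample = "\n".join([
        "OS|Linux|3.10.69-25 x86_64",
        "Crash|SIGSEGV|0x452e|0",
        "0|0|libpthread-2.15.so|pthread_join|/build/pthread_join.c|89|0x15",
        "0|1|libpthread-2.15.so||||0x9070",
    ])
    # prints a gdb-style backtrace; the crashing thread becomes "Thread 1"
    print(minidump_text_to_core(sample))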
def temporary_env(newenv):
"""Completely replace the environment variables with the specified dict.
Use as a context manager::
with temporary_env({'PATH': my_path}):
...
"""
orig_env = os.environ.copy()
os.environ.clear()
os.environ.update(newenv)
try:
yield
finally:
os.environ.clear()
        os.environ.update(orig_env)
def modified_env(changes, snapshot=True):
"""Temporarily modify environment variables.
Specify the changes as a dictionary mapping names to new values, using
None as the value for names that should be deleted.
Example use::
with modified_env({'SHELL': 'bash', 'PYTHONPATH': None}):
...
When the context exits, there are two possible ways to restore the
environment. If *snapshot* is True, the default, it will reset the whole
environment to its state when the context was entered. If *snapshot* is
False, it will restore only the specific variables it modified, leaving
any changes made to other environment variables in the context.
"""
def update_del(changes):
for k, v in changes.items():
if v is None:
os.environ.pop(k, None)
else:
os.environ[k] = v
if snapshot:
saved_variables = os.environ.copy()
else:
saved_variables = {}
for k,v in changes.items():
saved_variables[k] = os.environ.get(k, None)
update_del(changes)
try:
yield
finally:
if snapshot:
os.environ.clear()
os.environ.update(saved_variables)
else:
            update_del(saved_variables)
def make_env_restorer():
"""Snapshot the current environment, return a function to restore that.
This is intended to produce cleanup functions for tests. For example,
using the :class:`unittest.TestCase` API::
def setUp(self):
self.addCleanup(testpath.make_env_restorer())
Any changes a test makes to the environment variables will be wiped out
before the next test is run.
"""
orig_env = os.environ.copy()
def restore():
os.environ.clear()
os.environ.update(orig_env)
    return restore
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M1=None, M2=None, callback=None):
"""Use Quasi-Minimal Residual iteration to solve A x = b
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real-valued N-by-N matrix of the linear system.
It is required that the linear operator can produce
``Ax`` and ``A^T x``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol : float
Tolerance to achieve. The algorithm terminates when either the relative
or the absolute residual is below `tol`.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M1 : {sparse matrix, dense matrix, LinearOperator}
Left preconditioner for A.
M2 : {sparse matrix, dense matrix, LinearOperator}
Right preconditioner for A. Used together with the left
preconditioner M1. The matrix M1*A*M2 should be better
conditioned than A alone.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
xtype : {'f','d','F','D'}
This parameter is DEPRECATED -- avoid using it.
The type of the result. If None, then it will be determined from
A.dtype.char and b. If A does not have a typecode method then it
will compute A.matvec(x0) to get a typecode. To save the extra
computation when A does not have a typecode attribute use xtype=0
for the same type as b or use xtype='f','d','F',or 'D'.
This parameter has been superseded by LinearOperator.
See Also
--------
LinearOperator
"""
A_ = A
A,M,x,b,postprocess = make_system(A,None,x0,b,xtype)
if M1 is None and M2 is None:
if hasattr(A_,'psolve'):
def left_psolve(b):
return A_.psolve(b,'left')
def right_psolve(b):
return A_.psolve(b,'right')
def left_rpsolve(b):
return A_.rpsolve(b,'left')
def right_rpsolve(b):
return A_.rpsolve(b,'right')
M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
else:
def id(b):
return b
M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
n = len(b)
if maxiter is None:
maxiter = n*10
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'qmrrevcom')
stoptest = getattr(_iterative, ltr + 'stoptest2')
resid = tol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(11*n,x.dtype)
ijob = 1
info = 0
ftflag = True
bnrm2 = -1.0
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*A.rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = M1.matvec(work[slice2])
elif (ijob == 4):
work[slice1] = M2.matvec(work[slice2])
elif (ijob == 5):
work[slice1] = M1.rmatvec(work[slice2])
elif (ijob == 6):
work[slice1] = M2.rmatvec(work[slice2])
elif (ijob == 7):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(x)
elif (ijob == 8):
if ftflag:
info = -1
ftflag = False
bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
ijob = 2
if info > 0 and iter_ == maxiter and resid > tol:
# info isn't set appropriately otherwise
info = iter_
    return postprocess(x), info
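# Hedged sketch via the public scipy.sparse.linalg.qmr entry point (this
# module's function is what that call reaches).
def _example_qmr_solve():
    import numpy as np
    from scipy.sparse import csc_matrix
    from scipy.sparse.linalg import qmr
    A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
    b = np.array([2., 4., -1.])
    x, info = qmr(A, b)
    assert info == 0                    # 0 signals successful convergence
    assert np.allclose(A.dot(x), b)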
def _monte_carlo_step(self):
"""Do one monte carlo iteration
Randomly displace the coordinates, minimize, and decide whether
or not to accept the new coordinates.
"""
# Take a random step. Make a copy of x because the step_taking
# algorithm might change x in place
x_after_step = np.copy(self.x)
x_after_step = self.step_taking(x_after_step)
# do a local minimization
minres = self.minimizer(x_after_step)
x_after_quench = minres.x
energy_after_quench = minres.fun
if not minres.success:
self.res.minimization_failures += 1
if self.disp:
print("warning: basinhopping: local minimization failure")
if hasattr(minres, "nfev"):
self.res.nfev += minres.nfev
if hasattr(minres, "njev"):
self.res.njev += minres.njev
if hasattr(minres, "nhev"):
self.res.nhev += minres.nhev
# accept the move based on self.accept_tests. If any test is False,
# then reject the step. If any test returns the special value, the
# string 'force accept', accept the step regardless. This can be used
# to forcefully escape from a local minimum if normal basin hopping
# steps are not sufficient.
accept = True
for test in self.accept_tests:
testres = test(f_new=energy_after_quench, x_new=x_after_quench,
f_old=self.energy, x_old=self.x)
if testres == 'force accept':
accept = True
break
elif not testres:
accept = False
# Report the result of the acceptance test to the take step class.
# This is for adaptive step taking
if hasattr(self.step_taking, "report"):
self.step_taking.report(accept, f_new=energy_after_quench,
x_new=x_after_quench, f_old=self.energy,
x_old=self.x)
    return accept, minres
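# Hedged illustration of the accept-test protocol described above, through the
# public scipy.optimize.basinhopping interface; a test may return True/False,
# or the special string 'force accept' to accept a step unconditionally.
def _example_bounded_accept_test():
    import numpy as np
    from scipy.optimize import basinhopping

    def accept_test(f_new, x_new, f_old, x_old):
        # reject any step that leaves the box [-2, 2]
        return bool(np.all(np.abs(x_new) <= 2.0))

    func = lambda x: np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]
    return basinhopping(func, x0=[1.0], niter=25, accept_test=accept_test)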
def one_cycle(self):
"""Do one cycle of the basinhopping algorithm
"""
self.nstep += 1
new_global_min = False
accept, minres = self._monte_carlo_step()
if accept:
self.energy = minres.fun
self.x = np.copy(minres.x)
new_global_min = self.storage.update(minres)
# print some information
if self.disp:
self.print_report(minres.fun, accept)
if new_global_min:
print("found new global minimum on step %d with function"
" value %g" % (self.nstep, self.energy))
# save some variables as BasinHoppingRunner attributes
self.xtrial = minres.x
self.energy_trial = minres.fun
self.accept = accept
    return new_global_min
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr).strip()
    return stderr
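# Quick illustrative checks (assume the function above is in scope): the
# debug-build refcount suffix disappears, other text is only whitespace-stripped.
def _example_strip_python_stderr():
    assert strip_python_stderr("some warning\n[42 refs]\n") == "some warning"
    assert strip_python_stderr("  plain text \n") == "plain text"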
def _addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Unlike unittest2 or python 2.7, cleanups are not run if setUp fails.
That is easier to implement in this subclass and is all we need.
"""
    self._cleanups.append((function, args, kwargs))
def _doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
try:
function(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
            pass
def libc_ver(executable=sys.executable,lib='',version='', chunksize=2048):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
V = _comparable_version
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
with open(executable, 'rb') as f:
binary = f.read(chunksize)
pos = 0
while pos < len(binary):
if 'libc' in binary or 'GLIBC' in binary:
m = _libc_search.search(binary, pos)
else:
m = None
if not m or m.end() == len(binary):
chunk = f.read(chunksize)
if chunk:
binary = binary[max(pos, len(binary) - 1000):] + chunk
pos = 0
continue
if not m:
break
libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif V(glibcversion) > V(version):
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and (not version or V(soversion) > V(version)):
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
    return lib,version
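# Usage sketch through the public ``platform`` module (the function above is
# platform.libc_ver); the result is system dependent, e.g. ('glibc', '2.17')
# on many Linux hosts and ('', '') where detection fails.
def _example_libc_ver():
    import platform
    lib, version = platform.libc_ver()
    print(lib, version)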
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
# check for the LSB /etc/lsb-release file first, needed so
# that the distribution doesn't get identified as Debian.
try:
with open("/etc/lsb-release", "rU") as etclsbrel:
for line in etclsbrel:
m = _distributor_id_file_re.search(line)
if m:
_u_distname = m.group(1).strip()
m = _release_file_re.search(line)
if m:
_u_version = m.group(1).strip()
m = _codename_file_re.search(line)
if m:
_u_id = m.group(1).strip()
if _u_distname and _u_version:
return (_u_distname, _u_version, _u_id)
except (EnvironmentError, UnboundLocalError):
pass
try:
etc = os.listdir('/etc')
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
f = open('/etc/'+file, 'r')
firstline = f.readline()
f.close()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
    return distname, version, id
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output and if possible the -L option
to have the command follow symlinks. It returns default in
case the command should fail.
"""
# We do the import here to avoid a bootstrap issue.
# See c73b90b6dadd changeset.
#
# [..]
# ranlib libpython2.7.a
# gcc -o python \
# Modules/python.o \
# libpython2.7.a -lsocket -lnsl -ldl -lm
# Traceback (most recent call last):
# File "./setup.py", line 8, in <module>
# from platform import machine as platform_machine
# File "[..]/build/Lib/platform.py", line 116, in <module>
# import sys,string,os,re,subprocess
# File "[..]/build/Lib/subprocess.py", line 429, in <module>
# import select
# ImportError: No module named select
import subprocess
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0]
rc = proc.wait()
if not output or rc:
return default
else:
        return output
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not filter(None, (system, node, release, version, machine)):
# Hmm, no there is either no uname or uname has returned
#'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = string.join(vminfo,', ')
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
#If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = system,node,release,version,machine,processor
    return _uname_cache
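# Illustrative call via the public ``platform`` module; entries that cannot be
# determined come back as '' (e.g. ('Linux', 'host', '3.10.0', '#1 SMP ...',
# 'x86_64', 'x86_64')).
def _example_uname():
    import platform
    system, node, release, version, machine, processor = platform.uname()
    print(system, release, machine)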
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = string.split(version, '.')
if len(l) == 2:
l.append('0')
version = string.join(l, '.')
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result | def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = string.split(version, '.')
if len(l) == 2:
l.append('0')
version = string.join(l, '.')
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result |
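For orientation, the tuple assembled here carries the same information that the standard platform module exposes through its public helpers; a quick sketch, assuming an ordinary CPython interpreter:

import platform

print(platform.python_implementation())   # e.g. 'CPython'
print(platform.python_version())          # always carries the patchlevel, e.g. '2.7.18'
print(platform.python_build())            # (buildno, builddate)
print(platform.python_compiler())         # compiler identification string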
Python | def onlarge_files(unit, *args):
"""
@usage LARGE_FILES(Files...)
    Use a large file either from the working copy or from remote storage via the placeholder <File>.remote
    If <File> is present locally (and not a symlink!) it will be copied to the build directory.
    Otherwise the macro will try to locate <File>.remote, parse it and retrieve it during the build phase.
"""
args = list(args)
for arg in args:
src = unit.resolve_arc_path(arg)
if src.startswith("$S"):
msg = "Used local large file {}. Don't forget to run 'ya upload --update-external' and commit {}.{}".format(src, src, PLACEHOLDER_EXT)
unit.message(["warn", msg])
unit.oncopy_file([arg, arg])
else:
out_file = strip_roots(os.path.join(unit.path(), arg))
external = "{}.{}".format(arg, PLACEHOLDER_EXT)
unit.on_from_external([external, out_file, 'OUT_NOAUTO', arg])
unit.onadd_check(['check.external', external]) | def onlarge_files(unit, *args):
"""
@usage LARGE_FILES(Files...)
    Use a large file either from the working copy or from remote storage via the placeholder <File>.remote
    If <File> is present locally (and not a symlink!) it will be copied to the build directory.
    Otherwise the macro will try to locate <File>.remote, parse it and retrieve it during the build phase.
"""
args = list(args)
for arg in args:
src = unit.resolve_arc_path(arg)
if src.startswith("$S"):
msg = "Used local large file {}. Don't forget to run 'ya upload --update-external' and commit {}.{}".format(src, src, PLACEHOLDER_EXT)
unit.message(["warn", msg])
unit.oncopy_file([arg, arg])
else:
out_file = strip_roots(os.path.join(unit.path(), arg))
external = "{}.{}".format(arg, PLACEHOLDER_EXT)
unit.on_from_external([external, out_file, 'OUT_NOAUTO', arg])
unit.onadd_check(['check.external', external]) |
Python | def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ['build']
if args.parallel > 1:
cmd += ['-j', str(args.parallel)]
# Install; avoid producing eggs so scipy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
env['PYTHONPATH'] = site_dir
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
                # allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except:
p.terminate()
raise
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
return site_dir | def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
site-packages directory where it was installed
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run runtests.py in "
"git checkout or unpacked source")
sys.exit(1)
dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
env = dict(os.environ)
cmd = [sys.executable, 'setup.py']
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
import distutils.sysconfig
cvars = distutils.sysconfig.get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = cvars['CC'] + ' --coverage'
env['CXX'] = cvars['CXX'] + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
cmd += ['build']
if args.parallel > 1:
cmd += ['-j', str(args.parallel)]
# Install; avoid producing eggs so scipy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
from distutils.sysconfig import get_python_lib
site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
os.makedirs(site_dir)
env['PYTHONPATH'] = site_dir
log_filename = os.path.join(ROOT_DIR, 'build.log')
if args.show_build_log:
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
else:
log_filename = os.path.join(ROOT_DIR, 'build.log')
print("Building, see build.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
                # allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
print(" ... build in progress")
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except:
p.terminate()
raise
if ret == 0:
print("Build OK")
else:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Build failed!")
sys.exit(1)
return site_dir |
Python | def fingerprint(self):
"""
Stack fingerprint: concatenation of non-common stack frames
"""
# if self.low_important():
# return ""
stack_fp = ""
for f in self.frames:
fp = f.fingerprint()
if len(fp) == 0:
continue
if fp in Stack.fingerprint_blacklist:
continue
stack_fp += fp + "\n"
stack_fp = stack_fp.strip()
return stack_fp | def fingerprint(self):
"""
Stack fingerprint: concatenation of non-common stack frames
"""
# if self.low_important():
# return ""
stack_fp = ""
for f in self.frames:
fp = f.fingerprint()
if len(fp) == 0:
continue
if fp in Stack.fingerprint_blacklist:
continue
stack_fp += fp + "\n"
stack_fp = stack_fp.strip()
return stack_fp |
Python | def hash(self):
"""
Entire stack hash for merging same stacks
"""
if self.fingerprint_hash is None:
self.fingerprint_hash = hash(self.fingerprint())
return self.fingerprint_hash | def hash(self):
"""
Entire stack hash for merging same stacks
"""
if self.fingerprint_hash is None:
self.fingerprint_hash = hash(self.fingerprint())
return self.fingerprint_hash |
Python | def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if isinstance(x, types.InstanceType):
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if isinstance(x1, _have_code):
print "Disassembly of %s:" % name
try:
dis(x1)
except TypeError, msg:
print "Sorry:", msg
print
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError, \
"don't know how to disassemble %s objects" % \
type(x).__name__ | def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if isinstance(x, types.InstanceType):
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if isinstance(x1, _have_code):
print "Disassembly of %s:" % name
try:
dis(x1)
except TypeError, msg:
print "Sorry:", msg
print
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError, \
"don't know how to disassemble %s objects" % \
type(x).__name__ |
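A minimal usage sketch in the same Python 2 style as the code above: pass anything that carries a code object and the bytecode is printed.

def add(a, b):
    return a + b

dis(add)   # add has func_code, so its code object is disassembled instruction by instruction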
Python | def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
if line_incr >= 0x80:
# line_increments is an array of 8-bit signed integers
line_incr -= 0x100
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno) | def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
if line_incr >= 0x80:
# line_increments is an array of 8-bit signed integers
line_incr -= 0x100
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno) |
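A small sketch of what the generator yields (Python 2 style, since the code above reads co_lnotab as a byte string):

def sample():
    a = 1
    b = 2
    return a + b

# Each yielded pair is (bytecode offset, source line number), e.g.
# [(0, 2), (6, 3), (12, 4)] when sample() is defined at the top of a file.
line_starts = list(findlinestarts(sample.func_code))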
Python | def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
other = self.__class__(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1,1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1,1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == other.shape[0] and self.shape[1] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == other.shape[1] and self.shape[0] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Dense matrix.
if isdense(other):
if self.shape == other.shape:
ret = self.tocoo()
ret.data = np.multiply(ret.data, other[ret.row, ret.col]
).view(np.ndarray).ravel()
return ret
# Single element.
elif other.size == 1:
return self._mul_scalar(other.flat[0])
# Anything else.
return np.multiply(self.todense(), other) | def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
other = self.__class__(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1,1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1,1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == other.shape[0] and self.shape[1] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == other.shape[1] and self.shape[0] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Dense matrix.
if isdense(other):
if self.shape == other.shape:
ret = self.tocoo()
ret.data = np.multiply(ret.data, other[ret.row, ret.col]
).view(np.ndarray).ravel()
return ret
# Single element.
elif other.size == 1:
return self._mul_scalar(other.flat[0])
# Anything else.
return np.multiply(self.todense(), other) |
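A short usage sketch against the standard scipy.sparse interface: same-shape operands take the sparse element-wise branch, while a scalar takes the _mul_scalar shortcut.

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1, 0, 2],
                         [0, 3, 0]]))
B = csr_matrix(np.array([[2, 2, 2],
                         [2, 2, 2]]))
print(A.multiply(B).toarray())    # [[2 0 4]
                                  #  [0 6 0]] -- element-wise, result stays sparse
print(A.multiply(2).toarray())    # scalar branch: every stored value doubled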
Python | def sum(self, axis=None, dtype=None, out=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out) | def sum(self, axis=None, dtype=None, out=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out) |
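Usage sketch: axis=None reduces to a scalar, while axis=0/1 return column/row sums as dense matrices (standard scipy.sparse behaviour).

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1, 0, 2],
                         [0, 3, 0]]))
print(A.sum())          # 6 -- scalar when axis is None
print(A.sum(axis=0))    # [[1 3 2]] -- column sums
print(A.sum(axis=1))    # [[3]
                        #  [3]] -- row sums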
Python | def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self,'_has_sorted_indices'):
self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_sorted_indices | def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self,'_has_sorted_indices'):
self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_sorted_indices |
Python | def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
actual_nnz = indptr[-1]
indices = indices[:actual_nnz]
data = data[:actual_nnz]
if actual_nnz < maxnnz // 2:
# too much waste, trim arrays
indices = indices.copy()
data = data.copy()
A = self.__class__((data, indices, indptr), shape=self.shape)
return A | def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
actual_nnz = indptr[-1]
indices = indices[:actual_nnz]
data = data[:actual_nnz]
if actual_nnz < maxnnz // 2:
# too much waste, trim arrays
indices = indices.copy()
data = data.copy()
A = self.__class__((data, indices, indptr), shape=self.shape)
return A |
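This helper sits behind the ordinary operators; a sketch of two operations that route through it, assuming the standard csr_matrix interface (comparisons use the boolean branch).

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1, 0], [0, 2]]))
B = csr_matrix(np.array([[0, 3], [0, 2]]))
print((A + B).toarray())     # dispatched to csr_plus_csr
print((A != B).toarray())    # '_ne_' is in bool_ops, so the result dtype is bool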
Python | def minimize_scalar(fun, bracket=None, bounds=None, args=(),
method='brent', tol=None, options=None):
"""Minimization of scalar function of one variable.
Parameters
----------
fun : callable
Objective function.
Scalar function, must return a scalar.
bracket : sequence, optional
For methods 'brent' and 'golden', `bracket` defines the bracketing
interval and can either have three items `(a, b, c)` so that `a < b
< c` and `fun(b) < fun(a), fun(c)` or two items `a` and `c` which
are assumed to be a starting interval for a downhill bracket search
(see `bracket`); it doesn't always mean that the obtained solution
will satisfy `a <= x <= c`.
bounds : sequence, optional
For method 'bounded', `bounds` is mandatory and must have two items
corresponding to the optimization bounds.
args : tuple, optional
Extra arguments passed to the objective function.
method : str or callable, optional
Type of solver. Should be one of
- 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>`
- 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>`
- 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>`
- custom - a callable object (added in version 0.14.0),
see below
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options.
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
See :func:`show_options()` for solver-specific options.
Returns
-------
res : OptimizeResult
        The optimization result represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize : Interface to minimization algorithms for scalar multivariate
functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *Brent*.
Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
algorithm to find a local minimum. The algorithm uses inverse
parabolic interpolation when possible to speed up convergence of
the golden section method.
Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
    golden section search technique. It uses an analog of the bisection
method to decrease the bracketed interval. It is usually
preferable to use the *Brent* method.
Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
perform bounded minimization. It uses the Brent method to find a
local minimum in the interval x1 < xopt < x2.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using some library frontend to minimize_scalar. You can simply
pass a callable as the ``method`` parameter.
The callable is called as ``method(fun, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `bracket`, `tol`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. The method
shall return an ``OptimizeResult`` object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
Examples
--------
Consider the problem of minimizing the following function.
>>> def f(x):
... return (x - 2) * x * (x + 2)**2
Using the *Brent* method, we find the local minimum as:
>>> from scipy.optimize import minimize_scalar
>>> res = minimize_scalar(f)
>>> res.x
1.28077640403
Using the *Bounded* method, we find a local minimum with specified
bounds as:
>>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
>>> res.x
-2.0000002026
"""
if not isinstance(args, tuple):
args = (args,)
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
if tol is not None:
options = dict(options)
if meth == 'bounded' and 'xatol' not in options:
warn("Method 'bounded' does not support relative tolerance in x; "
"defaulting to absolute tolerance.", RuntimeWarning)
options['xatol'] = tol
elif meth == '_custom':
options.setdefault('tol', tol)
else:
options.setdefault('xtol', tol)
if meth == '_custom':
return method(fun, args=args, bracket=bracket, bounds=bounds, **options)
elif meth == 'brent':
return _minimize_scalar_brent(fun, bracket, args, **options)
elif meth == 'bounded':
if bounds is None:
raise ValueError('The `bounds` parameter is mandatory for '
'method `bounded`.')
return _minimize_scalar_bounded(fun, bounds, args, **options)
elif meth == 'golden':
return _minimize_scalar_golden(fun, bracket, args, **options)
else:
raise ValueError('Unknown solver %s' % method) | def minimize_scalar(fun, bracket=None, bounds=None, args=(),
method='brent', tol=None, options=None):
"""Minimization of scalar function of one variable.
Parameters
----------
fun : callable
Objective function.
Scalar function, must return a scalar.
bracket : sequence, optional
For methods 'brent' and 'golden', `bracket` defines the bracketing
interval and can either have three items `(a, b, c)` so that `a < b
< c` and `fun(b) < fun(a), fun(c)` or two items `a` and `c` which
are assumed to be a starting interval for a downhill bracket search
(see `bracket`); it doesn't always mean that the obtained solution
will satisfy `a <= x <= c`.
bounds : sequence, optional
For method 'bounded', `bounds` is mandatory and must have two items
corresponding to the optimization bounds.
args : tuple, optional
Extra arguments passed to the objective function.
method : str or callable, optional
Type of solver. Should be one of
- 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>`
- 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>`
- 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>`
- custom - a callable object (added in version 0.14.0),
see below
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options.
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
See :func:`show_options()` for solver-specific options.
Returns
-------
res : OptimizeResult
        The optimization result represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize : Interface to minimization algorithms for scalar multivariate
functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *Brent*.
Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
algorithm to find a local minimum. The algorithm uses inverse
parabolic interpolation when possible to speed up convergence of
the golden section method.
Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
    golden section search technique. It uses an analog of the bisection
method to decrease the bracketed interval. It is usually
preferable to use the *Brent* method.
Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
perform bounded minimization. It uses the Brent method to find a
local minimum in the interval x1 < xopt < x2.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using some library frontend to minimize_scalar. You can simply
pass a callable as the ``method`` parameter.
The callable is called as ``method(fun, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `bracket`, `tol`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. The method
shall return an ``OptimizeResult`` object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
Examples
--------
Consider the problem of minimizing the following function.
>>> def f(x):
... return (x - 2) * x * (x + 2)**2
Using the *Brent* method, we find the local minimum as:
>>> from scipy.optimize import minimize_scalar
>>> res = minimize_scalar(f)
>>> res.x
1.28077640403
Using the *Bounded* method, we find a local minimum with specified
bounds as:
>>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
>>> res.x
-2.0000002026
"""
if not isinstance(args, tuple):
args = (args,)
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
if tol is not None:
options = dict(options)
if meth == 'bounded' and 'xatol' not in options:
warn("Method 'bounded' does not support relative tolerance in x; "
"defaulting to absolute tolerance.", RuntimeWarning)
options['xatol'] = tol
elif meth == '_custom':
options.setdefault('tol', tol)
else:
options.setdefault('xtol', tol)
if meth == '_custom':
return method(fun, args=args, bracket=bracket, bounds=bounds, **options)
elif meth == 'brent':
return _minimize_scalar_brent(fun, bracket, args, **options)
elif meth == 'bounded':
if bounds is None:
raise ValueError('The `bounds` parameter is mandatory for '
'method `bounded`.')
return _minimize_scalar_bounded(fun, bounds, args, **options)
elif meth == 'golden':
return _minimize_scalar_golden(fun, bracket, args, **options)
else:
raise ValueError('Unknown solver %s' % method) |
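The custom-minimizer hook described in the docstring can be exercised with any callable that accepts the keyword arguments minimize_scalar forwards and returns an OptimizeResult. The solver below is only a toy grid-refinement sketch, not part of scipy.

import numpy as np
from scipy.optimize import minimize_scalar, OptimizeResult

def grid_refine(fun, args=(), bracket=None, maxiter=40, **unused):
    # Toy solver: repeatedly shrink the bracket around the best grid point.
    a, c = (bracket[0], bracket[-1]) if bracket else (-10.0, 10.0)
    nfev = 0
    for _ in range(maxiter):
        xs = np.linspace(a, c, 11)
        fs = [fun(xi, *args) for xi in xs]
        nfev += len(xs)
        i = int(np.argmin(fs))
        a, c = xs[max(i - 1, 0)], xs[min(i + 1, len(xs) - 1)]
    xbest = 0.5 * (a + c)
    return OptimizeResult(x=xbest, fun=fun(xbest, *args), success=True, nfev=nfev + 1)

res = minimize_scalar(lambda t: (t - 2) * t * (t + 2) ** 2,
                      bracket=(-4, 4), method=grid_refine)
print(res.x)   # close to the local minimum near x = 1.28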
Python | def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
info['message'] = errors['unknown']
return sol | def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
info['message'] = errors['unknown']
return sol |
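This solver is normally reached through the public root() interface with method='hybr'; a small sketch solving a 2x2 nonlinear system:

from scipy.optimize import root

def equations(p):
    x, y = p
    return [x + 2.0 * y - 2.0, x ** 2 + y ** 2 - 1.0]

sol = root(equations, x0=[1.0, 1.0], method='hybr')   # dispatches to _root_hybr
print(sol.x, sol.success)   # one of the two intersections, (0, 1) or (0.8, 0.6)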
Python | def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
Uses the fjac and ipvt optional outputs to construct an
estimate of the jacobian around the solution. None if a
singular matrix encountered (indicates very flat curvature in
some direction). This matrix must be multiplied by the
residual variance to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
        a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
ftol, xtol, gtol, maxfev, factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible.""" % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown': ["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if info not in [1, 2, 3, 4] and not full_output:
if info in [5, 6, 7, 8]:
warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
if full_output:
cov_x = None
if info in [1, 2, 3, 4]:
from numpy.dual import inv
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
else:
return (retval[0], info) | def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
Uses the fjac and ipvt optional outputs to construct an
estimate of the jacobian around the solution. None if a
singular matrix encountered (indicates very flat curvature in
some direction). This matrix must be multiplied by the
residual variance to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
        a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
ftol, xtol, gtol, maxfev, factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible.""" % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown': ["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if info not in [1, 2, 3, 4] and not full_output:
if info in [5, 6, 7, 8]:
warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
if full_output:
cov_x = None
if info in [1, 2, 3, 4]:
from numpy.dual import inv
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
else:
return (retval[0], info) |
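A short fitting sketch: the residual function follows the convention in the Notes above, ydata minus the model.

import numpy as np
from scipy.optimize import leastsq

rng = np.random.RandomState(0)
xdata = np.linspace(0, 1, 20)
ydata = 2.0 * np.exp(0.5 * xdata) + 0.01 * rng.randn(20)

def residuals(params, x, y):
    # Fit y = a * exp(b * x); leastsq minimizes the sum of squared residuals.
    a, b = params
    return y - a * np.exp(b * x)

popt, cov_x, infodict, mesg, ier = leastsq(residuals, x0=[1.0, 0.0],
                                           args=(xdata, ydata), full_output=True)
print(popt, ier)   # roughly [2.0, 0.5]; ier in {1, 2, 3, 4} signals success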
Python | def lagrange_inversion(a):
"""Given a series
f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),
use the Lagrange inversion formula to compute a series
g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)
so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
necessarily b[0] = 0 too.
The algorithm is naive and could be improved, but speed isn't an
issue here and it's easy to read.
"""
n = len(a)
f = sum(a[i]*x**i for i in range(len(a)))
h = (x/f).series(x, 0, n).removeO()
hpower = [h**0]
for k in range(n):
hpower.append((hpower[-1]*h).expand())
b = [mp.mpf(0)]
for k in range(1, n):
b.append(hpower[k].coeff(x, k - 1)/k)
b = map(lambda x: mp.mpf(x), b)
return b | def lagrange_inversion(a):
"""Given a series
f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),
use the Lagrange inversion formula to compute a series
g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)
so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
necessarily b[0] = 0 too.
The algorithm is naive and could be improved, but speed isn't an
issue here and it's easy to read.
"""
n = len(a)
f = sum(a[i]*x**i for i in range(len(a)))
h = (x/f).series(x, 0, n).removeO()
hpower = [h**0]
for k in range(n):
hpower.append((hpower[-1]*h).expand())
b = [mp.mpf(0)]
for k in range(1, n):
b.append(hpower[k].coeff(x, k - 1)/k)
b = map(lambda x: mp.mpf(x), b)
return b |
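A worked check, assuming the module-level names the function body relies on (a sympy symbol x and mpmath imported as mp): the compositional inverse of f(t) = t - t**2 has Catalan-number coefficients.

import mpmath as mp
import sympy
x = sympy.symbols('x')   # the function body uses module-level `x` and `mp`

# f(t) = t - t**2  =>  g(t) = t + t**2 + 2*t**3 + 5*t**4 + ...
# so the computed coefficients should start 0, 1, 1, 2, 5.
a = [mp.mpf(0), mp.mpf(1), mp.mpf(-1), mp.mpf(0), mp.mpf(0)]
b = list(lagrange_inversion(a))
print(b)   # approximately [0.0, 1.0, 1.0, 2.0, 5.0]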
Python | def preprocess(config):
"""
Preprocess the data to save GPU time while training because
ground truth generation is very time consuming for FOTS.
"""
dataset = Synth800kDataset(config["data_dir"])
data_loader = DataLoader(
dataset,
num_workers=config["num_workers"],
pin_memory=True,
batch_size=config["batch_size"],
shuffle=False,
collate_fn=synth800k_collate
)
os.makedirs(os.path.join(config["output_dir"], "image"), exist_ok=True)
os.makedirs(os.path.join(config["output_dir"], "score"), exist_ok=True)
os.makedirs(os.path.join(config["output_dir"], "geo"), exist_ok=True)
os.makedirs(os.path.join(config["output_dir"], "training_mask"), exist_ok=True)
img_list, sm_list, gm_list, tm_list = [], [], [], []
for idx, batch in tqdm(enumerate(data_loader), total=len(data_loader), position=0, leave=True):
image_paths, images, score_maps, geo_maps, training_masks = batch
for pth, i, s, g, tm in zip(image_paths, images, score_maps, geo_maps, training_masks):
img_pth = pth.split("/")[-2:]
img_name = img_pth[-1].split(".")[0]
img_list.append(f"image/{img_name}.npy")
sm_list.append(f"score/{img_name}_score_map.npy")
gm_list.append(f"geo/{img_name}_geo_map.npy")
tm_list.append(f"training_mask/{img_name}_tm.npy")
np.save(f"{config['output_dir']}/image/{img_name}.npy", i.numpy().astype(np.uint8))
np.save(f"{config['output_dir']}/score/{img_name}_score_map.npy", s.numpy().astype(np.uint8))
np.save(f"{config['output_dir']}/geo/{img_name}_geo_map.npy", g.numpy().astype(np.float32))
np.save(f"{config['output_dir']}/training_mask/{img_name}_tm.npy", tm.numpy().astype(np.uint8))
if idx == config["num_iterations"]:
break
data_df = pd.DataFrame({
"images": img_list,
"score_maps": sm_list,
"geo_maps": gm_list,
"training_masks": tm_list
})
data_df.to_csv(f"{config['output_dir']}/train.csv", index=False)
print(f"Generated ground truths for {len(data_df)} images.") | def preprocess(config):
"""
Preprocess the data to save GPU time while training because
ground truth generation is very time consuming for FOTS.
"""
dataset = Synth800kDataset(config["data_dir"])
data_loader = DataLoader(
dataset,
num_workers=config["num_workers"],
pin_memory=True,
batch_size=config["batch_size"],
shuffle=False,
collate_fn=synth800k_collate
)
os.makedirs(os.path.join(config["output_dir"], "image"), exist_ok=True)
os.makedirs(os.path.join(config["output_dir"], "score"), exist_ok=True)
os.makedirs(os.path.join(config["output_dir"], "geo"), exist_ok=True)
os.makedirs(os.path.join(config["output_dir"], "training_mask"), exist_ok=True)
img_list, sm_list, gm_list, tm_list = [], [], [], []
for idx, batch in tqdm(enumerate(data_loader), total=len(data_loader), position=0, leave=True):
image_paths, images, score_maps, geo_maps, training_masks = batch
for pth, i, s, g, tm in zip(image_paths, images, score_maps, geo_maps, training_masks):
img_pth = pth.split("/")[-2:]
img_name = img_pth[-1].split(".")[0]
img_list.append(f"image/{img_name}.npy")
sm_list.append(f"score/{img_name}_score_map.npy")
gm_list.append(f"geo/{img_name}_geo_map.npy")
tm_list.append(f"training_mask/{img_name}_tm.npy")
np.save(f"{config['output_dir']}/image/{img_name}.npy", i.numpy().astype(np.uint8))
np.save(f"{config['output_dir']}/score/{img_name}_score_map.npy", s.numpy().astype(np.uint8))
np.save(f"{config['output_dir']}/geo/{img_name}_geo_map.npy", g.numpy().astype(np.float32))
np.save(f"{config['output_dir']}/training_mask/{img_name}_tm.npy", tm.numpy().astype(np.uint8))
if idx == config["num_iterations"]:
break
data_df = pd.DataFrame({
"images": img_list,
"score_maps": sm_list,
"geo_maps": gm_list,
"training_masks": tm_list
})
data_df.to_csv(f"{config['output_dir']}/train.csv", index=False)
print(f"Generated ground truths for {len(data_df)} images.") |
Python | def minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
:param points: an n * 2 matrix of coordinates
    :return: a 4 * 2 matrix of coordinates (the corners of the rectangle)
"""
from scipy.ndimage.interpolation import rotate
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
# XXX both work
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval | def minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
:param points: an n * 2 matrix of coordinates
    :return: a 4 * 2 matrix of coordinates (the corners of the rectangle)
"""
from scipy.ndimage.interpolation import rotate
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
# XXX both work
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval |
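A quick sanity-check sketch (assuming ConvexHull from scipy.spatial is imported at module level, as the function body requires): recover a rotated unit square from its corners plus an interior point.

import numpy as np

theta = np.deg2rad(30)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]], dtype=float)
points = square.dot(R.T)                    # rotate the square by 30 degrees
corners = minimum_bounding_rectangle(points)
print(np.round(corners, 3))                 # the four rotated corners, area ~1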
Python | def icdar_collate(batch):
"""
    Collate function for the ICDAR dataset. It receives a batch of ground truths
    and collates them into the required format.
"""
image_paths, img, boxes, training_mask, transcripts, score_map, geo_map = zip(*batch)
batch_size = len(score_map)
images, score_maps, geo_maps, training_masks = [], [], [], []
# convert all numpy arrays to tensors
for idx in range(batch_size):
if img[idx] is not None:
images.append(torch.from_numpy(img[idx]).permute(2, 0, 1))
score_maps.append(torch.from_numpy(score_map[idx]).permute(2, 0, 1))
geo_maps.append(torch.from_numpy(geo_map[idx]).permute(2, 0, 1))
training_masks.append(torch.from_numpy(training_mask[idx]).permute(2, 0, 1))
images = torch.stack(images, 0)
score_maps = torch.stack(score_maps, 0)
geo_maps = torch.stack(geo_maps, 0)
training_masks = torch.stack(training_masks, 0)
texts, bboxs, mapping = [], [], []
for idx, (text, bbox) in enumerate(zip(transcripts, boxes)):
for txt, box in zip(text, bbox):
mapping.append(idx)
texts.append(txt)
bboxs.append(box)
mapping = np.array(mapping)
texts = np.array(texts)
bboxs = np.stack(bboxs, axis=0)
bboxs = np.concatenate([bboxs, np.ones((len(bboxs), 1))], axis = 1).astype(np.float32)
    return image_paths, images, bboxs, training_masks, texts, score_maps, geo_maps, mapping
def synth800k_collate(batch):
    """
    Collate function for the SynthText 800k dataset. It receives a batch of
    samples and stacks them into the batched tensors required for training.
    """
image_paths, img, boxes, training_mask, transcripts, score_map, geo_map = zip(*batch)
batch_size = len(score_map)
images, score_maps, geo_maps, training_masks = [], [], [], []
# convert all numpy arrays to tensors
for idx in range(batch_size):
if img[idx] is not None:
images.append(torch.from_numpy(img[idx]).permute(2, 0, 1))
score_maps.append(torch.from_numpy(score_map[idx]).permute(2, 0, 1))
geo_maps.append(torch.from_numpy(geo_map[idx]).permute(2, 0, 1))
training_masks.append(torch.from_numpy(training_mask[idx]).permute(2, 0, 1))
images = torch.stack(images, 0)
score_maps = torch.stack(score_maps, 0)
geo_maps = torch.stack(geo_maps, 0)
training_masks = torch.stack(training_masks, 0)
    return image_paths, images, score_maps, geo_maps, training_masks
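# Example (illustrative, not from the original source): the collate functions above
# are intended to be passed to a torch DataLoader via `collate_fn`. The dataset
# variable and batch size below are assumptions for this sketch.
from torch.utils.data import DataLoader

example_loader = DataLoader(
    icdar_dataset,             # an ICDARDataset instance (assumed to exist)
    batch_size=8,
    shuffle=True,
    collate_fn=icdar_collate   # stacks images/score maps and flattens per-image boxes
)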
def l2_norm(p1, p2=np.array([0, 0])):
    """
    Calculates the L2 norm (Euclidean distance) between the given two points.
    Each point should be in (x, y) format.
    """
    return np.linalg.norm(p1 - p2)
def _point_to_line_dist(p1, p2, p3):
"""
Find perpendicular distance from point p3 to line passing through
p1 and p2.
Reference: https://stackoverflow.com/a/39840218/5353128
"""
    return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)
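# Quick sanity check (illustrative, not from the original source) for the two
# geometry helpers above, using plain numpy arrays:
pt_a, pt_b, pt_c = np.array([0.0, 0.0]), np.array([4.0, 0.0]), np.array([2.0, 3.0])
print(l2_norm(pt_a, pt_b))                  # 4.0 -> Euclidean distance between pt_a and pt_b
print(_point_to_line_dist(pt_a, pt_b, pt_c))  # 3.0 -> perpendicular distance from pt_c to the line pt_a-pt_b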
def _align_vertices(bbox):
    """
    Align (sort) the vertices of the given bbox (rectangle) so that the base of
    the rectangle forms the minimum angle with the horizontal axis.
    This is required because the same rectangle can be written in several ways
    (by cyclically rotating the vertex list), and each ordering changes which
    edge acts as the base and therefore the angle it forms.
    Reference: EAST implementation for ICDAR-2015 dataset:
    https://github.com/argman/EAST/blob/dca414de39a3a4915a019c9a02c1832a31cdd0ca/icdar.py#L352
    """
p_lowest = np.argmax(bbox[:, 1])
if np.count_nonzero(bbox[:, 1] == bbox[p_lowest, 1]) == 2:
        # Two vertices share the lowest position, so the bottom edge is parallel
        # to the X-axis and the rotation angle is 0.
        # In that case p0 is the upper-left corner (the vertex with the smallest x + y).
p0_index = np.argmin(np.sum(bbox, axis=1))
p1_index = (p0_index + 1) % 4
p2_index = (p0_index + 2) % 4
p3_index = (p0_index + 3) % 4
return bbox[[p0_index, p1_index, p2_index, p3_index]], 0.0
else:
# Find the point to the right of the lowest point.
p_lowest_right = (p_lowest - 1) % 4
angle = np.arctan(
-(bbox[p_lowest][1] - bbox[p_lowest_right][1]) / (bbox[p_lowest][0] - bbox[p_lowest_right][0])
)
if angle / np.pi * 180 > 45:
# Lowest point is p2
p2_index = p_lowest
p1_index = (p2_index - 1) % 4
p0_index = (p2_index - 2) % 4
p3_index = (p2_index + 1) % 4
return bbox[[p0_index, p1_index, p2_index, p3_index]], -(np.pi/2 - angle)
else:
# Lowest point is p3
p3_index = p_lowest
p0_index = (p3_index + 1) % 4
p1_index = (p3_index + 2) % 4
p2_index = (p3_index + 3) % 4
            return bbox[[p0_index, p1_index, p2_index, p3_index]], angle
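# Example (illustrative, not from the original source): aligning the vertices of
# an axis-aligned box given in an arbitrary (rotated) vertex order.
box = np.array([[10.0, 20.0], [10.0, 0.0], [50.0, 0.0], [50.0, 20.0]])  # vertex list starts at the bottom-left
aligned_box, rotation_angle = _align_vertices(box)
print(aligned_box[0], rotation_angle)  # [10. 0.] 0.0 -> p0 becomes the upper-left corner (image coordinates), angle 0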
def rectangle_from_parallelogram(poly):
    '''
    Fit a rectangle to the given parallelogram.
    :param poly: 4 * 2 array of parallelogram vertices
    :return: 4 * 2 float32 array of rectangle vertices
    '''
p0, p1, p2, p3 = poly
angle_p0 = np.arccos(np.dot(p1 - p0, p3 - p0) / (np.linalg.norm(p0 - p1) * np.linalg.norm(p3 - p0)))
if angle_p0 < 0.5 * np.pi:
if np.linalg.norm(p0 - p1) > np.linalg.norm(p0 - p3):
# p0 and p2
## p0
p2p3 = fit_line([p2[0], p3[0]], [p2[1], p3[1]])
p2p3_verticle = line_verticle(p2p3, p0)
new_p3 = line_cross_point(p2p3, p2p3_verticle)
## p2
p0p1 = fit_line([p0[0], p1[0]], [p0[1], p1[1]])
p0p1_verticle = line_verticle(p0p1, p2)
new_p1 = line_cross_point(p0p1, p0p1_verticle)
return np.array([p0, new_p1, p2, new_p3], dtype = np.float32)
else:
p1p2 = fit_line([p1[0], p2[0]], [p1[1], p2[1]])
p1p2_verticle = line_verticle(p1p2, p0)
new_p1 = line_cross_point(p1p2, p1p2_verticle)
p0p3 = fit_line([p0[0], p3[0]], [p0[1], p3[1]])
p0p3_verticle = line_verticle(p0p3, p2)
new_p3 = line_cross_point(p0p3, p0p3_verticle)
return np.array([p0, new_p1, p2, new_p3], dtype = np.float32)
else:
if np.linalg.norm(p0 - p1) > np.linalg.norm(p0 - p3):
# p1 and p3
## p1
p2p3 = fit_line([p2[0], p3[0]], [p2[1], p3[1]])
p2p3_verticle = line_verticle(p2p3, p1)
new_p2 = line_cross_point(p2p3, p2p3_verticle)
## p3
p0p1 = fit_line([p0[0], p1[0]], [p0[1], p1[1]])
p0p1_verticle = line_verticle(p0p1, p3)
new_p0 = line_cross_point(p0p1, p0p1_verticle)
return np.array([new_p0, p1, new_p2, p3], dtype = np.float32)
else:
p0p3 = fit_line([p0[0], p3[0]], [p0[1], p3[1]])
p0p3_verticle = line_verticle(p0p3, p1)
new_p0 = line_cross_point(p0p3, p0p3_verticle)
p1p2 = fit_line([p1[0], p2[0]], [p1[1], p2[1]])
p1p2_verticle = line_verticle(p1p2, p3)
new_p2 = line_cross_point(p1p2, p1p2_verticle)
            return np.array([new_p0, p1, new_p2, p3], dtype=np.float32)
def resize_image(image, image_size=512):
"""
Resize the given image to image_size * image_size
shaped square image.
"""
# First pad the given image to match the image_size or image's larger
# side (whichever is larger). [Create a square image]
img_h, img_w, _ = image.shape
max_size = max(image_size, img_w, img_h)
# Create new square image of appropriate size
img_padded = np.zeros((max_size, max_size, 3), dtype=np.float32)
# Copy the original image into new image
# (basically, new image is padded version of original image).
img_padded[:img_h, :img_w, :] = image.copy()
img_h, img_w, _ = img_padded.shape
    # If image_size is larger than both image sides, the padded image already has
    # size image_size * image_size; otherwise the padded image is shrunk here.
    # Padding first keeps the aspect ratio of the content intact even after the
    # square resize.
img_padded = cv2.resize(img_padded, dsize=(image_size, image_size))
# We need the ratio of resized image width and heights to its
# older dimensions to scale the bounding boxes accordingly
scale_x = image_size / img_w
scale_y = image_size / img_h
    return img_padded, scale_x, scale_y
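# Example (illustrative, not from the original source): resizing an image to the
# 512 x 512 network input size and keeping the scale factors that the dataset
# loaders use to rescale the ground-truth boxes.
dummy_image = np.zeros((480, 640, 3), dtype=np.float32)  # H x W x C
resized, sx, sy = resize_image(dummy_image, image_size=512)
print(resized.shape, sx, sy)  # (512, 512, 3) 0.8 0.8 -> both factors are 512 / max(640, 480)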
def _cross_entropy_loss(self, y_true_clf, y_pred_clf, training_mask):
"""
Calculates cross entropy loss between per pixel prediction score map
and ground truths.
"""
    return torch.nn.functional.binary_cross_entropy(y_pred_clf * training_mask, y_true_clf * training_mask)
def encode(self, text):
"""
Support batch or single str.
Args:
text (str or list of str): texts to convert.
Returns:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
"""
if isinstance(text, str):
text = [
self.dict.get(char.lower() if self.ignore_case else char, self.dict['-'])
for char in text
]
length = [len(text)]
    elif isinstance(text, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
length = [len(s) for s in text]
text = ''.join(text)
text, _ = self.encode(text)
    return (torch.tensor(text), torch.tensor(length))
def decode(self, t, length, raw=False):
"""
Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
AssertionError: when the texts and its length does not match.
Returns:
text (str or list of str): texts to convert.
"""
if length.numel() == 1:
length = length.item()
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
if raw:
return ''.join([self.alphabet[i - 1] for i in t])
else:
char_list = []
for i in range(length):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 1])
return ''.join(char_list)
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
        return texts
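# Example (illustrative, not from the original source): the encode/decode methods
# above belong to a CTC-style label converter. The class name `LabelConverter`
# and its constructor signature are assumptions for this sketch.
converter = LabelConverter(alphabet="abcdefghijklmnopqrstuvwxyz0123456789")
encoded, lengths = converter.encode(["hello", "world"])   # flat IntTensor of labels plus per-text lengths
decoded = converter.decode(encoded, lengths, raw=True)    # ['hello', 'world']; raw=False would also collapse repeated labels (CTC decoding)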
def train(self):
"""Transition the FOTS model to training mode."""
# self.recognizer.train()
self.detector.train()
    self.shared_conv.train()
def eval(self):
"""Transition the FOTS model to evaluation mode."""
# self.recognizer.eval()
self.detector.eval()
    self.shared_conv.eval()
def is_training(self):
"""Check whether the FOTS model is in training mode."""
return (
self.detector.training
# and self.recognizer.training
and self.shared_conv.training
    )
def _load_from_file(self, image_path, gt_path):
"""
Load the image and corresponding ground truth from
the file using given paths.
"""
# Load the image
image = cv2.imread(os.path.join(self.image_dir, image_path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
# image /= 255.0 # Normalize
# Resize the image to required size
image, scale_x, scale_y = resize_image(image, self.image_size)
# Extract ground truth bboxes
# Reference: https://stackoverflow.com/a/49150749/5353128
with open(os.path.join(self.gt_dir, gt_path), 'r', encoding='utf-8-sig') as file:
content = file.read().split('\n')
# Removing empty lines (possibly the last line in CSV)
content = [line for line in content if line]
# Extract bboxes and convert them to numpy array of size n_box * 4 * 2
# where 4 is four coordinates of rectangle and 2 is for x and y components
# of each coordinate
bboxes = list(map(lambda str: str.split(',')[:-1], content))
bboxes = np.array([np.array(bbox)[:8].reshape(4, 2) for bbox in bboxes]).astype(np.float32)
transcript = np.array(
list(map(lambda str: str.split(',')[-1], content)), dtype='object'
)
# Scale the bounding boxes as per the resized image
# This is required because after resize, the position of the texts
# would have changed. Shape of bboxes: n_words * 4 * 2
bboxes[:, :, 0] *= scale_x # scale x coordinate
bboxes[:, :, 1] *= scale_y # scale y coordinate
    return image, bboxes, transcript
def _load_from_file(self, image_path, bboxes):
"""Load the image from the file using given path."""
# Load the image
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
# image /= 255.0 # Normalize
# Resize the image to required size
image, scale_x, scale_y = resize_image(image, self.image_size)
# Scale the bounding boxes as per the resized image
# This is required because after resize, the position of the texts
# would have changed. Bboxes shape: 2 * 4 * n_words
if len(bboxes.shape) < 3:
bboxes = bboxes[:, :, np.newaxis]
bboxes[0, :, :] *= scale_x # scale x coordinate
bboxes[1, :, :] *= scale_y # scale y coordinate
bboxes = np.moveaxis(bboxes, [0, 2], [2, 0])
    return image, bboxes
def forward(self, feature_map, boxes, mapping):
    """
    Affine-transform (ROI rotate) each predicted text box so that its region of
    the shared feature map becomes an axis-aligned, fixed-height crop.
    """
max_width = 0
boxes_width = []
matrixes = []
images = []
for img_index, box in zip(mapping, boxes):
        feature = feature_map[img_index]  # feature map slice for this image: C * H * W
images.append(feature)
x1, y1, x2, y2, x3, y3, x4, y4 = box / 4 # 512 -> 128
rotated_rect = cv2.minAreaRect(np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]]))
box_w, box_h = rotated_rect[1][0], rotated_rect[1][1]
width = feature.shape[2]
height = feature.shape[1]
if box_w <= box_h:
box_w, box_h = box_h, box_w
mapped_x1, mapped_y1 = (0, 0)
mapped_x4, mapped_y4 = (0, self.height)
width_box = math.ceil(self.height * box_w / box_h)
width_box = min(width_box, width) # not to exceed feature map's width
max_width = width_box if width_box > max_width else max_width
mapped_x2, mapped_y2 = (width_box, 0)
src_pts = np.float32([(x1, y1), (x2, y2),(x4, y4)])
dst_pts = np.float32([
(mapped_x1, mapped_y1), (mapped_x2, mapped_y2), (mapped_x4, mapped_y4)
])
affine_matrix = cv2.getAffineTransform(src_pts.astype(np.float32), dst_pts.astype(np.float32))
affine_matrix = ROIRotate.param2theta(affine_matrix, width, height)
        affine_matrix *= 1e20  # scale up to limit precision loss in the numpy -> tensor conversion
        affine_matrix = torch.tensor(affine_matrix, device=feature.device, dtype=torch.float)
        affine_matrix /= 1e20  # scale back down after conversion
matrixes.append(affine_matrix)
boxes_width.append(width_box)
matrixes = torch.stack(matrixes)
images = torch.stack(images)
grid = nn.functional.affine_grid(matrixes, images.size())
feature_rotated = nn.functional.grid_sample(images, grid)
channels = feature_rotated.shape[1]
cropped_images_padded = torch.zeros((len(feature_rotated), channels, self.height, max_width),
dtype=feature_rotated.dtype,
device=feature_rotated.device)
for i in range(feature_rotated.shape[0]):
w = boxes_width[i]
if max_width == w:
cropped_images_padded[i] = feature_rotated[i, :, 0:self.height, 0:w]
else:
padded_part = torch.zeros((channels, self.height, max_width - w),
dtype=feature_rotated.dtype,
device=feature_rotated.device)
cropped_images_padded[i] = torch.cat([feature_rotated[i, :, 0:self.height, 0: w], padded_part], dim=-1)
lengths = np.array(boxes_width)
    indices = np.argsort(lengths)  # sort crops by width because pack_padded_sequence expects descending lengths
indices = indices[::-1].copy() # descending order
lengths = lengths[indices]
cropped_images_padded = cropped_images_padded[indices]
    return cropped_images_padded, lengths, indices
def main(config):
"""Main entry point of train module."""
# Initialize the dataset
# Full dataset
# dataset = ICDARDataset('/content/ch4_training_images', '/content/ch4_training_localization_transcription_gt')
data_df = pd.read_csv(f"{config['data_base_dir']}/train.csv")
dataset = Synth800kPreprocessedDataset(config["data_base_dir"], data_df)
# Train test split
val_size = config["val_fraction"]
val_len = int(val_size * len(dataset))
train_len = len(dataset) - val_len
icdar_train_dataset, icdar_val_dataset = torch.utils.data.random_split(
dataset, [train_len, val_len]
)
icdar_train_data_loader = DataLoader(
icdar_train_dataset,
pin_memory=True,
**config["dataset_config"],
worker_init_fn=seed_worker
# collate_fn=icdar_collate
)
icdar_val_data_loader = DataLoader(
icdar_val_dataset,
**config["dataset_config"],
pin_memory=True,
worker_init_fn=seed_worker
# collate_fn=icdar_collate
)
# Initialize the model
model = FOTSModel()
# Count trainable parameters
print(f'The model has {count_parameters(model):,} trainable parameters.')
loss = FOTSLoss(config)
optimizer = model.get_optimizer(config["optimizer"], config["optimizer_config"])
    # Fall back to the ReduceLROnPlateau class itself (a string fallback would not be callable).
    lr_schedular = getattr(
        optim.lr_scheduler, config["lr_schedular"], optim.lr_scheduler.ReduceLROnPlateau
    )(optimizer, **config["lr_scheduler_config"])
trainer = Train(
model, icdar_train_data_loader, icdar_val_data_loader, loss,
fots_metric, optimizer, lr_schedular, config
)
    trainer.train()
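# A minimal sketch (an assumption, not the project's actual config file) of the
# keys that main() reads directly; FOTSLoss and Train may expect additional keys.
example_config = {
    "data_base_dir": "/data/synth800k_preprocessed",
    "val_fraction": 0.1,
    "dataset_config": {"batch_size": 8, "num_workers": 4, "shuffle": True},
    "optimizer": "Adam",
    "optimizer_config": {"lr": 1e-3},
    "lr_schedular": "ReduceLROnPlateau",
    "lr_scheduler_config": {"mode": "min", "patience": 3},
}
# main(example_config)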
def seed_all(seed=28):
"""Seed everything for result reproducibility."""
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
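# `seed_worker` is passed to the DataLoaders in main() but is not shown in this
# file; a common implementation (an assumption, not necessarily the original) is:
def seed_worker(worker_id):
    """Seed numpy/random inside each DataLoader worker for reproducibility."""
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)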
def _extract_features(self, x):
"""Extract features from given input and backbone."""
x = self.back_bone.conv1(x)
x = self.back_bone.bn1(x)
x = self.back_bone.relu(x)
x = self.back_bone.maxpool(x)
res2 = self.back_bone.layer1(x)
res3 = self.back_bone.layer2(res2)
res4 = self.back_bone.layer3(res3)
res5 = self.back_bone.layer4(res4)
    return res5, res4, res3, res2
def _mean_image_subtraction(self, images, means=[123.68, 116.78, 103.94]):
"""
Image Standardization. Subtracts the mean from the given image.
"""
num_channels = images.data.shape[1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
for i in range(num_channels):
images.data[:, i, :, :] -= means[i]
    return images
def _deconv(self, feature):
"""
Apply deconv operation (inverse of pooling) on given feature map.
"""
# Upsample the given feature.
# Doc: https://pytorch.org/docs/stable/nn.functional.html#interpolate
return F.interpolate(
feature,
mode='bilinear',
scale_factor=2, # As per the paper
align_corners = True
    )
def _load_model(model_path):
"""Load model from given path to available device."""
model = FOTSModel()
model.to(DEVICE)
model.load_state_dict(torch.load(model_path, map_location=DEVICE))
    return model
def _load_image(image_path):
    """Load an image from the given path."""
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image, _, _ = resize_image(image, 512)
image = torch.from_numpy(image[np.newaxis, :, :, :]).permute(0, 3, 1, 2)
image = image.to(DEVICE)
    return image
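# Example (illustrative, not from the original source): putting the two helpers
# above together for a single-image forward pass. The checkpoint and image paths
# are hypothetical, and the output format depends on FOTSModel.forward.
model = _load_model("checkpoints/fots_best.pth")  # hypothetical checkpoint path
model.eval()
image_tensor = _load_image("samples/demo.jpg")    # hypothetical image path
with torch.no_grad():
    predictions = model(image_tensor)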