language | original_string | text
stringclasses (6 values) | stringlengths (25 to 887k) | stringlengths (25 to 887k)
---|---|---|
Python | def values(self, n):
"""Return an array containing approximatively `n` numbers."""
n1 = max(2, int(0.3*n))
n2 = max(2, int(0.2*n))
n3 = max(8, n - n1 - n2)
v1 = np.linspace(-1, 1, n1)
v2 = np.r_[np.linspace(-10, 10, max(0, n2-4)),
-9, -5.5, 5.5, 9]
if self.a >= 0 and self.b > 0:
v3 = np.r_[
np.logspace(-30, -1, 2 + n3//4),
np.logspace(5, np.log10(self.b), 1 + n3//4),
]
v4 = np.logspace(1, 5, 1 + n3//2)
elif self.a < 0 < self.b:
v3 = np.r_[
np.logspace(-30, -1, 2 + n3//8),
np.logspace(5, np.log10(self.b), 1 + n3//8),
-np.logspace(-30, -1, 2 + n3//8),
-np.logspace(5, np.log10(-self.a), 1 + n3//8)
]
v4 = np.r_[
np.logspace(1, 5, 1 + n3//4),
-np.logspace(1, 5, 1 + n3//4)
]
elif self.b < 0:
v3 = np.r_[
-np.logspace(-30, -1, 2 + n3//4),
-np.logspace(5, np.log10(-self.b), 1 + n3//4),
]
v4 = -np.logspace(1, 5, 1 + n3//2)
else:
v3 = []
v4 = []
v = np.r_[v1, v2, v3, v4, 0]
if self.inclusive_a:
v = v[v >= self.a]
else:
v = v[v > self.a]
if self.inclusive_b:
v = v[v <= self.b]
else:
v = v[v < self.b]
return np.unique(v)
"""Return an array containing approximatively `n` numbers."""
n1 = max(2, int(0.3*n))
n2 = max(2, int(0.2*n))
n3 = max(8, n - n1 - n2)
v1 = np.linspace(-1, 1, n1)
v2 = np.r_[np.linspace(-10, 10, max(0, n2-4)),
-9, -5.5, 5.5, 9]
if self.a >= 0 and self.b > 0:
v3 = np.r_[
np.logspace(-30, -1, 2 + n3//4),
np.logspace(5, np.log10(self.b), 1 + n3//4),
]
v4 = np.logspace(1, 5, 1 + n3//2)
elif self.a < 0 < self.b:
v3 = np.r_[
np.logspace(-30, -1, 2 + n3//8),
np.logspace(5, np.log10(self.b), 1 + n3//8),
-np.logspace(-30, -1, 2 + n3//8),
-np.logspace(5, np.log10(-self.a), 1 + n3//8)
]
v4 = np.r_[
np.logspace(1, 5, 1 + n3//4),
-np.logspace(1, 5, 1 + n3//4)
]
elif self.b < 0:
v3 = np.r_[
-np.logspace(-30, -1, 2 + n3//4),
-np.logspace(5, np.log10(-self.b), 1 + n3//4),
]
v4 = -np.logspace(1, 5, 1 + n3//2)
else:
v3 = []
v4 = []
v = np.r_[v1, v2, v3, v4, 0]
if self.inclusive_a:
v = v[v >= self.a]
else:
v = v[v > self.a]
if self.inclusive_b:
v = v[v <= self.b]
else:
v = v[v < self.b]
return np.unique(v) |
Python | def mpf2float(x):
"""
Convert an mpf to the nearest floating point number. Just using
float directly doesn't work because of results like this:
with mp.workdps(50):
float(mpf("0.99999999999999999")) = 0.9999999999999999
"""
return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0))
"""
Convert an mpf to the nearest floating point number. Just using
float directly doesn't work because of results like this:
with mp.workdps(50):
float(mpf("0.99999999999999999")) = 0.9999999999999999
"""
return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)) |
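A small usage sketch (assuming mpmath is installed) of the rounding problem the docstring describes: at high working precision, plain float() can miss the nearest double, while the 17-digit string round-trip recovers it.

import mpmath

with mpmath.workdps(50):
    x = mpmath.mpf("0.99999999999999999")
    print(float(x))      # 0.9999999999999999, as noted in the docstring
    print(mpf2float(x))  # 1.0, the nearest double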
Python | def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
"""
Decorator for setting a timeout for pure-Python functions.
If the function does not return within `timeout` seconds, the
value `return_val` is returned instead.
On POSIX this uses SIGALRM by default; on non-POSIX platforms, settrace
is used instead. Do not use this with threads: the SIGALRM implementation
probably does not work well with them, and the settrace implementation
only traces the current thread.
The settrace implementation also slows down execution; a slowdown by a
factor of around 10 is typical.
"""
if POSIX and use_sigalrm:
def sigalrm_handler(signum, frame):
raise TimeoutError()
def deco(func):
def wrap(*a, **kw):
old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
try:
return func(*a, **kw)
except TimeoutError:
return return_val
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old_handler)
return wrap
else:
def deco(func):
def wrap(*a, **kw):
start_time = time.time()
def trace(frame, event, arg):
if time.time() - start_time > timeout:
raise TimeoutError()
return None # turn off tracing except at function calls
sys.settrace(trace)
try:
return func(*a, **kw)
except TimeoutError:
sys.settrace(None)
return return_val
finally:
sys.settrace(None)
return wrap
return deco
"""
Decorator for setting a timeout for pure-Python functions.
If the function does not return within `timeout` seconds, the
value `return_val` is returned instead.
On POSIX this uses SIGALRM by default. On non-POSIX, settrace is
used. Do not use this with threads: the SIGALRM implementation
does probably not work well. The settrace implementation only
traces the current thread.
The settrace implementation slows down execution speed. Slowdown
by a factor around 10 is probably typical.
"""
if POSIX and use_sigalrm:
def sigalrm_handler(signum, frame):
raise TimeoutError()
def deco(func):
def wrap(*a, **kw):
old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
try:
return func(*a, **kw)
except TimeoutError:
return return_val
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old_handler)
return wrap
else:
def deco(func):
def wrap(*a, **kw):
start_time = time.time()
def trace(frame, event, arg):
if time.time() - start_time > timeout:
raise TimeoutError()
return None # turn off tracing except at function calls
sys.settrace(trace)
try:
return func(*a, **kw)
except TimeoutError:
sys.settrace(None)
return return_val
finally:
sys.settrace(None)
return wrap
return deco |
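A minimal, hypothetical usage sketch: the decorated function loops through Python-level calls long enough to exceed the timeout, so either implementation (SIGALRM or settrace) has a chance to abort it and return the fallback value. The helper name _step is illustrative only.

import numpy as np

def _step(k):
    return k * k  # a Python-level call so the settrace fallback can fire

@time_limited(timeout=0.1, return_val=np.nan)
def slow_sum():
    total = 0
    for k in range(10**8):
        total += _step(k)
    return total

print(slow_sum())  # expected: nan, since the loop takes far longer than 0.1 s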
Python | def exception_to_nan(func):
"""Decorate function to return nan if it raises an exception"""
def wrap(*a, **kw):
try:
return func(*a, **kw)
except Exception:
return np.nan
return wrap
"""Decorate function to return nan if it raises an exception"""
def wrap(*a, **kw):
try:
return func(*a, **kw)
except Exception:
return np.nan
return wrap |
Python | def inf_to_nan(func):
"""Decorate function to return nan if it returns inf"""
def wrap(*a, **kw):
v = func(*a, **kw)
if not np.isfinite(v):
return np.nan
return v
return wrap
"""Decorate function to return nan if it returns inf"""
def wrap(*a, **kw):
v = func(*a, **kw)
if not np.isfinite(v):
return np.nan
return v
return wrap |
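A short sketch combining the two decorators above: exceptions and infinities are both mapped to nan, which is convenient when tabulating reference values.

import numpy as np

@exception_to_nan
def reciprocal(x):
    return 1.0 / x          # raises ZeroDivisionError at x == 0

@inf_to_nan
def big_exp(x):
    return np.exp(x)        # overflows to inf for large x

print(reciprocal(0.0))      # nan instead of an exception
print(big_exp(1000.0))      # nan instead of inf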
Python | def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
"""
Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
can be done to higher precision than double.
"""
try:
len(res)
except TypeError:
res = list(res)
n = len(std)
if len(res) != n:
raise AssertionError("Lengths of inputs not equal.")
failures = []
for k in range(n):
try:
assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k]))
except AssertionError:
failures.append(k)
ndigits = int(abs(np.log10(rtol)))
msg = [""]
msg.append("Bad results ({} out of {}) for the following points:"
.format(len(failures), n))
for k in failures:
resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
if std[k] == 0:
rdiff = "inf"
else:
rdiff = mpmath.fabs((res[k] - std[k])/std[k])
rdiff = mpmath.nstr(rdiff, 3)
msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
if failures:
assert_(False, "\n".join(msg))
"""
Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
can be done to higher precision than double.
"""
try:
len(res)
except TypeError:
res = list(res)
n = len(std)
if len(res) != n:
raise AssertionError("Lengths of inputs not equal.")
failures = []
for k in range(n):
try:
assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k]))
except AssertionError:
failures.append(k)
ndigits = int(abs(np.log10(rtol)))
msg = [""]
msg.append("Bad results ({} out of {}) for the following points:"
.format(len(failures), n))
for k in failures:
resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
if std[k] == 0:
rdiff = "inf"
else:
rdiff = mpmath.fabs((res[k] - std[k])/std[k])
rdiff = mpmath.nstr(rdiff, 3)
msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
if failures:
assert_(False, "\n".join(msg)) |
Python | def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if sitedircase not in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths |
Python | def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True |
Python | def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has their own Python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths |
Python | def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs' |
Python | def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback" | def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback" |
Python | def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback" | def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback" |
Python | def power(self, n, dtype=None):
"""
This function performs element-wise power.
Parameters
----------
n : scalar
dtype : dtype, optional. If dtype is not specified, the current dtype will be preserved.
"""
if not isscalarlike(n):
raise NotImplementedError("input is not scalar")
data = self._deduped_data()
if dtype is not None:
data = data.astype(dtype)
return self._with_data(data ** n)
"""
This function performs element-wise power.
Parameters
----------
n : n is a scalar
dtype : If dtype is not specified, the current dtype will be preserved.
"""
if not isscalarlike(n):
raise NotImplementedError("input is not scalar")
data = self._deduped_data()
if dtype is not None:
data = data.astype(dtype)
return self._with_data(data ** n) |
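A usage sketch on a CSR matrix (one of the data-holding sparse classes that provide this method): the power is applied element-wise to the stored values.

from scipy.sparse import csr_matrix

A = csr_matrix([[1, 2, 0], [0, 3, 4]])
print(A.power(2).toarray())
# [[ 1  4  0]
#  [ 0  9 16]]
print(A.power(0.5, dtype=float).toarray()[1, 2])  # 2.0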
Python | def reshape(self, shape, order='C'):
"""
Gives a new shape to a sparse matrix without changing its data.
Parameters
----------
shape : length-2 tuple of ints
The new shape should be compatible with the original shape.
order : 'C', optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value, as this argument is not used.
Returns
-------
reshaped_matrix : `self` with the new dimensions of `shape`
See Also
--------
np.matrix.reshape : NumPy's implementation of 'reshape' for matrices
"""
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__)
"""
Gives a new shape to a sparse matrix without changing its data.
Parameters
----------
shape : length-2 tuple of ints
The new shape should be compatible with the original shape.
order : 'C', optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value, as this argument is not used.
Returns
-------
reshaped_matrix : `self` with the new dimensions of `shape`
See Also
--------
np.matrix.reshape : NumPy's implementation of 'reshape' for matrices
"""
raise NotImplementedError("Reshaping not implemented for %s." %
self.__class__.__name__) |
Python | def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name) |
Python | def count_nonzero(self):
"""Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray())
Unlike getnnz() and the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
"""
raise NotImplementedError("count_nonzero not implemented for %s." %
self.__class__.__name__)
"""Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray())
Unlike getnnz() and the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
"""
raise NotImplementedError("count_nonzero not implemented for %s." %
self.__class__.__name__) |
Python | def nnz(self):
"""Number of stored values, including explicit zeros.
See also
--------
count_nonzero : Number of non-zero entries
"""
return self.getnnz()
"""Number of stored values, including explicit zeros.
See also
--------
count_nonzero : Number of non-zero entries
"""
return self.getnnz() |
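A sketch of the distinction drawn above, using an explicitly stored zero created through the coordinate-style constructor (a hypothetical but common way to get one):

import numpy as np
from scipy.sparse import csr_matrix

data = np.array([1.0, 0.0])   # the second entry is an explicit stored zero
rows = np.array([0, 1])
cols = np.array([0, 1])
A = csr_matrix((data, (rows, cols)), shape=(2, 2))

print(A.nnz)              # 2 -- stored values, including the explicit zero
print(A.count_nonzero())  # 1 -- actual non-zero entries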
Python | def transpose(self, axes=None, copy=False):
"""
Reverses the dimensions of the sparse matrix.
Parameters
----------
axes : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value.
copy : bool, optional
Indicates whether or not attributes of `self` should be
copied whenever possible. The degree to which attributes
are copied varies depending on the type of sparse matrix
being used.
Returns
-------
p : `self` with the dimensions reversed.
See Also
--------
np.matrix.transpose : NumPy's implementation of 'transpose'
for matrices
"""
return self.tocsr().transpose(axes=axes, copy=copy)
"""
Reverses the dimensions of the sparse matrix.
Parameters
----------
axes : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value.
copy : bool, optional
Indicates whether or not attributes of `self` should be
copied whenever possible. The degree to which attributes
are copied varies depending on the type of sparse matrix
being used.
Returns
-------
p : `self` with the dimensions reversed.
See Also
--------
np.matrix.transpose : NumPy's implementation of 'transpose'
for matrices
"""
return self.tocsr().transpose(axes=axes, copy=copy) |
Python | def nonzero(self):
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask], A.col[nz_mask])
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask], A.col[nz_mask]) |
Python | def toarray(self, order=None, out=None):
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-dimensional
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo(copy=False).toarray(order=order, out=out)
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-dimensional, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-dimensional
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo(copy=False).toarray(order=order, out=out) |
Python | def tocsr(self, copy=False):
"""Convert this matrix to Compressed Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csr_matrix.
"""
return self.tocoo(copy=copy).tocsr(copy=False)
"""Convert this matrix to Compressed Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csr_matrix.
"""
return self.tocoo(copy=copy).tocsr(copy=False) |
Python | def todok(self, copy=False):
"""Convert this matrix to Dictionary Of Keys format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dok_matrix.
"""
return self.tocoo(copy=copy).todok(copy=False)
"""Convert this matrix to Dictionary Of Keys format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dok_matrix.
"""
return self.tocoo(copy=copy).todok(copy=False) |
Python | def tocoo(self, copy=False):
"""Convert this matrix to COOrdinate format.
With copy=False, the data/indices may be shared between this matrix and
the resultant coo_matrix.
"""
return self.tocsr(copy=False).tocoo(copy=copy)
"""Convert this matrix to COOrdinate format.
With copy=False, the data/indices may be shared between this matrix and
the resultant coo_matrix.
"""
return self.tocsr(copy=False).tocoo(copy=copy) |
Python | def tolil(self, copy=False):
"""Convert this matrix to LInked List format.
With copy=False, the data/indices may be shared between this matrix and
the resultant lil_matrix.
"""
return self.tocsr(copy=False).tolil(copy=copy)
"""Convert this matrix to LInked List format.
With copy=False, the data/indices may be shared between this matrix and
the resultant lil_matrix.
"""
return self.tocsr(copy=False).tolil(copy=copy) |
Python | def todia(self, copy=False):
"""Convert this matrix to sparse DIAgonal format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dia_matrix.
"""
return self.tocoo(copy=copy).todia(copy=False)
"""Convert this matrix to sparse DIAgonal format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dia_matrix.
"""
return self.tocoo(copy=copy).todia(copy=False) |
Python | def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix to Block Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant bsr_matrix.
When blocksize=(R, C) is provided, it will be used for construction of
the bsr_matrix.
"""
return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
"""Convert this matrix to Block Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant bsr_matrix.
When blocksize=(R, C) is provided, it will be used for construction of
the bsr_matrix.
"""
return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy) |
Python | def tocsc(self, copy=False):
"""Convert this matrix to Compressed Sparse Column format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csc_matrix.
"""
return self.tocsr(copy=copy).tocsc(copy=False)
"""Convert this matrix to Compressed Sparse Column format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csc_matrix.
"""
return self.tocsr(copy=copy).tocsc(copy=False) |
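A brief sketch of the conversion chain used by the methods above: each to*() call produces the requested format, going through CSR or COO as an intermediate.

from scipy.sparse import coo_matrix

A = coo_matrix([[0, 1], [2, 0]])
print(A.tocsr().indptr)   # [0 1 2] -- CSR row pointer
print(A.todok()[1, 0])    # 2
print(A.tolil().rows)     # per-row column indices: [1] and [0]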
Python | def sum(self, axis=None, dtype=None, out=None):
"""
Sum the matrix elements over a given axis.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the sum is computed. The default is to
compute the sum of all the matrix elements, returning a scalar
(i.e. `axis` = `None`).
dtype : dtype, optional
The type of the returned matrix and of the accumulator in which
the elements are summed. The dtype of `a` is used by default
unless `a` has an integer dtype of less precision than the default
platform integer. In that case, if `a` is signed then the platform
integer is used while if `a` is unsigned then an unsigned integer
of the same precision as the platform integer is used.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
sum_along_axis : np.matrix
A matrix with the same shape as `self`, with the specified
axis removed.
See Also
--------
np.matrix.sum : NumPy's implementation of 'sum' for matrices
"""
validateaxis(axis)
# We use multiplication by a matrix of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
res_dtype = get_sum_dtype(self.dtype)
if axis is None:
# sum over rows and columns
row_sums = self.__mul__(np.ones((n, 1), dtype=res_dtype))
return row_sums.sum(dtype=dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
# sum over columns
ret = self.__rmul__(np.ones((1, m), dtype=res_dtype))
else:
# sum over rows
ret = self.__mul__(np.ones((n, 1), dtype=res_dtype))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
"""
Sum the matrix elements over a given axis.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the sum is computed. The default is to
compute the sum of all the matrix elements, returning a scalar
(i.e. `axis` = `None`).
dtype : dtype, optional
The type of the returned matrix and of the accumulator in which
the elements are summed. The dtype of `a` is used by default
unless `a` has an integer dtype of less precision than the default
platform integer. In that case, if `a` is signed then the platform
integer is used while if `a` is unsigned then an unsigned integer
of the same precision as the platform integer is used.
.. versionadded: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded: 0.18.0
Returns
-------
sum_along_axis : np.matrix
A matrix with the same shape as `self`, with the specified
axis removed.
See Also
--------
np.matrix.sum : NumPy's implementation of 'sum' for matrices
"""
validateaxis(axis)
# We use multiplication by a matrix of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
res_dtype = get_sum_dtype(self.dtype)
if axis is None:
# sum over rows and columns
row_sums = self.__mul__(np.ones((n, 1), dtype=res_dtype))
return row_sums.sum(dtype=dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
# sum over columns
ret = self.__rmul__(np.ones((1, m), dtype=res_dtype))
else:
# sum over rows
ret = self.__mul__(np.ones((n, 1), dtype=res_dtype))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out) |
Python | def mean(self, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the matrix elements. The average is taken
over all elements in the matrix by default, otherwise over the
specified axis. `float64` intermediate and return values are used
for integer inputs.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the mean is computed. The default is to compute
the mean of all elements in the matrix (i.e. `axis` = `None`).
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
m : np.matrix
See Also
--------
np.matrix.mean : NumPy's implementation of 'mean' for matrices
"""
def _is_integral(dtype):
return (np.issubdtype(dtype, np.integer) or
np.issubdtype(dtype, np.bool_))
validateaxis(axis)
res_dtype = self.dtype.type
integral = _is_integral(self.dtype)
# output dtype
if dtype is None:
if integral:
res_dtype = np.float64
else:
res_dtype = np.dtype(dtype).type
# intermediate dtype for summation
inter_dtype = np.float64 if integral else res_dtype
inter_self = self.astype(inter_dtype)
if axis is None:
return (inter_self / np.array(
self.shape[0] * self.shape[1]))\
.sum(dtype=res_dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
return (inter_self * (1.0 / self.shape[0])).sum(
axis=0, dtype=res_dtype, out=out)
else:
return (inter_self * (1.0 / self.shape[1])).sum(
axis=1, dtype=res_dtype, out=out)
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the matrix elements. The average is taken
over all elements in the matrix by default, otherwise over the
specified axis. `float64` intermediate and return values are used
for integer inputs.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the mean is computed. The default is to compute
the mean of all elements in the matrix (i.e. `axis` = `None`).
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
.. versionadded: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded: 0.18.0
Returns
-------
m : np.matrix
See Also
--------
np.matrix.mean : NumPy's implementation of 'mean' for matrices
"""
def _is_integral(dtype):
return (np.issubdtype(dtype, np.integer) or
np.issubdtype(dtype, np.bool_))
validateaxis(axis)
res_dtype = self.dtype.type
integral = _is_integral(self.dtype)
# output dtype
if dtype is None:
if integral:
res_dtype = np.float64
else:
res_dtype = np.dtype(dtype).type
# intermediate dtype for summation
inter_dtype = np.float64 if integral else res_dtype
inter_self = self.astype(inter_dtype)
if axis is None:
return (inter_self / np.array(
self.shape[0] * self.shape[1]))\
.sum(dtype=res_dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
return (inter_self * (1.0 / self.shape[0])).sum(
axis=0, dtype=res_dtype, out=out)
else:
return (inter_self * (1.0 / self.shape[1])).sum(
axis=1, dtype=res_dtype, out=out) |
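A usage sketch for the two reductions above; the results follow np.matrix semantics, so axis reductions keep two dimensions.

from scipy.sparse import csr_matrix

A = csr_matrix([[1, 2], [3, 4]])
print(A.sum())         # 10
print(A.sum(axis=0))   # [[4 6]]
print(A.mean(axis=1))  # [[1.5]
                       #  [3.5]]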
Python | def to_utf8(value):
"""
Convert value to a string encoded as UTF-8.
:param value: the value to convert
:return: UTF-8 encoded bytes on Python 2, str on Python 3
"""
if sys.version_info[0] < 3:
if not isinstance(value, basestring):
value = unicode(value)
if type(value) == str:
value = value.decode("utf-8", errors="ignore")
return value.encode('utf-8', 'ignore')
else:
return str(value)
"""
Converts value to string encoded into utf-8
:param value:
:return:
"""
if sys.version_info[0] < 3:
if not isinstance(value, basestring):
value = unicode(value)
if type(value) == str:
value = value.decode("utf-8", errors="ignore")
return value.encode('utf-8', 'ignore')
else:
return str(value) |
Python | def trim_string(s, max_bytes):
"""
Adjusts the length of the string s in order to fit it
into max_bytes bytes of storage after encoding as UTF-8.
Useful when cutting filesystem paths.
:param s: unicode string
:param max_bytes: number of bytes
:return: the prefix of s
"""
if isinstance(s, six.text_type):
return _trim_unicode_string(s, max_bytes)
if isinstance(s, six.binary_type):
if len(s) <= max_bytes:
return s
s = s.decode('utf-8', errors='ignore')
s = _trim_unicode_string(s, max_bytes)
s = s.encode('utf-8', errors='ignore')
return s
raise TypeError('a string is expected')
"""
Adjusts the length of the string s in order to fit it
into max_bytes bytes of storage after encoding as UTF-8.
Useful when cutting filesystem paths.
:param s: unicode string
:param max_bytes: number of bytes
:return the prefix of s
"""
if isinstance(s, six.text_type):
return _trim_unicode_string(s, max_bytes)
if isinstance(s, six.binary_type):
if len(s) <= max_bytes:
return s
s = s.decode('utf-8', errors='ignore')
s = _trim_unicode_string(s, max_bytes)
s = s.encode('utf-8', errors='ignore')
return s
raise TypeError('a string is expected') |
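A usage sketch (assuming the six dependency used by the function itself); the trailing multi-byte character is dropped rather than cut in half.

s = u"caf\u00e9"            # 'café' is 5 bytes once encoded as UTF-8
short = trim_string(s, 4)   # must fit into 4 bytes
print(short)                # expected: u'caf', a clean prefix
print(len(short.encode('utf-8')) <= 4)  # True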
Python | def _common_prefix(string_list):
"""
Given a list of pathnames, returns the longest common leading component
"""
if not string_list:
return ""
min_str = min(string_list)
max_str = max(string_list)
for i, c in enumerate(min_str):
if c != max_str[i]:
return min_str[:i]
return min_str
"""
Given a list of pathnames, returns the longest common leading component
"""
if not string_list:
return ""
min_str = min(string_list)
max_str = max(string_list)
for i, c in enumerate(min_str):
if c != max_str[i]:
return min_str[:i]
return min_str |
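A small sketch of the helper above; note the prefix is computed character-wise, not per path component.

paths = ["/usr/lib/python", "/usr/lib/perl"]
print(_common_prefix(paths))   # '/usr/lib/p'
print(_common_prefix([]))      # ''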
Python | def needs_g77_abi_wrapper(info):
"""Returns True if g77 ABI wrapper must be used."""
if uses_accelerate(info) or uses_veclib(info):
return True
elif uses_mkl(info):
return True
else:
return False
"""Returns True if g77 ABI wrapper must be used."""
if uses_accelerate(info) or uses_veclib(info):
return True
elif uses_mkl(info):
return True
else:
return False |
Python | def needs_sgemv_fix(info):
"""Returns True if SGEMV must be fixed."""
if uses_accelerate(info):
return True
else:
return False
"""Returns True if SGEMV must be fixed."""
if uses_accelerate(info):
return True
else:
return False |
Python | def split_fortran_files(source_dir, subroutines=None):
"""Split each file in `source_dir` into separate files per subroutine.
Parameters
----------
source_dir : str
Full path to directory in which sources to be split are located.
subroutines : list of str, optional
Subroutines to split. (Default: all)
Returns
-------
fnames : list of str
List of file names (not including any path) that were created
in `source_dir`.
Notes
-----
This function is useful for code that can't be compiled with g77 because of
type casting errors, but does compile with gfortran.
Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i``
starting at zero and ending at ``num_subroutines_in_file - 1``.
"""
if subroutines is not None:
subroutines = [x.lower() for x in subroutines]
def split_file(fname):
with open(fname, 'rb') as f:
lines = f.readlines()
subs = []
need_split_next = True
# find lines with SUBROUTINE statements
for ix, line in enumerate(lines):
m = re.match(b'^\\s+subroutine\\s+([a-z0-9_]+)\\s*\\(', line, re.I)
if m and line[0] not in b'Cc!*':
if subroutines is not None:
subr_name = m.group(1).decode('ascii').lower()
subr_wanted = (subr_name in subroutines)
else:
subr_wanted = True
if subr_wanted or need_split_next:
need_split_next = subr_wanted
subs.append(ix)
# check if no split needed
if len(subs) <= 1:
return [fname]
# write out one file per subroutine
new_fnames = []
num_files = len(subs)
for nfile in range(num_files):
new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f'
new_fnames.append(new_fname)
if not newer(fname, new_fname):
continue
with open(new_fname, 'wb') as fn:
if nfile + 1 == num_files:
fn.writelines(lines[subs[nfile]:])
else:
fn.writelines(lines[subs[nfile]:subs[nfile+1]])
return new_fnames
exclude_pattern = re.compile('_subr_[0-9]')
source_fnames = [f for f in glob.glob(os.path.join(source_dir, '*.f'))
if not exclude_pattern.search(os.path.basename(f))]
fnames = []
for source_fname in source_fnames:
created_files = split_file(source_fname)
if created_files is not None:
for cfile in created_files:
fnames.append(os.path.basename(cfile))
return fnames
"""Split each file in `source_dir` into separate files per subroutine.
Parameters
----------
source_dir : str
Full path to directory in which sources to be split are located.
subroutines : list of str, optional
Subroutines to split. (Default: all)
Returns
-------
fnames : list of str
List of file names (not including any path) that were created
in `source_dir`.
Notes
-----
This function is useful for code that can't be compiled with g77 because of
type casting errors which do work with gfortran.
Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i``
starting at zero and ending at ``num_subroutines_in_file - 1``.
"""
if subroutines is not None:
subroutines = [x.lower() for x in subroutines]
def split_file(fname):
with open(fname, 'rb') as f:
lines = f.readlines()
subs = []
need_split_next = True
# find lines with SUBROUTINE statements
for ix, line in enumerate(lines):
m = re.match(b'^\\s+subroutine\\s+([a-z0-9_]+)\s*\\(', line, re.I)
if m and line[0] not in b'Cc!*':
if subroutines is not None:
subr_name = m.group(1).decode('ascii').lower()
subr_wanted = (subr_name in subroutines)
else:
subr_wanted = True
if subr_wanted or need_split_next:
need_split_next = subr_wanted
subs.append(ix)
# check if no split needed
if len(subs) <= 1:
return [fname]
# write out one file per subroutine
new_fnames = []
num_files = len(subs)
for nfile in range(num_files):
new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f'
new_fnames.append(new_fname)
if not newer(fname, new_fname):
continue
with open(new_fname, 'wb') as fn:
if nfile + 1 == num_files:
fn.writelines(lines[subs[nfile]:])
else:
fn.writelines(lines[subs[nfile]:subs[nfile+1]])
return new_fnames
exclude_pattern = re.compile('_subr_[0-9]')
source_fnames = [f for f in glob.glob(os.path.join(source_dir, '*.f'))
if not exclude_pattern.search(os.path.basename(f))]
fnames = []
for source_fname in source_fnames:
created_files = split_file(source_fname)
if created_files is not None:
for cfile in created_files:
fnames.append(os.path.basename(cfile))
return fnames |
Python | def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result |
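A quick sketch, assuming this is the standard-library zipfile implementation: both a file-like object and a non-archive byte stream are accepted.

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("hello.txt", "hi")

print(zipfile.is_zipfile(buf))                       # True  (file-like object)
print(zipfile.is_zipfile(io.BytesIO(b"not a zip")))  # False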
Python | def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except OSError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks > 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except OSError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks > 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec |
Python | def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except OSError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except OSError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None |
Python | def FileHeader(self, zip64=None):
"""Return the per-file header as a bytes object."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
"""Return the per-file header as a bytes object."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra |
Python | def from_file(cls, filename, arcname=None):
"""Construct an appropriate ZipInfo for a file on the filesystem.
filename should be the path to a file or directory on the filesystem.
arcname is the name which it will have within the archive (by default,
this will be the same as filename, but without a drive letter and with
leading path separators removed).
"""
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = cls(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo
"""Construct an appropriate ZipInfo for a file on the filesystem.
filename should be the path to a file or directory on the filesystem.
arcname is the name which it will have within the archive (by default,
this will be the same as filename, but without a drive letter and with
leading path separators removed).
"""
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = cls(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo |
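A runnable sketch using a temporary file, since from_file stats a real path to fill in the size, date and mode (assuming the standard-library zipfile.ZipInfo).

import os
import tempfile
import zipfile

with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
    tmp.write(b"hello")

zi = zipfile.ZipInfo.from_file(tmp.name, arcname="hello.txt")
print(zi.filename, zi.file_size)   # hello.txt 5
os.unlink(tmp.name)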
Python | def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
return io.BufferedIOBase.readline(self, limit)
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
return io.BufferedIOBase.readline(self, limit) |
Python | def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512] |
Python | def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf |
Python | def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf |
Python | def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total) | def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total) |
Python | def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
                  file=file)

def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
                return zinfo.filename

def open(self, name, mode="r", pwd=None, *, force_zip64=False):
"""Return file-like object for 'name'.
name is a string for the file name within the ZIP file, or a ZipInfo
object.
mode should be 'r' to read a file already in the ZIP file, or 'w' to
write to a file newly added to the archive.
pwd is the password to decrypt files (only used for reading).
When writing, if the file size is not known in advance but may exceed
2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
files. If the size is known in advance, it is best to pass a ZipInfo
instance for name, with zinfo.file_size set.
"""
if mode not in {"r", "w"}:
raise ValueError('open() requires mode "r" or "w"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd and (mode == "w"):
raise ValueError("pwd is only supported for reading files")
if not self.fp:
raise ValueError(
"Attempt to use ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
elif mode == 'w':
zinfo = ZipInfo(name)
zinfo.compress_type = self.compression
zinfo._compresslevel = self.compresslevel
else:
# Get info object for name
zinfo = self.getinfo(name)
if mode == 'w':
return self._open_to_write(zinfo, force_zip64=force_zip64)
if self._writing:
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
# Open for reading:
self._fileRefCnt += 1
zef_file = _SharedFile(self.fp, zinfo.header_offset,
self._fpclose, self._lock, lambda: self._writing)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %r is encrypted, password "
"required for extraction" % name)
else:
pwd = None
return ZipExtFile(zef_file, mode, zinfo, pwd, True)
except:
zef_file.close()
            raise
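
# A short usage sketch for the reader/writer handles returned by open(),
# assuming this is the zipfile.ZipFile.open() method from the standard
# library; the archive and member names below are made-up placeholders.
import io
import zipfile

with zipfile.ZipFile("demo_open.zip", mode="w") as zf:
    # mode="w" yields a writable handle for a brand-new member.
    with zf.open("notes/readme.txt", mode="w") as dest:
        dest.write(b"hello from ZipFile.open\n")

with zipfile.ZipFile("demo_open.zip") as zf:
    # mode="r" (the default) yields a readable, file-like ZipExtFile.
    with zf.open("notes/readme.txt") as src:
        print(io.TextIOWrapper(src, encoding="utf-8").read())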

def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if path is None:
path = os.getcwd()
else:
path = os.fspath(path)
        return self._extract_member(member, path, pwd)

def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
if path is None:
path = os.getcwd()
else:
path = os.fspath(path)
for zipinfo in members:
            self._extract_member(zipinfo, path, pwd)
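
# Usage sketch for extract()/extractall(), again assuming the standard
# zipfile.ZipFile front-end; file and directory names are placeholders.
import zipfile

with zipfile.ZipFile("demo_extract.zip", mode="w") as zf:
    zf.writestr("data/a.txt", "first file\n")
    zf.writestr("data/b.txt", "second file\n")

with zipfile.ZipFile("demo_extract.zip") as zf:
    zf.extract("data/a.txt", path="unpacked/one")  # a single member
    zf.extractall(path="unpacked/all")             # members defaults to namelist()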

def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
        return arcname

def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.is_dir():
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
        return targetpath

def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise ValueError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise ValueError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions") | def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise ValueError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise ValueError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions") |
Python | def write(self, filename, arcname=None,
compress_type=None, compresslevel=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists"
)
zinfo = ZipInfo.from_file(filename, arcname)
if zinfo.is_dir():
zinfo.compress_size = 0
zinfo.CRC = 0
else:
if compress_type is not None:
zinfo.compress_type = compress_type
else:
zinfo.compress_type = self.compression
if compresslevel is not None:
zinfo._compresslevel = compresslevel
else:
zinfo._compresslevel = self.compresslevel
if zinfo.is_dir():
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
else:
with open(filename, "rb") as src, self.open(zinfo, 'w') as dest:
                shutil.copyfileobj(src, dest, 1024*8)
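
# Usage sketch for write(): copy an on-disk file into the archive under a
# chosen arcname and compression. Paths are placeholders; ZIP_DEFLATED and
# the compresslevel keyword come from the standard zipfile module.
import zipfile

with open("report.txt", "w", encoding="utf-8") as fh:
    fh.write("quarterly numbers\n")

with zipfile.ZipFile("demo_write.zip", mode="w") as zf:
    zf.write("report.txt", arcname="reports/2024/report.txt",
             compress_type=zipfile.ZIP_DEFLATED, compresslevel=6)
    print(zf.namelist())  # ['reports/2024/report.txt']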

def writestr(self, zinfo_or_arcname, data,
compress_type=None, compresslevel=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo._compresslevel = self.compresslevel
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists."
)
if compress_type is not None:
zinfo.compress_type = compress_type
if compresslevel is not None:
zinfo._compresslevel = compresslevel
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
with self.open(zinfo, mode='w') as dest:
                dest.write(data)
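
# Usage sketch for writestr(): add in-memory str or bytes content directly,
# without a file on disk. Member names are placeholders.
import zipfile

with zipfile.ZipFile("demo_writestr.zip", mode="w") as zf:
    zf.writestr("config/settings.json", '{"debug": false}')  # str -> UTF-8
    zf.writestr("blobs/raw.bin", b"\x00\x01\x02",
                compress_type=zipfile.ZIP_STORED)             # bytes, stored as-is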

def writepy(self, pathname, basename="", filterfunc=None):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyc.
This method will compile the module.py into module.pyc if
necessary.
        If filterfunc(pathname) is given, it is called for each path before it
        is added. When it returns a false value, the file or directory is skipped.
"""
pathname = os.fspath(pathname)
if filterfunc and not filterfunc(pathname):
if self.debug:
label = 'path' if os.path.isdir(pathname) else 'file'
print('%s %r skipped by filterfunc' % (label, pathname))
return
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = sorted(os.listdir(pathname))
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename,
filterfunc=filterfunc) # Recursive call
elif ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in sorted(os.listdir(pathname)):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
            self.write(fname, arcname)
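
# Usage sketch for writepy() on zipfile.PyZipFile: bundle a package as .pyc
# entries, skipping anything the filter rejects. The tiny throwaway package
# created here is just for illustration.
import os
import zipfile

os.makedirs("mypkg", exist_ok=True)
with open(os.path.join("mypkg", "__init__.py"), "w") as fh:
    fh.write("answer = 42\n")

with zipfile.PyZipFile("demo_modules.zip", mode="w", optimize=2) as pz:
    pz.writepy("mypkg", filterfunc=lambda p: "test" not in p)
    print(pz.namelist())  # e.g. ['mypkg/__init__.pyc']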

def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_opt0) and
os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt0
arcname = file_pyc
elif (os.path.isfile(pycache_opt1) and
os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt1
arcname = file_pyc
elif (os.path.isfile(pycache_opt2) and
os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt2
arcname = file_pyc
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
if sys.flags.optimize == 0:
fname = pycache_opt0
elif sys.flags.optimize == 1:
fname = pycache_opt1
else:
fname = pycache_opt2
arcname = file_pyc
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_opt0
arcname = file_pyc
else:
arcname = file_pyc
if self._optimize == 1:
fname = pycache_opt1
elif self._optimize == 2:
fname = pycache_opt2
else:
msg = "invalid value for 'optimize': {!r}".format(self._optimize)
raise ValueError(msg)
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)

def simplegeneric(func):
"""Make a trivial single-dispatch generic function"""
registry = {}
def wrapper(*args, **kw):
ob = args[0]
try:
cls = ob.__class__
except AttributeError:
cls = type(ob)
try:
mro = cls.__mro__
except AttributeError:
try:
class cls(cls, object):
pass
mro = cls.__mro__[1:]
except TypeError:
mro = object, # must be an ExtensionClass or some such :(
for t in mro:
if t in registry:
return registry[t](*args, **kw)
else:
return func(*args, **kw)
try:
wrapper.__name__ = func.__name__
except (TypeError, AttributeError):
pass # Python 2.3 doesn't allow functions to be renamed
def register(typ, func=None):
if func is None:
return lambda f: register(typ, f)
registry[typ] = func
return func
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
wrapper.register = register
    return wrapper
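
# A small sketch of how the generic function above is meant to be used:
# dispatch on the type of the first argument, with per-type implementations
# attached through .register(). The example names are invented here.
@simplegeneric
def describe(obj):
    return "something else: %r" % (obj,)

@describe.register(int)
def _describe_int(obj):
    return "an integer: %d" % obj

@describe.register(list)
def _describe_list(obj):
    return "a list of %d items" % len(obj)

print(describe(3))          # an integer: 3
print(describe([1, 2, 3]))  # a list of 3 items
print(describe("hi"))       # something else: 'hi'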

def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
for item in walk_packages(path, name+'.', onerror):
                yield item
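
# A self-contained demo of the API above. The same function ships as
# pkgutil.walk_packages, so the stdlib import is used here to keep the
# snippet runnable on its own; 'json' is just a handy package with a few
# submodules.
import json
import pkgutil

failed = []
for importer, name, ispkg in pkgutil.walk_packages(json.__path__,
                                                   json.__name__ + '.',
                                                   onerror=failed.append):
    print(name, '(package)' if ispkg else '(module)')
print('failed imports:', failed)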

def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be importers for sys.meta_path,
sys.path, and Python's "classic" import machinery, in that order. If
the named module is in a package, that package is imported as a side
effect of invoking this function.
Non PEP 302 mechanisms (e.g. the Windows registry) used by the
standard import machinery to find files in alternative locations
are partially supported, but are searched AFTER sys.path. Normally,
these locations are searched BEFORE sys.path, preventing sys.path
entries from shadowing them.
For this to cause a visible difference in behaviour, there must
be a module or package name that is accessible via both sys.path
and one of the non PEP 302 file system mechanisms. In this case,
the emulation will find the former version, while the builtin
import mechanism will find the latter.
Items of the following types can be affected by this discrepancy:
imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
"""
if fullname.startswith('.'):
raise ImportError("Relative module names not supported")
if '.' in fullname:
# Get the containing package's __path__
pkg = '.'.join(fullname.split('.')[:-1])
if pkg not in sys.modules:
__import__(pkg)
path = getattr(sys.modules[pkg], '__path__', None) or []
else:
for importer in sys.meta_path:
yield importer
path = sys.path
for item in path:
if item is None:
continue
yield get_importer(item)
if '.' not in fullname:
        yield ImpImporter()

def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's __path__.
Returns None if the module cannot be found or imported. This function uses
iter_importers(), and is thus subject to the same limitations regarding
platform-specific special import locations such as the Windows registry.
"""
for importer in iter_importers(fullname):
loader = importer.find_module(fullname)
if loader is not None:
return loader
    return None

def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
    path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
    return path

def _get_func(func, ps='sdzc'):
"""Just a helper: return a specified BLAS function w/typecode."""
for p in ps:
f = getattr(fblas, p+func, None)
if f is None:
continue
        yield f

def _get_row_ranges(self, rows, col_slice):
"""
Fast path for indexing in the case where column index is slice.
This gains performance improvement over brute force by more
efficient skipping of zeros, by accessing the elements
column-wise in order.
Parameters
----------
rows : sequence or xrange
Rows indexed. If xrange, must be within valid bounds.
col_slice : slice
Columns indexed
"""
j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
col_range = xrange(j_start, j_stop, j_stride)
nj = len(col_range)
new = lil_matrix((len(rows), nj), dtype=self.dtype)
_csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
self.rows, self.data,
new.rows, new.data,
rows,
j_start, j_stop, j_stride, nj)
        return new

def _prepare_index_for_memoryview(i, j, x=None):
"""
Convert index and data arrays to form suitable for passing to the
Cython fancy getset routines.
    The conversions are necessary (i) to ensure the integer
    index arrays are in one of the accepted types, and (ii) to ensure
the arrays are writable so that Cython memoryview support doesn't
choke on them.
Parameters
----------
i, j
Index arrays
x : optional
Data arrays
Returns
-------
i, j, x
Re-formatted arrays (x is omitted, if input was None)
"""
if i.dtype > j.dtype:
j = j.astype(i.dtype)
elif i.dtype < j.dtype:
i = i.astype(j.dtype)
if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
i = i.astype(np.intp)
if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
j = j.astype(np.intp)
if x is not None:
if not x.flags.writeable:
x = x.copy()
return i, j, x
else:
        return i, j

def cho_factor(a, lower=False, overwrite_a=False, check_finite=True):
"""
Compute the Cholesky decomposition of a matrix, to use in cho_solve
Returns a matrix containing the Cholesky decomposition,
``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.
The return value can be directly used as the first parameter to cho_solve.
.. warning::
The returned matrix also contains random data in the entries not
used by the Cholesky decomposition. If you need to zero these
entries, use the function `cholesky` instead.
Parameters
----------
a : (M, M) array_like
Matrix to be decomposed
lower : bool, optional
Whether to compute the upper or lower triangular Cholesky factorization
(Default: upper-triangular)
overwrite_a : bool, optional
Whether to overwrite data in a (may improve performance)
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
c : (M, M) ndarray
Matrix whose upper or lower triangle contains the Cholesky factor
of `a`. Other parts of the matrix contain random data.
lower : bool
Flag indicating whether the factor is in the lower or upper triangle
Raises
------
LinAlgError
Raised if decomposition fails.
See also
--------
    cho_solve : Solve a linear set of equations using the Cholesky factorization
of a matrix.
"""
c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False,
check_finite=check_finite)
    return c, lower
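
# Usage sketch pairing cho_factor with cho_solve (both in scipy.linalg) to
# solve A x = b for a symmetric positive-definite A; the 2x2 matrix here is
# a made-up example.
import numpy as np
from scipy.linalg import cho_factor, cho_solve

A = np.array([[4.0, 2.0], [2.0, 3.0]])  # symmetric positive definite
b = np.array([1.0, 2.0])
c, low = cho_factor(A)                   # upper-triangular factor by default
x = cho_solve((c, low), b)
print(np.allclose(A.dot(x), b))          # True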

def _cached_p_roots(n):
"""
Cache p_roots results to speed up calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
    return _cached_p_roots.cache[n]
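
# The function above keeps its cache in a function attribute, which has to be
# created before the first call; in the SciPy source it is initialized right
# after the definition, roughly like this:
_cached_p_roots.cache = dict()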

def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_p_roots(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None | def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_p_roots(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None |
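A short usage sketch (added for illustration, via the public `scipy.integrate.fixed_quad` entry point): order-5 Gauss-Legendre quadrature of sin on [0, pi], whose exact value is 2.

import numpy as np
from scipy.integrate import fixed_quad

val, none = fixed_quad(np.sin, 0.0, np.pi, n=5)
print(val, none)   # val is close to 2.0; the second return value is always None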
Python | def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc | def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc |
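A small sketch of how the helper above behaves (illustration only; it relies on the `vectorize1` definition above, which uses `xrange` and therefore targets Python 2): a scalar-only function is wrapped so it also accepts arrays.

import numpy as np

def scalar_f(x, a):
    return x ** 2 + a

vf = vectorize1(scalar_f, args=(1.0,), vec_func=False)
print(vf(2.0))                         # scalar in -> scalar out: 5.0
print(vf(np.array([1.0, 2.0, 3.0])))   # array in -> element-wise array: [2. 5. 10.]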
Python | def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err | def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err |
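A usage sketch (illustrative, through the public `scipy.integrate.quadrature` wrapper): adaptive-order Gaussian quadrature of exp on [0, 1], whose exact value is e - 1.

import numpy as np
from scipy.integrate import quadrature

val, err = quadrature(np.exp, 0.0, 1.0, tol=1e-10, rtol=1e-10)
print(val, err)   # val ~ 1.71828...; err is the difference between the last two estimates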
Python | def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
    dx : float, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result | def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
    dx : float, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result |
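A usage sketch (illustrative, via the public `scipy.integrate.simps`): with an even number of samples the `even` parameter decides where the extra trapezoidal interval goes.

import numpy as np
from scipy.integrate import simps

x = np.linspace(0.0, np.pi, 10)    # 10 samples (even count), so the 'even' handling applies
y = np.sin(x)
print(simps(y, x))                 # default even='avg'
print(simps(y, x, even='first'))   # Simpson's on the first N-2 intervals, trapezoid on the last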
Python | def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s | def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
        numtosum = numtraps // 2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s |
Python | def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0) | def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0) |
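The helper above is one Richardson-extrapolation step, R = (4**k * c - b) / (4**k - 1). A self-contained numeric illustration (the numbers are made up for this sketch, not taken from the source): combining the 1-panel and 2-panel trapezoid estimates of the integral of exp on [0, 1] with k = 1 reproduces a Simpson-quality estimate.

import numpy as np

f = np.exp
a, b = 0.0, 1.0
T1 = 0.5 * (b - a) * (f(a) + f(b))                # trapezoid estimate, 1 panel
T2 = 0.5 * T1 + 0.5 * (b - a) * f(0.5 * (a + b))  # trapezoid estimate, 2 panels
R11 = (4.0 * T2 - T1) / (4.0 - 1.0)               # same formula as _romberg_diff(T1, T2, 1)
print(T1, T2, R11, np.e - 1.0)                    # R11 is far closer to the exact value e - 1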
Python | def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : str, optional
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
>>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
>>> sparse.kron(A, B).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format) | def kron(A, B, format=None):
"""kronecker product of sparse matrices A and B
Parameters
----------
A : sparse or dense matrix
first matrix of the product
B : sparse or dense matrix
second matrix of the product
format : str, optional
format of the result (e.g. "csr")
Returns
-------
kronecker product in a sparse matrix format
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
>>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
>>> sparse.kron(A, B).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
>>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
array([[ 0, 0, 2, 4],
[ 0, 0, 6, 8],
[ 5, 10, 0, 0],
[15, 20, 0, 0]])
"""
B = coo_matrix(B)
if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
# B is fairly dense, use BSR
A = csr_matrix(A,copy=True)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
B = B.toarray()
data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
data = data * B
return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
else:
# use COO
A = coo_matrix(A)
output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
if A.nnz == 0 or B.nnz == 0:
# kronecker product is the zero matrix
return coo_matrix(output_shape)
# expand entries of a into blocks
row = A.row.repeat(B.nnz)
col = A.col.repeat(B.nnz)
data = A.data.repeat(B.nnz)
row *= B.shape[0]
col *= B.shape[1]
# increment block indices
row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
row += B.row
col += B.col
row,col = row.reshape(-1),col.reshape(-1)
# compute block entries
data = data.reshape(-1,B.nnz) * B.data
data = data.reshape(-1)
return coo_matrix((data,(row,col)), shape=output_shape).asformat(format) |
Python | def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generate a sparse matrix of the given shape and density with randomly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : callable, optional
Samples a requested number of random values.
This function should take a single argument specifying the length
of the ndarray that it will return. The structurally nonzero entries
of the sparse random matrix will be taken from the array sampled
by this function. By default, uniform [0, 1) random values will be
sampled using the same random state as is used for sampling
the sparsity structure.
Examples
--------
>>> from scipy.sparse import random
>>> from scipy import stats
>>> class CustomRandomState(object):
... def randint(self, k):
... i = np.random.randint(k)
... return i - i % 2
>>> rs = CustomRandomState()
>>> rvs = stats.poisson(25, loc=10).rvs
>>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
>>> S.A
array([[ 36., 0., 33., 0.], # random
[ 0., 0., 0., 0.],
[ 0., 0., 36., 0.]])
Notes
-----
Only float types are supported for now.
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
dtype = np.dtype(dtype)
if dtype.char not in 'fdg':
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
tp = np.intc
if mn > np.iinfo(tp).max:
tp = np.int64
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such that the product of dimensions is
greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
# Use the algorithm from python's random.sample for k < mn/3.
if mn < 3*k:
# We should use this line, but choice is only available in numpy >= 1.7
# ind = random_state.choice(mn, size=k, replace=False)
ind = random_state.permutation(mn)[:k]
else:
ind = np.empty(k, dtype=tp)
selected = set()
for i in xrange(k):
j = random_state.randint(mn)
while j in selected:
j = random_state.randint(mn)
selected.add(j)
ind[i] = j
j = np.floor(ind * 1. / m).astype(tp)
i = (ind - j * m).astype(tp)
vals = data_rvs(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format) | def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generate a sparse matrix of the given shape and density with randomly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : callable, optional
Samples a requested number of random values.
This function should take a single argument specifying the length
of the ndarray that it will return. The structurally nonzero entries
of the sparse random matrix will be taken from the array sampled
by this function. By default, uniform [0, 1) random values will be
sampled using the same random state as is used for sampling
the sparsity structure.
Examples
--------
>>> from scipy.sparse import random
>>> from scipy import stats
>>> class CustomRandomState(object):
... def randint(self, k):
... i = np.random.randint(k)
... return i - i % 2
>>> rs = CustomRandomState()
>>> rvs = stats.poisson(25, loc=10).rvs
>>> S = random(3, 4, density=0.25, random_state=rs, data_rvs=rvs)
>>> S.A
array([[ 36., 0., 33., 0.], # random
[ 0., 0., 0., 0.],
[ 0., 0., 36., 0.]])
Notes
-----
Only float types are supported for now.
"""
if density < 0 or density > 1:
raise ValueError("density expected to be 0 <= density <= 1")
dtype = np.dtype(dtype)
if dtype.char not in 'fdg':
raise NotImplementedError("type %s not supported" % dtype)
mn = m * n
tp = np.intc
if mn > np.iinfo(tp).max:
tp = np.int64
if mn > np.iinfo(tp).max:
msg = """\
Trying to generate a random sparse matrix such that the product of dimensions is
greater than %d - this is not supported on this machine
"""
raise ValueError(msg % np.iinfo(tp).max)
# Number of non zero values
k = int(density * m * n)
if random_state is None:
random_state = np.random
elif isinstance(random_state, (int, np.integer)):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
# Use the algorithm from python's random.sample for k < mn/3.
if mn < 3*k:
# We should use this line, but choice is only available in numpy >= 1.7
# ind = random_state.choice(mn, size=k, replace=False)
ind = random_state.permutation(mn)[:k]
else:
ind = np.empty(k, dtype=tp)
selected = set()
for i in xrange(k):
j = random_state.randint(mn)
while j in selected:
j = random_state.randint(mn)
selected.add(j)
ind[i] = j
j = np.floor(ind * 1. / m).astype(tp)
i = (ind - j * m).astype(tp)
vals = data_rvs(k).astype(dtype)
return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format) |
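A small sketch (illustration only) of the flat-index bookkeeping used above: `ind` holds k distinct indices into the m*n cells, and the final lines recover column-major (row, col) pairs from them.

import numpy as np

m, n, k = 3, 4, 5
ind = np.random.permutation(m * n)[:k]     # k distinct flat indices, as in the branch above
j = ind // m                               # column index (the code computes floor(ind / m))
i = ind % m                                # row index (the code computes ind - j * m)
print(list(zip(i.tolist(), j.tolist())))   # k distinct (row, col) positions in a 3x4 matrix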
Python | def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Notes
-----
Only float types are supported for now.
"""
return random(m, n, density, format, dtype, random_state) | def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
"""Generate a sparse matrix of the given shape and density with uniformly
distributed values.
Parameters
----------
m, n : int
shape of the matrix
density : real, optional
density of the generated matrix: density equal to one means a full
matrix, density of 0 means a matrix with no non-zero items.
format : str, optional
sparse matrix format.
dtype : dtype, optional
type of the returned matrix values.
random_state : {numpy.random.RandomState, int}, optional
Random number generator or random seed. If not given, the singleton
numpy.random will be used.
Notes
-----
Only float types are supported for now.
"""
return random(m, n, density, format, dtype, random_state) |
Python | def _get_row_slice(self, i, cslice):
"""Returns a copy of row self[i, cslice]
"""
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('index (%d) out of range' % i)
start, stop, stride = cslice.indices(self.shape[1])
if stride == 1:
# for stride == 1, _get_submatrix is ~30% faster than below
row_slice = self._get_submatrix(i, cslice)
else:
# other strides need new code
row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]]
row_data = self.data[self.indptr[i]:self.indptr[i + 1]]
if stride > 0:
ind = (row_indices >= start) & (row_indices < stop)
elif stride < 0:
ind = (row_indices <= start) & (row_indices > stop)
if abs(stride) > 1:
ind = ind & ((row_indices - start) % stride == 0)
row_indices = (row_indices[ind] - start) // stride
row_data = row_data[ind]
row_indptr = np.array([0, len(row_indices)])
if stride < 0:
row_data = row_data[::-1]
row_indices = abs(row_indices[::-1])
shape = (1, int(np.ceil(float(stop - start) / stride)))
row_slice = csr_matrix((row_data, row_indices, row_indptr),
shape=shape)
return row_slice | def _get_row_slice(self, i, cslice):
"""Returns a copy of row self[i, cslice]
"""
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('index (%d) out of range' % i)
start, stop, stride = cslice.indices(self.shape[1])
if stride == 1:
# for stride == 1, _get_submatrix is ~30% faster than below
row_slice = self._get_submatrix(i, cslice)
else:
# other strides need new code
row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]]
row_data = self.data[self.indptr[i]:self.indptr[i + 1]]
if stride > 0:
ind = (row_indices >= start) & (row_indices < stop)
elif stride < 0:
ind = (row_indices <= start) & (row_indices > stop)
if abs(stride) > 1:
ind = ind & ((row_indices - start) % stride == 0)
row_indices = (row_indices[ind] - start) // stride
row_data = row_data[ind]
row_indptr = np.array([0, len(row_indices)])
if stride < 0:
row_data = row_data[::-1]
row_indices = abs(row_indices[::-1])
shape = (1, int(np.ceil(float(stop - start) / stride)))
row_slice = csr_matrix((row_data, row_indices, row_indptr),
shape=shape)
return row_slice |
Python | def _get_submatrix(self, row_slice, col_slice):
"""Return a submatrix of this matrix (new matrix is created)."""
M,N = self.shape
def process_slice(sl, num):
if isinstance(sl, slice):
if sl.step not in (1, None):
raise ValueError('slicing with step != 1 not supported')
i0, i1 = sl.start, sl.stop
if i0 is None:
i0 = 0
elif i0 < 0:
i0 = num + i0
if i1 is None:
i1 = num
elif i1 < 0:
i1 = num + i1
return i0, i1
elif isintlike(sl):
if sl < 0:
sl += num
return sl, sl + 1
else:
raise TypeError('expected slice or scalar')
def check_bounds(i0, i1, num):
if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1):
raise IndexError(
"index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d,"
" %d <= %d" % (i0, num, i1, num, i0, i1))
i0, i1 = process_slice(row_slice, M)
j0, j1 = process_slice(col_slice, N)
check_bounds(i0, i1, M)
check_bounds(j0, j1, N)
indptr, indices, data = get_csr_submatrix(M, N,
self.indptr, self.indices, self.data,
int(i0), int(i1), int(j0), int(j1))
shape = (i1 - i0, j1 - j0)
return self.__class__((data,indices,indptr), shape=shape) | def _get_submatrix(self, row_slice, col_slice):
"""Return a submatrix of this matrix (new matrix is created)."""
M,N = self.shape
def process_slice(sl, num):
if isinstance(sl, slice):
if sl.step not in (1, None):
raise ValueError('slicing with step != 1 not supported')
i0, i1 = sl.start, sl.stop
if i0 is None:
i0 = 0
elif i0 < 0:
i0 = num + i0
if i1 is None:
i1 = num
elif i1 < 0:
i1 = num + i1
return i0, i1
elif isintlike(sl):
if sl < 0:
sl += num
return sl, sl + 1
else:
raise TypeError('expected slice or scalar')
def check_bounds(i0, i1, num):
if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1):
raise IndexError(
"index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d,"
" %d <= %d" % (i0, num, i1, num, i0, i1))
i0, i1 = process_slice(row_slice, M)
j0, j1 = process_slice(col_slice, N)
check_bounds(i0, i1, M)
check_bounds(j0, j1, N)
indptr, indices, data = get_csr_submatrix(M, N,
self.indptr, self.indices, self.data,
int(i0), int(i1), int(j0), int(j1))
shape = (i1 - i0, j1 - j0)
return self.__class__((data,indices,indptr), shape=shape) |
Python | def values(self, n):
"""Return an array containing approximatively n numbers."""
m = max(1, n//3)
v1 = np.logspace(-30, np.log10(0.3), m)
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
v = np.r_[v1, v2, v3]
return np.unique(v) | def values(self, n):
"""Return an array containing approximatively n numbers."""
m = max(1, n//3)
v1 = np.logspace(-30, np.log10(0.3), m)
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
v = np.r_[v1, v2, v3]
return np.unique(v) |
Python | def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
fprime2=None):
"""
Find a zero using the Newton-Raphson or secant method.
Find a zero of the function `func` given a nearby starting point `x0`.
The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is provided, parabolic Halley's method
is used.
Parameters
----------
func : function
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : function, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
fprime2 : function, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is given, parabolic Halley's
method is used.
Returns
-------
zero : float
Estimated location where function is zero.
See Also
--------
brentq, brenth, ridder, bisect
fsolve : find zeroes in n dimensions.
Notes
-----
The convergence rate of the Newton-Raphson method is quadratic,
the Halley method is cubic, and the secant method is
sub-quadratic. This means that if the function is well behaved
the actual error in the estimated zero is approximately the square
(cube for Halley) of the requested tolerance up to roundoff
error. However, the stopping criterion used here is the step size
and there is no guarantee that a zero has been found. Consequently
the result should be verified. Safer algorithms are brentq,
brenth, ridder, and bisect, but they all require that the root
first be bracketed in an interval where the function changes
sign. The brentq algorithm is recommended for general use in one
dimensional problems when such an interval has been found.
"""
if tol <= 0:
raise ValueError("tol too small (%g <= 0)" % tol)
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if fprime is not None:
        # Newton-Raphson method
# Multiply by 1.0 to convert to floating point. We don't use float(x0)
# so it still works if x0 is complex.
p0 = 1.0 * x0
fder2 = 0
for iter in range(maxiter):
myargs = (p0,) + args
fder = fprime(*myargs)
if fder == 0:
msg = "derivative was zero."
warnings.warn(msg, RuntimeWarning)
return p0
fval = func(*myargs)
if fprime2 is not None:
fder2 = fprime2(*myargs)
if fder2 == 0:
# Newton step
p = p0 - fval / fder
else:
# Parabolic Halley's method
discr = fder ** 2 - 2 * fval * fder2
if discr < 0:
p = p0 - fder / fder2
else:
p = p0 - 2*fval / (fder + sign(fder) * sqrt(discr))
if abs(p - p0) < tol:
return p
p0 = p
else:
# Secant method
p0 = x0
if x0 >= 0:
p1 = x0*(1 + 1e-4) + 1e-4
else:
p1 = x0*(1 + 1e-4) - 1e-4
q0 = func(*((p0,) + args))
q1 = func(*((p1,) + args))
for iter in range(maxiter):
if q1 == q0:
if p1 != p0:
msg = "Tolerance of %s reached" % (p1 - p0)
warnings.warn(msg, RuntimeWarning)
return (p1 + p0)/2.0
else:
p = p1 - q1*(p1 - p0)/(q1 - q0)
if abs(p - p1) < tol:
return p
p0 = p1
q0 = q1
p1 = p
q1 = func(*((p1,) + args))
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg) | def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
fprime2=None):
"""
Find a zero using the Newton-Raphson or secant method.
Find a zero of the function `func` given a nearby starting point `x0`.
The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is provided, parabolic Halley's method
is used.
Parameters
----------
func : function
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : function, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
fprime2 : function, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is given, parabolic Halley's
method is used.
Returns
-------
zero : float
Estimated location where function is zero.
See Also
--------
brentq, brenth, ridder, bisect
fsolve : find zeroes in n dimensions.
Notes
-----
The convergence rate of the Newton-Raphson method is quadratic,
the Halley method is cubic, and the secant method is
sub-quadratic. This means that if the function is well behaved
the actual error in the estimated zero is approximately the square
(cube for Halley) of the requested tolerance up to roundoff
error. However, the stopping criterion used here is the step size
and there is no guarantee that a zero has been found. Consequently
the result should be verified. Safer algorithms are brentq,
brenth, ridder, and bisect, but they all require that the root
first be bracketed in an interval where the function changes
sign. The brentq algorithm is recommended for general use in one
dimensional problems when such an interval has been found.
"""
if tol <= 0:
raise ValueError("tol too small (%g <= 0)" % tol)
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if fprime is not None:
        # Newton-Raphson method
# Multiply by 1.0 to convert to floating point. We don't use float(x0)
# so it still works if x0 is complex.
p0 = 1.0 * x0
fder2 = 0
for iter in range(maxiter):
myargs = (p0,) + args
fder = fprime(*myargs)
if fder == 0:
msg = "derivative was zero."
warnings.warn(msg, RuntimeWarning)
return p0
fval = func(*myargs)
if fprime2 is not None:
fder2 = fprime2(*myargs)
if fder2 == 0:
# Newton step
p = p0 - fval / fder
else:
# Parabolic Halley's method
discr = fder ** 2 - 2 * fval * fder2
if discr < 0:
p = p0 - fder / fder2
else:
p = p0 - 2*fval / (fder + sign(fder) * sqrt(discr))
if abs(p - p0) < tol:
return p
p0 = p
else:
# Secant method
p0 = x0
if x0 >= 0:
p1 = x0*(1 + 1e-4) + 1e-4
else:
p1 = x0*(1 + 1e-4) - 1e-4
q0 = func(*((p0,) + args))
q1 = func(*((p1,) + args))
for iter in range(maxiter):
if q1 == q0:
if p1 != p0:
msg = "Tolerance of %s reached" % (p1 - p0)
warnings.warn(msg, RuntimeWarning)
return (p1 + p0)/2.0
else:
p = p1 - q1*(p1 - p0)/(q1 - q0)
if abs(p - p1) < tol:
return p
p0 = p1
q0 = q1
p1 = p
q1 = func(*((p1,) + args))
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg) |
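A usage sketch (illustrative, through the public `scipy.optimize.newton`): the same root found with the secant method (no derivative) and with Newton-Raphson (derivative supplied).

from scipy.optimize import newton

def f(x):
    return x ** 3 - 1.0

def fprime(x):
    return 3.0 * x ** 2

print(newton(f, x0=0.5))                  # secant method
print(newton(f, x0=0.5, fprime=fprime))   # Newton-Raphson; both converge to 1.0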
Python | def brentq(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in a bracketing interval using Brent's method.
Uses the classic Brent's method to find a zero of the function `f` on
the sign changing interval [a , b]. Generally considered the best of the
rootfinding routines here. It is a safe version of the secant method that
uses inverse quadratic extrapolation. Brent's method combines root
bracketing, interval bisection, and inverse quadratic interpolation. It is
sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
claims convergence is guaranteed for functions computable within [a,b].
[Brent1973]_ provides the classic description of the algorithm. Another
description can be found in a recent edition of Numerical Recipes, including
[PressEtal1992]_. Another description is at
http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
understand the algorithm just by reading our code. Our code diverges a bit
from standard presentations: we choose a different formula for the
extrapolation step.
Parameters
----------
f : function
Python function returning a number. The function :math:`f`
must be continuous, and :math:`f(a)` and :math:`f(b)` must
have opposite signs.
a : number
One end of the bracketing interval :math:`[a, b]`.
b : number
The other end of the bracketing interval :math:`[a, b]`.
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
multivariate local optimizers
`fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
nonlinear least squares minimizer
`leastsq`
constrained multivariate optimizers
`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
global optimizers
`basinhopping`, `brute`, `differential_evolution`
local scalar minimizers
`fminbound`, `brent`, `golden`, `bracket`
n-dimensional root-finding
`fsolve`
one-dimensional root-finding
`brenth`, `ridder`, `bisect`, `newton`
scalar fixed-point finder
`fixed_point`
Notes
-----
`f` must be continuous. f(a) and f(b) must have opposite signs.
References
----------
.. [Brent1973]
Brent, R. P.,
*Algorithms for Minimization Without Derivatives*.
Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
.. [PressEtal1992]
Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
*Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brentq(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
return results_c(full_output, r) | def brentq(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in a bracketing interval using Brent's method.
Uses the classic Brent's method to find a zero of the function `f` on
the sign changing interval [a , b]. Generally considered the best of the
rootfinding routines here. It is a safe version of the secant method that
uses inverse quadratic extrapolation. Brent's method combines root
bracketing, interval bisection, and inverse quadratic interpolation. It is
sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
claims convergence is guaranteed for functions computable within [a,b].
[Brent1973]_ provides the classic description of the algorithm. Another
description can be found in a recent edition of Numerical Recipes, including
[PressEtal1992]_. Another description is at
http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
understand the algorithm just by reading our code. Our code diverges a bit
from standard presentations: we choose a different formula for the
extrapolation step.
Parameters
----------
f : function
Python function returning a number. The function :math:`f`
must be continuous, and :math:`f(a)` and :math:`f(b)` must
have opposite signs.
a : number
One end of the bracketing interval :math:`[a, b]`.
b : number
The other end of the bracketing interval :math:`[a, b]`.
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. For nice functions, Brent's
method will often satisfy the above condition will ``xtol/2``
and ``rtol/2``. [Brent1973]_
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
`f` is called by ``apply(f, (x)+args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
multivariate local optimizers
`fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
nonlinear least squares minimizer
`leastsq`
constrained multivariate optimizers
`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
global optimizers
`basinhopping`, `brute`, `differential_evolution`
local scalar minimizers
`fminbound`, `brent`, `golden`, `bracket`
n-dimensional root-finding
`fsolve`
one-dimensional root-finding
`brenth`, `ridder`, `bisect`, `newton`
scalar fixed-point finder
`fixed_point`
Notes
-----
`f` must be continuous. f(a) and f(b) must have opposite signs.
References
----------
.. [Brent1973]
Brent, R. P.,
*Algorithms for Minimization Without Derivatives*.
Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
.. [PressEtal1992]
Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
*Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brentq(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
return results_c(full_output, r) |
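A usage sketch (illustrative, via the public `scipy.optimize.brentq`): the bracket [0, 2] is valid because f changes sign on it.

from scipy.optimize import brentq

def f(x):
    return x ** 2 - 2.0

root, r = brentq(f, 0.0, 2.0, full_output=True)
print(root, r.converged, r.iterations)    # root ~ sqrt(2), r.converged is True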
Python | def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
    except AssertionError:
assert_allclose(actual, conj(desired), **kw) | def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
    except AssertionError:
assert_allclose(actual, conj(desired), **kw) |
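A tiny illustration (assuming the helper above and its `assert_allclose`/`conj` imports are in scope): both calls pass, the second one only because the arrays are element-wise complex conjugates.

import numpy as np

assert_allclose_cc(1.0 + 2.0j, 1.0 + 2.0j)                            # equal, passes directly
assert_allclose_cc(np.array([1 + 1j, 2j]), np.array([1 - 1j, -2j]))   # passes via the conjugate branch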
Python | def execute(
command, check_exit_code=True,
shell=False, timeout=None,
cwd=None, env=None,
stdin=None, stdout=None, stderr=None,
creationflags=0, wait=True,
process_progress_listener=None, close_fds=False,
collect_cores=True, check_sanitizer=True, preexec_fn=None, on_timeout=None,
# YT specific
input_data=None, output_data=None,
data_mine_strategy=None,
env_mine_strategy=None,
operation_spec=None, task_spec=None,
yt_proxy=None, output_result_path=None,
init_func=None, fini_func=None,
# Service args
spec_filename=None,
test_tool_bin=None,
executor=_YtExecutor,
runner_log_path=None,
runner_log2stderr=False,
runner_meta_path=None,
target_stdout_path=None,
target_stderr_path=None,
operation_log_path=None,
operation_description=None,
yt_token_path=None,
):
"""
Executes a command on the YT. Listed below are options whose behavior is different from yatest.common.execute
:param command: can be a list of arguments or a string (all paths matched prefixes yatest.common.*_path will be fixed)
:param timeout: timeout for command executed on the YT (doesn't take into account the time spent for execution preparation - uploading/downloading data, etc)
:param cwd: ignored
:param env: all paths matched prefixes yatest.common.*_path will be fixed
:param stdin: stdin will be fully read before execution and uploaded to the YT
:param stdout: stdout will be available after the execution of the command on the YT. Set to False to skip downloading
:param stderr: same as stdout
:param process_progress_listener: ignored
:param preexec_fn: ignored
:param on_timeout: ignored
:param input_data: map of input files/dirs required for command run which will be uploaded to YT (local path -> YT sandbox path)
:param output_data: map of output files/dirs which will be downloaded from YT after command execution (YT sandbox path -> local path)
Take into account that runner will call os.path.dirname(YT sandbox path) to create intermediate directories for every entry
:param data_mine_strategy: allows to provide own function to mine input data and fix cmd. For more info take a look at *_mine_strategy()
:param env_mine_strategy: allows to provide own function to mine input data and fix env. For more info take a look at *_mine_strategy()
:param operation_spec: YT operation spec
:param task_spec: YT task spec
:param output_result_path: specify path to output archive. Used for test purposes
:param init_func: Function which will be executed before target program. See note below
:param fini_func: Function which will be executed after target program. See note below
:return: Execution object
.. note::
init_func and fini_func have some limitations:
        * every used module should be imported inside these functions, because the functions will be called in a different environment and required modules may not be imported or available
* you can only use built-in python modules (because test_tool uploads itself and runs init/fini func inside itself)
"""
test_tool_bin = test_tool_bin or _get_test_tool_bin()
data_mine_strategy = data_mine_strategy or default_mine_strategy
env_mine_strategy = env_mine_strategy or default_env_mine_strategy
if not wait:
raise NotImplementedError()
orig_command = command
command, env, to_upload, to_download = _fix_user_data(command, shell, env, input_data, output_data, data_mine_strategy, env_mine_strategy)
command_name = ytc.process.get_command_name(command)
exec_spec = {
'env': env,
'command': command,
'timeout': timeout,
'input_data': to_upload,
'output_data': to_download,
'description': operation_description,
}
if stdin:
if isinstance(stdin, basestring):
stdin_path = stdin
else:
logger.deubg('Reading stdin')
with tempfile.NamedTemporaryFile(delete=False) as afile:
afile.write(stdin.read())
stdin_path = afile.name
to_upload[stdin_path] = get_yt_sandbox_path('env/stdin')
exec_spec['stdin'] = get_yt_sandbox_path('env/stdin')
for stream, name, filename in [
(True, 'meta', runner_meta_path),
(stdout, 'stdout', target_stdout_path),
(stderr, 'stderr', target_stderr_path),
]:
if stream is not False:
path = get_yt_sandbox_path("env/{}".format(name))
exec_spec[name] = path
to_download[path] = filename or ytc.get_unique_file_path(ytc.work_path(), 'yt_vanilla_{}_{}'.format(command_name, name))
runner_log_dst = get_yt_sandbox_path('env/runner_log')
exec_spec['runner_log'] = runner_log_dst
to_download[runner_log_dst] = runner_log_path or ytc.path.get_unique_file_path(ytc.test_output_path(), 'yt_vanilla_wrapper_{}.log'.format(command_name))
exec_spec['op_spec'] = _get_spec(
default={
'max_failed_job_count': 2,
            # Prevent dangling operations in case the test gets killed - see https://st.yandex-team.ru/DEVTOOLS-4753#1539181402000
'time_limit': int(1000 * 60 * 60 * 1.5) # 1.5h (milliseconds)
},
user=operation_spec,
)
exec_spec['task_spec'] = _get_spec(
default={'memory_limit': 3 * (1024 ** 3)},
user=task_spec,
mandatory={'job_count': 1},
)
if init_func:
exec_spec['init_func'] = _dump_func(init_func)
if fini_func:
exec_spec['fini_func'] = _dump_func(fini_func)
exec_spec_path = _dump_spec(spec_filename, exec_spec)
executor_cmd = [
test_tool_bin, 'yt_vanilla_execute',
'--spec-file', exec_spec_path,
'--log-path', operation_log_path or ytc.path.get_unique_file_path(ytc.test_output_path(), 'yt_vanilla_op_{}.log'.format(command_name)),
]
if yt_proxy:
executor_cmd += ['--yt-proxy', yt_proxy]
if output_result_path:
executor_cmd += ['--output-path', output_result_path]
if runner_log2stderr:
executor_cmd += ['--log2stderr']
executor_stderr = sys.stderr
else:
executor_stderr = None
if yt_token_path:
executor_cmd += ['--yt-token-path', yt_token_path]
res = ytc.execute(
executor_cmd,
stderr=executor_stderr,
collect_cores=collect_cores,
wait=False,
check_sanitizer=check_sanitizer,
executor=executor,
)
if wait:
res.wait(exec_spec, orig_command, stdout, stderr, check_exit_code, timeout)
return res | def execute(
command, check_exit_code=True,
shell=False, timeout=None,
cwd=None, env=None,
stdin=None, stdout=None, stderr=None,
creationflags=0, wait=True,
process_progress_listener=None, close_fds=False,
collect_cores=True, check_sanitizer=True, preexec_fn=None, on_timeout=None,
# YT specific
input_data=None, output_data=None,
data_mine_strategy=None,
env_mine_strategy=None,
operation_spec=None, task_spec=None,
yt_proxy=None, output_result_path=None,
init_func=None, fini_func=None,
# Service args
spec_filename=None,
test_tool_bin=None,
executor=_YtExecutor,
runner_log_path=None,
runner_log2stderr=False,
runner_meta_path=None,
target_stdout_path=None,
target_stderr_path=None,
operation_log_path=None,
operation_description=None,
yt_token_path=None,
):
"""
Executes a command on the YT. Listed below are options whose behavior is different from yatest.common.execute
:param command: can be a list of arguments or a string (all paths matched prefixes yatest.common.*_path will be fixed)
:param timeout: timeout for command executed on the YT (doesn't take into account the time spent for execution preparation - uploading/downloading data, etc)
:param cwd: ignored
:param env: all paths matched prefixes yatest.common.*_path will be fixed
:param stdin: stdin will be fully read before execution and uploaded to the YT
:param stdout: stdout will be available after the execution of the command on the YT. Set to False to skip downloading
:param stderr: same as stdout
:param process_progress_listener: ignored
:param preexec_fn: ignored
:param on_timeout: ignored
:param input_data: map of input files/dirs required for command run which will be uploaded to YT (local path -> YT sandbox path)
:param output_data: map of output files/dirs which will be downloaded from YT after command execution (YT sandbox path -> local path)
Take into account that runner will call os.path.dirname(YT sandbox path) to create intermediate directories for every entry
:param data_mine_strategy: allows to provide own function to mine input data and fix cmd. For more info take a look at *_mine_strategy()
:param env_mine_strategy: allows to provide own function to mine input data and fix env. For more info take a look at *_mine_strategy()
:param operation_spec: YT operation spec
:param task_spec: YT task spec
:param output_result_path: specify path to output archive. Used for test purposes
:param init_func: Function which will be executed before target program. See note below
:param fini_func: Function which will be executed after target program. See note below
:return: Execution object
.. note::
init_func and fini_func have some limitations:
        * every used module should be imported inside these functions, because the functions will be called in a different environment and required modules may not be imported or available
* you can only use built-in python modules (because test_tool uploads itself and runs init/fini func inside itself)
"""
test_tool_bin = test_tool_bin or _get_test_tool_bin()
data_mine_strategy = data_mine_strategy or default_mine_strategy
env_mine_strategy = env_mine_strategy or default_env_mine_strategy
if not wait:
raise NotImplementedError()
orig_command = command
command, env, to_upload, to_download = _fix_user_data(command, shell, env, input_data, output_data, data_mine_strategy, env_mine_strategy)
command_name = ytc.process.get_command_name(command)
exec_spec = {
'env': env,
'command': command,
'timeout': timeout,
'input_data': to_upload,
'output_data': to_download,
'description': operation_description,
}
if stdin:
if isinstance(stdin, basestring):
stdin_path = stdin
else:
logger.deubg('Reading stdin')
with tempfile.NamedTemporaryFile(delete=False) as afile:
afile.write(stdin.read())
stdin_path = afile.name
to_upload[stdin_path] = get_yt_sandbox_path('env/stdin')
exec_spec['stdin'] = get_yt_sandbox_path('env/stdin')
for stream, name, filename in [
(True, 'meta', runner_meta_path),
(stdout, 'stdout', target_stdout_path),
(stderr, 'stderr', target_stderr_path),
]:
if stream is not False:
path = get_yt_sandbox_path("env/{}".format(name))
exec_spec[name] = path
to_download[path] = filename or ytc.get_unique_file_path(ytc.work_path(), 'yt_vanilla_{}_{}'.format(command_name, name))
runner_log_dst = get_yt_sandbox_path('env/runner_log')
exec_spec['runner_log'] = runner_log_dst
to_download[runner_log_dst] = runner_log_path or ytc.path.get_unique_file_path(ytc.test_output_path(), 'yt_vanilla_wrapper_{}.log'.format(command_name))
exec_spec['op_spec'] = _get_spec(
default={
'max_failed_job_count': 2,
# Preventing dangling operations in case when test is get killed - see https://st.yandex-team.ru/DEVTOOLS-4753#1539181402000
'time_limit': int(1000 * 60 * 60 * 1.5) # 1.5h (milliseconds)
},
user=operation_spec,
)
exec_spec['task_spec'] = _get_spec(
default={'memory_limit': 3 * (1024 ** 3)},
user=task_spec,
mandatory={'job_count': 1},
)
if init_func:
exec_spec['init_func'] = _dump_func(init_func)
if fini_func:
exec_spec['fini_func'] = _dump_func(fini_func)
exec_spec_path = _dump_spec(spec_filename, exec_spec)
executor_cmd = [
test_tool_bin, 'yt_vanilla_execute',
'--spec-file', exec_spec_path,
'--log-path', operation_log_path or ytc.path.get_unique_file_path(ytc.test_output_path(), 'yt_vanilla_op_{}.log'.format(command_name)),
]
if yt_proxy:
executor_cmd += ['--yt-proxy', yt_proxy]
if output_result_path:
executor_cmd += ['--output-path', output_result_path]
if runner_log2stderr:
executor_cmd += ['--log2stderr']
executor_stderr = sys.stderr
else:
executor_stderr = None
if yt_token_path:
executor_cmd += ['--yt-token-path', yt_token_path]
res = ytc.execute(
executor_cmd,
stderr=executor_stderr,
collect_cores=collect_cores,
wait=False,
check_sanitizer=check_sanitizer,
executor=executor,
)
if wait:
res.wait(exec_spec, orig_command, stdout, stderr, check_exit_code, timeout)
return res |
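# Hedged usage sketch: one way the `execute` helper above could be invoked from a test.
# The command line, file names and sandbox paths below are hypothetical placeholders;
# only the parameter names and the mapping directions come from the docstring above.
def _example_yt_execute():
    res = execute(
        ['./my_tool', '--input', 'data.txt'],      # hypothetical command
        input_data={'data.txt': 'env/data.txt'},   # local path -> YT sandbox path
        output_data={'env/out.txt': 'out.txt'},    # YT sandbox path -> local path
        timeout=600,                               # time allowed on YT, per the docstring
    )
    return res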
Python | def fun(self):
"""Value of objective function at current iteration."""
if self._f is None:
self._f = self._fun(self._x)
        return self._f |
Python | def jac(self):
"""Value of jacobian of objective function at current iteration."""
if self._g is None:
self._g = self._jac(self._x)
        return self._g |
Python | def hess(self):
"""Value of hessian of objective function at current iteration."""
if self._h is None:
self._h = self._hess(self._x)
        return self._h |
Python | def jac_mag(self):
        """Magnitude of jacobian of objective function at current iteration."""
if self._g_mag is None:
self._g_mag = scipy.linalg.norm(self.jac)
        return self._g_mag |
Python | def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None,
subproblem=None, initial_trust_radius=1.0,
max_trust_radius=1000.0, eta=0.15, gtol=1e-4,
maxiter=None, disp=False, return_all=False,
callback=None, **unknown_options):
"""
Minimization of scalar function of one or more variables using a
trust-region algorithm.
Options for the trust-region algorithm are:
initial_trust_radius : float
Initial trust radius.
max_trust_radius : float
Never propose steps that are longer than this value.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than `gtol`
before successful termination.
maxiter : int
Maximum number of iterations to perform.
disp : bool
If True, print convergence message.
This function is called by the `minimize` function.
It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is currently required for trust-region '
'methods')
if hess is None and hessp is None:
raise ValueError('Either the Hessian or the Hessian-vector product '
'is currently required for trust-region methods')
if subproblem is None:
raise ValueError('A subproblem solving strategy is required for '
'trust-region methods')
    if not (0 <= eta < 0.25):
        raise ValueError('invalid acceptance stringency')
    if max_trust_radius <= 0:
        raise ValueError('the max trust radius must be positive')
if initial_trust_radius <= 0:
raise ValueError('the initial trust radius must be positive')
if initial_trust_radius >= max_trust_radius:
raise ValueError('the initial trust radius must be less than the '
'max trust radius')
# force the initial guess into a nice format
x0 = np.asarray(x0).flatten()
# Wrap the functions, for a couple reasons.
# This tracks how many times they have been called
# and it automatically passes the args.
nfun, fun = wrap_function(fun, args)
njac, jac = wrap_function(jac, args)
nhess, hess = wrap_function(hess, args)
nhessp, hessp = wrap_function(hessp, args)
# limit the number of iterations
if maxiter is None:
maxiter = len(x0)*200
# init the search status
warnflag = 0
# initialize the search
trust_radius = initial_trust_radius
x = x0
if return_all:
allvecs = [x]
m = subproblem(x, fun, jac, hess, hessp)
k = 0
# search for the function min
while True:
# Solve the sub-problem.
# This gives us the proposed step relative to the current position
# and it tells us whether the proposed step
# has reached the trust region boundary or not.
try:
p, hits_boundary = m.solve(trust_radius)
        except np.linalg.LinAlgError:
warnflag = 3
break
# calculate the predicted value at the proposed point
predicted_value = m(p)
# define the local approximation at the proposed point
x_proposed = x + p
m_proposed = subproblem(x_proposed, fun, jac, hess, hessp)
# evaluate the ratio defined in equation (4.4)
actual_reduction = m.fun - m_proposed.fun
predicted_reduction = m.fun - predicted_value
if predicted_reduction <= 0:
warnflag = 2
break
rho = actual_reduction / predicted_reduction
# update the trust radius according to the actual/predicted ratio
if rho < 0.25:
trust_radius *= 0.25
elif rho > 0.75 and hits_boundary:
trust_radius = min(2*trust_radius, max_trust_radius)
# if the ratio is high enough then accept the proposed step
if rho > eta:
x = x_proposed
m = m_proposed
# append the best guess, call back, increment the iteration count
if return_all:
allvecs.append(x)
if callback is not None:
callback(x)
k += 1
# check if the gradient is small enough to stop
if m.jac_mag < gtol:
warnflag = 0
break
# check if we have looked at enough iterations
if k >= maxiter:
warnflag = 1
break
# print some stuff if requested
status_messages = (
_status_message['success'],
_status_message['maxiter'],
'A bad approximation caused failure to predict improvement.',
'A linalg error occurred, such as a non-psd Hessian.',
)
if disp:
if warnflag == 0:
print(status_messages[warnflag])
else:
print('Warning: ' + status_messages[warnflag])
print(" Current function value: %f" % m.fun)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % nfun[0])
print(" Gradient evaluations: %d" % njac[0])
print(" Hessian evaluations: %d" % nhess[0])
result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag,
fun=m.fun, jac=m.jac, nfev=nfun[0], njev=njac[0],
nhev=nhess[0], nit=k,
message=status_messages[warnflag])
if hess is not None:
result['hess'] = m.hess
if return_all:
result['allvecs'] = allvecs
    return result |
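# Hedged illustration: `_minimize_trust_region` is reached through `scipy.optimize.minimize`
# rather than called directly. A minimal sketch using the Rosenbrock helpers shipped with
# SciPy (assumes a standard SciPy installation):
import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
res = minimize(rosen, x0, method='trust-ncg', jac=rosen_der, hess=rosen_hess,
               options={'gtol': 1e-8, 'disp': False})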
Python | def safecall(f, name, *args, **kwargs):
"""Call a LAPACK routine, determining lwork automatically and handling
error return values"""
lwork = kwargs.get("lwork", None)
if lwork in (None, -1):
kwargs['lwork'] = -1
ret = f(*args, **kwargs)
        kwargs['lwork'] = ret[-2][0].real.astype(int)
ret = f(*args, **kwargs)
if ret[-1] < 0:
raise ValueError("illegal value in %d-th argument of internal %s"
% (-ret[-1], name))
    return ret[:-2] |
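# Hedged usage sketch: driving a LAPACK routine through `safecall`, which performs the
# lwork workspace query and checks the returned info flag. The choice of routine (gees,
# which expects a selector callable as its first argument) is illustrative; assumes a
# standard SciPy installation.
import numpy
from scipy.linalg.lapack import get_lapack_funcs

a = numpy.array([[1.0, 2.0], [3.0, 4.0]])
gees, = get_lapack_funcs(('gees',), (a,))
t, sdim, wr, wi, vs = safecall(gees, "gees", lambda x: None, a)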
Python | def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
Return a boolean value for whether or not to run the build or not (avoid
parsing Cython and template files if False).
"""
if len(sys.argv) < 2:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
# Add commands that do more than print info, but also don't need Cython and
# template parsing.
info_commands.extend(['egg_info', 'install_egg_info', 'rotate'])
for command in info_commands:
if command in sys.argv[1:]:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg',
'build_sphinx')
for command in good_commands:
if command in sys.argv[1:]:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in sys.argv[1:]:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install scipy` (last SciPy release on PyPI)
"""))
return True
    if '--help' in sys.argv[1:] or '-h' in sys.argv[1:]:
print(textwrap.dedent("""
SciPy-specific help
-------------------
To install SciPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest SciPy release
from PyPI, use `pip install scipy`.
For help with build/installation issues, please ask on the
scipy-user mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/scipy/scipy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python runtests.py` (to build and test)
- `python runtests.py --no-build` (to test installed scipy)
- `>>> scipy.test()` (run tests for installed scipy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
with `twine upload -s <filenames>` instead.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in sys.argv[1:]:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# If we got here, we didn't detect what setup.py command was given
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates")
    return True |
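# Hedged sketch (assumption, not taken from the source file): how a setup.py driver might
# use the helper above so that info-only commands skip Cython and template generation.
def _example_setup_driver():
    run_build = parse_setuppy_commands()
    if run_build:
        # generate Cython sources and expand templates here, then call setup(...)
        pass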
Python | def choose_ncv(k):
"""
Choose number of lanczos vectors based on target number
of singular/eigen values and vectors to compute, k.
"""
    return max(2 * k + 1, 20) |
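# Worked illustration of the heuristic above: the floor of 20 Lanczos vectors dominates
# for small k, while 2*k + 1 takes over once k >= 10.
assert choose_ncv(5) == 20
assert choose_ncv(15) == 31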
Python | def linprog_terse_callback(xk, **kwargs):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
xk : array_like
The current solution vector.
**kwargs : dict
A dictionary containing the following parameters:
tableau : array_like
The current tableau of the simplex algorithm.
Its structure is defined in _solve_simplex.
vars : tuple(str, ...)
Column headers for each column in tableau.
"x[i]" for actual variables, "s[i]" for slack surplus variables,
"a[i]" for artificial variables, and "RHS" for the constraint
RHS vector.
phase : int
The current Phase of the simplex algorithm (1 or 2)
nit : int
The current iteration number.
pivot : tuple(int, int)
The index of the tableau selected as the next pivot,
or nan if no pivot exists
basics : list[tuple(int, float)]
A list of the current basic variables.
Each element contains the index of a basic variable and
its value.
complete : bool
True if the simplex algorithm has completed
(and this is the final call to callback), otherwise False.
"""
nit = kwargs["nit"]
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
    print(xk) |
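# Hedged usage sketch: passing the callback above to `scipy.optimize.linprog`, assuming a
# SciPy version whose simplex implementation provides this callback interface. The toy
# problem data mirror the standard linprog example.
from scipy.optimize import linprog

c = [-1.0, 4.0]
A_ub = [[-3.0, 1.0], [1.0, 2.0]]
b_ub = [6.0, 4.0]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
              callback=linprog_terse_callback)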
Python | def _solve_simplex(T, n, basis, maxiter=1000, phase=2, callback=None,
tol=1.0E-12, nit0=0, bland=False):
"""
Solve a linear programming problem in "standard maximization form" using
the Simplex Method.
Minimize :math:`f = c^T x`
subject to
.. math::
Ax = b
x_i >= 0
b_j >= 0
Parameters
----------
T : array_like
A 2-D array representing the simplex T corresponding to the
maximization problem. It should have the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0]]
for a Phase 2 problem, or the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0],
[c'[0], c'[1], ..., c'[n_total], 0]]
    for a Phase 1 problem (a problem in which a basic feasible solution is
    sought prior to maximizing the actual objective). T is modified in
    place by _solve_simplex.
n : int
The number of true variables in the problem.
basis : array
An array of the indices of the basic variables, such that basis[i]
contains the column corresponding to the basic variable for row i.
Basis is modified in place by _solve_simplex
maxiter : int
The maximum number of iterations to perform before aborting the
optimization.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row representing
an alternate objective function.
callback : callable, optional
If a callback function is provided, it will be called within each
iteration of the simplex algorithm. The callback must have the
signature `callback(xk, **kwargs)` where xk is the current solution
vector and kwargs is a dictionary containing the following::
"T" : The current Simplex algorithm T
"nit" : The current iteration.
"pivot" : The pivot (row, column) used for the next iteration.
"phase" : Whether the algorithm is in Phase 1 or Phase 2.
"basis" : The indices of the columns of the basic variables.
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
        enough to positive to serve as an optimal solution.
nit0 : int
The initial iteration number used to keep an accurate iteration total
in a two-phase problem.
bland : bool
If True, choose pivots using Bland's rule [3]. In problems which
fail to converge due to cycling, using Bland's rule can provide
convergence at the expense of a less optimal path about the simplex.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. Possible
values for the ``status`` attribute are:
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
See `OptimizeResult` for a description of other attributes.
"""
nit = nit0
complete = False
if phase == 1:
m = T.shape[0]-2
elif phase == 2:
m = T.shape[0]-1
else:
raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
if phase == 2:
# Check if any artificial variables are still in the basis.
# If yes, check if any coefficients from this row and a column
# corresponding to one of the non-artificial variable is non-zero.
# If found, pivot at this term. If not, start phase 2.
# Do this for all artificial variables in the basis.
# Ref: "An Introduction to Linear Programming and Game Theory"
# by Paul R. Thie, Gerard E. Keough, 3rd Ed,
        # Chapter 3.7 Redundant Systems (page 102)
for pivrow in [row for row in range(basis.size)
if basis[row] > T.shape[1] - 2]:
non_zero_row = [col for col in range(T.shape[1] - 1)
if T[pivrow, col] != 0]
if len(non_zero_row) > 0:
pivcol = non_zero_row[0]
# variable represented by pivcol enters
# variable in basis[pivrow] leaves
basis[pivrow] = pivcol
pivval = T[pivrow][pivcol]
T[pivrow, :] = T[pivrow, :] / pivval
for irow in range(T.shape[0]):
if irow != pivrow:
T[irow, :] = T[irow, :] - T[pivrow, :]*T[irow, pivcol]
nit += 1
if len(basis[:m]) == 0:
solution = np.zeros(T.shape[1] - 1, dtype=np.float64)
else:
solution = np.zeros(max(T.shape[1] - 1, max(basis[:m]) + 1),
dtype=np.float64)
while not complete:
# Find the pivot column
pivcol_found, pivcol = _pivot_col(T, tol, bland)
if not pivcol_found:
pivcol = np.nan
pivrow = np.nan
status = 0
complete = True
else:
# Find the pivot row
pivrow_found, pivrow = _pivot_row(T, pivcol, phase, tol)
if not pivrow_found:
status = 3
complete = True
if callback is not None:
solution[:] = 0
solution[basis[:m]] = T[:m, -1]
callback(solution[:n], **{"tableau": T,
"phase":phase,
"nit":nit,
"pivot":(pivrow, pivcol),
"basis":basis,
"complete": complete and phase == 2})
if not complete:
if nit >= maxiter:
# Iteration limit exceeded
status = 1
complete = True
else:
# variable represented by pivcol enters
# variable in basis[pivrow] leaves
basis[pivrow] = pivcol
pivval = T[pivrow][pivcol]
T[pivrow, :] = T[pivrow, :] / pivval
for irow in range(T.shape[0]):
if irow != pivrow:
T[irow, :] = T[irow, :] - T[pivrow, :]*T[irow, pivcol]
nit += 1
    return nit, status |
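# Hedged illustration of the tableau layout described in the docstring above, for the
# tiny problem  minimize -x0 - x1  subject to  x0 + x1 <= 4  (one slack variable s0).
# Calling _solve_simplex directly is unusual (linprog normally builds the tableau); this
# only shows the expected [A | b] / [c | 0] layout and starting basis.
import numpy as np

T = np.array([
    [1.0, 1.0, 1.0, 4.0],     # constraint row: x0, x1, s0, RHS
    [-1.0, -1.0, 0.0, 0.0],   # objective row: c, 0
])
basis = np.array([2])         # the slack variable starts in the basis
nit, status = _solve_simplex(T, n=2, basis=basis)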
Python | def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True):
"""
Solve a matrix equation using the LGMRES algorithm.
The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
in the convergence in restarted GMRES, and often converges in fewer
iterations.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real or complex N-by-N matrix of the linear system.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : {array, matrix}
Starting guess for the solution.
tol : float, optional
Tolerance to achieve. The algorithm terminates when either the relative
or the absolute residual is below `tol`.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
inner_m : int, optional
Number of inner GMRES iterations per each outer iteration.
outer_k : int, optional
Number of vectors to carry between inner GMRES iterations.
According to [1]_, good values are in the range of 1...3.
However, note that if you want to use the additional vectors to
accelerate solving multiple similar problems, larger values may
be beneficial.
outer_v : list of tuples, optional
List containing tuples ``(v, Av)`` of vectors and corresponding
matrix-vector products, used to augment the Krylov subspace, and
carried between inner GMRES iterations. The element ``Av`` can
be `None` if the matrix-vector product should be re-evaluated.
This parameter is modified in-place by `lgmres`, and can be used
to pass "guess" vectors in and out of the algorithm when solving
similar problems.
store_outer_Av : bool, optional
Whether LGMRES should store also A*v in addition to vectors `v`
in the `outer_v` list. Default is True.
Returns
-------
x : array or matrix
The converged solution.
info : int
Provides convergence information:
- 0 : successful exit
- >0 : convergence to tolerance not achieved, number of iterations
- <0 : illegal input or breakdown
Notes
-----
The LGMRES algorithm [1]_ [2]_ is designed to avoid the
slowing of convergence in restarted GMRES, due to alternating
residual vectors. Typically, it often outperforms GMRES(m) of
comparable memory requirements by some measure, or at least is not
much worse.
Another advantage in this algorithm is that you can supply it with
'guess' vectors in the `outer_v` argument that augment the Krylov
subspace. If the solution lies close to the span of these vectors,
the algorithm converges faster. This can be useful if several very
similar matrices need to be inverted one after another, such as in
Newton-Krylov iteration where the Jacobian matrix often changes
little in the nonlinear steps.
References
----------
.. [1] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
.. [2] A.H. Baker, PhD thesis, University of Colorado (2003).
http://amath.colorado.edu/activities/thesis/allisonb/Thesis.ps
"""
    A, M, x, b, postprocess = make_system(A, M, x0, b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
matvec = A.matvec
psolve = M.matvec
if outer_v is None:
outer_v = []
axpy, dot, scal = None, None, None
nrm2 = get_blas_funcs('nrm2', [b])
b_norm = nrm2(b)
if b_norm == 0:
b_norm = 1
for k_outer in xrange(maxiter):
r_outer = matvec(x) - b
# -- callback
if callback is not None:
callback(x)
# -- determine input type routines
if axpy is None:
if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
x = x.astype(r_outer.dtype)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
(x, r_outer))
trtrs = get_lapack_funcs('trtrs', (x, r_outer))
# -- check stopping condition
r_norm = nrm2(r_outer)
if r_norm <= tol * b_norm or r_norm <= tol:
break
# -- inner LGMRES iteration
vs0 = -psolve(r_outer)
inner_res_0 = nrm2(vs0)
if inner_res_0 == 0:
rnorm = nrm2(r_outer)
raise RuntimeError("Preconditioner returned a zero vector; "
"|v| ~ %.1g, |M v| = 0" % rnorm)
vs0 = scal(1.0/inner_res_0, vs0)
vs = [vs0]
ws = []
y = None
# H is stored in QR factorized form
Q = np.ones((1, 1), dtype=vs0.dtype)
R = np.zeros((1, 0), dtype=vs0.dtype)
eps = np.finfo(vs0.dtype).eps
breakdown = False
for j in xrange(1, 1 + inner_m + len(outer_v)):
# -- Arnoldi process:
#
# Build an orthonormal basis V and matrices W and H such that
# A W = V H
# Columns of W, V, and H are stored in `ws`, `vs` and `hs`.
#
# The first column of V is always the residual vector, `vs0`;
# V has *one more column* than the other of the three matrices.
#
# The other columns in V are built by feeding in, one
# by one, some vectors `z` and orthonormalizing them
# against the basis so far. The trick here is to
# feed in first some augmentation vectors, before
# starting to construct the Krylov basis on `v0`.
#
# It was shown in [BJM]_ that a good choice (the LGMRES choice)
# for these augmentation vectors are the `dx` vectors obtained
# from a couple of the previous restart cycles.
#
# Note especially that while `vs0` is always the first
# column in V, there is no reason why it should also be
# the first column in W. (In fact, below `vs0` comes in
# W only after the augmentation vectors.)
#
# The rest of the algorithm then goes as in GMRES, one
# solves a minimization problem in the smaller subspace
# spanned by W (range) and V (image).
#
# ++ evaluate
v_new = None
if j < len(outer_v) + 1:
z, v_new = outer_v[j-1]
elif j == len(outer_v) + 1:
z = vs0
else:
z = vs[-1]
if v_new is None:
v_new = psolve(matvec(z))
else:
# Note: v_new is modified in-place below. Must make a
# copy to ensure that the outer_v vectors are not
# clobbered.
v_new = v_new.copy()
# ++ orthogonalize
v_new_norm = nrm2(v_new)
hcur = np.zeros(j+1, dtype=Q.dtype)
for i, v in enumerate(vs):
alpha = dot(v, v_new)
hcur[i] = alpha
v_new = axpy(v, v_new, v.shape[0], -alpha) # v_new -= alpha*v
hcur[-1] = nrm2(v_new)
with np.errstate(over='ignore', divide='ignore'):
# Careful with denormals
alpha = 1/hcur[-1]
if np.isfinite(alpha):
v_new = scal(alpha, v_new)
if not (hcur[-1] > eps * v_new_norm):
# v_new essentially in the span of previous vectors,
# or we have nans. Bail out after updating the QR
# solution.
breakdown = True
vs.append(v_new)
ws.append(z)
# -- GMRES optimization problem
# Add new column to H=Q*R, padding other columns with zeros
Q2 = np.zeros((j+1, j+1), dtype=Q.dtype, order='F')
Q2[:j,:j] = Q
Q2[j,j] = 1
R2 = np.zeros((j+1, j-1), dtype=R.dtype, order='F')
R2[:j,:] = R
Q, R = qr_insert(Q2, R2, hcur, j-1, which='col',
overwrite_qru=True, check_finite=False)
# Transformed least squares problem
# || Q R y - inner_res_0 * e_1 ||_2 = min!
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
# Residual is immediately known
inner_res = abs(Q[0,-1]) * inner_res_0
# -- check for termination
if inner_res <= tol * inner_res_0 or breakdown:
break
if not np.isfinite(R[j-1,j-1]):
# nans encountered, bail out
return postprocess(x), k_outer + 1
# -- Get the LSQ problem solution
#
# The problem is triangular, but the condition number may be
# bad (or in case of breakdown the last diagonal entry may be
# zero), so use lstsq instead of trtrs.
y, _, _, _, = lstsq(R[:j,:j], Q[0,:j].conj())
y *= inner_res_0
if not np.isfinite(y).all():
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
return postprocess(x), k_outer + 1
# -- GMRES terminated: eval solution
dx = ws[0]*y[0]
for w, yc in zip(ws[1:], y[1:]):
dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc
# -- Store LGMRES augmentation vectors
nx = nrm2(dx)
if nx > 0:
if store_outer_Av:
q = Q.dot(R.dot(y))
ax = vs[0]*q[0]
for v, qc in zip(vs[1:], q[1:]):
ax = axpy(v, ax, ax.shape[0], qc)
outer_v.append((dx/nx, ax/nx))
else:
outer_v.append((dx/nx, None))
# -- Retain only a finite number of augmentation vectors
while len(outer_v) > outer_k:
del outer_v[0]
# -- Apply step
x += dx
else:
# didn't converge ...
return postprocess(x), maxiter
    return postprocess(x), 0 |
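# Hedged usage sketch: solving a small sparse system with the solver above (exposed as
# scipy.sparse.linalg.lgmres in a standard SciPy installation). The data are illustrative.
import numpy as np
from scipy.sparse import csc_matrix

A = csc_matrix([[3.0, 2.0, 0.0], [1.0, -1.0, 0.0], [0.0, 5.0, 1.0]])
b = np.array([2.0, 4.0, -1.0])
x, info = lgmres(A, b, tol=1e-8)
assert info == 0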
Python | def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
Return 2-tuple (y1,t1) where y1 is the result and t=t1
defines the stoppage coordinate of the result.
"""
raise NotImplementedError('all integrators must define '
                                  'run(f, jac, t0, t1, y0, f_params, jac_params)') |
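# Hedged sketch (assumption): a minimal subclass satisfying the `run` contract above with
# a single explicit Euler step. It is purely illustrative, not one of the shipped
# integrators, and it assumes the usual f(t, y, *f_params) right-hand-side signature.
class _EulerStepIntegrator(object):
    success = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        y1 = y0 + (t1 - t0) * f(t0, y0, *f_params)
        self.success = True
        return y1, t1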
Python | def with_special_errors(func):
"""
Enable special function errors (such as underflow, overflow,
loss of precision, etc.)
"""
def wrapper(*a, **kw):
old_filters = list(getattr(warnings, 'filters', []))
old_errprint = sc.errprint(1)
warnings.filterwarnings("error", category=sc.SpecialFunctionWarning)
try:
return func(*a, **kw)
finally:
sc.errprint(old_errprint)
setattr(warnings, 'filters', old_filters)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
    return wrapper |
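# Hedged usage sketch: the decorator above is intended for test functions, so that any
# special-function error condition reported as a SpecialFunctionWarning fails the test.
# `sc` is assumed to be scipy.special, as in the surrounding test utilities.
@with_special_errors
def test_example_special_call():
    sc.airy(1.0)  # illustrative call; an error during evaluation would raise here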
Python | def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is
installed. Default: True
Notes
-----
The default sparse solver is umfpack when available
(scikits.umfpack is installed). This can be changed by passing
useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    you are sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
    to gain some speed.
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
    #TODO: pass other options to scikit |
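# Hedged usage sketch: forcing the always-available SuperLU path before a solve, assuming
# scipy.sparse / scipy.sparse.linalg are importable as in a standard installation.
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve

use_solver(useUmfpack=False)
A = csc_matrix([[4.0, 1.0], [1.0, 3.0]])
x = spsolve(A, np.array([1.0, 2.0]))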
Python | def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
family = {'di': 'di', 'Di': 'zi', 'dl': 'dl', 'Dl': 'zl'}
dt = A.dtype.char + A.indices.dtype.char
    return family[dt] |
Python | def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
check_finite=True):
"""
Compute Schur decomposition of a matrix.
The Schur decomposition is::
A = Z T Z^H
where Z is unitary and T is either upper-triangular, or for real
Schur decomposition (output='real'), quasi-upper triangular. In
the quasi-triangular form, 2x2 blocks describing complex-valued
eigenvalue pairs may extrude from the diagonal.
Parameters
----------
a : (M, M) array_like
Matrix to decompose
output : {'real', 'complex'}, optional
Construct the real or complex Schur decomposition (for real matrices).
lwork : int, optional
Work array size. If None or -1, it is automatically computed.
overwrite_a : bool, optional
Whether to overwrite data in a (may improve performance).
sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given an eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True).
Alternatively, string parameters may be used::
'lhp' Left-hand plane (x.real < 0.0)
'rhp' Right-hand plane (x.real > 0.0)
'iuc' Inside the unit circle (x*x.conjugate() <= 1.0)
'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
Defaults to None (no sorting).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
T : (M, M) ndarray
Schur form of A. It is real-valued for the real Schur decomposition.
Z : (M, M) ndarray
        A unitary Schur transformation matrix for A.
It is real-valued for the real Schur decomposition.
sdim : int
If and only if sorting was requested, a third return value will
contain the number of eigenvalues satisfying the sort condition.
Raises
------
LinAlgError
Error raised under three conditions:
1. The algorithm failed due to a failure of the QR algorithm to
compute all eigenvalues
2. If eigenvalue sorting was requested, the eigenvalues could not be
reordered due to a failure to separate eigenvalues, usually because
of poor conditioning
3. If eigenvalue sorting was requested, roundoff errors caused the
leading eigenvalues to no longer satisfy the sorting condition
See also
--------
rsf2csf : Convert real Schur form to complex Schur form
"""
if output not in ['real','complex','r','c']:
raise ValueError("argument must be 'real', or 'complex'")
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
raise ValueError('expected square matrix')
typ = a1.dtype.char
if output in ['complex','c'] and typ not in ['F','D']:
if typ in _double_precision:
a1 = a1.astype('D')
typ = 'D'
else:
a1 = a1.astype('F')
typ = 'F'
overwrite_a = overwrite_a or (_datacopied(a1, a))
gees, = get_lapack_funcs(('gees',), (a1,))
if lwork is None or lwork == -1:
# get optimal work array
result = gees(lambda x: None, a1, lwork=-1)
        lwork = result[-2][0].real.astype(numpy.int_)
if sort is None:
sort_t = 0
sfunction = lambda x: None
else:
sort_t = 1
if callable(sort):
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x: (numpy.real(x) < 0.0)
elif sort == 'rhp':
sfunction = lambda x: (numpy.real(x) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x: (abs(x) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x: (abs(x) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or " +
"one of ('lhp','rhp','iuc','ouc')")
result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
sort_t=sort_t)
info = result[-1]
if info < 0:
raise ValueError('illegal value in %d-th argument of internal gees'
% -info)
elif info == a1.shape[0] + 1:
raise LinAlgError('Eigenvalues could not be separated for reordering.')
elif info == a1.shape[0] + 2:
raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
elif info > 0:
raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
if sort_t == 0:
return result[0], result[-3]
else:
        return result[0], result[-3], result[1]
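A small usage sketch with the standard ``scipy.linalg`` API: compute the real Schur form, check the reconstruction ``A = Z T Z^H``, and request sorting to obtain the extra ``sdim`` return value:

import numpy as np
from scipy.linalg import schur

A = np.array([[0., 2., 2.],
              [0., 1., 2.],
              [1., 0., 1.]])
T, Z = schur(A)                                # real (quasi-triangular) form
assert np.allclose(Z.dot(T).dot(Z.conj().T), A)

T2, Z2, sdim = schur(A, sort='lhp')            # sdim counts eigenvalues that
print(sdim)                                    # satisfy the sort condition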
Python | def rsf2csf(T, Z, check_finite=True):
"""
Convert real Schur form to complex Schur form.
Convert a quasi-diagonal real-valued Schur form to the upper triangular
complex-valued Schur form.
Parameters
----------
T : (M, M) array_like
Real Schur form of the original matrix
Z : (M, M) array_like
Schur transformation matrix
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
T : (M, M) ndarray
Complex Schur form of the original matrix
Z : (M, M) ndarray
Schur transformation matrix corresponding to the complex form
See also
--------
schur : Schur decompose a matrix
"""
if check_finite:
Z, T = map(asarray_chkfinite, (Z, T))
else:
Z,T = map(asarray, (Z,T))
if len(Z.shape) != 2 or Z.shape[0] != Z.shape[1]:
raise ValueError("matrix must be square.")
if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
raise ValueError("matrix must be square.")
if T.shape[0] != Z.shape[0]:
raise ValueError("matrices must be same dimension.")
N = T.shape[0]
arr = numpy.array
t = _commonType(Z, T, arr([3.0],'F'))
Z, T = _castCopy(t, Z, T)
conj = numpy.conj
dot = numpy.dot
r_ = numpy.r_
transp = numpy.transpose
for m in range(N-1, 0, -1):
if abs(T[m,m-1]) > eps*(abs(T[m-1,m-1]) + abs(T[m,m])):
k = slice(m-1, m+1)
mu = eigvals(T[k,k]) - T[m,m]
r = misc.norm([mu[0], T[m,m-1]])
c = mu[0] / r
s = T[m,m-1] / r
G = r_[arr([[conj(c), s]], dtype=t), arr([[-s, c]], dtype=t)]
Gc = conj(transp(G))
j = slice(m-1, N)
T[k,j] = dot(G, T[k,j])
i = slice(0, m+1)
T[i,k] = dot(T[i,k], Gc)
i = slice(0, N)
Z[i,k] = dot(Z[i,k], Gc)
T[m,m-1] = 0.0
    return T, Z
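A companion sketch converting the real Schur factors from ``schur`` into the complex upper-triangular form:

import numpy as np
from scipy.linalg import schur, rsf2csf

A = np.array([[0., 2., 2.],
              [0., 1., 2.],
              [1., 0., 1.]])
T, Z = schur(A)                                # quasi-triangular, real-valued
Tc, Zc = rsf2csf(T, Z)                         # upper triangular, complex-valued
assert np.allclose(Zc.dot(Tc).dot(Zc.conj().T), A)
assert np.allclose(np.tril(Tc, -1), 0)         # no sub-diagonal 2x2 blocks remain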