Dataset columns: rem (string, length 0 to 322k), add (string, length 0 to 2.05M), context (string, length 8 to 228k).
c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,other.rowind[:nnz2],other.indptr)
c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,ocs.rowind[:nnz2],ocs.indptr)
def __add__(self, other):
    ocs = csc_matrix(other)
    if (ocs.shape != self.shape):
        raise ValueError, "Inconsistent shapes."
    typecode = _coerce_rules[(self.typecode,other.typecode)]
    nnz1, nnz2 = self.nnz, other.nnz
    data1, data2 = _convert_data(self.data[:nnz1], other.data[:nnz2], typecode)
    func = getattr(sparsetools,_transtabl[typecode]+'cscadd')
    c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,other.rowind[:nnz2],other.indptr)
    if ierr:
        raise ValueError, "Ran out of space (but shouldn't have happened)."
    M, N = self.shape
    return csc_matrix(c,(rowc,ptrc),M=M,N=N)
tcode = s.typecode
func = getattr(sparsetools,tcode+'transp')
func = getattr(sparsetools,s.ftype+'transp')
def __init__(self,s,ij=None,M=None,N=None,nzmax=100,typecode=Float,copy=0):
    spmatrix.__init__(self, 'csr')
    if isinstance(s,spmatrix):
        if isinstance(s, csr_matrix):
            # do nothing but copy information
            self.shape = s.shape
            if copy:
                self.data = s.data.copy()
                self.colind = s.colind.copy()
                self.indptr = s.indptr.copy()
            else:
                self.data = s.data
                self.colind = s.colind
                self.indptr = s.indptr
        elif isinstance(s, csc_matrix):
            self.shape = s.shape
            tcode = s.typecode
            func = getattr(sparsetools,tcode+'transp')
            self.data, self.colind, self.indptr = \
                       func(s.data, s.rowind, s.indptr)
        else:
            try:
                temp = s.tocsr()
            except AttributeError:
                temp = csr_matrix(s.tocsc())
            self.data = temp.data
            self.rowind = temp.rowind
            self.indptr = temp.indptr
            self.shape = temp.shape
    elif isinstance(s,type(3)):
        M=s
        N=ij
        self.data = zeros((nzmax,),typecode)
        self.colind = zeros((nzmax,),'i')
        self.indptr = zeros((N+1,),'i')
        self.shape = (M,N)
    elif (isinstance(s,ArrayType) or \
          isinstance(s,type([]))):
        s = asarray(s)
        if (rank(s) == 2):  # converting from a full array
            ocsc = csc_matrix(transpose(s))
            self.shape = ocsc.shape[1], ocsc.shape[0]
            self.colind = ocsc.rowind
            self.indptr = ocsc.indptr
            self.data = ocsc.data
        elif isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s),2)):
            ijnew = ij.copy()
            ijnew[:,0] = ij[:,1]
            ijnew[:,1] = ij[:,0]
            temp = coo_matrix(s,ijnew,M=M,N=N,nzmax=nzmax,
                              typecode=typecode)
            temp = temp.tocsc()
            self.data = temp.data
            self.colind = temp.colind
            self.indptr = temp.indptr
            self.shape = temp.shape
        elif isinstance(ij, types.TupleType) and (len(ij)==2):
            self.data = asarray(s)
            self.colind = ij[0]
            self.indptr = ij[1]
            if N is None:
                N = max(self.colind)
            if M is None:
                M = len(self.indptr) - 1
            self.shape = (M,N)
        else:
            raise ValueError, "Unrecognized form for csr_matrix constructor."
    else:
        raise ValueError, "Unrecognized form for csr_matrix constructor."
func(s.data, s.rowind, s.indptr)
func(s.shape[1], s.data, s.rowind, s.indptr)
def __init__(self,s,ij=None,M=None,N=None,nzmax=100,typecode=Float,copy=0):
    spmatrix.__init__(self, 'csr')
    if isinstance(s,spmatrix):
        if isinstance(s, csr_matrix):
            # do nothing but copy information
            self.shape = s.shape
            if copy:
                self.data = s.data.copy()
                self.colind = s.colind.copy()
                self.indptr = s.indptr.copy()
            else:
                self.data = s.data
                self.colind = s.colind
                self.indptr = s.indptr
        elif isinstance(s, csc_matrix):
            self.shape = s.shape
            tcode = s.typecode
            func = getattr(sparsetools,tcode+'transp')
            self.data, self.colind, self.indptr = \
                       func(s.data, s.rowind, s.indptr)
        else:
            try:
                temp = s.tocsr()
            except AttributeError:
                temp = csr_matrix(s.tocsc())
            self.data = temp.data
            self.rowind = temp.rowind
            self.indptr = temp.indptr
            self.shape = temp.shape
    elif isinstance(s,type(3)):
        M=s
        N=ij
        self.data = zeros((nzmax,),typecode)
        self.colind = zeros((nzmax,),'i')
        self.indptr = zeros((N+1,),'i')
        self.shape = (M,N)
    elif (isinstance(s,ArrayType) or \
          isinstance(s,type([]))):
        s = asarray(s)
        if (rank(s) == 2):  # converting from a full array
            ocsc = csc_matrix(transpose(s))
            self.shape = ocsc.shape[1], ocsc.shape[0]
            self.colind = ocsc.rowind
            self.indptr = ocsc.indptr
            self.data = ocsc.data
        elif isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s),2)):
            ijnew = ij.copy()
            ijnew[:,0] = ij[:,1]
            ijnew[:,1] = ij[:,0]
            temp = coo_matrix(s,ijnew,M=M,N=N,nzmax=nzmax,
                              typecode=typecode)
            temp = temp.tocsc()
            self.data = temp.data
            self.colind = temp.colind
            self.indptr = temp.indptr
            self.shape = temp.shape
        elif isinstance(ij, types.TupleType) and (len(ij)==2):
            self.data = asarray(s)
            self.colind = ij[0]
            self.indptr = ij[1]
            if N is None:
                N = max(self.colind)
            if M is None:
                M = len(self.indptr) - 1
            self.shape = (M,N)
        else:
            raise ValueError, "Unrecognized form for csr_matrix constructor."
    else:
        raise ValueError, "Unrecognized form for csr_matrix constructor."
if (len(self.data) != len(self.colind)):
if (len(self.data) != nzmax):
def _check(self):
    M,N = self.shape
    if (rank(self.data) != 1) or (rank(self.colind) != 1) or \
       (rank(self.indptr) != 1):
        raise ValueError, "Data, colind, and indptr arrays "\
              "should be rank 1."
    if (len(self.data) != len(self.colind)):
        raise ValueError, "Data and row list should have same length"
    if (len(self.indptr) != M+1):
        raise ValueError, "Index pointer should be of length #rows + 1"
    if (nzmax < nnz):
        raise ValueError, "Nzmax must not be less than nnz."
    if (nnz>0) and (max(self.colind[:nnz]) >= M):
        raise ValueError, "Column-values must be < N."
    if (self.indptr[-1] > len(self.colind)):
        raise ValueError, \
              "Last value of index list should be less than "\
              "the size of data list"
    self.nnz = self.indptr[-1]
    self.nzmax = len(self.colind)
    self.typecode = self.data.typecode()
    if self.typecode not in 'fdFD':
        self.typecode = 'd'
        self.data = self.data.astype('d')
    self.ftype = _transtabl[self.typecode]
if (nzmax < nnz): raise ValueError, "Nzmax must not be less than nnz."
def _check(self):
    M,N = self.shape
    if (rank(self.data) != 1) or (rank(self.colind) != 1) or \
       (rank(self.indptr) != 1):
        raise ValueError, "Data, colind, and indptr arrays "\
              "should be rank 1."
    if (len(self.data) != len(self.colind)):
        raise ValueError, "Data and row list should have same length"
    if (len(self.indptr) != M+1):
        raise ValueError, "Index pointer should be of length #rows + 1"
    if (nzmax < nnz):
        raise ValueError, "Nzmax must not be less than nnz."
    if (nnz>0) and (max(self.colind[:nnz]) >= M):
        raise ValueError, "Column-values must be < N."
    if (self.indptr[-1] > len(self.colind)):
        raise ValueError, \
              "Last value of index list should be less than "\
              "the size of data list"
    self.nnz = self.indptr[-1]
    self.nzmax = len(self.colind)
    self.typecode = self.data.typecode()
    if self.typecode not in 'fdFD':
        self.typecode = 'd'
        self.data = self.data.astype('d')
    self.ftype = _transtabl[self.typecode]
if (self.indptr[-1] > len(self.colind)):
if (nnz > nzmax):
def _check(self):
    M,N = self.shape
    if (rank(self.data) != 1) or (rank(self.colind) != 1) or \
       (rank(self.indptr) != 1):
        raise ValueError, "Data, colind, and indptr arrays "\
              "should be rank 1."
    if (len(self.data) != len(self.colind)):
        raise ValueError, "Data and row list should have same length"
    if (len(self.indptr) != M+1):
        raise ValueError, "Index pointer should be of length #rows + 1"
    if (nzmax < nnz):
        raise ValueError, "Nzmax must not be less than nnz."
    if (nnz>0) and (max(self.colind[:nnz]) >= M):
        raise ValueError, "Column-values must be < N."
    if (self.indptr[-1] > len(self.colind)):
        raise ValueError, \
              "Last value of index list should be less than "\
              "the size of data list"
    self.nnz = self.indptr[-1]
    self.nzmax = len(self.colind)
    self.typecode = self.data.typecode()
    if self.typecode not in 'fdFD':
        self.typecode = 'd'
        self.data = self.data.astype('d')
    self.ftype = _transtabl[self.typecode]
self.nnz = self.indptr[-1]
self.nzmax = len(self.colind)
def _check(self):
    M,N = self.shape
    if (rank(self.data) != 1) or (rank(self.colind) != 1) or \
       (rank(self.indptr) != 1):
        raise ValueError, "Data, colind, and indptr arrays "\
              "should be rank 1."
    if (len(self.data) != len(self.colind)):
        raise ValueError, "Data and row list should have same length"
    if (len(self.indptr) != M+1):
        raise ValueError, "Index pointer should be of length #rows + 1"
    if (nzmax < nnz):
        raise ValueError, "Nzmax must not be less than nnz."
    if (nnz>0) and (max(self.colind[:nnz]) >= M):
        raise ValueError, "Column-values must be < N."
    if (self.indptr[-1] > len(self.colind)):
        raise ValueError, \
              "Last value of index list should be less than "\
              "the size of data list"
    self.nnz = self.indptr[-1]
    self.nzmax = len(self.colind)
    self.typecode = self.data.typecode()
    if self.typecode not in 'fdFD':
        self.typecode = 'd'
        self.data = self.data.astype('d')
    self.ftype = _transtabl[self.typecode]
new = csr_matrix(N,M,nzmax=0,typecode=self.typecode)
new = csc_matrix(N,M,nzmax=0,typecode=self.typecode)
def transp(self, copy=0):
    M,N = self.shape
    new = csr_matrix(N,M,nzmax=0,typecode=self.typecode)
    if copy:
        new.data = self.data.copy()
        new.colind = self.rowind.copy()
        new.indptr = self.indptr.copy()
    else:
        new.data = self.data
        new.colind = self.rowind
        new.indptr = self.indptr
    new._check()
    return new
new.colind = self.rowind.copy()
new.rowind = self.colind.copy()
def transp(self, copy=0):
    M,N = self.shape
    new = csr_matrix(N,M,nzmax=0,typecode=self.typecode)
    if copy:
        new.data = self.data.copy()
        new.colind = self.rowind.copy()
        new.indptr = self.indptr.copy()
    else:
        new.data = self.data
        new.colind = self.rowind
        new.indptr = self.indptr
    new._check()
    return new
new.colind = self.rowind
new.rowind = self.colind
def transp(self, copy=0):
    M,N = self.shape
    new = csr_matrix(N,M,nzmax=0,typecode=self.typecode)
    if copy:
        new.data = self.data.copy()
        new.colind = self.rowind.copy()
        new.indptr = self.indptr.copy()
    else:
        new.data = self.data
        new.colind = self.rowind
        new.indptr = self.indptr
    new._check()
    return new
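The transpose diffs above all rely on the same fact, sketched here with modern scipy.sparse and numpy (these classes and functions are illustrative and are not the Numeric-era code in the rows): the transpose of a compressed-sparse-column matrix is a compressed-sparse-row matrix built from the very same data/indices/indptr arrays, with the shape swapped.

from scipy import sparse

A = sparse.random(4, 5, density=0.4, format="csc", random_state=0)

# Reinterpret the CSC buffers (data, row indices, column pointers) as CSR
# buffers for the swapped shape -- no copying or re-sorting is required.
At = sparse.csr_matrix((A.data, A.indices, A.indptr),
                       shape=(A.shape[1], A.shape[0]))

assert (At.toarray() == A.toarray().T).all()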
dict['__header__'] = fid.fid.read(124).strip(' \t\n\000')
dict['__header__'] = fid.raw_read(124).strip(' \t\n\000')
def _parse_header(fid, dict):
    correct_endian = (ord('M')<<8) + ord('I')  # if this number is read no BS
    fid.seek(126)  # skip to endian detector
    endian_test = fid.read(1,'int16')
    if (endian_test == correct_endian):
        openstr = 'n'
    else:  # must byteswap
        if LittleEndian:
            openstr = 'b'
        else:
            openstr = 'l'
    fid.setformat(openstr)  # change byte-order if necessary
    fid.rewind()
    dict['__header__'] = fid.fid.read(124).strip(' \t\n\000')
    vers = fid.read(1,'int16')
    dict['__version__'] = '%d.%d' % (vers >> 8, vers & 255)
    fid.seek(2,1)  # move to start of data
    return
test = fid.fid.read(1)
test = fid.raw_read(1)
def _get_element(fid):
    test = fid.fid.read(1)
    if len(test) == 0:  # nothing left
        raise EOFError
    else:
        fid.rewind(1)

    # get the data tag
    raw_tag = fid.read(1,'u')

    # check for compressed
    numbytes = raw_tag >> 16
    if numbytes > 0:  # compressed format
        if numbytes > 4:
            raise IOError, "Problem with MAT file: " "too many bytes in compressed format."
        dtype = raw_tag & 65535
        el = fid.read(numbytes,miDataTypes[dtype][2],c_is_b=1)
        fid.seek(4-numbytes,1)  # skip padding
        return el, None

    # otherwise parse tag
    dtype = raw_tag
    numbytes = fid.read(1,'u')
    if dtype != miMATRIX:  # basic data type
        try:
            outarr = fid.read(numbytes,miDataTypes[dtype][2],c_is_b=1)
        except KeyError:
            raise ValueError, "Unknown data type"
        mod8 = numbytes%8
        if mod8:  # skip past padding
            skip = 8-mod8
            fid.seek(skip,1)
        return outarr, None

    # handle miMatrix type
    el, name = _parse_mimatrix(fid,numbytes)
    return el, name
if typecode is None:
    return out
else:
    return out.astype(typecode)
if typecode is not None:
    out = out.astype(typecode)
if not isinstance(out, ndarray):
    out = asarray(out)
return out
def valarray(shape,value=nan,typecode=None):
    """Return an array of all value.
    """
    out = reshape(repeat([value],product(shape)),shape)
    if typecode is None:
        return out
    else:
        return out.astype(typecode)
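For reference, a minimal modern-numpy sketch of what the valarray helper above computes; np.full is not part of the original snippet and is used only for illustration.

import numpy as np

# Build an array filled with a single repeated value.
out = np.full((2, 3), np.nan)
assert out.shape == (2, 3) and np.isnan(out).all()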
if not _active.window_is_alive():
if not _active.proxy_object_alive:
def validate_active():
    global _active
    if _active is None:
        figure()
    try:
        if not _active.window_is_alive():
            _active = None
            figure()
    except:
        pass
"""
""" fs =float(fs)
def bilinear(b,a,fs=1.0):
    """Return a digital filter from an analog filter using the bilinear transform.

    The bilinear transform substitutes (z-1) / (z+1) for s
    """
    a,b = map(r1array,(a,b))
    D = len(a) - 1
    N = len(b) - 1
    artype = Num.Float
    M = max([N,D])
    Np = M
    Dp = M
    bprime = Num.zeros(Np+1,artype)
    aprime = Num.zeros(Dp+1,artype)
    for j in range(Np+1):
        val = 0.0
        for i in range(N+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
        bprime[j] = val
    for j in range(Dp+1):
        val = 0.0
        for i in range(D+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
        aprime[j] = val
    return normalize(bprime, aprime)
fs = 2
fs = 2.0
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'): """IIR digital and analog filter design given order and critical points. Description: Design an Nth order lowpass digital or analog filter and return the filter coefficients in (B,A) (numerator, denominator) or (Z,P,K) form. Inputs: N -- the order of the filter. Wn -- a scalar or length-2 sequence giving the critical frequencies. rp, rs -- For chebyshev and elliptic filters provides the maximum ripple in the passband and the minimum attenuation in the stop band. btype -- the type of filter (lowpass, highpass, bandpass, or bandstop). analog -- non-zero to return an analog filter, otherwise a digital filter is returned. ftype -- the type of IIR filter (Butterworth, Cauer (Elliptic), Bessel, Chebyshev1, Chebyshev2) output -- 'ba' for (b,a) output, 'zpk' for (z,p,k) output. SEE ALSO butterord, cheb1ord, cheb2ord, ellipord """ ftype, btype, output = map(string.lower, (ftype, btype, output)) Wn = Num.asarray(Wn) try: btype = band_dict[btype] except KeyError: raise ValueError, "%s is an invalid bandtype for filter." % btype try: typefunc = filter_dict[ftype][0] except KeyError: raise ValueError, "%s is not a valid basic iir filter." % ftype if output not in ['ba', 'zpk']: raise ValueError, "%s is not a valid output form." % output #pre-warp frequencies for digital filter design if not analog: fs = 2 warped = 2*fs*tan(pi*Wn/fs) else: warped = Wn # convert to low-pass prototype if btype in ['lowpass', 'highpass']: wo = warped else: bw = warped[1] - warped[0] wo = sqrt(warped[0]*warped[1]) # Get analog lowpass prototype if typefunc in [buttap, besselap]: z, p, k = typefunc(N) elif typefunc == cheb1ap: if rp is None: raise ValueError, "passband ripple (rp) must be provided to design a Chebyshev I filter." z, p, k = typefunc(N, rp) elif typefunc == cheb2ap: if rs is None: raise ValueError, "stopband atteunatuion (rs) must be provided to design an Chebyshev II filter." z, p, k = typefunc(N, rs) else: # Elliptic filters if rs is None or rp is None: raise ValueErrro, "Both rp and rs must be provided to design an elliptic filter." z, p, k = typefunc(N, rp, rs) b, a = zpk2tf(z,p,k) # transform to lowpass, bandpass, highpass, or bandstop if btype == 'lowpass': b, a = lp2lp(b,a,wo=wo) elif btype == 'highpass': b, a = lp2hp(b,a,wo=wo) elif btype == 'bandpass': b, a = lp2bp(b,a,wo=wo,bw=bw) else: # 'bandstop' b, a = lp2bs(b,a,wo=wo,bw=bw) # Find discrete equivalent if necessary if not analog: b, a = bilinear(b, a, fs=fs) # Transform to proper out type (pole-zero, state-space, numer-denom) if output == 'zpk': return tf2zpk(b,a) else: return b,a
for k, col in enumerate(j):
for k, col in enumerate(seq):
def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return
a, axis = asarray(a, axis)
a, axis = _chk_asarray(a, axis)
def tmax(a,upperlimit,axis=0,inclusive=True):
    """Returns the maximum value of a, along axis, including only values greater
    than (or equal to, if inclusive is True) upperlimit.  If the limit is set to
    None, a limit larger than the max value in the array is used.
    """
    a, axis = asarray(a, axis)
    if inclusive:
        upperfcn = less
    else:
        upperfcn = less_equal
    if upperlimit is None:
        upperlimit = maximum.reduce(ravel(a))+1
    smallest = minimum.reduce(ravel(a))
    ta = where(upperfcn(a,upperlimit),a,smallest)
    return maximum.reduce(ta,axis)
assert_array_almost_equal(sort(roots(a3)),sort([-3,-2]),11)
assert_array_almost_equal(sort(roots(a3)),sort([-3,-2]),7)
def check_basic(self):
    a1 = [1,-4,4]
    a2 = [4,-16,16]
    a3 = [1,5,6]
    assert_array_almost_equal(roots(a1),[2,2],11)
    assert_array_almost_equal(roots(a2),[2,2],11)
    assert_array_almost_equal(sort(roots(a3)),sort([-3,-2]),11)
assert_array_almost_equal(sort(roots(poly(a))),sort(a),11)
assert_array_almost_equal(sort(roots(poly(a))),sort(a),5)
def check_inverse(self):
    a = rand(5)
    assert_array_almost_equal(sort(roots(poly(a))),sort(a),11)
assert_array_equal(trapz(y,x,1),val)
assert_array_equal(trapz(y,x,axis=1),val)
def check_nd(self):
    x = sort(20*rand(10,20,30))
    y = x**2 + 2*x + 1
    dx = x[:,1:,:] - x[:,:-1,:]
    val = add.reduce(dx*(y[:,1:,:] + y[:,:-1,:])/2.0,1)
    assert_array_equal(trapz(y,x,1),val)
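The diff above makes the axis argument explicit. A small sketch of the same identity with current numpy/scipy names (scipy.integrate.trapezoid is the modern spelling of trapz and is not part of the original test): the trapezoid rule along axis 1 equals the hand-rolled sum of dx*(y[i]+y[i+1])/2 along that axis.

import numpy as np
from scipy.integrate import trapezoid

rng = np.random.default_rng(0)
x = np.sort(20 * rng.random((10, 20, 30)), axis=1)
y = x**2 + 2 * x + 1

dx = x[:, 1:, :] - x[:, :-1, :]
expected = np.add.reduce(dx * (y[:, 1:, :] + y[:, :-1, :]) / 2.0, axis=1)

assert np.allclose(trapezoid(y, x, axis=1), expected)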
"""blackman(M) returns the M-point Blackman window.
"""The M-point Blackman window.
def blackman(M):
    """blackman(M) returns the M-point Blackman window.
    """
    n = arange(0,M)
    return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
"""bartlett(M) returns the M-point Bartlett window.
"""The M-point Bartlett window.
def bartlett(M):
    """bartlett(M) returns the M-point Bartlett window.
    """
    n = arange(0,M)
    return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
"""hanning(M) returns the M-point Hanning window.
"""The M-point Hanning window.
def hanning(M):
    """hanning(M) returns the M-point Hanning window.
    """
    n = arange(0,M)
    return 0.5-0.5*cos(2.0*pi*n/(M-1))
"""hamming(M) returns the M-point Hamming window.
"""The M-point Hamming window.
def hamming(M):
    """hamming(M) returns the M-point Hamming window.
    """
    n = arange(0,M)
    return 0.54-0.46*cos(2.0*pi*n/(M-1))
"""kaiser(M, beta) returns a Kaiser window of length M with shape parameter beta. It depends on the cephes module for the modified bessel function i0.
"""Returns a Kaiser window of length M with shape parameter beta.
def kaiser(M,beta):
    """kaiser(M, beta) returns a Kaiser window of length M with shape parameter
    beta. It depends on the cephes module for the modified bessel function i0.
    """
    n = arange(0,M)
    alpha = (M-1)/2.0
    return special.i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/special.i0(beta)
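These window rows only change the docstrings; the formulas are the standard ones. A quick check with modern numpy (np.hamming is used here purely as an independent reference, not as part of the snippets above):

import numpy as np

M = 16
n = np.arange(M)
# Same expression as the hamming() definition above.
hamming_window = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))

assert np.allclose(hamming_window, np.hamming(M))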
'csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\
'samax camax sgemv cgemv chemv ssymv strmv ctrmv sgemm cgemm'.split())
' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\
' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\
' sgemm cgemm'.split())
def configuration(parent_package=''): package = 'linalg' from interface_gen import generate_interface config = default_config_dict(package,parent_package) local_path = get_path(__name__) atlas_info = get_info('atlas') #atlas_info = {} # uncomment if ATLAS is available but want to use # Fortran LAPACK/BLAS; useful for testing f_libs = [] blas_info,lapack_info = {},{} if not atlas_info: warnings.warn(AtlasNotFoundError.__doc__) blas_info = get_info('blas') #blas_info = {} # test building BLAS from sources. if not blas_info: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__ dict_append(blas_info,libraries=['blas_src']) f_libs.append(fortran_library_item(\ 'blas_src',blas_src_info['sources'], )) lapack_info = get_info('lapack') #lapack_info = {} # test building LAPACK from sources. if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) mod_sources = {} if atlas_info or blas_info: mod_sources['fblas'] = ['generic_fblas.pyf', 'generic_fblas1.pyf', 'generic_fblas2.pyf', 'generic_fblas3.pyf', os.path.join('src','fblaswrap.f'), ] if atlas_info or lapack_info: mod_sources['flapack'] = ['generic_flapack.pyf'] if atlas_info: mod_sources['cblas'] = ['generic_cblas.pyf', 'generic_cblas1.pyf'] mod_sources['clapack'] = ['generic_clapack.pyf'] else: dict_append(atlas_info,**lapack_info) dict_append(atlas_info,**blas_info) skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]} if skip_single_routines: skip_names['clapack'].extend(\ 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ ' slauum clauum strtri ctrtri'.split()) skip_names['flapack'].extend(skip_names['clapack']) skip_names['flapack'].extend(\ 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' ' sggev cggev'.split()) skip_names['cblas'].extend('saxpy caxpy'.split()) skip_names['fblas'].extend(skip_names['cblas']) skip_names['fblas'].extend(\ 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ 'csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\ 'samax camax sgemv cgemv chemv ssymv strmv ctrmv sgemm cgemm'.split()) if atlas_version_pre_3_3: skip_names['clapack'].extend(\ 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) for mod_name,sources in mod_sources.items(): sources = [os.path.join(local_path,s) for s in sources] mod_file = os.path.join(local_path,mod_name+'.pyf') if dep_util.newer_group(sources,mod_file): generate_interface(mod_name,sources[0],mod_file, skip_names.get(mod_name,[])) sources = filter(lambda s:s[-4:]!='.pyf',sources) ext_args = {'name':dot_join(parent_package,package,mod_name), 'sources':[mod_file]+sources} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) flinalg = [] for f in ['det.f','lu.f', #'wrappers.c','inv.f', ]: flinalg.append(os.path.join(local_path,'src',f)) ext_args = {'name':dot_join(parent_package,package,'_flinalg'), 'sources':flinalg} dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) ext_args = 
{'name':dot_join(parent_package,package,'calc_lwork'), 'sources':[os.path.join(local_path,'src','calc_lwork.f')], } dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) config['fortran_libraries'].extend(f_libs) return config
entires = rows*cols
entries = rows*cols
def mmwrite(target,a,comment='',field=None,precision=None): """ Writes the sparse or dense matrix A to a Matrix Market formatted file. Inputs: target - Matrix Market filename (extension .mtx) or open file object a - sparse or full matrix comment - comments to be prepended to the Matrix Market file field - 'real' | 'complex' | 'pattern' | 'integer' precision - Number of digits to display for real or complex values. """ close_it = 0 if type(target) is type(''): if target[-4:] != '.mtx': target = target + '.mtx' target = open(target,'w') close_it = 1 if type(a) in [ListType,ArrayType,TupleType] or hasattr(a,'__array__'): rep = 'array' a = asarray(a) if len(a.shape) != 2: raise ValueError, 'expected matrix' rows,cols = a.shape entires = rows*cols typecode = a.typecode() if field is not None: if field=='integer': a = a.astype('i') elif field=='real': if typecode not in 'fd': a = a.astype('d') elif field=='complex': if typecode not in 'FD': a = a.astype('D') elif field=='pattern': pass else: raise ValueError,'unknown field '+field typecode = a.typecode() else: rep = 'coordinate' from scipy.sparse import spmatrix if not isinstance(a,spmatrix): raise ValueError,'unknown matrix type ' + `type(a)` rows,cols = a.shape entries = a.getnnz() typecode = a.gettypecode() if precision is None: if typecode in 'fF': precision = 8 else: precision = 16 if field is None: if typecode in 'li': field = 'integer' elif typecode in 'df': field = 'real' elif typecode in 'DF': field = 'complex' else: raise TypeError,'unexpected typecode '+typecode if rep == 'array': symm = _get_symmetry(a) else: symm = 'general' target.write('%%%%MatrixMarket matrix %s %s %s\n' % (rep,field,symm)) for line in comment.split('\n'): target.write('%%%s\n' % (line)) if field in ['real','integer']: if field=='real': format = '%%.%ie\n' % precision else: format = '%i\n' elif field=='complex': format = '%%.%ie %%.%ie\n' % (precision,precision) if rep == 'array': target.write('%i %i\n' % (rows,cols)) if field in ['real','integer']: if symm=='general': for j in range(cols): for i in range(rows): target.write(format % a[i,j]) else: for j in range(cols): for i in range(j,rows): target.write(format % a[i,j]) elif field=='complex': if symm=='general': for j in range(cols): for i in range(rows): aij = a[i,j] target.write(format % (real(aij),imag(aij))) else: for j in range(cols): for i in range(j,rows): aij = a[i,j] target.write(format % (real(aij),imag(aij))) elif field=='pattern': raise ValueError,'Pattern type inconsisted with dense matrix' else: raise TypeError,'Unknown matrix type '+`field` else: format = '%i %i ' + format target.write('%i %i %i\n' % (rows,cols,entires)) assert symm=='general',`symm` if field in ['real','integer']: for i in range(entries): target.write(format % (a.rowcol(i)+(a.getdata(i),))) elif field=='complex': for i in range(entries): value = a.getdata(i) target.write(format % ((a.rowcol(i))+(real(value),imag(value)))) elif field=='pattern': raise NotImplementedError,`field` else: raise TypeError,'Unknown matrix type '+`field` if close_it: target.close() else: target.flush() return
target.write('%i %i %i\n' % (rows,cols,entires))
target.write('%i %i %i\n' % (rows,cols,entries))
def mmwrite(target,a,comment='',field=None,precision=None): """ Writes the sparse or dense matrix A to a Matrix Market formatted file. Inputs: target - Matrix Market filename (extension .mtx) or open file object a - sparse or full matrix comment - comments to be prepended to the Matrix Market file field - 'real' | 'complex' | 'pattern' | 'integer' precision - Number of digits to display for real or complex values. """ close_it = 0 if type(target) is type(''): if target[-4:] != '.mtx': target = target + '.mtx' target = open(target,'w') close_it = 1 if type(a) in [ListType,ArrayType,TupleType] or hasattr(a,'__array__'): rep = 'array' a = asarray(a) if len(a.shape) != 2: raise ValueError, 'expected matrix' rows,cols = a.shape entires = rows*cols typecode = a.typecode() if field is not None: if field=='integer': a = a.astype('i') elif field=='real': if typecode not in 'fd': a = a.astype('d') elif field=='complex': if typecode not in 'FD': a = a.astype('D') elif field=='pattern': pass else: raise ValueError,'unknown field '+field typecode = a.typecode() else: rep = 'coordinate' from scipy.sparse import spmatrix if not isinstance(a,spmatrix): raise ValueError,'unknown matrix type ' + `type(a)` rows,cols = a.shape entries = a.getnnz() typecode = a.gettypecode() if precision is None: if typecode in 'fF': precision = 8 else: precision = 16 if field is None: if typecode in 'li': field = 'integer' elif typecode in 'df': field = 'real' elif typecode in 'DF': field = 'complex' else: raise TypeError,'unexpected typecode '+typecode if rep == 'array': symm = _get_symmetry(a) else: symm = 'general' target.write('%%%%MatrixMarket matrix %s %s %s\n' % (rep,field,symm)) for line in comment.split('\n'): target.write('%%%s\n' % (line)) if field in ['real','integer']: if field=='real': format = '%%.%ie\n' % precision else: format = '%i\n' elif field=='complex': format = '%%.%ie %%.%ie\n' % (precision,precision) if rep == 'array': target.write('%i %i\n' % (rows,cols)) if field in ['real','integer']: if symm=='general': for j in range(cols): for i in range(rows): target.write(format % a[i,j]) else: for j in range(cols): for i in range(j,rows): target.write(format % a[i,j]) elif field=='complex': if symm=='general': for j in range(cols): for i in range(rows): aij = a[i,j] target.write(format % (real(aij),imag(aij))) else: for j in range(cols): for i in range(j,rows): aij = a[i,j] target.write(format % (real(aij),imag(aij))) elif field=='pattern': raise ValueError,'Pattern type inconsisted with dense matrix' else: raise TypeError,'Unknown matrix type '+`field` else: format = '%i %i ' + format target.write('%i %i %i\n' % (rows,cols,entires)) assert symm=='general',`symm` if field in ['real','integer']: for i in range(entries): target.write(format % (a.rowcol(i)+(a.getdata(i),))) elif field=='complex': for i in range(entries): value = a.getdata(i) target.write(format % ((a.rowcol(i))+(real(value),imag(value)))) elif field=='pattern': raise NotImplementedError,`field` else: raise TypeError,'Unknown matrix type '+`field` if close_it: target.close() else: target.flush() return
new = zeros((newlen,), arr.dtype.char)
new = zeros((newlen,), arr.dtype)
def resize1d(arr, newlen):
    old = len(arr)
    new = zeros((newlen,), arr.dtype.char)
    new[:old] = arr
    return new
(self.shape + (self.dtype.char, self.getnnz(), self.nzmax, \
(self.shape + (self.dtype.type, self.getnnz(), self.nzmax, \
def __repr__(self):
    format = self.getformat()
    return "<%dx%d sparse matrix of type '%s'\n\twith %d stored "\
           "elements (space for %d)\n\tin %s format>" % \
           (self.shape + (self.dtype.char, self.getnnz(), self.nzmax, \
                          _formats[format][1]))
csc.dtype.char = csc.data.dtype.char
csc.dtype = csc.data.dtype
def _real(self):
    csc = self.tocsc()
    csc.data = real(csc.data)
    csc.dtype.char = csc.data.dtype.char
    csc.ftype = _transtabl[csc.dtype.char]
    return csc
csc.dtype.char = csc.data.dtype.char
csc.dtype = csc.data.dtype
def _imag(self):
    csc = self.tocsc()
    csc.data = imag(csc.data)
    csc.dtype.char = csc.data.dtype.char
    csc.ftype = _transtabl[csc.dtype.char]
    return csc
self.dtype.char = self.data.dtype.char
self.dtype = self.data.dtype
def _check(self):
    M, N = self.shape
    nnz = self.indptr[-1]
    nzmax = len(self.rowind)
new.dtype.char = new.data.dtype.char
new.dtype = new.data.dtype
def __mul__(self, other):
    """ Scalar, vector, or matrix multiplication
    """
    if isscalar(other) or (isdense(other) and rank(other)==0):
        new = self.copy()
        new.data *= other
        new.dtype.char = new.data.dtype.char
        new.ftype = _transtabl[new.dtype.char]
        return new
    else:
        return self.dot(other)
    #else:
    #    return TypeError, "unknown type for sparse matrix multiplication"
new.dtype.char = new.data.dtype.char
new.dtype = new.data.dtype
def __rmul__(self, other):  # other * self
    if isscalar(other) or (isdense(other) and rank(other)==0):
        new = self.copy()
        new.data = other * new.data
        new.dtype.char = new.data.dtype.char
        new.ftype = _transtabl[new.dtype.char]
        return new
    else:
        other = asarray(other)
        return self.transpose().dot(other.transpose()).transpose()
new.dtype.char = new.data.dtype.char
new.dtype = new.data.dtype
def __pow__(self, other):
    """ Element-by-element power (unless other is a scalar, in which
    case return the matrix power.)
    """
    if isscalar(other) or (isdense(other) and rank(other)==0):
        new = self.copy()
        new.data = new.data ** other
        new.dtype.char = new.data.dtype.char
        new.ftype = _transtabl[new.dtype.char]
        return new
    else:
        ocs = other.tocsc()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
        dtypechar = _coerce_rules[(self.dtype.char, ocs.dtype.char)]
        nnz1, nnz2 = self.nnz, ocs.nnz
        data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2], dtypechar)
        func = getattr(sparsetools, _transtabl[dtypechar]+'cscmul')
        c, rowc, ptrc, ierr = func(data1, self.rowind[:nnz1], self.indptr, data2, ocs.rowind[:nnz2], ocs.indptr)
        if ierr:
            raise ValueError, "ran out of space (but shouldn't have happened)"
        M, N = self.shape
        return csc_matrix((c, rowc, ptrc), dims=(M, N))
new = csr_matrix((N, M), nzmax=self.nzmax, dtype=self.dtype.char)
new = csr_matrix((N, M), nzmax=self.nzmax, dtype=self.dtype)
def transpose(self, copy=False):
    M, N = self.shape
    new = csr_matrix((N, M), nzmax=self.nzmax, dtype=self.dtype.char)
    if copy:
        new.data = self.data.copy()
        new.colind = self.rowind.copy()
        new.indptr = self.indptr.copy()
    else:
        new.data = self.data
        new.colind = self.rowind
        new.indptr = self.indptr
    new._check()
    return new
new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype.char)
new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype)
def conj(self, copy=False):
    new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype.char)
    if copy:
        new.data = self.data.conj().copy()
        new.rowind = self.rowind.conj().copy()
        new.indptr = self.indptr.conj().copy()
    else:
        new.data = self.data.conj()
        new.rowind = self.rowind.conj()
        new.indptr = self.indptr.conj()
    new._check()
    return new
new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype.char)
new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype)
def copy(self):
    new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype.char)
    new.data = self.data.copy()
    new.rowind = self.rowind.copy()
    new.indptr = self.indptr.copy()
    new._check()
    return new
self.dtype.char = self.data.dtype.char
self.dtype = self.data.dtype
def _check(self):
    M, N = self.shape
    nnz = self.indptr[-1]
    nzmax = len(self.colind)
    if (rank(self.data) != 1) or (rank(self.colind) != 1) or \
       (rank(self.indptr) != 1):
        raise ValueError, "data, colind, and indptr arrays "\
              "should be rank 1"
    if (len(self.data) != nzmax):
        raise ValueError, "data and row list should have same length"
    if (len(self.indptr) != M+1):
        raise ValueError, "index pointer should be of length #rows + 1"
    if (nnz>0) and (amax(self.colind[:nnz]) >= N):
        raise ValueError, "column-values must be < N"
    if (nnz > nzmax):
        raise ValueError, \
              "last value of index list should be less than "\
              "the size of data list"
    self.nnz = nnz
    self.nzmax = nzmax
    self.dtype.char = self.data.dtype.char
    if self.dtype.char not in 'fdFD':
        self.data = self.data + 0.0
        self.dtype.char = self.data.dtype.char
    self.ftype = _transtabl[self.dtype.char]
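A minimal illustration (plain numpy, not the sparse classes in these rows) of why the surrounding diffs replace assignments to x.dtype.char with rebinding x.dtype: numpy dtype objects are immutable, so their attributes cannot be set.

import numpy as np

d = np.dtype('float64')
try:
    d.char = 'f'              # what the old pattern effectively attempted
except AttributeError as exc:
    print("cannot assign:", exc)

# The working pattern is to store the whole dtype taken from the data array,
# e.g. self.dtype = self.data.dtype in the diffs above.
data = np.zeros(3, dtype='float32')
dtype = data.dtype
print(dtype.char)             # 'f'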
new.dtype.char = new.data.dtype.char
new.dtype = new.data.dtype
def __mul__(self, other):
    """ Scalar, vector, or matrix multiplication
    """
    if isscalar(other) or (isdense(other) and rank(other)==0):
        new = self.copy()
        new.data = other * new.data  # allows type conversion
        new.dtype.char = new.data.dtype.char
        new.ftype = _transtabl[new.dtype.char]
        return new
    else:
        return self.dot(other)
new.dtype.char = new.data.dtype.char
new.dtype = new.data.dtype
def __rmul__(self, other):  # other * self
    if isscalar(other) or (isdense(other) and rank(other)==0):
        new = self.copy()
        new.data = other * new.data  # allows type conversion
        new.dtype.char = new.data.dtype.char
        new.ftype = _transtabl[new.dtype.char]
        return new
    else:
        other = asarray(other)
        return self.transpose().dot(other.transpose()).transpose()
new.dtype.char = new.data.dtype.char
new.dtype = new.data.dtype
def __pow__(self, other):
    """ Element-by-element power (unless other is a scalar, in which
    case return the matrix power.)
    """
    if isscalar(other) or (isdense(other) and rank(other)==0):
        new = self.copy()
        new.data = new.data ** other
        new.dtype.char = new.data.dtype.char
        new.ftype = _transtabl[new.dtype.char]
        return new
    elif isspmatrix(other):
        ocs = other.tocsr()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
        dtypechar = _coerce_rules[(self.dtype.char, ocs.dtype.char)]
        data1, data2 = _convert_data(self.data, ocs.data, dtypechar)
        func = getattr(sparsetools, _transtabl[dtypechar]+'cscmul')
        c, colc, ptrc, ierr = func(data1, self.colind, self.indptr, data2, ocs.colind, ocs.indptr)
        if ierr:
            raise ValueError, "ran out of space (but shouldn't have happened)"
        M, N = self.shape
        return csr_matrix((c, colc, ptrc), dims=(M, N))
    else:
        raise TypeError, "unsupported type for sparse matrix power"
new = csc_matrix((N, M), nzmax=self.nzmax, dtype=self.dtype.char)
new = csc_matrix((N, M), nzmax=self.nzmax, dtype=self.dtype)
def transpose(self, copy=False):
    M, N = self.shape
    new = csc_matrix((N, M), nzmax=self.nzmax, dtype=self.dtype.char)
    if copy:
        new.data = self.data.copy()
        new.rowind = self.colind.copy()
        new.indptr = self.indptr.copy()
    else:
        new.data = self.data
        new.rowind = self.colind
        new.indptr = self.indptr
    new._check()
    return new
new = csr_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype.char)
new = csr_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype)
def copy(self):
    new = csr_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype.char)
    new.data = self.data.copy()
    new.colind = self.colind.copy()
    new.indptr = self.indptr.copy()
    new._check()
    return new
self.dtype.char = self.data.dtype.char
self.dtype = self.data.dtype
def __init__(self, obj, ij_in, dims=None, nzmax=None, dtype=None):
    spmatrix.__init__(self)
    try:
        # Assume the first calling convention
        # assert len(ij) == 2
        if len(ij_in) != 2:
            if isdense( ij_in ) and (ij_in.shape[1] == 2):
                ij = (ij_in[:,0], ij_in[:,1])
            else:
                raise AssertionError
        else:
            ij = ij_in
        if dims is None:
            M = int(amax(ij[0]))
            N = int(amax(ij[1]))
            self.shape = (M, N)
        else:
            # Use 2 steps to ensure dims has length 2.
            M, N = dims
            self.shape = (M, N)
        self.row = asarray(ij[0])
        self.col = asarray(ij[1])
        self.data = asarray(obj, dtype=dtype)
        self.dtype.char = self.data.dtype.char
        if nzmax is None:
            nzmax = len(self.data)
        self.nzmax = nzmax
        self._check()
    except Exception, e:
        raise e, "invalid input format"
self.data = array(data, self.dtype.char)
self.data = array(data, self.dtype)
def _normalize(self, rowfirst=False):
    if rowfirst:
        l = zip(self.row, self.col, self.data)
        l.sort()
        row, col, data = list(itertools.izip(*l))
        return data, row, col
    if getattr(self, '_is_normalized', None):
        return self.data, self.row, self.col
    l = zip(self.col, self.row, self.data)
    l.sort()
    col, row, data = list(itertools.izip(*l))
    self.col = asarray(col, 'i')
    self.row = asarray(row, 'i')
    self.data = array(data, self.dtype.char)
    setattr(self, '_is_normalized', 1)
    return self.data, self.row, self.col
if xb is None: xb=x[0]
if xe is None: xe=x[-1]
if xb is None: xb=x.min()
if xe is None: xe=x.max()
def splrep(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None, full_output=0,nest=None,per=0,quiet=1): """Find the B-spline representation of 1-D curve. Description: Given the set of data points (x[i], y[i]) determine a smooth spline approximation of degree k on the interval xb <= x <= xe. The coefficients, c, and the knot points, t, are returned. Uses the FORTRAN routine curfit from FITPACK. Inputs: x, y -- The data points defining a curve y = f(x). w -- Strictly positive rank-1 array of weights the same length as x and y. The weights are used in computing the weighted least-squares spline fit. If the errors in the y values have standard-deviation given by the vector d, then w should be 1/d. Default is ones(len(x)). xb, xe -- The interval to fit. If None, these default to x[0] and x[-1] respectively. k -- The order of the spline fit. It is recommended to use cubic splines. Even order splines should be avoided especially with small s values. 1 <= k <= 5 task -- If task==0 find t and c for a given smoothing factor, s. If task==1 find t and c for another value of the smoothing factor, s. There must have been a previous call with task=0 or task=1 for the same set of data. If task=-1 find the weighted least square spline for a given set of knots, t. s -- A smoothing condition. The amount of smoothness is determined by satisfying the conditions: sum((w * (y - g))**2) <= s where g(x) is the smoothed interpolation of (x,y). The user can use s to control the tradeoff between closeness and smoothness of fit. Larger s means more smoothing while smaller values of s indicate less smoothing. Recommended values of s depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good s value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is the number of datapoints in x, y, and w. t -- The knots needed for task=-1. full_output -- If non-zero, then return optional outputs. nest -- An over-estimate of the total number of knots of the spline to help in determining the storage space. By default nest=m/2. per -- If non-zero, data points are considered periodic with period x[m-1] - x[0] and a smooth periodic spline approximation is returned. Values of y[m-1] and w[m-1] are not used. quiet -- Non-zero to suppress messages. Outputs: (tck, {fp, ier, msg}) tck -- (t,c,k) a tuple containing the vector of knots, the B-spline coefficients, and the degree of the spline. fp -- The weighted sum of squared residuals of the spline approximation. ier -- An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg -- A message corresponding to the integer flag, ier. Remarks: SEE splev for evaluation of the spline and its derivatives. 
""" if task<=0: _curfit_cache = {'t': array([],'d'), 'wrk': array([],'d'), 'iwrk':array([],'i')} x,y=map(myasarray,[x,y]) m=len(x) if w is None: w=ones(m,'d') else: w=myasarray(w) if not len(w) == m: raise TypeError,' len(w)=%d is not equal to m=%d'%(len(w),m) if xb is None: xb=x[0] if xe is None: xe=x[-1] if not (-1<=task<=1): raise TypeError, 'task must be either -1,0, or 1' if s is None: s=m-sqrt(2*m) if t is None and task==-1: raise TypeError, 'Knots must be given for task=-1' if t is not None: _curfit_cache['t']=myasarray(t) n=len(_curfit_cache['t']) if task==-1 and n<2*k+2: raise TypeError, 'There must be at least 2*k+2 knots for task=-1' if (m != len(y)) or (m != len(w)): raise TypeError, 'Lengths of the first three arguments (x,y,w) must be equal' if not (1<=k<=5): raise TypeError, 'Given degree of the spline (k=%d) is not supported. (1<=k<=5)'%(k) if m<=k: raise TypeError, 'm>k must hold' if nest is None: nest=m/2 if nest<0: if per: nest=m+2*k else: nest=m+k+1 nest=max(nest,2*k+3) if task>=0 and s==0: if per: nest=m+2*k else: nest=m+k+1 if task==-1: _curfit_cache['t']=myasarray(t) if not (2*k+2<=len(t)<=min(nest,m+k+1)): raise TypeError, 'Number of knots n is not acceptable (2*k+2<=n<=min(nest,m+l+1))' t=_curfit_cache['t'] wrk=_curfit_cache['wrk'] iwrk=_curfit_cache['iwrk'] t,c,o = _fitpack._curfit(x,y,w,xb,xe,k,task,s,t,nest,wrk,iwrk,per) _curfit_cache['t']=t _curfit_cache['wrk']=o['wrk'] _curfit_cache['iwrk']=o['iwrk'] ier,fp=o['ier'],o['fp'] tck = [t,c,k] if ier<=0 and not quiet: print _iermess[ier][0] print "\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s) if ier>0 and not full_output: if ier in [1,2,3]: print "Warning: "+_iermess[ier][0] else: try: raise _iermess[ier][1],_iermess[ier][0] except KeyError: raise _iermess['unknown'][1],_iermess['unknown'][0] if full_output: try: return tck,fp,ier,_iermess[ier][0] except KeyError: return tck,fp,ier,_iermess['unknown'][0] else: return tck
if xb is None: xb=x[0]
if xe is None: xe=x[-1]
if yb is None: yb=y[0]
if ye is None: ye=y[-1]
if xb is None: xb=x.min()
if xe is None: xe=x.max()
if yb is None: yb=y.min()
if ye is None: ye=y.max()
def bisplrep(x,y,z,w=None,xb=None,xe=None,yb=None,ye=None,kx=3,ky=3,task=0,s=None, eps=1e-16,tx=None,ty=None,full_output=0,nxest=None,nyest=None,quiet=1): """Find a bivariate B-spline representation of a surface. Description: Given a set of data points (x[i], y[i], z[i]) representing a surface z=f(x,y), compute a B-spline representation of the surface. Inputs: x, y, z -- Rank-1 arrays of data points. w -- Rank-1 array of weights. By default w=ones(len(x)). xb, xe -- End points of approximation interval in x. yb, ye -- End points of approximation interval in y. By default xb, xe, yb, ye = x[0], x[-1], y[0], y[-1] kx, ky -- The degrees of the spline (1 <= kx, ky <= 5). Third order (kx=ky=3) is recommended. task -- If task=0, find knots in x and y and coefficients for a given smoothing factor, s. If task=1, find knots and coefficients for another value of the smoothing factor, s. bisplrep must have been previously called with task=0 or task=1. If task=-1, find coefficients for a given set of knots tx, ty. s -- A non-negative smoothing factor. If weights correspond to the inverse of the standard-deviation of the errors in z, then a good s-value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m=len(x) eps -- A threshold for determining the effective rank of an over-determined linear system of equations (0 < eps < 1) --- not likely to need changing. tx, ty -- Rank-1 arrays of the knots of the spline for task=-1 full_output -- Non-zero to return optional outputs. nxest, nyest -- Over-estimates of the total number of knots. If None then nxest = max(kx+sqrt(m/2),2*kx+3), nyest = max(ky+sqrt(m/2),2*ky+3) quiet -- Non-zero to suppress printing of messages. Outputs: (tck, {fp, ier, msg}) tck -- A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and coefficients (c) of the bivariate B-spline representation of the surface along with the degree of the spline. fp -- The weighted sum of squared residuals of the spline approximation. ier -- An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg -- A message corresponding to the integer flag, ier. Remarks: SEE bisplev to evaluate the value of the B-spline given its tck representation. """ x,y,z=map(myasarray,[x,y,z]) x,y,z=map(ravel,[x,y,z]) # ensure 1-d arrays. m=len(x) if not (m==len(y)==len(z)): raise TypeError, 'len(x)==len(y)==len(z) must hold.' if w is None: w=ones(m,'d') else: w=myasarray(w) if not len(w) == m: raise TypeError,' len(w)=%d is not equal to m=%d'%(len(w),m) if xb is None: xb=x[0] if xe is None: xe=x[-1] if yb is None: yb=y[0] if ye is None: ye=y[-1] if not (-1<=task<=1): raise TypeError, 'task must be either -1,0, or 1' if s is None: s=m-sqrt(2*m) if tx is None and task==-1: raise TypeError, 'Knots_x must be given for task=-1' if tx is not None: _curfit_cache['tx']=myasarray(tx) nx=len(_surfit_cache['tx']) if ty is None and task==-1: raise TypeError, 'Knots_y must be given for task=-1' if ty is not None: _curfit_cache['ty']=myasarray(ty) ny=len(_surfit_cache['ty']) if task==-1 and nx<2*kx+2: raise TypeError, 'There must be at least 2*kx+2 knots_x for task=-1' if task==-1 and ny<2*ky+2: raise TypeError, 'There must be at least 2*ky+2 knots_x for task=-1' if not ((1<=kx<=5) and (1<=ky<=5)): raise TypeError, 'Given degree of the spline (kx,ky=%d,%d) is not supported. 
(1<=k<=5)'%(kx,ky) if m<(kx+1)*(ky+1): raise TypeError, 'm>=(kx+1)(ky+1) must hold' if nxest is None: nxest=kx+sqrt(m/2) if nyest is None: nyest=ky+sqrt(m/2) nxest,nyest=max(nxest,2*kx+3),max(nyest,2*ky+3) if task>=0 and s==0: nxest=int(kx+sqrt(3*m)) nyest=int(ky+sqrt(3*m)) if task==-1: _surfit_cache['tx']=myasarray(tx) _surfit_cache['ty']=myasarray(ty) tx,ty=_surfit_cache['tx'],_surfit_cache['ty'] wrk=_surfit_cache['wrk'] iwrk=_surfit_cache['iwrk'] u,v,km,ne=nxest-kx-1,nyest-ky-1,max(kx,ky)+1,max(nxest,nyest) bx,by=kx*v+ky+1,ky*u+kx+1 b1,b2=bx,bx+v-ky if bx>by: b1,b2=by,by+u-kx lwrk1=u*v*(2+b1+b2)+2*(u+v+km*(m+ne)+ne-kx-ky)+b2+1 lwrk2=u*v*(b2+1)+b2 tx,ty,c,o = _fitpack._surfit(x,y,z,w,xb,xe,yb,ye,kx,ky,task,s,eps, tx,ty,nxest,nyest,wrk,lwrk1,lwrk2) _curfit_cache['tx']=tx _curfit_cache['ty']=ty _curfit_cache['wrk']=o['wrk'] ier,fp=o['ier'],o['fp'] tck=[tx,ty,c,kx,ky] if ier<=0 and not quiet: print _iermess2[ier][0] print "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), len(ty),m,fp,s) ierm=min(11,max(-3,ier)) if ierm>0 and not full_output: if ier in [1,2,3,4,5]: print "Warning: "+_iermess2[ierm][0] print "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), len(ty),m,fp,s) else: try: raise _iermess2[ierm][1],_iermess2[ierm][0] except KeyError: raise _iermess2['unknown'][1],_iermess2['unknown'][0] if full_output: try: return tck,fp,ier,_iermess2[ierm][0] except KeyError: return tck,fp,ier,_iermess2['unknown'][0] else: return tck
self.file.tell()
return self.file.tell()
def tell(self): self.file.tell()
if result[:11] == 'Bad command' and sys.platform == 'win32':
res = bbox_re.search(result)
if res is None and sys.platform=='win32':
def convert_bounding_box(inname, outname):
    fid = open(inname,'r')
    oldfile = fid.read()
    fid.close()
    gsargs = '-dNOPAUSE -dBATCH -sDEVICE=bbox'
    # use cygwin gs if present
    cmd = 'gs %s %s' % (gsargs, inname)
    w, r = os.popen4(cmd)
    w.close()
    result = r.read()
    r.close()
    if result[:11] == 'Bad command' and sys.platform == 'win32':
        cmd = 'gswin32c %s %s' % (gsargs, inname)
        w, r = os.popen4(cmd)
        w.close()
        result = r.read()
        r.close()
    if result[:11] == 'Bad command':
        return False
    res = bbox_re.search(result)
    bbox = map(int,res.groups())
    newstr = bbox2_re.sub("BoundingBox: %d %d %d %d" % tuple(bbox), oldfile)
    fid = open(outname, 'wb')
    fid.write(newstr)
    fid.close()
    return True
if result[:11] == 'Bad command':
res = bbox_re.search(result)
if res is None:
    sys.stderr.write('To fix bounding box install ghostscript in the PATH')
def convert_bounding_box(inname, outname):
    fid = open(inname,'r')
    oldfile = fid.read()
    fid.close()
    gsargs = '-dNOPAUSE -dBATCH -sDEVICE=bbox'
    # use cygwin gs if present
    cmd = 'gs %s %s' % (gsargs, inname)
    w, r = os.popen4(cmd)
    w.close()
    result = r.read()
    r.close()
    if result[:11] == 'Bad command' and sys.platform == 'win32':
        cmd = 'gswin32c %s %s' % (gsargs, inname)
        w, r = os.popen4(cmd)
        w.close()
        result = r.read()
        r.close()
    if result[:11] == 'Bad command':
        return False
    res = bbox_re.search(result)
    bbox = map(int,res.groups())
    newstr = bbox2_re.sub("BoundingBox: %d %d %d %d" % tuple(bbox), oldfile)
    fid = open(outname, 'wb')
    fid.write(newstr)
    fid.close()
    return True
res = bbox_re.search(result)
def convert_bounding_box(inname, outname):
    fid = open(inname,'r')
    oldfile = fid.read()
    fid.close()
    gsargs = '-dNOPAUSE -dBATCH -sDEVICE=bbox'
    # use cygwin gs if present
    cmd = 'gs %s %s' % (gsargs, inname)
    w, r = os.popen4(cmd)
    w.close()
    result = r.read()
    r.close()
    if result[:11] == 'Bad command' and sys.platform == 'win32':
        cmd = 'gswin32c %s %s' % (gsargs, inname)
        w, r = os.popen4(cmd)
        w.close()
        result = r.read()
        r.close()
    if result[:11] == 'Bad command':
        return False
    res = bbox_re.search(result)
    bbox = map(int,res.groups())
    newstr = bbox2_re.sub("BoundingBox: %d %d %d %d" % tuple(bbox), oldfile)
    fid = open(outname, 'wb')
    fid.write(newstr)
    fid.close()
    return True
print diff
def kmeans_(obs,guess,thresh=1e-5):
    """* See kmeans

    Outputs:
        code_book -- the lowest distortion codebook found.
        avg_dist -- the average distance a observation is from a code in the book.
                    Lower means the code_book matches the data better.

    Test:
        Note: not whitened in this example.

        >>> features = array([[ 1.9,2.3],
        ...                   [ 1.5,2.5],
        ...                   [ 0.8,0.6],
        ...                   [ 0.4,1.8],
        ...                   [ 1.0,1.0]])
        >>> book = array((features[0],features[2]))
        >>> kmeans_(features,book)
        (array([[ 1.7       ,  2.4       ],
               [ 0.73333333,  1.13333333]]), 0.40563916697728591)

    *"""
    code_book = array(guess,copy=1)
    Nc = code_book.shape[0]
    avg_dist=[]
    diff = thresh+1.
    while diff>thresh:
        #print diff
        #compute membership and distances between obs and code_book
        obs_code, distort = vq(obs,code_book,return_dist=1)
        avg_dist.append(scipy.mean(distort))
        #recalc code_book as centroids of associated obs
        if(diff > thresh):
            has_members = []
            for i in arange(Nc):
                cell_members = compress(equal(obs_code,i),obs,0)
                if cell_members.shape[0] > 0:
                    code_book[i] = scipy.mean(cell_members,0)
                    has_members.append(i)
            #remove code_books that didn't have any members
            #print has_members
            code_book = take(code_book,has_members,0)
        if len(avg_dist) > 1:
            diff = avg_dist[-2] - avg_dist[-1]
    #print avg_dist
    return code_book, avg_dist[-1]
[0,1,1,1],
[1,1,1,1],
def check_diag(self):
    assert_equal(tri(4,k=1),array([[1,1,0,0],
                                   [1,1,1,0],
                                   [0,1,1,1],
                                   [1,1,1,1]]))
    assert_equal(tri(4,k=-1),array([[0,0,0,0],
                                    [1,0,0,0],
                                    [1,1,0,0],
                                    [1,1,1,0]]))
assert_equal(tril(a,k=-2),b)
assert_equal(triu(a,k=-2),b)
def check_diag(self):
    a = (100*get_mat(5)).astype('f')
    b = a.copy()
    for k in range(5):
        for l in range(max((k-1,0)),5):
            b[l,k] = 0
    assert_equal(triu(a,k=2),b)
    b = a.copy()
    for k in range(5):
        for l in range(k+3,5):
            b[l,k] = 0
    assert_equal(tril(a,k=-2),b)
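The diff above swaps tril for triu in the second assertion. A quick sketch of why triu is the right call, written with modern numpy (independent of the test harness above): zeroing every entry more than two rows below the diagonal is exactly triu with k=-2.

import numpy as np

a = np.arange(25.0).reshape(5, 5)
b = a.copy()
for k in range(5):
    for l in range(k + 3, 5):   # rows more than 2 below the diagonal
        b[l, k] = 0

assert (np.triu(a, k=-2) == b).all()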
def _check_bdtrik(self): assert_equal(cephes.bdtrik(1,3,0.5),3.0)
def check_bdtrik(self): cephes.bdtrik(1,3,0.5)
def _check_bdtrik(self): assert_equal(cephes.bdtrik(1,3,0.5),3.0)
def _check_chndtrix(self):
def check_chndtrix(self):
def _check_chndtrix(self): assert_equal(cephes.chndtrix(0,1,0),0.0)
def _check_gdtrib(self): assert_equal(cephes.gdtrib(1,0,1),5.0)
def check_gdtrib(self): cephes.gdtrib(1,0,1)
def _check_gdtrib(self): assert_equal(cephes.gdtrib(1,0,1),5.0)
def _check_hankel1(self):
def check_hankel1(self):
def _check_hankel1(self): cephes.hankel1(1,1)
def _check_hankel1e(self):
def check_hankel1e(self):
def _check_hankel1e(self): cephes.hankel1e(1,1)
def _check_hankel2(self):
def check_hankel2(self):
def _check_hankel2(self): cephes.hankel2(1,1)
def _check_hankel2e(self):
def check_hankel2e(self):
def _check_hankel2e(self): cephes.hankel2e(1,1)
def _check_it2i0k0(self):
def check_it2i0k0(self):
def _check_it2i0k0(self): cephes.it2i0k0(1)
def _check_it2j0y0(self):
def check_it2j0y0(self):
def _check_it2j0y0(self): cephes.it2j0y0(1)
def _check_itairy(self):
def check_itairy(self):
def _check_itairy(self): cephes.itairy(1)
def check_ive(self):
def _check_ive(self):
def check_ive(self): assert_equal(cephes.ive(1,0),0.0)
def check_jve(self):
def _check_jve(self):
def check_jve(self): assert_equal(cephes.jve(0,0),1.0)
def _check_kei(self):
def check_kei(self):
def _check_kei(self): cephes.kei(2)
def _check_ker(self):
def check_ker(self):
def _check_ker(self): cephes.ker(2)
def _check_kerp(self):
def check_kerp(self):
def _check_kerp(self): cephes.kerp(2)
def _check_mathieu_modcem2(self):
def check_mathieu_modcem2(self):
def _check_mathieu_modcem2(self): cephes.mathieu_modcem2(1,1,1)
def _check_mathieu_modsem2(self):
def check_mathieu_modsem2(self):
def _check_mathieu_modsem2(self): cephes.mathieu_modsem2(1,1,1)
def check_modstruve(self):
def _check_modstruve(self):
def check_modstruve(self): assert_equal(cephes.modstruve(1,0),0.0)
cephes.nbdtrik(1,1,1)
cephes.nbdtrik(1,.4,.5)
def __check_nbdtrik(self): cephes.nbdtrik(1,1,1)
def _check_ncfdtr(self):
def check_ncfdtr(self):
def _check_ncfdtr(self): assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def _check_ncfdtri(self):
def check_ncfdtri(self):
def _check_ncfdtri(self): assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def _check_ncfdtridfd(self):
def check_ncfdtridfd(self):
def _check_ncfdtridfd(self): cephes.ncfdtridfd(1,0.5,0,1)
def _check_nctdtrit(self):
def check_nctdtrit(self):
def _check_nctdtrit(self): cephes.nctdtrit(.1,0.2,.5)
def _check_nrdtrimn(self):
def check_nrdtrimn(self):
def _check_nrdtrimn(self): assert_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def _check_nrdtrisd(self):
def check_nrdtrisd(self):
def _check_nrdtrisd(self): assert_equal(cephes.nrdtrisd(0.5,0.5,0.5),0.0)
def _check_obl_ang1(self):
def check_obl_ang1(self):
def _check_obl_ang1(self): cephes.obl_ang1(1,1,1,0)
def _check_obl_ang1_cv(self):
    assert_equal(cephes.obl_ang1_cv(1,1,1,1,0),(1.0,0.0))

def check_obl_cv(self):
def check_obl_ang1_cv(self):
    result = cephes.obl_ang1_cv(1,1,1,1,0)
    assert_almost_equal(result[0],1.0)
    assert_almost_equal(result[1],0.0)

def _check_obl_cv(self):
def _check_obl_ang1_cv(self): assert_equal(cephes.obl_ang1_cv(1,1,1,1,0),(1.0,0.0))
def _check_obl_rad1(self):
def check_obl_rad1(self):
def _check_obl_rad1(self): cephes.obl_rad1(1,1,1,0)
def _check_obl_rad1_cv(self):
def check_obl_rad1_cv(self):
def _check_obl_rad1_cv(self): cephes.obl_rad1_cv(1,1,1,1,0)
def _check_pbdv(self):
def check_pbdv(self):
def _check_pbdv(self): assert_equal(cephes.pbdv(1,0),(0.0,0.0))
def _check_pbvv(self):
def check_pbvv(self):
def _check_pbvv(self): cephes.pbvv(1,0)
def _check_pdtrik(self):
def check_pdtrik(self):
def _check_pdtrik(self): cephes.pdtrik(0.5,1)
def _check_pro_ang1(self):
def check_pro_ang1(self):
def _check_pro_ang1(self): cephes.pro_ang1(1,1,1,0)
def _check_pro_ang1_cv(self):
def check_pro_ang1_cv(self):
def _check_pro_ang1_cv(self): assert_equal(cephes.pro_ang1_cv(1,1,1,1,0),(1.0,0.0))
def check_pro_cv(self):
def _check_pro_cv(self):
def check_pro_cv(self): assert_equal(cephes.pro_cv(1,1,0),2.0)
cephes.pro_rad1(1,1,1,0)
cephes.pro_rad1(1,1,1,0.1)
def check_pro_rad1(self):
    # x>1 gives segfault with ifc, but not with gcc
    # x<1 returns nan, no segfault
    cephes.pro_rad1(1,1,1,0)
assert_equal(cephes.smirnov(1,1),0.0)
assert_equal(cephes.smirnov(1,.1),0.9)
def check_smirnov(self): assert_equal(cephes.smirnov(1,1),0.0)
assert_equal(cephes.smirnovi(1,0),1.0)
def check_smirnovi(self):
    assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
    assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
    assert_equal(cephes.smirnovi(1,0),1.0)
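The corrected test relies on smirnovi being the inverse of smirnov in its second argument. A short round-trip sketch with the public scipy.special names (not the cephes test wrapper used above):

import numpy as np
from scipy.special import smirnov, smirnovi

# smirnovi(n, p) returns the statistic d with smirnov(n, d) == p.
for p in (0.4, 0.6):
    assert np.isclose(smirnov(1, smirnovi(1, p)), p)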
def _check_stdtridf(self):
def check_stdtridf(self):
def _check_stdtridf(self): cephes.stdtridf(0.7,1)
def _check_stdtrit(self):
def check_stdtrit(self):
def _check_stdtrit(self): cephes.stdtrit(1,0.7)
val *= exp(0.5*gammaln(n-m+1)-gammaln(n+m+1))
val *= exp(0.5*(gammaln(n-m+1)-gammaln(n+m+1)))
def _sph_harmonic(m,n,theta,phi):
    """inputs of (m,n,theta,phi) returns spherical harmonic of order
    m,n (|m|<=n) and argument theta and phi:  Y^m_n(theta,phi)
    """
    x = cos(phi)
    m,n = int(m), int(n)
    Pmn,Pmnd = lpmn(m,n,x)
    val = Pmn[m,n]
    val *= sqrt((2*n+1)/4.0/pi)
    val *= exp(0.5*gammaln(n-m+1)-gammaln(n+m+1))
    val *= exp(1j*m*theta)
    return val
"""inputs of (m,n,theta,phi) returns spherical harmonic of order m,n (|m|<=n) and argument theta and phi: Y^m_n(theta,phi)
"""Spherical harmonic of order m,n (|m|<=n) and argument theta and phi: Y^m_n(theta,phi)
def _sph_harmonic(m,n,theta,phi):
    """inputs of (m,n,theta,phi) returns spherical harmonic of order
    m,n (|m|<=n) and argument theta and phi:  Y^m_n(theta,phi)
    """
    x = cos(phi)
    m,n = int(m), int(n)
    Pmn,Pmnd = lpmn(m,n,x)
    val = Pmn[m,n]
    val *= sqrt((2*n+1)/4.0/pi)
    val *= exp(0.5*(gammaln(n-m+1)-gammaln(n+m+1)))
    val *= exp(1j*m*theta)
    return val
val = Pmn[m,n]
val = Pmn[-1, -1]
def _sph_harmonic(m,n,theta,phi):
    """inputs of (m,n,theta,phi) returns spherical harmonic of order
    m,n (|m|<=n) and argument theta and phi:  Y^m_n(theta,phi)
    """
    x = cos(phi)
    m,n = int(m), int(n)
    Pmn,Pmnd = lpmn(m,n,x)
    val = Pmn[m,n]
    val *= sqrt((2*n+1)/4.0/pi)
    val *= exp(0.5*(gammaln(n-m+1)-gammaln(n+m+1)))
    val *= exp(1j*m*theta)
    return val
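The parenthesization fix in these rows matters because the normalization prefactor should be sqrt((n-m)!/(n+m)!), i.e. exp of half the difference of the two gammaln terms. A small numeric sketch with scipy.special.gammaln (the example values are illustrative, not from the dataset):

import numpy as np
from math import factorial, sqrt, isclose
from scipy.special import gammaln

m, n = 2, 5
# Correct: half of the *difference* of the log-gammas.
correct = np.exp(0.5 * (gammaln(n - m + 1) - gammaln(n + m + 1)))
# Buggy version from the removed line: only the first term is halved.
buggy = np.exp(0.5 * gammaln(n - m + 1) - gammaln(n + m + 1))

assert isclose(correct, sqrt(factorial(n - m) / factorial(n + m)))
assert not isclose(buggy, correct)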