Dataset columns (name, type, min/max length):

repository_name              stringlengths    5 .. 67
func_path_in_repository      stringlengths    4 .. 234
func_name                    stringlengths    0 .. 314
whole_func_string            stringlengths    52 .. 3.87M
language                     stringclasses    6 values
func_code_string             stringlengths    52 .. 3.87M
func_documentation_string    stringlengths    1 .. 47.2k
func_code_url                stringlengths    85 .. 339
vallis/libstempo
libstempo/spharmORFbasis.py
real_rotated_Gammas
def real_rotated_Gammas(m, l, phi1, phi2, theta1, theta2, gamma_ml):
    """
    This function returns the real-valued form of the Overlap Reduction
    Functions; see Eq. 47 in Mingarelli et al. (2013).
    """
    if m > 0:
        ans = (1. / sqrt(2)) * (rotated_Gamma_ml(m, l, phi1, phi2, theta1, theta2, gamma_ml) +
                                (-1)**m * rotated_Gamma_ml(-m, l, phi1, phi2, theta1, theta2, gamma_ml))
        return ans.real
    if m == 0:
        return rotated_Gamma_ml(0, l, phi1, phi2, theta1, theta2, gamma_ml).real
    if m < 0:
        ans = (1. / sqrt(2) / complex(0., 1)) * (rotated_Gamma_ml(-m, l, phi1, phi2, theta1, theta2, gamma_ml) -
                                                 (-1)**m * rotated_Gamma_ml(m, l, phi1, phi2, theta1, theta2, gamma_ml))
        return ans.real
python
This function returns the real-valued form of the Overlap Reduction Functions; see Eq. 47 in Mingarelli et al. (2013).
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L281-L297
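The three branches implement the usual real-basis combination of the complex ±m overlap-reduction components; schematically (my notation, with Gamma_{lm} standing for rotated_Gamma_ml(m, l, ...)):

\Gamma^{\mathrm{real}}_{lm} =
\begin{cases}
  \tfrac{1}{\sqrt{2}}\left(\Gamma_{lm} + (-1)^m\,\Gamma_{l,-m}\right) & m > 0,\\[4pt]
  \Gamma_{l0} & m = 0,\\[4pt]
  \tfrac{1}{\sqrt{2}\,i}\left(\Gamma_{l,-m} - (-1)^m\,\Gamma_{lm}\right) & m < 0.
\end{cases}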
vallis/libstempo
libstempo/fit.py
chisq
def chisq(psr, formbats=False):
    """Return the total chisq for the current timing solution, removing
    noise-averaged mean residual, and ignoring deleted points."""

    if formbats:
        psr.formbats()

    res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]

    # subtract the noise-weighted mean residual
    res -= numpy.sum(res / err**2) / numpy.sum(1 / err**2)

    # residuals are in seconds, TOA errors in microseconds (hence the 1e-12)
    return numpy.sum(res * res / (1e-12 * err * err))
python
Return the total chisq for the current timing solution, removing noise-averaged mean residual, and ignoring deleted points.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/fit.py#L4-L15
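A minimal usage sketch (the par/tim file names are placeholders):

import libstempo
from libstempo.fit import chisq

psr = libstempo.tempopulsar(parfile='example.par', timfile='example.tim')

total = chisq(psr)
print(total)                                  # total chi-squared of the current solution
print(total / (psr.nobs - len(psr.pars())))   # rough reduced chi-squared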
vallis/libstempo
libstempo/fit.py
dchisq
def dchisq(psr, formbats=False, renormalize=True):
    """Return gradient of total chisq for the current timing solution,
    after removing noise-averaged mean residual, and ignoring deleted points."""

    if formbats:
        psr.formbats()

    res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
    res -= numpy.sum(res / err**2) / numpy.sum(1 / err**2)

    # bats already updated by residuals(); skip constant-phase column
    M = psr.designmatrix(updatebats=False, fixunits=True, fixsigns=True)[psr.deleted == 0, 1:]

    # renormalize design-matrix columns
    if renormalize:
        norm = numpy.sqrt(numpy.sum(M**2, axis=0))
        M /= norm
    else:
        norm = 1.0

    # compute chisq derivative, de-renormalize
    dr = -2 * numpy.dot(M.T, res / (1e-12 * err**2)) * norm

    return dr
python
Return gradient of total chisq for the current timing solution, after removing noise-averaged mean residual, and ignoring deleted points.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/fit.py#L17-L41
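A sketch of a finite-difference check of this gradient, assuming (as findmin's helpers below do) that psr.vals() and psr.errs() get and set the fit parameters as sequences; check_dchisq is a hypothetical helper name:

import numpy

def check_dchisq(psr, i=0, h=1e-3):
    # compare the analytic gradient for parameter i against a central difference
    vals = numpy.array(psr.vals())
    g = dchisq(psr)[i]

    step = h * psr.errs()[i]
    mask = (numpy.arange(len(vals)) == i)

    psr.vals(vals + step * mask)
    c1 = chisq(psr)
    psr.vals(vals - step * mask)
    c0 = chisq(psr)
    psr.vals(vals)    # restore the original solution

    return (c1 - c0) / (2 * step), g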
vallis/libstempo
libstempo/fit.py
findmin
def findmin(psr, method='Nelder-Mead', history=False, formbats=False, renormalize=True, bounds={}, **kwargs):
    """Use scipy.optimize.minimize to find minimum-chisq timing solution,
    passing through all extra options. Resets psr[...].val to the final
    solution, and returns the final chisq. Will use chisq gradient if
    method requires it. Ignores deleted points."""

    ctr, err = psr.vals(), psr.errs()

    # to avoid losing precision, we're searching in units of parameter errors
    if numpy.any(err == 0.0):
        print("Warning: one or more fit parameters have zero a priori error, and won't be searched.")

    hloc, hval = [], []

    def func(xs):
        psr.vals([c + x * e for x, c, e in zip(xs, ctr, err)])
        ret = chisq(psr, formbats=formbats)

        if numpy.isnan(ret):
            print("Warning: chisq is nan at {0}.".format(psr.vals()))

        if history:
            hloc.append(psr.vals())
            hval.append(ret)

        return ret

    def dfunc(xs):
        psr.vals([c + x * e for x, c, e in zip(xs, ctr, err)])
        dc = dchisq(psr, formbats=formbats, renormalize=renormalize)
        ret = numpy.array([d * e for d, e in zip(dc, err)], 'd')
        return ret

    opts = kwargs.copy()

    if method not in ['Nelder-Mead', 'Powell']:
        opts['jac'] = dfunc

    if method in ['L-BFGS-B']:
        opts['bounds'] = [(float((bounds[par][0] - ctr[i]) / err[i]),
                           float((bounds[par][1] - ctr[i]) / err[i])) if par in bounds else (None, None)
                          for i, par in enumerate(psr.pars())]

    res = scipy.optimize.minimize(func, [0.0] * len(ctr), method=method, **opts)

    if hasattr(res, 'message'):
        print(res.message)

    # this will also set parameters to the minloc
    minchisq = func(res.x)

    if history:
        return minchisq, numpy.array(hval), numpy.array(hloc)
    else:
        return minchisq
python
Use scipy.optimize.minimize to find minimum-chisq timing solution, passing through all extra options. Resets psr[...].val to the final solution, and returns the final chisq. Will use chisq gradient if method requires it. Ignores deleted points.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/fit.py#L43-L101
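A usage sketch for a bounded search (assuming F0 is among the fit parameters; the bound width is illustrative):

import libstempo
from libstempo.fit import findmin

psr = libstempo.tempopulsar(parfile='example.par', timfile='example.tim')

minchisq = findmin(psr, method='L-BFGS-B',
                   bounds={'F0': (psr['F0'].val - 1e-9, psr['F0'].val + 1e-9)})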
vallis/libstempo
libstempo/fit.py
glsfit
def glsfit(psr, renormalize=True):
    """Solve the local GLS problem using scipy.linalg Cholesky routines
    (cho_factor/cho_solve). Update psr[...].val and psr[...].err from the
    solution. If renormalize=True, normalize each design-matrix column
    by its norm."""

    mask = psr.deleted == 0

    res, err = psr.residuals(removemean=False)[mask], psr.toaerrs[mask]
    M = psr.designmatrix(updatebats=False, fixunits=True, fixsigns=True)[mask, :]

    # TOA errors are in microseconds; C is in seconds^2
    C = numpy.diag((err * 1e-6)**2)

    if renormalize:
        norm = numpy.sqrt(numpy.sum(M**2, axis=0))
        M /= norm
    else:
        norm = numpy.ones_like(M[0, :])    # was np.ones_like, with np undefined in this module

    mtcm = numpy.dot(M.T, numpy.dot(numpy.linalg.inv(C), M))
    mtcy = numpy.dot(M.T, numpy.dot(numpy.linalg.inv(C), res))

    xvar = numpy.linalg.inv(mtcm)

    c = scipy.linalg.cho_factor(mtcm)
    xhat = scipy.linalg.cho_solve(c, mtcy)

    sol = psr.vals()

    # skip the constant-phase column (index 0)
    psr.vals(sol + xhat[1:] / norm[1:])
    psr.errs(numpy.sqrt(numpy.diag(xvar)[1:]) / norm[1:])

    return chisq(psr)
python
Solve the local GLS problem using scipy.linalg Cholesky routines (cho_factor/cho_solve). Update psr[...].val and psr[...].err from the solution. If renormalize=True, normalize each design-matrix column by its norm.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/fit.py#L103-L132
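The normal equations above invert the diagonal C explicitly; a numerically friendlier sketch of the same math whitens by the TOA uncertainties instead (gls_whitened is a hypothetical helper, not part of libstempo):

import numpy, scipy.linalg

def gls_whitened(M, res, err_us):
    # whiten by the TOA uncertainties (converted to seconds), then solve the normal equations
    sigma = err_us * 1e-6
    Mw, rw = M / sigma[:, None], res / sigma

    mtcm = numpy.dot(Mw.T, Mw)
    mtcy = numpy.dot(Mw.T, rw)

    c = scipy.linalg.cho_factor(mtcm)
    xhat = scipy.linalg.cho_solve(c, mtcy)
    xvar = numpy.linalg.inv(mtcm)

    return xhat, xvar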
vallis/libstempo
libstempo/utils.py
create_fourier_design_matrix
def create_fourier_design_matrix(t, nmodes, freq=False, Tspan=None,
                                 logf=False, fmin=None, fmax=None):
    """
    Construct Fourier design matrix from Eq. 11 of Lentati et al. (2013).

    :param t: vector of time series in seconds
    :param nmodes: number of Fourier coefficients to use
    :param freq: option to output frequencies
    :param Tspan: option to use some other Tspan
    :param logf: use log frequency spacing
    :param fmin: lower sampling frequency
    :param fmax: upper sampling frequency

    :return: F: Fourier design matrix
    :return: f: sampling frequencies (if freq=True)
    """

    N = len(t)
    F = np.zeros((N, 2 * nmodes))

    if Tspan is not None:
        T = Tspan
    else:
        T = t.max() - t.min()

    # define sampling frequencies
    if fmin is not None and fmax is not None:
        f = np.linspace(fmin, fmax, nmodes)
    else:
        f = np.linspace(1 / T, nmodes / T, nmodes)

    if logf:
        # note: log spacing overrides any fmin/fmax chosen above
        f = np.logspace(np.log10(1 / T), np.log10(nmodes / T), nmodes)

    Ffreqs = np.zeros(2 * nmodes)
    Ffreqs[0::2] = f
    Ffreqs[1::2] = f

    F[:, ::2] = np.sin(2 * np.pi * t[:, None] * f[None, :])
    F[:, 1::2] = np.cos(2 * np.pi * t[:, None] * f[None, :])

    if freq:
        return F, Ffreqs
    else:
        return F
python
Construct Fourier design matrix from Eq. 11 of Lentati et al. (2013).
:param t: vector of time series in seconds
:param nmodes: number of Fourier coefficients to use
:param freq: option to output frequencies
:param Tspan: option to use some other Tspan
:param logf: use log frequency spacing
:param fmin: lower sampling frequency
:param fmax: upper sampling frequency
:return: F: Fourier design matrix
:return: f: sampling frequencies (if freq=True)
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/utils.py#L31-L74
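A usage sketch with synthetic observation times (values illustrative):

import numpy as np
from libstempo.utils import create_fourier_design_matrix

t = np.sort(np.random.uniform(0, 3 * 3.16e7, 100))   # 100 TOAs over ~3 yr [s]
F, f = create_fourier_design_matrix(t, nmodes=30, freq=True)
print(F.shape)    # (100, 60): interleaved sine/cosine columns at frequencies f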
vallis/libstempo
libstempo/utils.py
powerlaw
def powerlaw(f, log10_A=-16, gamma=5):
    """Power-law PSD.

    :param f: sampling frequencies
    :param log10_A: log10 of red-noise amplitude [GW units]
    :param gamma: spectral index of red-noise process
    """

    fyr = 1 / 3.16e7
    return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma - 3) * f**(-gamma)
python
Power-law PSD.
:param f: sampling frequencies
:param log10_A: log10 of red-noise amplitude [GW units]
:param gamma: spectral index of red-noise process
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/utils.py#L77-L86
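A sketch combining the two utilities above to build a diagonal Fourier-mode prior and its induced time-domain covariance; the conversion from PSD to per-mode variance assumes a bin width df = 1/T, which is a convention of this sketch, not stated in the source:

import numpy as np
from libstempo.utils import create_fourier_design_matrix, powerlaw

t = np.sort(np.random.uniform(0, 3 * 3.16e7, 100))   # hypothetical TOAs [s]
T = t.max() - t.min()

F, f = create_fourier_design_matrix(t, nmodes=30, freq=True)
phi = powerlaw(f, log10_A=-14.5, gamma=13/3) / T     # PSD -> per-mode variance (assumed df = 1/T)
cov_rn = np.dot(F * phi, F.T)                        # induced time-domain covariance [s^2]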
vallis/libstempo
libstempo/toasim.py
add_gwb
def add_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5,
            gwAmp=1e-20, alpha=-0.66, logspacing=True):
    """Add a stochastic background from inspiraling binaries, using the tempo2
    code that underlies the GWbkgrd plugin.

    Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of
    binaries; 'seed' (a negative integer) reseeds the GWbkgrd
    pseudorandom-number generator; 'flow' and 'fhigh' [Hz] determine the
    background band; 'gwAmp' and 'alpha' determine its amplitude and exponent;
    and setting 'logspacing' to False will use linear spacing for the
    individual sources.

    It is also possible to create a background object with

        gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing)

    then call the method gwb.add_gwb(pulsar[i], dist) repeatedly to get
    a consistent background for multiple pulsars.

    Returns the GWB object."""

    gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing)

    gwb.add_gwb(psr, dist)

    return gwb
python
Add a stochastic background from inspiraling binaries, using the tempo2 code that underlies the GWbkgrd plugin.

Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries; 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number generator; 'flow' and 'fhigh' [Hz] determine the background band; 'gwAmp' and 'alpha' determine its amplitude and exponent; and setting 'logspacing' to False will use linear spacing for the individual sources.

It is also possible to create a background object with
gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)
then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a consistent background for multiple pulsars.

Returns the GWB object.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L32-L56
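To get one consistent background realization across an array, the docstring's GWB-object route looks like this sketch (pulsars and distances_kpc are placeholders; arguments follow the order given in the docstring):

from libstempo.toasim import GWB

gwb = GWB(1000, -42, 1e-8, 1e-5, 1e-20, -0.66, True)   # ngw, seed, flow, fhigh, gwAmp, alpha, logspacing
for p, d in zip(pulsars, distances_kpc):
    gwb.add_gwb(p, d)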
vallis/libstempo
libstempo/toasim.py
add_dipole_gwb
def add_dipole_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5,
                   gwAmp=1e-20, alpha=-0.66, logspacing=True,
                   dipoleamps=None, dipoledir=None, dipolemag=None):
    """Add a stochastic background from inspiraling binaries distributed
    according to a pure dipole distribution, using the tempo2 code that
    underlies the GWdipolebkgrd plugin.

    The basic use is identical to that of 'add_gwb': 'dist' is the pulsar
    distance [in kpc]; 'ngw' is the number of binaries; 'seed' (a negative
    integer) reseeds the GWbkgrd pseudorandom-number generator; 'flow' and
    'fhigh' [Hz] determine the background band; 'gwAmp' and 'alpha' determine
    its amplitude and exponent; and setting 'logspacing' to False will use
    linear spacing for the individual sources.

    Additionally, the dipole component can be specified by one of two methods:

    1) Specify the dipole direction as three dipole amplitudes, in the
       vector dipoleamps;
    2) Specify the direction of the dipole as a magnitude dipolemag, and
       a vector dipoledir=[dipolephi, dipoletheta].

    It is also possible to create a background object with

        gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing)

    then call the method gwb.add_gwb(pulsar[i], dist) repeatedly to get
    a consistent background for multiple pulsars.

    Returns the GWB object."""

    gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing,
              dipoleamps, dipoledir, dipolemag)

    gwb.add_gwb(psr, dist)

    return gwb
python
Add a stochastic background from inspiraling binaries distributed according to a pure dipole distribution, using the tempo2 code that underlies the GWdipolebkgrd plugin.

The basic use is identical to that of 'add_gwb': 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries; 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number generator; 'flow' and 'fhigh' [Hz] determine the background band; 'gwAmp' and 'alpha' determine its amplitude and exponent; and setting 'logspacing' to False will use linear spacing for the individual sources.

Additionally, the dipole component can be specified by one of two methods:
1) Specify the dipole direction as three dipole amplitudes, in the vector dipoleamps;
2) Specify the direction of the dipole as a magnitude dipolemag, and a vector dipoledir=[dipolephi, dipoletheta].

It is also possible to create a background object with
gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)
then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a consistent background for multiple pulsars.

Returns the GWB object.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L58-L95
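A sketch of the second dipole specification (all values illustrative):

from libstempo.toasim import add_dipole_gwb

gwb = add_dipole_gwb(psr, dist=1.5, ngw=1000, seed=-13,
                     dipolemag=0.3, dipoledir=[1.0, 0.5])   # dipoledir = [dipolephi, dipoletheta]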
vallis/libstempo
libstempo/toasim.py
fakepulsar
def fakepulsar(parfile, obstimes, toaerr, freq=1440.0, observatory='AXIS', flags='', iters=3):
    """Returns a libstempo tempopulsar object corresponding to a noiseless set
    of observations for the pulsar specified in 'parfile', with observations
    happening at times (MJD) given in the array (or list) 'obstimes', with
    measurement errors given by 'toaerr' (us).

    A new timfile can then be saved with pulsar.savetim(). Re the other parameters:

    - 'toaerr' needs to be either a common error, or a list of errors
      of the same length as 'obstimes';
    - 'freq' can be either a common observation frequency in MHz, or a list;
      it defaults to 1440;
    - 'observatory' can be either a common observatory name, or a list;
      it defaults to the IPTA MDC 'AXIS';
    - 'flags' can be a string (such as '-sys EFF.EBPP.1360') or a list of
      strings; it defaults to an empty string;
    - 'iters' is the number of iterative removals of computed residuals from
      TOAs (which is how the fake pulsar is made...)."""

    import tempfile
    outfile = tempfile.NamedTemporaryFile(delete=False)

    outfile.write(b'FORMAT 1\n')
    outfile.write(b'MODE 1\n')

    obsname = 'fake_' + os.path.basename(parfile)
    if obsname[-4:] == '.par':
        obsname = obsname[:-4]

    for i, t in enumerate(obstimes):
        outfile.write('{0} {1} {2} {3} {4} {5}\n'.format(
            obsname, _geti(freq, i), t, _geti(toaerr, i), _geti(observatory, i), _geti(flags, i)
        ).encode('ascii'))

    timfile = outfile.name
    outfile.close()

    pulsar = libstempo.tempopulsar(parfile, timfile, dofit=False)

    for i in range(iters):
        pulsar.stoas[:] -= pulsar.residuals() / 86400.0
        pulsar.formbats()

    os.remove(timfile)

    return pulsar
python
Returns a libstempo tempopulsar object corresponding to a noiseless set of observations for the pulsar specified in 'parfile', with observations happening at times (MJD) given in the array (or list) 'obstimes', with measurement errors given by 'toaerr' (us). A new timfile can then be saved with pulsar.savetim(). Re the other parameters:
- 'toaerr' needs to be either a common error, or a list of errors of the same length as 'obstimes';
- 'freq' can be either a common observation frequency in MHz, or a list; it defaults to 1440;
- 'observatory' can be either a common observatory name, or a list; it defaults to the IPTA MDC 'AXIS';
- 'flags' can be a string (such as '-sys EFF.EBPP.1360') or a list of strings; it defaults to an empty string;
- 'iters' is the number of iterative removals of computed residuals from TOAs (which is how the fake pulsar is made...).
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L100-L145
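A usage sketch (the par-file name is a placeholder):

import numpy as np
from libstempo.toasim import fakepulsar

# monthly observations over ~10 years, 0.5 us errors
obstimes = np.arange(53000, 53000 + 10 * 365.25, 30.0)
psr = fakepulsar('example.par', obstimes, toaerr=0.5)
psr.savetim('fake.tim')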
vallis/libstempo
libstempo/toasim.py
add_efac
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None):
    """Add nominal TOA errors, multiplied by `efac` factor.
    Optionally take a pseudorandom-number-generator seed."""

    if seed is not None:
        N.random.seed(seed)

    # default efacvec
    efacvec = N.ones(psr.nobs)

    # check that efac is scalar if flags is None
    if flags is None:
        if not N.isscalar(efac):
            raise ValueError('ERROR: If flags is None, efac must be a scalar')
        else:
            efacvec = N.ones(psr.nobs) * efac

    if flags is not None and flagid is not None and not N.isscalar(efac):
        if len(efac) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(psr.flagvals(flagid))
                efacvec[ind] = efac[ct]

    # toaerrs are in microseconds; stoas in days
    psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
python
Add nominal TOA errors, multiplied by `efac` factor. Optionally take a pseudorandom-number-generator seed.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L153-L176
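A sketch of a typical noise-injection chain on such a pulsar (values illustrative; add_equad and add_rednoise are defined below):

from libstempo import toasim

toasim.add_efac(psr, efac=1.1, seed=101)
toasim.add_equad(psr, equad=5e-7, seed=102)                        # 0.5 us quadrature noise
toasim.add_rednoise(psr, A=1e-14, gamma=13/3, components=30, seed=103)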
vallis/libstempo
libstempo/toasim.py
add_equad
def add_equad(psr, equad, flagid=None, flags=None, seed=None):
    """Add quadrature noise of rms `equad` [s].
    Optionally take a pseudorandom-number-generator seed."""

    if seed is not None:
        N.random.seed(seed)

    # default equadvec
    equadvec = N.zeros(psr.nobs)

    # check that equad is scalar if flags is None
    if flags is None:
        if not N.isscalar(equad):
            raise ValueError('ERROR: If flags is None, equad must be a scalar')
        else:
            equadvec = N.ones(psr.nobs) * equad

    if flags is not None and flagid is not None and not N.isscalar(equad):
        if len(equad) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(psr.flagvals(flagid))
                equadvec[ind] = equad[ct]

    psr.stoas[:] += (equadvec / day) * N.random.randn(psr.nobs)
python
Add quadrature noise of rms `equad` [s]. Optionally take a pseudorandom-number-generator seed.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L178-L201
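A sketch of per-backend EQUADs keyed on a flag, exercising the flags branch above (flag values illustrative):

from libstempo import toasim

toasim.add_equad(psr, equad=[1e-7, 3e-7], flagid='sys',
                 flags=['EFF.EBPP.1360', 'EFF.EBPP.2639'], seed=104)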
vallis/libstempo
libstempo/toasim.py
add_jitter
def add_jitter(psr, ecorr, flagid=None, flags=None, coarsegrain=0.1, seed=None):
    """Add correlated quadrature noise of rms `ecorr` [s],
    with coarse-graining time `coarsegrain` [days].
    Optionally take a pseudorandom-number-generator seed."""

    if seed is not None:
        N.random.seed(seed)

    if flags is None:
        t, U = quantize_fast(N.array(psr.toas(), 'd'), dt=coarsegrain)
    elif flags is not None and flagid is not None:
        t, f, U = quantize_fast(N.array(psr.toas(), 'd'),
                                N.array(psr.flagvals(flagid)), dt=coarsegrain)

    # default jitter value
    ecorrvec = N.zeros(len(t))

    # check that jitter is scalar if flags is None
    if flags is None:
        if not N.isscalar(ecorr):
            raise ValueError('ERROR: If flags is None, jitter must be a scalar')
        else:
            ecorrvec = N.ones(len(t)) * ecorr

    if flags is not None and flagid is not None and not N.isscalar(ecorr):
        if len(ecorr) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(f)
                ecorrvec[ind] = ecorr[ct]

    psr.stoas[:] += (1 / day) * N.dot(U * ecorrvec, N.random.randn(U.shape[1]))
python
Add correlated quadrature noise of rms `ecorr` [s], with coarse-graining time `coarsegrain` [days]. Optionally take a pseudorandom-number-generator seed.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L255-L287
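A usage sketch (values illustrative); U above is the epoch-quantization matrix built by quantize_fast, so every TOA within a ~0.1-day epoch shares one random deviate:

from libstempo import toasim

toasim.add_jitter(psr, ecorr=3e-7, coarsegrain=0.1, seed=105)   # 0.3 us epoch-correlated (ECORR) noise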
vallis/libstempo
libstempo/toasim.py
add_rednoise
def add_rednoise(psr, A, gamma, components=10, seed=None):
    """Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,
    using `components` Fourier bases.
    Optionally take a pseudorandom-number-generator seed."""

    if seed is not None:
        N.random.seed(seed)

    t = psr.toas()
    minx, maxx = N.min(t), N.max(t)
    x = (t - minx) / (maxx - minx)
    T = (day / year) * (maxx - minx)

    size = 2 * components
    F = N.zeros((psr.nobs, size), 'd')
    f = N.zeros(size, 'd')

    for i in range(components):
        F[:, 2*i] = N.cos(2 * math.pi * (i+1) * x)
        F[:, 2*i+1] = N.sin(2 * math.pi * (i+1) * x)

        f[2*i] = f[2*i+1] = (i+1) / T

    norm = A**2 * year**2 / (12 * math.pi**2 * T)
    prior = norm * f**(-gamma)

    y = N.sqrt(prior) * N.random.randn(size)

    psr.stoas[:] += (1.0 / day) * N.dot(F, y)
python
Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma, using `components` Fourier bases. Optionally take a pseudorandom-number-generator seed.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L290-L317
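A quick before/after sanity sketch (amplitude and index illustrative; for values like these the injected red noise should show up in the residual rms):

before = psr.residuals().std()
toasim.add_rednoise(psr, A=1e-14, gamma=13/3, components=30, seed=106)
after = psr.residuals().std()
print(before, after)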
vallis/libstempo
libstempo/toasim.py
add_line
def add_line(psr, f, A, offset=0.5):
    """Add a line of frequency `f` [Hz] and amplitude `A` [s],
    with origin at a fraction `offset` through the dataset."""

    t = psr.toas()
    t0 = offset * (N.max(t) - N.min(t))
    sine = A * N.cos(2 * math.pi * f * day * (t - t0))

    psr.stoas[:] += sine / day
python
Add a line of frequency `f` [Hz] and amplitude `A` [s], with origin at a fraction `offset` through the dataset.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L350-L360
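Usage sketch (values illustrative):

from libstempo import toasim

toasim.add_line(psr, f=1e-8, A=1e-7)   # 100 ns sinusoid at 1e-8 Hz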
vallis/libstempo
libstempo/toasim.py
add_glitch
def add_glitch(psr, epoch, amp):
    """
    Like the pulsar-term BWM event, but parameterized differently:
    just an amplitude (not log-amp) parameter, and an epoch. [source: piccard]

    :param psr: pulsar object
    :param epoch: TOA time (MJD) the burst hits the earth
    :param amp: amplitude of the glitch
    """

    # define the Heaviside function
    heaviside = lambda x: 0.5 * (N.sign(x) + 1)

    # glitches are spontaneous spin-up events:
    # TOAs will be advanced, and residuals will be negative
    psr.stoas[:] -= amp * heaviside(psr.toas() - epoch) * \
            (psr.toas() - epoch) * 86400.0
python
Like the pulsar-term BWM event, but parameterized differently: just an amplitude (not log-amp) parameter, and an epoch. [source: piccard]
:param psr: pulsar object
:param epoch: TOA time (MJD) the burst hits the earth
:param amp: amplitude of the glitch
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L362-L379
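Usage sketch (epoch and amplitude illustrative; note the body above converts the (t - epoch) factor from days to seconds):

from libstempo import toasim

toasim.add_glitch(psr, epoch=55000.0, amp=1e-12)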
vallis/libstempo
libstempo/toasim.py
add_cgw
def add_cgw(psr, gwtheta, gwphi, mc, dist, fgw, phase0, psi, inc, pdist=1.0,
            pphase=None, psrTerm=True, evolve=True,
            phase_approx=False, tref=0):
    """
    Function to create GW-induced residuals from a SMBHB as
    defined in Ellis et al. (2012, 2013). Tries to be smart about it...

    :param psr: pulsar object
    :param gwtheta: Polar angle of GW source in celestial coords [radians]
    :param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    :param mc: Chirp mass of SMBHB [solar masses]
    :param dist: Luminosity distance to SMBHB [Mpc]
    :param fgw: Frequency of GW (twice the orbital frequency) [Hz]
    :param phase0: Initial Phase of GW source [radians]
    :param psi: Polarization of GW source [radians]
    :param inc: Inclination of GW source [radians]
    :param pdist: Pulsar distance to use other than those in psr [kpc]
    :param pphase: Use pulsar phase to determine distance [radian]
    :param psrTerm: Option to include pulsar term [boolean]
    :param evolve: Option to include full frequency evolution [boolean]
    :param tref: Fiducial time at which initial parameters are referenced

    :returns: Vector of induced residuals
    """

    # convert units
    mc *= eu.SOLAR2S      # convert from solar masses to seconds
    dist *= eu.MPC2S      # convert from Mpc to seconds

    # define initial orbital frequency
    w0 = N.pi * fgw
    phase0 /= 2           # orbital phase
    w053 = w0**(-5/3)

    # define variables for later use
    cosgwtheta, cosgwphi = N.cos(gwtheta), N.cos(gwphi)
    singwtheta, singwphi = N.sin(gwtheta), N.sin(gwphi)
    sin2psi, cos2psi = N.sin(2*psi), N.cos(2*psi)
    incfac1, incfac2 = 0.5*(3+N.cos(2*inc)), 2*N.cos(inc)

    # unit vectors to GW source
    m = N.array([singwphi, -cosgwphi, 0.0])
    n = N.array([-cosgwtheta*cosgwphi, -cosgwtheta*singwphi, singwtheta])
    omhat = N.array([-singwtheta*cosgwphi, -singwtheta*singwphi, -cosgwtheta])

    # various factors involving GW parameters
    fac1 = 256/5 * mc**(5/3) * w0**(8/3)
    fac2 = 1/32/mc**(5/3)
    fac3 = mc**(5/3)/dist

    # pulsar location (was `if 'RAJ' and 'DECJ' in ...`, which only tested the second name)
    if 'RAJ' in psr.pars() and 'DECJ' in psr.pars():
        ptheta = N.pi/2 - psr['DECJ'].val
        pphi = psr['RAJ'].val
    elif 'ELONG' in psr.pars() and 'ELAT' in psr.pars():
        fac = 180./N.pi
        coords = ephem.Equatorial(ephem.Ecliptic(str(psr['ELONG'].val*fac),
                                                 str(psr['ELAT'].val*fac)))

        ptheta = N.pi/2 - float(repr(coords.dec))
        pphi = float(repr(coords.ra))

    # use definition from Sesana et al. (2010) and Ellis et al. (2012)
    phat = N.array([N.sin(ptheta)*N.cos(pphi), N.sin(ptheta)*N.sin(pphi),
                    N.cos(ptheta)])

    fplus = 0.5 * (N.dot(m, phat)**2 - N.dot(n, phat)**2) / (1+N.dot(omhat, phat))
    fcross = (N.dot(m, phat)*N.dot(n, phat)) / (1 + N.dot(omhat, phat))
    cosMu = -N.dot(omhat, phat)

    # get values from pulsar object
    toas = psr.toas()*86400 - tref
    if pphase is not None:
        pd = pphase/(2*N.pi*fgw*(1-cosMu)) / eu.KPC2S
    else:
        pd = pdist

    # convert units
    pd *= eu.KPC2S        # convert from kpc to seconds

    # get pulsar time
    tp = toas - pd*(1-cosMu)

    # evolution
    if evolve:
        # calculate time-dependent frequency at earth and pulsar
        omega = w0 * (1 - fac1 * toas)**(-3/8)
        omega_p = w0 * (1 - fac1 * tp)**(-3/8)

        # calculate time-dependent phase
        phase = phase0 + fac2 * (w053 - omega**(-5/3))
        phase_p = phase0 + fac2 * (w053 - omega_p**(-5/3))

    # use approximation that frequency does not evolve over observation time
    elif phase_approx:
        # frequencies
        omega = w0
        omega_p = w0 * (1 + fac1 * pd*(1-cosMu))**(-3/8)

        # phases
        phase = phase0 + omega * toas
        phase_p = phase0 + fac2 * (w053 - omega_p**(-5/3)) + omega_p*toas

    # no evolution
    else:
        # monochromatic
        omega = w0
        omega_p = omega

        # phases
        phase = phase0 + omega * toas
        phase_p = phase0 + omega * tp

    # define time-dependent coefficients
    At = N.sin(2*phase) * incfac1
    Bt = N.cos(2*phase) * incfac2
    At_p = N.sin(2*phase_p) * incfac1
    Bt_p = N.cos(2*phase_p) * incfac2

    # now define time-dependent amplitudes
    alpha = fac3 / omega**(1/3)
    alpha_p = fac3 / omega_p**(1/3)

    # define rplus and rcross
    rplus = alpha * (At*cos2psi + Bt*sin2psi)
    rcross = alpha * (-At*sin2psi + Bt*cos2psi)
    rplus_p = alpha_p * (At_p*cos2psi + Bt_p*sin2psi)
    rcross_p = alpha_p * (-At_p*sin2psi + Bt_p*cos2psi)

    # residuals
    if psrTerm:
        res = fplus*(rplus_p - rplus) + fcross*(rcross_p - rcross)
    else:
        res = -fplus*rplus - fcross*rcross

    psr.stoas[:] += res/86400
python
Function to create GW-induced residuals from a SMBHB as defined in Ellis et al. (2012, 2013). Tries to be smart about it...
:param psr: pulsar object
:param gwtheta: Polar angle of GW source in celestial coords [radians]
:param gwphi: Azimuthal angle of GW source in celestial coords [radians]
:param mc: Chirp mass of SMBHB [solar masses]
:param dist: Luminosity distance to SMBHB [Mpc]
:param fgw: Frequency of GW (twice the orbital frequency) [Hz]
:param phase0: Initial Phase of GW source [radians]
:param psi: Polarization of GW source [radians]
:param inc: Inclination of GW source [radians]
:param pdist: Pulsar distance to use other than those in psr [kpc]
:param pphase: Use pulsar phase to determine distance [radian]
:param psrTerm: Option to include pulsar term [boolean]
:param evolve: Option to include full frequency evolution [boolean]
:param tref: Fiducial time at which initial parameters are referenced
:returns: Vector of induced residuals
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L381-L521
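A usage sketch (all source parameters illustrative):

from libstempo import toasim

toasim.add_cgw(psr, gwtheta=1.0, gwphi=2.3, mc=5e8, dist=100.0,
               fgw=1e-8, phase0=0.5, psi=0.3, inc=0.8,
               pdist=1.2, psrTerm=True, evolve=True)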
vallis/libstempo
libstempo/toasim.py
add_ecc_cgw
def add_ecc_cgw(psr, gwtheta, gwphi, mc, dist, F, inc, psi, gamma0,
                e0, l0, q, nmax=100, nset=None, pd=None, periEv=True,
                psrTerm=True, tref=0, check=True, useFile=True):
    """
    Simulate GW from eccentric SMBHB. Waveform models from
    Taylor et al. (2015) and Barack & Cutler (2004).

    WARNING: This residual waveform is only accurate if the
    GW frequency is not significantly evolving over the
    observation time of the pulsar.

    :param psr: pulsar object
    :param gwtheta: Polar angle of GW source in celestial coords [radians]
    :param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    :param mc: Chirp mass of SMBHB [solar masses]
    :param dist: Luminosity distance to SMBHB [Mpc]
    :param F: Orbital frequency of SMBHB [Hz]
    :param inc: Inclination of GW source [radians]
    :param psi: Polarization of GW source [radians]
    :param gamma0: Initial angle of periastron [radians]
    :param e0: Initial eccentricity of SMBHB
    :param l0: Initial mean anomaly [radians]
    :param q: Mass ratio of SMBHB
    :param nmax: Number of harmonics to use in waveform decomposition
    :param nset: Fix the number of harmonics to be injected
    :param pd: Pulsar distance [kpc]
    :param periEv: Evolve the position of periapsis [boolean]
    :param psrTerm: Option to include pulsar term [boolean]
    :param tref: Fiducial time at which initial parameters are referenced [s]
    :param check: Check if frequency evolves significantly over obs. time
    :param useFile: Use pre-computed table of number of harmonics vs eccentricity

    :returns: Vector of induced residuals
    """

    # define variables for later use
    cosgwtheta, cosgwphi = N.cos(gwtheta), N.cos(gwphi)
    singwtheta, singwphi = N.sin(gwtheta), N.sin(gwphi)
    sin2psi, cos2psi = N.sin(2*psi), N.cos(2*psi)

    # unit vectors to GW source
    m = N.array([singwphi, -cosgwphi, 0.0])
    n = N.array([-cosgwtheta*cosgwphi, -cosgwtheta*singwphi, singwtheta])
    omhat = N.array([-singwtheta*cosgwphi, -singwtheta*singwphi, -cosgwtheta])

    # pulsar location (was `if 'RAJ' and 'DECJ' in ...`, which only tested the second name)
    if 'RAJ' in psr.pars() and 'DECJ' in psr.pars():
        ptheta = N.pi/2 - psr['DECJ'].val
        pphi = psr['RAJ'].val
    elif 'ELONG' in psr.pars() and 'ELAT' in psr.pars():
        fac = 180./N.pi
        coords = ephem.Equatorial(ephem.Ecliptic(str(psr['ELONG'].val*fac),
                                                 str(psr['ELAT'].val*fac)))

        ptheta = N.pi/2 - float(repr(coords.dec))
        pphi = float(repr(coords.ra))

    # use definition from Sesana et al. (2010) and Ellis et al. (2012)
    phat = N.array([N.sin(ptheta)*N.cos(pphi), N.sin(ptheta)*N.sin(pphi),
                    N.cos(ptheta)])

    fplus = 0.5 * (N.dot(m, phat)**2 - N.dot(n, phat)**2) / (1+N.dot(omhat, phat))
    fcross = (N.dot(m, phat)*N.dot(n, phat)) / (1 + N.dot(omhat, phat))
    cosMu = -N.dot(omhat, phat)

    # get values from pulsar object
    toas = N.double(psr.toas())*86400 - tref

    if check:
        # check that frequency is not evolving significantly over obs. time
        y = eu.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc, q,
                                          N.array([0.0, toas.max()]))

        # initial and final values over observation time
        Fc0, ec0, gc0, phic0 = y[0,:]
        Fc1, ec1, gc1, phic1 = y[-1,:]

        # observation time (was 1/(toas.max()-toas.min()), which inverted the comparison below)
        Tobs = toas.max() - toas.min()

        if N.abs(Fc0-Fc1) > 1/Tobs:
            print('WARNING: Frequency is evolving over more than one frequency bin.')
            print('F0 = {0}, F1 = {1}, delta f = {2}'.format(Fc0, Fc1, 1/Tobs))

    # get gammadot for earth term
    if periEv == False:
        gammadot = 0.0
    else:
        gammadot = eu.get_gammadot(F, mc, q, e0)

    if nset is not None:
        nharm = nset
    elif useFile:
        if e0 > 0.001 and e0 < 0.999:
            nharm = min(int(ecc_interp(e0)), nmax) + 1
        elif e0 < 0.001:
            nharm = 3
        else:
            nharm = nmax
    else:
        nharm = nmax

    ##### earth term #####
    splus, scross = eu.calculate_splus_scross(nharm, mc, dist, F, e0, toas,
                                              l0, gamma0, gammadot, inc)

    ##### pulsar term #####
    if psrTerm:
        # convert units
        pd *= eu.KPC2S    # convert from kpc to seconds

        # get pulsar time
        tp = toas - pd * (1-cosMu)

        # solve coupled system of equations to get pulsar term values
        y = eu.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc, q,
                                          N.array([0.0, tp.min()]))

        # get pulsar term values
        if N.any(y):
            Fp, ep, gp, lp = y[-1,:]

            # get gammadot at pulsar term
            gammadotp = eu.get_gammadot(Fp, mc, q, ep)

            if useFile:
                if ep > 0.001 and ep < 0.999:
                    nharm = min(int(ecc_interp(ep)), nmax)
                elif ep < 0.001:
                    nharm = 3
                else:
                    nharm = nmax
            else:
                nharm = nmax

            splusp, scrossp = eu.calculate_splus_scross(nharm, mc, dist, Fp, ep,
                                                        toas, lp, gp, gammadotp, inc)

            rr = (fplus*cos2psi - fcross*sin2psi) * (splusp - splus) + \
                 (fplus*sin2psi + fcross*cos2psi) * (scrossp - scross)
        else:
            rr = N.zeros(len(toas))    # was N.zeros(len(p.toas)), with p undefined

    else:
        rr = - (fplus*cos2psi - fcross*sin2psi) * splus - \
               (fplus*sin2psi + fcross*cos2psi) * scross

    psr.stoas[:] += rr/86400
python
Simulate GW from eccentric SMBHB. Waveform models from Taylor et al. (2015) and Barack & Cutler (2004). WARNING: This residual waveform is only accurate if the GW frequency is not significantly evolving over the observation time of the pulsar.
:param psr: pulsar object
:param gwtheta: Polar angle of GW source in celestial coords [radians]
:param gwphi: Azimuthal angle of GW source in celestial coords [radians]
:param mc: Chirp mass of SMBHB [solar masses]
:param dist: Luminosity distance to SMBHB [Mpc]
:param F: Orbital frequency of SMBHB [Hz]
:param inc: Inclination of GW source [radians]
:param psi: Polarization of GW source [radians]
:param gamma0: Initial angle of periastron [radians]
:param e0: Initial eccentricity of SMBHB
:param l0: Initial mean anomaly [radians]
:param q: Mass ratio of SMBHB
:param nmax: Number of harmonics to use in waveform decomposition
:param nset: Fix the number of harmonics to be injected
:param pd: Pulsar distance [kpc]
:param periEv: Evolve the position of periapsis [boolean]
:param psrTerm: Option to include pulsar term [boolean]
:param tref: Fiducial time at which initial parameters are referenced [s]
:param check: Check if frequency evolves significantly over obs. time
:param useFile: Use pre-computed table of number of harmonics vs eccentricity
:returns: Vector of induced residuals
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L523-L674
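A usage sketch (parameters illustrative; note pd must be supplied when psrTerm=True, since the body rescales it to seconds):

from libstempo import toasim

toasim.add_ecc_cgw(psr, gwtheta=1.0, gwphi=2.3, mc=5e8, dist=100.0,
                   F=5e-9, inc=0.8, psi=0.3, gamma0=0.0,
                   e0=0.5, l0=0.0, q=0.7, pd=1.2, psrTerm=True)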
vallis/libstempo
libstempo/toasim.py
extrap1d
def extrap1d(interpolator):
    """
    Function to extend an interpolation function to an extrapolation function.

    :param interpolator: scipy interp1d object

    :returns ufunclike: extension of function to extrapolation
    """

    xs = interpolator.x
    ys = interpolator.y

    def pointwise(x):
        if x < xs[0]:
            return ys[0]    # + (x-xs[0]) * (ys[1]-ys[0]) / (xs[1]-xs[0])
        elif x > xs[-1]:
            return ys[-1]   # + (x-xs[-1]) * (ys[-1]-ys[-2]) / (xs[-1]-xs[-2])
        else:
            return interpolator(x)

    def ufunclike(xs):
        # list() needed under Python 3, where map returns an iterator
        return N.array(list(map(pointwise, N.array(xs))))

    return ufunclike
python
Function to extend an interpolation function to an extrapolation function. :param interpolator: scipy interp1d object :returns ufunclike: extension of function to extrapolation
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L677-L701
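A small self-contained check of the clamping behavior:

import numpy as np
from scipy import interpolate as interp
from libstempo.toasim import extrap1d

fi = interp.interp1d(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 4.0]))
fx = extrap1d(fi)
print(fx([-1.0, 0.5, 3.0]))   # [0. 0.5 4.]: endpoint values are held outside [0, 2]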
vallis/libstempo
libstempo/toasim.py
createGWB
def createGWB(psr, Amp, gam, noCorr=False, seed=None, turnover=False,
              clm=[N.sqrt(4.0*N.pi)], lmax=0, f0=1e-9, beta=1, power=1,
              userSpec=None, npts=600, howml=10):
    """
    Function to create GW-induced residuals from a stochastic GWB as defined
    in Chamberlin, Creighton, Demorest, et al. (2014).

    :param psr: list of pulsar objects
    :param Amp: Amplitude of red noise in GW units
    :param gam: Red noise power-law spectral index
    :param noCorr: Add red noise with no spatial correlations
    :param seed: Random number seed
    :param turnover: Produce spectrum with turnover at frequency f0
    :param clm: coefficients of spherical harmonic decomposition of GW power
    :param lmax: maximum multipole of GW power decomposition
    :param f0: Frequency of spectrum turnover
    :param beta: Spectral index of power spectrum for f << f0
    :param power: Fudge factor for flatness of spectrum turnover
    :param userSpec: User-supplied characteristic strain spectrum
                     (first column is freqs, second is spectrum)
    :param npts: Number of points used in interpolation
    :param howml: Lowest frequency is 1/(howml * T)

    :returns: list of residuals for each pulsar
    """

    if seed is not None:
        N.random.seed(seed)

    # number of pulsars
    Npulsars = len(psr)

    # gw start and end times for entire data set
    start = N.min([p.toas().min()*86400 for p in psr]) - 86400
    stop = N.max([p.toas().max()*86400 for p in psr]) + 86400

    # duration of the signal
    dur = stop - start

    # get maximum number of points
    if npts is None:
        # default to cadence of 2 weeks (cast added: linspace needs an integer count)
        npts = int(dur/(86400*14))

    # make a vector of evenly sampled data points
    ut = N.linspace(start, stop, npts)

    # time resolution in days
    dt = dur/npts

    # compute the overlap reduction function
    if noCorr:
        ORF = N.diag(N.ones(Npulsars)*2)
    else:
        psrlocs = N.zeros((Npulsars, 2))

        for ii in range(Npulsars):
            # (was `if 'RAJ' and 'DECJ' in ...`, which only tested the second name)
            if 'RAJ' in psr[ii].pars() and 'DECJ' in psr[ii].pars():
                psrlocs[ii] = N.double(psr[ii]['RAJ'].val), N.double(psr[ii]['DECJ'].val)
            elif 'ELONG' in psr[ii].pars() and 'ELAT' in psr[ii].pars():
                fac = 180./N.pi

                # check for B name
                if 'B' in psr[ii].name:
                    epoch = '1950'
                else:
                    epoch = '2000'

                coords = ephem.Equatorial(ephem.Ecliptic(str(psr[ii]['ELONG'].val*fac),
                                                         str(psr[ii]['ELAT'].val*fac)),
                                          epoch=epoch)
                psrlocs[ii] = float(repr(coords.ra)), float(repr(coords.dec))

        psrlocs[:,1] = N.pi/2. - psrlocs[:,1]
        anisbasis = N.array(anis.CorrBasis(psrlocs, lmax))
        ORF = sum(clm[kk]*anisbasis[kk] for kk in range(len(anisbasis)))
        ORF *= 2.0

    # Define frequencies spanning from DC to Nyquist.
    # This is a vector spanning these frequencies in increments of 1/(dur*howml).
    f = N.arange(0, 1/(2*dt), 1/(dur*howml))
    f[0] = f[1]    # avoid divide-by-zero warning
    Nf = len(f)

    # use Cholesky transform to take 'square root' of ORF
    M = N.linalg.cholesky(ORF)

    # create random frequency series from zero-mean, unit-variance Gaussian distributions
    w = N.zeros((Npulsars, Nf), complex)
    for ll in range(Npulsars):
        w[ll,:] = N.random.randn(Nf) + 1j*N.random.randn(Nf)

    # strain amplitude
    if userSpec is None:
        f1yr = 1/3.16e7
        alpha = -0.5 * (gam-3)
        hcf = Amp * (f/f1yr)**(alpha)

        if turnover:
            si = alpha - beta
            hcf /= (1 + (f/f0)**(power*si))**(1/power)

    elif userSpec is not None:
        freqs = userSpec[:,0]
        # (was comparing len(userSpec[:,0]) against itself, which never triggered)
        if len(userSpec[:,1]) != len(freqs):
            raise ValueError("Number of supplied spectral points does not match number of frequencies!")
        else:
            fspec_in = interp.interp1d(N.log10(freqs), N.log10(userSpec[:,1]), kind='linear')
            fspec_ex = extrap1d(fspec_in)
            hcf = 10.0**fspec_ex(N.log10(f))

    C = 1 / 96 / N.pi**2 * hcf**2 / f**3 * dur * howml

    ### injection residuals in the frequency domain
    Res_f = N.dot(M, w)
    for ll in range(Npulsars):
        Res_f[ll] = Res_f[ll] * C**(0.5)    # rescale by frequency-dependent factor
        Res_f[ll,0] = 0                     # set DC bin to zero to avoid infinities
        Res_f[ll,-1] = 0                    # set Nyquist bin to zero also

    # now fill in bins after Nyquist (for fft data packing) and take inverse FT
    Res_f2 = N.zeros((Npulsars, 2*Nf-2), complex)
    Res_t = N.zeros((Npulsars, 2*Nf-2))
    Res_f2[:,0:Nf] = Res_f[:,0:Nf]
    Res_f2[:, Nf:(2*Nf-2)] = N.conj(Res_f[:,(Nf-2):0:-1])
    Res_t = N.real(N.fft.ifft(Res_f2)/dt)

    # shorten data and interpolate onto TOAs
    Res = N.zeros((Npulsars, npts))
    res_gw = []
    for ll in range(Npulsars):
        Res[ll,:] = Res_t[ll, 10:(npts+10)]
        f = interp.interp1d(ut, Res[ll,:], kind='linear')
        res_gw.append(f(psr[ll].toas()*86400))

    # return res_gw
    ct = 0
    for p in psr:
        p.stoas[:] += res_gw[ct]/86400.0
        ct += 1
python
Function to create GW-induced residuals from a stochastic GWB as defined in Chamberlin, Creighton, Demorest, et al. (2014). :param psr: list of pulsar objects :param Amp: Amplitude of red noise in GW units :param gam: Red noise power law spectral index :param noCorr: Add red noise with no spatial correlations :param seed: Random number seed :param turnover: Produce spectrum with turnover at frequency f0 :param clm: coefficients of spherical harmonic decomposition of GW power :param lmax: maximum multipole of GW power decomposition :param f0: Frequency of spectrum turnover :param beta: Spectral index of power spectrum for f << f0 :param power: Fudge factor for flatness of spectrum turnover :param userSpec: User-supplied characteristic strain spectrum (first column is freqs, second is spectrum) :param npts: Number of points used in interpolation :param howml: Lowest frequency is 1/(howml * T) :returns: None; the induced residuals are added in place to each pulsar's TOAs
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L704-L842
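A hedged usage sketch (the par/tim file names are hypothetical, and a working tempo2/libstempo installation is assumed): inject an isotropic, Hellings-Downs-correlated background into a small array.

import libstempo
import libstempo.toasim as LT

# hypothetical par/tim files; any set of pulsars will do
psrs = [libstempo.tempopulsar(parfile='psr1.par', timfile='psr1.tim'),
        libstempo.tempopulsar(parfile='psr2.par', timfile='psr2.tim')]

# hc(f) = 1e-15 (f yr)^(-2/3), i.e. the supermassive-binary value gam = 13/3;
# the residuals are added in place to each pulsar's stoas
LT.createGWB(psrs, Amp=1e-15, gam=13./3., seed=42)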
vallis/libstempo
libstempo/toasim.py
computeORFMatrix
def computeORFMatrix(psr):
    """
    Compute ORF matrix.

    :param psr: List of pulsar object instances

    :returns: Matrix that has the ORF values for every pulsar pair
              with 2 on the diagonals to account for the pulsar term.
    """

    # begin loop over all pulsar pairs and calculate ORF
    npsr = len(psr)
    ORF = N.zeros((npsr, npsr))
    phati = N.zeros(3)
    phatj = N.zeros(3)
    ptheta = [N.pi/2 - p['DECJ'].val for p in psr]
    pphi = [p['RAJ'].val for p in psr]

    for ll in range(0, npsr):
        phati[0] = N.cos(pphi[ll]) * N.sin(ptheta[ll])
        phati[1] = N.sin(pphi[ll]) * N.sin(ptheta[ll])
        phati[2] = N.cos(ptheta[ll])

        for kk in range(0, npsr):
            phatj[0] = N.cos(pphi[kk]) * N.sin(ptheta[kk])
            phatj[1] = N.sin(pphi[kk]) * N.sin(ptheta[kk])
            phatj[2] = N.cos(ptheta[kk])

            if ll != kk:
                xip = (1. - N.sum(phati*phatj)) / 2.
                ORF[ll, kk] = 3.*(1./3. + xip*(N.log(xip) - 1./6.))
            else:
                ORF[ll, kk] = 2.0

    return ORF
python
Compute ORF matrix. :param psr: List of pulsar object instances :returns: Matrix that has the ORF values for every pulsar pair with 2 on the diagonals to account for the pulsar term.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L844-L879
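The off-diagonal expression above is algebraically twice the standard Hellings-Downs correlation without the pulsar term; a standalone numpy check of that identity (the 60-degree separation is made up):

import numpy as N

def hd_orf(zeta):
    # Hellings-Downs correlation for angular separation zeta,
    # normalized so that zeta -> 0 gives 0.5 (no pulsar term)
    x = (1.0 - N.cos(zeta)) / 2.0
    return 1.5*x*N.log(x) - x/4.0 + 0.5

zeta = N.pi/3
x = (1.0 - N.cos(zeta)) / 2.0
print(2*hd_orf(zeta), 3.*(1./3. + x*(N.log(x) - 1./6.)))  # the two agree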
vallis/libstempo
libstempo/plot.py
plotres
def plotres(psr, deleted=False, group=None, **kwargs):
    """Plot residuals, compute unweighted rms residual."""

    res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs

    if (not deleted) and N.any(psr.deleted != 0):
        res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0]
        print("Plotting {0}/{1} nondeleted points.".format(len(res), psr.nobs))

    meanres = math.sqrt(N.mean(res**2)) / 1e-6

    if group is None:
        i = N.argsort(t)
        P.errorbar(t[i], res[i]/1e-6, yerr=errs[i], fmt='x', **kwargs)
    else:
        if (not deleted) and N.any(psr.deleted):
            # select flags for nondeleted points; psr.deleted is an integer
            # array, so compare against 0 rather than using bitwise ~
            flagmask = psr.flagvals(group)[psr.deleted == 0]
        else:
            flagmask = psr.flagvals(group)

        unique = list(set(flagmask))

        for flagval in unique:
            f = (flagmask == flagval)
            flagres, flagt, flagerrs = res[f], t[f], errs[f]
            i = N.argsort(flagt)

            P.errorbar(flagt[i], flagres[i]/1e-6, yerr=flagerrs[i], fmt='x', **kwargs)

        P.legend(unique, numpoints=1, bbox_to_anchor=(1.1, 1.1))

    P.xlabel('MJD')
    P.ylabel('res [us]')
    P.title("{0} - rms res = {1:.2f} us".format(psr.name, meanres))
python
Plot residuals, compute unweighted rms residual.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L7-L38
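A usage sketch (the par/tim files and the TOA flag name are hypothetical):

import matplotlib.pyplot as P
import libstempo
from libstempo.plot import plotres

psr = libstempo.tempopulsar(parfile='psr1.par', timfile='psr1.tim')

plotres(psr)                 # all nondeleted points as one errorbar series
# plotres(psr, group='sys')  # or one series per value of a TOA flag
P.show()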
vallis/libstempo
libstempo/plot.py
plotgwsrc
def plotgwsrc(gwb):
    """
    Plot a GWB source population as a mollweide projection.
    """

    theta, phi, omega, polarization = gwb.gw_dist()

    rho = phi - N.pi
    eta = 0.5*N.pi - theta

    # I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
    #   /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
    #   RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
    # old_settings = N.seterr(invalid='ignore')

    P.title("GWB source population")
    ax = P.axes(projection='mollweide')

    foo = P.scatter(rho, eta, marker='.', s=1)

    # bar = N.seterr(**old_settings)

    return foo
python
Plot a GWB source population as a mollweide projection.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L304-L324
vallis/libstempo
libstempo/like.py
loglike
def loglike(pulsar, efac=1.0, equad=None, jitter=None, Ared=None, gammared=None,
            marginalize=True, normalize=True, redcomponents=10, usedeleted=True):
    """Returns the Gaussian-process likelihood for 'pulsar'. The likelihood is
    evaluated at the current value of the pulsar parameters, as given by
    pulsar[parname].val.

    If efac, equad, and/or Ared are set, will compute the likelihood assuming
    the corresponding noise model. EFAC multiplies measurement noise; EQUAD adds
    in quadrature, and is given in us; red noise is specified with the GW-like
    dimensionless amplitude Ared and exponent gamma, and is modeled with
    'redcomponents' Fourier components.

    If marginalize=True (the default), loglike will marginalize over all the
    parameters in pulsar.fitpars, using an M-matrix formulation."""

    mask = Mask(pulsar, usedeleted)

    err = 1.0e-6 * mask(pulsar.toaerrs)
    Cdiag = (efac*err)**2

    if equad:
        Cdiag = Cdiag + (1e-6*equad)**2 * N.ones(len(err))

    if Ared:
        redf, F = _setuprednoise(pulsar, redcomponents)
        F = mask(F)
        phi = Ared**2 * redf**(-gammared)

    if jitter:
        # quantize at 1 second; U plays the role of redF
        t, U = _quantize(86400.0 * mask(pulsar.toas()), 1.0)
        phi_j = (1e-6*jitter)**2 * N.ones(U.shape[1])

        # stack the basis arrays if we're also doing red noise
        phi = N.hstack((phi, phi_j)) if Ared else phi_j
        F = N.hstack((F, U)) if Ared else U

    if Ared or jitter:
        # Lentati formulation for correlated noise
        # (dot here is the module's chained matrix-product helper, not numpy.dot,
        # as can be seen from the three-argument calls below)
        invphi = N.diag(1/phi)
        Ninv = N.diag(1/Cdiag)
        NinvF = dot(Ninv, F)  # could be accelerated

        X = invphi + dot(F.T, NinvF)  # invphi + F^T Ninv F
        Cinv = Ninv - dot(NinvF, N.linalg.inv(X), NinvF.T)
        logCdet = N.sum(N.log(Cdiag)) + N.sum(N.log(phi)) + N.linalg.slogdet(X)[1]  # check
    else:
        # noise is all diagonal
        Cinv = N.diag(1/Cdiag)
        logCdet = N.sum(N.log(Cdiag))

    if marginalize:
        M = mask(pulsar.designmatrix())
        res = mask(N.array(pulsar.residuals(updatebats=False), 'd'))

        CinvM = N.dot(Cinv, M)
        A = dot(M.T, CinvM)
        invA = N.linalg.inv(A)

        CinvMres = dot(res, CinvM)
        ret = -0.5 * dot(res, Cinv, res) + 0.5 * dot(CinvMres, invA, CinvMres.T)

        if normalize:
            ret = ret - 0.5 * logCdet - 0.5 * N.linalg.slogdet(A)[1] \
                  - 0.5 * (M.shape[0] - M.shape[1]) * math.log(2.0*math.pi)
    else:
        res = mask(N.array(pulsar.residuals(), 'd'))
        ret = -0.5 * dot(res, Cinv, res)

        if normalize:
            ret = ret - 0.5 * logCdet - 0.5 * len(res) * math.log(2.0*math.pi)

    return ret
python
Returns the Gaussian-process likelihood for 'pulsar'. The likelihood is evaluated at the current value of the pulsar parameters, as given by pulsar[parname].val. If efac, equad, and/or Ared are set, will compute the likelihood assuming the corresponding noise model. EFAC multiplies measurement noise; EQUAD adds in quadrature, and is given in us; red-noise is specified with the GW-like dimensionless amplitude Ared and exponent gamma, and is modeled with 'redcomponents' Fourier components. If marginalize=True (the default), loglike will marginalize over all the parameters in pulsar.fitpars, using an M-matrix formulation.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/like.py#L74-L148
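A usage sketch (hypothetical par/tim files; the noise values are illustrative only):

import libstempo
from libstempo.like import loglike

psr = libstempo.tempopulsar(parfile='psr1.par', timfile='psr1.tim')

# white noise only: EFAC = 1.1, EQUAD = 0.5 us
print(loglike(psr, efac=1.1, equad=0.5))

# add a power-law red-noise model with 20 Fourier components
print(loglike(psr, efac=1.1, equad=0.5, Ared=1e-14, gammared=4.33, redcomponents=20))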
vallis/libstempo
libstempo/like.py
expandranges
def expandranges(parlist):
    """Rewrite a list of parameters by expanding ranges (e.g., log10_efac{1-10})
    into individual parameters."""

    ret = []

    for par in parlist:
        # match anything of the form XXX{number1-number2}
        # (raw string avoids invalid-escape warnings under Python 3)
        m = re.match(r'(.*)\{([0-9]+)-([0-9]+)\}', par)

        if m is None:
            ret.append(par)
        else:
            # (these are strings)
            root, number1, number2 = m.group(1), m.group(2), m.group(3)

            # if number1 begins with 0s, number parameters as 00, 01, 02, ...,
            # otherwise go with 0, 1, 2, ...
            fmt = '{{0}}{{1:0{0}d}}'.format(len(number1)) if number1[0] == '0' else '{0}{1:d}'

            ret = ret + [fmt.format(root, i) for i in range(int(number1), int(number2) + 1)]

    return ret
python
Rewrite a list of parameters by expanding ranges (e.g., log10_efac{1-10}) into individual parameters.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/like.py#L242-L264
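A quick demonstration, assuming expandranges as defined above is in scope:

print(expandranges(['RAJ', 'log10_efac{1-3}']))
# -> ['RAJ', 'log10_efac1', 'log10_efac2', 'log10_efac3']

print(expandranges(['JUMP{01-03}']))
# zero-padded start -> zero-padded names: ['JUMP01', 'JUMP02', 'JUMP03']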
vallis/libstempo
libstempo/like.py
_findrange
def _findrange(parlist, roots=['JUMP', 'DMXR1_', 'DMXR2_', 'DMX_', 'efac', 'log10_efac']):
    """Rewrite a list of parameter names by detecting ranges (e.g., JUMP1, JUMP2, ...)
    and compressing them."""

    rootdict = {root: [] for root in roots}
    res = []

    for par in parlist:
        found = False

        for root in roots:
            if len(par) > len(root) and par[:len(root)] == root:
                rootdict[root].append(int(par[len(root):]))
                found = True

        if not found:
            res.append(par)

    for root in roots:
        if rootdict[root]:
            if len(rootdict[root]) > 1:
                rmin, rmax = min(rootdict[root]), max(rootdict[root])
                res.append('{0}{{{1}-{2}}}{3}'.format(root, rmin, rmax,
                           '(incomplete)' if rmax - rmin != len(rootdict[root]) - 1 else ''))
            else:
                res.append('{0}{1}'.format(root, rootdict[root][0]))

    return res
python
Rewrite a list of parameter names by detecting ranges (e.g., JUMP1, JUMP2, ...) and compressing them.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/like.py#L267-L291
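A quick demonstration, assuming _findrange as defined above is in scope; missing members of a range are flagged:

print(_findrange(['RAJ', 'JUMP1', 'JUMP2', 'JUMP3', 'DMX_1', 'DMX_3']))
# -> ['RAJ', 'JUMP{1-3}', 'DMX_{1-3}(incomplete)']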
vallis/libstempo
libstempo/emcee.py
merge
def merge(data, skip=50, fraction=1.0):
    """Merge one of every 'skip' clouds into a single emcee population,
    using the later 'fraction' of the run."""

    w, s, d = data.chains.shape

    start = int((1.0 - fraction) * s)
    total = int((s - start) / skip)

    return data.chains[:, start::skip, :].reshape((w*total, d))
python
Merge one of every 'skip' clouds into a single emcee population, using the later 'fraction' of the run.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/emcee.py#L46-L55
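A sketch with a synthetic emcee-like run; the SimpleNamespace stands in for the real chain container, which is assumed to expose a (walkers, steps, dims) array as .chains:

import types
import numpy as N

# 10 walkers, 1000 steps, 3 parameters
data = types.SimpleNamespace(chains=N.random.randn(10, 1000, 3))

samples = merge(data, skip=50, fraction=0.5)
print(samples.shape)  # -> (100, 3): 10 walkers x 10 thinned clouds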
vallis/libstempo
libstempo/emcee.py
cull
def cull(data, index, min=None, max=None):
    """Sieve an emcee cloud by excluding walkers with search variable 'index'
    smaller than 'min' or larger than 'max'."""

    ret = data

    if min is not None:
        ret = ret[ret[:, index] > min, :]

    if max is not None:
        ret = ret[ret[:, index] < max, :]

    return ret
python
Sieve an emcee cloud by excluding walkers with search variable 'index' smaller than 'min' or larger than 'max'.
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/emcee.py#L57-L69
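A sketch on synthetic samples, assuming cull as defined above:

import numpy as N

data = N.random.randn(1000, 3)

kept = cull(data, 0, min=0.0)          # keep walkers with parameter 0 positive
kept = cull(kept, 1, min=-1, max=1)    # and parameter 1 within (-1, 1)
print(kept.shape)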
vallis/libstempo
libstempo/eccUtils.py
make_ecc_interpolant
def make_ecc_interpolant():
    """
    Make interpolation function from eccentricity file to
    determine number of harmonics to use for a given
    eccentricity.

    :returns: interpolant
    """

    pth = resource_filename(Requirement.parse('libstempo'),
                            'libstempo/ecc_vs_nharm.txt')

    fil = np.loadtxt(pth)

    return interp1d(fil[:,0], fil[:,1])
python
Make interpolation function from eccentricity file to determine number of harmonics to use for a given eccentricity. :returns: interpolant
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L28-L41
vallis/libstempo
libstempo/eccUtils.py
get_edot
def get_edot(F, mc, e):
    """
    Compute eccentricity derivative from Taylor et al. (2015)

    :param F: Orbital frequency [Hz]
    :param mc: Chirp mass of binary [Solar Mass]
    :param e: Eccentricity of binary

    :returns: de/dt
    """

    # chirp mass in seconds
    mc *= SOLAR2S

    dedt = -304/(15*mc) * (2*np.pi*mc*F)**(8/3) * e * \
        (1 + 121/304*e**2) / ((1-e**2)**(5/2))

    return dedt
python
Compute eccentricity derivative from Taylor et al. (2015) :param F: Orbital frequency [Hz] :param mc: Chirp mass of binary [Solar Mass] :param e: Eccentricity of binary :returns: de/dt
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L45-L63
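A worked numeric example, assuming get_edot as defined above and Python 3 true division; the SOLAR2S value is the standard GM_sun/c^3 conversion, stated here because the module-level constant is not shown:

import numpy as np

SOLAR2S = 4.925490947e-6  # GM_sun/c^3 in seconds (assumed constant)

# de/dt for a 1e9 solar-mass chirp-mass binary at F = 10 nHz with e = 0.5;
# the result is negative, since radiation reaction circularizes the orbit
print(get_edot(F=1e-8, mc=1e9, e=0.5))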
vallis/libstempo
libstempo/eccUtils.py
get_Fdot
def get_Fdot(F, mc, e):
    """
    Compute frequency derivative from Taylor et al. (2015)

    :param F: Orbital frequency [Hz]
    :param mc: Chirp mass of binary [Solar Mass]
    :param e: Eccentricity of binary

    :returns: dF/dt
    """

    # chirp mass in seconds
    mc *= SOLAR2S

    dFdt = 48 / (5*np.pi*mc**2) * (2*np.pi*mc*F)**(11/3) * \
        (1 + 73/24*e**2 + 37/96*e**4) / ((1-e**2)**(7/2))

    return dFdt
python
Compute frequency derivative from Taylor et al. (2015) :param F: Orbital frequency [Hz] :param mc: Chirp mass of binary [Solar Mass] :param e: Eccentricity of binary :returns: dF/dt
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L65-L83
vallis/libstempo
libstempo/eccUtils.py
get_gammadot
def get_gammadot(F, mc, q, e):
    """
    Compute gamma dot from Barack and Cutler (2004)

    :param F: Orbital frequency [Hz]
    :param mc: Chirp mass of binary [Solar Mass]
    :param q: Mass ratio of binary
    :param e: Eccentricity of binary

    :returns: dgamma/dt
    """

    # chirp mass in seconds
    mc *= SOLAR2S

    # total mass
    m = (((1+q)**2)/q)**(3/5) * mc

    dgdt = 6*np.pi*F * (2*np.pi*F*m)**(2/3) / (1-e**2) * \
        (1 + 0.25*(2*np.pi*F*m)**(2/3)/(1-e**2)*(26-15*e**2))

    return dgdt
python
Compute gamma dot from Barack and Cutler (2004) :param F: Orbital frequency [Hz] :param mc: Chirp mass of binary [Solar Mass] :param q: Mass ratio of binary :param e: Eccentricity of binary :returns: dgamma/dt
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L85-L107
vallis/libstempo
libstempo/eccUtils.py
get_coupled_ecc_eqns
def get_coupled_ecc_eqns(y, t, mc, q):
    """
    Computes the coupled system of differential
    equations from Peters (1964) and Barack &
    Cutler (2004). This is a system of four variables:

    F: Orbital frequency [Hz]
    e: Orbital eccentricity
    gamma: Angle of precession of periastron [rad]
    phase0: Orbital phase [rad]

    :param y: Vector of input parameters [F, e, gamma, phase]
    :param t: Time [s]
    :param mc: Chirp mass of binary [Solar Mass]
    :param q: Mass ratio of binary

    :returns: array of derivatives [dF/dt, de/dt, dgamma/dt, dphase/dt]
    """

    F = y[0]
    e = y[1]
    gamma = y[2]
    phase = y[3]

    dFdt = get_Fdot(F, mc, e)
    dedt = get_edot(F, mc, e)
    dgdt = get_gammadot(F, mc, q, e)
    dphasedt = 2*np.pi*F

    return np.array([dFdt, dedt, dgdt, dphasedt])
python
Computes the coupled system of differential equations from Peters (1964) and Barack & Cutler (2004). This is a system of four variables: F: Orbital frequency [Hz] e: Orbital eccentricity gamma: Angle of precession of periastron [rad] phase0: Orbital phase [rad] :param y: Vector of input parameters [F, e, gamma, phase] :param t: Time [s] :param mc: Chirp mass of binary [Solar Mass] :param q: Mass ratio of binary :returns: array of derivatives [dF/dt, de/dt, dgamma/dt, dphase/dt]
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L109-L141
vallis/libstempo
libstempo/eccUtils.py
solve_coupled_ecc_solution
def solve_coupled_ecc_solution(F0, e0, gamma0, phase0, mc, q, t):
    """
    Compute the solution to the coupled system of equations
    from Peters (1964) and Barack & Cutler (2004) at a
    given time.

    :param F0: Initial orbital frequency [Hz]
    :param e0: Initial orbital eccentricity
    :param gamma0: Initial angle of precession of periastron [rad]
    :param phase0: Initial orbital phase [rad]
    :param mc: Chirp mass of binary [Solar Mass]
    :param q: Mass ratio of binary
    :param t: Time at which to evaluate solution [s]

    :returns: (F(t), e(t), gamma(t), phase(t))
    """

    y0 = np.array([F0, e0, gamma0, phase0])

    y, infodict = odeint(get_coupled_ecc_eqns, y0, t, args=(mc, q), full_output=True)

    if infodict['message'] == 'Integration successful.':
        ret = y
    else:
        ret = 0

    return ret
python
Compute the solution to the coupled system of equations from Peters (1964) and Barack & Cutler (2004) at a given time. :param F0: Initial orbital frequency [Hz] :param e0: Initial orbital eccentricity :param gamma0: Initial angle of precession of periastron [rad] :param phase0: Initial orbital phase [rad] :param mc: Chirp mass of binary [Solar Mass] :param q: Mass ratio of binary :param t: Time at which to evaluate solution [s] :returns: (F(t), e(t), gamma(t), phase(t))
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L143-L169
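An integration sketch, assuming the get_* functions and the SOLAR2S constant above are in scope; the binary parameters are made up:

import numpy as np

# evolve a hypothetical binary (mc = 1e9 Msun, q = 1) over ten years
t = np.linspace(0, 10 * 3.15569e7, 100)  # seconds
sol = solve_coupled_ecc_solution(F0=1e-8, e0=0.5, gamma0=0.0, phase0=0.0,
                                 mc=1e9, q=1.0, t=t)

if not np.isscalar(sol):  # a scalar 0 signals a failed integration
    F, e, gamma, phase = sol[-1]
    print(F, e)  # frequency grows while eccentricity decays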
vallis/libstempo
libstempo/eccUtils.py
get_an
def get_an(n, mc, dl, F, e):
    """
    Compute a_n from Eq. 22 of Taylor et al. (2015).

    :param n: Harmonic number
    :param mc: Chirp mass of binary [Solar Mass]
    :param dl: Luminosity distance [Mpc]
    :param F: Orbital frequency of binary [Hz]
    :param e: Orbital Eccentricity

    :returns: a_n
    """

    # convert to seconds
    mc *= SOLAR2S
    dl *= MPC2S

    omega = 2 * np.pi * F

    amp = n * mc**(5/3) * omega**(2/3) / dl

    ret = -amp * (ss.jn(n-2, n*e) - 2*e*ss.jn(n-1, n*e) +
                  (2/n)*ss.jn(n, n*e) + 2*e*ss.jn(n+1, n*e) -
                  ss.jn(n+2, n*e))

    return ret
python
Compute a_n from Eq. 22 of Taylor et al. (2015). :param n: Harmonic number :param mc: Chirp mass of binary [Solar Mass] :param dl: Luminosity distance [Mpc] :param F: Orbital frequency of binary [Hz] :param e: Orbital Eccentricity :returns: a_n
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L171-L197
vallis/libstempo
libstempo/eccUtils.py
get_bn
def get_bn(n, mc, dl, F, e):
    """
    Compute b_n from Eq. 22 of Taylor et al. (2015).

    :param n: Harmonic number
    :param mc: Chirp mass of binary [Solar Mass]
    :param dl: Luminosity distance [Mpc]
    :param F: Orbital frequency of binary [Hz]
    :param e: Orbital Eccentricity

    :returns: b_n
    """

    # convert to seconds
    mc *= SOLAR2S
    dl *= MPC2S

    omega = 2 * np.pi * F

    amp = n * mc**(5/3) * omega**(2/3) / dl

    ret = -amp * np.sqrt(1-e**2) * (ss.jn(n-2, n*e) - 2*ss.jn(n, n*e) +
                                    ss.jn(n+2, n*e))

    return ret
python
Compute b_n from Eq. 22 of Taylor et al. (2015). :param n: Harmonic number :param mc: Chirp mass of binary [Solar Mass] :param dl: Luminosity distance [Mpc] :param F: Orbital frequency of binary [Hz] :param e: Orbital Eccentricity :returns: b_n
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L199-L224
vallis/libstempo
libstempo/eccUtils.py
get_cn
def get_cn(n, mc, dl, F, e):
    """
    Compute c_n from Eq. 22 of Taylor et al. (2015).

    :param n: Harmonic number
    :param mc: Chirp mass of binary [Solar Mass]
    :param dl: Luminosity distance [Mpc]
    :param F: Orbital frequency of binary [Hz]
    :param e: Orbital Eccentricity

    :returns: c_n
    """

    # convert to seconds
    mc *= SOLAR2S
    dl *= MPC2S

    omega = 2 * np.pi * F

    amp = 2 * mc**(5/3) * omega**(2/3) / dl

    ret = amp * ss.jn(n, n*e) / (n * omega)

    return ret
python
Compute c_n from Eq. 22 of Taylor et al. (2015). :param n: Harmonic number :param mc: Chirp mass of binary [Solar Mass] :param dl: Luminosity distance [Mpc] :param F: Orbital frequency of binary [Hz] :param e: Orbital Eccentricity :returns: c_n
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L226-L250
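A sketch evaluating the first few harmonic amplitudes; the conversion constants are stated here because the module-level values are not shown, and the binary parameters are made up:

import numpy as np
import scipy.special as ss

SOLAR2S = 4.925490947e-6  # GM_sun/c^3 [s] (assumed constant)
MPC2S = 1.02927125e14     # Mpc/c [s] (assumed constant)

# harmonic amplitudes for a hypothetical binary:
# mc = 1e9 Msun, dl = 100 Mpc, F = 10 nHz, e = 0.3
for n in range(1, 5):
    print(n, get_an(n, 1e9, 100.0, 1e-8, 0.3),
          get_bn(n, 1e9, 100.0, 1e-8, 0.3),
          get_cn(n, 1e9, 100.0, 1e-8, 0.3))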
vallis/libstempo
libstempo/eccUtils.py
calculate_splus_scross
def calculate_splus_scross(nmax, mc, dl, F, e, t, l0, gamma, gammadot, inc):
    """
    Calculate splus and scross summed over all harmonics.
    This waveform differs slightly from that in Taylor et al (2015)
    in that it includes the time dependence of the advance of periastron.

    :param nmax: Total number of harmonics to use
    :param mc: Chirp mass of binary [Solar Mass]
    :param dl: Luminosity distance [Mpc]
    :param F: Orbital frequency of binary [Hz]
    :param e: Orbital Eccentricity
    :param t: TOAs [s]
    :param l0: Initial eccentric anomaly [rad]
    :param gamma: Angle of periastron advance [rad]
    :param gammadot: Time derivative of angle of periastron advance [rad/s]
    :param inc: Inclination angle [rad]
    """

    n = np.arange(1, nmax)

    # time dependent amplitudes
    an = get_an(n, mc, dl, F, e)
    bn = get_bn(n, mc, dl, F, e)
    cn = get_cn(n, mc, dl, F, e)

    # time dependent terms
    omega = 2*np.pi*F
    gt = gamma + gammadot * t
    lt = l0 + omega * t

    # tiled phase
    phase1 = n * np.tile(lt, (nmax-1, 1)).T
    phase2 = np.tile(gt, (nmax-1, 1)).T

    phasep = phase1 + 2*phase2
    phasem = phase1 - 2*phase2

    # intermediate terms
    sp = np.sin(phasem)/(n*omega - 2*gammadot) + \
        np.sin(phasep)/(n*omega + 2*gammadot)
    sm = np.sin(phasem)/(n*omega - 2*gammadot) - \
        np.sin(phasep)/(n*omega + 2*gammadot)
    cp = np.cos(phasem)/(n*omega - 2*gammadot) + \
        np.cos(phasep)/(n*omega + 2*gammadot)
    cm = np.cos(phasem)/(n*omega - 2*gammadot) - \
        np.cos(phasep)/(n*omega + 2*gammadot)

    splus_n = -0.5 * (1 + np.cos(inc)**2) * (an*sp - bn*sm) + \
        (1 - np.cos(inc)**2) * cn * np.sin(phase1)
    scross_n = np.cos(inc) * (an*cm - bn*cp)

    return np.sum(splus_n, axis=1), np.sum(scross_n, axis=1)
python
Calculate splus and scross summed over all harmonics. This waveform differs slightly from that in Taylor et al (2015) in that it includes the time dependence of the advance of periastron. :param nmax: Total number of harmonics to use :param mc: Chirp mass of binary [Solar Mass] :param dl: Luminosity distance [Mpc] :param F: Orbital frequency of binary [Hz] :param e: Orbital Eccentricity :param t: TOAs [s] :param l0: Initial eccentric anomaly [rad] :param gamma: Angle of periastron advance [rad] :param gammadot: Time derivative of angle of periastron advance [rad/s] :param inc: Inclination angle [rad]
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L252-L305
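A waveform sketch, assuming the functions and constants above are in scope (same made-up binary as before):

import numpy as np

# two weeks of daily samples of the plus/cross polarizations;
# gammadot = 0 turns off the periastron-advance time dependence
t = np.arange(14) * 86400.0
splus, scross = calculate_splus_scross(nmax=10, mc=1e9, dl=100.0, F=1e-8,
                                       e=0.3, t=t, l0=0.0, gamma=0.0,
                                       gammadot=0.0, inc=np.pi/3)
print(splus.shape, scross.shape)  # -> (14,) (14,)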
vallis/libstempo
libstempo/multinest.py
run
def run(LogLikelihood, Prior, n_dims, n_params=None, n_clustering_params=None,
        wrapped_params=None, importance_nested_sampling=True, multimodal=True,
        const_efficiency_mode=False, n_live_points=400, evidence_tolerance=0.5,
        sampling_efficiency=0.8, n_iter_before_update=100, null_log_evidence=-1e90,
        max_modes=100, mode_tolerance=-1e90, outputfiles_basename="./multinest-",
        seed=-1, verbose=False, resume=True, context=None, write_output=True,
        log_zero=-1e100, max_iter=0, init_MPI=True, dump_callback=None):
    """
    Runs MultiNest

    The most important parameters are the two log-probability functions Prior
    and LogLikelihood. They are called by MultiNest.

    Prior should transform the unit cube into the parameter cube. Here
    is an example for a uniform prior::

        def Prior(cube, ndim, nparams):
            for i in range(ndim):
                cube[i] = cube[i] * 10 * math.pi

    The LogLikelihood function gets this parameter cube and should
    return the logarithm of the likelihood.
    Here is the example for the eggbox problem::

        def Loglike(cube, ndim, nparams):
            chi = 1.
            for i in range(ndim):
                chi *= math.cos(cube[i] / 2.)
            return math.pow(2. + chi, 5)

    Some of the parameters are explained below. Otherwise consult the
    MultiNest documentation.

    @param importance_nested_sampling:
        If True, Multinest will use Importance Nested Sampling (INS). Read
        http://arxiv.org/abs/1306.2144 for more details on INS. Please read
        the MultiNest README file before using the INS in MultiNest v3.0.

    @param n_params:
        Total no. of parameters, should be equal to ndims in most cases but
        if you need to store some additional parameters with the actual
        parameters then you need to pass them through the likelihood routine.

    @param sampling_efficiency:
        defines the sampling efficiency. 0.8 and 0.3 are recommended for
        parameter estimation & evidence evaluation respectively. use
        'parameter' or 'model' to select the respective default values

    @param mode_tolerance:
        MultiNest can find multiple modes & also specify which samples belong
        to which mode. It might be desirable to have separate samples & mode
        statistics for modes with local log-evidence value greater than a
        particular value in which case Ztol should be set to that value. If
        there isn't any particularly interesting Ztol value, then Ztol should
        be set to a very large negative number (e.g. -1e90).

    @param evidence_tolerance:
        A value of 0.5 should give good enough accuracy.

    @param n_clustering_params:
        If mmodal is T, MultiNest will attempt to separate out the modes.
        Mode separation is done through a clustering algorithm. Mode
        separation can be done on all the parameters (in which case nCdims
        should be set to ndims) & it can also be done on a subset of
        parameters (in which case nCdims < ndims) which might be advantageous
        as clustering is less accurate as the dimensionality increases. If
        nCdims < ndims then mode separation is done on the first nCdims
        parameters.

    @param null_log_evidence:
        If mmodal is T, MultiNest can find multiple modes & also specify
        which samples belong to which mode. It might be desirable to have
        separate samples & mode statistics for modes with local log-evidence
        value greater than a particular value in which case nullZ should be
        set to that value. If there isn't any particularly interesting nullZ
        value, then nullZ should be set to a very large negative number
        (e.g. -1.d90).

    @param init_MPI:
        initialize MPI routines?, relevant only if compiling with MPI

    @param log_zero:
        points with loglike < logZero will be ignored by MultiNest

    @param max_iter:
        maximum number of iterations. 0 is unlimited.

    @param write_output:
        write output files? This is required for analysis.

    @param dump_callback:
        a callback function for dumping the current status
    """

    if n_params is None:
        n_params = n_dims
    if n_clustering_params is None:
        n_clustering_params = n_dims
    if wrapped_params is None:
        wrapped_params = [0] * n_dims

    WrappedType = c_int * len(wrapped_params)
    wraps = WrappedType(*wrapped_params)

    if sampling_efficiency == 'parameter':
        sampling_efficiency = 0.8
    if sampling_efficiency == 'model':
        sampling_efficiency = 0.3

    # MV 20130923
    loglike_type = CFUNCTYPE(c_double, POINTER(c_double), c_int, c_int, c_void_p)
    dumper_type = CFUNCTYPE(c_void_p, c_int, c_int, c_int,
                            POINTER(c_double), POINTER(c_double), POINTER(c_double),
                            c_double, c_double, c_double, c_void_p)

    if hasattr(LogLikelihood, 'loglike') and hasattr(Prior, 'remap') and hasattr(Prior, 'prior'):
        def loglike(cube, ndim, nparams, nullcontext):
            # we're not using context with libstempo.like objects
            pprior = Prior.premap(cube)

            # mappers are supposed to throw a ValueError if they get out of range
            try:
                pars = Prior.remap(cube)
            except ValueError:
                return -N.inf

            prior = pprior * Prior.prior(pars)

            return -N.inf if not prior else math.log(prior) + LogLikelihood.loglike(pars)
    else:
        def loglike(cube, ndim, nparams, nullcontext):
            # it's actually easier to use the context, if any, at the Python level
            # and pass a null pointer to MultiNest...
            args = [cube, ndim, nparams] + ([] if context is None else context)

            if Prior:
                Prior(*args)

            return LogLikelihood(*args)

    def dumper(nSamples, nlive, nPar,
               physLive, posterior, paramConstr,
               maxLogLike, logZ, logZerr, nullcontext):
        if dump_callback:
            # It's not clear to me what the desired PyMultiNest dumper callback
            # syntax is... but this should pass back the right numpy arrays,
            # without copies. Untested!
            pc = as_array(paramConstr, shape=(nPar, 4))

            dump_callback(nSamples, nlive, nPar,
                          as_array(physLive, shape=(nPar+1, nlive)).T,
                          as_array(posterior, shape=(nPar+2, nSamples)).T,
                          (pc[0,:], pc[1,:], pc[2,:], pc[3,:]),  # (mean,std,bestfit,map)
                          maxLogLike, logZ, logZerr)

    # MV 20130923: currently we support only multinest 3.2 (24 parameters),
    # but it would not be a problem to build up the parameter list dynamically
    lib.run(c_bool(importance_nested_sampling), c_bool(multimodal), c_bool(const_efficiency_mode),
            c_int(n_live_points), c_double(evidence_tolerance),
            c_double(sampling_efficiency), c_int(n_dims), c_int(n_params),
            c_int(n_clustering_params), c_int(max_modes),
            c_int(n_iter_before_update), c_double(mode_tolerance),
            create_string_buffer(outputfiles_basename.encode()),  # MV 20130923: need a regular C string
            c_int(seed), wraps,
            c_bool(verbose), c_bool(resume),
            c_bool(write_output), c_bool(init_MPI),
            c_double(log_zero), c_int(max_iter),
            loglike_type(loglike), dumper_type(dumper),
            c_void_p(0))
python
Runs MultiNest The most important parameters are the two log-probability functions Prior and LogLikelihood. They are called by MultiNest. Prior should transform the unit cube into the parameter cube. Here is an example for a uniform prior:: def Prior(cube, ndim, nparams): for i in range(ndim): cube[i] = cube[i] * 10 * math.pi The LogLikelihood function gets this parameter cube and should return the logarithm of the likelihood. Here is the example for the eggbox problem:: def Loglike(cube, ndim, nparams): chi = 1. for i in range(ndim): chi *= math.cos(cube[i] / 2.) return math.pow(2. + chi, 5) Some of the parameters are explained below. Otherwise consult the MultiNest documentation. @param importance_nested_sampling: If True, Multinest will use Importance Nested Sampling (INS). Read http://arxiv.org/abs/1306.2144 for more details on INS. Please read the MultiNest README file before using the INS in MultiNest v3.0. @param n_params: Total no. of parameters, should be equal to ndims in most cases but if you need to store some additional parameters with the actual parameters then you need to pass them through the likelihood routine. @param sampling_efficiency: defines the sampling efficiency. 0.8 and 0.3 are recommended for parameter estimation & evidence evaluation respectively. use 'parameter' or 'model' to select the respective default values @param mode_tolerance: MultiNest can find multiple modes & also specify which samples belong to which mode. It might be desirable to have separate samples & mode statistics for modes with local log-evidence value greater than a particular value in which case Ztol should be set to that value. If there isn't any particularly interesting Ztol value, then Ztol should be set to a very large negative number (e.g. -1e90). @param evidence_tolerance: A value of 0.5 should give good enough accuracy. @param n_clustering_params: If mmodal is T, MultiNest will attempt to separate out the modes. Mode separation is done through a clustering algorithm. Mode separation can be done on all the parameters (in which case nCdims should be set to ndims) & it can also be done on a subset of parameters (in which case nCdims < ndims) which might be advantageous as clustering is less accurate as the dimensionality increases. If nCdims < ndims then mode separation is done on the first nCdims parameters. @param null_log_evidence: If mmodal is T, MultiNest can find multiple modes & also specify which samples belong to which mode. It might be desirable to have separate samples & mode statistics for modes with local log-evidence value greater than a particular value in which case nullZ should be set to that value. If there isn't any particularly interesting nullZ value, then nullZ should be set to a very large negative number (e.g. -1.d90). @param init_MPI: initialize MPI routines?, relevant only if compiling with MPI @param log_zero: points with loglike < logZero will be ignored by MultiNest @param max_iter: maximum number of iterations. 0 is unlimited. @param write_output: write output files? This is required for analysis. @param dump_callback: a callback function for dumping the current status
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/multinest.py#L22-L208
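A run sketch reusing the eggbox toy problem from the docstring; it assumes the MultiNest shared library was found when the module was imported, and it writes chain files under the given basename:

import math

def Prior(cube, ndim, nparams):
    for i in range(ndim):
        cube[i] = cube[i] * 10 * math.pi

def Loglike(cube, ndim, nparams):
    chi = 1.
    for i in range(ndim):
        chi *= math.cos(cube[i] / 2.)
    return math.pow(2. + chi, 5)

run(Loglike, Prior, n_dims=2, n_live_points=1000,
    outputfiles_basename='./eggbox-', resume=False, verbose=True)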
gagneurlab/concise
concise/legacy/kmer.py
best_kmers
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1,
               seq_align="start", trim_seq_len=None):
    """
    Find best k-mers for CONCISE initialization.

    Args:
        dt (pd.DataFrame): Table containing response variable and sequence.
        response (str): Name of the column used as the response variable.
        sequence (str): Name of the column storing the DNA/RNA sequences.
        k (int): Desired k-mer length.
        n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
        consider_shift (boolean): When performing stepwise k-mer selection.
            Is TATTTA similar to ATTTAG?
        seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
        trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence
            when generating the sequence design matrix. If :python:`None`, set
            :py:attr:`trim_seq_len` to the longest sequence length, hence whole
            sequences are considered.

    Returns:
        string list: Best set of motifs for this dataset sorted with respect to
        confidence (best candidate occurring first).

    Details:
        First a lasso model gets fitted to get a set of initial motifs. Next,
        the best subset of unrelated motifs is selected by stepwise selection.
    """

    y = dt[response]
    seq = dt[sequence]

    if trim_seq_len is not None:
        seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
        seq = [s.replace("N", "") for s in seq]

    dt_kmer = kmer_count(seq, k)
    Xsp = csc_matrix(dt_kmer)
    en = ElasticNet(alpha=1, standardize=False, n_splits=3)
    en.fit(Xsp, y)

    # which coefficients are nonzero?
    nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist()

    # perform stepwise selection
    #
    # TODO - how do we deal with the intercept?
    # largest number of motifs where they don't differ by more than 1 k-mer

    def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):
        """
        Perform stepwise model selection while preventing the addition of
        motifs similar to the already selected ones.
        """
        F, pval = f_regression(dt_kmer[to_be_selected_kmers], y)
        kmer = to_be_selected_kmers.pop(pval.argmin())
        selected_kmers.append(kmer)

        def select_criterion(s1, s2, consider_shift=True):
            if hamming_distance(s1, s2) <= 1:
                return False
            if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
                return False
            if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
                return False
            return True

        to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers
                                if select_criterion(ckmer, kmer, consider_shift)]

        if len(to_be_selected_kmers) == 0:
            return selected_kmers
        else:
            # regress out the new feature
            lm = LinearRegression()
            lm.fit(dt_kmer[selected_kmers], y)
            y_new = y - lm.predict(dt_kmer[selected_kmers])

            return find_next_best(dt_kmer, y_new, selected_kmers,
                                  to_be_selected_kmers, consider_shift)

    selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)

    return selected_kmers
python
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align="start", trim_seq_len=None): """ Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the reponse variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occuring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection. """ y = dt[response] seq = dt[sequence] if trim_seq_len is not None: seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len) seq = [s.replace("N", "") for s in seq] dt_kmer = kmer_count(seq, k) Xsp = csc_matrix(dt_kmer) en = ElasticNet(alpha=1, standardize=False, n_splits=3) en.fit(Xsp, y) # which coefficients are nonzero?= nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist() # perform stepwise selection # # TODO - how do we deal with the intercept? # largest number of motifs where they don't differ by more than 1 k-mer def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True): """ perform stepwise model selection while preventing to add a motif similar to the already selected motifs. """ F, pval = f_regression(dt_kmer[to_be_selected_kmers], y) kmer = to_be_selected_kmers.pop(pval.argmin()) selected_kmers.append(kmer) def select_criterion(s1, s2, consider_shift=True): if hamming_distance(s1, s2) <= 1: return False if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0: return False if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0: return False return True to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)] if len(to_be_selected_kmers) == 0: return selected_kmers else: # regress out the new feature lm = LinearRegression() lm.fit(dt_kmer[selected_kmers], y) y_new = y - lm.predict(dt_kmer[selected_kmers]) return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift) selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift) return selected_kmers
Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the reponse variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occuring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L28-L102
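An illustrative call might look like the following; the toy DataFrame and column names are hypothetical, and a realistic run needs many more sequences for the lasso step to be meaningful.

import pandas as pd
from concise.legacy.kmer import best_kmers

dt = pd.DataFrame({
    "seq": ["ACGTTATGGA", "GACGCGATTT", "TTATTTAGCA"],  # toy sequences
    "y": [0.1, 1.3, 2.2],                               # toy response
})
motifs = best_kmers(dt, response="y", sequence="seq", k=4)
print(motifs)  # list of 4-mers, best candidate first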
gagneurlab/concise
concise/legacy/kmer.py
kmer_count
def kmer_count(seq_list, k):
    """
    Generate k-mer counts from a set of sequences

    Args:
        seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})
        k (int): K in k-mer.

    Returns:
        pandas.DataFrame: Count matrix for each sequence in seq_list

    Example:
        >>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
           AA  AC  AG  AT  CA  CC  CG  CT  GA  GC  GG  GT  TA  TC  TG  TT
        0   0   1   0   1   0   0   1   0   0   0   0   1   1   0   0   1
        1   0   1   0   0   0   0   2   0   2   1   0   0   0   0   0   0
    """
    # generate all k-mers
    all_kmers = generate_all_kmers(k)
    kmer_count_list = []
    for seq in seq_list:
        kmer_count_list.append([seq.count(kmer) for kmer in all_kmers])
    return pd.DataFrame(kmer_count_list, columns=all_kmers)
python
def kmer_count(seq_list, k):
    """
    Generate k-mer counts from a set of sequences

    Args:
        seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})
        k (int): K in k-mer.

    Returns:
        pandas.DataFrame: Count matrix for each sequence in seq_list

    Example:
        >>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
           AA  AC  AG  AT  CA  CC  CG  CT  GA  GC  GG  GT  TA  TC  TG  TT
        0   0   1   0   1   0   0   1   0   0   0   0   1   1   0   0   1
        1   0   1   0   0   0   0   2   0   2   1   0   0   0   0   0   0
    """
    # generate all k-mers
    all_kmers = generate_all_kmers(k)
    kmer_count_list = []
    for seq in seq_list:
        kmer_count_list.append([seq.count(kmer) for kmer in all_kmers])
    return pd.DataFrame(kmer_count_list, columns=all_kmers)
Generate k-mer counts from a set of sequences

Args:
    seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T})
    k (int): K in k-mer.

Returns:
    pandas.DataFrame: Count matrix for each sequence in seq_list

Example:
    >>> kmer_count(["ACGTTAT", "GACGCGA"], 2)
       AA  AC  AG  AT  CA  CC  CG  CT  GA  GC  GG  GT  TA  TC  TG  TT
    0   0   1   0   1   0   0   1   0   0   0   0   1   1   0   0   1
    1   0   1   0   0   0   0   2   0   2   1   0   0   0   0   0   0
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L107-L128
gagneurlab/concise
concise/legacy/kmer.py
generate_all_kmers
def generate_all_kmers(k):
    """
    Generate all possible k-mers

    Example:
        >>> generate_all_kmers(2)
        ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
    """
    bases = ['A', 'C', 'G', 'T']
    return [''.join(p) for p in itertools.product(bases, repeat=k)]
python
def generate_all_kmers(k):
    """
    Generate all possible k-mers

    Example:
        >>> generate_all_kmers(2)
        ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
    """
    bases = ['A', 'C', 'G', 'T']
    return [''.join(p) for p in itertools.product(bases, repeat=k)]
Generate all possible k-mers

Example:
    >>> generate_all_kmers(2)
    ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L130-L139
gagneurlab/concise
concise/utils/helper.py
dict_to_numpy_dict
def dict_to_numpy_dict(obj_dict):
    """
    Convert a dictionary of lists into a dictionary of numpy arrays
    """
    return {key: np.asarray(value) if value is not None else None
            for key, value in obj_dict.items()}
python
def dict_to_numpy_dict(obj_dict):
    """
    Convert a dictionary of lists into a dictionary of numpy arrays
    """
    return {key: np.asarray(value) if value is not None else None
            for key, value in obj_dict.items()}
Convert a dictionary of lists into a dictionary of numpy arrays
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/helper.py#L65-L69
gagneurlab/concise
concise/utils/helper.py
rec_dict_to_numpy_dict
def rec_dict_to_numpy_dict(obj_dict):
    """
    Same as dict_to_numpy_dict, but recursive
    """
    if type(obj_dict) == dict:
        return {key: rec_dict_to_numpy_dict(value) if value is not None else None
                for key, value in obj_dict.items()}
    elif obj_dict is None:
        return None
    else:
        return np.asarray(obj_dict)
python
def rec_dict_to_numpy_dict(obj_dict):
    """
    Same as dict_to_numpy_dict, but recursive
    """
    if type(obj_dict) == dict:
        return {key: rec_dict_to_numpy_dict(value) if value is not None else None
                for key, value in obj_dict.items()}
    elif obj_dict is None:
        return None
    else:
        return np.asarray(obj_dict)
Same as dict_to_numpy_dict, but recursive
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/helper.py#L72-L81
gagneurlab/concise
concise/utils/helper.py
compare_numpy_dict
def compare_numpy_dict(a, b, exact=True):
    """
    Compare two recursive numpy dictionaries
    """
    if type(a) != type(b) and type(a) != np.ndarray and type(b) != np.ndarray:
        return False

    # go through a dictionary
    if type(a) == dict and type(b) == dict:
        if not a.keys() == b.keys():
            return False
        for key in a.keys():
            res = compare_numpy_dict(a[key], b[key], exact)
            if res is False:
                print("false for key = ", key)
                return False
        return True

    # if type(a) == np.ndarray and type(b) == np.ndarray:
    if type(a) == np.ndarray or type(b) == np.ndarray:
        if exact:
            return (a == b).all()
        else:
            # np.testing.assert_almost_equal returns None (it raises on
            # mismatch), so use np.allclose to actually return a boolean
            return np.allclose(a, b)

    if a is None and b is None:
        return True

    raise NotImplementedError
python
def compare_numpy_dict(a, b, exact=True):
    """
    Compare two recursive numpy dictionaries
    """
    if type(a) != type(b) and type(a) != np.ndarray and type(b) != np.ndarray:
        return False

    # go through a dictionary
    if type(a) == dict and type(b) == dict:
        if not a.keys() == b.keys():
            return False
        for key in a.keys():
            res = compare_numpy_dict(a[key], b[key], exact)
            if res is False:
                print("false for key = ", key)
                return False
        return True

    # if type(a) == np.ndarray and type(b) == np.ndarray:
    if type(a) == np.ndarray or type(b) == np.ndarray:
        if exact:
            return (a == b).all()
        else:
            # np.testing.assert_almost_equal returns None (it raises on
            # mismatch), so use np.allclose to actually return a boolean
            return np.allclose(a, b)

    if a is None and b is None:
        return True

    raise NotImplementedError
Compare two recursive numpy dictionaries
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/helper.py#L84-L111
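A quick sanity check of the comparison helper on toy dictionaries:

import numpy as np
from concise.utils.helper import compare_numpy_dict

a = {"w": np.array([1.0, 2.0]), "b": None}
b = {"w": np.array([1.0, 2.0]), "b": None}
print(compare_numpy_dict(a, b, exact=True))                               # True
print(compare_numpy_dict(a, {"w": np.array([1.0, 2.1]), "b": None},
                         exact=False))                                    # False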
gagneurlab/concise
concise/utils/splines.py
get_gam_splines
def get_gam_splines(start=0, end=100, n_bases=10, spline_order=3, add_intercept=True):
    """Main function required by (TF)Concise class
    """
    # make sure n_bases is an int
    assert type(n_bases) == int

    x = np.arange(start, end + 1)
    knots = get_knots(start, end, n_bases, spline_order)
    X_splines = get_X_spline(x, knots, n_bases, spline_order, add_intercept)
    S = get_S(n_bases, spline_order, add_intercept)
    # Get the same knot positions as with mgcv
    # https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
    return X_splines, S, knots
python
def get_gam_splines(start=0, end=100, n_bases=10, spline_order=3, add_intercept=True):
    """Main function required by (TF)Concise class
    """
    # make sure n_bases is an int
    assert type(n_bases) == int

    x = np.arange(start, end + 1)
    knots = get_knots(start, end, n_bases, spline_order)
    X_splines = get_X_spline(x, knots, n_bases, spline_order, add_intercept)
    S = get_S(n_bases, spline_order, add_intercept)
    # Get the same knot positions as with mgcv
    # https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
    return X_splines, S, knots
Main function required by (TF)Concise class
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/splines.py#L102-L116
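A small sketch of calling the helper; the shapes noted below follow from the code (positions 0..100 inclusive plus an intercept column) but are worth verifying:

from concise.utils.splines import get_gam_splines

X_splines, S, knots = get_gam_splines(start=0, end=100, n_bases=10,
                                      spline_order=3, add_intercept=True)
print(X_splines.shape)  # expected (101, 11): 101 positions, 10 bases + intercept
print(S.shape)          # penalty matrix, expected (11, 11) with the intercept row/column
print(knots.shape)      # knot vector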
gagneurlab/concise
concise/utils/splines.py
get_knots
def get_knots(start, end, n_bases=10, spline_order=3):
    """Compute the B-spline knot positions for the [start, end] range
    (placed the same way as in mgcv).
    """
    x_range = end - start
    start = start - x_range * 0.001
    end = end + x_range * 0.001

    # mgcv annotation
    m = spline_order - 1
    nk = n_bases - m            # number of interior knots

    dknots = (end - start) / (nk - 1)
    knots = np.linspace(start=start - dknots * (m + 1),
                        stop=end + dknots * (m + 1),
                        num=nk + 2 * m + 2)
    return knots.astype(np.float32)
python
def get_knots(start, end, n_bases=10, spline_order=3):
    """Compute the B-spline knot positions for the [start, end] range
    (placed the same way as in mgcv).
    """
    x_range = end - start
    start = start - x_range * 0.001
    end = end + x_range * 0.001

    # mgcv annotation
    m = spline_order - 1
    nk = n_bases - m            # number of interior knots

    dknots = (end - start) / (nk - 1)
    knots = np.linspace(start=start - dknots * (m + 1),
                        stop=end + dknots * (m + 1),
                        num=nk + 2 * m + 2)
    return knots.astype(np.float32)
Compute the B-spline knot positions for the [start, end] range (placed the same way as in mgcv).
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/splines.py#L123-L140
gagneurlab/concise
concise/utils/splines.py
get_X_spline
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
    """
    Returns:
        np.array of shape [len(x), n_bases + (add_intercept)]

    # BSpline formula
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline

    Fortran code:
    https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
    """
    if len(x.shape) != 1:
        raise ValueError("x has to be 1 dimensional")
    tck = [knots, np.zeros(n_bases), spline_order]

    X = np.zeros([len(x), n_bases])

    for i in range(n_bases):
        vec = np.zeros(n_bases)
        vec[i] = 1.0
        tck[1] = vec

        X[:, i] = si.splev(x, tck, der=0)

    if add_intercept is True:
        ones = np.ones_like(X[:, :1])
        X = np.hstack([ones, X])

    return X.astype(np.float32)
python
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
    """
    Returns:
        np.array of shape [len(x), n_bases + (add_intercept)]

    # BSpline formula
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline

    Fortran code:
    https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
    """
    if len(x.shape) != 1:
        raise ValueError("x has to be 1 dimensional")
    tck = [knots, np.zeros(n_bases), spline_order]

    X = np.zeros([len(x), n_bases])

    for i in range(n_bases):
        vec = np.zeros(n_bases)
        vec[i] = 1.0
        tck[1] = vec

        X[:, i] = si.splev(x, tck, der=0)

    if add_intercept is True:
        ones = np.ones_like(X[:, :1])
        X = np.hstack([ones, X])

    return X.astype(np.float32)
Returns:
    np.array of shape [len(x), n_bases + (add_intercept)]

# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline

Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/splines.py#L144-L173
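Evaluating the basis at arbitrary positions, using the knot helper above:

import numpy as np
from concise.utils.splines import get_knots, get_X_spline

x = np.linspace(0, 100, 21)
knots = get_knots(0, 100, n_bases=10, spline_order=3)
X = get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=False)
print(X.shape)  # (21, 10): one row of basis-function values b_n(x) per position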
gagneurlab/concise
concise/utils/splines.py
BSpline.getS
def getS(self, add_intercept=False):
    """Get the penalty matrix S

    Returns:
        np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
    """
    S = self.S
    if add_intercept is True:
        # S <- cbind(0, rbind(0, S)) # in R
        zeros = np.zeros_like(S[:1, :])
        S = np.vstack([zeros, S])

        zeros = np.zeros_like(S[:, :1])
        S = np.hstack([zeros, S])
    return S
python
def getS(self, add_intercept=False):
    """Get the penalty matrix S

    Returns:
        np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
    """
    S = self.S
    if add_intercept is True:
        # S <- cbind(0, rbind(0, S)) # in R
        zeros = np.zeros_like(S[:1, :])
        S = np.vstack([zeros, S])

        zeros = np.zeros_like(S[:, :1])
        S = np.hstack([zeros, S])
    return S
Get the penalty matrix S

Returns:
    np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/splines.py#L49-L63
gagneurlab/concise
concise/utils/splines.py
BSpline.predict
def predict(self, x, add_intercept=False):
    """For some x, predict the bn(x) for each base

    Arguments:
        x: np.array; Vector of dimension 1
        add_intercept: bool; should we add the intercept to the final array

    Returns:
        np.array, of shape (len(x), n_bases + (add_intercept))
    """
    # sanity check
    if x.min() < self.start:
        raise Warning("x.min() < self.start")
    if x.max() > self.end:
        raise Warning("x.max() > self.end")

    return get_X_spline(x=x,
                        knots=self.knots,
                        n_bases=self.n_bases,
                        spline_order=self.spline_order,
                        add_intercept=add_intercept)
python
def predict(self, x, add_intercept=False):
    """For some x, predict the bn(x) for each base

    Arguments:
        x: np.array; Vector of dimension 1
        add_intercept: bool; should we add the intercept to the final array

    Returns:
        np.array, of shape (len(x), n_bases + (add_intercept))
    """
    # sanity check
    if x.min() < self.start:
        raise Warning("x.min() < self.start")
    if x.max() > self.end:
        raise Warning("x.max() > self.end")

    return get_X_spline(x=x,
                        knots=self.knots,
                        n_bases=self.n_bases,
                        spline_order=self.spline_order,
                        add_intercept=add_intercept)
For some x, predict the bn(x) for each base

Arguments:
    x: np.array; Vector of dimension 1
    add_intercept: bool; should we add the intercept to the final array

Returns:
    np.array, of shape (len(x), n_bases + (add_intercept))
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/splines.py#L65-L85
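Putting the class together; the constructor arguments are inferred from the `BSpline(start, end, n_bases=..., spline_order=...)` call in `encodeSplines` further below, so treat this as a sketch:

import numpy as np
from concise.utils.splines import BSpline

bs = BSpline(0, 100, n_bases=10, spline_order=3)
x = np.arange(0, 101)
B = bs.predict(x, add_intercept=False)  # (101, 10) basis matrix
S = bs.getS(add_intercept=False)        # (10, 10) penalty matrix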
gagneurlab/concise
concise/data/encode.py
get_metadata
def get_metadata():
    """Get pandas.DataFrame with metadata about the PWM's. Columns:

    - PWM_id: id of the PWM; pass to get_pwm_list() to retrieve the PWM
    - info1: additional information about the motifs
    - info2
    - consensus: PWM consensus sequence
    """
    motifs = _load_motifs()
    motif_names = sorted(list(motifs.keys()))
    df = pd.Series(motif_names).str.split(expand=True)
    df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True)

    # compute the consensus
    consensus = pd.Series([PWM(motifs[m]).get_consensus() for m in motif_names])
    df["consensus"] = consensus
    return df
python
def get_metadata():
    """Get pandas.DataFrame with metadata about the PWM's. Columns:

    - PWM_id: id of the PWM; pass to get_pwm_list() to retrieve the PWM
    - info1: additional information about the motifs
    - info2
    - consensus: PWM consensus sequence
    """
    motifs = _load_motifs()
    motif_names = sorted(list(motifs.keys()))
    df = pd.Series(motif_names).str.split(expand=True)
    df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True)

    # compute the consensus
    consensus = pd.Series([PWM(motifs[m]).get_consensus() for m in motif_names])
    df["consensus"] = consensus
    return df
Get pandas.DataFrame with metadata about the PWM's. Columns:

- PWM_id: id of the PWM; pass to get_pwm_list() to retrieve the PWM
- info1: additional information about the motifs
- info2
- consensus: PWM consensus sequence
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/encode.py#L14-L31
gagneurlab/concise
concise/data/encode.py
get_pwm_list
def get_pwm_list(motif_name_list, pseudocountProb=0.0001):
    """Get a list of ENCODE PWM's.

    # Arguments
        motif_name_list: List of id's from the `PWM_id` column in the `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = _load_motifs()
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list]
    return pwm_list
python
def get_pwm_list(motif_name_list, pseudocountProb=0.0001):
    """Get a list of ENCODE PWM's.

    # Arguments
        motif_name_list: List of id's from the `PWM_id` column in the `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = _load_motifs()
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list]
    return pwm_list
Get a list of ENCODE PWM's.

# Arguments
    motif_name_list: List of id's from the `PWM_id` column in the `get_metadata()` table
    pseudocountProb: Added pseudocount probabilities to the PWM

# Returns
    List of `concise.utils.pwm.PWM` instances.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/encode.py#L34-L47
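Typical lookup flow combining the two helpers above; `pwm.name` is assumed to be exposed by the PWM class (it is passed as `name=` in the constructor):

from concise.data.encode import get_metadata, get_pwm_list

meta = get_metadata()
pwm_list = get_pwm_list(meta.PWM_id[:3].tolist())  # first three PWM ids
for pwm in pwm_list:
    print(pwm.name, pwm.get_consensus())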
gagneurlab/concise
concise/eval_metrics.py
auc
def auc(y_true, y_pred, round=True):
    """Area under the ROC curve
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)

    if round:
        y_true = y_true.round()
    if len(y_true) == 0 or len(np.unique(y_true)) < 2:
        return np.nan
    return skm.roc_auc_score(y_true, y_pred)
python
def auc(y_true, y_pred, round=True):
    """Area under the ROC curve
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)

    if round:
        y_true = y_true.round()
    if len(y_true) == 0 or len(np.unique(y_true)) < 2:
        return np.nan
    return skm.roc_auc_score(y_true, y_pred)
Area under the ROC curve
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L37-L46
gagneurlab/concise
concise/eval_metrics.py
auprc
def auprc(y_true, y_pred):
    """Area under the precision-recall curve
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    precision, recall, _ = skm.precision_recall_curve(y_true, y_pred)
    return skm.auc(recall, precision)
python
def auprc(y_true, y_pred):
    """Area under the precision-recall curve
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    precision, recall, _ = skm.precision_recall_curve(y_true, y_pred)
    return skm.auc(recall, precision)
Area under the precision-recall curve
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L49-L54
gagneurlab/concise
concise/eval_metrics.py
recall_at_precision
def recall_at_precision(y_true, y_pred, precision):
    """Recall at a certain precision threshold

    Args:
        y_true: true labels
        y_pred: predicted labels
        precision: desired precision level at which to compute the recall
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    # rename the curve outputs so they don't shadow the `precision` argument
    # (the original code computed searchsorted on an all-zero vector);
    # this assumes, like the original intent, that the precision values
    # increase along the curve
    prec, rec, _ = skm.precision_recall_curve(y_true, y_pred)
    return rec[np.searchsorted(prec - precision, 0)]
python
def recall_at_precision(y_true, y_pred, precision):
    """Recall at a certain precision threshold

    Args:
        y_true: true labels
        y_pred: predicted labels
        precision: desired precision level at which to compute the recall
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    # rename the curve outputs so they don't shadow the `precision` argument
    # (the original code computed searchsorted on an all-zero vector);
    # this assumes, like the original intent, that the precision values
    # increase along the curve
    prec, rec, _ = skm.precision_recall_curve(y_true, y_pred)
    return rec[np.searchsorted(prec - precision, 0)]
Recall at a certain precision threshold

Args:
    y_true: true labels
    y_pred: predicted labels
    precision: desired precision level at which to compute the recall
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L57-L67
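With the shadowing fix above, the metric can be called like this (toy numbers):

import numpy as np
from concise.eval_metrics import recall_at_precision

y_true = np.array([0, 0, 1, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8, 0.9])
r = recall_at_precision(y_true, y_pred, precision=0.75)
print(r)  # recall at the first point on the curve reaching 75% precision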
gagneurlab/concise
concise/eval_metrics.py
accuracy
def accuracy(y_true, y_pred, round=True):
    """Classification accuracy
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.accuracy_score(y_true, y_pred)
python
def accuracy(y_true, y_pred, round=True):
    """Classification accuracy
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.accuracy_score(y_true, y_pred)
Classification accuracy
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L70-L77
gagneurlab/concise
concise/eval_metrics.py
tpr
def tpr(y_true, y_pred, round=True):
    """True positive rate `tp / (tp + fn)`
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.recall_score(y_true, y_pred)
python
def tpr(y_true, y_pred, round=True):
    """True positive rate `tp / (tp + fn)`
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.recall_score(y_true, y_pred)
True positive rate `tp / (tp + fn)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L80-L87
gagneurlab/concise
concise/eval_metrics.py
tnr
def tnr(y_true, y_pred, round=True):
    """True negative rate `tn / (tn + fp)`
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    c = skm.confusion_matrix(y_true, y_pred)
    return c[0, 0] / c[0].sum()
python
def tnr(y_true, y_pred, round=True):
    """True negative rate `tn / (tn + fp)`
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    c = skm.confusion_matrix(y_true, y_pred)
    return c[0, 0] / c[0].sum()
True negative rate `tn / (tn + fp)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L90-L98
gagneurlab/concise
concise/eval_metrics.py
mcc
def mcc(y_true, y_pred, round=True):
    """Matthews correlation coefficient
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.matthews_corrcoef(y_true, y_pred)
python
def mcc(y_true, y_pred, round=True):
    """Matthews correlation coefficient
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.matthews_corrcoef(y_true, y_pred)
Matthews correlation coefficient
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L101-L108
gagneurlab/concise
concise/eval_metrics.py
f1
def f1(y_true, y_pred, round=True):
    """F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.f1_score(y_true, y_pred)
python
def f1(y_true, y_pred, round=True):
    """F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = np.round(y_true)
        y_pred = np.round(y_pred)
    return skm.f1_score(y_true, y_pred)
F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L111-L118
gagneurlab/concise
concise/eval_metrics.py
cat_acc
def cat_acc(y_true, y_pred):
    """Categorical accuracy
    """
    return np.mean(y_true.argmax(axis=1) == y_pred.argmax(axis=1))
python
def cat_acc(y_true, y_pred):
    """Categorical accuracy
    """
    return np.mean(y_true.argmax(axis=1) == y_pred.argmax(axis=1))
Categorical accuracy
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L123-L126
gagneurlab/concise
concise/eval_metrics.py
cor
def cor(y_true, y_pred):
    """Compute Pearson correlation coefficient.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.corrcoef(y_true, y_pred)[0, 1]
python
def cor(y_true, y_pred):
    """Compute Pearson correlation coefficient.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.corrcoef(y_true, y_pred)[0, 1]
Compute Pearson correlation coefficient.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L131-L135
gagneurlab/concise
concise/eval_metrics.py
kendall
def kendall(y_true, y_pred, nb_sample=100000):
    """Kendall's tau coefficient, Kendall rank correlation coefficient
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    if len(y_true) > nb_sample:
        idx = np.arange(len(y_true))
        np.random.shuffle(idx)
        idx = idx[:nb_sample]
        y_true = y_true[idx]
        y_pred = y_pred[idx]
    return kendalltau(y_true, y_pred)[0]
python
def kendall(y_true, y_pred, nb_sample=100000):
    """Kendall's tau coefficient, Kendall rank correlation coefficient
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    if len(y_true) > nb_sample:
        idx = np.arange(len(y_true))
        np.random.shuffle(idx)
        idx = idx[:nb_sample]
        y_true = y_true[idx]
        y_pred = y_pred[idx]
    return kendalltau(y_true, y_pred)[0]
Kendall's tau coefficient, Kendall rank correlation coefficient
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L138-L148
gagneurlab/concise
concise/eval_metrics.py
mad
def mad(y_true, y_pred):
    """Mean absolute deviation (note: despite the name, this computes the
    mean, not the median, of the absolute errors)
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.mean(np.abs(y_true - y_pred))
python
def mad(y_true, y_pred):
    """Mean absolute deviation (note: despite the name, this computes the
    mean, not the median, of the absolute errors)
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.mean(np.abs(y_true - y_pred))
Mean absolute deviation (note: despite the name, this computes the mean, not the median, of the absolute errors)
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L151-L155
gagneurlab/concise
concise/eval_metrics.py
mse
def mse(y_true, y_pred):
    """Mean squared error
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return ((y_true - y_pred) ** 2).mean(axis=None)
python
def mse(y_true, y_pred):
    """Mean squared error
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return ((y_true - y_pred) ** 2).mean(axis=None)
Mean squared error
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L170-L174
gagneurlab/concise
concise/eval_metrics.py
var_explained
def var_explained(y_true, y_pred):
    """Fraction of variance explained.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    var_resid = np.var(y_true - y_pred)
    var_y_true = np.var(y_true)
    return 1 - var_resid / var_y_true
python
def var_explained(y_true, y_pred):
    """Fraction of variance explained.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    var_resid = np.var(y_true - y_pred)
    var_y_true = np.var(y_true)
    return 1 - var_resid / var_y_true
Fraction of variance explained.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L183-L189
gagneurlab/concise
concise/legacy/args_sampler.py
sample_params
def sample_params(params):
    """Randomly sample hyper-parameters stored in a dictionary on a predefined range and scale. Useful for hyper-parameter random search.

    Args:
        params (dict): hyper-parameters to sample. Dictionary value-type parsing:

                   - :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3,1e7)`
                   - :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1,10)`
                   - :python:`{1, 2}` - sample from a **set** of values.
                   - :python:`1` - don't sample

    Returns:
        dict: Dictionary with the same keys as :py:attr:`params`, but with only one element as the value.

    Examples:
        >>> myparams = {
                "max_pool": True,          # always use True
                "step_size": [0.09, 0.005],
                "step_decay": (0.9, 1),
                "n_splines": {10, None},   # use either 10 or None
                "some_tuple": {(1,2), (1)},
            }
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)}
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)}
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)}

    Note:
        - :python:`{[1,2], [3,4]}` is invalid. Use :python:`{(1,2), (3,4)}` instead.
        - You can always use :python:`{}` with a single element to by-pass sampling.
    """
    def sample_log(myrange):
        x = np.random.uniform(np.log10(myrange[0]), np.log10(myrange[1]))
        return 10**x

    def sample_unif(myrange):
        x = np.random.uniform(myrange[0], myrange[1])
        return x

    def sample_set(myset):
        x = random.sample(myset, 1)
        return x[0]

    def type_dep_sample(myrange):
        if type(myrange) is list:
            return sample_log(myrange)
        if type(myrange) is tuple:
            return sample_unif(myrange)
        if type(myrange) is set:
            return sample_set(myrange)
        return myrange

    return {k: type_dep_sample(v) for k, v in params.items()}
python
def sample_params(params):
    """Randomly sample hyper-parameters stored in a dictionary on a predefined range and scale. Useful for hyper-parameter random search.

    Args:
        params (dict): hyper-parameters to sample. Dictionary value-type parsing:

                   - :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3,1e7)`
                   - :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1,10)`
                   - :python:`{1, 2}` - sample from a **set** of values.
                   - :python:`1` - don't sample

    Returns:
        dict: Dictionary with the same keys as :py:attr:`params`, but with only one element as the value.

    Examples:
        >>> myparams = {
                "max_pool": True,          # always use True
                "step_size": [0.09, 0.005],
                "step_decay": (0.9, 1),
                "n_splines": {10, None},   # use either 10 or None
                "some_tuple": {(1,2), (1)},
            }
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)}
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)}
        >>> concise.sample_params(myparams)
        {'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)}

    Note:
        - :python:`{[1,2], [3,4]}` is invalid. Use :python:`{(1,2), (3,4)}` instead.
        - You can always use :python:`{}` with a single element to by-pass sampling.
    """
    def sample_log(myrange):
        x = np.random.uniform(np.log10(myrange[0]), np.log10(myrange[1]))
        return 10**x

    def sample_unif(myrange):
        x = np.random.uniform(myrange[0], myrange[1])
        return x

    def sample_set(myset):
        x = random.sample(myset, 1)
        return x[0]

    def type_dep_sample(myrange):
        if type(myrange) is list:
            return sample_log(myrange)
        if type(myrange) is tuple:
            return sample_unif(myrange)
        if type(myrange) is set:
            return sample_set(myrange)
        return myrange

    return {k: type_dep_sample(v) for k, v in params.items()}
Randomly sample hyper-parameters stored in a dictionary on a predefined range and scale. Useful for hyper-parameter random search.

Args:
    params (dict): hyper-parameters to sample. Dictionary value-type parsing:

               - :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3,1e7)`
               - :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1,10)`
               - :python:`{1, 2}` - sample from a **set** of values.
               - :python:`1` - don't sample

Returns:
    dict: Dictionary with the same keys as :py:attr:`params`, but with only one element as the value.

Examples:
    >>> myparams = {
            "max_pool": True,          # always use True
            "step_size": [0.09, 0.005],
            "step_decay": (0.9, 1),
            "n_splines": {10, None},   # use either 10 or None
            "some_tuple": {(1,2), (1)},
        }
    >>> concise.sample_params(myparams)
    {'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)}
    >>> concise.sample_params(myparams)
    {'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)}
    >>> concise.sample_params(myparams)
    {'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)}

Note:
    - :python:`{[1,2], [3,4]}` is invalid. Use :python:`{(1,2), (3,4)}` instead.
    - You can always use :python:`{}` with a single element to by-pass sampling.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/args_sampler.py#L8-L68
gagneurlab/concise
concise/metrics.py
contingency_table
def contingency_table(y, z):
    """Note: if y and z are not rounded to 0 or 1, they are ignored
    """
    y = K.cast(K.round(y), K.floatx())
    z = K.cast(K.round(z), K.floatx())

    def count_matches(y, z):
        return K.sum(K.cast(y, K.floatx()) * K.cast(z, K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
python
def contingency_table(y, z):
    """Note: if y and z are not rounded to 0 or 1, they are ignored
    """
    y = K.cast(K.round(y), K.floatx())
    z = K.cast(K.round(z), K.floatx())

    def count_matches(y, z):
        return K.sum(K.cast(y, K.floatx()) * K.cast(z, K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
Note: if y and z are not rounded to 0 or 1, they are ignored
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L17-L38
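Because these metrics operate on Keras backend tensors, they can be passed straight to `model.compile`; a minimal sketch assuming a toy binary classifier:

import concise.metrics as cm
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, activation="sigmoid", input_shape=(10,))])
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=[cm.accuracy, cm.tpr, cm.precision, cm.f1])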
gagneurlab/concise
concise/metrics.py
tpr
def tpr(y, z):
    """True positive rate `tp / (tp + fn)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fn)
python
def tpr(y, z):
    """True positive rate `tp / (tp + fn)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fn)
True positive rate `tp / (tp + fn)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L41-L45
gagneurlab/concise
concise/metrics.py
tnr
def tnr(y, z):
    """True negative rate `tn / (tn + fp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return tn / (tn + fp)
python
def tnr(y, z):
    """True negative rate `tn / (tn + fp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return tn / (tn + fp)
True negative rate `tn / (tn + fp)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L48-L52
gagneurlab/concise
concise/metrics.py
fpr
def fpr(y, z):
    """False positive rate `fp / (fp + tn)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return fp / (fp + tn)
python
def fpr(y, z):
    """False positive rate `fp / (fp + tn)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return fp / (fp + tn)
False positive rate `fp / (fp + tn)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L55-L59
gagneurlab/concise
concise/metrics.py
fnr
def fnr(y, z):
    """False negative rate `fn / (fn + tp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return fn / (fn + tp)
python
def fnr(y, z):
    """False negative rate `fn / (fn + tp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return fn / (fn + tp)
False negative rate `fn / (fn + tp)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L62-L66
gagneurlab/concise
concise/metrics.py
precision
def precision(y, z):
    """Precision `tp / (tp + fp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fp)
python
def precision(y, z):
    """Precision `tp / (tp + fp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return tp / (tp + fp)
Precision `tp / (tp + fp)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L69-L73
gagneurlab/concise
concise/metrics.py
fdr
def fdr(y, z):
    """False discovery rate `fp / (tp + fp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return fp / (tp + fp)
python
def fdr(y, z):
    """False discovery rate `fp / (tp + fp)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return fp / (tp + fp)
False discovery rate `fp / (tp + fp)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L76-L80
gagneurlab/concise
concise/metrics.py
accuracy
def accuracy(y, z):
    """Classification accuracy `(tp + tn) / (tp + tn + fp + fn)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp + tn) / (tp + tn + fp + fn)
python
def accuracy(y, z):
    """Classification accuracy `(tp + tn) / (tp + tn + fp + fn)`
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp + tn) / (tp + tn + fp + fn)
Classification accuracy `(tp + tn) / (tp + tn + fp + fn)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L83-L87
gagneurlab/concise
concise/metrics.py
f1
def f1(y, z):
    """F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
    """
    _recall = recall(y, z)
    _prec = precision(y, z)
    return 2 * (_prec * _recall) / (_prec + _recall)
python
def f1(y, z):
    """F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
    """
    _recall = recall(y, z)
    _prec = precision(y, z)
    return 2 * (_prec * _recall) / (_prec + _recall)
F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L94-L99
gagneurlab/concise
concise/metrics.py
mcc
def mcc(y, z):
    """Matthews correlation coefficient
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
python
def mcc(y, z):
    """Matthews correlation coefficient
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
Matthews correlation coefficient
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L102-L106
gagneurlab/concise
concise/metrics.py
cat_acc
def cat_acc(y, z):
    """Classification accuracy for multi-categorical case
    """
    weights = _cat_sample_weights(y)
    _acc = K.cast(K.equal(K.argmax(y, axis=-1),
                          K.argmax(z, axis=-1)),
                  K.floatx())
    _acc = K.sum(_acc * weights) / K.sum(weights)
    return _acc
python
def cat_acc(y, z):
    """Classification accuracy for multi-categorical case
    """
    weights = _cat_sample_weights(y)
    _acc = K.cast(K.equal(K.argmax(y, axis=-1),
                          K.argmax(z, axis=-1)),
                  K.floatx())
    _acc = K.sum(_acc * weights) / K.sum(weights)
    return _acc
Classification accuracy for multi-categorical case
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L126-L134
gagneurlab/concise
concise/metrics.py
var_explained
def var_explained(y_true, y_pred):
    """Fraction of variance explained.
    """
    var_resid = K.var(y_true - y_pred)
    var_y_true = K.var(y_true)
    return 1 - var_resid / var_y_true
python
def var_explained(y_true, y_pred):
    """Fraction of variance explained.
    """
    var_resid = K.var(y_true - y_pred)
    var_y_true = K.var(y_true)
    return 1 - var_resid / var_y_true
Fraction of variance explained.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L152-L157
gagneurlab/concise
concise/utils/model_data.py
split_KFold_idx
def split_KFold_idx(train, cv_n_folds=5, stratified=False, random_state=None):
    """Get k-fold indices generator
    """
    test_len(train)
    y = train[1]
    n_rows = y.shape[0]
    if stratified:
        if len(y.shape) > 1:
            if y.shape[1] > 1:
                raise ValueError("Can't use stratified K-fold with multi-column response variable")
            else:
                y = y[:, 0]
        # http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold.split
        return model_selection.StratifiedKFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\
            .split(X=np.zeros((n_rows, 1)), y=y)
    else:
        return model_selection.KFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\
            .split(X=np.zeros((n_rows, 1)))
python
def split_KFold_idx(train, cv_n_folds=5, stratified=False, random_state=None):
    """Get k-fold indices generator
    """
    test_len(train)
    y = train[1]
    n_rows = y.shape[0]
    if stratified:
        if len(y.shape) > 1:
            if y.shape[1] > 1:
                raise ValueError("Can't use stratified K-fold with multi-column response variable")
            else:
                y = y[:, 0]
        # http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold.split
        return model_selection.StratifiedKFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\
            .split(X=np.zeros((n_rows, 1)), y=y)
    else:
        return model_selection.KFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\
            .split(X=np.zeros((n_rows, 1)))
Get k-fold indices generator
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/model_data.py#L38-L55
gagneurlab/concise
concise/utils/model_data.py
subset
def subset(train, idx, keep_other=True):
    """Subset the `train=(x, y)` data tuple, each of the form:

    - list, np.ndarray
    - tuple, np.ndarray
    - dictionary, np.ndarray
    - np.ndarray, np.ndarray

    # Note
        In case there are other data present in the tuple: `(x, y, other1, other2, ...)`,
        these get passed on as: `(x_sub, y_sub, other1, other2)`

    # Arguments
        train: `(x,y, other1, other2, ...)` tuple of data
        idx: indices to subset the data with
        keep_other: bool; If True, the additional tuple elements `(other1, other2, ...)`
            are passed together with `(x, y)` but don't get subsetted.
    """
    test_len(train)
    y = train[1][idx]

    # x split
    if isinstance(train[0], (list, tuple)):
        x = [x[idx] for x in train[0]]
    elif isinstance(train[0], dict):
        x = {k: v[idx] for k, v in train[0].items()}
    elif isinstance(train[0], np.ndarray):
        x = train[0][idx]
    else:
        raise ValueError("Input can only be of type: list, dict or np.ndarray")

    if keep_other:
        return (x, y) + train[2:]
    else:
        return (x, y)
python
def subset(train, idx, keep_other=True):
    """Subset the `train=(x, y)` data tuple, each of the form:

    - list, np.ndarray
    - tuple, np.ndarray
    - dictionary, np.ndarray
    - np.ndarray, np.ndarray

    # Note
        In case there are other data present in the tuple: `(x, y, other1, other2, ...)`,
        these get passed on as: `(x_sub, y_sub, other1, other2)`

    # Arguments
        train: `(x,y, other1, other2, ...)` tuple of data
        idx: indices to subset the data with
        keep_other: bool; If True, the additional tuple elements `(other1, other2, ...)`
            are passed together with `(x, y)` but don't get subsetted.
    """
    test_len(train)
    y = train[1][idx]

    # x split
    if isinstance(train[0], (list, tuple)):
        x = [x[idx] for x in train[0]]
    elif isinstance(train[0], dict):
        x = {k: v[idx] for k, v in train[0].items()}
    elif isinstance(train[0], np.ndarray):
        x = train[0][idx]
    else:
        raise ValueError("Input can only be of type: list, dict or np.ndarray")

    if keep_other:
        return (x, y) + train[2:]
    else:
        return (x, y)
Subset the `train=(x, y)` data tuple, each of the form:

- list, np.ndarray
- tuple, np.ndarray
- dictionary, np.ndarray
- np.ndarray, np.ndarray

# Note
    In case there are other data present in the tuple: `(x, y, other1, other2, ...)`,
    these get passed on as: `(x_sub, y_sub, other1, other2)`

# Arguments
    train: `(x,y, other1, other2, ...)` tuple of data
    idx: indices to subset the data with
    keep_other: bool; If True, the additional tuple elements `(other1, other2, ...)`
        are passed together with `(x, y)` but don't get subsetted.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/model_data.py#L58-L92
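The two helpers compose naturally for cross-validation (toy arrays):

import numpy as np
from concise.utils.model_data import split_KFold_idx, subset

train = (np.random.randn(100, 5), np.random.randn(100, 1))
for train_idx, test_idx in split_KFold_idx(train, cv_n_folds=5):
    train_fold = subset(train, train_idx)
    test_fold = subset(train, test_idx)
    # fit on train_fold, evaluate on test_fold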
gagneurlab/concise
concise/legacy/get_data.py
prepare_data
def prepare_data(dt, features, response, sequence, id_column=None, seq_align="end", trim_seq_len=None):
    """
    Prepare data for Concise.train or ConciseCV.train.

    Args:
        dt: A pandas DataFrame containing all the required data.
        features (List of strings): Column names of `dt` used to produce the features design matrix. These columns should be numeric.
        response (str or list of strings): Name(s) of column(s) used as a response variable.
        sequence (str): Name of the column storing the DNA/RNA sequences.
        id_column (str): Name of the column used as the row identifier.
        seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
        trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
        standardize_features (bool): If True, columns in the returned matrix :py:attr:`X_feat` are normalized to have zero mean and unit variance. (Note: documented here, but not present in the current function signature.)

    Returns:
        tuple: Tuple with elements: :code:`(X_feat, X_seq, y, id_vec)`, where:

               - :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`, where N is :code:`len(dt)` and :code:`D = len(features)`
               - :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`. It represents 1-hot encoding of the DNA/RNA sequence.
               - :py:attr:`y`: Response variable 1-column matrix of shape :code:`(N, 1)`
               - :py:attr:`id_vec`: 1D Character array of shape :code:`(N)`. It represents the ID's of individual rows.

    Note:
        One-hot encoding of the DNA/RNA sequence is the following:

        .. code:: python

            {
                "A": np.array([1, 0, 0, 0]),
                "C": np.array([0, 1, 0, 0]),
                "G": np.array([0, 0, 1, 0]),
                "T": np.array([0, 0, 0, 1]),
                "U": np.array([0, 0, 0, 1]),
                "N": np.array([0, 0, 0, 0]),
            }
    """
    if type(response) is str:
        response = [response]
    X_feat = np.array(dt[features], dtype="float32")
    y = np.array(dt[response], dtype="float32")

    X_seq = encodeDNA(seq_vec=dt[sequence],
                      maxlen=trim_seq_len,
                      seq_align=seq_align)
    X_seq = np.array(X_seq, dtype="float32")
    id_vec = np.array(dt[id_column])
    return X_feat, X_seq, y, id_vec
python
def prepare_data(dt, features, response, sequence, id_column=None, seq_align="end", trim_seq_len=None):
    """
    Prepare data for Concise.train or ConciseCV.train.

    Args:
        dt: A pandas DataFrame containing all the required data.
        features (List of strings): Column names of `dt` used to produce the features design matrix. These columns should be numeric.
        response (str or list of strings): Name(s) of column(s) used as a response variable.
        sequence (str): Name of the column storing the DNA/RNA sequences.
        id_column (str): Name of the column used as the row identifier.
        seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
        trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
        standardize_features (bool): If True, columns in the returned matrix :py:attr:`X_feat` are normalized to have zero mean and unit variance. (Note: documented here, but not present in the current function signature.)

    Returns:
        tuple: Tuple with elements: :code:`(X_feat, X_seq, y, id_vec)`, where:

               - :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`, where N is :code:`len(dt)` and :code:`D = len(features)`
               - :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`. It represents 1-hot encoding of the DNA/RNA sequence.
               - :py:attr:`y`: Response variable 1-column matrix of shape :code:`(N, 1)`
               - :py:attr:`id_vec`: 1D Character array of shape :code:`(N)`. It represents the ID's of individual rows.

    Note:
        One-hot encoding of the DNA/RNA sequence is the following:

        .. code:: python

            {
                "A": np.array([1, 0, 0, 0]),
                "C": np.array([0, 1, 0, 0]),
                "G": np.array([0, 0, 1, 0]),
                "T": np.array([0, 0, 0, 1]),
                "U": np.array([0, 0, 0, 1]),
                "N": np.array([0, 0, 0, 0]),
            }
    """
    if type(response) is str:
        response = [response]
    X_feat = np.array(dt[features], dtype="float32")
    y = np.array(dt[response], dtype="float32")

    X_seq = encodeDNA(seq_vec=dt[sequence],
                      maxlen=trim_seq_len,
                      seq_align=seq_align)
    X_seq = np.array(X_seq, dtype="float32")
    id_vec = np.array(dt[id_column])
    return X_feat, X_seq, y, id_vec
Prepare data for Concise.train or ConciseCV.train.

Args:
    dt: A pandas DataFrame containing all the required data.
    features (List of strings): Column names of `dt` used to produce the features design matrix. These columns should be numeric.
    response (str or list of strings): Name(s) of column(s) used as a response variable.
    sequence (str): Name of the column storing the DNA/RNA sequences.
    id_column (str): Name of the column used as the row identifier.
    seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
    trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
    standardize_features (bool): If True, columns in the returned matrix :py:attr:`X_feat` are normalized to have zero mean and unit variance. (Note: documented here, but not present in the current function signature.)

Returns:
    tuple: Tuple with elements: :code:`(X_feat, X_seq, y, id_vec)`, where:

           - :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`, where N is :code:`len(dt)` and :code:`D = len(features)`
           - :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`. It represents 1-hot encoding of the DNA/RNA sequence.
           - :py:attr:`y`: Response variable 1-column matrix of shape :code:`(N, 1)`
           - :py:attr:`id_vec`: 1D Character array of shape :code:`(N)`. It represents the ID's of individual rows.

Note:
    One-hot encoding of the DNA/RNA sequence is the following:

    .. code:: python

        {
            "A": np.array([1, 0, 0, 0]),
            "C": np.array([0, 1, 0, 0]),
            "G": np.array([0, 0, 1, 0]),
            "T": np.array([0, 0, 0, 1]),
            "U": np.array([0, 0, 0, 1]),
            "N": np.array([0, 0, 0, 0]),
        }
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/get_data.py#L10-L59
gagneurlab/concise
concise/preprocessing/splines.py
_trunc
def _trunc(x, minval=None, maxval=None):
    """Truncate vector values to have values on range [minval, maxval]
    """
    x = np.copy(x)
    if minval is not None:
        x[x < minval] = minval
    if maxval is not None:
        x[x > maxval] = maxval
    return x
python
def _trunc(x, minval=None, maxval=None):
    """Truncate vector values to have values on range [minval, maxval]
    """
    x = np.copy(x)
    if minval is not None:
        x[x < minval] = minval
    if maxval is not None:
        x[x > maxval] = maxval
    return x
Truncate vector values to have values on range [minval, maxval]
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L8-L16
gagneurlab/concise
concise/preprocessing/splines.py
encodeSplines
def encodeSplines(x, n_bases=10, spline_order=3, start=None, end=None, warn=True):
    """**Deprecated**. Function version of the transformer class `EncodeSplines`. Get B-spline base-function expansion

    # Details
        First, the knots for B-spline basis functions are placed
        equidistantly on the [start, end] range
        (inferred from the data if None). Next, the b_n(x) value is
        computed for each x and each n (spline-index) with
        `scipy.interpolate.splev`.

    # Arguments
        x: a numpy array of positions with 2 dimensions
        n_bases int: Number of spline bases.
        spline_order: 2 for quadratic, 3 for cubic splines
        start, end: range of values. If None, they are inferred from the data
        as minimum and maximum value.
        warn: Show warnings.

    # Returns
        `np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)`
    """
    # TODO - make it general...
    if len(x.shape) == 1:
        x = x.reshape((-1, 1))

    if start is None:
        start = np.nanmin(x)
    else:
        if x.min() < start:
            if warn:
                print("WARNING, x.min() < start for some elements. Truncating them to start: x[x < start] = start")
            x = _trunc(x, minval=start)
    if end is None:
        end = np.nanmax(x)
    else:
        if x.max() > end:
            if warn:
                print("WARNING, x.max() > end for some elements. Truncating them to end: x[x > end] = end")
            x = _trunc(x, maxval=end)
    bs = BSpline(start, end,
                 n_bases=n_bases,
                 spline_order=spline_order
                 )

    # concatenate x to long
    assert len(x.shape) == 2
    n_rows = x.shape[0]
    n_cols = x.shape[1]

    x_long = x.reshape((-1,))
    x_feat = bs.predict(x_long, add_intercept=False)  # shape = (n_rows * n_cols, n_bases)

    x_final = x_feat.reshape((n_rows, n_cols, n_bases))
    return x_final
python
def encodeSplines(x, n_bases=10, spline_order=3, start=None, end=None, warn=True):
    """**Deprecated**. Function version of the transformer class `EncodeSplines`. Get B-spline base-function expansion

    # Details
        First, the knots for B-spline basis functions are placed
        equidistantly on the [start, end] range
        (inferred from the data if None). Next, the b_n(x) value is
        computed for each x and each n (spline-index) with
        `scipy.interpolate.splev`.

    # Arguments
        x: a numpy array of positions with 2 dimensions
        n_bases int: Number of spline bases.
        spline_order: 2 for quadratic, 3 for cubic splines
        start, end: range of values. If None, they are inferred from the data
        as minimum and maximum value.
        warn: Show warnings.

    # Returns
        `np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)`
    """
    # TODO - make it general...
    if len(x.shape) == 1:
        x = x.reshape((-1, 1))

    if start is None:
        start = np.nanmin(x)
    else:
        if x.min() < start:
            if warn:
                print("WARNING, x.min() < start for some elements. Truncating them to start: x[x < start] = start")
            x = _trunc(x, minval=start)
    if end is None:
        end = np.nanmax(x)
    else:
        if x.max() > end:
            if warn:
                print("WARNING, x.max() > end for some elements. Truncating them to end: x[x > end] = end")
            x = _trunc(x, maxval=end)
    bs = BSpline(start, end,
                 n_bases=n_bases,
                 spline_order=spline_order
                 )

    # concatenate x to long
    assert len(x.shape) == 2
    n_rows = x.shape[0]
    n_cols = x.shape[1]

    x_long = x.reshape((-1,))
    x_feat = bs.predict(x_long, add_intercept=False)  # shape = (n_rows * n_cols, n_bases)

    x_final = x_feat.reshape((n_rows, n_cols, n_bases))
    return x_final
**Deprecated**. Function version of the transformer class `EncodeSplines`. Get B-spline base-function expansion

# Details
    First, the knots for B-spline basis functions are placed equidistantly on the [start, end] range (inferred from the data if None). Next, the b_n(x) value is computed for each x and each n (spline-index) with `scipy.interpolate.splev`.

# Arguments
    x: a numpy array of positions with 2 dimensions
    n_bases int: Number of spline bases.
    spline_order: 2 for quadratic, 3 for cubic splines
    start, end: range of values. If None, they are inferred from the data as minimum and maximum value.
    warn: Show warnings.

# Returns
    `np.ndarray` of shape `(x.shape[0], x.shape[1], n_bases)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L93-L149
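A direct call on a matrix of positions:

import numpy as np
from concise.preprocessing.splines import encodeSplines

pos = np.tile(np.arange(50), (100, 1))  # (100, 50) positional values
x_spl = encodeSplines(pos, n_bases=10)
print(x_spl.shape)                      # (100, 50, 10)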
gagneurlab/concise
concise/preprocessing/splines.py
EncodeSplines.fit
def fit(self, x): """Calculate the knot placement from the values ranges. # Arguments x: numpy array, either N x D or N x L x D dimensional. """ assert x.ndim > 1 self.data_min_ = np.min(x, axis=tuple(range(x.ndim - 1))) self.data_max_ = np.max(x, axis=tuple(range(x.ndim - 1))) if self.share_knots: self.data_min_[:] = np.min(self.data_min_) self.data_max_[:] = np.max(self.data_max_)
python
def fit(self, x):
    """Calculate the knot placement from the value ranges.

    # Arguments
        x: numpy array, either N x D or N x L x D dimensional.
    """
    assert x.ndim > 1
    # reduce over every axis except the last (feature) axis
    self.data_min_ = np.min(x, axis=tuple(range(x.ndim - 1)))
    self.data_max_ = np.max(x, axis=tuple(range(x.ndim - 1)))

    if self.share_knots:
        self.data_min_[:] = np.min(self.data_min_)
        self.data_max_[:] = np.max(self.data_max_)
Calculate the knot placement from the value ranges. # Arguments x: numpy array, either N x D or N x L x D dimensional.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L52-L64
gagneurlab/concise
concise/preprocessing/splines.py
EncodeSplines.transform
def transform(self, x, warn=True):
    """Obtain the transformed values
    """
    # 1. split across the last dimension
    # 2. re-use the fitted ranges
    # 3. merge back
    array_list = [encodeSplines(x[..., i].reshape((-1, 1)),
                                n_bases=self.n_bases,
                                spline_order=self.degree,
                                warn=warn,
                                start=self.data_min_[i],
                                end=self.data_max_[i]
                                ).reshape(x[..., i].shape + (self.n_bases,))
                  for i in range(x.shape[-1])]
    return np.stack(array_list, axis=-2)
python
def transform(self, x, warn=True):
    """Obtain the transformed values
    """
    # 1. split across the last dimension
    # 2. re-use the fitted ranges
    # 3. merge back
    array_list = [encodeSplines(x[..., i].reshape((-1, 1)),
                                n_bases=self.n_bases,
                                spline_order=self.degree,
                                warn=warn,
                                start=self.data_min_[i],
                                end=self.data_max_[i]
                                ).reshape(x[..., i].shape + (self.n_bases,))
                  for i in range(x.shape[-1])]
    return np.stack(array_list, axis=-2)
Obtain the transformed values
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L66-L79
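A fit/transform workflow sketch for `EncodeSplines`. The constructor signature is an assumption inferred from the `self.n_bases`, `self.degree`, and `self.share_knots` attributes used in `fit` and `transform`:

import numpy as np
from concise.preprocessing.splines import EncodeSplines  # import path assumed

x_train = np.random.uniform(0, 1, size=(100, 20, 2))  # N x L x D
x_test = np.random.uniform(2, 3, size=(10, 20, 2))    # deliberately outside the training range

es = EncodeSplines(n_bases=10)        # constructor arguments assumed
es.fit(x_train)                       # knots placed from per-feature min/max
x_train_spl = es.transform(x_train)   # shape (100, 20, 2, 10): n_bases axis appended
x_test_spl = es.transform(x_test)     # re-uses the training ranges; out-of-range values warn and truncate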
gagneurlab/concise
concise/layers.py
InputCodon
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs):
    """Input placeholder for array returned by `encodeCodon`

    Note: seq_length is divided by 3

    Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)`
    """
    if ignore_stop_codons:
        vocab = CODONS
    else:
        vocab = CODONS + STOP_CODONS

    assert seq_length % 3 == 0
    # integer division keeps the shape entry an int (seq_length is a multiple of 3)
    return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
python
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs):
    """Input placeholder for array returned by `encodeCodon`

    Note: seq_length is divided by 3

    Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)`
    """
    if ignore_stop_codons:
        vocab = CODONS
    else:
        vocab = CODONS + STOP_CODONS

    assert seq_length % 3 == 0
    # integer division keeps the shape entry an int (seq_length is a multiple of 3)
    return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
Input placeholder for array returned by `encodeCodon` Note: seq_length is divided by 3 Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L40-L53
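A placeholder sketch for `InputCodon`, assuming a working Keras installation (the exact shape representation varies between Keras versions):

from concise.layers import InputCodon  # import path from the record above

# a 300-nt coding sequence -> 100 codons, one-hot over the 61 non-stop codons
inp = InputCodon(seq_length=300, ignore_stop_codons=True, name="codons")
# inp has shape (None, 100, 61); with ignore_stop_codons=False it would be (None, 100, 64)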
gagneurlab/concise
concise/layers.py
InputAA
def InputAA(seq_length, name=None, **kwargs):
    """Input placeholder for array returned by `encodeAA`

    Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
    """
    return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
python
def InputAA(seq_length, name=None, **kwargs):
    """Input placeholder for array returned by `encodeAA`

    Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
    """
    return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
Input placeholder for array returned by `encodeAA` Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L56-L61
gagneurlab/concise
concise/layers.py
InputRNAStructure
def InputRNAStructure(seq_length, name=None, **kwargs):
    """Input placeholder for array returned by `encodeRNAStructure`

    Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
    """
    return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
python
def InputRNAStructure(seq_length, name=None, **kwargs):
    """Input placeholder for array returned by `encodeRNAStructure`

    Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
    """
    return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
Input placeholder for array returned by `encodeRNAStructure` Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L64-L69
gagneurlab/concise
concise/layers.py
InputSplines
def InputSplines(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for array returned by `encodeSplines`

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
python
def InputSplines(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for array returned by `encodeSplines`

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
Input placeholder for array returned by `encodeSplines` Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L73-L78
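The spline placeholders pair with `encodeSplines`: the placeholder shape is the per-sample output shape. A short sketch (import paths as in the records above):

import numpy as np
from concise.preprocessing.splines import encodeSplines
from concise.layers import InputSplines

x = np.random.uniform(0, 100, size=(50, 10))
x_spl = encodeSplines(x, n_bases=10)  # (50, 10, 10)

# placeholder matching the encodeSplines output minus the batch axis
inp = InputSplines(seq_length=10, n_bases=10, name="positions")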
gagneurlab/concise
concise/layers.py
InputSplines1D
def InputSplines1D(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for array returned by `encodeSplines`

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
python
def InputSplines1D(seq_length, n_bases=10, name=None, **kwargs):
    """Input placeholder for array returned by `encodeSplines`

    Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
Input placeholder for array returned by `encodeSplines` Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L81-L86
gagneurlab/concise
concise/layers.py
InputDNAQuantity
def InputDNAQuantity(seq_length, n_features=1, name=None, **kwargs):
    """Convenience wrapper around `keras.layers.Input`:

    `Input((seq_length, n_features), name=name, **kwargs)`
    """
    return Input((seq_length, n_features), name=name, **kwargs)
python
def InputDNAQuantity(seq_length, n_features=1, name=None, **kwargs):
    """Convenience wrapper around `keras.layers.Input`:

    `Input((seq_length, n_features), name=name, **kwargs)`
    """
    return Input((seq_length, n_features), name=name, **kwargs)
Convenience wrapper around `keras.layers.Input`: `Input((seq_length, n_features), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L90-L95
gagneurlab/concise
concise/layers.py
InputDNAQuantitySplines
def InputDNAQuantitySplines(seq_length, n_bases=10, name="DNASmoothPosition", **kwargs):
    """Convenience wrapper around `keras.layers.Input`:

    `Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
python
def InputDNAQuantitySplines(seq_length, n_bases=10, name="DNASmoothPosition", **kwargs):
    """Convenience wrapper around `keras.layers.Input`:

    `Input((seq_length, n_bases), name=name, **kwargs)`
    """
    return Input((seq_length, n_bases), name=name, **kwargs)
Convenience wrapper around keras.layers.Input: `Input((seq_length, n_bases), name=name, **kwargs)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L99-L104
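A minimal model sketch wiring `InputDNAQuantitySplines` into a Keras graph. Collapsing the `n_bases` axis with a `Dense(1)` layer is one simple way to turn the basis expansion into a smooth per-position effect; this is an illustration under assumed Keras 2 semantics, not the library's dedicated spline layer:

import keras.layers as kl
from keras.models import Model
from concise.layers import InputDNAQuantitySplines

inp = InputDNAQuantitySplines(seq_length=100, n_bases=10)
x = kl.Dense(1)(inp)       # weighted sum over the spline bases -> smooth positional effect
x = kl.Flatten()(x)
out = kl.Dense(1)(x)
model = Model(inp, out)
model.compile(optimizer="adam", loss="mse")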
gagneurlab/concise
concise/layers.py
ConvSequence._plot_weights_heatmap
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs):
    """Plot weights as a heatmap

    index = can be a particular index or a list of indices
    **kwargs - additional arguments to concise.utils.plot.heatmap
    """
    W = self.get_weights()[0]
    if index is None:
        index = np.arange(W.shape[2])

    fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ",
                  vocab=self.VOCAB, figsize=figsize, **kwargs)
    # plt.show()
    return fig
python
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs):
    """Plot weights as a heatmap

    index = can be a particular index or a list of indices
    **kwargs - additional arguments to concise.utils.plot.heatmap
    """
    W = self.get_weights()[0]
    if index is None:
        index = np.arange(W.shape[2])

    fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ",
                  vocab=self.VOCAB, figsize=figsize, **kwargs)
    # plt.show()
    return fig
Plot weights as a heatmap index = can be a particular index or a list of indices **kwargs - additional arguments to concise.utils.plot.heatmap
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L197-L210
gagneurlab/concise
concise/layers.py
ConvSequence._plot_weights_motif
def _plot_weights_motif(self, index,
                        plot_type="motif_raw",
                        background_probs=DEFAULT_BASE_BACKGROUND,
                        ncol=1,
                        figsize=None):
    """index: a single filter index, a list of indices, or None (plot all filters)
    """
    w_all = self.get_weights()
    if len(w_all) == 0:
        raise Exception("Layer needs to be initialized first")
    W = w_all[0]
    if index is None:
        index = np.arange(W.shape[2])
    if isinstance(index, int):
        index = [index]

    if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS:
        arr = pssm_array2pwm_array(W, background_probs)
    elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS:
        arr = W
    elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS:
        quasi_pwm = pssm_array2pwm_array(W, background_probs)
        arr = _pwm2pwm_info(quasi_pwm)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))

    # seqlogo_fig creates its own figure, so no separate plt.figure call is needed
    fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ")
    return fig
python
def _plot_weights_motif(self, index,
                        plot_type="motif_raw",
                        background_probs=DEFAULT_BASE_BACKGROUND,
                        ncol=1,
                        figsize=None):
    """index: a single filter index, a list of indices, or None (plot all filters)
    """
    w_all = self.get_weights()
    if len(w_all) == 0:
        raise Exception("Layer needs to be initialized first")
    W = w_all[0]
    if index is None:
        index = np.arange(W.shape[2])
    if isinstance(index, int):
        index = [index]

    if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS:
        arr = pssm_array2pwm_array(W, background_probs)
    elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS:
        arr = W
    elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS:
        quasi_pwm = pssm_array2pwm_array(W, background_probs)
        arr = _pwm2pwm_info(quasi_pwm)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))

    # seqlogo_fig creates its own figure, so no separate plt.figure call is needed
    fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ")
    return fig
index: a single filter index, a list of indices, or None (plot all filters)
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L212-L243
gagneurlab/concise
concise/layers.py
ConvSequence.plot_weights
def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs):
    """Plot filters as a heatmap or as motifs

    index = can be a particular index or a list of indices
    **kwargs - additional arguments to concise.utils.plot.heatmap
    """
    if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap":
        return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs)
    elif plot_type[:5] == "motif":
        return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
python
def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs):
    """Plot filters as a heatmap or as motifs

    index = can be a particular index or a list of indices
    **kwargs - additional arguments to concise.utils.plot.heatmap
    """
    if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap":
        return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs)
    elif plot_type[:5] == "motif":
        return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
Plot filters as a heatmap or as motifs index = can be a particular index or a list of indices **kwargs - additional arguments to concise.utils.plot.heatmap
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L245-L257
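A usage sketch for `plot_weights` on an initialized model. `InputDNA` and `ConvDNA` are assumed names for the DNA specializations in `concise.layers`, and the available plot types depend on the layer's `AVAILABLE_PLOTS`:

import keras.layers as kl
from keras.models import Model
from concise.layers import InputDNA, ConvDNA  # names assumed from concise.layers

inp = InputDNA(seq_length=50)
conv = ConvDNA(filters=2, kernel_size=8)
x = conv(inp)
out = kl.Dense(1)(kl.GlobalMaxPooling1D()(x))
model = Model(inp, out)

# plot the (here untrained) filters as raw motifs, two per row
fig = conv.plot_weights(plot_type="motif_raw", ncol=2)
fig.savefig("filters.png")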
gagneurlab/concise
concise/initializers.py
_check_pwm_list
def _check_pwm_list(pwm_list):
    """Check the input validity
    """
    for pwm in pwm_list:
        if not isinstance(pwm, PWM):
            raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm))
    return True
python
def _check_pwm_list(pwm_list):
    """Check the input validity
    """
    for pwm in pwm_list:
        if not isinstance(pwm, PWM):
            raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm))
    return True
Check the input validity
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/initializers.py#L22-L28
gagneurlab/concise
concise/initializers.py
_truncated_normal
def _truncated_normal(mean,
                      stddev,
                      seed=None,
                      normalize=True,
                      alpha=0.01):
    """Add noise with scipy.stats.truncnorm, bounded to (alpha, 1 - alpha).
    """
    if seed is not None:
        np.random.seed(seed)
    if stddev == 0:
        X = mean
    else:
        # sample in [alpha, 1 - alpha] around `mean`
        gen_X = truncnorm((alpha - mean) / stddev,
                          ((1 - alpha) - mean) / stddev,
                          loc=mean, scale=stddev)
        # loc=mean already centres the distribution, so no extra shift is needed
        X = gen_X.rvs()
    if normalize:
        # normalize so that each row sums to 1
        col_sums = X.sum(1)
        X = X / col_sums[:, np.newaxis]
    return X
python
def _truncated_normal(mean,
                      stddev,
                      seed=None,
                      normalize=True,
                      alpha=0.01):
    """Add noise with scipy.stats.truncnorm, bounded to (alpha, 1 - alpha).
    """
    if seed is not None:
        np.random.seed(seed)
    if stddev == 0:
        X = mean
    else:
        # sample in [alpha, 1 - alpha] around `mean`
        gen_X = truncnorm((alpha - mean) / stddev,
                          ((1 - alpha) - mean) / stddev,
                          loc=mean, scale=stddev)
        # loc=mean already centres the distribution, so no extra shift is needed
        X = gen_X.rvs()
    if normalize:
        # normalize so that each row sums to 1
        col_sums = X.sum(1)
        X = X / col_sums[:, np.newaxis]
    return X
Add noise with scipy.stats.truncnorm. Bounded to (alpha, 1 - alpha).
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/initializers.py#L31-L54
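A sampling sketch for `_truncated_normal` (a private helper, shown only for illustration). With an array-valued `mean`, scipy's `truncnorm` broadcasts elementwise:

import numpy as np

# draw a noisy, row-normalized matrix around a uniform PWM-like mean
mean = np.full((4, 4), 0.25)
X = _truncated_normal(mean, stddev=0.05, seed=0)
print(X.sum(1))  # every row sums to 1 after normalization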
gagneurlab/concise
concise/utils/plot.py
heatmap
def heatmap(w, vmin=None, vmax=None, diverge_color=False, ncol=1,
            plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)):
    """Plot a heatmap from weight matrix w

    vmin, vmax = z-axis range
    diverge_color = should we use diverging colors?
    plot_name = plot title
    vocab = vocabulary (corresponds to the first axis)
    """
    # generate y and x values from the dimension lengths
    assert len(vocab) == w.shape[0]
    plt_y = np.arange(w.shape[0] + 1) + 0.5
    plt_x = np.arange(w.shape[1] + 1) - 0.5

    z_min = w.min()
    z_max = w.max()

    if vmin is None:
        vmin = z_min
    if vmax is None:
        vmax = z_max
    if diverge_color:
        color_map = plt.cm.RdBu
    else:
        color_map = plt.cm.Blues

    fig = plt.figure(figsize=figsize)

    # multiple axes when w is 3-dimensional
    if len(w.shape) == 3:
        n_plots = w.shape[2]
        nrow = math.ceil(n_plots / ncol)
    else:
        n_plots = 1
        nrow = 1
        ncol = 1

    for i in range(n_plots):
        if len(w.shape) == 3:
            w_cur = w[:, :, i]
        else:
            w_cur = w
        ax = plt.subplot(nrow, ncol, i + 1)
        plt.tight_layout()
        im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map,
                           vmin=vmin, vmax=vmax, edgecolors="white")
        ax.grid(False)
        ax.set_yticklabels([""] + vocab, minor=False)
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_xticks(np.arange(w_cur.shape[1] + 1))
        ax.set_xlim(plt_x.min(), plt_x.max())
        ax.set_ylim(plt_y.min(), plt_y.max())

        # nice scale location:
        # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        fig.colorbar(im, cax=cax)

        if plot_name is not None:
            # only append the subplot index when there are multiple panels
            if n_plots > 1:
                pln = plot_name + " {0}".format(i)
            else:
                pln = plot_name
            ax.set_title(pln)
        ax.set_aspect('equal')
    return fig
python
def heatmap(w, vmin=None, vmax=None, diverge_color=False, ncol=1,
            plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)):
    """Plot a heatmap from weight matrix w

    vmin, vmax = z-axis range
    diverge_color = should we use diverging colors?
    plot_name = plot title
    vocab = vocabulary (corresponds to the first axis)
    """
    # generate y and x values from the dimension lengths
    assert len(vocab) == w.shape[0]
    plt_y = np.arange(w.shape[0] + 1) + 0.5
    plt_x = np.arange(w.shape[1] + 1) - 0.5

    z_min = w.min()
    z_max = w.max()

    if vmin is None:
        vmin = z_min
    if vmax is None:
        vmax = z_max
    if diverge_color:
        color_map = plt.cm.RdBu
    else:
        color_map = plt.cm.Blues

    fig = plt.figure(figsize=figsize)

    # multiple axes when w is 3-dimensional
    if len(w.shape) == 3:
        n_plots = w.shape[2]
        nrow = math.ceil(n_plots / ncol)
    else:
        n_plots = 1
        nrow = 1
        ncol = 1

    for i in range(n_plots):
        if len(w.shape) == 3:
            w_cur = w[:, :, i]
        else:
            w_cur = w
        ax = plt.subplot(nrow, ncol, i + 1)
        plt.tight_layout()
        im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map,
                           vmin=vmin, vmax=vmax, edgecolors="white")
        ax.grid(False)
        ax.set_yticklabels([""] + vocab, minor=False)
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_xticks(np.arange(w_cur.shape[1] + 1))
        ax.set_xlim(plt_x.min(), plt_x.max())
        ax.set_ylim(plt_y.min(), plt_y.max())

        # nice scale location:
        # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        fig.colorbar(im, cax=cax)

        if plot_name is not None:
            # only append the subplot index when there are multiple panels
            if n_plots > 1:
                pln = plot_name + " {0}".format(i)
            else:
                pln = plot_name
            ax.set_title(pln)
        ax.set_aspect('equal')
    return fig
Plot a heatmap from weight matrix w vmin, vmax = z-axis range diverge_color = should we use diverging colors? plot_name = plot title vocab = vocabulary (corresponds to the first axis)
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L22-L89
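A usage sketch for the `heatmap` utility (import path from the record above):

import numpy as np
from concise.utils.plot import heatmap

# a random 4 x 8 weight matrix: vocabulary axis first, position axis second
w = np.random.randn(4, 8)
fig = heatmap(w, diverge_color=True, plot_name="example filter")
fig.savefig("heatmap.png")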
gagneurlab/concise
concise/utils/plot.py
standardize_polygons_str
def standardize_polygons_str(data_str):
    """Given a POLYGON string, standardize the coordinates to a 1x1 grid.

    Input: data_str (taken from above)
    Output: tuple of polygon objects
    """
    # find all of the polygons in the letter (for instance an A
    # needs to be constructed from 2 polygons)
    path_strs = re.findall(r"\(\(([^\)]+?)\)\)", data_str.strip())

    # convert the data into a numpy array
    polygons_data = []
    for path_str in path_strs:
        data = np.array([
            tuple(map(float, x.split()))
            for x in path_str.strip().split(",")])
        polygons_data.append(data)

    # standardize the coordinates
    min_coords = np.vstack([data.min(0) for data in polygons_data]).min(0)
    max_coords = np.vstack([data.max(0) for data in polygons_data]).max(0)
    for data in polygons_data:
        data[:, ] -= min_coords
        data[:, ] /= (max_coords - min_coords)

    polygons = []
    for data in polygons_data:
        polygons.append(load_wkt(
            "POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))

    return tuple(polygons)
python
def standardize_polygons_str(data_str):
    """Given a POLYGON string, standardize the coordinates to a 1x1 grid.

    Input: data_str (taken from above)
    Output: tuple of polygon objects
    """
    # find all of the polygons in the letter (for instance an A
    # needs to be constructed from 2 polygons)
    path_strs = re.findall(r"\(\(([^\)]+?)\)\)", data_str.strip())

    # convert the data into a numpy array
    polygons_data = []
    for path_str in path_strs:
        data = np.array([
            tuple(map(float, x.split()))
            for x in path_str.strip().split(",")])
        polygons_data.append(data)

    # standardize the coordinates
    min_coords = np.vstack([data.min(0) for data in polygons_data]).min(0)
    max_coords = np.vstack([data.max(0) for data in polygons_data]).max(0)
    for data in polygons_data:
        data[:, ] -= min_coords
        data[:, ] /= (max_coords - min_coords)

    polygons = []
    for data in polygons_data:
        polygons.append(load_wkt(
            "POLYGON((%s))" % ",".join(" ".join(map(str, x)) for x in data)))

    return tuple(polygons)
Given a POLYGON string, standardize the coordinates to a 1x1 grid. Input: data_str (taken from above) Output: tuple of polygon objects
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L98-L126
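A usage sketch for `standardize_polygons_str`; `load_wkt` in the function body is assumed to be shapely's WKT loader:

# a 4 x 2 rectangle given as WKT; after standardization its bounds are the unit square
poly_str = "POLYGON((0 0, 4 0, 4 2, 0 2, 0 0))"
polygons = standardize_polygons_str(poly_str)
print(polygons[0].bounds)  # (0.0, 0.0, 1.0, 1.0)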