def reorder(self, updates_ids, offset=None, utc=None):
'''
Edit the order in which statuses for the specified social media profile will
be sent out of the buffer.
'''
url = PATHS['REORDER'] % self.profile_id
order_format = "order[]=%s&"
post_data = ''
if offset:
post_data += 'offset=%s&' % offset
if utc:
post_data += 'utc=%s&' % utc
for update in updates_ids:
post_data += order_format % update
return self.api.post(url=url, data=post_data)

def new(self, text, shorten=None, now=None, top=None, media=None, when=None):
'''
Create one or more new status updates.
'''
url = PATHS['CREATE']
post_data = "text=%s&" % text
post_data += "profile_ids[]=%s&" % self.profile_id
if shorten:
post_data += "shorten=%s&" % shorten
if now:
post_data += "now=%s&" % now
if top:
post_data += "top=%s&" % top
if when:
post_data += "scheduled_at=%s&" % str(when)
if media:
media_format = "media[%s]=%s&"
for media_type, media_item in media.items():
post_data += media_format % (media_type, media_item)
response = self.api.post(url=url, data=post_data)
new_update = Update(api=self.api, raw_response=response['updates'][0])
self.append(new_update)
return new_update

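# A minimal usage sketch. Assumptions: a buffer-python style client in which a
# hypothetical `profile` object exposes this collection as `profile.updates`;
# none of these names are defined in the snippet above.
new_update = profile.updates.new("Hello from the API!", now=True)
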
def set_level(self, level='info', handlers=None):
"""
Set the logging level (which types of logs are actually printed / recorded)
to one of ['debug', 'info', 'warn', 'error', 'fatal'] in that order
of severity
"""
for h in self.get_handlers(handlers):
h.setLevel(levels[level])

def set_formatter(self, formatter='standard', handlers=None):
"""
Set the text format of messages to one of the pre-determined forms,
one of ['quiet', 'minimal', 'standard', 'verbose']
"""
for h in self.get_handlers(handlers):
h.setFormatter(logging.Formatter(formatters[formatter]))

def add_handler(self, name='console-color', level='info', formatter='standard', **kwargs):
"""
Add another handler to the logging system if not present already.
Available handlers are currently: ['console-bw', 'console-color', 'rotating-log']
"""
# make sure the log file has a name
if name == 'rotating-log' and 'filename' not in kwargs:
kwargs.update({'filename': self.logfilename})
# make sure the stringio handler has a buffer to write into
if name == 'stringio' and 'stringio' not in kwargs:
kwargs.update({'stringio': StringIO.StringIO()})
handler = types[name](**kwargs)
self.add_handler_raw(handler, name, level=level, formatter=formatter)

def remove_handler(self, name):
"""
Remove handler from the logging system if present already.
Available handlers are currently: ['console-bw', 'console-color', 'rotating-log']
"""
if name in self.handlers:
self.log.removeHandler(self.handlers[name])

@contextlib.contextmanager  # assumed import; the yield-based body requires this decorator
def noformat(self):
""" Temporarily do not use any formatter so that text printed is raw """
formats = {}
try:
for h in self.get_handlers():
formats[h] = h.formatter
self.set_formatter(formatter='quiet')
yield
finally:
for k, v in iteritems(formats):
k.formatter = v

def set_verbosity(self, verbosity='vvv', handlers=None):
"""
Set the verbosity level of a certain log handler or of all handlers.
Parameters
----------
verbosity : 'v' to 'vvvvv'
the level of verbosity, more v's is more verbose
handlers : string, or list of strings
handler names can be found in ``peri.logger.types.keys()``
Current set is::
['console-bw', 'console-color', 'rotating-log']
"""
self.verbosity = sanitize(verbosity)
self.set_level(v2l[verbosity], handlers=handlers)
self.set_formatter(v2f[verbosity], handlers=handlers)

def normalize(im, invert=False, scale=None, dtype=np.float64):
"""
Normalize a field to the (min, max) exposure range given by `scale`;
the default is (0, 255). Values at `scale[0]` map to 0 and values at
`scale[1]` map to 1. Invert the image if requested.
"""
if dtype not in {np.float16, np.float32, np.float64}:
raise ValueError('dtype must be numpy.float16, float32, or float64.')
out = im.astype('float').copy()
scale = scale or (0.0, 255.0)
l, u = (float(i) for i in scale)
out = (out - l) / (u - l)
if invert:
out = -out + (out.max() + out.min())
return out.astype(dtype)

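# Quick sanity check of the mapping (assumes `numpy as np` is imported as in
# the surrounding module): an 8-bit exposure range maps onto [0, 1].
im8 = np.array([[0., 127.5, 255.]])
print(normalize(im8))               # -> [[0. , 0.5, 1. ]]
print(normalize(im8, invert=True))  # -> [[1. , 0.5, 0. ]], flipped about mid-exposure
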
def generate_sphere(radius):
"""Generates a centered boolean mask of a 3D sphere"""
rint = np.ceil(radius).astype('int')
t = np.arange(-rint, rint+1, 1)
x,y,z = np.meshgrid(t, t, t, indexing='ij')
r = np.sqrt(x*x + y*y + z*z)
sphere = r < radius
return sphere

def local_max_featuring(im, radius=2.5, noise_size=1., bkg_size=None,
minmass=1., trim_edge=False):
"""Local max featuring to identify bright spherical particles on a
dark background.
Parameters
----------
im : numpy.ndarray
The image to identify particles in.
radius : Float > 0, optional
Featuring radius of the particles. Default is 2.5
noise_size : Float, optional
Size of Gaussian kernel for smoothing out noise. Default is 1.
bkg_size : Float or None, optional
Size of the Gaussian kernel for removing long-wavelength
background. Default is None, which gives `2 * radius`
minmass : Float, optional
Return only particles with a ``mass > minmass``. Default is 1.
trim_edge : Bool, optional
Set to True to omit particles identified exactly at the edge
of the image. False-positive features frequently occur here
because of the reflected bandpass featuring. Default is
False, i.e. find particles at the edge of the image.
Returns
-------
pos, mass : numpy.ndarray
Particle positions and masses
"""
if radius <= 0:
raise ValueError('`radius` must be > 0')
#1. Remove noise
filtered = nd.gaussian_filter(im, noise_size, mode='mirror')
#2. Remove long-wavelength background:
if bkg_size is None:
bkg_size = 2*radius
filtered -= nd.gaussian_filter(filtered, bkg_size, mode='mirror')
#3. Local max feature
footprint = generate_sphere(radius)
e = nd.maximum_filter(filtered, footprint=footprint)
mass_im = nd.convolve(filtered, footprint, mode='mirror')
good_im = (e==filtered) * (mass_im > minmass)
pos = np.transpose(np.nonzero(good_im))
if trim_edge:
good = np.all(pos > 0, axis=1) & np.all(pos+1 < im.shape, axis=1)
pos = pos[good, :].copy()
masses = mass_im[pos[:,0], pos[:,1], pos[:,2]].copy()
return pos, masses

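# A small end-to-end sketch (assumes `numpy as np` and `scipy.ndimage as nd`,
# as used above): one blurry bright blob on a dark background.
im = np.zeros((20, 20, 20))
im[10, 10, 10] = 100.0
im = nd.gaussian_filter(im, 2.0)  # fake a diffraction-blurred particle
pos, mass = local_max_featuring(im, radius=2.5, minmass=0.1)
print(pos)  # expect a single row near [10, 10, 10]
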
def otsu_threshold(data, bins=255):
"""
Otsu threshold on data.
Otsu thresholding [1]_ is a method for selecting an intensity value
for thresholding an image into foreground and background. The
selected intensity threshold maximizes the inter-class variance.
Parameters
----------
data : numpy.ndarray
The data to threshold
bins : Int or numpy.ndarray, optional
Bin edges, as passed to numpy.histogram
Returns
-------
numpy.float
The value of the threshold which maximizes the inter-class
variance.
Notes
-----
This could be generalized to more than 2 classes.
References
----------
.. [1] N. Otsu, "A Threshold Selection Method from Gray-level
Histograms," IEEE Trans. Syst., Man, Cybern., Syst., 9, 1,
62-66 (1979)
"""
h0, x0 = np.histogram(data.ravel(), bins=bins)
h = h0.astype('float') / h0.sum() #normalize
x = 0.5*(x0[1:] + x0[:-1]) #bin center
wk = np.array([h[:i+1].sum() for i in range(h.size)]) #omega_k
mk = np.array([sum(x[:i+1]*h[:i+1]) for i in range(h.size)]) #mu_k
mt = mk[-1] #mu_T
sb = (mt*wk - mk)**2 / (wk*(1-wk) + 1e-15) #sigma_b
ind = sb.argmax()
return 0.5*(x0[ind] + x0[ind+1])

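# Sketch: threshold a synthetic bimodal sample (assumes `numpy as np`).
data = np.concatenate([np.random.randn(10000), 5 + np.random.randn(10000)])
print(otsu_threshold(data))  # expect a value near 2.5, between the two modes
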
def harris_feature(im, region_size=5, to_return='harris', scale=0.05):
"""
Harris-motivated feature detection on a d-dimensional image.
Parameters
---------
im
region_size
to_return : {'harris','matrix','trace-determinant'}
"""
ndim = im.ndim
#1. Gradient of image
grads = [nd.sobel(im, axis=i) for i in range(ndim)]
#2. Corner response matrix
matrix = np.zeros((ndim, ndim) + im.shape)
for a in range(ndim):
for b in range(ndim):
matrix[a,b] = nd.filters.gaussian_filter(grads[a]*grads[b],
region_size)
if to_return == 'matrix':
return matrix
#3. Trace, determinant
trc = np.trace(matrix, axis1=0, axis2=1)
det = np.linalg.det(matrix.T).T
if to_return == 'trace-determinant':
return trc, det
else:
#4. Harris detector:
harris = det - scale*trc*trc
return harris

def identify_slab(im, sigma=5., region_size=10, masscut=1e4, asdict=False):
"""
Identifies slabs in an image.
Functions by running a Harris-inspired edge detection on the image,
thresholding the edge, then clustering.
Parameters
----------
im : numpy.ndarray
3D array of the image to analyze.
sigma : Float, optional
Gaussian blurring kernel to remove non-slab features such as
noise and particles. Default is 5.
region_size : Int, optional
The size of region for Harris corner featuring. Default is 10
masscut : Float, optional
The minimum number of pixels for a feature to be identified as
a slab. Default is 1e4; should be smaller for smaller images.
asdict : Bool, optional
Set to True to return a list of dicts, each with keys ``'zpos'``
for the z-position and ``'angles'`` for the (theta, phi) rotation
angles about the x- and z- axes, i.e. a list of dicts which
can be unpacked into a :class:`peri.comp.objs.Slab`
Returns
-------
[poses, normals] : numpy.ndarray
The positions and normals of each slab in the image; ``poses[i]``
and ``normals[i]`` are the ``i``th slab. Returned if ``asdict``
is False
[list]
A list of dictionaries. Returned if ``asdict`` is True
"""
#1. edge detect:
fim = nd.filters.gaussian_filter(im, sigma)
trc, det = harris_feature(fim, region_size, to_return='trace-determinant')
#we want an edge == not a corner, so one eigenvalue is high and
#one is low compared to the other.
#So -- trc high, normalized det low:
dnrm = det / (trc*trc)
trc_cut = otsu_threshold(trc)
det_cut = otsu_threshold(dnrm)
slabs = (trc > trc_cut) & (dnrm < det_cut)
labeled, nslabs = nd.label(slabs)
#masscuts:
masses = [(labeled == i).sum() for i in range(1, nslabs+1)]
good = np.array([m > masscut for m in masses])
inds = np.nonzero(good)[0] + 1 #+1 b/c the lowest label is the bkg
#Slabs are identified, now getting the coords:
poses = np.array(nd.measurements.center_of_mass(trc, labeled, inds))
#normals from eigenvectors of the covariance matrix
normals = []
z = np.arange(im.shape[0]).reshape(-1,1,1).astype('float')
y = np.arange(im.shape[1]).reshape(1,-1,1).astype('float')
x = np.arange(im.shape[2]).reshape(1,1,-1).astype('float')
#We also need to identify the direction of the normal:
gim = [nd.sobel(fim, axis=i) for i in range(fim.ndim)]
for i, p in zip(inds, poses):  # loop over the labels that passed the masscut
wts = trc * (labeled == i)
wts /= wts.sum()
zc, yc, xc = [xi-pi for xi, pi in zip([z,y,x],p.squeeze())]
cov = [[np.sum(xi*xj*wts) for xi in [zc,yc,xc]] for xj in [zc,yc,xc]]
vl, vc = np.linalg.eigh(cov)
#lowest eigenvalue is the normal:
normal = vc[:,0]
#Removing the sign ambiguity:
gn = np.sum([n*g[tuple(p.astype('int'))] for g,n in zip(gim, normal)])
normal *= np.sign(gn)
normals.append(normal)
if asdict:
get_theta = lambda n: -np.arctan2(n[1], -n[0])
get_phi = lambda n: np.arcsin(n[2])
return [{'zpos':p[0], 'angles':(get_theta(n), get_phi(n))}
for p, n in zip(poses, normals)]
else:
return poses, np.array(normals)

def plot_errors_single(rad, crb, errors, labels=['trackpy', 'peri']):
fig = pl.figure()
comps = ['z', 'y', 'x']
markers = ['o', '^', '*']
colors = COLORS
for i in reversed(range(3)):
pl.plot(rad, crb[:,0,i], lw=2.5, label='CRB-'+comps[i], color=colors[i])
for c, (error, label) in enumerate(zip(errors, labels)):
mu = np.sqrt((error**2).mean(axis=1))[:,0,:]
std = np.std(np.sqrt((error**2)), axis=1)[:,0,:]
for i in reversed(range(len(mu[0]))):
pl.plot(rad, mu[:,i], marker=markers[c], color=colors[i], lw=0, label=label+"-"+comps[i], ms=13)
pl.ylim(1e-3, 8e0)
pl.semilogy()
pl.legend(loc='upper left', ncol=3, numpoints=1, prop={"size": 16})
pl.xlabel(r"radius (pixels)")
pl.ylabel(r"CRB / $\Delta$ (pixels)")
"""
ax = fig.add_axes([0.6, 0.6, 0.28, 0.28])
ax.plot(rad, crb[:,0,:], lw=2.5)
for c, error in enumerate(errors):
mu = np.sqrt((error**2).mean(axis=1))[:,0,:]
std = np.std(np.sqrt((error**2)), axis=1)[:,0,:]
for i in range(len(mu[0])):
ax.errorbar(rad, mu[:,i], yerr=std[:,i], fmt=markers[c], color=colors[i], lw=1)
ax.set_ylim(-0.1, 1.5)
ax.grid('off')
""" |
def sphere_triangle_cdf(dr, a, alpha):
""" Cumulative distribution function for the traingle distribution """
p0 = (dr+alpha)**2/(2*alpha**2)*(0 > dr)*(dr>-alpha)
p1 = 1*(dr>0)-(alpha-dr)**2/(2*alpha**2)*(0<dr)*(dr<alpha)
return (1-np.clip(p0+p1, 0, 1))

def sphere_analytical_gaussian(dr, a, alpha=0.2765):
"""
Analytically calculate the sphere's functional form by convolving the
Heaviside function with a first-order approximation to the sinc, a Gaussian.
The alpha parameter controls the width of the approximation -- it should be
1, but is fit to be roughly 0.2765
"""
term1 = 0.5*(erf((dr+2*a)/(alpha*np.sqrt(2))) + erf(-dr/(alpha*np.sqrt(2))))
term2 = np.sqrt(0.5/np.pi)*(alpha/(dr+a+1e-10)) * (
np.exp(-0.5*dr**2/alpha**2) - np.exp(-0.5*(dr+2*a)**2/alpha**2)
)
return term1 - term2

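# Sketch comparing two platonic edge profiles on the same signed-distance grid
# (assumes `numpy as np`; `dr` is distance from the sphere surface, negative inside):
dr = np.linspace(-3, 3, 7)
print(sphere_analytical_gaussian(dr, a=5.0))  # ~1 deep inside, ~0 outside
print(sphere_triangle_cdf(dr, 5.0, 0.5))      # same limits, compactly supported edge
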
def sphere_analytical_gaussian_trim(dr, a, alpha=0.2765, cut=1.6):
"""
See sphere_analytical_gaussian_exact.
I trimmed the terms of the functional form that are essentially zero (1e-8)
for |dr| > cut (~1.5), a fine approximation for these platonics anyway.
"""
m = np.abs(dr) <= cut
# only compute on the relevant scales
rr = dr[m]
t = -rr/(alpha*np.sqrt(2))
q = 0.5*(1 + erf(t)) - np.sqrt(0.5/np.pi)*(alpha/(rr+a+1e-10)) * np.exp(-t*t)
# fill in the grid, inside the interpolation and outside where values are constant
ans = 0*dr
ans[m] = q
ans[dr > cut] = 0
ans[dr < -cut] = 1
return ans

def sphere_analytical_gaussian_fast(dr, a, alpha=0.2765, cut=1.20):
"""
See sphere_analytical_gaussian_trim, but implemented in C with
fast erf and exp approximations found at
Abramowitz and Stegun: Handbook of Mathematical Functions
A Fast, Compact Approximation of the Exponential Function
The default cut 1.20 was chosen based on the accuracy of fast_erf
"""
code = """
double coeff1 = 1.0/(alpha*sqrt(2.0));
double coeff2 = sqrt(0.5/pi)*alpha;
for (int i=0; i<N; i++){
double dri = dr[i];
if (dri < cut && dri > -cut){
double t = -dri*coeff1;
ans[i] = 0.5*(1+fast_erf(t)) - coeff2/(dri+a+1e-10) * fast_exp(-t*t);
} else {
ans[i] = 0.0*(dri > cut) + 1.0*(dri < -cut);
}
}
"""
shape = dr.shape
dr = dr.flatten()
N = dr.size
ans = 0*dr
pi = np.pi
inline(code, arg_names=['dr', 'a', 'alpha', 'cut', 'ans', 'pi', 'N'],
support_code=functions, verbose=0)
return ans.reshape(shape)

def sphere_constrained_cubic(dr, a, alpha):
"""
Sphere generated by a cubic interpolant constrained to be (1,0) on
(r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction.
"""
sqrt3 = np.sqrt(3)
b_coeff = a*0.5/sqrt3*(1 - 0.6*sqrt3*alpha)/(0.15 + a*a)
rscl = np.clip(dr, -0.5*sqrt3, 0.5*sqrt3)
a, d = rscl + 0.5*sqrt3, rscl - 0.5*sqrt3  # note: re-binds `a`; the radius is not needed below
return alpha*d*a*rscl + b_coeff*d*a - d/sqrt3

def exact_volume_sphere(rvec, pos, radius, zscale=1.0, volume_error=1e-5,
function=sphere_analytical_gaussian, max_radius_change=1e-2, args=()):
"""
Perform an iterative method to calculate the effective sphere that perfectly
(up to the volume_error) conserves volume. Return the resulting image
"""
vol_goal = 4./3*np.pi*radius**3 / zscale
rprime = radius
dr = inner(rvec, pos, rprime, zscale=zscale)
t = function(dr, rprime, *args)
for i in range(MAX_VOLUME_ITERATIONS):
vol_curr = np.abs(t.sum())
if np.abs(vol_goal - vol_curr)/vol_goal < volume_error:
break
rprime = rprime + 1.0*(vol_goal - vol_curr) / (4*np.pi*rprime**2)
if np.abs(rprime - radius)/radius > max_radius_change:
break
dr = inner(rvec, pos, rprime, zscale=zscale)
t = function(dr, rprime, *args)
return t

def _tile(self, n):
"""Get the update tile surrounding particle `n` """
pos = self._trans(self.pos[n])
return Tile(pos, pos).pad(self.support_pad)

def _p2i(self, param):
"""
Parameter name to indices; returns (coord, index). E.g. for a position
parameter ``'<prefix>-100-x'`` this returns ``('x', 100)``
"""
g = param.split('-')
if len(g) == 3:
return g[2], int(g[1])
else:
raise ValueError('`param` passed in an incorrect format')

def initialize(self):
"""Start from scratch and initialize all objects / draw self.particles"""
self.particles = np.zeros(self.shape.shape, dtype=self.float_precision)
for p0, arg0 in zip(self.pos, self._drawargs()):
self._draw_particle(p0, *listify(arg0))

def _vps(self, inds):
"""Clips a list of inds to be on [0, self.N]"""
return [j for j in inds if j >= 0 and j < self.N] |
def _i2p(self, ind, coord):
""" Translate index info to parameter name """
return '-'.join([self.param_prefix, str(ind), coord])

def get_update_tile(self, params, values):
""" Get the amount of support size required for a particular update."""
doglobal, particles = self._update_type(params)
if doglobal:
return self.shape.copy()
# 1) store the current parameters of interest
values0 = self.get_values(params)
# 2) calculate the current tileset
tiles0 = [self._tile(n) for n in particles]
# 3) update to newer parameters and calculate tileset
self.set_values(params, values)
tiles1 = [self._tile(n) for n in particles]
# 4) revert parameters & return union of all tiles
self.set_values(params, values0)
return Tile.boundingtile(tiles0 + tiles1)

def update(self, params, values):
"""
Update the particles field given new parameter values
"""
#1. Figure out if we're going to do a global update, in which
# case we just draw from scratch.
global_update, particles = self._update_type(params)
# if we are doing a global update, everything must change, so
# starting fresh will be faster instead of add subtract
if global_update:
self.set_values(params, values)
self.initialize()
return
# otherwise, update individual particles: delete the current versions
# of the particles, then redraw them anew at the places given by
# (params, values)
oldargs = self._drawargs()
for n in particles:
self._draw_particle(self.pos[n], *listify(oldargs[n]), sign=-1)
self.set_values(params, values)
newargs = self._drawargs()
for n in particles:
self._draw_particle(self.pos[n], *listify(newargs[n]), sign=+1)

def param_particle(self, ind):
""" Get position and radius of one or more particles """
ind = self._vps(listify(ind))
return [self._i2p(i, j) for i in ind for j in ['z', 'y', 'x', 'a']]

def param_particle_pos(self, ind):
""" Get position of one or more particles """
ind = self._vps(listify(ind))
return [self._i2p(i, j) for i in ind for j in ['z', 'y', 'x']]

def param_particle_rad(self, ind):
""" Get radius of one or more particles """
ind = self._vps(listify(ind))
return [self._i2p(i, 'a') for i in ind]

def add_particle(self, pos, rad):
"""
Add a particle or list of particles given by a list of positions and
radii, both need to be array-like.
Parameters
----------
pos : array-like [N, 3]
Positions of all new particles
rad : array-like [N]
Corresponding radii of new particles
Returns
-------
inds : N-element numpy.ndarray.
Indices of the added particles.
"""
rad = listify(rad)
# add some zero mass particles to the list (same as not having these
# particles in the image, which is true at this moment)
inds = np.arange(self.N, self.N+len(rad))
self.pos = np.vstack([self.pos, pos])
self.rad = np.hstack([self.rad, np.zeros(len(rad))])
# update the parameters globally
self.setup_variables()
self.trigger_parameter_change()
# now request a drawing of the particle plz
params = self.param_particle_rad(inds)
self.trigger_update(params, rad)
return inds

def remove_particle(self, inds):
"""
Remove the particle(s) at index `inds` (may be a list).
Returns [N, 3], [N] element numpy.ndarrays of the removed pos, rad.
"""
if self.rad.shape[0] == 0:
return
inds = listify(inds)
# Here's the game plan:
# 1. get all positions and sizes of particles that we will be
# removing (to return to user)
# 2. redraw those particles to 0.0 radius
# 3. remove the particles and trigger changes
# However, there is an issue -- if there are two particles at opposite
# ends of the image, it will be significantly slower than usual
pos = self.pos[inds].copy()
rad = self.rad[inds].copy()
self.trigger_update(self.param_particle_rad(inds), np.zeros(len(inds)))
self.pos = np.delete(self.pos, inds, axis=0)
self.rad = np.delete(self.rad, inds, axis=0)
# update the parameters globally
self.setup_variables()
self.trigger_parameter_change()
return np.array(pos).reshape(-1,3), np.array(rad).reshape(-1)

def _update_type(self, params):
""" Returns dozscale and particle list of update """
dozscale = False
particles = []
for p in listify(params):
typ, ind = self._p2i(p)
particles.append(ind)
dozscale = dozscale or typ == 'zscale'
particles = set(particles)
return dozscale, particles

def _tile(self, n):
""" Get the tile surrounding particle `n` """
zsc = np.array([1.0/self.zscale, 1, 1])
pos, rad = self.pos[n], self.rad[n]
pos = self._trans(pos)
return Tile(pos - zsc*rad, pos + zsc*rad).pad(self.support_pad)

def update(self, params, values):
"""Calls an update, but clips radii to be > 0"""
# radparams = self.param_radii()
params = listify(params)
values = listify(values)
for i, p in enumerate(params):
# if (p in radparams) & (values[i] < 0):
if (p[-2:] == '-a') and (values[i] < 0):
values[i] = 0.0
super(PlatonicSpheresCollection, self).update(params, values)

def rmatrix(self):
"""
Generate the composite rotation matrix that rotates the slab normal.
The rotation is a rotation about the x-axis, followed by a rotation
about the z-axis.
"""
t = self.param_dict[self.lbl_theta]
r0 = np.array([ [np.cos(t), -np.sin(t), 0],
[np.sin(t), np.cos(t), 0],
[0, 0, 1]])
p = self.param_dict[self.lbl_phi]
r1 = np.array([ [np.cos(p), 0, np.sin(p)],
[0, 1, 0],
[-np.sin(p), 0, np.cos(p)]])
return np.dot(r1, r0)

def j2(x):
""" A fast j2 defined in terms of other special functions """
to_return = 2./(x+1e-15)*j1(x) - j0(x)
to_return[x==0] = 0
return to_return

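# Sketch check against scipy's Bessel functions via the recurrence
# J2(x) = (2/x) J1(x) - J0(x) (assumes j0, j1 come from scipy.special, as above):
from scipy.special import jn
x = np.linspace(0.1, 10.0, 5)
print(np.allclose(j2(x), jn(2, x)))  # -> True
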
def calc_pts_hg(npts=20):
"""Returns Hermite-Gauss quadrature points for even functions"""
pts_hg, wts_hg = np.polynomial.hermite.hermgauss(npts*2)
pts_hg = pts_hg[npts:]
wts_hg = wts_hg[npts:] * np.exp(pts_hg*pts_hg)
return pts_hg, wts_hg

def calc_pts_lag(npts=20):
"""
Returns Gauss-Laguerre quadrature points rescaled for line scan integration
Parameters
----------
npts : {15, 20, 25}, optional
The number of points to use; must be one of {15, 20, 25}. Default is 20.
Notes
-----
The scale is set internally as the best rescaling for a line scan
integral; it was checked numerically for the allowed npts.
Acceptable pts/scls/approximate line integral scan error:
(pts, scl ) : ERR
------------------------------------
(15, 0.072144) : 0.002193
(20, 0.051532) : 0.001498
(25, 0.043266) : 0.001209
The previous HG(20) error was ~0.13ish
"""
scl = { 15:0.072144,
20:0.051532,
25:0.043266}[npts]
pts0, wts0 = np.polynomial.laguerre.laggauss(npts)
pts = np.sinh(pts0*scl)
wts = scl*wts0*np.cosh(pts0*scl)*np.exp(pts0)
return pts, wts

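# Sketch: both rules integrate even functions over the half line, i.e.
# sum(wts * f(pts)) ~ integral_0^inf f(x) dx. Gaussian test (the exact value
# is sqrt(pi)/2 ~ 0.8862); assumes `numpy as np`:
pts, wts = calc_pts_hg(20)
print(np.sum(wts * np.exp(-pts**2)))
pts, wts = calc_pts_lag(20)
print(np.sum(wts * np.exp(-pts**2)))  # same integral via the sinh'd Laguerre rule
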
def f_theta(cos_theta, zint, z, n2n1=0.95, sph6_ab=None, **kwargs):
"""
Returns the wavefront aberration for an aberrated, defocused lens.
Calculates the portions of the wavefront distortion due to z, theta
only, for a lens with defocus and spherical aberration induced by
coverslip mismatch. (The rho portion can be analytically integrated
to Bessels.)
Parameters
----------
cos_theta : numpy.ndarray.
The N values of cos(theta) at which to compute f_theta.
zint : Float
The position of the lens relative to the interface.
z : numpy.ndarray
The M z-values to compute f_theta at. `z.size` is unrelated
to `cos_theta.size`
n2n1 : Float, optional
The ratio of the index of the immersed medium to the optics.
Default is 0.95
sph6_ab : Float or None, optional
Set sph6_ab to a nonzero value to add residual 6th-order
spherical aberration that is proportional to sph6_ab. Default
is None (i.e. doesn't calculate).
Returns
-------
wvfront : numpy.ndarray
The aberrated wavefront, as a function of theta and z.
Shape is [z.size, cos_theta.size]
"""
wvfront = (np.outer(np.ones_like(z)*zint, cos_theta) -
np.outer(zint+z, csqrt(n2n1**2-1+cos_theta**2)))
if (sph6_ab is not None) and (not np.isnan(sph6_ab)):
sec2_theta = 1.0/(cos_theta*cos_theta)
wvfront += sph6_ab * (sec2_theta-1)*(sec2_theta-2)*cos_theta
#Ensuring evanescent waves are always suppressed:
if wvfront.dtype == np.dtype('complex128'):
wvfront.imag = -np.abs(wvfront.imag)
return wvfront

def get_Kprefactor(z, cos_theta, zint=100.0, n2n1=0.95, get_hdet=False,
**kwargs):
"""
Returns a prefactor in the electric field integral.
This is an internal function called by get_K. The returned prefactor
in the integrand is independent of which integral is being called;
it is a combination of the exp(1j*phase) and apodization.
Parameters
----------
z : numpy.ndarray
The values of z (distance along optical axis) at which to
calculate the prefactor. Size is unrelated to the size of
`cos_theta`
cos_theta : numpy.ndarray
The values of cos(theta) (i.e. position on the incoming
focal spherical wavefront) at which to calculate the
prefactor. Size is unrelated to the size of `z`
zint : Float, optional
The position of the optical interface, in units of 1/k.
Default is 100.
n2n1 : Float, optional
The ratio of the index mismatch between the optics (n1) and
the sample (n2). Default is 0.95
get_hdet : Bool, optional
Set to True to calculate the detection prefactor vs the
illumination prefactor (i.e. False to include apodization).
Default is False
Returns
-------
numpy.ndarray
The prefactor, of size [`z.size`, `cos_theta.size`], sampled
at the values [`z`, `cos_theta`]
"""
phase = f_theta(cos_theta, zint, z, n2n1=n2n1, **kwargs)
to_return = np.exp(-1j*phase)
if not get_hdet:
to_return *= np.outer(np.ones_like(z),np.sqrt(cos_theta))
return to_return

def get_K(rho, z, alpha=1.0, zint=100.0, n2n1=0.95, get_hdet=False, K=1,
Kprefactor=None, return_Kprefactor=False, npts=20, **kwargs):
"""
Calculates one of three electric field integrals.
Internal function for calculating point spread functions. Returns
one of three electric field integrals that describe the electric
field near the focus of a lens; these integrals appear in Hell's psf
calculation.
Parameters
----------
rho : numpy.ndarray
Rho in cylindrical coordinates, in units of 1/k.
z : numpy.ndarray
Z in cylindrical coordinates, in units of 1/k. `rho` and
`z` must be the same shape
alpha : Float, optional
The acceptance angle of the lens, on (0,pi/2). Default is 1.
zint : Float, optional
The distance of the lens' unaberrated focal point from the
optical interface, in units of 1/k. Default is 100.
n2n1 : Float, optional
The ratio n2/n1 of the index mismatch between the sample
(index n2) and the optical train (index n1). Must be on
[0,inf) but should be near 1. Default is 0.95
get_hdet : Bool, optional
Set to True to get the detection portion of the psf; False
to get the illumination portion of the psf. Default is False
K : {1, 2, 3}, optional
Which of the 3 integrals to evaluate. Default is 1
Kprefactor : numpy.ndarray or None
This array is calculated internally and optionally returned;
pass it back to avoid recalculation and increase speed. Default
is None, i.e. calculate it internally.
return_Kprefactor : Bool, optional
Set to True to also return the Kprefactor (parameter above)
to speed up the calculation for the next values of K. Default
is False
npts : Int, optional
The number of points to use for Gauss-Legendre quadrature of
the integral. Default is 20, which is a good number for x,y,z
less than 100 or so.
Returns
-------
kint : numpy.ndarray
The integral K_i; rho.shape numpy.array
[, Kprefactor] : numpy.ndarray
The prefactor that is independent of which integral is being
calculated but does depend on the parameters; can be passed
back to the function for speed.
Notes
-----
npts=20 gives double precision (no difference between 20, 30, and
doing all the integrals with scipy.quad). The integrals are only
over the acceptance angle of the lens, so for moderate x,y,z they
don't vary too rapidly. For x,y,z, zint large compared to 100, a
higher npts might be necessary.
"""
# Comments:
# This is the only function that relies on rho,z being numpy.arrays,
# and it's just in a flag that I've added.... move to psf?
if type(rho) != np.ndarray or type(z) != np.ndarray or (rho.shape != z.shape):
raise ValueError('rho and z must be np.arrays of same shape.')
pts, wts = np.polynomial.legendre.leggauss(npts)
n1n2 = 1.0/n2n1
rr = np.ravel(rho)
zr = np.ravel(z)
#Getting the array of points to quad at
cos_theta = 0.5*(1-np.cos(alpha))*pts+0.5*(1+np.cos(alpha))
#[cos_theta,rho,z]
if Kprefactor is None:
Kprefactor = get_Kprefactor(z, cos_theta, zint=zint, \
n2n1=n2n1,get_hdet=get_hdet, **kwargs)
if K==1:
part_1 = j0(np.outer(rr,np.sqrt(1-cos_theta**2)))*\
np.outer(np.ones_like(rr), 0.5*(get_taus(cos_theta,n2n1=n2n1)+\
get_taup(cos_theta,n2n1=n2n1)*csqrt(1-n1n2**2*(1-cos_theta**2))))
integrand = Kprefactor * part_1
elif K==2:
part_2=j2(np.outer(rr,np.sqrt(1-cos_theta**2)))*\
np.outer(np.ones_like(rr),0.5*(get_taus(cos_theta,n2n1=n2n1)-\
get_taup(cos_theta,n2n1=n2n1)*csqrt(1-n1n2**2*(1-cos_theta**2))))
integrand = Kprefactor * part_2
elif K==3:
part_3=j1(np.outer(rr,np.sqrt(1-cos_theta**2)))*\
np.outer(np.ones_like(rr), n1n2*get_taup(cos_theta,n2n1=n2n1)*\
np.sqrt(1-cos_theta**2))
integrand = Kprefactor * part_3
else:
raise ValueError('K=1,2,3 only...')
big_wts=np.outer(np.ones_like(rr), wts)
kint = (big_wts*integrand).sum(axis=1) * 0.5*(1-np.cos(alpha))
if return_Kprefactor:
return kint.reshape(rho.shape), Kprefactor
else:
return kint.reshape(rho.shape)

def get_hsym_asym(rho, z, get_hdet=False, include_K3_det=True, **kwargs):
"""
Calculates the symmetric and asymmetric portions of a confocal PSF.
Parameters
----------
rho : numpy.ndarray
Rho in cylindrical coordinates, in units of 1/k.
z : numpy.ndarray
Z in cylindrical coordinates, in units of 1/k. Must be the
same shape as `rho`
get_hdet : Bool, optional
Set to True to get the detection portion of the psf; False
to get the illumination portion of the psf. Default is False
include_K3_det : Bool, optional
Flag to not calculate the ``K3`` component for the detection
PSF, corresponding to (I think) a low-aperture focusing
lens and no z-polarization of the focused light. Default
is True, i.e. calculates the K3 component as if the focusing
lens is high-aperture
Other Parameters
----------------
alpha : Float, optional
The acceptance angle of the lens, on (0,pi/2). Default is 1.
zint : Float, optional
The distance of the lens' unaberrated focal point from the
optical interface, in units of 1/k. Default is 100.
n2n1 : Float, optional
The ratio n2/n1 of the index mismatch between the sample
(index n2) and the optical train (index n1). Must be on
[0,inf) but should be near 1. Default is 0.95
Returns
-------
hsym : numpy.ndarray
`rho`.shape numpy.array of the symmetric portion of the PSF
hasym : numpy.ndarray
`rho`.shape numpy.array of the asymmetric portion of the PSF
"""
K1, Kprefactor = get_K(rho, z, K=1, get_hdet=get_hdet, Kprefactor=None,
return_Kprefactor=True, **kwargs)
K2 = get_K(rho, z, K=2, get_hdet=get_hdet, Kprefactor=Kprefactor,
return_Kprefactor=False, **kwargs)
if get_hdet and not include_K3_det:
K3 = 0*K1
else:
K3 = get_K(rho, z, K=3, get_hdet=get_hdet, Kprefactor=Kprefactor,
return_Kprefactor=False, **kwargs)
hsym = K1*K1.conj() + K2*K2.conj() + 0.5*(K3*K3.conj())
hasym= K1*K2.conj() + K2*K1.conj() + 0.5*(K3*K3.conj())
return hsym.real, hasym.real

def calculate_pinhole_psf(x, y, z, kfki=0.89, zint=100.0, normalize=False,
**kwargs):
"""
Calculates the perfect-pinhole PSF, for a set of points (x,y,z).
Parameters
-----------
x : numpy.ndarray
The x-coordinate of the PSF in units of 1/ the wavevector of
the incoming light.
y : numpy.ndarray
The y-coordinate.
z : numpy.ndarray
The z-coordinate.
kfki : Float
The (scalar) ratio of wavevectors of the outgoing light to the
incoming light. Default is 0.89.
zint : Float
The (scalar) distance from the interface, in units of
1/k_incoming. Default is 100.0
normalize : Bool
Set to True to normalize the psf correctly, accounting for
intensity variations with depth. This will give a psf that does
not sum to 1.
Other Parameters
----------------
alpha : Float
The opening angle of the lens. Default is 1.
n2n1 : Float
The ratio of the index in the 2nd medium to that in the first.
Default is 0.95
Returns
-------
psf : numpy.ndarray, of shape x.shape
Comments
--------
(1) The PSF is not necessarily centered on the z=0 pixel, since the
calculation includes the shift.
(2) If you want z-varying illumination of the psf then set
normalize=True. This does the normalization by doing:
hsym, hasym /= hsym.sum()
hdet /= hdet.sum()
and then calculating the psf that way. So if you want the
intensity to be correct you need to use a large-ish array of
roughly equally spaced points. Or do it manually by calling
get_hsym_asym()
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
hsym, hasym = get_hsym_asym(rho, z, zint=zint, get_hdet=False, **kwargs)
hdet, toss = get_hsym_asym(rho*kfki, z*kfki, zint=kfki*zint, get_hdet=True, **kwargs)
if normalize:
hasym /= hsym.sum()
hsym /= hsym.sum()
hdet /= hdet.sum()
return (hsym + np.cos(2*phi)*hasym)*hdet

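# Sketch: evaluate the pinhole PSF on a small cubic grid (assumes `numpy as np`;
# coordinates are in units of 1/k of the incoming light):
x = np.linspace(-5.0, 5.0, 21)
xg, yg, zg = np.meshgrid(x, x, x, indexing='ij')
psf = calculate_pinhole_psf(xg, yg, zg, kfki=0.89, zint=100.0)
print(psf.shape)  # (21, 21, 21)
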
def get_polydisp_pts_wts(kfki, sigkf, dist_type='gaussian', nkpts=3):
"""
Calculates a set of Gauss quadrature points & weights for polydisperse
light.
Returns a list of points and weights of the final wavevector's
distribution, in units of the initial wavevector.
Parameters
----------
kfki : Float
The mean of the polydisperse outgoing wavevectors.
sigkf : Float
The standard dev. of the polydisperse outgoing wavevectors.
dist_type : {`gaussian`, `gamma`}, optional
The distribution, gaussian or gamma, of the wavevectors.
Default is `gaussian`
nkpts : Int, optional
The number of quadrature points to use. Default is 3
Returns
-------
kfkipts : numpy.ndarray
The Gauss quadrature points at which to calculate kfki.
wts : numpy.ndarray
The associated Gauss quadrature weights.
"""
if dist_type.lower() == 'gaussian':
pts, wts = np.polynomial.hermite.hermgauss(nkpts)
kfkipts = np.abs(kfki + sigkf*np.sqrt(2)*pts)
elif dist_type.lower() == 'laguerre' or dist_type.lower() == 'gamma':
k_scale = sigkf**2/kfki
associated_order = kfki**2/sigkf**2 - 1
#Associated Laguerre with alpha >~170 becomes numerically unstable, so:
max_order=150
if associated_order > max_order or associated_order < (-1+1e-3):
warnings.warn('Numerically unstable sigk, clipping', RuntimeWarning)
associated_order = np.clip(associated_order, -1+1e-3, max_order)
kfkipts, wts = la_roots(nkpts, associated_order)
kfkipts *= k_scale
else:
raise ValueError("dist_type must be one of 'gaussian', 'laguerre', or 'gamma'")
return kfkipts, wts/wts.sum()

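# Sketch check of the Gaussian branch: the weights are normalized and the
# first two moments reproduce (kfki, sigkf) when sigkf/kfki is small:
kpts, w = get_polydisp_pts_wts(0.89, 0.05, nkpts=5)
print(w.sum())                                             # -> 1.0
print(np.sum(w * kpts))                                    # ~ 0.89
print(np.sqrt(np.sum(w * kpts**2) - np.sum(w * kpts)**2))  # ~ 0.05
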
def calculate_polychrome_pinhole_psf(x, y, z, normalize=False, kfki=0.889,
sigkf=0.1, zint=100., nkpts=3, dist_type='gaussian', **kwargs):
"""
Calculates the perfect-pinhole PSF, for a set of points (x,y,z).
Parameters
-----------
x : numpy.ndarray
The x-coordinate of the PSF in units of 1/ the wavevector of
the incoming light.
y : numpy.ndarray
The y-coordinate.
z : numpy.ndarray
The z-coordinate.
kfki : Float
The mean ratio of the outgoing light's wavevector to the incoming
light's. Default is 0.89.
sigkf : Float
Standard deviation of kfki; the distribution of the light values
will be approximately kfki +- sigkf.
zint : Float
The (scalar) distance from the interface, in units of
1/k_incoming. Default is 100.0
dist_type : {'gaussian', 'gamma'}, optional
The distribution type of the polychromatic light. Can be one of
'laguerre'/'gamma' or 'gaussian'. If 'gaussian', the resulting
k-values are taken in absolute value. Default is 'gaussian'.
normalize : Bool
Set to True to normalize the psf correctly, accounting for
intensity variations with depth. This will give a psf that does
not sum to 1. Default is False.
Other Parameters
----------------
alpha : Float
The opening angle of the lens. Default is 1.
n2n1 : Float
The ratio of the index in the 2nd medium to that in the first.
Default is 0.95
Returns
-------
psf : numpy.ndarray, of shape x.shape
Comments
--------
(1) The PSF is not necessarily centered on the z=0 pixel, since the
calculation includes the shift.
(2) If you want z-varying illumination of the psf then set
normalize=True. This does the normalization by doing:
hsym, hasym /= hsym.sum()
hdet /= hdet.sum()
and then calculating the psf that way. So if you want the
intensity to be correct you need to use a large-ish array of
roughly equally spaced points. Or do it manually by calling
get_hsym_asym()
"""
#0. Setup
kfkipts, wts = get_polydisp_pts_wts(kfki, sigkf, dist_type=dist_type,
nkpts=nkpts)
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
#1. Hilm
hsym, hasym = get_hsym_asym(rho, z, zint=zint, get_hdet=False, **kwargs)
hilm = (hsym + np.cos(2*phi)*hasym)
#2. Hdet
hdet_func = lambda kfki: get_hsym_asym(rho*kfki, z*kfki,
zint=kfki*zint, get_hdet=True, **kwargs)[0]
inner = [wts[a] * hdet_func(kfkipts[a]) for a in range(nkpts)]
hdet = np.sum(inner, axis=0)
#3. Normalize and return
if normalize:
hilm /= hilm.sum()
hdet /= hdet.sum()
psf = hdet * hilm
return psf if normalize else psf / psf.sum()

def get_psf_scalar(x, y, z, kfki=1., zint=100.0, normalize=False, **kwargs):
"""
Calculates a scalar (non-vectorial light) approximation to a confocal PSF
The calculation is approximate, since it ignores the effects of
polarization and apodization, but should be ~3x faster.
Parameters
----------
x : numpy.ndarray
The x-coordinate of the PSF in units of 1/ the wavevector
of the incoming light.
y : numpy.ndarray
The y-coordinate of the PSF in units of 1/ the wavevector
of the incoming light. Must be the same shape as `x`.
z : numpy.ndarray
The z-coordinate of the PSF in units of 1/ the wavevector
of the incoming light. Must be the same shape as `x`.
kfki : Float, optional
The ratio of wavevectors of the outgoing light to the
incoming light. Set to 1.0 to speed up the calculation
by another factor of 2. Default is 1.0
zint : Float, optional
The distance from the optical interface, in units of
1/k_incoming. Default is 100.
normalize : Bool
Set to True to normalize the psf correctly, accounting for
intensity variations with depth. This will give a psf that does
not sum to 1. Default is False.
alpha : Float
The opening angle of the lens. Default is 1.
n2n1 : Float
The ratio of the index in the 2nd medium to that in the first.
Default is 0.95
Returns
-------
psf : x.shape numpy.array
Comments
--------
(1) Note that the PSF is not necessarily centered on the z=0 pixel,
since the calculation includes the shift.
(2) If you want z-varying illumination of the psf then set
normalize=True. This does the normalization by doing:
hsym, hasym /= hsym.sum()
hdet /= hdet.sum()
and then calculating the psf that way. So if you want the
intensity to be correct you need to use a large-ish array of
roughly equally spaced points. Or do it manually by calling
get_hsym_asym()
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
K1 = get_K(rho, z, K=1,zint=zint,get_hdet=True, **kwargs)
hilm = np.real( K1*K1.conj() )
if np.abs(kfki - 1.0) > 1e-13:
Kdet = get_K(rho*kfki, z*kfki, K=1, zint=zint*kfki, get_hdet=True,
**kwargs)
hdet = np.real( Kdet*Kdet.conj() )
else:
hdet = hilm.copy()
if normalize:
hilm /= hilm.sum()
hdet /= hdet.sum()
psf = hilm * hdet
else:
psf = hilm * hdet
# psf /= psf.sum()
return psf

def calculate_linescan_ilm_psf(y,z, polar_angle=0., nlpts=1,
pinhole_width=1, use_laggauss=False, **kwargs):
"""
Calculates the illumination PSF for a line-scanning confocal with the
confocal line oriented along the x direction.
Parameters
----------
y : numpy.ndarray
The y points (in-plane, perpendicular to the line direction)
at which to evaluate the illumination PSF, in units of 1/k.
Arbitrary shape.
z : numpy.ndarray
The z points (optical axis) at which to evaluate the illum-
ination PSF, in units of 1/k. Must be the same shape as `y`
polar_angle : Float, optional
The angle of the illuminating light's polarization with
respect to the line's orientation along x. Default is 0.
pinhole_width : Float, optional
The width of the geometric image of the line projected onto
the sample, in units of 1/k. Default is 1. The perfect line
image is assumed to be a Gaussian. If `nlpts` is set to 1,
the line will always be of zero width.
nlpts : Int, optional
The number of points to use for Hermite-gauss quadrature over
the line's width. Default is 1, corresponding to a zero-width
line.
use_laggauss : Bool, optional
Set to True to use a more-accurate sinh'd Laguerre-Gauss
quadrature for integration over the line's length (more accurate
in the same amount of time). Default is False for backwards
compatibility. FIXME what did we do here?
Other Parameters
----------------
alpha : Float, optional
The acceptance angle of the lens, on (0,pi/2). Default is 1.
zint : Float, optional
The distance of the lens' unaberrated focal point from the
optical interface, in units of 1/k. Default is 100.
n2n1 : Float, optional
The ratio n2/n1 of the index mismatch between the sample
(index n2) and the optical train (index n1). Must be on
[0,inf) but should be near 1. Default is 0.95
Returns
-------
hilm : numpy.ndarray
The line illumination, of the same shape as y and z.
"""
if use_laggauss:
x_vals, wts = calc_pts_lag()
else:
x_vals, wts = calc_pts_hg()
#I'm assuming that y,z are already some sort of meshgrid
xg, yg, zg = [np.zeros( list(y.shape) + [x_vals.size] ) for a in range(3)]
hilm = np.zeros(xg.shape)
for a in range(x_vals.size):
xg[...,a] = x_vals[a]
yg[...,a] = y.copy()
zg[...,a] = z.copy()
y_pinhole, wts_pinhole = np.polynomial.hermite.hermgauss(nlpts)
y_pinhole *= np.sqrt(2)*pinhole_width
wts_pinhole /= np.sqrt(np.pi)
#Pinhole hermgauss first:
for yp, wp in zip(y_pinhole, wts_pinhole):
rho = np.sqrt(xg*xg + (yg-yp)*(yg-yp))
phi = np.arctan2(yg,xg)
hsym, hasym = get_hsym_asym(rho,zg,get_hdet = False, **kwargs)
hilm += wp*(hsym + np.cos(2*(phi-polar_angle))*hasym)
#Now line hermgauss
for a in range(x_vals.size):
hilm[...,a] *= wts[a]
return hilm.sum(axis=-1)*2.

def calculate_linescan_psf(x, y, z, normalize=False, kfki=0.889, zint=100.,
polar_angle=0., wrap=True, **kwargs):
"""
Calculates the point spread function of a line-scanning confocal.
Make x,y,z __1D__ numpy.arrays, with x the direction along the
scan line. (to make the calculation faster since I don't need the line
ilm for each x).
Parameters
----------
x : numpy.ndarray
_One_dimensional_ array of the x grid points (along the line
illumination) at which to evaluate the psf. In units of
1/k_incoming.
y : numpy.ndarray
_One_dimensional_ array of the y grid points (in plane,
perpendicular to the line illumination) at which to evaluate
the psf. In units of 1/k_incoming.
z : numpy.ndarray
_One_dimensional_ array of the z grid points (along the
optical axis) at which to evaluate the psf. In units of
1/k_incoming.
normalize : Bool, optional
Set to True to include the effects of PSF normalization on
the image intensity. Default is False.
kfki : Float, optional
The ratio of the final light's wavevector to the incoming.
Default is 0.889
zint : Float, optional
The position of the optical interface, in units of 1/k_incoming
Default is 100.
wrap : Bool, optional
If True, wraps the psf calculation for speed, assuming that
the input x, y are regularly-spaced points. If x,y are not
regularly spaced then `wrap` must be set to False. Default is True.
polar_angle : Float, optional
The polarization angle of the light (radians) with respect to
the line direction (x). Default is 0.
Other Parameters
----------------
alpha : Float
The opening angle of the lens. Default is 1.
n2n1 : Float
The ratio of the index in the 2nd medium to that in the first.
Default is 0.95
Returns
-------
numpy.ndarray
A 3D- numpy.array of the point-spread function. Indexing is
psf[x,y,z]; shape is [x.size, y.size, z.size]
"""
#0. Set up vecs
if wrap:
xpts = vec_to_halfvec(x)
ypts = vec_to_halfvec(y)
x3, y3, z3 = np.meshgrid(xpts, ypts, z, indexing='ij')
else:
x3,y3,z3 = np.meshgrid(x, y, z, indexing='ij')
rho3 = np.sqrt(x3*x3 + y3*y3)
#1. Hilm
if wrap:
y2,z2 = np.meshgrid(ypts, z, indexing='ij')
hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint,
polar_angle=polar_angle, **kwargs)
if ypts[0] == 0:
hilm = np.append(hilm0[-1:0:-1], hilm0, axis=0)
else:
hilm = np.append(hilm0[::-1], hilm0, axis=0)
else:
y2,z2 = np.meshgrid(y, z, indexing='ij')
hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint,
polar_angle=polar_angle, **kwargs)
#2. Hdet
if wrap:
#Lambda function that ignores its args but still returns correct values
func = lambda *args: get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,
get_hdet=True, **kwargs)[0]
hdet = wrap_and_calc_psf(xpts, ypts, z, func)
else:
hdet, toss = get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,
get_hdet=True, **kwargs)
if normalize:
hilm /= hilm.sum()
hdet /= hdet.sum()
for a in range(x.size):
hdet[a] *= hilm
return hdet if normalize else hdet / hdet.sum()

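# Sketch: a linescan PSF on a small regular grid; wrap=True requires the
# regularly spaced x, y used here (assumes `numpy as np`):
x = np.arange(-4.0, 5.0)
y = np.arange(-4.0, 5.0)
z = np.arange(-2.0, 3.0)
psf = calculate_linescan_psf(x, y, z, kfki=0.889, zint=100.0)
print(psf.shape)  # (9, 9, 5)
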
def calculate_polychrome_linescan_psf(x, y, z, normalize=False, kfki=0.889,
sigkf=0.1, zint=100., nkpts=3, dist_type='gaussian', wrap=True,
**kwargs):
"""
Calculates the point spread function of a line-scanning confocal with
polydisperse dye emission.
Make x,y,z __1D__ numpy.arrays, with x the direction along the
scan line. (to make the calculation faster since I don't need the line
ilm for each x).
Parameters
----------
x : numpy.ndarray
_One_dimensional_ array of the x grid points (along the line
illumination) at which to evaluate the psf. In units of
1/k_incoming.
y : numpy.ndarray
_One_dimensional_ array of the y grid points (in plane,
perpendicular to the line illumination) at which to evaluate
the psf. In units of 1/k_incoming.
z : numpy.ndarray
_One_dimensional_ array of the z grid points (along the
optical axis) at which to evaluate the psf. In units of
1/k_incoming.
normalize : Bool, optional
Set to True to include the effects of PSF normalization on
the image intensity. Default is False.
kfki : Float, optional
The mean of the ratio of the final light's wavevector to the
incoming. Default is 0.889
sigkf : Float, optional
The standard deviation of the ratio of the final light's
wavevector to the incoming. Default is 0.1
zint : Float, optional
The position of the optical interface, in units of 1/k_incoming
Default is 100.
dist_type : {`gaussian`, `gamma`}, optional
The distribution of the outgoing light. If 'gaussian' the
resulting k-values are taken in absolute value. Default
is `gaussian`
wrap : Bool, optional
If True, wraps the psf calculation for speed, assuming that
the input x, y are regularly-spaced points. If x,y are not
regularly spaced then `wrap` must be set to False. Default is True.
Other Parameters
----------------
polar_angle : Float, optional
The polarization angle of the light (radians) with respect to
the line direction (x). Default is 0.
alpha : Float
The opening angle of the lens. Default is 1.
n2n1 : Float
The ratio of the index in the 2nd medium to that in the first.
Default is 0.95
Returns
-------
numpy.ndarray
A 3D- numpy.array of the point-spread function. Indexing is
psf[x,y,z]; shape is [x.size, y.size, z.size]
Notes
-----
Neither distribution type is perfect. If sigkf/k0 is big (>0.5ish)
then part of the Gaussian is negative. To avoid issues an abs() is
taken, but then the actual mean and variance are not what is
supplied. Conversely, if sigkf/k0 is small (<0.0815), then the
requisite associated Laguerre quadrature becomes unstable. To
prevent this sigkf/k0 is effectively clipped to be > 0.0815.
"""
kfkipts, wts = get_polydisp_pts_wts(kfki, sigkf, dist_type=dist_type,
nkpts=nkpts)
#0. Set up vecs
if wrap:
xpts = vec_to_halfvec(x)
ypts = vec_to_halfvec(y)
x3, y3, z3 = np.meshgrid(xpts, ypts, z, indexing='ij')
else:
x3,y3,z3 = np.meshgrid(x, y, z, indexing='ij')
rho3 = np.sqrt(x3*x3 + y3*y3)
#1. Hilm
if wrap:
y2,z2 = np.meshgrid(ypts, z, indexing='ij')
hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint, **kwargs)
if ypts[0] == 0:
hilm = np.append(hilm0[-1:0:-1], hilm0, axis=0)
else:
hilm = np.append(hilm0[::-1], hilm0, axis=0)
else:
y2,z2 = np.meshgrid(y, z, indexing='ij')
hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint, **kwargs)
#2. Hdet
if wrap:
#Lambda function that ignores its args but still returns correct values
func = lambda x,y,z, kfki=1.: get_hsym_asym(rho3*kfki, z3*kfki,
zint=kfki*zint, get_hdet=True, **kwargs)[0]
hdet_func = lambda kfki: wrap_and_calc_psf(xpts,ypts,z, func, kfki=kfki)
else:
hdet_func = lambda kfki: get_hsym_asym(rho3*kfki, z3*kfki,
zint=kfki*zint, get_hdet=True, **kwargs)[0]
#####
inner = [wts[a] * hdet_func(kfkipts[a]) for a in range(nkpts)]
hdet = np.sum(inner, axis=0)
if normalize:
hilm /= hilm.sum()
hdet /= hdet.sum()
for a in range(x.size):
hdet[a] *= hilm
return hdet if normalize else hdet / hdet.sum()

def wrap_and_calc_psf(xpts, ypts, zpts, func, **kwargs):
"""
Wraps a point-spread function in x and y.
Speeds up psf calculations by a factor of 4 for free / some broadcasting
by exploiting the x->-x, y->-y symmetry of a psf function. Pass x and y
as the positive (say) values of the coordinates at which to evaluate func,
and it will return the function sampled at [x[::-1]] + x. Note it is not
wrapped in z.
Parameters
----------
xpts : numpy.ndarray
1D N-element numpy.array of the x-points to evaluate func at.
ypts : numpy.ndarray
y-points to evaluate func at.
zpts : numpy.ndarray
z-points to evaluate func at.
func : function
The function to evaluate and wrap around. Syntax must be
func(x,y,z, **kwargs)
**kwargs : Any parameters passed to the function.
Returns
-------
to_return : numpy.ndarray
The wrapped and calculated psf, of shape
[2*x.size - x0, 2*y.size - y0, z.size], where x0=1 if x[0]=0, etc.
Notes
-----
The coordinates should be something like numpy.arange(start, stop, diff),
with start near 0. If x[0]==0, all of x is calculated but only x[1:]
is wrapped (i.e. it works whether or not x[0]=0).
This doesn't work directly for a linescan psf because the illumination
portion is not like a grid. However, the illumination and detection
are already combined with wrap_and_calc in calculate_linescan_psf etc.
"""
#1. Checking that everything is hunky-dory:
for t in [xpts,ypts,zpts]:
if len(t.shape) != 1:
raise ValueError('xpts,ypts,zpts must be 1D.')
dx = 1 if xpts[0]==0 else 0
dy = 1 if ypts[0]==0 else 0
xg,yg,zg = np.meshgrid(xpts,ypts,zpts, indexing='ij')
xs, ys, zs = [ pts.size for pts in [xpts,ypts,zpts] ]
to_return = np.zeros([2*xs-dx, 2*ys-dy, zs])
#2. Calculate:
up_corner_psf = func(xg,yg,zg, **kwargs)
to_return[xs-dx:,ys-dy:,:] = up_corner_psf.copy() #x>0, y>0
if dx == 0:
to_return[:xs-dx,ys-dy:,:] = up_corner_psf[::-1,:,:].copy() #x<0, y>0
else:
to_return[:xs-dx,ys-dy:,:] = up_corner_psf[-1:0:-1,:,:].copy() #x<0, y>0
if dy == 0:
to_return[xs-dx:,:ys-dy,:] = up_corner_psf[:,::-1,:].copy() #x>0, y<0
else:
to_return[xs-dx:,:ys-dy,:] = up_corner_psf[:,-1:0:-1,:].copy() #x>0, y<0
if (dx == 0) and (dy == 0):
to_return[:xs-dx,:ys-dy,:] = up_corner_psf[::-1,::-1,:].copy() #x<0,y<0
elif (dx == 0) and (dy != 0):
to_return[:xs-dx,:ys-dy,:] = up_corner_psf[::-1,-1:0:-1,:].copy() #x<0,y<0
elif (dy == 0) and (dx != 0):
to_return[:xs-dx,:ys-dy,:] = up_corner_psf[-1:0:-1,::-1,:].copy() #x<0,y<0
else: #dx==1 and dy==1
to_return[:xs-dx,:ys-dy,:] = up_corner_psf[-1:0:-1,-1:0:-1,:].copy()#x<0,y<0
return to_return

def vec_to_halfvec(vec):
"""Transforms a vector np.arange(-N, M, dx) to np.arange(min(|vec|), max(N,M),dx)]"""
d = vec[1:] - vec[:-1]
if ((d/d.mean()).std() > 1e-14) or (d.mean() < 0):
raise ValueError('vec must be np.arange() in increasing order')
dx = d.mean()
lowest = np.abs(vec).min()
highest = np.abs(vec).max()
return np.arange(lowest, highest + 0.1*dx, dx).astype(vec.dtype)

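# Sketch: fold a symmetric coordinate vector to its non-negative half, then
# wrap a separable even function back out with wrap_and_calc_psf:
x = np.arange(-3.0, 4.0)
half = vec_to_halfvec(x)
print(half)  # -> [0. 1. 2. 3.]
f = lambda x, y, z: np.exp(-(x*x + y*y + z*z))
full = wrap_and_calc_psf(half, half, np.arange(2.0), f)
print(full.shape)  # (7, 7, 2)
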
def listify(a):
"""
Convert a scalar ``a`` to a list and all iterables to list as well.
Examples
--------
>>> listify(0)
[0]
>>> listify([1,2,3])
[1, 2, 3]
>>> listify('a')
['a']
>>> listify(np.array([1,2,3]))
[1, 2, 3]
>>> listify('string')
['string']
"""
if a is None:
return []
elif not isinstance(a, (tuple, list, np.ndarray)):
return [a]
return list(a)

def delistify(a, b=None):
"""
If a single element list, extract the element as an object, otherwise
leave as it is.
Examples
--------
>>> delistify('string')
'string'
>>> delistify(['string'])
'string'
>>> delistify(['string', 'other'])
['string', 'other']
>>> delistify(np.array([1.0]))
1.0
>>> delistify([1, 2, 3])
[1, 2, 3]
"""
if isinstance(b, (tuple, list, np.ndarray)):
if isinstance(a, (tuple, list, np.ndarray)):
return type(b)(a)
return type(b)([a])
else:
if isinstance(a, (tuple, list, np.ndarray)) and len(a) == 1:
return a[0]
return a

def aN(a, dim=3, dtype='int'):
"""
Convert an integer or iterable list to numpy array of length dim. This func
is used to allow other methods to take both scalars and non-numpy arrays with
flexibility.
Parameters
----------
a : number, iterable, array-like
The object to convert to numpy array
dim : integer
The length of the resulting array
dtype : string or np.dtype
Type which the resulting array should be, e.g. 'float', np.int8
Returns
-------
arr : numpy array
Resulting numpy array of length ``dim`` and type ``dtype``
Examples
--------
>>> aN(1, dim=2, dtype='float')
array([ 1., 1.])
>>> aN(1, dtype='int')
array([1, 1, 1])
>>> aN(np.array([1,2,3]), dtype='float')
array([ 1., 2., 3.])
"""
if not hasattr(a, '__iter__'):
return np.array([a]*dim, dtype=dtype)
return np.array(a).astype(dtype)

def cdd(d, k):
""" Conditionally delete key (or list of keys) 'k' from dict 'd' """
if not isinstance(k, list):
k = [k]
for i in k:
if i in d:
d.pop(i)

def patch_docs(subclass, superclass):
"""
Apply the documentation from ``superclass`` to ``subclass`` by filling
in all overridden member function docstrings with those from the
parent class
"""
funcs0 = inspect.getmembers(subclass, predicate=inspect.ismethod)
funcs1 = inspect.getmembers(superclass, predicate=inspect.ismethod)
funcs1 = [f[0] for f in funcs1]
for name, func in funcs0:
if name.startswith('_'):
continue
if name not in funcs1:
continue
if func.__doc__ is None:
func = getattr(subclass, name)
func.__func__.__doc__ = getattr(superclass, name).__func__.__doc__

@contextlib.contextmanager  # assumed import; the yield-based body requires this decorator
def indir(path):
"""
Context manager for switching the current path of the process. Can be used:
with indir('/tmp'):
<do something in tmp>
"""
cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)

def slicer(self):
"""
Array slicer object for this tile
>>> Tile((2,3)).slicer
(slice(0, 2, None), slice(0, 3, None))
>>> np.arange(10)[Tile((4,)).slicer]
array([0, 1, 2, 3])
"""
return tuple(np.s_[l:r] for l,r in zip(*self.bounds))

def oslicer(self, tile):
""" Opposite slicer, the outer part wrt to a field """
mask = None
vecs = tile.coords(form='meshed')
for v in vecs:
v[self.slicer] = -1
mask = mask & (v > 0) if mask is not None else (v>0)
return tuple(np.array(i).astype('int') for i in zip(*[v[mask] for v in vecs]))

def kcenter(self):
""" Return the frequency center of the tile (says fftshift) """
return np.array([
np.abs(np.fft.fftshift(np.fft.fftfreq(q))).argmin()
for q in self.shape
]).astype('float')

def corners(self):
"""
Return the vectors of all corners of the hyperrectangle
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]])
"""
corners = []
for ind in itertools.product(*((0,1),)*self.dim):
ind = np.array(ind)
corners.append(self.l + ind*(self.r - self.l))
return np.array(corners)

def _format_vector(self, vecs, form='broadcast'):
"""
Format a 3d vector field in certain ways, see `coords` for a description
of each formatting method.
"""
if form == 'meshed':
return np.meshgrid(*vecs, indexing='ij')
elif form == 'vector':
vecs = np.meshgrid(*vecs, indexing='ij')
return np.rollaxis(np.array(np.broadcast_arrays(*vecs)),0,self.dim+1)
elif form == 'flat':
return vecs
else:
return [v[self._coord_slicers[i]] for i,v in enumerate(vecs)]

def coords(self, norm=False, form='broadcast'):
"""
Returns the coordinate vectors associated with the tile.
Parameters
-----------
norm : boolean, scalar, or iterable
can rescale the coordinates for you. False is no rescaling, True is
rescaling so that all coordinates are from 0 -> 1. If a scalar,
the same norm is applied uniformly, while if an iterable, each
scale is applied to each dimension.
form : string
In what form to return the vector array. Can be one of:
'broadcast' -- return 1D arrays that are broadcasted to be 3D
'flat' -- return array without broadcasting so each component
is 1D and the appropriate length as the tile
'meshed' -- arrays are explicitly broadcasted and so all have
a 3D shape, each the size of the tile.
'vector' -- array is meshed and combined into one array with
the vector components along last dimension [Nz, Ny, Nx, 3]
Examples
--------
>>> Tile(3, dim=2).coords(form='meshed')[0]
array([[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]])
>>> Tile(3, dim=2).coords(form='meshed')[1]
array([[ 0., 1., 2.],
[ 0., 1., 2.],
[ 0., 1., 2.]])
>>> Tile([4,5]).coords(form='vector').shape
(4, 5, 2)
>>> [i.shape for i in Tile((4,5), dim=2).coords(form='broadcast')]
[(4, 1), (1, 5)]
"""
if norm is False:
norm = 1
if norm is True:
norm = np.array(self.shape)
norm = aN(norm, self.dim, dtype='float')
v = list(np.arange(self.l[i], self.r[i]) / norm[i] for i in range(self.dim))
return self._format_vector(v, form=form)

def kvectors(self, norm=False, form='broadcast', real=False, shift=False):
"""
Return the kvectors associated with this tile, given the standard form
of -0.5 to 0.5. The `norm` and `form` arguments are the same as those
passed to `Tile.coords`.
Parameters
-----------
real : boolean
whether to return kvectors associated with the real fft instead
"""
if norm is False:
norm = 1
if norm is True:
norm = np.array(self.shape)
norm = aN(norm, self.dim, dtype='float')
v = list(np.fft.fftfreq(self.shape[i])/norm[i] for i in range(self.dim))
if shift:
v = list(np.fft.fftshift(t) for t in v)
if real:
v[-1] = v[-1][:(self.shape[-1]+1)//2]
return self._format_vector(v, form=form) |
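# Hedged shape check for `kvectors`: with real=True the last axis is truncated
# for the real FFT, here to (5+1)//2 = 3 samples. Assuming the Tile class from
# the surrounding snippets:
#
#   vz, vx = Tile((4, 5)).kvectors(form='flat', real=True)
#   vz.shape, vx.shape   # -> ((4,), (3,)) |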
def contains(self, items, pad=0):
"""
Test whether coordinates are contained within this tile.
Parameters
----------
items : ndarray [3] or [N, 3]
N coordinates to check are within the bounds of the tile
pad : integer or ndarray [3]
anisotropic padding to apply in the contain test
Examples
--------
>>> Tile(5, dim=2).contains([[-1, 0], [2, 3], [2, 6]])
array([False, True, False], dtype=bool)
"""
o = ((items >= self.l-pad) & (items < self.r+pad))
if len(o.shape) == 2:
o = o.all(axis=-1)
elif len(o.shape) == 1:
o = o.all()
return o |
def intersection(tiles, *args):
"""
Intersection of tiles, returned as a tile
>>> Tile.intersection(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))
Tile [1, 1] -> [4, 4] ([3, 3])
"""
tiles = listify(tiles) + listify(args)
if len(tiles) < 2:
return tiles[0]
tile = tiles[0]
l, r = tile.l.copy(), tile.r.copy()
for tile in tiles[1:]:
l = amax(l, tile.l)
r = amin(r, tile.r)
return Tile(l, r, dtype=l.dtype) |
def translate(self, dr):
"""
Translate a tile by an amount dr
>>> Tile(5).translate(1)
Tile [1, 1, 1] -> [6, 6, 6] ([5, 5, 5])
"""
tile = self.copy()
tile.l += dr
tile.r += dr
return tile |
def pad(self, pad):
"""
Pad this tile by an equal amount on each side as specified by pad
>>> Tile(10).pad(2)
Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14])
>>> Tile(10).pad([1,2,3])
Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16])
"""
tile = self.copy()
tile.l -= pad
tile.r += pad
return tile |
def overhang(self, tile):
"""
        Get the left and right absolute overflow -- the amount of this box
        overhanging `tile`. Can be viewed as self \\ tile (set-theoretic
        relative complement, but in a bounding sense).
"""
ll = np.abs(amin(self.l - tile.l, aN(0, dim=self.dim)))
rr = np.abs(amax(self.r - tile.r, aN(0, dim=self.dim)))
return ll, rr |
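# Worked example for `overhang`, using the Tile doctest conventions above: a
# tile padded beyond `clip` on every side overhangs it by the pad amount.
#
#   clip = Tile(10)      # [0, 0, 0] -> [10, 10, 10]
#   big = clip.pad(2)    # [-2, -2, -2] -> [12, 12, 12]
#   big.overhang(clip)   # -> (array([2, 2, 2]), array([2, 2, 2])) |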
def reflect_overhang(self, clip):
"""
        Compute the overhang and reflect it internally so as to respect
        periodic padding rules (see states._tile_from_particle_change).
        Returns both the inner tile and the outer (padded) tile.
"""
orig = self.copy()
tile = self.copy()
hangl, hangr = tile.overhang(clip)
tile = tile.pad(hangl)
tile = tile.pad(hangr)
inner = Tile.intersection([clip, orig])
outer = Tile.intersection([clip, tile])
return inner, outer |
def filtered_image(self, im):
"""Returns a filtered image after applying the Fourier-space filters"""
q = np.fft.fftn(im)
for k,v in self.filters:
q[k] -= v
return np.real(np.fft.ifftn(q)) |
def set_filter(self, slices, values):
"""
Sets Fourier-space filters for the image. The image is filtered by
subtracting values from the image at slices.
Parameters
----------
slices : List of indices or slice objects.
The q-values in Fourier space to filter.
values : np.ndarray
The complete array of Fourier space peaks to subtract off. values
should be the same size as the FFT of the image; only the portions
of values at slices will be removed.
Examples
--------
        To remove two Fourier peaks in the data at q=(10, 10, 10) &
(245, 245, 245), where im is the residuals of a model:
* slices = [(10,10,10), (245, 245, 245)]
* values = np.fft.fftn(im)
* im.set_filter(slices, values)
"""
self.filters = [[sl,values[sl]] for sl in slices] |
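# Hedged end-to-end sketch tying `set_filter` to `filtered_image` above; the
# image object `img` and arrays `residuals`/`raw_data` are assumptions based
# on the docstring's own example.
#
#   slices = [(10, 10, 10), (245, 245, 245)]
#   img.set_filter(slices, np.fft.fftn(residuals))
#   cleaned = img.filtered_image(raw_data)   # peaks subtracted in q-space |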
def load_image(self):
""" Read the file and perform any transforms to get a loaded image """
try:
image = initializers.load_tiff(self.filename)
image = initializers.normalize(
image, invert=self.invert, scale=self.exposure,
dtype=self.float_precision
)
except IOError as e:
log.error("Could not find image '%s'" % self.filename)
raise e
return image |
def get_scale(self):
"""
If exposure was not set in the __init__, get the exposure associated
        with this RawImage so that it may be used in other
        :class:`~peri.util.RawImage` objects. This is useful for transferring
        exposure parameters to a series of images.
Returns
-------
exposure : tuple of floats
The (emin, emax) which get mapped to (0, 1)
"""
if self.exposure is not None:
return self.exposure
raw = initializers.load_tiff(self.filename)
return raw.min(), raw.max() |
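# Hedged usage sketch for transferring exposure across a time series; the
# RawImage constructor's `exposure` keyword is an assumption based on the
# docstring above.
#
#   exposure = RawImage('series_t000.tif').get_scale()
#   img1 = RawImage('series_t001.tif', exposure=exposure) |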
def get_scale_from_raw(raw, scaled):
"""
When given a raw image and the scaled version of the same image, it
extracts the ``exposure`` parameters associated with those images.
    This is useful when, for example, transferring exposure parameters from
    one image in a series to the rest.
Parameters
----------
raw : array_like
The image loaded fresh from a file
scaled : array_like
Image scaled using :func:`peri.initializers.normalize`
Returns
-------
exposure : tuple of numbers
Returns the exposure parameters (emin, emax) which get mapped to
(0, 1) in the scaled image. Can be passed to
:func:`~peri.util.RawImage.__init__`
"""
t0, t1 = scaled.min(), scaled.max()
r0, r1 = float(raw.min()), float(raw.max())
rmin = (t1*r0 - t0*r1) / (t1 - t0)
rmax = (r1 - r0) / (t1 - t0) + rmin
return (rmin, rmax) |
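# Round-trip check for `get_scale_from_raw`, assuming a purely linear
# (unclipped) scaling; toy numbers, not from the library's test suite.
import numpy as np

raw = np.array([10., 20., 40.])
rmin, rmax = 15., 35.
scaled = (raw - rmin) / (rmax - rmin)
print(get_scale_from_raw(raw, scaled))   # -> (15.0, 35.0) |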
def _draw(self):
""" Interal draw method, simply prints to screen """
if self.display:
print(self._formatstr.format(**self.__dict__), end='')
sys.stdout.flush() |
def update(self, value=0):
"""
Update the value of the progress and update progress bar.
Parameters
-----------
value : integer
The current iteration of the progress
"""
self._deltas.append(time.time())
self.value = value
self._percent = 100.0 * self.value / self.num
if self.bar:
self._bars = self._bar_symbol*int(np.round(self._percent / 100. * self._barsize))
if (len(self._deltas) < 2) or (self._deltas[-1] - self._deltas[-2]) > 1e-1:
self._estimate_time()
self._draw()
if self.value == self.num:
self.end() |
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.register_blueprint(blueprint)
app.extensions['invenio-groups'] = self |
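# Hedged usage sketch of the standard Flask extension pattern implemented by
# `init_app` above (the InvenioGroups class name is an assumption based on
# the extension key 'invenio-groups'):
#
#   from flask import Flask
#   app = Flask(__name__)
#   InvenioGroups().init_app(app)
#   'invenio-groups' in app.extensions   # -> True |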
def check_consistency(self):
"""
Make sure that the required comps are included in the list of
components supplied by the user. Also check that the parameters are
consistent across the many components.
"""
error = False
regex = re.compile('([a-zA-Z_][a-zA-Z0-9_]*)')
    # there must at least be the full model; partial updates are optional
if 'full' not in self.modelstr:
raise ModelError(
'Model must contain a `full` key describing '
'the entire image formation'
)
# Check that the two model descriptors are consistent
for name, eq in iteritems(self.modelstr):
var = regex.findall(eq)
for v in var:
# remove the derivative signs if there (dP -> P)
v = re.sub(r"^d", '', v)
if v not in self.varmap:
log.error(
"Variable '%s' (eq. '%s': '%s') not found in category map %r" %
(v, name, eq, self.varmap)
)
error = True
if error:
raise ModelError('Inconsistent varmap and modelstr descriptions') |
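# Hedged illustration of what `check_consistency` enforces: a 'full' key must
# exist, and every symbol in every equation (after stripping a leading 'd')
# must appear in varmap. Names below are invented for illustration.
#
#   modelstr = {'full': 'H(I) + B', 'dI': 'H(dI)'}
#   varmap = {'H': 'psf', 'I': 'obj', 'B': 'bkg'}   # consistent -> passes
#   varmap_bad = {'H': 'psf', 'I': 'obj'}           # 'B' unmapped -> ModelError |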
def check_inputs(self, comps):
"""
Check that the list of components `comp` is compatible with both the
varmap and modelstr for this Model
"""
error = False
compcats = [c.category for c in comps]
# Check that the components are all provided, given the categories
for k, v in iteritems(self.varmap):
if k not in self.modelstr['full']:
log.warn('Component (%s : %s) not used in model.' % (k,v))
if v not in compcats:
log.error('Map component (%s : %s) not found in list of components.' % (k,v))
error = True
if error:
raise ModelError('Component list incomplete or incorrect') |
def get_difference_model(self, category):
"""
Get the equation corresponding to a variation wrt category. For example
if::
modelstr = {
'full' :'H(I) + B',
'dH' : 'dH(I)',
'dI' : 'H(dI)',
'dB' : 'dB'
}
varmap = {'H': 'psf', 'I': 'obj', 'B': 'bkg'}
then ``get_difference_model('obj') == modelstr['dI'] == 'H(dI)'``
"""
name = self.diffname(self.ivarmap[category])
return self.modelstr.get(name) |
def map_vars(self, comps, funcname='get', diffmap=None, **kwargs):
"""
        Map the result of each component's ``funcname`` into the model
        variables dictionary for use in the eval of the model. If ``diffmap``
        is provided, each of its keys (a category) is translated into the
        difference symbol 'd' + variable and assigned the corresponding
        diffmap value. ``**kwargs`` are passed to ``comp.funcname(**kwargs)``.
"""
out = {}
diffmap = diffmap or {}
for c in comps:
cat = c.category
if cat in diffmap:
symbol = self.diffname(self.ivarmap[cat])
out[symbol] = diffmap[cat]
else:
symbol = self.ivarmap[cat]
out[symbol] = getattr(c, funcname)(**kwargs)
return out |
def evaluate(self, comps, funcname='get', diffmap=None, **kwargs):
"""
        Calculate the output of a model. It is recommended that, at some point
        before using `evaluate`, you make sure the inputs are valid using
        :func:`~peri.models.Model.check_inputs`
Parameters
-----------
comps : list of :class:`~peri.comp.comp.Component`
Components which will be used to evaluate the model
        funcname : string (default: 'get')
            Name of the function to evaluate for each component to produce
            its output. That is, when each component is used in the
            evaluation, it is really ``getattr(comp, funcname)()`` which is used.
diffmap : dictionary
Extra mapping of derivatives or other symbols to extra variables.
For example, the difference in a component has been evaluated as
diff_obj so we set ``{'I': diff_obj}``
``**kwargs``:
Arguments passed to ``funcname`` of component objects
"""
evar = self.map_vars(comps, funcname, diffmap=diffmap)
if diffmap is None:
return eval(self.get_base_model(), evar)
else:
compname = list(diffmap.keys())[0]
return eval(self.get_difference_model(compname), evar) |
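# Hedged usage sketch of `evaluate` with the modelstr/varmap example from
# `get_difference_model` above; `psf`, `obj`, `bkg` stand in for components
# whose .category matches the varmap and whose .get() returns model inputs.
#
#   comps = [psf, obj, bkg]
#   model.check_inputs(comps)
#   image = model.evaluate(comps)                        # evals 'H(I) + B'
#   dimage = model.evaluate(comps, diffmap={'obj': dI})  # evals 'H(dI)' |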
def lbl(axis, label, size=22):
""" Put a figure label in an axis """
at = AnchoredText(label, loc=2, prop=dict(size=size), frameon=True)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.0")
axis.add_artist(at) |
def generative_model(s,x,y,z,r, factor=1.1):
"""
Samples x,y,z,r are created by:
b = s.blocks_particle(#)
h = runner.sample_state(s, b, stepout=0.05, N=2000, doprint=True)
z,y,x,r = h.get_histogram().T
"""
pl.close('all')
slicez = int(round(z.mean()))
slicex = s.image.shape[2]//2
slicer1 = np.s_[slicez,s.pad:-s.pad,s.pad:-s.pad]
slicer2 = np.s_[s.pad:-s.pad,s.pad:-s.pad,slicex]
center = (slicez, s.image.shape[1]//2, slicex)
fig = pl.figure(figsize=(factor*13,factor*10))
#=========================================================================
#=========================================================================
gs1 = ImageGrid(fig, rect=[0.0, 0.6, 1.0, 0.35], nrows_ncols=(1,3),
axes_pad=0.1)
ax_real = gs1[0]
ax_fake = gs1[1]
ax_diff = gs1[2]
diff = s.get_model_image() - s.image
ax_real.imshow(s.image[slicer1], cmap=pl.cm.bone_r)
ax_real.set_xticks([])
ax_real.set_yticks([])
ax_real.set_title("Confocal image", fontsize=24)
ax_fake.imshow(s.get_model_image()[slicer1], cmap=pl.cm.bone_r)
ax_fake.set_xticks([])
ax_fake.set_yticks([])
ax_fake.set_title("Model image", fontsize=24)
ax_diff.imshow(diff[slicer1], cmap=pl.cm.RdBu, vmin=-0.1, vmax=0.1)
ax_diff.set_xticks([])
ax_diff.set_yticks([])
ax_diff.set_title("Difference", fontsize=24)
#=========================================================================
#=========================================================================
gs2 = ImageGrid(fig, rect=[0.1, 0.0, 0.4, 0.55], nrows_ncols=(3,2),
axes_pad=0.1)
    # ImageGrid already constructs the axes; index it directly, as with gs1
    ax_plt1 = gs2[0]
    ax_plt2 = gs2[1]
    ax_ilm1 = gs2[2]
    ax_ilm2 = gs2[3]
    ax_psf1 = gs2[4]
    ax_psf2 = gs2[5]
c = int(z.mean()), int(y.mean())+s.pad, int(x.mean())+s.pad
if s.image.shape[0] > 2*s.image.shape[1]//3:
w = s.image.shape[2] - 2*s.pad
h = 2*w//3
else:
h = s.image.shape[0] - 2*s.pad
w = 3*h//2
w,h = w//2, h//2
xyslice = np.s_[slicez, c[1]-h:c[1]+h, c[2]-w:c[2]+w]
yzslice = np.s_[c[0]-h:c[0]+h, c[1]-w:c[1]+w, slicex]
ax_plt1.imshow(1-s.obj.get_field()[xyslice], cmap=pl.cm.bone_r, vmin=0, vmax=1)
ax_plt1.set_xticks([])
ax_plt1.set_yticks([])
ax_plt1.set_ylabel("Platonic", fontsize=22)
ax_plt1.set_title("x-y", fontsize=24)
ax_plt2.imshow(1-s._platonic_image()[yzslice], cmap=pl.cm.bone_r, vmin=0, vmax=1)
ax_plt2.set_xticks([])
ax_plt2.set_yticks([])
ax_plt2.set_title("y-z", fontsize=24)
ax_ilm1.imshow(s.ilm.get_field()[xyslice], cmap=pl.cm.bone_r)
ax_ilm1.set_xticks([])
ax_ilm1.set_yticks([])
ax_ilm1.set_ylabel("ILM", fontsize=22)
ax_ilm2.imshow(s.ilm.get_field()[yzslice], cmap=pl.cm.bone_r)
ax_ilm2.set_xticks([])
ax_ilm2.set_yticks([])
t = s.ilm.get_field().copy()
t *= 0
t[c] = 1
s.psf.set_tile(util.Tile(t.shape))
psf = (s.psf.execute(t)+5e-5)**0.1
ax_psf1.imshow(psf[xyslice], cmap=pl.cm.bone)
ax_psf1.set_xticks([])
ax_psf1.set_yticks([])
ax_psf1.set_ylabel("PSF", fontsize=22)
ax_psf2.imshow(psf[yzslice], cmap=pl.cm.bone)
ax_psf2.set_xticks([])
ax_psf2.set_yticks([])
#=========================================================================
#=========================================================================
ax_zoom = fig.add_axes([0.48, 0.018, 0.45, 0.52])
#s.model_to_true_image()
im = s.image[slicer1]
sh = np.array(im.shape)
cx = x.mean()
cy = y.mean()
extent = [0,sh[0],0,sh[1]]
ax_zoom.set_xticks([])
ax_zoom.set_yticks([])
ax_zoom.imshow(im, extent=extent, cmap=pl.cm.bone_r)
ax_zoom.set_xlim(cx-12, cx+12)
ax_zoom.set_ylim(cy-12, cy+12)
ax_zoom.set_title("Sampled positions", fontsize=24)
ax_zoom.hexbin(x,y, gridsize=32, mincnt=0, cmap=pl.cm.hot)
zoom1 = zoomed_inset_axes(ax_zoom, 30, loc=3)
zoom1.imshow(im, extent=extent, cmap=pl.cm.bone_r)
zoom1.set_xlim(cx-1.0/6, cx+1.0/6)
zoom1.set_ylim(cy-1.0/6, cy+1.0/6)
zoom1.hexbin(x,y,gridsize=32, mincnt=5, cmap=pl.cm.hot)
zoom1.set_xticks([])
zoom1.set_yticks([])
zoom1.hlines(cy-1.0/6 + 1.0/32, cx-1.0/6+5e-2, cx-1.0/6+5e-2+1e-1, lw=3)
zoom1.text(cx-1.0/6 + 1.0/24, cy-1.0/6+5e-2, '0.1px')
mark_inset(ax_zoom, zoom1, loc1=2, loc2=4, fc="none", ec="0.0") |
def examine_unexplained_noise(state, bins=1000, xlim=(-10,10)):
"""
Compares a state's residuals in real and Fourier space with a Gaussian.
    Note that the Fourier-space residuals should always be Gaussian and white.
Parameters
----------
state : `peri.states.State`
The state to examine.
bins : int or sequence of scalars or str, optional
The number of bins in the histogram, as passed to numpy.histogram
Default is 1000
xlim : 2-element tuple, optional
The range, in sigma, of the x-axis on the plot. Default (-10,10).
Returns
-------
list
The axes handles for the real and Fourier space subplots.
"""
r = state.residuals
q = np.fft.fftn(r)
#Get the expected values of `sigma`:
calc_sig = lambda x: np.sqrt(np.dot(x,x) / x.size)
rh, xr = np.histogram(r.ravel() / calc_sig(r.ravel()), bins=bins,
density=True)
bigq = np.append(q.real.ravel(), q.imag.ravel())
qh, xq = np.histogram(bigq / calc_sig(q.real.ravel()), bins=bins,
density=True)
xr = 0.5*(xr[1:] + xr[:-1])
xq = 0.5*(xq[1:] + xq[:-1])
gauss = lambda t : np.exp(-t*t*0.5) / np.sqrt(2*np.pi)
plt.figure(figsize=[16,8])
axes = []
    for a, (x, h, space) in enumerate([[xr, rh, 'Real'], [xq, qh, 'Fourier']]):
        ax = plt.subplot(1,2,a+1)
        ax.semilogy(x, h, label='Data')
        ax.plot(x, gauss(x), label='Gauss Fit', scalex=False, scaley=False)
        ax.set_xlabel(r'Residuals value $r/\sigma$')
        ax.set_ylabel(r'Probability $P(r/\sigma)$')
        ax.legend(loc='upper right')
        ax.set_title('{}-Space'.format(space))
ax.set_xlim(xlim)
axes.append(ax)
return axes |
def compare_data_model_residuals(s, tile, data_vmin='calc', data_vmax='calc',
res_vmin=-0.1, res_vmax=0.1, edgepts='calc', do_imshow=True,
data_cmap=plt.cm.bone, res_cmap=plt.cm.RdBu):
"""
Compare the data, model, and residuals of a state.
Makes an image of any 2D slice of a state that compares the data,
model, and residuals. The upper left portion of the image is the raw
data, the central portion the model, and the lower right portion the
image. Either plots the image using plt.imshow() or returns a
np.ndarray of the image pixels for later use.
Parameters
----------
    s : peri.ImageState object
The state to plot.
tile : peri.util.Tile object
The slice of the image to plot. Can be any xy, xz, or yz
projection, but it must return a valid 2D slice (the slice is
squeezed internally).
data_vmin : {Float, `calc`}, optional
vmin for the imshow for the data and generative model (shared).
Default is 'calc' = 0.5(data.min() + model.min())
data_vmax : {Float, `calc`}, optional
vmax for the imshow for the data and generative model (shared).
Default is 'calc' = 0.5(data.max() + model.max())
    res_vmin : Float, optional
        vmin for the imshow for the residuals. Default is -0.1.
res_vmax : Float, optional
vmax for the imshow for the residuals. Default is +0.1
edgepts : {Nested list-like, Float, 'calc'}, optional.
The vertices of the triangles which determine the splitting of
the image. The vertices are at (image corner, (edge, y), and
(x,edge), where edge is the appropriate edge of the image.
edgepts[0] : (x,y) points for the upper edge
edgepts[1] : (x,y) points for the lower edge
where `x` is the coordinate along the image's 0th axis and `y`
along the images 1st axis. Default is 'calc,' which calculates
edge points by splitting the image into 3 regions of equal
area. If edgepts is a float scalar, calculates the edge points
based on a constant fraction of distance from the edge.
    do_imshow : Bool
        If True, plots the image with plt.imshow and returns the handle.
        If False, returns the image as an [M,N,4] array.
data_cmap : matplotlib colormap instance
The colormap to use for the data and model.
res_cmap : matplotlib colormap instance
The colormap to use for the residuals.
Returns
-------
image : {matplotlib.pyplot.AxesImage, numpy.ndarray}
If `do_imshow` == True, the returned handle from imshow.
If `do_imshow` == False, an [M,N,4] np.ndarray of the image
pixels.
"""
# This could be modified to alpha the borderline... or to embiggen
# the image and slice it more finely
residuals = s.residuals[tile.slicer].squeeze()
data = s.data[tile.slicer].squeeze()
model = s.model[tile.slicer].squeeze()
if data.ndim != 2:
raise ValueError('tile does not give a 2D slice')
im = np.zeros([data.shape[0], data.shape[1], 4])
if data_vmin == 'calc':
data_vmin = 0.5*(data.min() + model.min())
if data_vmax == 'calc':
data_vmax = 0.5*(data.max() + model.max())
#1. Get masks:
upper_mask, center_mask, lower_mask = trisect_image(im.shape, edgepts)
#2. Get colorbar'd images
gm = data_cmap(center_data(model, data_vmin, data_vmax))
dt = data_cmap(center_data(data, data_vmin, data_vmax))
rs = res_cmap(center_data(residuals, res_vmin, res_vmax))
for a in range(4):
im[:,:,a][upper_mask] = rs[:,:,a][upper_mask]
im[:,:,a][center_mask] = gm[:,:,a][center_mask]
im[:,:,a][lower_mask] = dt[:,:,a][lower_mask]
if do_imshow:
return plt.imshow(im)
else:
return im |
def trisect_image(imshape, edgepts='calc'):
"""
Returns 3 masks that trisect an image into 3 triangular portions.
Parameters
----------
imshape : 2-element list-like of ints
The shape of the image. Elements after the first 2 are ignored.
edgepts : Nested list-like, float, or `calc`, optional.
The vertices of the triangles which determine the splitting of
the image. The vertices are at (image corner, (edge, y), and
(x,edge), where edge is the appropriate edge of the image.
edgepts[0] : (x,y) points for the upper edge
edgepts[1] : (x,y) points for the lower edge
where `x` is the coordinate along the image's 0th axis and `y`
along the images 1st axis. Default is 'calc,' which calculates
edge points by splitting the image into 3 regions of equal
area. If edgepts is a float scalar, calculates the edge points
based on a constant fraction of distance from the edge.
Returns
-------
upper_mask : numpy.ndarray
Boolean array; True in the image's upper region.
center_mask : numpy.ndarray
Boolean array; True in the image's center region.
lower_mask : numpy.ndarray
Boolean array; True in the image's lower region.
"""
im_x, im_y = np.meshgrid(np.arange(imshape[0]), np.arange(imshape[1]),
indexing='ij')
if np.size(edgepts) == 1:
#Gets equal-area sections, at sqrt(2/3) of the sides
f = np.sqrt(2./3.) if edgepts == 'calc' else edgepts
lower_edge = (imshape[0] * (1-f), imshape[1] * f)
upper_edge = (imshape[0] * f, imshape[1] * (1-f))
else:
upper_edge, lower_edge = edgepts
#1. Get masks
lower_slope = lower_edge[1] / max(float(imshape[0] - lower_edge[0]), 1e-9)
upper_slope = (imshape[1] - upper_edge[1]) / float(upper_edge[0])
#and the edge points are the x or y intercepts
lower_intercept = -lower_slope * lower_edge[0]
upper_intercept = upper_edge[1]
lower_mask = im_y < (im_x * lower_slope + lower_intercept)
upper_mask = im_y > (im_x * upper_slope + upper_intercept)
    center_mask = ~(lower_mask | upper_mask)
return upper_mask, center_mask, lower_mask |
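# Partition check for `trisect_image`: since the center mask is the complement
# of the other two, and the two bounding lines cannot cross, the three masks
# should tile the image exactly (toy shape).
import numpy as np

u, c, l = trisect_image((50, 80))
assert np.all(u | c | l)                         # every pixel covered
assert not np.any((u & l) | (u & c) | (l & c))   # and no overlaps |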
def center_data(data, vmin, vmax):
"""Clips data on [vmin, vmax]; then rescales to [0,1]"""
ans = data - vmin
ans /= (vmax - vmin)
return np.clip(ans, 0, 1) |
def sim_crb_diff(std0, std1, N=10000):
""" each element of std0 should correspond with the element of std1 """
a = std0*np.random.randn(N, len(std0))
b = std1*np.random.randn(N, len(std1))
return a - b |
def crb_compare(state0, samples0, state1, samples1, crb0=None, crb1=None,
zlayer=None, xlayer=None):
"""
To run, do:
s,h = pickle...
s1,h1 = pickle...
i.e. /media/scratch/bamf/vacancy/vacancy_zoom-1.tif_t002.tif-featured-v2.pkl
i.e. /media/scratch/bamf/frozen-particles/0.tif-featured-full.pkl
crb0 = diag_crb_particles(s); crb1 = diag_crb_particles(s1)
crb_compare(s,h[-25:],s1,h1[-25:], crb0, crb1)
"""
s0 = state0
s1 = state1
h0 = np.array(samples0)
h1 = np.array(samples1)
slicez = zlayer or s0.image.shape[0]//2
slicex = xlayer or s0.image.shape[2]//2
slicer1 = np.s_[slicez,s0.pad:-s0.pad,s0.pad:-s0.pad]
slicer2 = np.s_[s0.pad:-s0.pad,s0.pad:-s0.pad,slicex]
center = (slicez, s0.image.shape[1]//2, slicex)
mu0 = h0.mean(axis=0)
mu1 = h1.mean(axis=0)
std0 = h0.std(axis=0)
std1 = h1.std(axis=0)
mask0 = (s0.state[s0.b_typ]==1.) & (
analyze.trim_box(s0, mu0[s0.b_pos].reshape(-1,3)))
mask1 = (s1.state[s1.b_typ]==1.) & (
analyze.trim_box(s1, mu1[s1.b_pos].reshape(-1,3)))
    active0 = np.arange(s0.N)[mask0]
    active1 = np.arange(s1.N)[mask1]
pos0 = mu0[s0.b_pos].reshape(-1,3)[active0]
pos1 = mu1[s1.b_pos].reshape(-1,3)[active1]
rad0 = mu0[s0.b_rad][active0]
rad1 = mu1[s1.b_rad][active1]
link = analyze.nearest(pos0, pos1)
dpos = pos0 - pos1[link]
drad = rad0 - rad1[link]
drift = dpos.mean(axis=0)
log.info('drift {}'.format(drift))
dpos -= drift
fig = pl.figure(figsize=(24,10))
#=========================================================================
#=========================================================================
gs0 = ImageGrid(fig, rect=[0.02, 0.4, 0.4, 0.60], nrows_ncols=(2,3), axes_pad=0.1)
lbl(gs0[0], 'A')
for i,slicer in enumerate([slicer1, slicer2]):
ax_real = gs0[3*i+0]
ax_fake = gs0[3*i+1]
ax_diff = gs0[3*i+2]
diff0 = s0.get_model_image() - s0.image
diff1 = s1.get_model_image() - s1.image
a = (s0.image - s1.image)
b = (s0.get_model_image() - s1.get_model_image())
c = (diff0 - diff1)
ptp = 0.7*max([np.abs(a).max(), np.abs(b).max(), np.abs(c).max()])
cmap = pl.cm.RdBu_r
ax_real.imshow(a[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)
ax_real.set_xticks([])
ax_real.set_yticks([])
ax_fake.imshow(b[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)
ax_fake.set_xticks([])
ax_fake.set_yticks([])
        ax_diff.imshow(c[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)
ax_diff.set_xticks([])
ax_diff.set_yticks([])
if i == 0:
ax_real.set_title(r"$\Delta$ Confocal image", fontsize=24)
ax_fake.set_title(r"$\Delta$ Model image", fontsize=24)
ax_diff.set_title(r"$\Delta$ Difference", fontsize=24)
ax_real.set_ylabel('x-y')
else:
ax_real.set_ylabel('x-z')
#=========================================================================
#=========================================================================
gs1 = GridSpec(1,3, left=0.05, bottom=0.125, right=0.42, top=0.37,
wspace=0.15, hspace=0.05)
spos0 = std0[s0.b_pos].reshape(-1,3)[active0]
spos1 = std1[s1.b_pos].reshape(-1,3)[active1]
srad0 = std0[s0.b_rad][active0]
srad1 = std1[s1.b_rad][active1]
def hist(ax, vals, bins, *args, **kwargs):
y,x = np.histogram(vals, bins=bins)
x = (x[1:] + x[:-1])/2
        y = y / len(vals)  # np.histogram counts are ints; avoid in-place int division
ax.plot(x,y, *args, **kwargs)
def pp(ind, tarr, tsim, tcrb, var='x'):
bins = 10**np.linspace(-3, 0.0, 30)
bin2 = 10**np.linspace(-3, 0.0, 100)
bins = np.linspace(0.0, 0.2, 30)
bin2 = np.linspace(0.0, 0.2, 100)
xlim = (0.0, 0.12)
#xlim = (1e-3, 1e0)
ylim = (1e-2, 30)
ticks = ticker.FuncFormatter(lambda x, pos: '{:0.0f}'.format(np.log10(x)))
scaler = lambda x: x #np.log10(x)
ax_crb = pl.subplot(gs1[0,ind])
        ax_crb.hist(scaler(np.abs(tarr)), bins=bins,
                density=True, alpha=0.7, histtype='stepfilled', lw=1)
        ax_crb.hist(scaler(np.abs(tcrb)).ravel(), bins=bin2,
                density=True, alpha=1.0, histtype='step', ls='solid', lw=1.5, color='k')
        ax_crb.hist(scaler(np.abs(tsim).ravel()), bins=bin2,
                density=True, alpha=1.0, histtype='step', lw=3)
ax_crb.set_xlabel(r"$\Delta = |%s(t_1) - %s(t_0)|$" % (var,var), fontsize=24)
#ax_crb.semilogx()
ax_crb.set_xlim(xlim)
#ax_crb.semilogy()
#ax_crb.set_ylim(ylim)
#ax_crb.xaxis.set_major_formatter(ticks)
        ax_crb.grid(False, which='both', axis='both')
if ind == 0:
lbl(ax_crb, 'B')
ax_crb.set_ylabel(r"$P(\Delta)$")
else:
ax_crb.set_yticks([])
ax_crb.locator_params(axis='x', nbins=3)
f,g = 1.5, 1.95
sim = f*sim_crb_diff(spos0[:,1], spos1[:,1][link])
crb = g*sim_crb_diff(crb0[0][:,1][active0], crb1[0][:,1][active1][link])
pp(0, dpos[:,1], sim, crb, 'x')
sim = f*sim_crb_diff(spos0[:,0], spos1[:,0][link])
crb = g*sim_crb_diff(crb0[0][:,0][active0], crb1[0][:,0][active1][link])
pp(1, dpos[:,0], sim, crb, 'z')
sim = f*sim_crb_diff(srad0, srad1[link])
crb = g*sim_crb_diff(crb0[1][active0], crb1[1][active1][link])
pp(2, drad, sim, crb, 'a')
#ax_crb_r.locator_params(axis='both', nbins=3)
#gs1.tight_layout(fig)
#=========================================================================
#=========================================================================
gs2 = GridSpec(2,2, left=0.48, bottom=0.12, right=0.99, top=0.95,
wspace=0.35, hspace=0.35)
ax_hist = pl.subplot(gs2[0,0])
ax_hist.hist(std0[s0.b_pos], bins=np.logspace(-3.0, 0, 50), alpha=0.7, label='POS', histtype='stepfilled')
ax_hist.hist(std0[s0.b_rad], bins=np.logspace(-3.0, 0, 50), alpha=0.7, label='RAD', histtype='stepfilled')
ax_hist.set_xlim((10**-3.0, 1))
ax_hist.semilogx()
ax_hist.set_xlabel(r"$\bar{\sigma}$")
ax_hist.set_ylabel(r"$P(\bar{\sigma})$")
ax_hist.legend(loc='upper right')
lbl(ax_hist, 'C')
imdiff = ((s0.get_model_image() - s0.image)/s0._sigma_field)[s0.image_mask==1.].ravel()
mu = imdiff.mean()
#sig = imdiff.std()
#print mu, sig
x = np.linspace(-5,5,10000)
ax_diff = pl.subplot(gs2[0,1])
ax_diff.plot(x, 1.0/np.sqrt(2*np.pi) * np.exp(-(x-mu)**2 / 2), '-', alpha=0.7, color='k', lw=2)
    ax_diff.hist(imdiff, bins=1000, histtype='step', alpha=0.7, density=True)
ax_diff.semilogy()
ax_diff.set_ylabel(r"$P(\delta)$")
ax_diff.set_xlabel(r"$\delta = (M_i - d_i)/\sigma_i$")
ax_diff.locator_params(axis='x', nbins=5)
    ax_diff.grid(False, which='minor', axis='y')
ax_diff.set_xlim(-5, 5)
ax_diff.set_ylim(1e-4, 1e0)
lbl(ax_diff, 'D')
pos = mu0[s0.b_pos].reshape(-1,3)
rad = mu0[s0.b_rad]
mask = analyze.trim_box(s0, pos)
pos = pos[mask]
rad = rad[mask]
gx, gy = analyze.gofr(pos, rad, mu0[s0.b_zscale][0], resolution=5e-2,mask_start=0.5)
mask = gx < 5
gx = gx[mask]
gy = gy[mask]
ax_gofr = pl.subplot(gs2[1,0])
ax_gofr.plot(gx, gy, '-', lw=1)
ax_gofr.set_xlabel(r"$r/a$")
ax_gofr.set_ylabel(r"$g(r/a)$")
ax_gofr.locator_params(axis='both', nbins=5)
#ax_gofr.semilogy()
lbl(ax_gofr, 'E')
gx, gy = analyze.gofr(pos, rad, mu0[s0.b_zscale][0], method='surface')
mask = gx < 5
gx = gx[mask]
gy = gy[mask]
gy[gy <= 0.] = gy[gy>0].min()
ax_gofrs = pl.subplot(gs2[1,1])
ax_gofrs.plot(gx, gy, '-', lw=1)
ax_gofrs.set_xlabel(r"$r/a$")
ax_gofrs.set_ylabel(r"$g_{\rm{surface}}(r/a)$")
ax_gofrs.locator_params(axis='both', nbins=5)
    ax_gofrs.grid(False, which='minor', axis='y')
#ax_gofrs.semilogy()
lbl(ax_gofrs, 'F')
ylim = ax_gofrs.get_ylim()
ax_gofrs.set_ylim(gy.min(), ylim[1]) |
def twoslice(field, center=None, size=6.0, cmap='bone_r', vmin=0, vmax=1,
orientation='vertical', figpad=1.09, off=0.01):
"""
Plot two parts of the ortho view, the two sections given by ``orientation``.
"""
center = center or [i//2 for i in field.shape]
slices = []
for i,c in enumerate(center):
blank = [np.s_[:]]*len(center)
blank[i] = c
slices.append(tuple(blank))
z,y,x = [float(i) for i in field.shape]
w = float(x + z)
h = float(y + z)
def show(field, ax, slicer, transpose=False):
tmp = field[slicer] if not transpose else field[slicer].T
ax.imshow(
tmp, cmap=cmap, interpolation='nearest',
vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
        ax.grid(False)
if orientation.startswith('v'):
# rect = l,b,w,h
log.info('{} {} {} {} {} {}'.format(x, y, z, w, h, x/h))
r = x/h
q = y/h
f = 1 / (1 + 3*off)
fig = pl.figure(figsize=(size*r, size*f))
ax1 = fig.add_axes((off, f*(1-q)+2*off, f, f*q))
ax2 = fig.add_axes((off, off, f, f*(1-q)))
show(field, ax1, slices[0])
show(field, ax2, slices[1])
else:
# rect = l,b,w,h
r = y/w
q = x/w
f = 1 / (1 + 3*off)
fig = pl.figure(figsize=(size*f, size*r))
ax1 = fig.add_axes((off, off, f*q, f))
ax2 = fig.add_axes((2*off+f*q, off, f*(1-q), f))
show(field, ax1, slices[0])
show(field, ax2, slices[2], transpose=True)
return fig, ax1, ax2 |
def circles(st, layer, axis, ax=None, talpha=1.0, cedge='white', cface='white'):
"""
Plots a set of circles corresponding to a slice through the platonic
structure. Copied from twoslice_overlay with comments, standaloneness.
    Parameters
    ----------
    st : peri.ImageState instance
        Particle positions and radii are taken from st.obj_get_positions()
        and st.obj_get_radii().
    layer : Which layer of the slice to use.
    axis : The axis along which to slice the image, 0, 1, or 2.
    ax : plt.axis instance, or None to create a new figure.
    talpha : alpha (transparency) of the drawn circles
    cedge : edge color
    cface : face color
"""
pos = st.obj_get_positions()
rad = st.obj_get_radii()
shape = st.ishape.shape.tolist()
shape.pop(axis) #shape is now the shape of the image
if ax is None:
fig = plt.figure()
axisbg = 'white' if cface == 'black' else 'black'
sx, sy = ((1,shape[1]/float(shape[0])) if shape[0] > shape[1] else
(shape[0]/float(shape[1]), 1))
        ax = fig.add_axes((0,0, sx, sy), facecolor=axisbg)
# get the index of the particles we want to include
particles = np.arange(len(pos))[np.abs(pos[:,axis] - layer) < rad]
# for each of these particles display the effective radius
# in the proper place
scale = 1.0 #np.max(shape).astype('float')
for i in particles:
p = pos[i].copy()
r = 2*np.sqrt(rad[i]**2 - (p[axis] - layer)**2)
#CIRCLE IS IN FIGURE COORDINATES!!!
if axis==0:
ix = 1; iy = 2
elif axis == 1:
ix = 0; iy = 2
elif axis==2:
ix = 0; iy = 1
c = Circle((p[ix]/scale, p[iy]/scale), radius=r/2/scale, fc=cface,
ec=cedge, alpha=talpha)
ax.add_patch(c)
# plt.axis([0,1,0,1])
plt.axis('equal') #circles not ellipses
return ax |
def missing_particle(separation=0.0, radius=RADIUS, SNR=20):
""" create a two particle state and compare it to featuring using a single particle guess """
# create a base image of one particle
s = init.create_two_particle_state(imsize=6*radius+4, axis='x', sigma=1.0/SNR,
delta=separation, radius=radius, stateargs={'varyn': True}, psfargs={'error': 1e-6})
s.obj.typ[1] = 0.
s.reset()
return s, s.obj.pos.copy() |
def get_rand_Japprox(s, params, num_inds=1000, include_cost=False, **kwargs):
"""
Calculates a random approximation to J by returning J only at a
set of random pixel/voxel locations.
Parameters
----------
s : :class:`peri.states.State`
The state to calculate J for.
params : List
The list of parameter names to calculate the gradient of.
num_inds : Int, optional.
The number of pix/voxels at which to calculate the random
approximation to J. Default is 1000.
include_cost : Bool, optional
Set to True to append a finite-difference measure of the full
cost gradient onto the returned J.
Other Parameters
----------------
All kwargs parameters get passed to s.gradmodel only.
Returns
-------
J : numpy.ndarray
[d, num_inds] array of J, at the given indices.
return_inds : numpy.ndarray or slice
[num_inds] element array or slice(0, None) of the model
indices at which J was evaluated.
"""
start_time = time.time()
tot_pix = s.residuals.size
if num_inds < tot_pix:
inds = np.random.choice(tot_pix, size=num_inds, replace=False)
slicer = None
return_inds = np.sort(inds)
else:
inds = None
return_inds = slice(0, None)
slicer = [slice(0, None)]*len(s.residuals.shape)
if include_cost:
        Jact, ge = s.gradmodel_e(params=params, inds=inds, slicer=slicer,
                                 flat=False, **kwargs)
Jact *= -1
J = [Jact, ge]
else:
J = -s.gradmodel(params=params, inds=inds, slicer=slicer, flat=False,
**kwargs)
CLOG.debug('J:\t%f' % (time.time()-start_time))
return J, return_inds |
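# Hedged usage sketch for `get_rand_Japprox` (state API assumed from the
# surrounding snippets): a random-pixel J gives a cheap approximation to JTJ.
#
#   J, inds = get_rand_Japprox(st, name_globals(st), num_inds=500)
#   JTJ = np.dot(J, J.T)   # [d, d] approximate information matrix |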
def name_globals(s, remove_params=None):
"""
Returns a list of the global parameter names.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to name the globals of.
remove_params : Set or None
A set of unique additional parameters to remove from the globals
list.
Returns
-------
all_params : list
The list of the global parameter names, with each of
remove_params removed.
"""
    all_params = list(s.params)  # copy so removals don't mutate the state's list
for p in s.param_particle(np.arange(s.obj_get_positions().shape[0])):
all_params.remove(p)
if remove_params is not None:
for p in set(remove_params):
all_params.remove(p)
return all_params |
def get_num_px_jtj(s, nparams, decimate=1, max_mem=1e9, min_redundant=20):
"""
Calculates the number of pixels to use for J at a given memory usage.
Tries to pick a number of pixels as (size of image / `decimate`).
However, clips this to a maximum size and minimum size to ensure that
(1) too much memory isn't used and (2) J has enough elements so that
the inverse of JTJ will be well-conditioned.
Parameters
----------
s : :class:`peri.states.State`
The state on which to calculate J.
nparams : Int
The number of parameters that will be included in J.
decimate : Int, optional
The amount to decimate the number of pixels in the image by,
i.e. tries to pick num_px = size of image / decimate.
Default is 1
max_mem : Numeric, optional
The maximum allowed memory, in bytes, for J to occupy at
double-precision. Default is 1e9.
min_redundant : Int, optional
The number of pixels must be at least `min_redundant` *
`nparams`. If not, an error is raised. Default is 20
Returns
-------
num_px : Int
        The number of pixels at which to calculate J.
"""
#1. Max for a given max_mem:
px_mem = int(max_mem // 8 // nparams) #1 float = 8 bytes
#2. num_pix for a given redundancy
px_red = min_redundant*nparams
#3. And # desired for decimation
px_dec = s.residuals.size//decimate
if px_red > px_mem:
raise RuntimeError('Insufficient max_mem for desired redundancy.')
num_px = np.clip(px_dec, px_red, px_mem).astype('int')
return num_px |
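# Worked example of the clipping arithmetic in `get_num_px_jtj` (pure
# numbers, no state needed): with nparams=10 and max_mem=1e9,
# px_mem = 1e9 // 8 // 10 = 12_500_000; with min_redundant=20,
# px_red = 200; a 1e6-pixel image at decimate=1 gives px_dec = 1_000_000,
# so num_px = clip(1_000_000, 200, 12_500_000) = 1_000_000. |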
def vectorize_damping(params, damping=1.0, increase_list=[['psf-', 1e4]]):
"""
Returns a non-constant damping vector, allowing certain parameters to be
more strongly damped than others.
Parameters
----------
params : List
The list of parameter names, in order.
damping : Float
The default value of the damping.
increase_list: List
A nested 2-element list of the params to increase and their
scale factors. All parameters containing the string
increase_list[i][0] are increased by a factor increase_list[i][1].
Returns
-------
damp_vec : np.ndarray
The damping vector to use.
"""
damp_vec = np.ones(len(params)) * damping
for nm, fctr in increase_list:
for a in range(damp_vec.size):
if nm in params[a]:
damp_vec[a] *= fctr
return damp_vec |
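# Worked example for `vectorize_damping` with the default increase_list: any
# parameter whose name contains 'psf-' is damped 1e4 times more strongly.
print(vectorize_damping(['pos-x', 'psf-sigma', 'rad'], damping=2.0))
# -> [2.e+00 2.e+04 2.e+00] |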